From 4a50de92e342a2ed705da8e443110689ca46da87 Mon Sep 17 00:00:00 2001 From: despiegk Date: Wed, 25 Dec 2024 08:40:56 +0100 Subject: [PATCH] the base --- .gitignore | 29 + .vdocignore | 3 + LICENSE | 2 +- aiprompts/starter/0_start_here.md | 62 + .../3_heroscript & params instructions.md | 309 +++ cli/compile.vsh | 76 + cli/hero.v | 102 + doc.vsh | 66 + install.vsh | 25 + lib/code/codemodel/README.md | 38 + lib/code/codemodel/codefile.v | 99 + lib/code/codemodel/example.v | 9 + lib/code/codemodel/model.v | 205 ++ lib/code/codemodel/model_const.v | 42 + lib/code/codemodel/model_import.v | 24 + lib/code/codemodel/module.v | 38 + .../codemodel/templates/comment/comment.py | 0 .../codemodel/templates/comment/comment.v | 1 + .../templates/function/function.v.template | 6 + .../codemodel/templates/function/method.py | 0 .../templates/struct/struct.v.template | 26 + lib/code/codemodel/utils.v | 92 + lib/code/codemodel/vgen.v | 274 ++ lib/code/codeparser/README.md | 179 ++ lib/code/codeparser/parse_example.v | 32 + lib/code/codeparser/parse_example_test.v | 31 + lib/code/codeparser/testdata/file.v | 31 + .../codeparser/testdata/flatdir/anotherfile.v | 32 + .../codeparser/testdata/flatdir/subfile.v | 32 + lib/code/codeparser/vparser.v | 529 ++++ lib/code/codeparser/vparser_test.v | 643 +++++ lib/core/installers/redis.v | 108 + .../installers/template/redis_config.conf | 2320 +++++++++++++++++ lib/core/playbook/action.v | 95 + lib/core/playbook/factory.v | 57 + lib/core/playbook/filter1_test.v | 83 + lib/core/playbook/filter_sort.v | 189 ++ lib/core/playbook/filter_test.v | 148 ++ lib/core/playbook/parser_test.v | 56 + lib/core/playbook/playbook.v | 202 ++ lib/core/playbook/playbook_add.v | 174 ++ lib/core/playbook/playbook_test.v | 84 + lib/core/playbook/readme.md | 129 + lib/core/rootpath/README.md | 48 + lib/core/rootpath/rootpath.v | 72 + lib/core/smartid/sid.v | 118 + lib/data/hjson/README.md | 70 + lib/data/hjson/hjson.v | 136 + lib/osal/cmds.v | 304 +++ lib/osal/downloader.v | 138 + lib/osal/env.v | 79 + lib/osal/env_test.v | 41 + lib/osal/exec.v | 445 ++++ lib/osal/exec_test.v | 78 + lib/osal/exec_to_scriptpath.v | 78 + lib/osal/file.v | 61 + lib/osal/hostsfile/hostsfile.v | 141 + lib/osal/net.v | 108 + lib/osal/net_test.v | 18 + lib/osal/notifier/notifier.v | 28 + lib/osal/notifier/readme.md | 8 + lib/osal/osinstaller/diskmgmt.v | 126 + lib/osal/osinstaller/factory.v | 24 + lib/osal/package.v | 113 + lib/osal/package_test.v | 42 + lib/osal/platform.v | 148 ++ lib/osal/platform_test.v | 9 + lib/osal/ps_tool.v | 230 ++ lib/osal/readme.md | 200 ++ lib/osal/rsync/readme.md | 6 + lib/osal/rsync/rsync.v | 61 + lib/osal/rsync/rsyncd.v | 60 + lib/osal/rsync/templates/rsyncd.conf | 27 + lib/osal/rsync/templates/rsyncd.secrets | 2 + lib/osal/rsync/usermgmt.v | 59 + lib/osal/screen/factory.v | 177 ++ lib/osal/screen/readme.md | 13 + lib/osal/screen/screen.v | 142 + lib/osal/screen/screen_test.v | 17 + lib/osal/sleep.v | 8 + lib/osal/ssh.v | 86 + lib/osal/ssh_key.v | 41 + lib/osal/sshagent/factory.v | 32 + lib/osal/sshagent/get.v | 55 + lib/osal/sshagent/interactive.v | 128 + lib/osal/sshagent/readme.md | 44 + lib/osal/sshagent/sshagent.v | 186 ++ lib/osal/sshagent/sshkey.v | 88 + lib/osal/sshagent/tools.v | 12 + lib/osal/systemd/journalctl.v | 15 + lib/osal/systemd/readme.md | 7 + lib/osal/systemd/systemd.v | 184 ++ lib/osal/systemd/systemd_list.v | 94 + lib/osal/systemd/systemd_process.v | 143 + lib/osal/systemd/systemd_process_test.v | 61 + lib/osal/systemd/templates/service.yaml | 17 + 
lib/osal/tmux/readme.md | 24 + lib/osal/tmux/testdata/tmux_session_test.v | 86 + lib/osal/tmux/testdata/tmux_window_test.v | 67 + lib/osal/tmux/tmux.v | 116 + lib/osal/tmux/tmux_scan.v | 95 + lib/osal/tmux/tmux_session.v | 153 ++ lib/osal/tmux/tmux_test.v | 118 + lib/osal/tmux/tmux_window.v | 257 ++ lib/osal/ufw/model.v | 59 + lib/osal/ufw/play.v | 46 + lib/osal/ufw/readme.md | 6 + lib/osal/ufw/ufw.v | 91 + lib/osal/ufw/ufw_list.v | 94 + lib/osal/users.v | 45 + lib/osal/utils.v | 41 + lib/osal/zinit/readme.md | 50 + lib/osal/zinit/rpc.v | 200 ++ lib/osal/zinit/rpc_test.v | 60 + lib/osal/zinit/zinit.v | 160 ++ lib/osal/zinit/zinit/service_1.yaml | 1 + lib/osal/zinit/zinit/service_2.yaml | 3 + lib/osal/zinit/zinit_factory.v | 36 + lib/osal/zinit/zinit_stateless.v | 134 + lib/osal/zinit/zprocess.v | 290 +++ lib/osal/zinit/zprocess_load.v | 80 + lib/ui/console/array.v | 51 + lib/ui/console/chalk.v | 234 ++ lib/ui/console/console.v | 103 + lib/ui/console/dropdown.v | 180 ++ lib/ui/console/factory.v | 79 + lib/ui/console/question.v | 53 + lib/ui/console/readme.md | 48 + lib/ui/console/time_date.v | 11 + lib/ui/console/yesno.v | 51 + lib/ui/factory.v | 27 + lib/ui/generic/dropdown.v | 49 + lib/ui/generic/editor.v | 26 + lib/ui/generic/info.v | 37 + lib/ui/generic/log.v | 30 + lib/ui/generic/model.v | 19 + lib/ui/generic/payment.v | 18 + lib/ui/generic/question.v | 22 + lib/ui/generic/yesno.v | 19 + lib/ui/logger/logger.v | 52 + lib/ui/readme.md | 109 + lib/ui/telegram/README.md | 55 + lib/ui/telegram/channel.v | 52 + lib/ui/telegram/client/client.v | 126 + lib/ui/telegram/client_test.v | 74 + lib/ui/telegram/flow_supervisor.v | 27 + lib/ui/telegram/questions.v | 114 + lib/ui/template/console.v | 7 + lib/ui/template/dropdown.v | 31 + lib/ui/template/factory.v | 11 + lib/ui/template/question.v | 17 + lib/ui/template/time_date.v | 11 + lib/ui/template/yesno.v | 14 + lib/ui/uimodel/uimodel.v | 116 + manual/best_practices/osal/silence.md | 21 + manual/best_practices/scripts/scripts.md | 61 + manual/best_practices/scripts/shebang.md | 14 + .../best_practices/using_args_in_function.md | 32 + manual/core/base.md | 87 + manual/core/concepts/global_ids.md | 0 manual/core/concepts/name_registry.md | 75 + manual/core/concepts/objects.md | 32 + manual/core/concepts/sid.md | 69 + manual/core/context.md | 100 + manual/core/context_session_job.md | 26 + manual/core/play.md | 35 + manual/core/session.md | 82 + manual/documentation/docextractor.md | 17 + v_install.sh | 331 +++ 169 files changed, 16476 insertions(+), 1 deletion(-) create mode 100644 .gitignore create mode 100644 .vdocignore create mode 100644 aiprompts/starter/0_start_here.md create mode 100644 aiprompts/starter/3_heroscript & params instructions.md create mode 100755 cli/compile.vsh create mode 100644 cli/hero.v create mode 100644 doc.vsh create mode 100755 install.vsh create mode 100644 lib/code/codemodel/README.md create mode 100644 lib/code/codemodel/codefile.v create mode 100644 lib/code/codemodel/example.v create mode 100644 lib/code/codemodel/model.v create mode 100644 lib/code/codemodel/model_const.v create mode 100644 lib/code/codemodel/model_import.v create mode 100644 lib/code/codemodel/module.v create mode 100644 lib/code/codemodel/templates/comment/comment.py create mode 100644 lib/code/codemodel/templates/comment/comment.v create mode 100644 lib/code/codemodel/templates/function/function.v.template create mode 100644 lib/code/codemodel/templates/function/method.py create mode 100644 
lib/code/codemodel/templates/struct/struct.v.template create mode 100644 lib/code/codemodel/utils.v create mode 100644 lib/code/codemodel/vgen.v create mode 100644 lib/code/codeparser/README.md create mode 100644 lib/code/codeparser/parse_example.v create mode 100644 lib/code/codeparser/parse_example_test.v create mode 100644 lib/code/codeparser/testdata/file.v create mode 100644 lib/code/codeparser/testdata/flatdir/anotherfile.v create mode 100644 lib/code/codeparser/testdata/flatdir/subfile.v create mode 100644 lib/code/codeparser/vparser.v create mode 100644 lib/code/codeparser/vparser_test.v create mode 100644 lib/core/installers/redis.v create mode 100644 lib/core/installers/template/redis_config.conf create mode 100644 lib/core/playbook/action.v create mode 100644 lib/core/playbook/factory.v create mode 100644 lib/core/playbook/filter1_test.v create mode 100644 lib/core/playbook/filter_sort.v create mode 100644 lib/core/playbook/filter_test.v create mode 100644 lib/core/playbook/parser_test.v create mode 100644 lib/core/playbook/playbook.v create mode 100644 lib/core/playbook/playbook_add.v create mode 100644 lib/core/playbook/playbook_test.v create mode 100644 lib/core/playbook/readme.md create mode 100644 lib/core/rootpath/README.md create mode 100644 lib/core/rootpath/rootpath.v create mode 100644 lib/core/smartid/sid.v create mode 100644 lib/data/hjson/README.md create mode 100644 lib/data/hjson/hjson.v create mode 100644 lib/osal/cmds.v create mode 100644 lib/osal/downloader.v create mode 100644 lib/osal/env.v create mode 100644 lib/osal/env_test.v create mode 100644 lib/osal/exec.v create mode 100644 lib/osal/exec_test.v create mode 100644 lib/osal/exec_to_scriptpath.v create mode 100644 lib/osal/file.v create mode 100644 lib/osal/hostsfile/hostsfile.v create mode 100644 lib/osal/net.v create mode 100644 lib/osal/net_test.v create mode 100644 lib/osal/notifier/notifier.v create mode 100644 lib/osal/notifier/readme.md create mode 100644 lib/osal/osinstaller/diskmgmt.v create mode 100644 lib/osal/osinstaller/factory.v create mode 100644 lib/osal/package.v create mode 100644 lib/osal/package_test.v create mode 100644 lib/osal/platform.v create mode 100644 lib/osal/platform_test.v create mode 100644 lib/osal/ps_tool.v create mode 100644 lib/osal/readme.md create mode 100644 lib/osal/rsync/readme.md create mode 100644 lib/osal/rsync/rsync.v create mode 100644 lib/osal/rsync/rsyncd.v create mode 100644 lib/osal/rsync/templates/rsyncd.conf create mode 100644 lib/osal/rsync/templates/rsyncd.secrets create mode 100644 lib/osal/rsync/usermgmt.v create mode 100644 lib/osal/screen/factory.v create mode 100644 lib/osal/screen/readme.md create mode 100644 lib/osal/screen/screen.v create mode 100644 lib/osal/screen/screen_test.v create mode 100644 lib/osal/sleep.v create mode 100644 lib/osal/ssh.v create mode 100644 lib/osal/ssh_key.v create mode 100644 lib/osal/sshagent/factory.v create mode 100644 lib/osal/sshagent/get.v create mode 100644 lib/osal/sshagent/interactive.v create mode 100644 lib/osal/sshagent/readme.md create mode 100644 lib/osal/sshagent/sshagent.v create mode 100644 lib/osal/sshagent/sshkey.v create mode 100644 lib/osal/sshagent/tools.v create mode 100644 lib/osal/systemd/journalctl.v create mode 100644 lib/osal/systemd/readme.md create mode 100644 lib/osal/systemd/systemd.v create mode 100644 lib/osal/systemd/systemd_list.v create mode 100644 lib/osal/systemd/systemd_process.v create mode 100644 lib/osal/systemd/systemd_process_test.v create mode 100644 
lib/osal/systemd/templates/service.yaml create mode 100644 lib/osal/tmux/readme.md create mode 100644 lib/osal/tmux/testdata/tmux_session_test.v create mode 100644 lib/osal/tmux/testdata/tmux_window_test.v create mode 100644 lib/osal/tmux/tmux.v create mode 100644 lib/osal/tmux/tmux_scan.v create mode 100644 lib/osal/tmux/tmux_session.v create mode 100644 lib/osal/tmux/tmux_test.v create mode 100644 lib/osal/tmux/tmux_window.v create mode 100644 lib/osal/ufw/model.v create mode 100644 lib/osal/ufw/play.v create mode 100644 lib/osal/ufw/readme.md create mode 100644 lib/osal/ufw/ufw.v create mode 100644 lib/osal/ufw/ufw_list.v create mode 100644 lib/osal/users.v create mode 100644 lib/osal/utils.v create mode 100644 lib/osal/zinit/readme.md create mode 100644 lib/osal/zinit/rpc.v create mode 100644 lib/osal/zinit/rpc_test.v create mode 100644 lib/osal/zinit/zinit.v create mode 100644 lib/osal/zinit/zinit/service_1.yaml create mode 100644 lib/osal/zinit/zinit/service_2.yaml create mode 100644 lib/osal/zinit/zinit_factory.v create mode 100644 lib/osal/zinit/zinit_stateless.v create mode 100644 lib/osal/zinit/zprocess.v create mode 100644 lib/osal/zinit/zprocess_load.v create mode 100644 lib/ui/console/array.v create mode 100644 lib/ui/console/chalk.v create mode 100644 lib/ui/console/console.v create mode 100644 lib/ui/console/dropdown.v create mode 100644 lib/ui/console/factory.v create mode 100644 lib/ui/console/question.v create mode 100644 lib/ui/console/readme.md create mode 100644 lib/ui/console/time_date.v create mode 100644 lib/ui/console/yesno.v create mode 100644 lib/ui/factory.v create mode 100644 lib/ui/generic/dropdown.v create mode 100644 lib/ui/generic/editor.v create mode 100644 lib/ui/generic/info.v create mode 100644 lib/ui/generic/log.v create mode 100644 lib/ui/generic/model.v create mode 100644 lib/ui/generic/payment.v create mode 100644 lib/ui/generic/question.v create mode 100644 lib/ui/generic/yesno.v create mode 100644 lib/ui/logger/logger.v create mode 100644 lib/ui/readme.md create mode 100644 lib/ui/telegram/README.md create mode 100644 lib/ui/telegram/channel.v create mode 100644 lib/ui/telegram/client/client.v create mode 100644 lib/ui/telegram/client_test.v create mode 100644 lib/ui/telegram/flow_supervisor.v create mode 100644 lib/ui/telegram/questions.v create mode 100644 lib/ui/template/console.v create mode 100644 lib/ui/template/dropdown.v create mode 100644 lib/ui/template/factory.v create mode 100644 lib/ui/template/question.v create mode 100644 lib/ui/template/time_date.v create mode 100644 lib/ui/template/yesno.v create mode 100644 lib/ui/uimodel/uimodel.v create mode 100644 manual/best_practices/osal/silence.md create mode 100644 manual/best_practices/scripts/scripts.md create mode 100644 manual/best_practices/scripts/shebang.md create mode 100644 manual/best_practices/using_args_in_function.md create mode 100644 manual/core/base.md create mode 100644 manual/core/concepts/global_ids.md create mode 100644 manual/core/concepts/name_registry.md create mode 100644 manual/core/concepts/objects.md create mode 100644 manual/core/concepts/sid.md create mode 100644 manual/core/context.md create mode 100644 manual/core/context_session_job.md create mode 100644 manual/core/play.md create mode 100644 manual/core/session.md create mode 100644 manual/documentation/docextractor.md create mode 100755 v_install.sh diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..c6a49afd --- /dev/null +++ b/.gitignore @@ -0,0 +1,29 @@ + +*dSYM/ +.vmodules/ 
+.vscode
+_docs/
+vls.*
+vls.log
+node_modules/
+docs/
+photonwrapper.so
+x
+.env
+myexample
+myexample2
+remote_update_compile_hero
+remote_install_v_hero
+zdb-data
+zdb-index
+.idea/
+.venv/
+.trunk/
+.DS_Store
+dump.rdb
+output/
+*.db
+.stellar
+vdocs/
+data.ms/
diff --git a/.vdocignore b/.vdocignore
new file mode 100644
index 00000000..360ac450
--- /dev/null
+++ b/.vdocignore
@@ -0,0 +1,3 @@
+examples/*
+
+
diff --git a/LICENSE b/LICENSE
index 261eeb9e..fe9d2a06 100644
--- a/LICENSE
+++ b/LICENSE
@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-      Copyright [yyyy] [name of copyright owner]
+      Copyright Incubaid BVBA Belgium, Threefold NV Belgium
 
       Licensed under the Apache License, Version 2.0 (the "License");
       you may not use this file except in compliance with the License.
diff --git a/aiprompts/starter/0_start_here.md b/aiprompts/starter/0_start_here.md
new file mode 100644
index 00000000..628e4c61
--- /dev/null
+++ b/aiprompts/starter/0_start_here.md
@@ -0,0 +1,62 @@
+
+## instructions for code generation
+
+> when I generate code, the following instructions can never be overruled; they are the basics
+
+- do not try to fix files which end with _.v, because these are generated files
+
+
+## instructions for vlang scripts
+
+when I generate vlang scripts I will always use the .vsh extension and use the following as the first line:
+
+```
+#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+```
+
+- a .vsh file is a V shell script and can be executed as is, no need to use v ...
+- in a .vsh file there is no need for a main() function
+- these scripts can be used for examples or instruction scripts, e.g. an install script
+
+## to do argument parsing, use the following example
+
+```v
+#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+
+import os
+import flag
+
+mut fp := flag.new_flag_parser(os.args)
+fp.application('compile.vsh')
+fp.version('v0.1.0')
+fp.description('Compile hero binary in debug or production mode')
+fp.skip_executable()
+
+prod_mode := fp.bool('prod', `p`, false, 'Build production version (optimized)')
+help_requested := fp.bool('help', `h`, false, 'Show help message')
+
+if help_requested {
+	println(fp.usage())
+	exit(0)
+}
+
+additional_args := fp.finalize() or {
+	eprintln(err)
+	println(fp.usage())
+	exit(1)
+}
+
+```
+
+
+## when creating a test script
+
+instruct the user to run it as
+
+```bash
+v -enable-globals test ~/code/github/freeflowuniverse/herolib/lib/osal/package_test.v
+```
+
+- use ~ so it works across all machines
+- always use -enable-globals
+
diff --git a/aiprompts/starter/3_heroscript & params instructions.md b/aiprompts/starter/3_heroscript & params instructions.md
new file mode 100644
index 00000000..fea8cefe
--- /dev/null
+++ b/aiprompts/starter/3_heroscript & params instructions.md
@@ -0,0 +1,309 @@
+# how to work with heroscript in vlang
+
+## heroscript
+
+Heroscript is our small scripting language, which has the following structure.
+
+an example of a heroscript is:
+
+```heroscript
+
+!!dagu.script_define
+    name: 'test_dag'
+    homedir:''
+    title:'a title'
+    reset:1
+    start:true //true or 1 is the same
+    colors: 'green,red,purple' //lists are comma separated
+    description: '
+        a description can be multiline
+
+        like this
+        '
+
+
+!!dagu.add_step
+    dag: 'test_dag'
+    name: 'hello_world'
+    command: 'echo hello world'
+
+!!dagu.add_step
+    dag: 'test_dag'
+    name: 'last_step'
+    command: 'echo last step'
+
+
+```
+
+Notice how:
+- every action starts with !!
+  - the first part is the actor, e.g. dagu in this case
+  - the second part is the action name
+- multiline values are supported; see the description field
+
+## how to process heroscript in Vlang
+
+- heroscript can be converted to a struct
+- the methods available to get the params are in the 'params get methods' section further in this doc
+
+
+```vlang
+
+fn test_play_dagu() ! {
+	mut plbook := playbook.new(text: thetext_from_above)!
+	play_dagu(mut plbook)! //see below in the vlang block; that is where it all happens
+}
+
+
+pub fn play_dagu(mut plbook playbook.PlayBook) ! {
+
+	//all actions are !!$actor.$actionname; in the example above the actor is dagu. we check with the filter if the actor exists; if not, we return
+	dagu_actions := plbook.find(filter: 'dagu.')!
+	if dagu_actions.len == 0 {
+		return
+	}
+	play_dagu_basic(mut plbook)!
+}
+
+pub struct DaguScript {
+pub mut:
+	name        string
+	homedir     string
+	title       string
+	reset       bool
+	start       bool
+	colors      []string
+	description string
+}
+
+// play_dagu plays the dagu play commands
+pub fn play_dagu_basic(mut plbook playbook.PlayBook) ! {
+
+	//now find the specific ones for dagu.script_define
+	mut actions := plbook.find(filter: 'dagu.script_define')!
+
+	if actions.len > 0 {
+		for myaction in actions {
+			mut p := myaction.params //get the params object from the action object; it can then be processed using the param getters
+			mut obj := DaguScript{
+				//INFO: all details about the get methods can be found in the 'params get methods' section
+				name:        p.get('name')! //will give an error if it does not exist
+				homedir:     p.get('homedir')!
+				title:       p.get_default('title', 'My Hero DAG')! //uses a default if not set
+				reset:       p.get_default_false('reset')
+				start:       p.get_default_true('start')
+				colors:      p.get_list('colors')
+				description: p.get_default('description', '')!
+			}
+			...
+		}
+	}
+
+	//there can be more actions, which will have other filters
+
+}
+
+```
+
+## params get methods (param getters)
+
+```vlang
+
+fn (params &Params) exists(key_ string) bool
+
+//check if an arg exists (an arg is just a value in the string, e.g. red, not value:something)
+fn (params &Params) exists_arg(key_ string) bool
+
+//see if the kwarg with the key exists; if yes, return as a trimmed string
+fn (params &Params) get(key_ string) !string
+
+//return the arg with nr, 0 is the first
+fn (params &Params) get_arg(nr int) !string
+
+//return arg; if nr is larger than the amount of args, return the defval
+fn (params &Params) get_arg_default(nr int, defval string) !string
+
+fn (params &Params) get_default(key string, defval string) !string
+
+fn (params &Params) get_default_false(key string) bool
+
+fn (params &Params) get_default_true(key string) bool
+
+fn (params &Params) get_float(key string) !f64
+
+fn (params &Params) get_float_default(key string, defval f64) !f64
+
+fn (params &Params) get_from_hashmap(key_ string, defval string, hashmap map[string]string) !string
+
+fn (params &Params) get_int(key string) !int
+
+fn (params &Params) get_int_default(key string, defval int) !int
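+
+//example usage with illustrative keys ('port' and its default are assumptions); p is a Params instance obtained from an action, as shown above:
+//  name := p.get('name')!
+//  port := p.get_int_default('port', 8080)!
+//  colors := p.get_list('colors')!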
+
+//Looks for a list of strings in the parameters. ',' is used as the delimiter for the list
+fn (params &Params) get_list(key string) ![]string
+
+fn (params &Params) get_list_default(key string, def []string) ![]string
+
+fn (params &Params) get_list_f32(key string) ![]f32
+
+fn (params &Params) get_list_f32_default(key string, def []f32) []f32
+
+fn (params &Params) get_list_f64(key string) ![]f64
+
+fn (params &Params) get_list_f64_default(key string, def []f64) []f64
+
+fn (params &Params) get_list_i16(key string) ![]i16
+
+fn (params &Params) get_list_i16_default(key string, def []i16) []i16
+
+fn (params &Params) get_list_i64(key string) ![]i64
+
+fn (params &Params) get_list_i64_default(key string, def []i64) []i64
+
+fn (params &Params) get_list_i8(key string) ![]i8
+
+fn (params &Params) get_list_i8_default(key string, def []i8) []i8
+
+fn (params &Params) get_list_int(key string) ![]int
+
+fn (params &Params) get_list_int_default(key string, def []int) []int
+
+fn (params &Params) get_list_namefix(key string) ![]string
+
+fn (params &Params) get_list_namefix_default(key string, def []string) ![]string
+
+fn (params &Params) get_list_u16(key string) ![]u16
+
+fn (params &Params) get_list_u16_default(key string, def []u16) []u16
+
+fn (params &Params) get_list_u32(key string) ![]u32
+
+fn (params &Params) get_list_u32_default(key string, def []u32) []u32
+
+fn (params &Params) get_list_u64(key string) ![]u64
+
+fn (params &Params) get_list_u64_default(key string, def []u64) []u64
+
+fn (params &Params) get_list_u8(key string) ![]u8
+
+fn (params &Params) get_list_u8_default(key string, def []u8) []u8
+
+fn (params &Params) get_map() map[string]string
+
+fn (params &Params) get_path(key string) !string
+
+fn (params &Params) get_path_create(key string) !string
+
+fn (params &Params) get_percentage(key string) !f64
+
+fn (params &Params) get_percentage_default(key string, defval string) !f64
+
+//convert GB, MB, KB to bytes, e.g. 10 GB becomes the number of bytes as u64
+fn (params &Params) get_storagecapacity_in_bytes(key string) !u64
+
+fn (params &Params) get_storagecapacity_in_bytes_default(key string, defval u64) !u64
+
+fn (params &Params) get_storagecapacity_in_gigabytes(key string) !u64
+
+//get an OurTime object from a time string; the input can be either relative or absolute
+fn (params &Params) get_time(key string) !ourtime.OurTime
+
+fn (params &Params) get_time_default(key string, defval ourtime.OurTime) !ourtime.OurTime
+
+fn (params &Params) get_time_interval(key string) !Duration
+
+fn (params &Params) get_timestamp(key string) !Duration
+
+fn (params &Params) get_timestamp_default(key string, defval Duration) !Duration
+
+fn (params &Params) get_u32(key string) !u32
+
+fn (params &Params) get_u32_default(key string, defval u32) !u32
+
+fn (params &Params) get_u64(key string) !u64
+
+fn (params &Params) get_u64_default(key string, defval u64) !u64
+
+fn (params &Params) get_u8(key string) !u8
+
+fn (params &Params) get_u8_default(key string, defval u8) !u8
+
+```
+
+## how internally a heroscript gets parsed for params
+
+- an example to show how a heroscript gets parsed into an action with params
+- the params are part of the action object
+
+example text to parse (heroscript):
+
+```heroscript
+id:a1 name6:aaaaa
+name:'need to do something 1'
+description:
+	'
+	## markdown works in it
+	description can be multiline
+	lets see what happens
+
+	- a
+	- something else
+
+	### subtitle
+	'
+
+name2: test
+name3: hi
+name10:'this is with space' name11:aaa11
+
+name4: 'aaa'
+
+//somecomment
+name5: 'aab'
+```
+
+the params are part of the action and are represented as follows for the above:
+
+```vlang
+Params{
+	params: [Param{
+		key: 'id'
+		value: 'a1'
+	}, Param{
+		key: 'name6'
+		value: 'aaaaa'
+	}, Param{
+		key: 'name'
+		value: 'need to do something 1'
+	}, Param{
+		key: 'description'
+		value: '## markdown works in it
+
+		description can be multiline
+		lets see what happens
+
+		- a
+		- something else
+
+		### subtitle
+		'
+	}, Param{
+		key: 'name2'
+		value: 'test'
+	}, Param{
+		key: 'name3'
+		value: 'hi'
+	}, Param{
+		key: 'name10'
+		value: 'this is with space'
+	}, Param{
+		key: 'name11'
+		value: 'aaa11'
+	}, Param{
+		key: 'name4'
+		value: 'aaa'
+	}, Param{
+		key: 'name5'
+		value: 'aab'
+	}]
+}
+```
\ No newline at end of file
diff --git a/cli/compile.vsh b/cli/compile.vsh
new file mode 100755
index 00000000..d76e5ad7
--- /dev/null
+++ b/cli/compile.vsh
@@ -0,0 +1,76 @@
+#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+
+import os
+import flag
+
+mut fp := flag.new_flag_parser(os.args)
+fp.application('compile.vsh')
+fp.version('v0.1.0')
+fp.description('Compile hero binary in debug or production mode')
+fp.skip_executable()
+
+prod_mode := fp.bool('prod', `p`, false, 'Build production version (optimized)')
+help_requested := fp.bool('help', `h`, false, 'Show help message')
+
+if help_requested {
+	println(fp.usage())
+	exit(0)
+}
+
+additional_args := fp.finalize() or {
+	eprintln(err)
+	println(fp.usage())
+	exit(1)
+}
+
+if additional_args.len > 0 {
+	eprintln('Unexpected arguments: ${additional_args.join(' ')}')
+	println(fp.usage())
+	exit(1)
+}
+
+// Change to the hero directory
+hero_dir := os.join_path(os.home_dir(), 'code/github/freeflowuniverse/crystallib/cli/hero')
+os.chdir(hero_dir) or { panic('Failed to change directory to ${hero_dir}: ${err}') }
+
+// Set HEROPATH based on OS
+mut heropath := '/usr/local/bin/hero'
+if os.user_os() == 'macos' {
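+	// use a user-writable install path on macOS (assumption: /usr/local/bin typically requires sudo)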
+	heropath = os.join_path(os.home_dir(), 'hero/bin/hero')
+}
+
+// Set compilation command based on OS and mode
+compile_cmd := if os.user_os() == 'macos' {
+	if prod_mode {
+		'v -enable-globals -w -n -prod hero.v'
+	} else {
+		'v -w -cg -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals hero.v'
+	}
+} else {
+	if prod_mode {
+		'v -cg -enable-globals -parallel-cc -w -n hero.v'
+	} else {
+		'v -cg -enable-globals -w -n hero.v'
+	}
+}
+
+println('Building in ${if prod_mode { 'production' } else { 'debug' }} mode...')
+
+if os.system(compile_cmd) != 0 {
+	panic('Failed to compile hero.v with command: ${compile_cmd}')
+}
+
+// Make executable
+os.chmod('hero', 0o755) or { panic('Failed to make hero binary executable: ${err}') }
+
+// Ensure destination directory exists
+os.mkdir_all(os.dir(heropath)) or { panic('Failed to create directory ${os.dir(heropath)}: ${err}') }
+
+// Copy to destination paths
+os.cp('hero', heropath) or { panic('Failed to copy hero binary to ${heropath}: ${err}') }
+os.cp('hero', '/tmp/hero') or { panic('Failed to copy hero binary to /tmp/hero: ${err}') }
+
+// Clean up
+os.rm('hero') or { panic('Failed to remove temporary hero binary: ${err}') }
+
+println('**COMPILE OK**')
diff --git a/cli/hero.v b/cli/hero.v
new file mode 100644
index 00000000..9c5b8683
--- /dev/null
+++ b/cli/hero.v
@@ -0,0 +1,102 @@
+module main
+
+import os
+import cli { Command, Flag }
+import freeflowuniverse.herolib.hero.cmds
+// import freeflowuniverse.herolib.hero.publishing
+import freeflowuniverse.herolib.installers.base
+import freeflowuniverse.herolib.ui.console
+import freeflowuniverse.herolib.ui
+import freeflowuniverse.herolib.osal
+import freeflowuniverse.herolib.core.playbook
+import freeflowuniverse.herolib.core.playcmds
+
+fn playcmds_do(path string) ! {
+	mut plbook := playbook.new(path: path)!
+	playcmds.run(mut plbook, false)!
+}
+
+fn do() ! {
+	if os.args.len == 2 {
+		mypath := os.args[1]
+		if mypath.to_lower().ends_with('.hero') {
+			// hero was called from a file
+			playcmds_do(mypath)!
+			return
+		}
+	}
+
+	mut cmd := Command{
+		name:        'hero'
+		description: 'Your HERO toolset.'
+		version:     '2.0.0'
+	}
+
+	cmd.add_flag(Flag{
+		flag:        .string
+		name:        'url'
+		abbrev:      'u'
+		global:      true
+		description: 'url of playbook'
+	})
+
+	// herocmds.cmd_run_add_flags(mut cmd)
+
+	mut toinstall := false
+	if !osal.cmd_exists('mc') || !osal.cmd_exists('redis-cli') {
+		toinstall = true
+	}
+
+	if osal.is_osx() {
+		if !osal.cmd_exists('brew') {
+			console.clear()
+			mut myui := ui.new()!
+			toinstall = myui.ask_yesno(
+				question: "we didn't find brew installed; is it OK to install it for you?"
+				default:  true
+			)!
+			if toinstall {
+				base.install()!
+			}
+			console.clear()
+			console.print_stderr('Brew installed, please follow the instructions and run hero ... again.')
+			exit(0)
+		}
+	} else {
+		if toinstall {
+			base.install()!
+		}
+	}
+
+	base.redis_install()!
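+
+	// the subcommand registrations below are disabled for now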
+	//herocmds.cmd_bootstrap(mut cmd)
+	// herocmds.cmd_run(mut cmd)
+	// herocmds.cmd_git(mut cmd)
+	// herocmds.cmd_init(mut cmd)
+	// herocmds.cmd_imagedownsize(mut cmd)
+	// herocmds.cmd_biztools(mut cmd)
+	// herocmds.cmd_gen(mut cmd)
+	// herocmds.cmd_sshagent(mut cmd)
+	// herocmds.cmd_installers(mut cmd)
+	// herocmds.cmd_configure(mut cmd)
+	// herocmds.cmd_postgres(mut cmd)
+	// herocmds.cmd_mdbook(mut cmd)
+	// herocmds.cmd_luadns(mut cmd)
+	//herocmds.cmd_caddy(mut cmd)
+	//herocmds.cmd_zola(mut cmd)
+	// herocmds.cmd_juggler(mut cmd)
+	// herocmds.cmd_generator(mut cmd)
+	// herocmds.cmd_docsorter(mut cmd)
+	// cmd.add_command(publishing.cmd_publisher(pre_func))
+	cmd.setup()
+	cmd.parse(os.args)
+}
+
+fn main() {
+	do() or { panic(err) }
+}
+
+// pre_func is disabled together with its only caller above; it references the
+// herocmds module, which is not imported in this commit
+// fn pre_func(cmd Command) ! {
+// 	herocmds.plbook_run(cmd)!
+// }
diff --git a/doc.vsh b/doc.vsh
new file mode 100644
index 00000000..45435545
--- /dev/null
+++ b/doc.vsh
@@ -0,0 +1,66 @@
+#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+
+import os
+
+abs_dir_of_script := dir(@FILE)
+
+// Format code; os.system returns the command's exit code (not a Result, so no `or` block)
+println('Formatting code...')
+if os.system('v fmt -w ${abs_dir_of_script}/examples') != 0 {
+	eprintln('Warning: Failed to format examples')
+}
+if os.system('v fmt -w ${abs_dir_of_script}/herolib') != 0 {
+	eprintln('Warning: Failed to format herolib')
+}
+
+// Clean existing docs
+println('Cleaning existing documentation...')
+os.rmdir_all('${abs_dir_of_script}/docs') or {}
+
+herolib_path := os.join_path(abs_dir_of_script, 'herolib')
+os.chdir(herolib_path) or {
+	panic('Failed to change directory to herolib: ${err}')
+}
+
+os.rmdir_all('_docs') or {}
+os.rmdir_all('docs') or {}
+
+// Generate HTML documentation
+println('Generating HTML documentation...')
+if os.system('v doc -m -f html . -readme -comments -no-timestamp') != 0 {
+	panic('Failed to generate HTML documentation')
+}
+
+// Move docs to parent directory
+os.rename('_docs', '${abs_dir_of_script}/docs') or {
+	panic('Failed to move documentation to parent directory: ${err}')
+}
+
+// Generate Markdown documentation
+println('Generating Markdown documentation...')
+os.rmdir_all('vdocs') or {}
+os.mkdir_all('vdocs/v') or {
+	panic('Failed to create v docs directory: ${err}')
+}
+os.mkdir_all('vdocs/crystal') or {
+	panic('Failed to create crystal docs directory: ${err}')
+}
+
+if os.system('v doc -m -no-color -f md -o vdocs/v/') != 0 {
+	panic('Failed to generate V markdown documentation')
+}
+if os.system('v doc -m -no-color -f md -o vdocs/crystal/') != 0 {
+	panic('Failed to generate Crystal markdown documentation')
+}
+
+// Open documentation in browser on non-Linux systems
+$if !linux {
+	os.chdir(abs_dir_of_script) or {
+		panic('Failed to change directory: ${err}')
+	}
+	if os.system('open docs/index.html') != 0 {
+		eprintln('Warning: Failed to open documentation in browser')
+	}
+}
+
+println('Documentation generation completed successfully!')
diff --git a/install.vsh b/install.vsh
new file mode 100755
index 00000000..30da0584
--- /dev/null
+++ b/install.vsh
@@ -0,0 +1,25 @@
+#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run
+
+import os
+
+abs_dir_of_script := dir(@FILE)
+
+// Reset existing symlinks
+println('Resetting all symlinks...')
+os.rmdir_all('${os.home_dir()}/.vmodules/freeflowuniverse/herolib') or {}
+os.rmdir_all('${os.home_dir()}/.vmodules/vlang/testing') or {}
+os.rm('/usr/local/bin/herolib') or {}
+
+// Create necessary directories
+os.mkdir_all('${os.home_dir()}/.vmodules/freeflowuniverse') or {
+	panic('Failed to create directory ~/.vmodules/freeflowuniverse: ${err}')
+}
+
+// Create new symlinks
+os.symlink('${abs_dir_of_script}/herolib', '${os.home_dir()}/.vmodules/freeflowuniverse/herolib') or {
+	panic('Failed to create herolib symlink: ${err}')
+}
+
+println('Herolib installation completed successfully!')
diff --git a/lib/code/codemodel/README.md b/lib/code/codemodel/README.md
new file mode 100644
index 00000000..41281034
--- /dev/null
+++ b/lib/code/codemodel/README.md
@@ -0,0 +1,38 @@
+# Code Model
+
+A set of models that represent code, such as structs and functions. The motivation behind this module is to provide a more generic and lighter alternative to the v.ast code models, one that can be used for code parsing and code generation across multiple languages.
+
+## Using Codemodel
+
+While the models in this module can be used in any domain, they are used extensively in the modules [codeparser](../codeparser/) and codegen (under development). Below are examples of how codemodel can be used for parsing and generating code.
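+
+## Code generation with codemodel
+
+A minimal generation sketch (the import path follows this repository's layout and is an assumption; the models and `vgen` function are the ones defined in this module):
+
+```v
+import freeflowuniverse.herolib.code.codemodel
+
+// build a struct model by hand and render it as V source
+greeting := codemodel.Struct{
+	name:   'Greeting'
+	is_pub: true
+	fields: [
+		codemodel.StructField{
+			name: 'message'
+			typ:  codemodel.Type{
+				symbol: 'string'
+			}
+		},
+	]
+}
+println(codemodel.vgen([codemodel.CodeItem(greeting)]))
+```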
+## Code parsing with codemodel
+
+As shown in the example below, the code models returned by the parser can be used to infer information about the parsed code:
+
+```v
+code := codeparser.parse('somedir') // code is a list of code models
+
+num_functions := code.filter(it is Function).len
+structs := code.filter(it is Struct)
+println('This directory has ${num_functions} functions')
+println('The directory has the structs: ${structs.map(it.name)}')
+
+```
+
+or the models can be used as intermediate structures to serialize code into some other format:
+
+```v
+mut code_md := ''
+
+// describes the structs in markdown format
+for structure in structs {
+	code_md += '# ${structure.name}'
+	code_md += 'Type: ${structure.get_type_symbol()}'
+	code_md += '## Fields:'
+	for field in structure.fields {
+		code_md += '- ${field.name}'
+	}
+}
+```
+
+The [openrpc/docgen](../openrpc/docgen/) module demonstrates a good use case, where code models are serialized into JSON schemas to generate an OpenRPC description document from a client written in V.
\ No newline at end of file
diff --git a/lib/code/codemodel/codefile.v b/lib/code/codemodel/codefile.v
new file mode 100644
index 00000000..6d7acbbe
--- /dev/null
+++ b/lib/code/codemodel/codefile.v
@@ -0,0 +1,99 @@
+module codemodel
+
+import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.core.pathlib
+import os
+
+pub struct CodeFile {
+pub mut:
+	name    string
+	mod     string
+	imports []Import
+	consts  []Const
+	items   []CodeItem
+	content string
+}
+
+pub fn new_file(config CodeFile) CodeFile {
+	return CodeFile{
+		...config
+		mod:   texttools.name_fix(config.mod)
+		items: config.items
+	}
+}
+
+pub fn (mut file CodeFile) add_import(import_ Import) ! {
+	for mut i in file.imports {
+		if i.mod == import_.mod {
+			i.add_types(import_.types)
+			return
+		}
+	}
+	file.imports << import_
+}
+
+pub fn (code CodeFile) write_v(path string, options WriteOptions) ! {
+	filename := '${options.prefix}${texttools.name_fix(code.name)}.v'
+	mut filepath := pathlib.get('${path}/${filename}')
+
+	if !options.overwrite && filepath.exists() {
+		return
+	}
+
+	imports_str := code.imports.map(it.vgen()).join_lines()
+
+	code_str := if code.content != '' {
+		code.content
+	} else {
+		vgen(code.items)
+	}
+
+	consts_str := if code.consts.len > 1 {
+		stmts := code.consts.map('${it.name} = ${it.value}')
+		'\nconst(\n${stmts.join('\n')}\n)\n'
+	} else if code.consts.len == 1 {
+		'\nconst ${code.consts[0].name} = ${code.consts[0].value}\n'
+	} else {
+		''
+	}
+
+	mut file := pathlib.get_file(
+		path:   filepath.path
+		create: true
+	)!
+	file.write('module ${code.mod}\n${imports_str}\n${consts_str}\n${code_str}')!
+	if options.format {
+		os.execute('v fmt -w ${file.path}')
+	}
+}
+
+pub fn (file CodeFile) get_function(name string) ?Function {
+	functions := file.items.filter(it is Function).map(it as Function)
+	target_lst := functions.filter(it.name == name)
+
+	if target_lst.len == 0 {
+		return none
+	}
+	if target_lst.len > 1 {
+		panic('This should never happen')
+	}
+	return target_lst[0]
+}
+
+pub fn (mut file CodeFile) set_function(function Function) !
{ + function_names := file.items.map(if it is Function { it.name } else { '' }) + + index := function_names.index(function.name) + if index == -1 { + return error('function not found') + } + file.items[index] = function +} + +pub fn (file CodeFile) functions() []Function { + return file.items.filter(it is Function).map(it as Function) +} + +pub fn (file CodeFile) structs() []Struct { + return file.items.filter(it is Struct).map(it as Struct) +} diff --git a/lib/code/codemodel/example.v b/lib/code/codemodel/example.v new file mode 100644 index 00000000..f38d7306 --- /dev/null +++ b/lib/code/codemodel/example.v @@ -0,0 +1,9 @@ +module codemodel + +pub struct Example { + function Function + values map[string]Value + result Value +} + +pub type Value = string diff --git a/lib/code/codemodel/model.v b/lib/code/codemodel/model.v new file mode 100644 index 00000000..5d645628 --- /dev/null +++ b/lib/code/codemodel/model.v @@ -0,0 +1,205 @@ +module codemodel + +import freeflowuniverse.herolib.core.pathlib +// Code is a list of statements +// pub type Code = []CodeItem + +pub type CodeItem = Alias | Comment | CustomCode | Function | Import | Struct | Sumtype + +// item for adding custom code in +pub struct CustomCode { +pub: + text string +} + +pub struct Comment { +pub: + text string + is_multi bool +} + +pub struct Struct { +pub mut: + name string + description string + mod string + is_pub bool + embeds []Struct @[str: skip] + generics map[string]string @[str: skip] + attrs []Attribute + fields []StructField +} + +pub struct Sumtype { +pub: + name string + description string + types []Type +} + +pub struct StructField { +pub mut: + comments []Comment + attrs []Attribute + name string + description string + default string + is_pub bool + is_mut bool + is_ref bool + anon_struct Struct @[str: skip] // sometimes fields may hold anonymous structs + typ Type + structure Struct @[str: skip] +} + +pub struct Attribute { +pub: + name string // [name] + has_arg bool + arg string // [name: arg] +} + +pub struct Function { +pub: + name string + receiver Param + is_pub bool + mod string +pub mut: + description string + params []Param + body string + result Result + has_return bool +} + +pub fn parse_function(code_ string) !Function { + mut code := code_.trim_space() + is_pub := code.starts_with('pub ') + if is_pub { + code = code.trim_string_left('pub ').trim_space() + } + + is_fn := code.starts_with('fn ') + if !is_fn { + return error('invalid function format') + } + code = code.trim_string_left('fn ').trim_space() + + receiver := if code.starts_with('(') { + param_str := code.all_after('(').all_before(')').trim_space() + code = code.all_after(')').trim_space() + parse_param(param_str)! + } else { + Param{} + } + + name := code.all_before('(').trim_space() + code = code.trim_string_left(name).trim_space() + + params_str := code.all_after('(').all_before(')') + params := if params_str.trim_space() != '' { + params_str_lst := params_str.split(',') + params_str_lst.map(parse_param(it)!) + } else { + []Param{} + } + result := parse_result(code.all_after(')').all_before('{').replace(' ', ''))! 
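+	// the function body is everything between the opening '{' and the last '}'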
+ + body := if code.contains('{') { code.all_after('{').all_before_last('}') } else { '' } + return Function{ + name: name + receiver: receiver + params: params + result: result + body: body + } +} + +pub fn parse_param(code_ string) !Param { + mut code := code_.trim_space() + is_mut := code.starts_with('mut ') + if is_mut { + code = code.trim_string_left('mut ').trim_space() + } + split := code.split(' ').filter(it != '') + if split.len != 2 { + return error('invalid param format: ${code_}') + } + return Param{ + name: split[0] + typ: Type{ + symbol: split[1] + } + mutable: is_mut + } +} + +pub fn parse_result(code_ string) !Result { + code := code_.replace(' ', '').trim_space() + + return Result{ + result: code_.starts_with('!') + optional: code_.starts_with('?') + typ: Type{ + symbol: code.trim('!?') + is_optional: code.starts_with('?') + is_result: code.starts_with('!') + } + } +} + +pub struct Param { +pub: + required bool + mutable bool + is_shared bool + is_optional bool + description string + name string + typ Type + struct_ Struct +} + +pub struct Result { +pub mut: + typ Type + description string + name string + result bool // whether is result type + optional bool // whether is result type + structure Struct +} + +// todo: maybe make 'is_' fields methods? +pub struct Type { +pub mut: + is_reference bool @[str: skip] + is_map bool @[str: skip] + is_array bool + is_mutable bool @[str: skip] + is_shared bool @[str: skip] + is_optional bool @[str: skip] + is_result bool @[str: skip] + symbol string + mod string @[str: skip] +} + +pub struct File { +pub mut: + name string + extension string + content string +} + +pub fn (f File) write(path string) ! { + mut fd_file := pathlib.get_file(path: '${path}/${f.name}.${f.extension}')! + fd_file.write(f.content)! +} + +pub struct Alias { +pub: + name string + description string + typ Type +} diff --git a/lib/code/codemodel/model_const.v b/lib/code/codemodel/model_const.v new file mode 100644 index 00000000..d2a8b84a --- /dev/null +++ b/lib/code/codemodel/model_const.v @@ -0,0 +1,42 @@ +module codemodel + +pub struct Const { + name string + value string +} + +pub fn parse_const(code_ string) !Const { + code := code_.trim_space().all_before('\n') + if !code.contains('=') { + return error('code <${code_}> is not of const') + } + return Const{ + name: code.split('=')[0].trim_space() + value: code.split('=')[1].trim_space() + } +} + +pub fn parse_consts(code_ string) ![]Const { + mut code := code_.trim_space() + code = code.replace('const (', 'const(') + + const_codes := code.split('\n').filter(it.trim_space().starts_with('const ')) + + mut consts := const_codes.map(parse_const(it)!) + + const_blocks := code.split('const(') + + if const_blocks.len == 1 { + return consts + } + + for i, block in const_blocks { + if i == 0 { + continue + } + stmts := block.trim_string_left('const(').all_before('\n)').trim_space().split('\n') + consts << stmts.map(parse_const(it)!) 
+ } + + return consts +} diff --git a/lib/code/codemodel/model_import.v b/lib/code/codemodel/model_import.v new file mode 100644 index 00000000..f6a9b795 --- /dev/null +++ b/lib/code/codemodel/model_import.v @@ -0,0 +1,24 @@ +module codemodel + +pub struct Import { +pub mut: + mod string + types []string +} + +pub fn (mut i Import) add_types(types []string) { + i.types << types.filter(it !in i.types) +} + +pub fn parse_import(code_ string) Import { + code := code_.trim_space().trim_string_left('import').trim_space() + types_str := if code.contains(' ') { code.all_after(' ').trim('{}') } else { '' } + return Import{ + mod: code.all_before(' ') + types: if types_str != '' { + types_str.split(',').map(it.trim_space()) + } else { + []string{} + } + } +} diff --git a/lib/code/codemodel/module.v b/lib/code/codemodel/module.v new file mode 100644 index 00000000..175e9622 --- /dev/null +++ b/lib/code/codemodel/module.v @@ -0,0 +1,38 @@ +module codemodel + +import freeflowuniverse.herolib.core.pathlib +import os + +pub struct Module { +pub mut: + name string + files []CodeFile + misc_files []File + // model CodeFile + // methods CodeFile +} + +pub fn (mod Module) write_v(path string, options WriteOptions) ! { + mut module_dir := pathlib.get_dir( + path: '${path}/${mod.name}' + empty: options.overwrite + )! + + if !options.overwrite && module_dir.exists() { + return + } + + for file in mod.files { + file.write_v(module_dir.path, options)! + } + for file in mod.misc_files { + file.write(module_dir.path)! + } + + if options.format { + os.execute('v fmt -w ${module_dir.path}') + } + if options.document { + os.execute('v doc -f html -o ${module_dir.path}/docs ${module_dir.path}') + } +} diff --git a/lib/code/codemodel/templates/comment/comment.py b/lib/code/codemodel/templates/comment/comment.py new file mode 100644 index 00000000..e69de29b diff --git a/lib/code/codemodel/templates/comment/comment.v b/lib/code/codemodel/templates/comment/comment.v new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/lib/code/codemodel/templates/comment/comment.v @@ -0,0 +1 @@ + diff --git a/lib/code/codemodel/templates/function/function.v.template b/lib/code/codemodel/templates/function/function.v.template new file mode 100644 index 00000000..ba9de4cc --- /dev/null +++ b/lib/code/codemodel/templates/function/function.v.template @@ -0,0 +1,6 @@ +@if function.description != '' +// @{function.description} +@endif +pub fn @receiver @{function.name}(@{params}) @{function.result.vgen()} { + @{function.body.trim_space().replace('\t', '')} +} \ No newline at end of file diff --git a/lib/code/codemodel/templates/function/method.py b/lib/code/codemodel/templates/function/method.py new file mode 100644 index 00000000..e69de29b diff --git a/lib/code/codemodel/templates/struct/struct.v.template b/lib/code/codemodel/templates/struct/struct.v.template new file mode 100644 index 00000000..3ee67bbd --- /dev/null +++ b/lib/code/codemodel/templates/struct/struct.v.template @@ -0,0 +1,26 @@ +@{struct_.description} +@if struct_.attrs.len > 0 +[ +@for attr in struct_.attrs + @{attr.name} +@end +] +@end +@{prefix} struct @{name} { +@for embed in struct_.embeds + @{embed.get_type_symbol()} +@end +@{priv_fields.join_lines()} +@if pub_fields.len > 0 +pub: +@{pub_fields.join_lines()} +@end +@if mut_fields.len > 0 +mut: +@{mut_fields.join_lines()} +@end +@if pub_mut_fields.len > 0 +pub mut: +@{pub_mut_fields.join_lines()} +@end +} \ No newline at end of file diff --git a/lib/code/codemodel/utils.v b/lib/code/codemodel/utils.v new file 
mode 100644 index 00000000..08bf8ea6 --- /dev/null +++ b/lib/code/codemodel/utils.v @@ -0,0 +1,92 @@ +module codemodel + +import freeflowuniverse.herolib.ui.console +import rand + +pub struct GetStruct { +pub: + code []CodeItem + mod string + name string +} + +pub fn get_struct(params GetStruct) ?Struct { + structs_ := params.code.filter(it is Struct).map(it as Struct) + structs := structs_.filter(it.name == params.name) + if structs.len == 0 { + return none + } else if structs.len > 1 { + panic('Multiple structs with same name found. This should never happen.') + } + return structs[0] +} + +pub fn inflate_types(mut code []CodeItem) { + for mut item in code { + if item is Struct { + // TODO: handle this when refactoring types / structs + + inflate_struct_fields(code, mut item) + } + } +} + +pub fn inflate_struct_fields(code []CodeItem, mut struct_ CodeItem) { + for mut field in (struct_ as Struct).fields { + // TODO: fix inflation for imported types + if field.typ.symbol.starts_with_capital() { + field.structure = get_struct( + code: code + name: field.typ.symbol + ) or { continue } + } + } +} + +@[params] +pub struct GenerateCallParams { +pub: + receiver string +} + +pub fn (func Function) generate_call(params GenerateCallParams) !string { + mut call := '' + if func.result.typ.symbol != '' { + call = 'result := ' + } + call += if params.receiver != '' { + '${params.receiver}.${func.name}' + } else if func.receiver.name != '' { + '${func.receiver.name}.${func.name}' + } else { + func.name + } + + call += if func.params.len != 0 { + '(${func.params.map(it.generate_value()!).join(',')})' + } else { + '()' + } + + if func.result.result { + call += '!' + } + return call +} + +@[params] +pub struct GenerateValueParams { +} + +pub fn (param Param) generate_value() !string { + if param.typ.symbol == 'string' { + return "'mock_string_${rand.string(3)}'" + } else if param.typ.symbol == 'int' || param.typ.symbol == 'u32' { + return '42' + } else if param.typ.symbol[0].is_capital() { + return '${param.typ.symbol}{}' + } else { + console.print_debug('mock values for types other than strings and ints are not yet supported') + } + return '' +} diff --git a/lib/code/codemodel/vgen.v b/lib/code/codemodel/vgen.v new file mode 100644 index 00000000..33984713 --- /dev/null +++ b/lib/code/codemodel/vgen.v @@ -0,0 +1,274 @@ +module codemodel + +import os +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.ui.console + +pub struct WriteCode { + destination string +} + +interface ICodeItem { + vgen() string +} + +pub fn vgen(code []CodeItem) string { + mut str := '' + for item in code { + if item is Function { + str += '\n${item.vgen()}' + } + if item is Struct { + str += '\n${item.vgen()}' + } + if item is CustomCode { + str += '\n${item.vgen()}' + } + } + return str +} + +// pub fn (code Code) vgen() string { +// return code.items.map(it.vgen()).join_lines() +// } + +// vgen_import generates an import statement for a given type +pub fn (import_ Import) vgen() string { + types_str := if import_.types.len > 0 { + '{${import_.types.join(', ')}}' + } else { + '' + } // comma separated string list of types + return 'import ${import_.mod} ${types_str}' +} + +// TODO: enfore that cant be both mutable and shared +pub fn (type_ Type) vgen() string { + mut type_str := '' + if type_.is_mutable { + type_str += 'mut ' + } else if type_.is_shared { + type_str += 'shared ' + } + + if type_.is_optional { + type_str += '?' 
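+		// '?' marks an option type; the branch below emits '!' for result types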
+ } else if type_.is_result { + type_str += '!' + } + + return '${type_str} ${type_.symbol}' +} + +pub fn (field StructField) vgen() string { + symbol := field.get_type_symbol() + mut vstr := '${field.name} ${symbol}' + if field.description != '' { + vstr += '// ${field.description}' + } + return vstr +} + +pub fn (field StructField) get_type_symbol() string { + mut field_str := if field.structure.name != '' { + field.structure.get_type_symbol() + } else { + field.typ.symbol + } + + if field.is_ref { + field_str = '&${field_str}' + } + + return field_str +} + +pub fn (structure Struct) get_type_symbol() string { + mut symbol := if structure.mod != '' { + '${structure.mod.all_after_last('.')}.${structure.name}' + } else { + structure.name + } + if structure.generics.len > 0 { + symbol = '${symbol}${vgen_generics(structure.generics)}' + } + + return symbol +} + +pub fn vgen_generics(generics map[string]string) string { + if generics.keys().len == 0 { + return '' + } + mut vstr := '[' + for key, val in generics { + vstr += if val != '' { val } else { key } + } + return '${vstr}]' +} + +// vgen_function generates a function statement for a function +pub fn (function Function) vgen(options WriteOptions) string { + mut params_ := function.params.map(Param{ + ...it + typ: Type{ + symbol: if it.struct_.name != '' { + it.struct_.name + } else { + it.typ.symbol + } + } + }) + + optionals := params_.filter(it.is_optional) + options_struct := Struct{ + name: '${texttools.name_fix_snake_to_pascal(function.name)}Options' + attrs: [Attribute{ + name: 'params' + }] + fields: optionals.map(StructField{ + name: it.name + description: it.description + typ: Type{ + symbol: it.typ.symbol + } + }) + } + if optionals.len > 0 { + params_ << Param{ + name: 'options' + typ: Type{ + symbol: options_struct.name + } + } + } + + params := params_.filter(!it.is_optional).map('${it.name} ${it.typ.symbol}').join(', ') + + receiver := function.receiver.vgen() + + mut function_str := $tmpl('templates/function/function.v.template') + + // if options.format { + // result := os.execute_opt('echo "${function_str.replace('$', '\\$')}" | v fmt') or { + // panic('${function_str}\n${err}') + // } + // function_str = result.output + // } + function_str = function_str.split_into_lines().filter(!it.starts_with('import ')).join('\n') + + return if options_struct.fields.len != 0 { + '${options_struct.vgen()}\n${function_str}' + } else { + function_str + } +} + +pub fn (param Param) vgen() string { + if param.name == '' { + return '' + } + sym := if param.struct_.name != '' { + param.struct_.get_type_symbol() + } else { + param.typ.symbol + } + + mut vstr := '${param.name} ${sym}' + if param.typ.is_reference { + vstr = '&${vstr}' + } + if param.mutable { + vstr = 'mut ${vstr}' + } + return '(${vstr})' +} + +// vgen_function generates a function statement for a function +pub fn (struct_ Struct) vgen() string { + gen := VGenerator{false} + return gen.generate_struct(struct_) or { panic(err) } + // mut struct_str := $tmpl('templates/struct/struct.v.template') + // return struct_str + // result := os.execute_opt('echo "${struct_str.replace('$', '\$')}" | v fmt') or {panic(err)} + // return result.output +} + +pub struct VGenerator { + format bool +} + +pub fn (gen VGenerator) generate_struct(struct_ Struct) !string { + name := if struct_.generics.len > 0 { + '${struct_.name}${vgen_generics(struct_.generics)}' + } else { + struct_.name + } + + prefix := if struct_.is_pub { + 'pub' + } else { + '' + } + + priv_fields := 
struct_.fields.filter(!it.is_mut && !it.is_pub).map(gen.generate_struct_field(it)) + pub_fields := struct_.fields.filter(!it.is_mut && it.is_pub).map(gen.generate_struct_field(it)) + mut_fields := struct_.fields.filter(it.is_mut && !it.is_pub).map(gen.generate_struct_field(it)) + pub_mut_fields := struct_.fields.filter(it.is_mut && it.is_pub).map(gen.generate_struct_field(it)) + + mut struct_str := $tmpl('templates/struct/struct.v.template') + if gen.format { + result := os.execute_opt('echo "${struct_str.replace('$', '\$')}" | v fmt') or { + console.print_debug(struct_str) + panic(err) + } + return result.output + } + return struct_str +} + +pub fn (gen VGenerator) generate_struct_field(field StructField) string { + symbol := field.get_type_symbol() + mut vstr := '${field.name} ${symbol}' + if field.description != '' { + vstr += '// ${field.description}' + } + return vstr +} + +pub fn (custom CustomCode) vgen() string { + return custom.text +} + +// vgen_function generates a function statement for a function +pub fn (result Result) vgen() string { + result_type := if result.structure.name != '' { + result.structure.get_type_symbol() + } else if result.typ.symbol == 'void' { + '' + } else { + if result.typ.is_array { + '[]${result.typ.symbol}' + } else { + result.typ.symbol + } + } + str := if result.result { + '!' + } else if result.typ.is_result { + '!' + } else { + '' + } + return '${str}${result_type}' +} + +@[params] +pub struct WriteOptions { +pub: + format bool + overwrite bool + document bool + prefix string +} diff --git a/lib/code/codeparser/README.md b/lib/code/codeparser/README.md new file mode 100644 index 00000000..99130bf7 --- /dev/null +++ b/lib/code/codeparser/README.md @@ -0,0 +1,179 @@ +# Code Parser + +A library of code parsers that parse code and comments into defined code primitives in the [CodeModel](../codemodel/README.md) library. + +## What it does + +- The codeparser parses code into the same generic code models. + +This allows programs that use the code parser to be able to parse from all the languages the codeparser library supports (though currently only V) without having to change implementation. + +- The codeparser parses comments into the code models. + +This introduces styling guidelines around writing comments in programming languages, which if used can help the parser parse in a lot of structured information into code models. 
See for instance how the codeparser can harvest a lot of information from the comments of the V function below:
+
+```v
+// hello generates a list of greeting strings for a specific name
+// - name: the name of the person being greeted
+// - times: the number of greeting messages to be generated
+// returns hello messages, a list of messages that greets a person with their name
+fn hello(name string, times int) []string {
+	return ['hello ${name}'].repeat(times)
+}
+```
+
+The VParser parses the above function into the following models:
+
+```py
+Function {
+	name: 'hello'
+	description: 'generates a list of greeting strings for a specific name'
+	body: "return ['hello ${name}'].repeat(times)"
+	params: [
+		Param {
+			name: 'name'
+			description: 'the name of the person being greeted'
+			typ: Type {
+				symbol: 'string'
+			}
+		},
+		Param {
+			name: 'times'
+			description: 'the number of greeting messages to be generated'
+			typ: Type {
+				symbol: 'int'
+			}
+		}
+	]
+	result: Result {
+		name: 'hello messages'
+		description: 'a list of messages that greets a person with their name'
+		typ: Type {
+			symbol: '[]string'
+		}
+	}
+}
+```
+
+While this example contains a lot of comments for a simple function, the style comes in especially useful when parsing more complex functions, and when parsing for documentation generation (see [OpenRPC Document Generator](#openrpc-document-generator)).
+
+## Getting started
+
+1. Have a code directory or file to parse.
+2. Follow the annotation guidelines below for the coding languages in your project, so your code is annotated in a format codeparser can parse.
+3. Run `v run `
+
+## Annotations
+
+Currently, the codeparser can parse annotations on struct declarations and function declarations, and gather the following information:
+
+**Struct declaration annotations**
+
+- struct description
+- field descriptions for each field
+
+**Function declaration annotations**
+
+- function description
+- parameter descriptions for each parameter
+- result name and description of what the function returns
+
+The codeparser expects code to be annotated in a certain format to be able to parse descriptive comments into ['code items'](). While failure to follow this formatting won't cause any errors, some of the comments may not end up being parsed into the resulting ['code model'](). The annotation format expected in each programming language the codeparser supports is detailed below.
+
+### Annotating code in V
+
+- Struct annotations:
+
+```go
+// this is a description of the struct
+struct Example {
+	field0 string // this comment describes field0
+	field1 int // this comment describes field1
+}
+```
+
+This struct is parsed as the following:
+
+```py
+Struct {
+	name: 'Example'
+	description: 'this is a description of the struct'
+	fields: [
+		StructField {
+			name: 'field0'
+			description: 'this comment describes field0'
+			typ: Type {
+				symbol: 'string'
+			}
+		},
+		StructField {
+			name: 'field1'
+			description: 'this comment describes field1'
+			typ: Type {
+				symbol: 'int'
+			}
+		}
+	]
+}
+```
+
+- Function annotations:
+
+```go
+// some_function is described by the words following the function's name
+// - param0: this sentence after the colon describes param0
+// - param1: this sentence after the colon describes param1
+// returns the desired result, this sentence after the comma describes 'the desired result'
+fn some_function(param0 string, param1 int) []string {}
+```
+
+This function is parsed as the following:
+
+```py
+Function {
+	name: 'some_function'
+	description: 'is described by the words following the function\'s name'
+	body: ''
+	params: [
+		Param {
+			name: 'param0'
+			description: 'this sentence after the colon describes param0'
+			typ: Type {
+				symbol: 'string'
+			}
+		},
+		Param {
+			name: 'param1'
+			description: 'this sentence after the colon describes param1'
+			typ: Type {
+				symbol: 'int'
+			}
+		}
+	]
+	result: Result {
+		name: 'the desired result'
+		description: 'this sentence after the comma describes \'the desired result\''
+		typ: Type {
+			symbol: '[]string'
+		}
+	}
+}
+```
+
+## VParser
+
+NB: v.parser refers to the parser in the V standard library, whereas VParser refers to the codeparser for V in this module.
+
+The VParser uses the v.ast and v.parser libraries to parse the code in V files. Its main purpose is to provide a simpler alternative to the builtin v.parser for less complex applications. As the v.parser module is used for parsing and compiling V itself, its AST models for function and struct declarations come with a lot of overhead that simpler applications don't need.
+
+### Using VParser
+
+The main entry point is `pub fn parse_v(path_ string, vparser VParser) ![]CodeItem`.
+
+The VParser struct configures how parsing is done on a path containing V files (for example `recursive`, `only_pub`, `exclude_dirs` and `exclude_files`). See the [docs]() and the usage sketch at the end of this README for more information on using the parse_v function.
+
+### Example applications
+
+#### [OpenRPC Document Generator](../openrpc/docgen/)
+
+The OpenRPC document generator uses the VParser to parse through OpenRPC Client code in V, to create an OpenRPC Document from the parsed code.
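+
+### Usage sketch
+
+The snippet below is a minimal sketch of driving the parser from a small V program. It assumes the module resolves as `freeflowuniverse.herolib.core.codeparser` (the prefix this repo's own imports and tests use) and a hypothetical `./src` directory to parse; adapt both to your setup.
+
+```go
+import freeflowuniverse.herolib.core.codeparser
+
+fn main() {
+	// parse only the public declarations under ./src,
+	// descending into subdirectories but skipping a hypothetical tests dir
+	items := codeparser.parse_v('./src',
+		recursive: true
+		only_pub: true
+		exclude_dirs: ['tests']
+	) or { panic(err) }
+	// each item is a CodeItem (a Function, Struct, ...) from the codemodel library
+	for item in items {
+		println(item)
+	}
+}
+```
+
+Since `VParser` is a `@[params]` struct, its fields can be passed as trailing named arguments to `parse_v`, as shown above.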
diff --git a/lib/code/codeparser/parse_example.v b/lib/code/codeparser/parse_example.v new file mode 100644 index 00000000..222f1a21 --- /dev/null +++ b/lib/code/codeparser/parse_example.v @@ -0,0 +1,32 @@ +module codeparser + +// import freeflowuniverse.herolib.core.codemodel {Example} +// import freeflowuniverse.herolib.rpc.openrpc {ExamplePairing} + +// pub fn parse_example_pairing(text_ string) !ExamplePairing { +// if !text_.contains('Example:') { return error('no example found fitting format') } +// mut text := text_.all_after('Example:').trim_space() + +// mut pairing := ExamplePairing{} + +// if text.contains('assert') { +// pairing.name = if text.all_before('assert').trim_space() != '' { +// text.all_before('assert').trim_space() +// } else {text.all_after('assert').all_before('(').trim_space()} +// value := text.all_after('==').all_before('//').trim_space() +// pairing.result = parse_example() +// description := text.all_after('//').trim_space() +// } + +// return pairing +// } + +// pub fn parse_examples(text string) []openrpc.Example { + +// } + +// pub fn parse_example(text string) openrpc.Example { +// return Example{ + +// } +// } diff --git a/lib/code/codeparser/parse_example_test.v b/lib/code/codeparser/parse_example_test.v new file mode 100644 index 00000000..1c4f1cf7 --- /dev/null +++ b/lib/code/codeparser/parse_example_test.v @@ -0,0 +1,31 @@ +module codeparser + +// const example_txt = " +// Example: Get pet example. +// assert some_function('input_string') == 'output_string' +// " + +// // "examples": [ +// // { +// // "name": "getPetExample", +// // "description": "get pet example", +// // "params": [ +// // { +// // "name": "petId", +// // "value": 7 +// // } +// // ], +// // "result": { +// // "name": "getPetExampleResult", +// // "value": { +// // "name": "fluffy", +// // "tag": "poodle", +// // "id": 7 +// // } +// // } +// // } + +// fn test_parse_example() ! 
{
+// 	example := parse_example(example_txt)
+// 	panic('example ${example}')
+// }
diff --git a/lib/code/codeparser/testdata/file.v b/lib/code/codeparser/testdata/file.v
new file mode 100644
index 00000000..144b77f8
--- /dev/null
+++ b/lib/code/codeparser/testdata/file.v
@@ -0,0 +1,31 @@
+module testdata
+
+// file_func0 is the first function of file
+fn file_func0() {}
+
+// file_func1 is the second function of file
+// - name: a name that the function will do nothing with
+pub fn file_func1(name string) {}
+
+// FileStruct0 defines the configuration params of file_func2
+@[params]
+pub struct FileStruct0 {
+	param1 string //
+	param2 int //
+}
+
+// file_func2 is the third function of the file
+// - config: configuration for file_func2
+pub fn file_func2(config FileStruct0) {}
+
+pub struct FileStruct1 {}
+
+// file_func3 is the fourth function of the file
+// is does something with param1 and param2 and creates FileStruct1
+// returns the created filestruct1, a FileStruc1 struct filled in with params 1 and 2
+pub fn file_func3(param1 string, param2 int) FileStruct1 {
+	return FileStruct1{} // FileStruct1 has no fields to fill in
+}
diff --git a/lib/code/codeparser/testdata/flatdir/anotherfile.v b/lib/code/codeparser/testdata/flatdir/anotherfile.v
new file mode 100644
index 00000000..e745a384
--- /dev/null
+++ b/lib/code/codeparser/testdata/flatdir/anotherfile.v
@@ -0,0 +1,32 @@
+module flatdir
+
+// anotherfile_func0 is the first function of file
+fn anotherfile_func0() {}
+
+// anotherfile_func1 is the second function of file
+// - name: a name that the function will do nothing with
+pub fn anotherfile_func1(name string) {}
+
+// AnotherfileStruct0 defines the configuration params of anotherfile_func2
+@[params]
+pub struct AnotherfileStruct0 {
+	param1 string //
+	param2 int //
+}
+
+// anotherfile_func2 is the third function of the file
+// - config: configuration for anotherfile_func2
+pub fn anotherfile_func2(config AnotherfileStruct0) {}
+
+pub struct AnotherfileStruct1 {
+	param string
+}
+
+// anotherfile_func3 is the fourth function of the file
+// is does something with param1 and param2 and creates AnotherfileStruct1
+// returns the created filestruct1, a FileStruc1 struct filled in with params 1 and 2
+pub fn anotherfile_func3(param1 string, param2 string) AnotherfileStruct1 {
+	return AnotherfileStruct1{
+		param: param1 + param2
+	}
+}
diff --git a/lib/code/codeparser/testdata/flatdir/subfile.v b/lib/code/codeparser/testdata/flatdir/subfile.v
new file mode 100644
index 00000000..f89bc0e4
--- /dev/null
+++ b/lib/code/codeparser/testdata/flatdir/subfile.v
@@ -0,0 +1,32 @@
+module flatdir
+
+// subfile_func0 is the first function of file
+fn subfile_func0() {}
+
+// subfile_func1 is the second function of file
+// - name: a name that the function will do nothing with
+pub fn subfile_func1(name string) {}
+
+// SubfileStruct0 defines the configuration params of subfile_func2
+@[params]
+pub struct SubfileStruct0 {
+	param1 string //
+	param2 int //
+}
+
+// subfile_func2 is the third function of the file
+// - config: configuration for subfile_func2
+pub fn subfile_func2(config SubfileStruct0) {}
+
+pub struct SubfileStruct1 {
+	param string
+}
+
+// subfile_func3 is the fourth function of the file
+// is does something with param1 and param2 and creates SubfileStruct1
+// returns the created filestruct1, a FileStruc1 struct filled in with params 1 and 2
+pub fn subfile_func3(param1 string, param2 string) SubfileStruct1 {
+	return SubfileStruct1{
+		param: param1 + param2
+	}
+}
diff --git a/lib/code/codeparser/vparser.v b/lib/code/codeparser/vparser.v
new file mode 100644
index 00000000..0d40c477
--- /dev/null
+++ b/lib/code/codeparser/vparser.v
@@ -0,0 +1,529 @@
+module codeparser
+
+import v.ast
+import v.parser
+import freeflowuniverse.herolib.core.pathlib
+import freeflowuniverse.herolib.ui.console
+import freeflowuniverse.herolib.core.codemodel { CodeFile, CodeItem, Function, Import, Module, Param, Result, Struct, StructField, Sumtype, Type, parse_consts, parse_import }
+import v.pref
+
+// VParser holds the parsing configuration
+// and has the methods that implement parsing
+@[params]
+pub struct VParser {
+pub:
+	exclude_dirs  []string // directories to be excluded from parsing
+	exclude_files []string // files to be excluded from parsing
+	only_pub      bool // whether to only parse public functions and structs
+	recursive     bool // whether subdirs should be parsed as well
+}
+
+// parse_v takes in a path to parse V code from and
+// vparser configuration params, returns a list of parsed codeitems
+pub fn parse_v(path_ string, vparser VParser) ![]CodeItem {
+	mut path := pathlib.get(path_)
+
+	$if debug {
+		console.print_debug('Parsing path `${path.path}` with configuration:\n${vparser}\n')
+	}
+
+	if !path.exists() {
+		return error('Path `${path.path}` doesn\'t exist.')
+	}
+
+	path.check()
+	mut table := ast.new_table()
+	return vparser.parse_vpath(mut path, mut table)!
+}
+
+// parse_vpath parses the v code files and returns codeitems in a given path
+// can be recursive or not based on the parser's configuration
+fn (vparser VParser) parse_vpath(mut path pathlib.Path, mut table ast.Table) ![]CodeItem {
+	mut code := []CodeItem{}
+	// mut table := ast.new_table()
+	// fpref := &pref.Preferences{ // preferences for parsing
+	// 	is_fmt: true
+	// }
+	if path.is_dir() {
+		dir_is_excluded := vparser.exclude_dirs.any(path.path.ends_with(it))
+		if dir_is_excluded {
+			return code
+		}
+
+		if vparser.recursive {
+			// parse direct subdirs if configured recursive;
+			// deeper levels are handled by the recursive call itself
+			mut flist := path.list(recursive: false)!
+			for mut subdir in flist.paths {
+				if subdir.is_dir() {
+					code << vparser.parse_vpath(mut subdir, mut table)!
+				}
+			}
+		}
+
+		mut fl := path.list(recursive: false)!
+		for mut file in fl.paths {
+			if !file.is_dir() {
+				code << vparser.parse_vpath(mut file, mut table)!
+			}
+		}
+	} else if path.is_file() {
+		file_is_excluded := vparser.exclude_files.any(path.path.ends_with(it))
+		// todo: use pathlib list regex param to filter non-v files
+		if file_is_excluded || !path.path.ends_with('.v') {
+			return code
+		}
+		code << vparser.parse_vfile(path.path, mut table)
+	} else {
+		return error('Path being parsed must either be a directory or a file.')
+	}
+	// codemodel.inflate_types(mut code)
+	return code
+}
+
+// parse_file parses a v code file and returns it as a CodeFile
+pub fn parse_file(path string, vparser VParser) !CodeFile {
+	mut file := pathlib.get_file(path: path)!
+	mut table := ast.new_table()
+	items := vparser.parse_vfile(file.path, mut table)
+	content := file.read()! // read once, reuse for imports and consts
+	return CodeFile{
+		name: file.name().trim_string_right('.v')
+		imports: parse_imports(content)
+		consts: parse_consts(content)!
+		items: items
+	}
+}
+
+pub fn parse_imports(code string) []Import {
+	return code.split('\n').filter(it.starts_with('import ')).map(parse_import(it))
+}
+
+// parse_vfile parses and returns code items from a v code file
+fn (vparser VParser) parse_vfile(path string, mut table ast.Table) []CodeItem {
+	$if debug {
+		console.print_debug('Parsing file `${path}`')
+	}
+	mut code := []CodeItem{}
+
+	// mut table := ast.new_table()
+	fpref := &pref.Preferences{ // preferences for parsing
+		is_fmt: true
+	}
+	file_ast := parser.parse_file(path, mut table, .parse_comments, fpref)
+	mut file := pathlib.get_file(path: path) or { panic(err) }
+	file_text := file.read() or { panic(err) }
+	mut preceding_comments := []ast.Comment{}
+
+	for stmt in file_ast.stmts {
+		// code block from vlib/v/doc/doc.v
+		if stmt is ast.ExprStmt {
+			// Collect comments
+			if stmt.expr is ast.Comment {
+				preceding_comments << stmt.expr as ast.Comment
+				continue
+			}
+		}
+		if stmt is ast.FnDecl {
+			fn_decl := stmt as ast.FnDecl
+			// skip declarations marked @[openrpc: exclude]
+			if fn_decl.attrs.any(it.name == 'openrpc' && it.arg == 'exclude') {
+				continue
+			}
+			if fn_decl.is_pub || !vparser.only_pub {
+				code << CodeItem(vparser.parse_vfunc(
+					fn_decl: fn_decl
+					table: table
+					comments: preceding_comments
+					text: file_text
+				))
+			}
+			preceding_comments = []ast.Comment{}
+		} else if stmt is ast.TypeDecl {
+			if stmt is ast.SumTypeDecl {
+				sumtype_decl := stmt as ast.SumTypeDecl
+				// skip declarations marked @[openrpc: exclude]
+				if sumtype_decl.attrs.any(it.name == 'openrpc' && it.arg == 'exclude') {
+					continue
+				}
+				if sumtype_decl.is_pub || !vparser.only_pub {
+					code << CodeItem(vparser.parse_vsumtype(
+						sumtype_decl: sumtype_decl
+						table: table
+						comments: preceding_comments
+					))
+				}
+				preceding_comments = []ast.Comment{}
+			}
+		} else if stmt is ast.StructDecl {
+			struct_decl := stmt as ast.StructDecl
+			// skip declarations marked @[openrpc: exclude]
+			if struct_decl.attrs.any(it.name == 'openrpc' && it.arg == 'exclude') {
+				continue
+			}
+			if struct_decl.is_pub || !vparser.only_pub {
+				code << CodeItem(vparser.parse_vstruct(
+					struct_decl: struct_decl
+					table: table
+					comments: preceding_comments
+				))
+			}
+			preceding_comments = []ast.Comment{}
+		}
+	}
+	return code
+}
+
+// parse_module parses a path of v code files into a Module
+pub fn parse_module(path_ string, vparser VParser) !Module {
+	mut path := pathlib.get(path_)
+	if !path.exists() {
+		return error('Path `${path.path}` doesn\'t exist.')
+	}
+
+	mut table := ast.new_table()
+	mut code := []CodeFile{}
+	// fpref := &pref.Preferences{ // preferences for parsing
+	// 	is_fmt: true
+	// }
+	mut mod := Module{
+		name: path.name()
+	}
+	if path.is_dir() {
+		dir_is_excluded := vparser.exclude_dirs.any(path.path.ends_with(it))
+		if dir_is_excluded {
+			return Module{
+				...mod
+				files: code
+			}
+		}
+
+		if vparser.recursive {
+			return error('recursive module parsing not yet supported')
+		}
+
+		mut fl := path.list(recursive: false)!
+		for mut file in fl.paths {
+			if !file.is_dir() {
+				code << parse_file(file.path, vparser)!
+			}
+		}
+	} else if path.is_file() {
+		file_is_excluded := vparser.exclude_files.any(path.path.ends_with(it))
+		// todo: use pathlib list regex param to filter non-v files
+		if file_is_excluded || !path.path.ends_with('.v') {
+			return Module{
+				...mod
+				files: code
+			}
+		}
+		code << parse_file(path.path, vparser)!
+ } else { + return error('Path being parsed must either be a directory or a file.') + } + // codemodel.inflate_types(mut code) + return Module{ + ...mod + files: code + } +} + +@[params] +struct VFuncArgs { + comments []ast.Comment // v comments that belong to the function + fn_decl ast.FnDecl // v.ast parsed function declaration + table &ast.Table // ast table used for getting typesymbols from + text string +} + +// parse_vfunc parses function args into function struct +pub fn (vparser VParser) parse_vfunc(args VFuncArgs) Function { + $if debug { + console.print_debug('Parsing function: ${args.fn_decl.short_name}') + } + + // get function params excluding receiver + receiver_name := args.fn_decl.receiver.name + receiver_type := args.table.type_to_str(args.fn_decl.receiver.typ).all_after_last('.') + fn_params := args.fn_decl.params.filter(it.name != receiver_name) + + receiver := Param{ + name: receiver_name + typ: Type{ + symbol: receiver_type + } + mutable: args.fn_decl.rec_mut + } + + params := vparser.parse_params( + comments: args.comments + params: fn_params + table: args.table + ) + + result := vparser.parse_result( + comments: args.comments + return_type: args.fn_decl.return_type + table: args.table + ) + + mut fn_comments := []string{} + for comment in args.comments.map(it.text.trim_string_left('\u0001').trim_space()) { + if !comment.starts_with('-') && !comment.starts_with('returns') { + fn_comments << comment.trim_string_left('${args.fn_decl.short_name} ') + } + } + + text_lines := args.text.split('\n') + fn_lines := text_lines.filter(it.contains('fn') && it.contains(' ${args.fn_decl.short_name}(')) + fn_line := fn_lines[0] or { panic('this should never happen') } + line_i := text_lines.index(fn_line) + end_i := line_i + text_lines[line_i..].index('}') + + fn_text := text_lines[line_i..end_i + 1].join('\n') + // mut fn_index := args.text.index(args.fn_decl.short_name) or {panic('this should never happen1')} + // text_cropped := args.text[..fn_index] or {panic('this should never happen2')} + // fn_start := text_cropped.last_index('fn ') or {panic('this should never happen3 \n-${text_cropped}')} + // fn_text := args.text[fn_start..] 
or {panic('this should never happen4')}
+	fn_parsed := codemodel.parse_function(fn_text) or { panic(err) }
+
+	return Function{
+		name: args.fn_decl.short_name
+		description: fn_comments.join(' ')
+		mod: args.fn_decl.mod
+		receiver: receiver
+		params: params
+		result: result // the result parsed from the comments and return type above
+		body: fn_parsed.body
+	}
+}
+
+@[params]
+struct ParamsArgs {
+	comments []ast.Comment // comments of the function
+	params   []ast.Param // v.ast parameters of the function
+	table    &ast.Table // ast table for getting type names
+}
+
+// parse_params parses ast function parameters into function parameters
+fn (vparser VParser) parse_params(args ParamsArgs) []Param {
+	mut params := []Param{}
+	for param in args.params {
+		mut description := ''
+		// parse comment line that describes param
+		for comment in args.comments {
+			if start := comment.text.index('- ${param.name}: ') {
+				description = comment.text[start..].trim_string_left('- ${param.name}: ')
+			}
+		}
+
+		params << Param{
+			name: param.name
+			description: description
+			typ: Type{
+				symbol: args.table.type_to_str(param.typ).all_after_last('.')
+			}
+		}
+	}
+	return params
+}
+
+@[params]
+struct ParamArgs {
+	comments []ast.Comment // comments of the function
+	param    ast.Param // v.ast parameter being parsed
+	table    &ast.Table // ast table for getting type names
+}
+
+// parse_param parses a single ast function parameter into a Param
+fn (vparser VParser) parse_param(args ParamArgs) Param {
+	mut description := ''
+	// parse comment line that describes param
+	for comment in args.comments {
+		if start := comment.text.index('- ${args.param.name}: ') {
+			description = comment.text[start..].trim_string_left('- ${args.param.name}: ')
+		}
+	}
+
+	return Param{
+		name: args.param.name
+		description: description
+		typ: Type{
+			symbol: args.table.type_to_str(args.param.typ).all_after_last('.')
+		}
+	}
+}
+
+struct ReturnArgs {
+	comments    []ast.Comment // comments of the function
+	return_type ast.Type // v.ast type of what function returns
+	table       &ast.Table // v.ast table for getting type names
+}
+
+// parse_result parses a function's comments and return type
+// returns a result struct that represents what the function's result is
+fn (vparser VParser) parse_result(args ReturnArgs) Result {
+	comment_str := args.comments.map(it.text).join('')
+
+	// parse comments to get return name and description
+	mut name := ''
+	mut description := ''
+	if start := comment_str.index('returns') {
+		mut end := comment_str.index_after('.', start)
+		if end == -1 {
+			end = comment_str.len
+		}
+		return_str := comment_str[start..end].trim_string_left('returns ')
+
+		split := return_str.split(', ')
+		name = split[0]
+		if split.len > 1 {
+			description = split[1..].join(', ')
+		}
+	}
+	return_symbol := args.table.type_to_str(args.return_type).all_after_last('.')
+
+	return Result{
+		name: name
+		description: description
+		typ: Type{
+			symbol: return_symbol
+		}
+	}
+}
+
+// parse_type converts a v.ast type into a codemodel Type
+fn (vparser VParser) parse_type(typ ast.Type, table &ast.Table) Type {
+	type_str := table.type_to_str(typ).all_after_last('.')
+	return Type{
+		symbol: type_str
+	}
+}
+
+struct VStructArgs {
+	comments    []ast.Comment // comments that belong to the struct declaration
+	struct_decl ast.StructDecl // v.ast Struct declaration for struct being parsed
+	table       &ast.Table // v.ast table for getting type names
+}
+
+// parse_vstruct parses a struct declaration into a Struct
+fn (vparser VParser) parse_vstruct(args VStructArgs) Struct {
+	$if debug {
+		console.print_debug('Parsing struct: ${args.struct_decl.name}')
+	}
+
+	comments := args.comments.map(it.text.trim_string_left('\u0001').trim_space())
+	mut fields := vparser.parse_fields(args.struct_decl.fields, args.table)
+	fields << vparser.parse_embeds(args.struct_decl.embeds, args.table)
+	return Struct{
+		name: args.struct_decl.name.all_after_last('.')
+		description: comments.join(' ')
+		fields: fields
+		mod: args.struct_decl.name.all_before_last('.')
+		attrs: args.struct_decl.attrs.map(codemodel.Attribute{ name: it.name })
+		is_pub: args.struct_decl.is_pub
+	}
+}
+
+struct VSumTypeArgs {
+	comments     []ast.Comment // comments that belong to the sumtype declaration
+	sumtype_decl ast.SumTypeDecl // v.ast sumtype declaration being parsed
+	table        &ast.Table // v.ast table for getting type names
+}
+
+// parse_vsumtype parses a sumtype declaration into a Sumtype
+fn (vparser VParser) parse_vsumtype(args VSumTypeArgs) Sumtype {
+	$if debug {
+		console.print_debug('Parsing sumtype: ${args.sumtype_decl.name}')
+	}
+
+	comments := args.comments.map(it.text.trim_string_left('\u0001').trim_space())
+
+	return Sumtype{
+		name: args.sumtype_decl.name.all_after_last('.')
+		description: comments.join(' ')
+		types: vparser.parse_variants(args.sumtype_decl.variants, args.table)
+	}
+}
+
+// parse_fields parses ast struct fields into struct fields
+fn (vparser VParser) parse_fields(fields []ast.StructField, table &ast.Table) []StructField {
+	mut fields_ := []StructField{}
+	for field in fields {
+		mut anon_struct := Struct{}
+		if table.type_to_str(field.typ).all_after_last('.').starts_with('_VAnon') {
+			anon_struct = vparser.parse_vstruct(
+				table: table
+				struct_decl: field.anon_struct_decl
+			)
+		}
+
+		description := field.comments.map(it.text.trim_string_left('\u0001').trim_space()).join(' ')
+		fields_ << StructField{
+			attrs: field.attrs.map(codemodel.Attribute{
+				name: it.name
+				has_arg: it.has_arg
+				arg: it.arg
+			})
+			name: field.name
+			anon_struct: anon_struct
+			description: description
+			typ: Type{
+				symbol: table.type_to_str(field.typ).all_after_last('.')
+				is_array: table.type_to_str(field.typ).contains('[]')
+				is_map: table.type_to_str(field.typ).contains('map[')
+			}
+			is_pub: field.is_pub
+			is_mut: field.is_mut
+			default: field.default_val
+		}
+	}
+	return fields_
+
+	// return fields.map(
+	// 	StructField{
+	// 		name: it.name
+	// 		typ: Type{
+	// 			symbol: table.type_to_str(it.typ).all_after_last('.')
+	// 		}
+	// 	}
+	// )
+}
+
+// parse_embeds parses ast.embeds into struct fields
+// TODO: support recursive embeds
+fn (vparser VParser) parse_embeds(embeds []ast.Embed, table &ast.Table) []StructField {
+	mut fields := []StructField{}
+	for embed in embeds {
+		$if debug {
+			console.print_debug('Parsing embed: ${table.sym(embed.typ).info}')
+		}
+		embed_info := table.sym(embed.typ).info
+		if embed_info is ast.Struct {
+			// embeds: vparser.parse_embeds(embed_info.embeds, table)
+			fields << vparser.parse_fields(embed_info.fields, table)
+		}
+	}
+	return fields
+}
+
+// parse_variants parses sumtype variant type nodes into Types
+fn (vparser VParser) parse_variants(variants []ast.TypeNode, table &ast.Table) []Type {
+	mut types := []Type{}
+	for variant in variants {
+		types << Type{
+			symbol: table.type_to_str(variant.typ).all_after_last('.')
+		}
+	}
+	return types
+}
diff --git a/lib/code/codeparser/vparser_test.v b/lib/code/codeparser/vparser_test.v
new file mode 100644
index 00000000..b1f72448
--- /dev/null
+++ b/lib/code/codeparser/vparser_test.v
@@ -0,0 +1,643 @@
+module codeparser
+
+import
freeflowuniverse.herolib.core.codemodel { CodeItem, Function, Struct } +import os +import freeflowuniverse.herolib.ui.console + +const testpath = os.dir(@FILE) + '/testdata' + +// is a map of test files used in these tests and their complete codeitems +// used to make assertions and verify test outputs +const testcode = { + 'anotherfile.v': [ + CodeItem(Function{ + name: 'anotherfile_func0' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata.flatdir' + description: 'is the first function of file' + params: [] + body: '' + result: codemodel.Result{ + typ: codemodel.Type{ + symbol: 'void' + } + description: '' + name: '' + } + has_return: false + }), + CodeItem(Function{ + name: 'anotherfile_func1' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata.flatdir' + description: 'is the second function of file' + params: [ + codemodel.Param{ + required: false + description: 'a name that the function will do nothing with' + name: 'name' + typ: codemodel.Type{ + symbol: 'string' + } + }, + ] + body: '' + result: codemodel.Result{ + typ: codemodel.Type{ + symbol: 'void' + } + description: '' + name: '' + } + has_return: false + }), + CodeItem(Struct{ + name: 'AnotherfileStruct0' + description: 'AnotherfileStruct0 defines the configuration params of anotherfile_func2' + mod: 'core.codeparser.testdata.flatdir' + is_pub: true + attrs: [ + codemodel.Attribute{ + name: 'params' + has_arg: false + arg: '' + }, + ] + fields: [ + codemodel.StructField{ + comments: [] + attrs: [] + name: 'param1' + description: '' + anon_struct: Struct{ + name: '' + description: '' + fields: [] + } + typ: codemodel.Type{ + symbol: 'string' + } + }, + codemodel.StructField{ + comments: [] + attrs: [] + name: 'param2' + description: '' + anon_struct: Struct{ + name: '' + description: '' + fields: [] + } + typ: codemodel.Type{ + symbol: 'int' + } + }, + ] + }), + CodeItem(Function{ + name: 'anotherfile_func2' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata.flatdir' + description: 'is the third function of the file' + params: [ + codemodel.Param{ + required: false + description: 'configuration for anotherfile_func2' + name: 'config' + typ: codemodel.Type{ + symbol: 'AnotherfileStruct0' + } + }, + ] + body: '' + result: codemodel.Result{ + typ: codemodel.Type{ + symbol: 'void' + } + description: '' + name: '' + } + has_return: false + }), + CodeItem(Struct{ + name: 'AnotherfileStruct1' + description: '' + mod: 'core.codeparser.testdata.flatdir' + is_pub: true + fields: [ + codemodel.StructField{ + comments: [] + attrs: [] + name: 'param' + description: '' + anon_struct: Struct{ + name: '' + description: '' + fields: [] + } + typ: codemodel.Type{ + symbol: 'string' + } + }, + ] + }), + CodeItem(Function{ + name: 'anotherfile_func3' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata.flatdir' + description: 'is the fourth function of the file is does something with param1 and param2 and creates AnotherfileStruct1' + params: [ + codemodel.Param{ + required: false + description: '' + name: 'param1' + typ: codemodel.Type{ + symbol: 'string' + } + }, + codemodel.Param{ + required: false + description: '' + name: 'param2' 
+ typ: codemodel.Type{ + symbol: 'string' + } + }, + ] + body: '' + result: codemodel.Result{ + typ: codemodel.Type{ + symbol: 'AnotherfileStruct1' + } + description: 'a FileStruc1 struct filled in with params 1 and 2' + name: 'the created filestruct1' + } + has_return: false + }), + ] + 'subfile.v': [ + CodeItem(Function{ + name: 'subfile_func0' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata.flatdir' + description: 'is the first function of file' + params: [] + body: '' + result: codemodel.Result{ + typ: codemodel.Type{ + symbol: 'void' + } + description: '' + name: '' + } + has_return: false + }), + CodeItem(Function{ + name: 'subfile_func1' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata.flatdir' + description: 'is the second function of file' + params: [ + codemodel.Param{ + required: false + description: 'a name that the function will do nothing with' + name: 'name' + typ: codemodel.Type{ + symbol: 'string' + } + }, + ] + body: '' + result: codemodel.Result{ + typ: codemodel.Type{ + symbol: 'void' + } + description: '' + name: '' + } + has_return: false + }), + CodeItem(Struct{ + name: 'SubfileStruct0' + description: 'SubfileStruct0 defines the configuration params of subfile_func2' + mod: 'core.codeparser.testdata.flatdir' + is_pub: true + attrs: [ + codemodel.Attribute{ + name: 'params' + has_arg: false + arg: '' + }, + ] + fields: [ + codemodel.StructField{ + comments: [] + attrs: [] + name: 'param1' + description: '' + anon_struct: Struct{ + name: '' + description: '' + fields: [] + } + typ: codemodel.Type{ + symbol: 'string' + } + }, + codemodel.StructField{ + comments: [] + attrs: [] + name: 'param2' + description: '' + anon_struct: Struct{ + name: '' + description: '' + fields: [] + } + typ: codemodel.Type{ + symbol: 'int' + } + }, + ] + }), + CodeItem(Function{ + name: 'subfile_func2' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata.flatdir' + description: 'is the third function of the file' + params: [ + codemodel.Param{ + required: false + description: 'configuration for subfile_func2' + name: 'config' + typ: codemodel.Type{ + symbol: 'SubfileStruct0' + } + }, + ] + body: '' + result: codemodel.Result{ + typ: codemodel.Type{ + symbol: 'void' + } + description: '' + name: '' + } + has_return: false + }), + CodeItem(Struct{ + name: 'SubfileStruct1' + description: '' + mod: 'core.codeparser.testdata.flatdir' + is_pub: true + fields: [ + codemodel.StructField{ + comments: [] + attrs: [] + name: 'param' + description: '' + anon_struct: Struct{ + name: '' + description: '' + fields: [] + } + typ: codemodel.Type{ + symbol: 'string' + } + }, + ] + }), + CodeItem(Function{ + name: 'subfile_func3' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata.flatdir' + description: 'is the fourth function of the file is does something with param1 and param2 and creates SubfileStruct1' + params: [ + codemodel.Param{ + required: false + description: '' + name: 'param1' + typ: codemodel.Type{ + symbol: 'string' + } + }, + codemodel.Param{ + required: false + description: '' + name: 'param2' + typ: codemodel.Type{ + symbol: 'string' + } + }, + ] + body: '' + result: 
codemodel.Result{ + typ: codemodel.Type{ + symbol: 'SubfileStruct1' + } + description: 'a FileStruc1 struct filled in with params 1 and 2' + name: 'the created filestruct1' + } + has_return: false + }), + ] + 'file.v': [ + CodeItem(Function{ + name: 'file_func0' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata' + description: 'is the first function of file' + params: [] + body: '' + result: codemodel.Result{ + typ: codemodel.Type{ + symbol: 'void' + } + description: '' + name: '' + } + has_return: false + }), + CodeItem(Function{ + name: 'file_func1' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata' + description: 'is the second function of file' + params: [ + codemodel.Param{ + required: false + description: 'a name that the function will do nothing with' + name: 'name' + typ: codemodel.Type{ + symbol: 'string' + } + }, + ] + body: '' + result: codemodel.Result{ + typ: codemodel.Type{ + symbol: 'void' + } + description: '' + name: '' + } + has_return: false + }), + CodeItem(Struct{ + name: 'FileStruct0' + description: 'FileStruct0 defines the configuration params of file_func2' + mod: 'core.codeparser.testdata' + is_pub: true + attrs: [ + codemodel.Attribute{ + name: 'params' + has_arg: false + arg: '' + }, + ] + fields: [ + codemodel.StructField{ + comments: [] + attrs: [] + name: 'param1' + description: '' + anon_struct: Struct{ + name: '' + description: '' + fields: [] + } + typ: codemodel.Type{ + symbol: 'string' + } + }, + codemodel.StructField{ + comments: [] + attrs: [] + name: 'param2' + description: '' + anon_struct: Struct{ + name: '' + description: '' + fields: [] + } + typ: codemodel.Type{ + symbol: 'int' + } + }, + ] + }), + CodeItem(Function{ + name: 'file_func2' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata' + description: 'is the third function of the file' + params: [ + codemodel.Param{ + required: false + description: 'configuration for file_func2' + name: 'config' + typ: codemodel.Type{ + symbol: 'FileStruct0' + } + }, + ] + body: '' + result: codemodel.Result{ + typ: codemodel.Type{ + symbol: 'void' + } + description: '' + name: '' + } + has_return: false + }), + CodeItem(Struct{ + name: 'FileStruct1' + description: '' + fields: [] + mod: 'core.codeparser.testdata' + is_pub: true + }), + CodeItem(Function{ + name: 'file_func3' + receiver: codemodel.Param{ + required: false + description: '' + name: '' + typ: codemodel.Type{ + symbol: 'void' + } + } + mod: 'core.codeparser.testdata' + description: 'is the fourth function of the file is does something with param1 and param2 and creates FileStruct1' + params: [ + codemodel.Param{ + required: false + description: '' + name: 'param1' + typ: codemodel.Type{ + symbol: 'string' + } + }, + codemodel.Param{ + required: false + description: '' + name: 'param2' + typ: codemodel.Type{ + symbol: 'int' + } + }, + ] + body: '' + result: codemodel.Result{ + typ: codemodel.Type{ + symbol: 'FileStruct1' + } + description: 'a FileStruc1 struct filled in with params 1 and 2' + name: 'the created filestruct1' + } + has_return: false + }), + ] +} + +fn test_vparse_blankdir() ! { + os.mkdir_all('${testpath}/blankdir', os.MkdirParams{})! + code := parse_v('${testpath}/blankdir')! 
+	assert code.len == 0
+}
+
+fn test_vparse_flat_directory() ! {
+	code := parse_v('${testpath}/flatdir')!
+	assert code.len == 12
+	assert code[0] == testcode['anotherfile.v'][0]
+	assert code[0..6] == testcode['anotherfile.v'][0..6], '<${code[0..6]}> vs <${testcode['anotherfile.v'][0..6]}>'
+	assert code[6..12] == testcode['subfile.v'][0..6], '<${code[6..12]}> vs <${testcode['subfile.v'][0..6]}>'
+}
+
+fn test_vparse_non_recursive() ! {
+	code := parse_v(testpath)!
+	assert code.len == 6
+	assert code[0] == testcode['file.v'][0]
+	assert code[0..6] == testcode['file.v'][0..6], '<${code[0..6]}> vs <${testcode['file.v'][0..6]}>'
+}
+
+fn test_vparse_recursive() ! {
+	$if debug {
+		console.print_debug('\nTEST: test_vparse_recursive\n')
+	}
+	code := parse_v(testpath, recursive: true)!
+	assert code.len == 18
+	assert code[0..6] == testcode['anotherfile.v'][0..6]
+	assert code[6..12] == testcode['subfile.v'][0..6]
+	assert code[12..18] == testcode['file.v'][0..6]
+}
+
+fn test_vparse_exclude_directories() ! {
+	code := parse_v(testpath,
+		recursive: true
+		exclude_dirs: ['flatdir']
+	)!
+	assert code.len == 6
+	assert code[0..6] == testcode['file.v'][0..6]
+}
+
+fn test_vparse_exclude_files() ! {
+	code := parse_v(testpath,
+		recursive: true
+		exclude_files: ['flatdir/anotherfile.v']
+	)!
+	assert code.len == 12
+	assert code[0..6] == testcode['subfile.v'][0..6]
+	assert code[6..12] == testcode['file.v'][0..6]
+}
+
+fn test_vparse_only_public() ! {
+	code := parse_v(testpath,
+		recursive: true
+		only_pub: true
+	)!
+
+	// the first function of each code file is private, so those should be skipped
+	assert code.len == 15
+	assert code[0..5] == testcode['anotherfile.v'][1..6]
+	assert code[5..10] == testcode['subfile.v'][1..6]
+	assert code[10..15] == testcode['file.v'][1..6]
+}
diff --git a/lib/core/installers/redis.v b/lib/core/installers/redis.v
new file mode 100644
index 00000000..cef3bfc7
--- /dev/null
+++ b/lib/core/installers/redis.v
@@ -0,0 +1,108 @@
+module redis
+
+import freeflowuniverse.herolib.osal
+import freeflowuniverse.herolib.core.pathlib
+import freeflowuniverse.herolib.core.rootpath
+import freeflowuniverse.herolib.ui.console
+import freeflowuniverse.herolib.sysadmin.startupmanager // assumed module path; used by start() below
+import time
+import os
+
+@[params]
+pub struct InstallArgs {
+pub mut:
+	port    int    = 6379
+	datadir string = '${rootpath.vardir()}/redis'
+	ipaddr  string = 'localhost' // can be more than 1, space separated
+	reset   bool
+	start   bool
+	restart bool // do not set to true
+}
+
+// ```
+// struct InstallArgs {
+// 	port    int = 6379
+// 	datadir string
+// 	ipaddr  string = "localhost"
+// 	reset   bool
+// 	start   bool
+// 	restart bool
+// }
+// ```
+pub fn install(args_ InstallArgs) ! {
+	mut args := args_
+
+	if !args.reset {
+		if check(args) {
+			return
+		}
+	}
+	console.print_header('install redis.')
+
+	if !(osal.cmd_exists_profile('redis-server')) {
+		if osal.is_linux() {
+			osal.package_install('redis-server')!
+		} else {
+			osal.package_install('redis')!
+		}
+	}
+	osal.execute_silent('mkdir -p ${args.datadir}')!
+
+	if args.restart {
+		stop()!
+	}
+	start(args)!
+}
+
+fn configfilepath(args InstallArgs) string {
+	if osal.is_linux() {
+		return '/etc/redis/redis.conf'
+	} else {
+		return '${args.datadir}/redis.conf'
+	}
+}
+
+fn configure(args InstallArgs) ! {
+	c := $tmpl('template/redis_config.conf')
+	pathlib.template_write(c, configfilepath(args), true)!
+}
+
+pub fn check(args InstallArgs) bool {
+	res := os.execute('redis-cli -c -p ${args.port} ping > /dev/null 2>&1')
+	if res.exit_code == 0 {
+		return true
+	}
+	return false
+}
+
+pub fn start(args InstallArgs) ! {
+	if check(args) {
+		return
+	}
+
+	configure(args)!
+	// kill any redis instances still in memory
+	osal.process_kill_recursive(name: 'redis-server')!
+
+	if osal.platform() == .osx {
+		osal.exec(cmd: 'redis-server ${configfilepath(args)} --daemonize yes')!
+		// osal.exec(cmd:"brew services start redis") or {
+		// 	osal.exec(cmd:"redis-server ${configfilepath(args)} --daemonize yes")!
+		// }
+	} else {
+		mut sm := startupmanager.get()!
+		sm.new(name: 'redis', cmd: 'redis-server ${configfilepath(args)}', start: true)!
+	}
+
+	for _ in 0 .. 100 {
+		if check(args) {
+			console.print_debug('redis started.')
+			return
+		}
+		time.sleep(100 * time.millisecond)
+	}
+	return error("Redis did not start properly, could not do: 'redis-cli -c -p ${args.port} ping'")
+}
+
+pub fn stop() ! {
+	osal.execute_silent('redis-cli shutdown')!
+}
diff --git a/lib/core/installers/template/redis_config.conf b/lib/core/installers/template/redis_config.conf
new file mode 100644
index 00000000..872557fd
--- /dev/null
+++ b/lib/core/installers/template/redis_config.conf
@@ -0,0 +1,2320 @@
+# Redis configuration file example.
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf

+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.

+################################## INCLUDES ###################################

+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Note that option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# Included paths may contain wildcards. All files matching the wildcards will
+# be included in alphabetical order.
+# Note that if an include path contains a wildcards but no files match it when
+# the server is started, the include statement will be ignored and no error will
+# be emitted. It is safe, therefore, to include wildcard files from empty
+# directories.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+# include /path/to/fragments/*.conf
+#

+################################## MODULES #####################################

+# Load modules at startup. If the server is not able to load modules
+# it will abort. It is possible to use multiple loadmodule directives.
+# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so +# loadmodule /path/to/args_module.so [arg [arg ...]] + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all available network interfaces on the host machine. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# Each address can be prefixed by "-", which means that redis will not fail to +# start if the address is not available. Being not available only refers to +# addresses that does not correspond to any network interface. Addresses that +# are already in use will always fail, and unsupported protocols will always BE +# silently skipped. +# +# Examples: +# +bind ${args.ipaddr} +# bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6 +# bind * -::* # like the default, all available interfaces +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only on the +# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis +# will only be able to accept client connections from the same host that it is +# running on). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# COMMENT OUT THE FOLLOWING LINE. +# +# You will also need to set a password unless you explicitly disable protected +# mode. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind 127.0.0.1 -::1 + +# By default, outgoing connections (from replica to master, from Sentinel to +# instances, cluster bus, etc.) are not bound to a specific local address. In +# most cases, this means the operating system will handle that based on routing +# and the interface through which the connection goes out. +# +# Using bind-source-addr it is possible to configure a specific address to bind +# to, which may also affect how the connection gets routed. +# +# Example: +# +# bind-source-addr 10.0.0.1 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and the default user has no password, the server +# only accepts local connections from the IPv4 address (127.0.0.1), IPv6 address +# (::1) or Unix domain sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured. +protected-mode yes + +# Redis uses default hardened security configuration directives to reduce the +# attack surface on innocent users. Therefore, several sensitive configuration +# directives are immutable, and some potentially-dangerous commands are blocked. +# +# Configuration directives that control files that Redis writes to (e.g., 'dir' +# and 'dbfilename') and that aren't usually modified during runtime +# are protected by making them immutable. +# +# Commands that can increase the attack surface of Redis and that aren't usually +# called by users are blocked by default. 
+# +# These can be exposed to either all connections or just local ones by setting +# each of the configs listed below to either of these values: +# +# no - Block for any connection (remain immutable) +# yes - Allow for any connection (no protection) +# local - Allow only for local connections. Ones originating from the +# IPv4 address (127.0.0.1), IPv6 address (::1) or Unix domain sockets. +# +# enable-protected-configs no +# enable-debug-command no +# enable-module-command no + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port ${args.port} + +# TCP listen() backlog. +# +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /run/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Force network equipment in the middle to consider the connection to be +# alive. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +# Apply OS-specific mechanism to mark the listening socket with the specified +# ID, to support advanced routing and filtering capabilities. +# +# On Linux, the ID represents a connection mark. +# On FreeBSD, the ID represents a socket cookie ID. +# On OpenBSD, the ID represents a route table ID. +# +# The default value is 0, which implies no marking is required. +# socket-mark-id 0 + +################################# TLS/SSL ##################################### + +# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration +# directive can be used to define TLS-listening ports. To enable TLS on the +# default port, use: +# +# port 0 +# tls-port 6379 + +# Configure a X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. These files should be +# PEM formatted. +# +# tls-cert-file redis.crt +# tls-key-file redis.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-key-file-pass secret + +# Normally Redis uses the same certificate for both server functions (accepting +# connections) and client functions (replicating from a master, establishing +# cluster bus connections, etc.). +# +# Sometimes certificates are issued with attributes that designate them as +# client-only or server-only certificates. In that case it may be desired to use +# different certificates for incoming (server) and outgoing (client) +# connections. 
To do that, use the following directives: +# +# tls-client-cert-file client.crt +# tls-client-key-file client.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-client-key-file-pass secret + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange, +# required by older versions of OpenSSL (<3.0). Newer versions do not require +# this configuration and recommend against it. +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# If "no" is specified, client certificates are not required and not accepted. +# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. +# +# tls-auth-clients no +# tls-auth-clients optional + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. +# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended +# that older formally deprecated versions are kept disabled to reduce the attack surface. +# You can explicitly specify TLS versions to support. +# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2", +# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination. +# To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +# When Redis is supervised by upstart or systemd, this parameter has no impact. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. 
Options:
+#   supervised no      - no supervision interaction
+#   supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+#                        requires "expect stop" in your upstart job config
+#   supervised systemd - signal systemd by writing READY=1 to \$NOTIFY_SOCKET
+#                        on startup, and updating Redis status on a regular
+#                        basis.
+#   supervised auto    - detect upstart or systemd method based on
+#                        UPSTART_JOB or NOTIFY_SOCKET environment variables
+# Note: these supervision methods only signal "process is ready."
+#       They do not enable continuous pings back to your supervisor.
+#
+# The default is "no". To run under upstart/systemd, you can simply uncomment
+# the line below:
+#
+# supervised auto

+# If a pid file is specified, Redis writes it where specified at startup
+# and removes it at exit.
+#
+# When the server runs non daemonized, no pid file is created if none is
+# specified in the configuration. When the server is daemonized, the pid file
+# is used even if not specified, defaulting to "/var/run/redis.pid".
+#
+# Creating a pid file is best effort: if Redis is not able to create it
+# nothing bad happens, the server will start and run normally.
+#
+# Note that on modern Linux systems "/run/redis.pid" is more conforming
+# and should be used instead.
+pidfile ${args.datadir}/redis.pid

+# Specify the server verbosity level.
+# This can be one of:
+# debug (a lot of information, useful for development/testing)
+# verbose (many rarely useful info, but not a mess like the debug level)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+# nothing (nothing is logged)
+loglevel notice

+# Specify the log file name. Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile ""

+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no

+# Specify the syslog identity.
+# syslog-ident redis

+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0

+# To disable the built in crash log, which will possibly produce cleaner core
+# dumps when they are needed, uncomment the following:
+#
+# crash-log-enabled no

+# To disable the fast memory check that's run as part of the crash log, which
+# will possibly let redis terminate sooner, uncomment the following:
+#
+# crash-memcheck-enabled no

+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 2

+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY and syslog logging is
+# disabled. Basically this means that normally a logo is displayed only in
+# interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show a
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo no

+# By default, Redis modifies the process title (as seen in 'top' and 'ps') to
+# provide some runtime information. It is possible to disable this and leave
+# the process name as executed by setting the following to no.
+set-proc-title yes

+# When changing the process title, Redis uses the following template to construct
+# the modified title.
+#
+# Template variables are specified in curly brackets. The following variables are
+# supported:
+#
+# {title}       Name of process as executed if parent, or type of child process.
+# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on, or
+#               Unix socket if only that's available.
+# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]".
+# {port}        TCP port listening on, or 0.
+# {tls-port}    TLS port listening on, or 0.
+# {unixsocket}  Unix domain socket listening on, or "".
+# {config-file} Name of configuration file used.
+#
+proc-title-template "{title} {listen-addr} {server-mode}"

+# Set the local environment which is used for string comparison operations, and
+# also affect the performance of Lua scripts. Empty String indicates the locale
+# is derived from the environment variables.
+locale-collate ""

+################################ SNAPSHOTTING  ################################

+# Save the DB to disk.
+#
+# save <seconds> <changes> [<seconds> <changes> ...]
+#
+# Redis will save the DB if the given number of seconds elapsed and it
+# surpassed the given number of write operations against the DB.
+#
+# Snapshotting can be completely disabled with a single empty string argument
+# as in following example:
+#
+# save ""
+#
+# Unless specified otherwise, by default Redis will save the DB:
+#   * After 3600 seconds (an hour) if at least 1 change was performed
+#   * After 300 seconds (5 minutes) if at least 100 changes were performed
+#   * After 60 seconds if at least 10000 changes were performed
+#
+# You can set these explicitly by uncommenting the following line.
+#
+# save 3600 1 300 100 60 10000

+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usual even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes

+# Compress string objects using LZF when dump .rdb databases?
+# By default compression is enabled as it's almost always a win.
+# If you want to save some CPU in the saving child set it to 'no' but
+# the dataset will likely be bigger if you have compressible values or keys.
+rdbcompression yes

+# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes

+# Enables or disables full sanitization checks for ziplist and listpack etc when
+# loading an RDB or RESTORE payload. This reduces the chances of an assertion or
+# crash later on while processing commands.
+# Options:
+#   no         - Never perform full sanitization
+#   yes        - Always perform full sanitization
+#   clients    - Perform full sanitization only for user connections.
+# Excludes: RDB files, RESTORE commands received from the master +# connection, and client connections which have the +# skip-sanitize-payload ACL flag. +# The default should be 'clients' but since it currently affects cluster +# resharding via MIGRATE, it is temporarily set to 'no' by default. +# +# sanitize-dump-payload no + +# The filename where to dump the DB +dbfilename dump.rdb + +# Remove RDB files used by replication in instances without persistence +# enabled. By default this option is disabled, however there are environments +# where for regulations or other security concerns, RDB files persisted on +# disk by masters in order to feed replicas, or stored on disk by replicas +# in order to load them for the initial synchronization, should be deleted +# ASAP. Note that this option ONLY WORKS in instances that have both AOF +# and RDB persistence disabled, otherwise is completely ignored. +# +# An alternative (and sometimes better) way to obtain the same effect is +# to use diskless replication on both master and replicas instances. However +# in the case of replicas, diskless is not always an option. +rdb-del-sync-files no + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ${args.datadir} + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition replicas automatically try to reconnect to masters +# and resynchronize with them. +# +# replicaof <masterip> <masterport> + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the replica to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the replica request. +# +# masterauth <master-password> +# +# However this is not enough if you are using Redis ACLs (for Redis version +# 6 or greater), and the default user is not capable of running the PSYNC +# command and/or other commands needed for replication. In this case it's +# better to configure a special user to use with replication, and specify the +# masteruser configuration as such: +# +# masteruser <username> +# +# When masteruser is specified, the replica will authenticate against its +# master using the new AUTH form: AUTH <username> <password>.
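+# +# As an illustrative sketch (the user name and password below are made-up +# values, not defaults shipped with Redis), a replica using a dedicated +# replication user would combine the two directives above like this: +# +# masteruser repl-user +# masterauth repl-user-password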
+ +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) If replica-serve-stale-data is set to 'no' the replica will reply with error +# "MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'" +# to all data access commands, excluding commands such as: +# INFO, REPLICAOF, AUTH, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, +# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, +# HOST and LATENCY. +# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only replica exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only replicas using 'rename-command' to shadow all the +# administrative / dangerous commands. +replica-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# New replicas and reconnecting replicas that are not able to continue the +# replication process just receiving differences, need to do what is called a +# "full synchronization". An RDB file is transmitted from the master to the +# replicas. +# +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child +# producing the RDB file finishes its work. With diskless replication instead +# once the transfer starts, new replicas arriving will be queued and a new +# transfer will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple +# replicas will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync yes + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the +# server waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. 
To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# When diskless replication is enabled with a delay, it is possible to let +# the replication start before the maximum delay is reached if the maximum +# number of replicas expected have connected. Default of 0 means that the +# maximum is not defined and Redis will wait the full delay. +repl-diskless-sync-max-replicas 0 + +# ----------------------------------------------------------------------------- +# WARNING: Since in this setup the replica does not immediately store an RDB on +# disk, it may cause data loss during failovers. RDB diskless load + Redis +# modules not handling I/O reads may cause Redis to abort in case of I/O errors +# during the initial synchronization stage with the master. +# ----------------------------------------------------------------------------- +# +# Replica can load the RDB it reads from the replication link directly from the +# socket, or store the RDB to a file and read that file after it was completely +# received from the master. +# +# In many cases the disk is slower than the network, and storing and loading +# the RDB file may increase replication time (and even increase the master's +# Copy on Write memory and replica buffers). +# However, when parsing the RDB file directly from the socket, in order to avoid +# data loss it's only safe to flush the current dataset when the new dataset is +# fully loaded in memory, resulting in higher memory usage. +# For this reason we have the following options: +# +# "disabled" - Don't use diskless load (store the rdb file to the disk first) +# "swapdb" - Keep current db contents in RAM while parsing the data directly +# from the socket. Replicas in this mode can keep serving current +# dataset while replication is in progress, except for cases where +# they can't recognize master as having a data set from same +# replication history. +# Note that this requires sufficient memory, if you don't have it, +# you risk an OOM kill. +# "on-empty-db" - Use diskless load only when current dataset is empty. This is +# safer and avoid having old and new dataset loaded side by side +# during replication. +repl-diskless-load disabled + +# Master send PINGs to its replicas in a predefined interval. It's possible to +# change this interval with the repl_ping_replica_period option. The default +# value is 10 seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. 
+# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. +# +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. +# +# The backlog is only allocated if there is at least one replica connected. +# +# repl-backlog-size 1mb + +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed. +# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with other replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# The propagation error behavior controls how Redis will behave when it is +# unable to handle a command being processed in the replication stream from a master +# or processed while reading from an AOF file. Errors that occur during propagation +# are unexpected, and can cause data inconsistency. However, there are edge cases +# in earlier versions of Redis where it was possible for the server to replicate or persist +# commands that would fail on future versions. For this reason the default behavior +# is to ignore such errors and continue processing commands. +# +# If an application wants to ensure there is no data divergence, this configuration +# should be set to 'panic' instead. The value can also be set to 'panic-on-replicas' +# to only panic when a replica encounters an error on the replication stream. One of +# these two panic values will become the default value in the future once there are +# sufficient safety mechanisms in place to prevent false positive crashes. +# +# propagation-error-behavior ignore + +# Replica ignore disk write errors controls the behavior of a replica when it is +# unable to persist a write command received from its master to disk. By default, +# this configuration is set to 'no' and will crash the replica in this condition. 
+# It is not recommended to change this default, however in order to be compatible +# with older versions of Redis this config can be toggled to 'yes' which will just +# log a warning and execute the write command it got from the master. +# +# replica-ignore-disk-write-errors no + +# ----------------------------------------------------------------------------- +# By default, Redis Sentinel includes all replicas in its reports. A replica +# can be excluded from Redis Sentinel's announcements. An unannounced replica +# will be ignored by the 'sentinel replicas <master>' command and won't be +# exposed to Redis Sentinel's clients. +# +# This option does not change the behavior of replica-priority. Even with +# replica-announced set to 'no', the replica can be promoted to master. To +# prevent this behavior, set replica-priority to 0. +# +# replica-announced yes + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP address and port normally reported by a replica is +# obtained in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may actually be reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +############################### KEYS TRACKING ################################# + +# Redis implements server assisted support for client side caching of values. +# This is implemented using an invalidation table that remembers, using +# a radix key indexed by key name, what clients have which keys. In turn +# this is used in order to send invalidation messages to clients.
Please +# check this page to understand more about the feature: +# +# https://redis.io/topics/client-side-caching +# +# When tracking is enabled for a client, all the read only queries are assumed +# to be cached: this will force Redis to store information in the invalidation +# table. When keys are modified, such information is flushed away, and +# invalidation messages are sent to the clients. However if the workload is +# heavily dominated by reads, Redis could use more and more memory in order +# to track the keys fetched by many clients. +# +# For this reason it is possible to configure a maximum fill value for the +# invalidation table. By default it is set to 1M of keys, and once this limit +# is reached, Redis will start to evict keys in the invalidation table +# even if they were not modified, just to reclaim memory: this will in turn +# force the clients to invalidate the cached values. Basically the table +# maximum size is a trade off between the memory you want to spend server +# side to track information about who cached what, and the ability of clients +# to retain cached objects in memory. +# +# If you set the value to 0, it means there are no limits, and Redis will +# retain as many keys as needed in the invalidation table. +# In the "stats" INFO section, you can find information about the number of +# keys in the invalidation table at every given moment. +# +# Note: when key tracking is used in broadcasting mode, no memory is used +# in the server side so this setting is useless. +# +# tracking-table-max-keys 1000000 + +################################## SECURITY ################################### + +# Warning: since Redis is pretty fast, an outside user can try up to +# 1 million passwords per second against a modern box. This means that you +# should use very strong passwords, otherwise they will be very easy to break. +# Note that because the password is really a shared secret between the client +# and the server, and should not be memorized by any human, the password +# can be easily a long string from /dev/urandom or whatever, so by using a +# long and unguessable password no brute force attack will be possible. + +# Redis ACL users are defined in the following format: +# +# user <username> ... acl rules ... +# +# For example: +# +# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 +# +# The special username "default" is used for new connections. If this user +# has the "nopass" rule, then new connections will be immediately authenticated +# as the "default" user without the need of any password provided via the +# AUTH command. Otherwise if the "default" user is not flagged with "nopass" +# the connections will start in not authenticated state, and will require +# AUTH (or the HELLO command AUTH option) in order to be authenticated and +# start to work. +# +# The ACL rules that describe what a user can do are the following: +# +# on Enable the user: it is possible to authenticate as this user. +# off Disable the user: it's no longer possible to authenticate +# with this user, however the already authenticated connections +# will still work. +# skip-sanitize-payload RESTORE dump-payload sanitization is skipped. +# sanitize-payload RESTORE dump-payload is sanitized (default). +# +<command> Allow the execution of that command. +# May be used with `|` for allowing subcommands (e.g "+config|get") +# -<command> Disallow the execution of that command.
+# May be used with `|` for blocking subcommands (e.g "-config|set") +# +@<category> Allow the execution of all the commands in such category +# with valid categories are like @admin, @set, @sortedset, ... +# and so forth, see the full list in the server.c file where +# the Redis command table is described and defined. +# The special category @all means all the commands, but currently +# present in the server, and that will be loaded in the future +# via modules. +# +<command>|first-arg Allow a specific first argument of an otherwise +# disabled command. It is only supported on commands with +# no sub-commands, and is not allowed as negative form +# like -SELECT|1, only additive starting with "+". This +# feature is deprecated and may be removed in the future. +# allcommands Alias for +@all. Note that it implies the ability to execute +# all the future commands loaded via the modules system. +# nocommands Alias for -@all. +# ~<pattern> Add a pattern of keys that can be mentioned as part of +# commands. For instance ~* allows all the keys. The pattern +# is a glob-style pattern like the one of KEYS. +# It is possible to specify multiple patterns. +# %R~<pattern> Add key read pattern that specifies which keys can be read +# from. +# %W~<pattern> Add key write pattern that specifies which keys can be +# written to. +# allkeys Alias for ~* +# resetkeys Flush the list of allowed keys patterns. +# &<pattern> Add a glob-style pattern of Pub/Sub channels that can be +# accessed by the user. It is possible to specify multiple channel +# patterns. +# allchannels Alias for &* +# resetchannels Flush the list of allowed channel patterns. +# ><password> Add this password to the list of valid passwords for the user. +# For example >mypass will add "mypass" to the list. +# This directive clears the "nopass" flag (see later). +# <<password> Remove this password from the list of valid passwords. +# nopass All the set passwords of the user are removed, and the user +# is flagged as requiring no password: it means that every +# password will work against this user. If this directive is +# used for the default user, every new connection will be +# immediately authenticated with the default user without +# any explicit AUTH command required. Note that the "resetpass" +# directive will clear this condition. +# resetpass Flush the list of allowed passwords. Moreover removes the +# "nopass" status. After "resetpass" the user has no associated +# passwords and there is no way to authenticate without adding +# some password (or setting it as "nopass" later). +# reset Performs the following actions: resetpass, resetkeys, resetchannels, +# allchannels (if acl-pubsub-default is set), off, clearselectors, -@all. +# The user returns to the same state it has immediately after its creation. +# (<options>) Create a new selector with the options specified within the +# parentheses and attach it to the user. Each option should be +# space separated. The first character must be ( and the last +# character must be ). +# clearselectors Remove all of the currently attached selectors. +# Note this does not change the "root" user permissions, +# which are the permissions directly applied onto the +# user (outside the parentheses). +# +# ACL rules can be specified in any order: for instance you can start with +# passwords, then flags, or key patterns. However note that the additive +# and subtractive rules will CHANGE MEANING depending on the ordering.
+# For instance see the following example: +# +# user alice on +@all -DEBUG ~* >somepassword +# +# This will allow "alice" to use all the commands with the exception of the +# DEBUG command, since +@all added all the commands to the set of the commands +# alice can use, and later DEBUG was removed. However if we invert the order +# of two ACL rules the result will be different: +# +# user alice on -DEBUG +@all ~* >somepassword +# +# Now DEBUG was removed when alice had yet no commands in the set of allowed +# commands, later all the commands are added, so the user will be able to +# execute everything. +# +# Basically ACL rules are processed left-to-right. +# +# The following is a list of command categories and their meanings: +# * keyspace - Writing or reading from keys, databases, or their metadata +# in a type agnostic way. Includes DEL, RESTORE, DUMP, RENAME, EXISTS, DBSIZE, +# KEYS, EXPIRE, TTL, FLUSHALL, etc. Commands that may modify the keyspace, +# key or metadata will also have `write` category. Commands that only read +# the keyspace, key or metadata will have the `read` category. +# * read - Reading from keys (values or metadata). Note that commands that don't +# interact with keys, will not have either `read` or `write`. +# * write - Writing to keys (values or metadata) +# * admin - Administrative commands. Normal applications will never need to use +# these. Includes REPLICAOF, CONFIG, DEBUG, SAVE, MONITOR, ACL, SHUTDOWN, etc. +# * dangerous - Potentially dangerous (each should be considered with care for +# various reasons). This includes FLUSHALL, MIGRATE, RESTORE, SORT, KEYS, +# CLIENT, DEBUG, INFO, CONFIG, SAVE, REPLICAOF, etc. +# * connection - Commands affecting the connection or other connections. +# This includes AUTH, SELECT, COMMAND, CLIENT, ECHO, PING, etc. +# * blocking - Potentially blocking the connection until released by another +# command. +# * fast - Fast O(1) commands. May loop on the number of arguments, but not the +# number of elements in the key. +# * slow - All commands that are not Fast. +# * pubsub - PUBLISH / SUBSCRIBE related +# * transaction - WATCH / MULTI / EXEC related commands. +# * scripting - Scripting related. +# * set - Data type: sets related. +# * sortedset - Data type: zsets related. +# * list - Data type: lists related. +# * hash - Data type: hashes related. +# * string - Data type: strings related. +# * bitmap - Data type: bitmaps related. +# * hyperloglog - Data type: hyperloglog related. +# * geo - Data type: geo related. +# * stream - Data type: streams related. +# +# For more information about ACL configuration please refer to +# the Redis web site at https://redis.io/topics/acl + +# ACL LOG +# +# The ACL Log tracks failed commands and authentication events associated +# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked +# by ACLs. The ACL Log is stored in memory. You can reclaim memory with +# ACL LOG RESET. Define the maximum entry length of the ACL Log below. +acllog-max-len 128 + +# Using an external ACL file +# +# Instead of configuring users here in this file, it is possible to use +# a stand-alone file just listing users. The two methods cannot be mixed: +# if you configure users here and at the same time you activate the external +# ACL file, the server will refuse to start. +# +# The format of the external ACL user file is exactly the same as the +# format that is used inside redis.conf to describe users.
+# +# aclfile /etc/redis/users.acl + +# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility +# layer on top of the new ACL system. The option effect will be just setting +# the password for the default user. Clients will still authenticate using +# AUTH <password> as usually, or more explicitly with AUTH default <password> +# if they follow the new protocol: both will work. +# +# The requirepass is not compatible with aclfile option and the ACL LOAD +# command, these will cause requirepass to be ignored. +# +# requirepass foobared + +# New users are initialized with restrictive permissions by default, via the +# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it +# is possible to manage access to Pub/Sub channels with ACL rules as well. The +# default Pub/Sub channels permission for new users is controlled by the +# acl-pubsub-default configuration directive, which accepts one of these values: +# +# allchannels: grants access to all Pub/Sub channels +# resetchannels: revokes access to all Pub/Sub channels +# +# From Redis 7.0, acl-pubsub-default defaults to 'resetchannels' permission. +# +# acl-pubsub-default resetchannels + +# Command renaming (DEPRECATED). +# +# ------------------------------------------------------------------------ +# WARNING: avoid using this option if possible. Instead use ACLs to remove +# commands from the default user, and put them only in some admin user you +# create for administrative purposes. +# ------------------------------------------------------------------------ +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy).
+# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory <bytes> + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select one from the following behaviors: +# +# volatile-lru -> Evict using approximated LRU, only keys with an expire set. +# allkeys-lru -> Evict any key using approximated LRU. +# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. +# allkeys-lfu -> Evict any key using approximated LFU. +# volatile-random -> Remove a random key having an expire set. +# allkeys-random -> Remove a random key, any key. +# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) +# noeviction -> Don't evict anything, just return an error on write operations. +# +# LRU means Least Recently Used +# LFU means Least Frequently Used +# +# Both LRU, LFU and volatile-ttl are implemented using approximated +# randomized algorithms. +# +# Note: with any of the above policies, when there are no suitable keys for +# eviction, Redis will return an error on write operations that require +# more memory. These are usually commands that create new keys, add data or +# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE, +# SORT (due to the STORE argument), and EXEC (if the transaction includes any +# command that requires memory). +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. By default Redis will check five keys and pick the one that was +# used least recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs more CPU. 3 is faster but not very accurate. +# +# maxmemory-samples 5 + +# Eviction processing is designed to function well with the default setting. +# If there is an unusually large amount of write traffic, this value may need to +# be increased. Decreasing this value may reduce latency at the risk of +# eviction processing effectiveness +# 0 = minimum latency, 10 = default, 100 = process without regard to latency +# +# maxmemory-eviction-tenacity 10 + +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually).
It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. +# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica +# to have a different memory setting, and you are sure all the writes performed +# to the replica are idempotent, then you may change this default (but be sure +# to understand what you are doing). +# +# Note that since the replica by default does not evict, it may end using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory +# and so forth). So make sure you monitor your replicas and make sure they +# have enough memory to never hit a real out-of-memory condition before the +# master hits the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +# Redis reclaims expired keys in two ways: upon access when those keys are +# found to be expired, and also in background, in what is called the +# "active expire key". The key space is slowly and interactively scanned +# looking for expired keys to reclaim, so that it is possible to free memory +# of keys that are expired and will never be accessed again in a short time. +# +# The default effort of the expire cycle will try to avoid having more than +# ten percent of expired keys still in memory, and will try to avoid consuming +# more than 25% of total memory and to add latency to the system. However +# it is possible to increase the expire "effort" that is normally set to +# "1", to a greater value, up to the value "10". At its maximum value the +# system will use more CPU, longer cycles (and technically may introduce +# more latency), and will tolerate less already expired keys still present +# in the system. It's a tradeoff between memory, CPU and latency. +# +# active-expire-effort 1 + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. 
+# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. +# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives. + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +# It is also possible, for the case when to replace the user code DEL calls +# with UNLINK calls is not easy, to modify the default behavior of the DEL +# command to act exactly like UNLINK, using the following configuration +# directive: + +lazyfree-lazy-user-del no + +# FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous +# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the +# commands. When neither flag is passed, this directive will be used to determine +# if the data should be deleted asynchronously. + +lazyfree-lazy-user-flush no + +################################ THREADED I/O ################################# + +# Redis is mostly single threaded, however there are certain threaded +# operations such as UNLINK, slow I/O accesses and other things that are +# performed on side threads. +# +# Now it is also possible to handle Redis clients socket reads and writes +# in different I/O threads. Since especially writing is so slow, normally +# Redis users use pipelining in order to speed up the Redis performances per +# core, and spawn multiple instances in order to scale more. Using I/O +# threads it is possible to easily speedup two times Redis without resorting +# to pipelining nor sharding of the instance. +# +# By default threading is disabled, we suggest enabling it only in machines +# that have at least 4 or more cores, leaving at least one spare core. +# Using more than 8 threads is unlikely to help much. We also recommend using +# threaded I/O only if you actually have performance problems, with Redis +# instances being able to use a quite big percentage of CPU time, otherwise +# there is no point in using this feature. +# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# io-threads 4 +# +# Setting io-threads to 1 will just use the main thread as usual. 
+# When I/O threads are enabled, we only use threads for writes, that is +# to thread the write(2) syscall and transfer the client buffers to the +# socket. However it is also possible to enable threading of reads and +# protocol parsing using the following configuration directive, by setting +# it to yes: +# +# io-threads-do-reads no +# +# Usually threading reads doesn't help much. +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. Also, this feature currently does not work when SSL is +# enabled. +# +# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make +# sure you also run the benchmark itself in threaded mode, using the +# --threads option to match the number of Redis threads, otherwise you'll not +# be able to notice the improvements. + +############################ KERNEL OOM CONTROL ############################## + +# On Linux, it is possible to hint the kernel OOM killer on what processes +# should be killed first when out of memory. +# +# Enabling this feature makes Redis actively control the oom_score_adj value +# for all its processes, depending on their role. The default scores will +# attempt to have background child processes killed before all others, and +# replicas killed before masters. +# +# Redis supports these options: +# +# no: Don't make changes to oom-score-adj (default). +# yes: Alias to "relative" see below. +# absolute: Values in oom-score-adj-values are written as is to the kernel. +# relative: Values are used relative to the initial value of oom_score_adj when +# the server starts and are then clamped to a range of -1000 to 1000. +# Because typically the initial value is 0, they will often match the +# absolute values. +oom-score-adj no + +# When oom-score-adj is used, this directive controls the specific values used +# for master, replica and background child processes. Values range -2000 to +# 2000 (higher means more likely to be killed). +# +# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) +# can freely increase their value, but not decrease it below its initial +# settings. This means that setting oom-score-adj to "relative" and setting the +# oom-score-adj-values to positive values will always succeed. +oom-score-adj-values 0 200 800 + + +#################### KERNEL transparent hugepage CONTROL ###################### + +# Usually the kernel Transparent Huge Pages control is set to "madvise" or +# "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which +# case this config has no effect. On systems in which it is set to "always", +# redis will attempt to disable it specifically for the redis process in order +# to avoid latency problems specifically with fork(2) and CoW. +# If for some reason you prefer to keep it enabled, you can set this config to +# "no" and the kernel global to "always". + +disable-thp yes + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability.
For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Note that changing this value in a config file of an existing database and +# restarting the server can lead to data loss. A conversion needs to be done +# by setting it via CONFIG command on a live server first. +# +# Please check https://redis.io/topics/persistence for more information. + +appendonly no + +# The base name of the append only file. +# +# Redis 7 and newer use a set of append-only files to persist the dataset +# and changes applied to it. There are two basic types of files in use: +# +# - Base files, which are a snapshot representing the complete state of the +# dataset at the time the file was created. Base files can be either in +# the form of RDB (binary serialized) or AOF (textual commands). +# - Incremental files, which contain additional commands that were applied +# to the dataset following the previous file. +# +# In addition, manifest files are used to track the files and the order in +# which they were created and should be applied. +# +# Append-only file names are created by Redis following a specific pattern. +# The file name's prefix is based on the 'appendfilename' configuration +# parameter, followed by additional information about the sequence and type. +# +# For example, if appendfilename is set to appendonly.aof, the following file +# names could be derived: +# +# - appendonly.aof.1.base.rdb as a base file. +# - appendonly.aof.1.incr.aof, appendonly.aof.2.incr.aof as incremental files. +# - appendonly.aof.manifest as a manifest file. + +appendfilename "appendonly.aof" + +# For convenience, Redis stores all persistent append-only files in a dedicated +# directory. The name of the directory is determined by the appenddirname +# configuration parameter. + +appenddirname "appendonlydir" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". 
+ +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync no". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +# Redis can create append-only base files in either RDB or AOF formats. Using +# the RDB format is always faster and more efficient, and disabling it is only +# supported for backward compatibility purposes. 
+aof-use-rdb-preamble yes + +# Redis supports recording timestamp annotations in the AOF to support restoring +# the data from a specific point-in-time. However, using this capability changes +# the AOF format in a way that may not be compatible with existing AOF parsers. +aof-timestamp-enabled no + +################################ SHUTDOWN ##################################### + +# Maximum time to wait for replicas when shutting down, in seconds. +# +# During shut down, a grace period allows any lagging replicas to catch up with +# the latest replication offset before the master exits. This period can +# prevent data loss, especially for deployments without configured disk backups. +# +# The 'shutdown-timeout' value is the grace period's duration in seconds. It is +# only applicable when the instance has replicas. To disable the feature, set +# the value to 0. +# +# shutdown-timeout 10 + +# When Redis receives a SIGINT or SIGTERM, shutdown is initiated and by default +# an RDB snapshot is written to disk in a blocking operation if save points are configured. +# The options used on signaled shutdown can include the following values: +# default: Saves RDB snapshot only if save points are configured. +# Waits for lagging replicas to catch up. +# save: Forces a DB saving operation even if no save points are configured. +# nosave: Prevents DB saving operation even if one or more save points are configured. +# now: Skips waiting for lagging replicas. +# force: Ignores any errors that would normally prevent the server from exiting. +# +# Any combination of values is allowed as long as "save" and "nosave" are not set simultaneously. +# Example: "nosave force now" +# +# shutdown-on-sigint default +# shutdown-on-sigterm default + +################ NON-DETERMINISTIC LONG BLOCKING COMMANDS ##################### + +# Maximum time in milliseconds for EVAL scripts, functions and in some cases +# modules' commands before Redis can start processing or rejecting other clients. +# +# If the maximum execution time is reached Redis will start to reply to most +# commands with a BUSY error. +# +# In this state Redis will only allow a handful of commands to be executed. +# For instance, SCRIPT KILL, FUNCTION KILL, SHUTDOWN NOSAVE and possibly some +# module specific 'allow-busy' commands. +# +# SCRIPT KILL and FUNCTION KILL will only be able to stop a script that did not +# yet call any write commands, so SHUTDOWN NOSAVE may be the only way to stop +# the server in the case a write command was already issued by the script when +# the user doesn't want to wait for the natural termination of the script. +# +# The default is 5 seconds. It is possible to set it to 0 or a negative value +# to disable this mechanism (uninterrupted execution). Note that in the past +# this config had a different name, which is now an alias, so both of these do +# the same: +# lua-time-limit 5000 +# busy-reply-threshold 5000 + +################################ REDIS CLUSTER ############################### + +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are a multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# The cluster port is the port that the cluster bus will listen for inbound connections on. When set +# to the default value, 0, it will be bound to the command port + 10000. Setting this value requires +# you to specify the cluster bus port when executing cluster meet. +# cluster-port 0 + +# A replica of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a replica to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best +# replication offset (more data from the master processed). +# Replicas will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single replica computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the replica will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a replica will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period +# +# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large cluster-replica-validity-factor may allow replicas with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a replica at all. +# +# For maximum availability, it is possible to set the cluster-replica-validity-factor +# to a value of 0, which means, that replicas will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-replica-validity-factor 10 + +# Cluster replicas are able to migrate to orphaned masters, that are masters +# that are left without working replicas. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working replicas. +# +# Replicas migrate to orphaned masters only if there are still at least a +# given number of other working replicas for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a replica +# will migrate only if there is at least 1 other working replica for its master +# and so forth. 
It usually reflects the number of replicas you want for every +# master in your cluster. +# +# Default is 1 (replicas migrate only if their masters remain with at least +# one replica). To disable migration just set it to a very large value or +# set cluster-allow-replica-migration to 'no'. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# Turning off this option allows to use less automatic cluster configuration. +# It both disables migration to orphaned masters and migration from masters +# that became empty. +# +# Default is 'yes' (allow automatic migrations). +# +# cluster-allow-replica-migration yes + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least a hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# This option, when set to yes, prevents replicas from trying to failover its +# master during master failures. However the replica can still perform a +# manual failover, if forced to do so. +# +# This is useful in different scenarios, especially in the case of multiple +# data center operations, where we want one side to never be promoted if not +# in the case of a total DC failure. +# +# cluster-replica-no-failover no + +# This option, when set to yes, allows nodes to serve read traffic while the +# cluster is in a down state, as long as it believes it owns the slots. +# +# This is useful for two cases. The first case is for when an application +# doesn't require consistency of data during node failures or network partitions. +# One example of this is a cache, where as long as the node has the data it +# should be able to serve it. +# +# The second use case is for configurations that don't meet the recommended +# three shards but want to enable cluster mode and scale later. A +# master outage in a 1 or 2 shard configuration causes a read/write outage to the +# entire cluster without this option set, with it set there is only a write outage. +# Without a quorum of masters, slot ownership will not change automatically. +# +# cluster-allow-reads-when-down no + +# This option, when set to yes, allows nodes to serve pubsub shard traffic while +# the cluster is in a down state, as long as it believes it owns the slots. +# +# This is useful if the application would like to use the pubsub feature even when +# the cluster global stable state is not OK. If the application wants to make sure only +# one shard is serving a given channel, this feature should be kept as yes. +# +# cluster-allow-pubsubshard-when-down yes + +# Cluster link send buffer limit is the limit on the memory usage of an individual +# cluster bus link's send buffer in bytes. Cluster links would be freed if they exceed +# this limit. This is to primarily prevent send buffers from growing unbounded on links +# toward slow peers (E.g. PubSub messages being piled up). +# This limit is disabled by default. 
Enable this limit when 'mem_cluster_links' INFO field
+# and/or 'send-buffer-allocated' entries in the 'CLUSTER LINKS' command output continuously increase.
+# Minimum limit of 1gb is recommended so that cluster link buffer can fit in at least a single
+# PubSub message by default. (client-query-buffer-limit default value is 1gb)
+#
+# cluster-link-sendbuf-limit 0
+
+# Clusters can configure their announced hostname using this config. This is a common use case for
+# applications that need to use TLS Server Name Indication (SNI) or deal with DNS based
+# routing. By default this value is only shown as additional metadata in the CLUSTER SLOTS
+# command, but can be changed using the 'cluster-preferred-endpoint-type' config. This value is
+# communicated along the cluster bus to all nodes; setting it to an empty string will remove
+# the hostname and also propagate the removal.
+#
+# cluster-announce-hostname ""
+
+# Clusters can configure an optional nodename to be used in addition to the node ID for
+# debugging and admin information. This name is broadcasted between nodes, so will be used
+# in addition to the node ID when reporting cross node events such as node failures.
+# cluster-announce-human-nodename ""
+
+# Clusters can advertise how clients should connect to them using either their IP address,
+# a user defined hostname, or by declaring they have no endpoint. Which endpoint is
+# shown as the preferred endpoint is set by using the cluster-preferred-endpoint-type
+# config with values 'ip', 'hostname', or 'unknown-endpoint'. This value controls how
+# the endpoint is returned for MOVED/ASKING requests as well as the first field of CLUSTER SLOTS.
+# If the preferred endpoint type is set to hostname, but no announced hostname is set, a '?'
+# will be returned instead.
+#
+# When a cluster advertises itself as having an unknown endpoint, it's indicating that
+# the server doesn't know how clients can reach the cluster. This can happen in certain
+# networking situations where there are multiple possible routes to the node, and the
+# server doesn't know which one the client took. In this case, the server is expecting
+# the client to reach out on the same endpoint it used for making the last request, but use
+# the port provided in the response.
+#
+# cluster-preferred-endpoint-type ip
+
+# In order to set up your cluster, make sure to read the documentation
+# available at the https://redis.io web site.
+
+########################## CLUSTER DOCKER/NAT support ########################
+
+# In certain deployments, Redis Cluster nodes address discovery fails, because
+# addresses are NAT-ted or because ports are forwarded (the typical case is
+# Docker and other containers).
+#
+# In order to make Redis Cluster work in such environments, a static
+# configuration where each node knows its public address is needed. The
+# following four options are used for this purpose, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-tls-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client ports (for connections
+# without and with TLS) and cluster message bus port. The information is then
+# published in the header of the bus packets so that other nodes will be able to
+# correctly map the address of the node publishing the information.
+#
+# If tls-cluster is set to yes and cluster-announce-tls-port is omitted or set
+# to zero, then cluster-announce-port refers to the TLS port. Note also that
+# cluster-announce-tls-port has no effect if tls-cluster is set to no.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# clients port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-tls-port 6379
+# cluster-announce-port 0
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
+latency-monitor-threshold 0
+
+################################ LATENCY TRACKING ##############################
+
+# The Redis extended latency monitoring tracks the per command latencies and enables
+# exporting the percentile distribution via the INFO latencystats command,
+# and cumulative latency distributions (histograms) via the LATENCY command.
+#
+# By default, the extended latency monitoring is enabled since the overhead
+# of keeping track of the command latency is very small.
+# latency-tracking yes
+
+# By default the exported latency percentiles via the INFO latencystats command
+# are the p50, p99, and p999.
+# latency-tracking-info-percentiles 50 99 99.9
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at https://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+#  K     Keyspace events, published with __keyspace@<db>__ prefix.
+#  E     Keyevent events, published with __keyevent@<db>__ prefix.
+#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+#  $     String commands
+#  l     List commands
+#  s     Set commands
+#  h     Hash commands
+#  z     Sorted set commands
+#  x     Expired events (events generated every time a key expires)
+#  e     Evicted events (events generated when a key is evicted for maxmemory)
+#  n     New key events (Note: not included in the 'A' class)
+#  t     Stream commands
+#  d     Module key type events
+#  m     Key-miss events (Note: It is not included in the 'A' class)
+#  A     Alias for "g$lshzxetd", so that the "AKE" string means all the events
+#        (Except key-miss events which are excluded from 'A' due to their
+#         unique nature).
+#
+#  The "notify-keyspace-events" takes as argument a string that is composed
+#  of zero or multiple characters. The empty string means that notifications
+#  are disabled.
+#
+#  Example: to enable list and generic events, from the point of view of the
+#  event name, use:
+#
+#  notify-keyspace-events Elg
+#
+#  Example 2: to get the stream of the expired keys subscribing to channel
+#  name __keyevent@0__:expired use:
+#
+#  notify-keyspace-events Ex
+#
+#  By default all notifications are disabled because most users don't need
+#  this feature and the feature has some overhead. Note that if you don't
+#  specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-listpack-entries 512
+hash-max-listpack-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning:
+# -5: max size: 64 Kb  <-- not recommended for normal workloads
+# -4: max size: 32 Kb  <-- not recommended
+# -3: max size: 16 Kb  <-- probably not recommended
+# -2: max size: 8 Kb   <-- good
+# -1: max size: 4 Kb   <-- good
+# Positive numbers mean store up to _exactly_ that number of elements
+# per list node.
+# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
+# but if your use case is unique, adjust the settings as necessary.
+list-max-listpack-size -2
+
+# Lists may also be compressed.
+# Compress depth is the number of quicklist ziplist nodes from *each* side of
+# the list to *exclude* from compression. The head and tail of the list
+# are always uncompressed for fast push/pop operations.
Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Sets containing non-integer values are also encoded using a memory efficient +# data structure when they have a small number of entries, and the biggest entry +# does not exceed a given threshold. These thresholds can be configured using +# the following directives. +set-max-listpack-entries 128 +set-max-listpack-value 64 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-listpack-entries 128 +zset-max-listpack-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When a HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Streams macro node max size / items. The stream data structure is a radix +# tree of big nodes that encode multiple items inside. Using this configuration +# it is possible to configure how big a single node can be in bytes, and the +# maximum number of items it may contain before switching to a new node when +# appending new stream entries. If any of the following settings are set to +# zero, the limit is ignored, so for instance it is possible to set just a +# max entries limit by setting max-bytes to 0 and max-entries to the desired +# value. +stream-node-max-bytes 4096 +stream-node-max-entries 100 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. 
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Note that it doesn't make sense to set the replica clients output buffer
+# limit lower than the repl-backlog-size config (partial sync will succeed
+# and then replica will get disconnected).
+# Such a configuration is ignored (the size of repl-backlog-size will be used).
+# This doesn't have memory consumption implications since the replica client
+# will share the backlog buffers memory.
+#
+# Both the hard or the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to avoid that a protocol desynchronization (for
+# instance due to a bug in the client) will lead to unbound memory usage in
+# the query buffer. However you can configure it here if you have very special
+# needs, such as huge multi/exec requests or alike.
+#
+# client-query-buffer-limit 1gb
+
+# In some scenarios client connections can hog up memory leading to OOM
+# errors or data eviction. To avoid this we can cap the accumulated memory
+# used by all client connections (all pubsub and normal clients). Once we
+# reach that limit connections will be dropped by the server freeing up
+# memory. The server will attempt to drop the connections using the most
+# memory first. We call this mechanism "client eviction".
+#
+# Client eviction is configured using the maxmemory-clients setting as follows:
+# 0 - client eviction is disabled (default)
+#
+# A memory value can be used for the client eviction threshold,
+# for example:
+# maxmemory-clients 1g
+#
+# A percentage value (between 1% and 100%) means the client eviction threshold
+# is based on a percentage of the maxmemory setting. For example to set client
+# eviction at 5% of maxmemory:
+# maxmemory-clients 5%
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here, but it must be 1mb or greater.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients in timeout, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10. Raising the value will use more CPU when
+# Redis is idle, but at the same time will make Redis more responsive when
+# there are many keys expiring at the same time, and timeouts may be
+# handled with more precision.
+#
+# The range is between 1 and 500, however a value over 100 is usually not
+# a good idea. Most users should use the default of 10 and raise this up to
+# 100 only in environments where very low latency is required.
+hz 10
+
+# Normally it is useful to have an HZ value which is proportional to the
+# number of clients connected. This is useful in order, for instance, to
+# avoid that too many clients are processed for each background task invocation
+# in order to avoid latency spikes.
+#
+# Since the default HZ value is conservatively set to 10, Redis
+# offers, and enables by default, the ability to use an adaptive HZ value
+# which will temporarily raise when there are many connected clients.
+#
+# When dynamic HZ is enabled, the actual configured HZ will be used
+# as a baseline, but multiples of the configured HZ value will be actually
+# used as needed once more clients are connected. In this way an idle
+# instance will use very little CPU time while a busy instance will be
+# more responsive.
+dynamic-hz yes
+
+# When a child rewrites the AOF file, if the following option is enabled
+# the file will be fsync-ed every 4 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+aof-rewrite-incremental-fsync yes
+
+# When redis saves an RDB file, if the following option is enabled
+# the file will be fsync-ed every 4 MB of data generated. This is useful
+# in order to commit the file to the disk more incrementally and avoid
+# big latency spikes.
+rdb-save-incremental-fsync yes
+
+# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
+# idea to start with the default settings and only change them after investigating
+# how to improve the performance and how the keys LFU change over time, which
+# is possible to inspect via the OBJECT FREQ command.
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key, its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# The default lfu-log-factor is 10. This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
+# +--------+------------+------------+------------+------------+------------+
+# | 0      | 104        | 255        | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 1      | 18         | 49         | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 10     | 10         | 18         | 142        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 100    | 8          | 11         | 49         | 143        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+#   redis-benchmark -n 1000000 incr foo
+#   redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be decremented.
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means we
+# will never decay the counter.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
+
+
+# The maximum number of new client connections accepted per event-loop cycle. This configuration
+# is set independently for TLS connections.
+#
+# By default, up to 10 new connections will be accepted per event-loop cycle for normal connections
+# and up to 1 new connection per event-loop cycle for TLS connections.
+#
+# Adjusting this to a larger number can slightly improve efficiency for new connections
+# at the risk of causing timeouts for regular commands on established connections. It is
+# not advised to change this without ensuring that all clients have limited connection
+# pools and exponential backoff in the case of command/connection timeouts.
+#
+# If your application is establishing a large number of new connections per second you should
+# also consider tuning the value of tcp-backlog, which allows the kernel to buffer more
+# pending connections before dropping or rejecting connections.
+#
+# max-new-connections-per-cycle 10
+# max-new-tls-connections-per-cycle 1
+
+
+########################### ACTIVE DEFRAGMENTATION #######################
+#
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing it to reclaim memory.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However thanks to this feature
+# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
+# in a "hot" way, while the server is running.
+#
+# Basically when the fragmentation is over a certain level (see the
+# configuration options below) Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time, will release the
+# old copies of the data. This process, repeated incrementally for all the keys,
+# will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. This feature is disabled by default, and only works if you compiled Redis
+#    to use the copy of Jemalloc we ship with the source code of Redis.
+#    This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+#    issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+#    needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine tune the behavior of the
+# defragmentation process. If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
+
+# Active defragmentation is disabled by default
+# activedefrag no
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1
+
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performance of the server.
+# This is useful both in order to pin different Redis threads to different
+# CPUs, but also in order to make sure that multiple Redis instances running
+# on the same host will be pinned to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to do this via Redis configuration directly, both in Linux and FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process, and
+# the bgsave child process.
The syntax to specify the cpu list is the same as +# the taskset command: +# +# Set redis server/io threads to cpu affinity 0,2,4,6: +# server-cpulist 0-7:2 +# +# Set bio threads to cpu affinity 1,3: +# bio-cpulist 1,3 +# +# Set aof rewrite child process to cpu affinity 8,9,10,11: +# aof-rewrite-cpulist 8-11 +# +# Set bgsave child process to cpu affinity 1,10,11 +# bgsave-cpulist 1,10-11 + +# In some cases redis will emit warnings and even refuse to start if it detects +# that the system is in bad state, it is possible to suppress these warnings +# by setting the following config which takes a space delimited list of warnings +# to suppress +# +# ignore-warnings ARM64-COW-BUG \ No newline at end of file diff --git a/lib/core/playbook/action.v b/lib/core/playbook/action.v new file mode 100644 index 00000000..f6498139 --- /dev/null +++ b/lib/core/playbook/action.v @@ -0,0 +1,95 @@ +module playbook + +import crypto.blake2b +import freeflowuniverse.herolib.data.paramsparser +import freeflowuniverse.herolib.core.texttools +// import freeflowuniverse.herolib.core.smartid + +pub struct Action { +pub mut: + id int + cid string + name string + actor string + priority int = 10 // 0 is highest, do 10 as default + params paramsparser.Params + result paramsparser.Params // can be used to remember outputs + // run bool = true // certain actions can be defined but meant to be executed directly + actiontype ActionType = .sal + comments string + done bool // if done then no longer need to process +} + +pub enum ActionType { + unknown + dal + sal + wal + macro +} + +pub fn (action Action) str() string { + mut out := action.heroscript() + if !action.result.empty() { + out += '\n\nResult:\n' + out += texttools.indent(action.result.heroscript(), ' ') + } + return out +} + +// serialize to heroscript +pub fn (action Action) heroscript() string { + mut out := '' + if action.comments.len > 0 { + out += texttools.indent(action.comments, '// ') + } + if action.actiontype == .sal { + out += '!!' + } else if action.actiontype == .macro { + out += '!!!' + } else { + panic('only action sal and macro supported for now,\n${action}') + } + + if action.actor != '' { + out += '${action.actor}.' + } + out += '${action.name} ' + if action.id > 0 { + out += 'id:${action.id} ' + } + if !action.params.empty() { + heroscript := action.params.heroscript() + heroscript_lines := heroscript.split_into_lines() + out += heroscript_lines[0] + '\n' + for line in heroscript_lines[1..] { + out += ' ' + line + '\n' + } + } + return out +} + +// return list of names . +// the names are normalized (no special chars, lowercase, ... 
) +pub fn (action Action) names() []string { + mut names := []string{} + for name in action.name.split('.') { + names << texttools.name_fix(name) + } + return names +} + +pub enum ActionState { + init // first state + next // will continue with next steps + restart + error + done // means we don't process the next ones +} + +// get hash from the action, should always be the same for the same action +pub fn (action Action) hashkey() string { + txt := action.heroscript() + bs := blake2b.sum160(txt.bytes()) + return bs.hex() +} diff --git a/lib/core/playbook/factory.v b/lib/core/playbook/factory.v new file mode 100644 index 00000000..2006fdf9 --- /dev/null +++ b/lib/core/playbook/factory.v @@ -0,0 +1,57 @@ +module playbook + +import freeflowuniverse.herolib.core.base + +@[params] +pub struct PlayBookNewArgs { +pub mut: + path string + text string + git_url string + git_pull bool + git_branch string + git_reset bool + prio int = 50 + priorities map[int]string // filter and give priority, see filtersort method to know how to use + session ?&base.Session +} + +// get a new playbook, can scan a directory or just add text +// ``` +// path string +// text string +// git_url string +// git_pull bool +// git_branch string +// git_reset bool +// session &base.Session +// ``` +pub fn new(args_ PlayBookNewArgs) !PlayBook { + mut args := args_ + + mut c := base.context()! + + mut s := c.session_new()! + + mut plbook := PlayBook{ + session: &s + } + if args.path.len > 0 || args.text.len > 0 || args.git_url.len > 0 { + plbook.add( + path: args.path + text: args.text + git_url: args.git_url + git_pull: args.git_pull + git_branch: args.git_branch + git_reset: args.git_reset + prio: args.prio + session: args.session + )! + } + + if args.priorities.len > 0 { + plbook.filtersort(priorities: args.priorities)! + } + + return plbook +} diff --git a/lib/core/playbook/filter1_test.v b/lib/core/playbook/filter1_test.v new file mode 100644 index 00000000..28caa100 --- /dev/null +++ b/lib/core/playbook/filter1_test.v @@ -0,0 +1,83 @@ +module playbook + +const text3 = " +//select the circle, can come from context as has been set before +// +//now every person added will be added in this circle +// +!!select_actor people +!!select_circle aaa + +//delete everything as found in current circle +!!person.delete cid:1g + +!!person.define + //is optional will be filled in automatically, but maybe we want to update + cid: '1gt' + //name as selected in this group, can be used to find someone back + name: fatayera + firstname: 'Adnan' + lastname: 'Fatayerji' + description: 'Head of Business Development' + email: 'adnan@threefold.io,fatayera@threefold.io' + +!!circle_link + //can define as cid or as name, name needs to be in same circle + person: '1gt' + //can define as cid or as name + circle:tftech + role:'stakeholder' + description:'' + //is the name as given to the link + name:'vpsales' + +!!people.circle_comment cid:'1g' + comment:' + this is a comment + can be multiline + ' + +!!circle.comment cid:'1g' + comment: + another comment + +!!digital_payment.add + person:fatayera + name: 'TF Wallet' + blockchain: 'stellar' + account: '' + description: 'TF Wallet for TFT' + preferred: false + +!!test.myaction + key: value + +!!person.define + cid: 'eg' + name: despiegk //this is a remark + +" + +// test filter with only two names in filter +fn test_filter1() ! { + mut plbook := new( + text: text3 + )! 
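+	// text3 above defines exactly 10 actions, so parsing should surface all of them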
+
+	assert plbook.actions.len == 10
+
+	assert plbook.hashkey() == '6936aafcd18b2d839e6b5c5f20b8817243c237da'
+
+	plbook.filtersort(
+		priorities: {
+			2: 'digital_payment:*'
+		}
+	)!
+	assert plbook.priorities[2].len == 1
+
+	mut asorted := plbook.actions_sorted()!
+
+	assert asorted.map('${it.actor}:${it.name}') == ['digital_payment:add', 'core:select_actor',
+		'core:select_circle', 'person:delete', 'person:define', 'core:circle_link',
+		'people:circle_comment', 'circle:comment', 'test:myaction', 'person:define']
+}
diff --git a/lib/core/playbook/filter_sort.v b/lib/core/playbook/filter_sort.v
new file mode 100644
index 00000000..e6a9df31
--- /dev/null
+++ b/lib/core/playbook/filter_sort.v
@@ -0,0 +1,189 @@
+module playbook
+
+import freeflowuniverse.herolib.data.paramsparser
+// import freeflowuniverse.herolib.ui.console
+
+@[params]
+pub struct FilterSortArgs {
+pub:
+	priorities map[int]string // filter and give priority
+}
+
+// filter actions based on the criteria
+//```
+// string for filter is $actor:$action, ... name and globs are possible (*,?)
+//
+// struct FilterSortArgs {
+//	priorities map[int]string //filter and give priority
+//```
+// the action_names or actor_names can be a glob in match_glob .
+// see https://modules.vlang.io/index.html#string.match_glob .
+// the highest priority will always be chosen (the same action can match more than once) .
+// returns []Action (only the actions which were filtered, i.e. included in the filter-sort args)
+pub fn (mut plbook PlayBook) filtersort(args FilterSortArgs) ![]&Action {
+	mut nrs := args.priorities.keys()
+	nrs.sort()
+	plbook.priorities = map[int][]int{} // reset the prio's
+	for prio in nrs {
+		if prio > 49 {
+			return error('prio cannot be higher than 49')
+		}
+		argsfilter := args.priorities[prio] or { panic('bug') }
+		mut actionsfound := plbook.find(filter: argsfilter)!
+		// console.print_header('- ${prio}:(${actionsfound.len})\n${argsfilter}')
+		for mut actionfiltered in actionsfound {
+			if actionfiltered.id in plbook.done {
+				continue
+			}
+			actionfiltered.priority = prio
+			if prio !in plbook.priorities {
+				plbook.priorities[prio] = []int{}
+			}
+			if actionfiltered.id !in plbook.done {
+				plbook.priorities[prio] << actionfiltered.id
+				plbook.done << actionfiltered.id
+			}
+		}
+	}
+
+	// the remainder now needs to be put on prio 50
+	for mut action in plbook.actions {
+		if 50 !in plbook.priorities {
+			plbook.priorities[50] = []int{}
+		}
+		if action.id !in plbook.done {
+			plbook.priorities[50] << action.id
+			plbook.done << action.id
+		}
+	}
+
+	return plbook.actions_sorted()
+}
+
+@[params]
+pub struct FindArgs {
+pub:
+	filter       string
+	include_done bool
+}
+
+// filter is of form $actor.$action, ... name and globs are possible (*,?) .
+// comma separated, actor and name need to be specified, if more than one use * glob .
+// e.g. find("core.person_select,myactor.*,green*.*")
+pub fn (mut plbook PlayBook) find(args FindArgs) ![]&Action {
+	filter := args.filter.replace(':', '.').trim_space()
+	mut res := []&Action{}
+	mut items := []string{}
+	if filter.contains(',') {
+		items = filter.split(',').map(it.trim_space())
+	} else {
+		items << filter.trim_space()
+	}
+	for action in plbook.actions {
+		// console.print_debug("${action.actor}:${action.name}:${action.id}")
+		if action.match_items(items) {
+			// console.print_debug("   OK")
+			if !args.include_done && action.done {
+				continue
+			}
+			res << action
+		}
+	}
+	return res
+}
+
+pub fn (mut plbook PlayBook) exists_once(args FindArgs) bool {
+	mut res := plbook.find(args) or { [] }
+	return res.len == 1
+}
+
+pub fn (mut plbook PlayBook) find_one(args FindArgs) !&Action {
+	mut res := plbook.find(args)!
+	if res.len == 0 {
+		return error("can't find action: '${args.filter}'")
+	} else if res.len > 1 {
+		return error("found more than one action: '${args.filter}'")
+	}
+	return res[0] or { panic('bug') }
+}
+
+pub fn (mut plbook PlayBook) find_max_one(args FindArgs) ![]&Action {
+	mut res := plbook.find(args)!
+	if res.len > 1 {
+		return error("found more than one action: '${args.filter}'")
+	}
+	return res
+}
+
+fn (action Action) match_items(items []string) bool {
+	for p in items {
+		mut actor := ''
+		mut name := ''
+		if p.contains('.') {
+			actor = p.all_before('.').trim_space()
+			name = p.all_after_last('.').trim_space()
+		} else {
+			name = p.trim_space()
+			actor = 'core'
+		}
+		// console.print_header('- checkmatch:${actor}:${name}')
+		if action.checkmatch(actor: actor, name: name) {
+			return true
+		}
+	}
+	return false
+}
+
+@[params]
+pub struct MatchFilter {
+pub mut:
+	actor string
+	name  string
+	cid   string
+}
+
+// check if the action matches following the filter args .
+// the action_names or actor_names can be a glob in match_glob .
+// see https://modules.vlang.io/index.html#string.match_glob
+fn (action Action) checkmatch(args MatchFilter) bool {
+	if args.cid.len > 0 {
+		if args.cid != action.cid {
+			return false
+		}
+	}
+	if args.actor.len > 0 {
+		if args.actor.contains('*') || args.actor.contains('?') || args.actor.contains('[') {
+			if !action.actor.match_glob(args.actor) {
+				return false
+			}
+		} else {
+			if action.actor != args.actor.to_lower().trim_space() {
+				return false
+			}
+		}
+	}
+	if args.name.len > 0 {
+		if args.name.contains('*') || args.name.contains('?') || args.name.contains('[') {
+			if !action.name.match_glob(args.name) {
+				return false
+			}
+		} else {
+			if action.name != args.name.to_lower().trim_space() {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// find all relevant actions and merge their params into one result .
+// filter is of form $actor.$action, ... name and globs are possible (*,?) .
+// comma separated, actor and name need to be specified, if more than one use * glob .
+// e.g. find("core.person_select,myactor.*,green*.*")
+pub fn (mut plbook PlayBook) params_get(filter string) !paramsparser.Params {
+	mut paramsresult := paramsparser.new('')!
+	for action in plbook.find(filter: filter)! {
+		paramsresult.merge(action.params)!
+ } + return paramsresult +} diff --git a/lib/core/playbook/filter_test.v b/lib/core/playbook/filter_test.v new file mode 100644 index 00000000..b94f9848 --- /dev/null +++ b/lib/core/playbook/filter_test.v @@ -0,0 +1,148 @@ +module playbook + +const text2 = " +//select the circle, can come from context as has been set before +//now every person added will be added in this circle +!!select_actor people +!!select_circle aaa + +//delete everything as found in current circle +!!person_delete cid:1g + +!!person_define + //is optional will be filled in automatically, but maybe we want to update + cid: '1gt' + //name as selected in this group, can be used to find someone back + name: fatayera + firstname: 'Adnan' + lastname: 'Fatayerji' + description: 'Head of Business Development' + email: 'adnan@threefold.io,fatayera@threefold.io' + +!!circle_link +//can define as cid or as name, name needs to be in same circle + person: '1gt' + //can define as cid or as name + circle:tftech + role:'stakeholder' + description:'' + //is the name as given to the link + name:'vpsales' + +!!people.circle_comment cid:'1g' + comment: + this is a comment + can be multiline + +!!circle_comment cid:'1g' + comment: + another comment + +!!digital_payment_add + person:fatayera + name: 'TF Wallet' + blockchain: 'stellar' + account: '' + description: 'TF Wallet for TFT' + preferred: false + +!!select_actor test + +!!test_action + key: value + +!!select_circle bbb +!!select_actor people + +!!person_define + cid: 'eg' + name: despiegk //this is a remark + +" + +// QUESTION: how to better organize these tests +// ANSWER: split them up, this test is testing too much, tests should be easy to read and easy to modify +// TODO: FIX THE TESTS, THEY ARE BROKEN NOW + +fn test_filter_on_circle_aaa() ! { + // test filter circle:aaa + mut parser := new(text: text2)! + assert parser.actions.len == 13 +} + +// test filter with names:[*] +fn test_filter_with_names_asterix() ! { + mut parser := new(text: text2)! + assert parser.actions.len == 13 + assert parser.actions.map(it.name) == ['select_actor', 'select_circle', 'person_delete', + 'person_define', 'circle_link', 'circle_comment', 'circle_comment', 'digital_payment_add', + 'select_actor', 'test_action', 'select_circle', 'select_actor', 'person_define'] + + sorted := parser.find(filter: '*.*')! + assert sorted.len == 13 + assert sorted.map(it.name) == ['select_actor', 'select_circle', 'person_delete', 'person_define', + 'circle_link', 'circle_comment', 'circle_comment', 'digital_payment_add', 'select_actor', + 'test_action', 'select_circle', 'select_actor', 'person_define'] +} + +// test filtering with names_filter with one empty string +fn test_filter_with_names_list_with_empty_string() ! { + // QUESTION: should this return empty list? + // ANSWER: I think yes as you technically want the parser where the name is an empty string + + // NOTE: empty name does not filter by name, it's simply ignored + mut parser := new( + text: text2 + )! + + assert parser.actions.len == 13 + assert parser.actions.map(it.name) == ['select_actor', 'select_circle', 'person_delete', + 'person_define', 'circle_link', 'circle_comment', 'circle_comment', 'digital_payment_add', + 'select_actor', 'test_action', 'select_circle', 'select_actor', 'person_define'] + + filtered := parser.find(filter: '*.')! 
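+	// as the NOTE above explains, an empty name part is ignored, so every action still matches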
+ assert filtered.len == 13 + assert filtered.map(it.name) == ['select_actor', 'select_circle', 'person_delete', + 'person_define', 'circle_link', 'circle_comment', 'circle_comment', 'digital_payment_add', + 'select_actor', 'test_action', 'select_circle', 'select_actor', 'person_define'] +} + +// test filter with names in same order as parser +fn test_filter_with_names_in_same_order() ! { + mut parser := new( + text: text2 + )! + + sorted := parser.find(filter: 'person_define,circle_link,circle_comment,digital_payment_add')! + assert sorted.len == 5 + assert sorted.map(it.name) == ['person_define', 'circle_link', 'circle_comment', + 'digital_payment_add', 'person_define'] +} + +// test filter with names in different order than parser +fn test_filter_with_names_in_different_order() ! { + mut parser := new( + text: text2 + )! + + sorted := parser.find( + filter: 'people.circle_comment,person_define,digital_payment_add,person_delete,circle_link' + )! + + assert sorted.len == 6 + assert sorted.map(it.name) == ['person_delete', 'person_define', 'circle_link', 'circle_comment', + 'digital_payment_add', 'person_define'] +} + +// test filter with only two names in filter +fn test_filter_with_only_two_names_in_filter() ! { + // QUESTION: if we only have one name, is it just that action? + // ANSWER: yes + mut parser := new( + text: text2 + )! + + sorted := parser.find(filter: 'person_define,person_delete')! + assert sorted.len == 3 + assert sorted.map(it.name) == ['person_delete', 'person_define', 'person_define'] +} diff --git a/lib/core/playbook/parser_test.v b/lib/core/playbook/parser_test.v new file mode 100644 index 00000000..120919d6 --- /dev/null +++ b/lib/core/playbook/parser_test.v @@ -0,0 +1,56 @@ +module playbook + +const text1 = " +//comment for the action +!!payment.add person:fatayera + //comment for name + name: 'TF Wallet' + blockchain: 'stellar' //holochain maybe? + account: 'something' + description: 'TF Wallet for TFT' + preferred: false +" + +fn test_parse_1() { + mut a := new(text: text1) or { panic(err) } + + assert a.actions.len == 1 + mut s := a.actions_sorted()! + assert s.len == 1 + // mut sorted := a.actions_sorted(prio_only: true)! + // assert sorted.len == 0 + + mut myaction := s[0] or { panic('bug') } + + assert myaction.comments == 'comment for the action' + assert myaction.params.params.len == 6 + assert myaction.id == 1 + + assert a.hashkey() == '95c585c8bf01b4c432cb7096dc7c974fc1a14b5a' + c := a.heroscript()! + b := new(text: c) or { panic(err) } + + assert b.hashkey() == '95c585c8bf01b4c432cb7096dc7c974fc1a14b5a' +} + +fn test_parser() { + mut pb := new(text: text1) or { panic(err) } + mut a := pb.actions[0] + assert a.actor == 'payment' + assert a.name == 'add' + assert a.params.get('name')! == 'TF Wallet' + assert a.params.get('blockchain')! == 'stellar' + assert a.params.get('account')! == 'something' + assert a.params.get('description')! == 'TF Wallet for TFT' + assert a.params.get_default_false('preferred') == false +} + +fn test_parser2() { + mut pb := new( + text: "!!play.run url:'https://git.ourworld.tf/despiegk/cfg/src/branch/main/myit/hetzner.md'" + ) or { panic(err) } + mut a := pb.actions[0] + assert a.actor == 'play' + assert a.name == 'run' + assert a.params.get('url')! 
== 'https://git.ourworld.tf/despiegk/cfg/src/branch/main/myit/hetzner.md' +} diff --git a/lib/core/playbook/playbook.v b/lib/core/playbook/playbook.v new file mode 100644 index 00000000..95704ff6 --- /dev/null +++ b/lib/core/playbook/playbook.v @@ -0,0 +1,202 @@ +module playbook + +import freeflowuniverse.herolib.core.base +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.data.paramsparser +import crypto.blake2b + +@[heap] +pub struct PlayBook { +pub mut: + actions []&Action + priorities map[int][]int // first key is the priority, the list of int's is position in list self.actions + othertext string // in case there is text outside of the actions + result string // if any result + nractions int + done []int // which actions did we already find/run? + session &base.Session +} + +@[params] +pub struct ActionNewArgs { +pub mut: + cid string + name string + actor string + priority int = 10 // 0 is highest, do 10 as default + // run bool = true // certain actions can be defined but meant to be executed directly + actiontype ActionType +} + +// add action to the book +fn (mut plbook PlayBook) action_new(args ActionNewArgs) &Action { + plbook.nractions += 1 + mut a := Action{ + id: plbook.nractions + cid: args.cid + name: args.name + actor: args.actor + priority: args.priority + // run: args.run + actiontype: args.actiontype + params: paramsparser.Params{} + result: paramsparser.Params{} + } + plbook.actions << &a + return &a +} + +pub fn (mut plbook PlayBook) str() string { + return plbook.heroscript() or { 'Cannot visualize playbook properly.\n${plbook.actions}' } +} + +@[params] +pub struct SortArgs { +pub mut: + prio_only bool // if true only show the actions which were prioritized before +} + +// only return the actions which are not done yet +// if filtered is set, it means we only get the ones which were prioritized before +pub fn (mut plbook PlayBook) actions_sorted(args SortArgs) ![]&Action { + mut res := []&Action{} + mut nrs := plbook.priorities.keys() + nrs.sort() + if nrs.len == 0 { + // means sorting did not happen before + return plbook.actions + } + for nr in nrs { + if args.prio_only && nr > 49 { + continue + } + action_ids := plbook.priorities[nr] or { panic('bug') } + for id in action_ids { + mut a := plbook.action_get(id: id)! + res << a + } + } + return res +} + +@[params] +pub struct HeroScriptArgs { +pub mut: + show_done bool = true +} + +// serialize to heroscript +pub fn (mut plbook PlayBook) heroscript(args HeroScriptArgs) !string { + mut out := '' + for action in plbook.actions_sorted()! { + if args.show_done == false && action.done { + continue + } + out += '${action.heroscript()}\n' + } + if plbook.othertext.len > 0 { + out += '${plbook.othertext}' + } + out = texttools.remove_empty_js_blocks(out) + return out +} + +// return list of names . +// the names are normalized (no special chars, lowercase, ... ) +pub fn (mut plbook PlayBook) names() ![]string { + mut names := []string{} + for action in plbook.actions_sorted()! { + names << action.name + } + return names +} + +@[params] +pub struct ActionGetArgs { +pub mut: + id int + actor string + name string + actiontype ActionType = .sal +} + +// Find all actions based on ActionGetArgs +// - If id == 0, then matches all ids; when id is specified, can only return 1. +// - If actor == "", then matches all actors. +// - If name == "", then matches all actions from the defined actor (if defined). 
+// - If actiontype == .unknown, then matches all action types; when specified, filters by that type (default .sal).
+pub fn (mut plbook PlayBook) actions_find(args ActionGetArgs) ![]&Action {
+	mut res := []&Action{}
+	for a in plbook.actions {
+		// If id is specified, return only the action with that id
+		if args.id != 0 {
+			if a.id == args.id {
+				return [a]
+			}
+			continue
+		}
+		// Filter by actor if specified
+		if args.actor.len > 0 && a.actor != args.actor {
+			continue
+		}
+		// Filter by name if specified
+		if args.name.len > 0 && a.name != args.name {
+			continue
+		}
+		// Filter by actiontype if specified
+		if args.actiontype != .unknown && a.actiontype != args.actiontype {
+			continue
+		}
+		// If the action passes all filters, add it to the result
+		res << a
+	}
+	return res
+}
+
+pub fn (mut plbook PlayBook) action_exists(args ActionGetArgs) bool {
+	// Use actions_find to get the filtered actions; exactly one match means it exists
+	actions := plbook.actions_find(args) or { return false }
+	return actions.len == 1
+}
+
+pub fn (mut plbook PlayBook) action_get(args ActionGetArgs) !&Action {
+	// Use actions_find to get the filtered actions
+	actions := plbook.actions_find(args)!
+	if actions.len == 1 {
+		return actions[0]
+	} else if actions.len == 0 {
+		return error("couldn't find action with args: ${args}")
+	} else {
+		return error('multiple actions found with args: ${args}, expected only one')
+	}
+}
+
+pub fn (plbook PlayBook) hashkey() string {
+	mut out := []string{}
+	for action in plbook.actions {
+		out << action.hashkey()
+	}
+	txt := out.join_lines()
+	bs := blake2b.sum160(txt.bytes())
+	return bs.hex()
+}
+
+// check if the playbook is empty; if not, returns an error, meaning there are actions left to be executed
+pub fn (mut plbook PlayBook) empty_check() ! {
+	mut actions := []&Action{}
+	for a in plbook.actions {
+		if a.done == false {
+			actions << a
+		}
+	}
+	if actions.len > 0 {
+		msg := plbook.heroscript(show_done: false)!
+		return error('There are actions left to execute, see below:\n\n${msg}\n\n')
+	}
+}
diff --git a/lib/core/playbook/playbook_add.v b/lib/core/playbook/playbook_add.v
new file mode 100644
index 00000000..9e49808e
--- /dev/null
+++ b/lib/core/playbook/playbook_add.v
@@ -0,0 +1,174 @@
+module playbook
+
+import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.data.paramsparser
+import freeflowuniverse.herolib.core.pathlib
+// import freeflowuniverse.herolib.core.base
+import freeflowuniverse.herolib.develop.gittools
+// import freeflowuniverse.herolib.ui.console
+
+enum State {
+	start
+	comment_for_action_maybe
+	action
+	othertext
+}
+
+pub fn (mut plbook PlayBook) add(args_ PlayBookNewArgs) ! {
+	mut args := args_
+
+	if args.git_url.len > 0 {
+		mut gs := gittools.get()!
+		mut repo := gs.get_repo(
+			url:   args.git_url
+			pull:  args.git_pull
+			reset: args.git_reset
+		)!
+		args.path = repo.get_path()!
+	}
+
+	// walk over directory
+	if args.path.len > 0 {
+		// console.print_header("PLBOOK add path:'${args.path}'")
+		mut p := pathlib.get(args.path)
+		if !p.exists() {
+			return error("can't find path:${p.path}")
+		}
+		if p.is_file() {
+			c := p.read()!
+			plbook.add(text: c, prio: args.prio, session: args_.session)!
+			return
+		} else if p.is_dir() {
+			// get .md and .hero files from dir
+			mut ol0 := p.list(recursive: true, regex: [r'.*\.md$'])!
+			mut paths := ol0.paths.clone()
+			mut ol1 := p.list(recursive: true, regex: [r'.*\.hero$'])!
+			paths << ol1.paths
+
+			for mut p2 in paths {
+				c2 := p2.read()!
+				plbook.add(text: c2, prio: args.prio, session: args_.session)!
+			}
+			return
+		}
+		return error("can't process path: ${args.path}, unknown type.")
+	}
+	// console.print_header('PLBOOK add text')
+	// console.print_stdout(args.text)
+
+	args.text = texttools.dedent(args.text)
+	mut state := State.start
+
+	mut action := &Action{}
+	mut comments := []string{}
+	mut paramsdata := []string{}
+
+	for line_ in args.text.split_into_lines() {
+		line := line_.replace('\t', ' ')
+		line_strip := line.trim_space()
+
+		if line_strip.len == 0 {
+			continue
+		}
+
+		// console.print_header("state:${state} action:'${action.name}' comments:'${comments.len}' -> '${line}'")
+
+		if state == .action {
+			if !line.starts_with(' ') || line_strip == '' || line_strip.starts_with('!') {
+				state = .start
+				// means we found the end of the action
+				// console.print_debug("+++${paramsdata.join('\n')}+++")
+				action.params = paramsparser.new(paramsdata.join('\n'))!
+				action.params.delete('id')
+				comments = []string{}
+				paramsdata = []string{}
+				action = &Action{}
+				// console.print_header('   action end')
+			} else {
+				paramsdata << line
+			}
+		}
+
+		if state == .comment_for_action_maybe {
+			if line.starts_with('//') {
+				comments << line_strip.trim_left('/ ')
+			} else {
+				if line_strip.starts_with('!') {
+					// we are at the end of the comment
+					state = .start
+				} else {
+					state = .start
+					plbook.othertext += comments.join('\n')
+					if !plbook.othertext.ends_with('\n') {
+						plbook.othertext += '\n'
+					}
+					comments = []string{}
+				}
+			}
+		}
+
+		if state == .start {
+			if line_strip.starts_with('!') && !line_strip.starts_with('![') {
+				// start a new action
+				state = .action
+				action = plbook.action_new(
+					priority: args.prio
+				)
+				action.comments = comments.join('\n')
+				comments = []string{}
+				paramsdata = []string{}
+				mut actionname := line_strip
+				if line_strip.contains(' ') {
+					actionname = line_strip.all_before(' ').trim_space()
+					paramsdata << line_strip.all_after_first(' ').trim_space()
+				}
+				if actionname.starts_with('!!!!!') {
+					return error('there is no action starting with 5 x !')
+				} else if actionname.starts_with('!!!!') {
+					action.actiontype = .wal
+				} else if actionname.starts_with('!!!') {
+					action.actiontype = .macro
+				} else if actionname.starts_with('!!') {
+					action.actiontype = .sal
+				} else if actionname.starts_with('!') {
+					action.actiontype = .dal
+				} else {
+					print_backtrace()
+					panic('bug')
+				}
+				actionname = actionname.trim_left('!')
+				splitted := actionname.split('.')
+				if splitted.len == 1 {
+					action.actor = 'core'
+					action.name = texttools.name_fix(splitted[0])
+				} else if splitted.len == 2 {
+					action.actor = texttools.name_fix(splitted[0])
+					action.name = texttools.name_fix(splitted[1])
+				} else {
+					print_backtrace()
+					return error('for now we only support actions with 1 or 2 parts.\n${actionname}')
+				}
+				// console.print_header('   action new: ${action.actor}:${action.name} params:${paramsdata}')
+				continue
+			} else if line.starts_with('//') {
+				state = .comment_for_action_maybe
+				comments << line_strip.trim_left('/ ')
+				// } else {
+				// 	plbook.othertext += '${line_strip}\n'
+			}
+		}
+	}
+	// process the last one
+	if state == .action {
+		if action.id != 0 {
+			action.params = paramsparser.new(paramsdata.join('\n'))!
+			action.params.delete('id')
+		}
+	}
+	if state == .comment_for_action_maybe {
+		plbook.othertext += comments.join('\n')
+	}
+	// if state == .start{
+	// 	plbook.othertext += line_strip
+	// }
+}
diff --git a/lib/core/playbook/playbook_test.v b/lib/core/playbook/playbook_test.v
new file mode 100644
index 00000000..1c2ad40d
--- /dev/null
+++ b/lib/core/playbook/playbook_test.v
@@ -0,0 +1,84 @@
+module playbook
+
+import os
+import crypto.sha256
+import freeflowuniverse.herolib.ui.console
+
+const testpath = os.dir(@FILE) + '/testdata'
+
+// TODO: fix
+
+const text1 = "
+//comment for the action
+!!payment.add person:fatayera
+    //comment for name
+    name: 'TF Wallet'
+    blockchain: 'stellar' //holochain maybe?
+    account: 'something'
+    description: 'TF Wallet for TFT'
+    preferred: false
+
+//comment2
+!!payment.add person:despiegk
+    name: 'TF Wallet2'
+
+"
+
+const text2 = "
+//comment for the action
+!!payment.add person:fatayera
+    name: 'TF Wallet'
+
+!!payment.else person:despiegk
+    name: 'TF Wallet2'
+
+!!actor2.else person:despiegk
+    name: 'TF Wallet2'
+
+"
+
+fn test_parse_1() {
+	mut a := new(text: text1) or { panic(err) }
+
+	console.print_debug('${a}')
+
+	console.print_debug("EXPECTED OUTPUT:
+// comment for the action
+!!payment.add account:something description:'TF Wallet for TFT' person:fatayera preferred:false
+    name:'TF Wallet' //comment for name
+    blockchain:stellar //holochain maybe?
+
+// comment2
+!!payment.add name:'TF Wallet2' person:despiegk
+")
+
+	assert sha256.hexhash(a.str()) == 'e86eb063d8556c8501f63494a863fc78415112d6990ba6f1d0d5db16ff26e954'
+}
+
+fn test_hashkey() {
+	mut a := new(text: text1) or { panic(err) }
+	t := a.hashkey()
+
+	console.print_debug(t)
+
+	assert t == 'a5e85c3a8e4c132bd40c88acc0dcc3d9a2af56c5'
+}
+
+fn test_filter() ! {
+	mut a := new(text: text2) or { panic(err) }
+
+	mut b := a.find(filter: 'payment.*')!
+	assert b.len == 2
+
+	mut c := a.find(filter: 'payment.else')!
+	assert c.len == 1
+
+	mut d := a.find(filter: 'actor2.*')!
+	assert d.len == 1
+
+	mut e := a.find(filter: 'actor2.else')!
+	assert e.len == 1
+
+	mut f := a.find(filter: 'actor2:else2')!
+	assert f.len == 0
+}
diff --git a/lib/core/playbook/readme.md b/lib/core/playbook/readme.md
new file mode 100644
index 00000000..ca399547
--- /dev/null
+++ b/lib/core/playbook/readme.md
@@ -0,0 +1,129 @@
+# heroscript
+
+heroscript is our small action language: a playbook parses heroscript text into actions which can then be executed
+
+
+## execute a playbook
+
+the following will load heroscript and execute it
+
+```v
+import freeflowuniverse.herolib.core.playbook
+import freeflowuniverse.herolib.core.playcmds
+
+// path       string
+// text       string
+// git_url    string
+// git_pull   bool
+// git_branch string
+// git_reset  bool
+// session    ?&base.Session is optional
+mut plbook := playbook.new(path: "....")!
+
+//now we run all the commands as they are pre-defined in crystallib (herolib)
+playcmds.run(mut plbook)!
+
+
+```
+
+## make a heroscript executable
+
+```bash
+#!/usr/bin/env hero
+
+!!play.echo content:'this is just a test'
+
+!!play.echo content:'this is just another test'
+```
+
+you can now just execute this script and hero will interpret the content
+
+
+## actions
+
+actions are the text-based representations of the commands which need to be executed
+
+example
+
+```js
+!!tflibrary.circlesmanager.circle_add
+    gitsource:'books'
+    path:'technology/src'
+    name:technology
+```
+
+the first line is the action name, the rest are the params
+
+```v
+import freeflowuniverse.herolib.core.playbook
+
+
+
+
+mut plbook := playbook.new(text: "....")!
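+// plbook now holds the parsed actions, ready to be found or filtered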
+
+```
+## use from a module
+
+
+```v
+import freeflowuniverse.herolib.core.playbook
+
+// !!hr.employee_define
+//     descr:'Junior Engineer'
+//     growth:'1:5,60:30' cost:'4000USD' indexation:'5%'
+//     department:'engineering'
+
+
+// populate the params for hr
+fn (mut m BizModel) hr_actions(actions playbook.PlayBook) ! {
+	mut actions2 := actions.find(filter: 'hr.*,vm.start')!
+	for action in actions2 {
+		if action.name == 'employee_define' {
+			mut name := action.params.get_default('name', '')!
+			mut descr := action.params.get_default('descr', '')!
+			//...
+		}
+	}
+}
+```
+
+
+## we can also use the filtersort
+
+```v
+
+import freeflowuniverse.herolib.core.playbook
+import freeflowuniverse.herolib.core.playcmds
+
+mut plbook := playbook.new(path: "....") or { panic(err) }
+
+// filter actions based on the criteria
+//```
+// string for filter is $actor:$action, ... name and globs are possible (*,?)
+//
+// struct FilterSortArgs
+// 	priorities  map[int]string //filter and give priority
+//```
+// the action_names or actor_names can be a glob in match_glob .
+// see https://modules.vlang.io/index.html#string.match_glob .
+// the highest priority will always be chosen . (it can be a match happens 2x)
+// return []Action
+actions := plbook.filtersort({
+	5:  "sshagent:*",
+	10: "doctree:*",
+	11: "mdbooks:*",
+	12: "mdbook:*",
+})!
+
+//now process the actions if we want to do it ourselves
+for a in actions {
+	mut p := a.params
+	mut repo := p.get_default('repo', '')!
+	mut coderoot := ''
+	if p.exists('coderoot') {
+		coderoot = p.get_path_create('coderoot')!
+	}
+	println('repo:${repo} coderoot:${coderoot}')
+}
+
+```
+
diff --git a/lib/core/rootpath/README.md b/lib/core/rootpath/README.md
new file mode 100644
index 00000000..e1c41daa
--- /dev/null
+++ b/lib/core/rootpath/README.md
@@ -0,0 +1,48 @@
+# Rootpath Module
+
+The rootpath module provides functionality for managing the Hero environment directory structure and path handling. It ensures consistent access to Hero-specific directories and provides utilities for path manipulation.
+ +## Core Functions + +### Directory Management + +- `herodir()` - Returns the root directory for the Hero environment (`~/hero`) +- `bindir()` - Returns the binary directory (`~/hero/bin`) +- `vardir()` - Returns the variable directory (`~/hero/var`) +- `cfgdir()` - Returns the configuration directory (`~/hero/cfg`) +- `ensure_hero_dirs()` - Creates all necessary Hero directories if they don't exist + +### Path Utilities + +- `shell_expansion(s string)` - Expands shell-like path expressions (e.g., `~` or `{HOME}`) to full paths +- `path_ensure(s string)` - Ensures a given path exists by creating it if necessary +- `hero_path(s string)` - Constructs a path underneath the Hero root directory +- `hero_path_ensure(s string)` - Ensures a Hero-specific path exists and returns it + +## Usage Example + +```vsh +import freeflowuniverse.herolib.core.rootpath + +// Get and ensure Hero directories exist +hero_root := rootpath.ensure_hero_dirs() + +// Work with Hero-specific paths +ensured_path := rootpath.hero_path_ensure('data/myapp') + +// Expand shell paths +full_path := rootpath.shell_expansion('~/hero/custom/path') + +``` + +## Directory Structure + +The module manages the following directory structure: + +``` +~/hero/ + ├── bin/ # Binary files + ├── var/ # Variable data + └── cfg/ # Configuration files +``` + diff --git a/lib/core/rootpath/rootpath.v b/lib/core/rootpath/rootpath.v new file mode 100644 index 00000000..2d02c05a --- /dev/null +++ b/lib/core/rootpath/rootpath.v @@ -0,0 +1,72 @@ +module rootpath + +import os + +// replace ~ to home dir in string as given +pub fn shell_expansion(s_ string) string { + mut s := s_ + home := os.real_path(os.home_dir()) + for x in ['{HOME}', '~'] { + if s.contains(x) { + s = s.replace(x, home) + } + } + return s +} + +// ensure_hero_dirs creates all necessary hero directories +pub fn ensure_hero_dirs() string { + path_ensure(herodir()) + path_ensure(bindir()) + path_ensure(vardir()) + path_ensure(cfgdir()) + return herodir() +} + + +// root dir for our hero environment +pub fn herodir() string { + return shell_expansion('~/hero') +} + +// bin dir +pub fn bindir() string { + return '${herodir()}/bin' +} + +// var dir +pub fn vardir() string { + return '${herodir()}/var' +} + +// cfg dir +pub fn cfgdir() string { + return '${herodir()}/cfg' +} + +// path_ensure ensures the given path exists and returns it +pub fn path_ensure(s string) string { + path := shell_expansion(s) + if !os.exists(path) { + os.mkdir_all(path) or { panic('cannot create dir ${path}') } + } + return path +} + + +// get path underneath the hero root directory +pub fn hero_path(s string) string { + path := shell_expansion(s).trim_left(' /') + full_path := '${herodir()}/${path}/' + return full_path +} + + +// return path and ensure it exists and return the path +pub fn hero_path_ensure(s string) string { + path := hero_path(s) + if !os.exists(path) { + os.mkdir_all(path) or { panic('cannot create dir ${path}') } + } + return path +} diff --git a/lib/core/smartid/sid.v b/lib/core/smartid/sid.v new file mode 100644 index 00000000..a7c5b2d7 --- /dev/null +++ b/lib/core/smartid/sid.v @@ -0,0 +1,118 @@ +module smartid + +// import freeflowuniverse.herolib.clients.redisclient +import math +// import freeflowuniverse.herolib.core.texttools.regext +// import rand + +// each part min3 max 6 chars, each char = a...z or 0...9 +// to create a new one we need to know the circle +// pub fn sid_new(cid string) !string { +// mut redis := redisclient.core_get()! 
+// key := 'circle:sid:${cid}'
+// mut sidlast := redis.get(key)! // is the last sid
+// if sidlast == '' {
+// redis.set(key, '10')!
+// sidlast = redis.get(key)! // need to make sure we reserve the first 10 ones
+// }
+// sidlasti := sidlast.u32() + 1 // is a new one
+// redis.set(key, '${sidlasti}')!
+// return sid_str(sidlasti)
+// }
+
+// // make sure redis knows about it, will return true if its not known in redis yet
+// fn sid_acknowledge(cid string, sid string) !bool {
+// mut redis := redisclient.core_get()!
+// key := 'circle:sid:${cid}'
+// sidlast := redis.get(key)! // is the last sid
+// sidlasti := sidlast.u32()
+// sidnewi := sid_int(sid)
+// if sidnewi > sidlasti {
+// redis.set(key, '${sidnewi}')!
+// return true
+// }
+// return false
+// }
+
+// set the sids in redis, so we remember them all, and we know which one is the latest
+// this is for all sids as found in text
+// fn sids_acknowledge(cid string, text string) ! {
+// res := regext.find_sid(text)
+// for sid in res {
+// sid_acknowledge(cid, sid)!
+// }
+// }
+
+// // make sure that we don't use an existing one
+// pub fn sid_new_unique(existing []string) !string {
+// idint := rand.u32_in_range(1, 42800) or { panic(err) }
+// idstr := smartid_string(idint)
+// if idstr !in existing {
+// return idstr
+// }
+// return error('Could not find unique smartid, run out of tries')
+// }
+
+// convert sid to int
+pub fn sid_int(sid string) u32 {
+	mut result := 0
+	mut count := sid.len - 1
+	for i in sid {
+		if i > 47 && i < 58 {
+			result += (i - 48) * int(math.pow(36, count))
+		} else if i > 96 && i < 123 {
+			result += (i - 87) * int(math.pow(36, count))
+		}
+		count -= 1
+	}
+	return u32(result)
+}
+
+// represent sid as string, from u32
+pub fn sid_str(sid u32) string {
+	mut completed := false
+	mut remaining := int(sid)
+	mut decimals := []f64{}
+	mut count := 1
+	for completed == false {
+		if int(math.pow(36, count)) > sid {
+			for i in 0 .. count {
+				decimals << math.floor(f64(remaining / int(math.pow(36, count - 1 - i))))
+				remaining = remaining % int(math.pow(36, count - 1 - i))
+			}
+			completed = true
+		} else {
+			count += 1
+		}
+	}
+	mut strings := []string{}
+	for i in 0 .. (decimals.len) {
+		if decimals[i] >= 0 && decimals[i] <= 9 {
+			strings << u8(decimals[i] + 48).ascii_str()
+		} else {
+			strings << u8(decimals[i] + 87).ascii_str()
+		}
+	}
+	return strings.join('')
+}
+
+// check the sid format: 2 to 6 chars, each char a...z or 0...9
+// returns false if the format is not valid
pub fn sid_check(sid string) bool {
+	if sid.len > 6 || sid.len < 2 {
+		return false
+	}
+	for cha in sid {
+		if (cha < 48 || cha > 57) && (cha < 97 || cha > 122) {
+			return false
+		}
+	}
+	return true
+}
+
+// raise error if smartid not valid
+pub fn sid_test(sid string) ! {
+	if !sid_check(sid) {
+		return error('sid:${sid} is not valid.')
+	}
+}
diff --git a/lib/data/hjson/README.md b/lib/data/hjson/README.md
new file mode 100644
index 00000000..6f1470fa
--- /dev/null
+++ b/lib/data/hjson/README.md
@@ -0,0 +1,70 @@
+# HJSON Module
+
+A V module for handling JSON data with additional utility functions for filtering, extracting, and manipulating JSON structures.
+
+## Features
+
+- JSON list splitting
+- JSON dictionary filtering and extraction
+- Clean ASCII handling option
+- Support for both string and Any type outputs
+
+## Main Functions
+
+### `json_list(r string, clean bool) []string`
+Splits a list of dictionaries into text blocks. Useful for processing large JSON arrays of objects.
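+
+A minimal usage sketch (the input JSON here is made up for illustration):
+
+```v
+// split a JSON array of objects into one raw text block per object
+blocks := json_list('[{"a":1},{"b":2}]', true)
+println(blocks) // ['{"a":1}', '{"b":2}']
+```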
+
+### `json_dict_get_any(r string, clean bool, key string) !json2.Any`
+Extracts a value from a JSON dictionary by key, returning it as `json2.Any`.
+
+### `json_dict_get_string(r string, clean bool, key string) !string`
+Similar to `json_dict_get_any` but returns the result as a string.
+
+### `json_dict_filter_any(r string, clean bool, include []string, exclude []string) !map[string]json2.Any`
+Filters a JSON dictionary based on included and excluded keys.
+
+### `json_dict_filter_string(r string, clean bool, include []string, exclude []string) !map[string]string`
+Similar to `json_dict_filter_any` but returns a map of strings.
+
+### `json_list_dict_get_any(r string, clean bool, key string) ![]json2.Any`
+Processes a list of dictionaries and extracts values for a specific key from each dictionary.
+
+### `json_list_dict_get_string(r string, clean bool, key string) ![]string`
+Similar to `json_list_dict_get_any` but returns an array of strings.
+
+## Usage Examples
+
+```v
+// Get a value from a JSON dictionary
+json_str := '{"name": "John", "age": 30}'
+name := json_dict_get_string(json_str, true, 'name')!
+println(name) // Output: "John"
+
+// Filter a JSON dictionary
+json_str2 := '{"name": "John", "age": 30, "city": "New York"}'
+include := ['name', 'age']
+exclude := []string{}
+filtered := json_dict_filter_string(json_str2, true, include, exclude)!
+println(filtered) // Output: {"name": "John", "age": 30}
+
+// Process a list of dictionaries
+json_list_str := '[{"user": {"name": "John"}}, {"user": {"name": "Jane"}}]'
+names := json_list_dict_get_string(json_list_str, true, 'user')!
+println(names) // Output: [{"name": "John"}, {"name": "Jane"}]
+```
+
+## Parameters
+
+- `r string`: The input JSON string to process
+- `clean bool`: When true, cleans the input string to ensure ASCII compatibility
+- `key string`: The key to search for in JSON dictionaries
+- `include []string`: List of keys to include in filtered output
+- `exclude []string`: List of keys to exclude from filtered output
+
+## Error Handling
+
+All functions that can fail return a Result type (`!`). Common error cases include:
+- Empty input strings
+- Invalid JSON format
+- Missing keys
+- Invalid data types
diff --git a/lib/data/hjson/hjson.v b/lib/data/hjson/hjson.v
new file mode 100644
index 00000000..1a34ac00
--- /dev/null
+++ b/lib/data/hjson/hjson.v
@@ -0,0 +1,136 @@
+module hjson
+
+import x.json2
+import freeflowuniverse.herolib.core.texttools
+
+const keep_ascii = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()_-+={}[]"\':;!/>.<,|\\~` '
+
+// rough splitter for json, splits a list of dicts into separate text blocks
+pub fn json_list(r string, clean bool) []string {
+	// mut res := []string{}
+	mut open_counter := 0
+	mut block := []string{}
+	mut blocks := []string{}
+	for ch in r {
+		mut c := ch.ascii_str()
+		// //rough one to debug
+		// if clean && !keep_ascii.contains(c){
+		// 	console.print_debug("SKIP")
+		// 	continue
+		// }
+		// console.print_debug('${c}')
+		if c == '{' {
+			open_counter += 1
+		}
+		if c == '}' {
+			open_counter -= 1
+		}
+		// console.print_debug(open_counter)
+		if open_counter > 0 {
+			block << c
+			// console.print_debug(block.len)
+		}
+		if open_counter == 0 && block.len > 2 {
+			blocks << block.join('') + '}'
+			block = []string{}
+		}
+	}
+	return blocks
+}
+
+// get dict out of json
+// if include is used (not empty), will only match on the keys given
+pub fn json_dict_get_any(r string, clean bool, key string) !json2.Any {
+	mut r2 := r
+	if clean {
+		r2 = texttools.ascii_clean(r2)
+	}
+	if r2.trim(' \n') == '' {
+		return error('Cannot do json2 raw decode in json_dict_get_any.\ndata was empty.')
+	}
+	data_raw := json2.raw_decode(r2) or {
+		return error('Cannot do json2 raw decode in json_dict_get_any.\ndata:\n${r2}\nerror:${err}')
+	}
+	mut res := data_raw.as_map()
+	if key in res {
+		return res[key]!
+	} else {
+		return error('Could not find key:${key} in ${r}')
+	}
+}
+
+pub fn json_dict_get_string(r string, clean bool, key string) !string {
+	r2 := json_dict_get_any(r, clean, key)!
+	return r2.json_str()
+}
+
+// get dict out of json
+// if include is used (not empty), will only match on the keys given
+pub fn json_dict_filter_any(r string, clean bool, include []string, exclude []string) !map[string]json2.Any {
+	mut r2 := r
+	if clean {
+		r2 = texttools.ascii_clean(r2)
+	}
+	if r2.trim(' \n') == '' {
+		return error('Cannot do json2 raw decode in json_dict_filter_any.\ndata was empty.')
+	}
+	data_raw := json2.raw_decode(r2) or {
+		return error('Cannot do json2 raw decode in json_dict_filter_any.\ndata:\n${r2}\nerror:${err}')
+	}
+	mut res := data_raw.as_map()
+	if include != [] {
+		for key in res.keys() {
+			if key !in include {
+				res.delete(key)
+			}
+		}
+	}
+	for key in exclude {
+		res.delete(key)
+	}
+	return res
+}
+
+pub fn json_dict_filter_string(r string, clean bool, include []string, exclude []string) !map[string]string {
+	mut res := json_dict_filter_any(r, clean, include, exclude)!
+	mut res2 := map[string]string{}
+	for key in res.keys() {
+		res2[key] = res[key]!.json_str()
+	}
+	return res2
+}
+
+// the input is a list of dicts e.g. [{"key":{"name":"kristof@incubaid.com",...},{"key":...}]
+// in this example the key would be 'key'
+// returns a list of json2.Any
+pub fn json_list_dict_get_any(r string, clean bool, key string) ![]json2.Any {
+	mut r2 := r
+	if clean {
+		r2 = texttools.ascii_clean(r2)
+	}
+	if r2.trim(' \n') == '' {
+		return error('Cannot do json2 raw decode in json_list_dict_get_any.\ndata was empty.')
+	}
+	data_raw := json2.raw_decode(r2) or {
+		return error('Cannot do json2 raw decode in json_list_dict_get_any.\ndata:\n${r2}\nerror:${err}')
+	}
+	mut res_list := data_raw.arr()
+	mut res_final := []json2.Any{}
+	for item in res_list {
+		mut res := item.as_map()
+		if key in res {
+			res_final << res[key] or { panic('bug') }
+		} else {
+			return error('Could not find key:${key} in ${res} as part of json_list_dict_get_any')
+		}
+	}
+	return res_final
+}
+
+// the input is a list of dicts e.g. [{"key":{"name":"kristof@incubaid.com",...},{"key":...}]
+// in this example the key would be 'key'
+// returns a list of strings which can each be parsed as json
+pub fn json_list_dict_get_string(r string, clean bool, key string) ![]string {
+	r2 := json_list_dict_get_any(r, clean, key)!
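+	// serialize each extracted value back into a JSON string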
+ return r2.map(it.json_str()) +} diff --git a/lib/osal/cmds.v b/lib/osal/cmds.v new file mode 100644 index 00000000..3e63dca8 --- /dev/null +++ b/lib/osal/cmds.v @@ -0,0 +1,304 @@ +module osal + +import os +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.ui.console +// import regex +import freeflowuniverse.herolib.core.texttools + +pub struct CmdAddArgs { +pub mut: + cmdname string + source string @[required] // path where the binary is + symlink bool // if rather than copy do a symlink + reset bool = true // if existing cmd will delete + // bin_repo_url string = 'https://github.com/freeflowuniverse/freeflow_binary' // binary where we put the results +} + +// copy a binary to the right location on the local computer . +// e.g. is /usr/local/bin on linux . +// e.g. is ~/hero/bin on osx . +// will also add the bin location to the path of .zprofile and .zshrc (different per platform) +pub fn cmd_add(args_ CmdAddArgs) ! { + mut args := args_ + if args.cmdname == '' { + args.cmdname = os.base(args.source) + } + mut dest := bin_path()! + + mut sourcepath := pathlib.get_file(path: args.source, create: false)! + mut destpath := '${dest}/${args.cmdname}' + + console.print_debug(destpath) + + // check if there is other file + res := os.execute('which ${args.cmdname}') + if res.exit_code == 0 { + existing_path := res.output.trim_space() + if destpath != existing_path { + console.print_debug(' - did find a cmd which is not in path we expect:\n expected:${destpath}\n got:${existing_path}') + if args.reset { + if existing_path.contains('homebrew/bin') { + exec(cmd: 'brew uninstall ${args.cmdname}') or { + return error('failed to remove existing command using brew') + } + } else { + os.rm(existing_path)! + } + } else { + return error("existing cmd found on: ${existing_path} and can't delete.\nWas trying to install on ${destpath}.") + } + } + } + + if args.symlink { + sourcepath.link(destpath, true)! + } else { + sourcepath.copy(dest: destpath, rsync: false)! + } + + mut destfile := pathlib.get_file(path: destpath, create: false)! + + destfile.chmod(0o770)! // includes read & write & execute + + // lets make sure this path is in profile + profile_path_add_remove(paths2add: dest)! +} + +pub fn profile_path_add_hero() !string { + mut dest := bin_path()! + profile_path_add_remove(paths2add: dest)! + return dest +} + +pub fn bin_path() !string { + mut dest := '' + if is_osx() { + dest = '${os.home_dir()}/hero/bin' + dir_ensure(dest)! + } else { + dest = '/usr/local/bin' + } + return dest +} + +pub fn hero_path() !string { + mut dest := '' + dest = '${os.home_dir()}/hero' + dir_ensure(dest)! + return dest +} + +///usr/local on linux, ${os.home_dir()}/hero on osx +pub fn usr_local_path() !string { + mut dest := '' + if is_osx() { + dest = '${os.home_dir()}/hero' + dir_ensure(dest)! + } else { + dest = '/usr/local' + } + return dest +} + +// return the source statement if the profile exists +pub fn profile_path_source() string { + if hostname() or { '' } == 'rescue' { + return '' + } + pp := profile_path() + if os.exists(pp) { + return 'source ${pp}' + } + return '' +} + +// return source $path && . +// or empty if it doesn't exist +pub fn profile_path_source_and() string { + if hostname() or { '' } == 'rescue' { + return '' + } + pp := profile_path() + if os.exists(pp) { + return '. 
${pp} &&' + } + return '' +} + +fn profile_paths_get(content string) []string { + mut paths := []string{} + for line in content.split_into_lines() { + if line.contains('PATH') { + post := line.all_after_last('=').trim('\'" ,') + splitted := post.split(':') + for item in splitted { + item2 := item.trim(' "\'') + if item2 !in paths && !item2.contains('PATH') { + paths << item2 + } + } + } + } + return paths +} + +@[params] +pub struct ProfilePathAddRemoveArgs { +pub mut: + paths_profile string + paths2add string + paths2delete string + allprofiles bool +} + +// add and/or remove paths from profiles +// if paths_profile not specified it will walk over all of them +pub fn profile_path_add_remove(args_ ProfilePathAddRemoveArgs) ! { + mut args := args_ + + mut paths_profile := texttools.to_array(args.paths_profile) + mut paths2add := texttools.to_array(args.paths2add) + mut paths2delete := texttools.to_array(args.paths2delete) + + if paths_profile.len == 0 { + if args.allprofiles { + paths_profile = profile_paths_all()! + } else { + paths_profile = profile_paths_preferred()! + } + } + + for path_profile_str in paths_profile { + mut path_profile := pathlib.get_file(path: path_profile_str, create: true)! + mut c := path_profile.read()! + mut c_out := '' // the result file + mut paths_existing_inprofile := profile_paths_get(c) + console.print_debug(" -- profile path profile:'${path_profile_str}' add:'${args.paths2add}' delete:'${args.paths2delete}'") + // Remove paths to delete + for mut todelete in paths2delete { + todelete = todelete.trim_space() + if todelete.len > 0 { + if todelete.starts_with('/') || todelete.starts_with('~') { + paths_existing_inprofile = paths_existing_inprofile.filter(it != todelete) + paths_existing_inprofile = paths_existing_inprofile.filter(it.replace('~', + os.home_dir()) != todelete) + } else { + paths_existing_inprofile = paths_existing_inprofile.filter(!(it.contains(todelete))) + } + } + } + + // Add new paths if they don't exist + for mut path2add in paths2add { + if path2add !in paths_existing_inprofile { + path2add = path2add.replace('~', os.home_dir()) + if !os.exists(path2add) { + return error("can't add path to profile, doesn't exist: ${path2add}") + } + paths_existing_inprofile << path2add + } + } + + // Remove existing PATH declarations + lines := c.split_into_lines() + for line in lines { + if !line.to_lower().starts_with('export path=') { + c_out += line + '\n' + } + } + + // Sort the paths + paths_existing_inprofile.sort() + + // println(paths_existing_inprofile) + // if true{panic("ss")} + + // Add the sorted paths + for item in paths_existing_inprofile { + c_out += 'export PATH=\$PATH:${item}\n' + } + + // Only write if the content has changed + if c.trim_space() != c_out.trim_space() { + path_profile.write(c_out)! + } + } +} + +// is same as executing which in OS +// returns path or error +pub fn cmd_path(cmd string) !string { + res := os.execute('which ${cmd}') + if res.exit_code == 0 { + return res.output.trim_space() + } + return error("can't do find path for cmd: ${cmd}") +} + +// delete cmds from found locations +// can be one command of multiple +pub fn cmd_delete(cmd string) ! { + cmds := texttools.to_array(cmd) + for cmd2 in cmds { + res := cmd_path(cmd2) or { '' } + if res.len > 0 { + if os.exists(res) { + os.rm(res)! 
+			}
+		}
+	}
+}
+
+// return the profile paths which exist in the OS
+pub fn profile_paths_all() ![]string {
+	mut profile_files_ := []string{}
+
+	profile_files_ = [
+		'/etc/profile',
+		'/etc/bash.bashrc',
+		'${os.home_dir()}/.bashrc',
+		'${os.home_dir()}/.bash_profile',
+		'${os.home_dir()}/.profile',
+		'${os.home_dir()}/.zprofile',
+		'${os.home_dir()}/.zshrc',
+	]
+
+	mut profile_files2 := []string{}
+
+	for file in profile_files_ {
+		if os.exists(file) {
+			profile_files2 << file
+		}
+	}
+	return profile_files2
+}
+
+pub fn profile_paths_preferred() ![]string {
+	mut toadd := []string{}
+	if is_osx() {
+		toadd << '${os.home_dir()}/.zprofile'
+		toadd << '${os.home_dir()}/.zshrc'
+	} else {
+		toadd << '${os.home_dir()}/.bash_profile'
+		toadd << '${os.home_dir()}/.bashrc'
+		toadd << '${os.home_dir()}/.zshrc'
+	}
+	mut profile_files2 := []string{}
+
+	for file in toadd {
+		if os.exists(file) {
+			println('${file} exists')
+			profile_files2 << file
+		}
+	}
+	return profile_files2
+}
+
+pub fn profile_path() string {
+	if is_osx() {
+		return '${os.home_dir()}/.zprofile'
+	} else {
+		return '${os.home_dir()}/.bash_profile'
+	}
+}
diff --git a/lib/osal/downloader.v b/lib/osal/downloader.v
new file mode 100644
index 00000000..cfe4e0cd
--- /dev/null
+++ b/lib/osal/downloader.v
@@ -0,0 +1,138 @@
+module osal
+
+import freeflowuniverse.herolib.core.pathlib
+// import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.ui.console
+import os
+
+@[params]
+pub struct DownloadArgs {
+pub mut:
+	name        string // optional (otherwise derived out of filename)
+	url         string
+	reset       bool   // will remove
+	hash        string // if hash is known, will verify what hash is
+	dest        string // if specified will copy to that destination
+	timeout     int = 180
+	retry       int = 3
+	minsize_kb  u32 = 10 // is always in kb
+	maxsize_kb  u32
+	expand_dir  string
+	expand_file string
+}
+
+// if name is not specified, it will be the filename part of the url
+// if the last part ends in an extension like .md .txt .log .text ... the file will be downloaded
+pub fn download(args_ DownloadArgs) !pathlib.Path {
+	mut args := args_
+
+	console.print_header('download: ${args.url}')
+	if args.name == '' {
+		if args.dest != '' {
+			args.name = args.dest.split('/').last()
+		} else {
+			mut lastname := args.url.split('/').last()
+			if lastname.contains('?') {
+				return error('cannot get name from url if ? in the last part after /')
+			}
+			args.name = lastname
+		}
+		if args.name == '' {
+			return error('cannot find name for download')
+		}
+	}
+
+	if args.dest.contains('@name') {
+		args.dest = args.dest.replace('@name', args.name)
+	}
+	if args.url.contains('@name') {
+		args.url = args.url.replace('@name', args.name)
+	}
+
+	if args.dest == '' {
+		args.dest = '/tmp/${args.name}'
+	}
+
+	if !cmd_exists('curl') {
+		return error('please make sure curl has been installed.')
+	}
+
+	mut dest := pathlib.get_file(path: args.dest, check: false)!
+
+	// now check to see the url is not different
+	mut meta := pathlib.get_file(path: args.dest + '.meta', create: true)!
+	metadata := meta.read()!
+	if metadata.trim_space() != args.url.trim_space() {
+		// means it is a new url, need to delete the old download
+		args.reset = true
+		dest.delete()!
+	}
+
+	if args.reset {
+		mut dest_delete := pathlib.get_file(path: args.dest + '_', check: false)!
+		dest_delete.delete()!
+	}
+
+	meta.write(args.url.trim_space())!
+
+	// check if the file exists, if yes and right size lets return
+	mut todownload := true
+	if dest.exists() {
+		size := dest.size_kb()!
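+		// when the existing file already meets the minimum size we can skip the download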
+		if args.minsize_kb > 0 {
+			if size > args.minsize_kb {
+				todownload = false
+			}
+		}
+	}
+
+	if todownload {
+		mut dest0 := pathlib.get_file(path: args.dest + '_')!
+
+		cmd := '
+		rm -f ${dest0.path}
+		cd /tmp
+		curl -L \'${args.url}\' -o ${dest0.path}
+		'
+		exec(
+			cmd:         cmd
+			timeout:     args.timeout
+			retry:       args.retry
+			debug:       false
+			description: 'download ${args.url} to ${dest0.path}'
+			stdout:      true
+		)!
+
+		if dest0.exists() {
+			size0 := dest0.size_kb()!
+			// console.print_debug(size0)
+			if args.minsize_kb > 0 {
+				if size0 < args.minsize_kb {
+					return error('Could not download ${args.url} to ${dest0.path}, size (${size0}) was smaller than ${args.minsize_kb}')
+				}
+			}
+			if args.maxsize_kb > 0 {
+				if size0 > args.maxsize_kb {
+					return error('Could not download ${args.url} to ${dest0.path}, size (${size0}) was larger than ${args.maxsize_kb}')
+				}
+			}
+		}
+		dest0.rename(dest.name())!
+		dest.check()
+	}
+	if args.expand_dir.len > 0 {
+		if os.exists(args.expand_dir) {
+			os.rmdir_all(args.expand_dir)!
+		}
+
+		return dest.expand(args.expand_dir)!
+	}
+	if args.expand_file.len > 0 {
+		if os.exists(args.expand_file) {
+			os.rm(args.expand_file)!
+		}
+		return dest.expand(args.expand_file)!
+	}
+
+	return dest
+}
diff --git a/lib/osal/env.v b/lib/osal/env.v
new file mode 100644
index 00000000..e302c577
--- /dev/null
+++ b/lib/osal/env.v
@@ -0,0 +1,79 @@
+module osal
+
+import freeflowuniverse.herolib.core.pathlib
+import os
+
+@[params]
+pub struct EnvSet {
+pub mut:
+	key       string @[required]
+	value     string @[required]
+	overwrite bool = true
+}
+
+@[params]
+pub struct EnvSetAll {
+pub mut:
+	env                 map[string]string
+	clear_before_set    bool
+	overwrite_if_exists bool = true
+}
+
+// Sets an environment variable if it was not set before; overwrites the environment variable if it exists and overwrite was set to true (the default)
+pub fn env_set(args EnvSet) {
+	os.setenv(args.key, args.value, args.overwrite)
+}
+
+// Unsets an environment variable
+pub fn env_unset(key string) {
+	os.unsetenv(key)
+}
+
+// Unsets all environment variables
+pub fn env_unset_all() {
+	for key, _ in os.environ() {
+		env_unset(key)
+	}
+}
+
+// Sets multiple environment variables in one go; if clear_before_set is true all existing environment variables are unset first, if overwrite_if_exists is true (the default) existing environment variables are overwritten
+pub fn env_set_all(args EnvSetAll) {
+	if args.clear_before_set {
+		env_unset_all()
+	}
+	for key, val in args.env {
+		env_set(key: key, value: val, overwrite: args.overwrite_if_exists)
+	}
+}
+
+// Returns all existing environment variables
+pub fn env_get_all() map[string]string {
+	return os.environ()
+}
+
+// Returns the requested environment variable if it exists or throws an error if it does not
+pub fn env_get(key string) !string {
+	return os.environ()[key]!
+}
+
+// Returns the requested environment variable if it exists or returns the provided default value if it does not
+pub fn env_get_default(key string, def string) string {
+	return os.environ()[key] or { return def }
+}
+
+pub fn load_env_file(file_path string) ! {
+	mut file := pathlib.get_file(path: file_path)!
+	content := file.read()!
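+	// parse KEY=VALUE pairs line by line, skipping blanks and # comment lines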
+ lines := content.split_into_lines() + for line in lines { + if line.len == 0 || line[0] == `#` { + continue + } + if !line.contains('=') { + continue + } + key := line.all_before('=').trim_space() + value := line.all_after('=').trim_space() + os.setenv(key, value, true) + } +} diff --git a/lib/osal/env_test.v b/lib/osal/env_test.v new file mode 100644 index 00000000..c6c89cbd --- /dev/null +++ b/lib/osal/env_test.v @@ -0,0 +1,41 @@ +module osal + +fn test_env_get_default() ! { + key := 'keythatshouldnotexist' + def_value := 'defaultvalue' + + env_unset(key) + + env_get(key) or { + assert env_get_default(key, def_value) == def_value + return + } + return error('The environment value ${key} should have been unset, it was not!') +} + +fn test_env_set_env_get_env_unset() ! { + key := 'myenvironmentvariable' + value := 'somevalue' + + env_set(key: key, value: value) + + assert env_get(key)! == value + + env_unset(key) + + env_get(key) or { return } + return error('The environment variable ${key} should have been unset, it was not!') +} + +fn test_env_unset_all_and_set_all_and_get_all() { + mut env := map[string]string{} + env['Dummy'] = 'dummy' + + env_unset_all() + + assert env_get_all() == map[string]string{} + + env_set_all(env: env) + + assert env_get_all() == env +} diff --git a/lib/osal/exec.v b/lib/osal/exec.v new file mode 100644 index 00000000..cba675fe --- /dev/null +++ b/lib/osal/exec.v @@ -0,0 +1,445 @@ +module osal + +// import freeflowuniverse.herolib.core.texttools +// import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.ui.console +import json +import os +import time +// import io.util + +pub struct JobError { + Error +pub mut: + job Job + error_type ErrorType +} + +pub enum ErrorType { + exec + timeout + args +} + +fn (err JobError) msg() string { + if err.error_type == .args { + return 'Error in arguments:\n${err.job.cmd}' + } + if err.error_type == .timeout { + return 'Execution failed timeout\n${err.job}' + } + mut msg := 'Execution failed with code ${err.job.exit_code}\n' + if err.job.cmd.scriptpath.len > 0 { + msg += '\nscript path:${err.job.cmd.scriptpath}' + } + if err.job.output.len > 0 { + msg += '\n\n## stdout:\n${err.job.output}' + } + if err.job.error.len > 0 { + msg += '\n\n## stderr:\n${err.job.error}' + } + return msg +} + +fn (err JobError) code() int { + if err.error_type == .timeout { + return 9999 + } + return err.job.exit_code +} + +@[params] +pub struct Command { +pub mut: + name string // to give a name to your command, good to see logs... 
+ cmd string + description string + timeout int = 3600 // timeout in sec + stdout bool = true + stdout_log bool = true + raise_error bool = true // if false, will not raise an error but still error report + ignore_error bool // means if error will just exit and not raise, there will be no error reporting + work_folder string // location where cmd will be executed + environment map[string]string // env variables + ignore_error_codes []int + scriptpath string // is the path where the script will be put which is executed + scriptkeep bool // means we don't remove the script + debug bool // if debug will put +ex in the script which is being executed and will make sure script stays + shell bool // means we will execute it in a shell interactive + retry int + interactive bool = true + async bool + runtime RunTime +} + +pub enum JobStatus { + init + running + error_exec + error_timeout + error_args + done +} + +pub enum RunTime { + bash + python + heroscript + herocmd + v +} + +pub struct Job { +pub mut: + start time.Time + end time.Time + cmd Command + output string + error string + exit_code int + status JobStatus + process ?&os.Process @[skip; str: skip] + runnr int // nr of time it runs, is for retry +} + +// cmd is the cmd to execute can use ' ' and spaces . +// if \n in cmd it will write it to ext and then execute with bash . +// if die==false then will just return returncode,out but not return error . +// if stdout will show stderr and stdout . +// . +// if cmd starts with find or ls, will give to bash -c so it can execute . +// if cmd has no path, path will be found . +// . +// Command argument: . +//``` +// name string // to give a name to your command, good to see logs... +// cmd string +// description string +// timeout int = 3600 // timeout in sec +// stdout bool = true +// stdout_log bool = true +// raise_error bool = true // if false, will not raise an error but still error report +// ignore_error bool // means if error will just exit and not raise, there will be no error reporting +// work_folder string // location where cmd will be executed +// environment map[string]string // env variables +// ignore_error_codes []int +// scriptpath string // is the path where the script will be put which is executed +// scriptkeep bool // means we don't remove the script +// debug bool // if debug will put +ex in the script which is being executed and will make sure script stays +// shell bool // means we will execute it in a shell interactive +// retry int +// interactive bool = true // make sure we run on non interactive way +// async bool +// runtime RunTime (.bash, .python) +// +// returns Job: +// start time.Time +// end time.Time +// cmd Command +// output []string +// error []string +// exit_code int +// status JobStatus +// process os.Process +//``` +// return Job . +pub fn exec(cmd Command) !Job { + mut job := Job{ + cmd: cmd + } + job.start = time.now() + + if job.cmd.debug { + job.cmd.stdout = true + console.print_header(' execute: ${job.cmd.cmd}') + } + + if cmd.shell { + // $if debug { + // console.print_debug('cmd shell: ${cmd.cmd}') + // } + scriptpath := cmd_to_script_path(job.cmd)! + os.execvp(scriptpath, [])! + return job + } + if !cmd.async { + job.execute_retry() or { + // println(err) + return err + } + } + return job +} + +// execute the job and wait on result +// will retry as specified +pub fn (mut job Job) execute_retry() ! { + for x in 0 .. 
job.cmd.retry + 1 {
+		job.execute() or {
+			if x == job.cmd.retry {
+				// println(job)
+				return err
+			}
+		}
+		// println(job)
+		if job.status == .done {
+			// means the command executed, we can stop
+			return
+		}
+	}
+	job.close()!
+}
+
+// execute the job, start process, process will not be closed .
+// important: you need to close the process later by job.close()! otherwise we get zombie processes
+pub fn (mut job Job) execute() ! {
+	job.runnr += 1
+	job.start = time.now()
+	job.status = .running
+
+	job.cmd.scriptpath = cmd_to_script_path(job.cmd)!
+
+	// console.print_debug(" - process execute ${process_args[0]}")
+	mut p := os.new_process(job.cmd.scriptpath)
+
+	if job.cmd.work_folder.len > 0 {
+		p.set_work_folder(job.cmd.work_folder)
+	}
+	if job.cmd.environment.len > 0 {
+		p.set_environment(job.cmd.environment)
+	}
+	p.set_redirect_stdio()
+	// console.print_debug("process setargs ${process_args[1..process_args.len]}")
+	// p.set_args(process_args[1..process_args.len])
+	if job.cmd.stdout {
+		console.print_debug('')
+	}
+	p.run()
+	job.process = p
+	job.wait()!
+}
+
+// ORDER IS
+// EXECUTE
+// LOOP -> WAIT -> PROCESS -> READ
+// -> CLOSE
+
+// wait till the job finishes or goes in error
+pub fn (mut job Job) wait() ! {
+	// if job.status != .running && job.status != .init {
+	// 	return error('can only wait for running job')
+	// }
+
+	for {
+		job.process()!
+		// console.print_debug(result)
+		if job.status == .done {
+			// console.print_stderr("wait done")
+			job.close()!
+			return
+		}
+		time.sleep(10 * time.millisecond)
+	}
+	job.close()!
+}
+
+// process (read std.err and std.out of process)
+pub fn (mut job Job) process() ! {
+	// $if debug{console.print_debug(" - job process: $job")}
+	if job.status == .init {
+		panic('should not be here')
+		// job.execute()!
+	}
+	mut p := job.process or { return error('there is no process on job') }
+
+	// mut result := job.read()!
+
+	job.read()!
+	if p.is_alive() {
+		job.read()!
+		// result=job.read()!
+		// timeout is expressed in seconds, 0 means no timeout
+		if job.cmd.timeout > 0 && time.now().unix() > job.start.unix() + job.cmd.timeout {
+			// console.print_stderr("TIMEOUT TIMEOUT TIMEOUT TIMEOUT")
+			p.signal_pgkill()
+			p.close()
+			job.exit_code = 9999
+			job.end = time.now()
+			job.status = .error_timeout
+			if job.cmd.raise_error {
+				return JobError{
+					job:        job
+					error_type: .timeout
+				}
+			}
+		}
+	} else {
+		// console.print_stderr(" - process stopped")
+		job.read()!
+		job.read()!
+		job.status = .done
+		// result.done = true
+		if p.code > 0 {
+			// console.print_stderr(' ########## Process CODE IS > 0')
+			job.exit_code = p.code
+			job.status = .error_exec
+			job.cmd.scriptkeep = true
+			job.close()!
+		}
+	}
+}
+
+fn (mut job Job) read() ! {
+	mut p := job.process or { return error('there is no process on job') }
+
+	// console.print_debug("READ STDOUT")
+	out_std := p.pipe_read(.stdout) or { '' }
+	// console.print_debug(" OK")
+	if out_std.len > 0 {
+		if job.cmd.stdout {
+			console.print_stdout(out_std)
+		}
+		job.output += out_std
+	}
+	// console.print_debug("READ ERROR")
+	out_error := p.pipe_read(.stderr) or { '' }
+	// console.print_debug(" OK")
+	if out_error.len > 0 {
+		if job.cmd.stdout && job.cmd.ignore_error == false {
+			console.print_stderr(out_error)
+		}
+		job.error += out_error
+	}
+}
+
+// will wait & close
+pub fn (mut job Job) close() !
{ + mut p := job.process or { return error('there is no process on job') } + // console.print_debug("CLOSE") + p.signal_pgkill() + p.wait() + p.close() + job.end = time.now() + if job.exit_code > 0 && job.exit_code !in job.cmd.ignore_error_codes { + if !job.cmd.ignore_error { + errorpath := job.cmd.scriptpath.all_before_last('.sh') + '_error.json' + errorjson := json.encode_pretty(job) + os.write_file(errorpath, errorjson) or { + msg := 'cannot write errorjson to ${errorpath}' + return error(msg) + } + + errorpath2 := job.cmd.scriptpath.all_before_last('.sh') + '_error.log' + mut errortxt := '# ERROR:\n\n' + errortxt += job.cmd.cmd + '\n' + errortxt += '## OUTPUT:\n\n' + errortxt += job.output + os.write_file(errorpath2, errortxt) or { + msg := 'cannot write error to ${errorpath2}' + return error(msg) + } + + je := JobError{ + job: job + error_type: .exec + } + if job.cmd.stdout { + console.print_debug('Job Error') + console.print_debug(je.msg()) + } + if job.cmd.raise_error { + return je + } + } + } + + if job.exit_code == 0 && job.cmd.scriptkeep == false && os.exists(job.cmd.scriptpath) { + // console.print_debug(job.cmd.scriptpath) + os.rm(job.cmd.scriptpath)! + } + if job.cmd.ignore_error == false && job.cmd.scriptkeep == false && os.exists(job.cmd.scriptpath) { + os.rm(job.cmd.scriptpath)! + } + // job.status = .done + + if job.cmd.raise_error && job.exit_code > 0 { + return JobError{ + job: job + error_type: .exec + } + } +} + +// shortcut to execute a job silent +pub fn execute_silent(cmd string) !string { + job := exec(cmd: cmd, stdout: false)! + return job.output +} + +pub fn execute_debug(cmd string) !string { + job := exec(cmd: cmd, stdout: true, debug: true)! + return job.output +} + +// shortcut to execute a job to stdout +pub fn execute_stdout(cmd string) !string { + job := exec(cmd: cmd, stdout: true)! + return job.output +} + +// shortcut to execute a job interactive means in shell +pub fn execute_interactive(cmd string) ! { + exec(cmd: cmd, stdout: true, shell: true)! +} + +// executes a cmd, if not error return true +pub fn execute_ok(cmd string) bool { + res := os.execute(cmd) + if res.exit_code > 0 { + return false + } + return true +} + +pub fn cmd_exists(cmd string) bool { + cmd1 := 'which ${cmd}' + res := os.execute(cmd1) + if res.exit_code > 0 { + return false + } + return true +} + +pub fn cmd_exists_profile(cmd string) bool { + cmd1 := '${profile_path_source_and()} which ${cmd}' + res := os.execute(cmd1) + if res.exit_code > 0 { + return false + } + return true +} + +// cmd is the cmd to execute can use ' ' and spaces +// if \n in cmd it will write it to ext and then execute with bash +// if die==false then will just return returncode,out but not return error +// if stdout will show stderr and stdout +// +// if cmd starts with find or ls, will give to bash -c so it can execute +// if cmd has no path, path will be found +// $... are remplaced by environment arguments TODO:implement +// +// Command argument: +// cmd string +// timeout int = 600 +// stdout bool = true +// die bool = true +// debug bool +// +// return what needs to be executed can give it to bash -c ... +pub fn exec_string(cmd Command) !string { + mut job := Job{ + cmd: cmd + } + job.start = time.now() + job.cmd.scriptpath = cmd_to_script_path(job.cmd)! 
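+	// the script is only written here; the caller decides how and when to execute it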
+ return job.cmd.scriptpath +} diff --git a/lib/osal/exec_test.v b/lib/osal/exec_test.v new file mode 100644 index 00000000..f2e2f0a6 --- /dev/null +++ b/lib/osal/exec_test.v @@ -0,0 +1,78 @@ +module osal + +// import crypto.md5 +// import os + +// TODO: needs to be rewritten for process + +// TODO: remove this test, to make the tests pass we need at least one test +fn test_does_nothing() { +} + +// const ( +// cmd_create_file_and_print_content = '#!/bin/bash +// mkdir -p /tmp/testdirectory +// echo text > /tmp/testdirectory/file.txt +// cat /tmp/testdirectory/file.txt +// ' +// ) + +// // Test that succeeds in creating a file and printing the content of that file +// fn test_exec_cmd_create_file_and_print_content() ! { +// res := exec(cmd: osal.cmd_create_file_and_print_content, remove_installer: false)! + +// assert res.trim_space() == 'text' +// assert os.is_file('/tmp/testdirectory/file.txt') +// assert os.is_file('/tmp/installer.sh') + +// // cleanup +// os.rmdir_all('/tmp/testdirectory')! +// } + +// // Test where the command fails and we retry 2 times and it still fails +// fn test_exec_cmd_fail_and_retry() ! { +// res := exec(cmd: 'lsk ./', retry: 2) or { +// assert err.code() == 127 +// assert err.msg().contains('Execution failed with code 127'), err.msg() +// assert !os.is_file('/tmp/installer.sh') +// return +// } +// return error('The command should fail and return an error!') +// } + +// // Test where the execution takes too long and a timeout occurs +// fn test_exec_cmd_fail_due_timeout() ! { +// res := exec(cmd: 'sleep 10s', retry_timeout: 100) or { +// assert err.code() == 9999 +// assert err.msg().contains('Execution failed timeout'), err.msg() +// return +// } +// return error('The command should fail and return an error!') +// } + +// // Test where the command returns in an error but we ignore that error code +// fn test_exec_ignore_error_codes() ! { +// args := ExecArgs{ +// cmd: 'exit 10' +// ignore_error_codes: [10] +// } + +// mut res := exec(args)! +// } + +// // Test using a cached result with a period of 10 milliseconds +// fn test_exec_cmd_done() ! { +// args := ExecArgs{ +// cmd: 'echo sometext' +// remove_installer: false +// reset: false +// period: 10 +// } +// hhash := md5.hexhash(args.cmd) +// mut res := exec(args)! +// redis_str := done_get_str('exec_${hhash}') +// assert redis_str.trim_space().ends_with('sometext') +// assert res.trim_space() == 'sometext' +// res = exec(args)! 
+
+// assert res.trim_space() == 'sometext'
+// }
diff --git a/lib/osal/exec_to_scriptpath.v b/lib/osal/exec_to_scriptpath.v
new file mode 100644
index 00000000..1e8703d7
--- /dev/null
+++ b/lib/osal/exec_to_scriptpath.v
@@ -0,0 +1,78 @@
+module osal
+
+import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.core.pathlib
+// import freeflowuniverse.herolib.ui.console
+
+// will return a temporary path which can then be executed; is a helper function for making a script out of a command
+pub fn cmd_to_script_path(cmd Command) !string {
+	// all will be done over the filesystem now
+	mut cmdcontent := texttools.dedent(cmd.cmd)
+	if !cmdcontent.ends_with('\n') {
+		cmdcontent += '\n'
+	}
+
+	if cmd.environment.len > 0 {
+		mut cmdenv := ''
+		for key, val in cmd.environment {
+			cmdenv += "export ${key}='${val}'\n"
+		}
+		cmdcontent = cmdenv + '\n' + cmdcontent
+		// process.set_environment(args.environment)
+	}
+
+	// use bash debug and die on error features
+	mut firstlines := ''
+	mut extension := 'sh'
+	if cmd.runtime == .bash || cmd.runtime == .herocmd {
+		if !cmd.cmd.contains('#!/bin/bash') {
+			firstlines = '#!/bin/bash\n\n'
+			if !cmd.ignore_error {
+				firstlines += 'set -e\n' // exec 2>&1\n
+			} else {
+				firstlines += 'set +e\n' // exec 2>&1\n
+			}
+			if cmd.debug {
+				firstlines += 'set -x\n' // exec 2>&1\n
+			}
+		}
+		if !cmd.interactive {
+			// firstlines += 'export DEBIAN_FRONTEND=noninteractive TERM=xterm\n\n'
+			firstlines += 'export DEBIAN_FRONTEND=noninteractive\n\n'
+		}
+		if cmd.work_folder.len > 0 {
+			firstlines += 'cd ${cmd.work_folder}\n'
+		}
+		if cmd.runtime == .herocmd {
+			firstlines += 'hero ' // put hero on the next line, the cmdcontent will be appended then
+			extension = 'hero'
+		}
+	} else if cmd.runtime == .python {
+		firstlines = '#!/usr/bin/env python3\n\n'
+		extension = 'py'
+	} else if cmd.runtime == .heroscript {
+		firstlines = '#!/usr/bin/env hero\n\n'
+		extension = 'hero'
+	} else if cmd.runtime == .v {
+		firstlines = '#!/usr/bin/env v\n\n'
+		extension = 'vsh'
+	} else {
+		panic("can't find runtime type")
+	}
+
+	cmdcontent = firstlines + cmdcontent
+
+	mut scriptpath := if cmd.scriptpath.len > 0 {
+		cmd.scriptpath
+	} else {
+		''
+	}
+	scriptpath = pathlib.temp_write(
+		text: cmdcontent
+		path: scriptpath
+		name: cmd.name
+		ext:  extension
+	) or { return error('error: cannot write script to execute: ${err}') }
+	// console.print_debug(" - scriptpath: ${cmd.scriptpath}")
+	return scriptpath
+}
diff --git a/lib/osal/file.v b/lib/osal/file.v
new file mode 100644
index 00000000..aa657a93
--- /dev/null
+++ b/lib/osal/file.v
@@ -0,0 +1,61 @@
+module osal
+
+import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.ui.console
+import os
+
+pub fn file_write(path string, text string) ! {
+	return os.write_file(path, text)
+}
+
+pub fn file_read(path string) !string {
+	return os.read_file(path)
+}
+
+// create the dir if it doesn't exist yet
+pub fn dir_ensure(path string) ! {
+	if !os.exists(path) {
+		os.mkdir_all(path)!
+	}
+}
+
+// remove the dir if it exists
+pub fn dir_delete(path string) ! {
+	if os.exists(path) {
+		return os.rmdir_all(path)
+	}
+}
+
+// remove the dir if it exists
+// and then (re-)create it
+pub fn dir_reset(path string) ! {
+	dir_delete(path)!
+	os.mkdir_all(path)!
+}
+
+// can be a list of dirs, files
+// ~ supported
+// can be \n or , separated
+pub fn rm(todelete_ string) !
{ + for mut item in texttools.to_array(todelete_) { + if item.trim_space() == '' { + continue + } + item = item.replace('~', os.home_dir()) + console.print_debug(' - rm: ${item}') + if item.starts_with('/') { + if os.exists(item) { + if os.is_dir(item) { + os.rmdir_all(item)! + } else { + os.rm(item)! + } + } + } else { + if item.contains('/') { + return error('there should be no / in to remove list') + } + cmd_delete(item)! // look for the command, if will be removed if found + } + } +} diff --git a/lib/osal/hostsfile/hostsfile.v b/lib/osal/hostsfile/hostsfile.v new file mode 100644 index 00000000..5963ea22 --- /dev/null +++ b/lib/osal/hostsfile/hostsfile.v @@ -0,0 +1,141 @@ +module hostsfile + +import os +import freeflowuniverse.herolib.osal + +// TODO: will be broken now + +@[heap] +pub struct HostsFile { +pub mut: + sections []Section +} + +pub struct Section { +pub mut: + name string + hosts []Host +} + +pub struct Host { +pub mut: + ip string + domain string +} + +// pub fn new() HostsFile { +// mut obj := HostsFile{} + +// mut content := os.read_file('/etc/hosts') or { panic(err) } +// mut section := '' + +// for mut line in content.split('\n') { +// line = line.trim_space() +// if line.starts_with('#') { +// section = line.trim('#').trim_space() +// continue +// } + +// mut splitted := line.fields() +// if splitted.len > 1 { +// if section !in obj.hosts { +// obj.hosts[section] = []map[string]string{} +// } +// obj.hosts[section] << { +// splitted[0]: splitted[1] +// } +// } +// } +// return obj +// } + +// pub fn (mut hostsfile HostsFile) save(sudo bool) &HostsFile { +// mut str := '' +// for section, items in hostsfile.hosts { +// if section != '' { +// str = str + '# ${section}\n\n' +// } + +// for item in items { +// for ip, domain in item { +// str = str + '${ip}\t${domain}\n' +// } +// } +// str = str + '\n\n' +// } +// if sudo { +// osal.execute_interactive('sudo -- sh -c -e "echo \'${str}\' > /etc/hosts"') or { +// panic(err) +// } +// } else { +// os.write_file('/etc/hosts', str) or { panic(err) } +// } +// return hostsfile +// } + +// pub fn (mut hostsfile HostsFile) reset(sections []string) &HostsFile { +// for section in sections { +// if section in hostsfile.hosts { +// hostsfile.hosts[section] = []map[string]string{} +// } +// } +// return hostsfile +// } + +// pub struct HostItemArg{ +// pub mut: +// ip string +// domain string +// section string = "main" +// } + +// pub fn (mut hostsfile HostsFile) add(args HostItemArg) &HostsFile { +// if args.section !in hostsfile.hosts { +// hostsfile.hosts[args.section] = []map[string]string{} +// } +// hostsfile.hosts[args.section] << { +// ip: domain +// } +// return hostsfile +// } + +// pub fn (mut hostsfile HostsFile) delete(domain string) &HostsFile { +// mut indexes := map[string][]int{} + +// for section, items in hostsfile.hosts { +// indexes[section] = []int{} +// for i, item in items { +// for _, dom in item { +// if dom == domain { +// indexes[section] << i +// } +// } +// } +// } + +// for section, items in indexes { +// for i in items { +// hostsfile.hosts[section].delete(i) +// } +// } + +// return hostsfile +// } + +// pub fn (mut hostsfile HostsFile) delete_section(section string) &HostsFile { +// hostsfile.hosts.delete(section) +// return hostsfile +// } + +// pub fn (mut hostsfile HostsFile) exists(domain string) bool { +// for _, items in hostsfile.hosts { +// for item in items { +// for _, dom in item { +// if dom == domain { +// return true +// } +// } +// } +// } +// return false +// } diff --git 
a/lib/osal/net.v b/lib/osal/net.v
new file mode 100644
index 00000000..c21a4bda
--- /dev/null
+++ b/lib/osal/net.v
@@ -0,0 +1,108 @@
+module osal
+
+import net
+import time
+import freeflowuniverse.herolib.ui.console
+
+pub enum PingResult {
+	ok
+	timeout     // timeout from ping
+	unknownhost // means we don't know the hostname, it's a dns issue
+}
+
+@[params]
+pub struct PingArgs {
+pub mut:
+	address string @[required]
+	count   u8  = 1 // the ping is successful if it got count amount of replies from the other side
+	timeout u16 = 1 // the time in which the other side should respond, in seconds
+	retry   u8
+}
+
+// if the timeout is reached, the result will be .timeout
+// address is e.g. 8.8.8.8
+// ping means we check if the destination responds
+pub fn ping(args PingArgs) !PingResult {
+	platform_ := platform()
+	mut cmd := 'ping'
+	if args.address.contains(':') {
+		cmd = 'ping6'
+	}
+	if platform_ == .osx {
+		cmd += ' -c ${args.count} -i ${args.timeout} ${args.address}'
+	} else if platform_ == .ubuntu {
+		cmd += ' -c ${args.count} -w ${args.timeout} ${args.address}'
+	} else {
+		return error('Unsupported platform for ping')
+	}
+	console.print_debug(cmd)
+	_ := exec(cmd: cmd, retry: args.retry, timeout: 0, stdout: false) or {
+		// println("ping failed.error.\n${err}")
+		if err.code() == 9999 {
+			return .timeout
+		}
+		if platform_ == .osx {
+			return match err.code() {
+				2 {
+					.timeout
+				}
+				68 {
+					.unknownhost
+				}
+				else {
+					// println("${err} ${err.code()}")
+					error("can't ping on osx (${err.code()})\n${err}")
+				}
+			}
+		} else if platform_ == .ubuntu {
+			return match err.code() {
+				1 { .timeout }
+				2 { .unknownhost }
+				else { error("can't ping on ubuntu (${err.code()})\n${err}") }
+			}
+		} else {
+			panic('bug, should never get here')
+		}
+	}
+	return .ok
+}
+
+@[params]
+pub struct TcpPortTestArgs {
+pub mut:
+	address string @[required] // 192.168.8.8
+	port    int = 22
+	timeout u16 = 2000 // total time in milliseconds to keep on trying
+}
+
+// test if a tcp port answers
+//```
+// address string //192.168.8.8
+// port    int = 22
+// timeout u16 = 2000 // total time in milliseconds to keep on trying
+//```
+pub fn tcp_port_test(args TcpPortTestArgs) bool {
+	start_time := time.now().unix_milli()
+	mut run_time := 0.0
+	for true {
+		run_time = time.now().unix_milli()
+		if run_time > start_time + args.timeout {
+			return false
+		}
+		_ = net.dial_tcp('${args.address}:${args.port}') or {
+			time.sleep(100 * time.millisecond)
+			continue
+		}
+		// console.print_debug(socket)
+		return true
+	}
+	return false
+}
+
+// Returns the ip address as known on the public side
+// is using resolver4.opendns.com
+pub fn ipaddr_pub_get() !string {
+	cmd := 'dig @resolver4.opendns.com myip.opendns.com +short'
+	ipaddr := exec(cmd: cmd)!
+	return ipaddr.output.trim('\n').trim(' \n')
+}
diff --git a/lib/osal/net_test.v b/lib/osal/net_test.v
new file mode 100644
index 00000000..c459142f
--- /dev/null
+++ b/lib/osal/net_test.v
@@ -0,0 +1,18 @@
+module osal
+
+fn test_ipaddr_pub_get() ! {
+	ipaddr := ipaddr_pub_get()!
+	assert ipaddr != ''
+}
+
+fn test_ping() ! {
+	assert ping(address: '127.0.0.1', count: 1)! == .ok
+}
+
+fn test_ping_timeout() ! {
+	assert ping(address: '192.168.145.154', count: 5, timeout: 1)! == .timeout
+}
+
+fn test_ping_unknownhost() !
{ + assert ping(address: '12.902.219.1', count: 1, timeout: 1) == .unknownhost +} diff --git a/lib/osal/notifier/notifier.v b/lib/osal/notifier/notifier.v new file mode 100644 index 00000000..2357e65e --- /dev/null +++ b/lib/osal/notifier/notifier.v @@ -0,0 +1,28 @@ +module notifier + +import os.notify +import os +import time +import freeflowuniverse.herolib.ui.console + +pub struct Notifier { +pub mut: + name string +} + +// TODO: its not working + +pub fn new() !Notifier { + mut n := notify.new()! + mut f := os.open('/Users/despiegk1/code/github/freeflowuniverse/crystallib/osal/examples/download/download_example.v')! + f.close() + // how can we know the filedescriptors of what we need? + fid := f.fd + for i in 0 .. 1000000 { + n.add(fid, .write, .edge_trigger)! + events := n.wait(time.Duration(time.second * 100)) + console.print_debug(events) + time.sleep(time.Duration(time.second * 1)) + } + return Notifier{} +} diff --git a/lib/osal/notifier/readme.md b/lib/osal/notifier/readme.md new file mode 100644 index 00000000..83ecff11 --- /dev/null +++ b/lib/osal/notifier/readme.md @@ -0,0 +1,8 @@ + +# requirements + +```bash +brew install fswatch + +fswatch -r ~/code/github/freeflowuniverse/crystallib.biz.bizmodel +``` \ No newline at end of file diff --git a/lib/osal/osinstaller/diskmgmt.v b/lib/osal/osinstaller/diskmgmt.v new file mode 100644 index 00000000..1d6409a9 --- /dev/null +++ b/lib/osal/osinstaller/diskmgmt.v @@ -0,0 +1,126 @@ +module osinstaller + +import os +import freeflowuniverse.herolib.ui.console + +pub fn (s ServerManager) raid_stop() !bool { + if !os.exists('/proc/mdstat') { + return false + } + + md := os.read_file('/proc/mdstat')! + lines := md.split_into_lines() + + for line in lines { + if line.contains('active') { + dev := line.split(' ')[0] + console.print_debug('[+] stopping raid device: ${dev}') + + r := os.execute('mdadm --stop /dev/${dev}') + if r.exit_code != 0 { + console.print_debug(r.output) + } + } + } + + return true +} + +pub fn (s ServerManager) disks_list() ![]string { + blocks := os.ls('/sys/class/block')! + mut disks := []string{} + + for block in blocks { + if os.is_link('/sys/class/block/${block}/device') { + // discard cdrom + events := os.read_file('/sys/class/block/${block}/events')! + if events.contains('eject') { + continue + } + + // that should be good + disks << block + } + } + + return disks +} + +pub fn (s ServerManager) disk_erase(disk string) bool { + // make it safe via wipefs + r := os.execute('wipefs -a /dev/${disk}') + if r.exit_code != 0 { + console.print_debug(r.output) + return false + } + + return true +} + +fn (s ServerManager) disk_partitions(disk string) ![]string { + mut files := os.ls('/sys/class/block/${disk}')! + mut parts := []string{} + + files.sort() + for file in files { + if file.starts_with(disk) { + parts << file + } + } + + return parts +} + +pub fn (s ServerManager) disk_main_layout(disk string) !map[string]string { + s.execute('parted /dev/${disk} mklabel msdos') + s.execute('parted -a optimal /dev/${disk} mkpart primary 0% 768MB') + s.execute('parted -a optimal /dev/${disk} mkpart primary 768MB 100GB') + s.execute('parted -a optimal /dev/${disk} mkpart primary linux-swap 100GB 104GB') + s.execute('parted -a optimal /dev/${disk} mkpart primary 104GB 100%') + s.execute('parted /dev/${disk} set 1 boot on') + + s.execute('partprobe') + + parts := s.disk_partitions(disk)! 
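+	// the parted calls above create: ~768MB boot, ~99GB root, 4GB swap and the
+	// remaining space as storage; partprobe re-reads the partition table so the
+	// new partitions are visible to the kernel before we list them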
+	if parts.len < 4 {
+		return error('expected at least 4 partitions on /dev/${disk}, found ${parts.len}')
+	}
+
+	mut diskmap := map[string]string{}
+	diskmap['/'] = parts[1]
+	diskmap['/boot'] = parts[0]
+	diskmap['swap'] = parts[2]
+	diskmap['/disk1'] = parts[3]
+
+	boot := '/dev/' + parts[0]
+	root := '/dev/' + parts[1]
+	swap := '/dev/' + parts[2]
+	more := '/dev/' + parts[3]
+
+	console.print_debug('[+] partition map:')
+	console.print_debug('[+] / -> ${root} [ext4]')
+	console.print_debug('[+] /boot -> ${boot} [ext2]')
+	console.print_debug('[+] [swap] -> ${swap} [swap]')
+	console.print_debug('[+] [extra] -> ${more} [btrfs]')
+
+	console.print_debug('[+] creating boot partition')
+	s.execute('mkfs.ext2 ${boot}')
+
+	console.print_debug('[+] creating root partition')
+	s.execute('mkfs.ext4 ${root}')
+
+	console.print_debug('[+] creating swap partition')
+	s.execute('mkswap ${swap}')
+
+	console.print_debug('[+] creating storage partition')
+	s.execute('mkfs.btrfs -f ${more}')
+
+	return diskmap
+}
+
+pub fn (s ServerManager) disk_create_btrfs(disk string) !bool {
+	console.print_debug('[+] creating btrfs on disk: /dev/${disk}')
+	s.execute('mkfs.btrfs -f /dev/${disk}')
+
+	return true
+}
diff --git a/lib/osal/osinstaller/factory.v b/lib/osal/osinstaller/factory.v
new file mode 100644
index 00000000..4cdb30f7
--- /dev/null
+++ b/lib/osal/osinstaller/factory.v
@@ -0,0 +1,24 @@
+module osinstaller
+
+import os
+import freeflowuniverse.herolib.ui.console
+// import json
+// import maxux.vssh
+
+struct ServerManager {
+	root string
+}
+
+pub fn new() ServerManager {
+	sm := ServerManager{}
+	return sm
+}
+
+fn (s ServerManager) execute(command string) bool {
+	// console.print_debug(command)
+
+	r := os.execute(command)
+	// console.print_debug(r)
+
+	// true only when the command exited with code 0, so callers can check
+	return r.exit_code == 0
+}
diff --git a/lib/osal/package.v b/lib/osal/package.v
new file mode 100644
index 00000000..48fd82c1
--- /dev/null
+++ b/lib/osal/package.v
@@ -0,0 +1,113 @@
+module osal
+
+import freeflowuniverse.herolib.ui.console
+import freeflowuniverse.herolib.core.texttools
+
+// update the package list
+pub fn package_refresh() ! {
+	platform_ := platform()
+
+	if cmd_exists('nix-env') {
+		// means nix package manager is installed
+		// nothing to do
+		return
+	}
+	if platform_ == .ubuntu {
+		exec(cmd: 'apt-get update') or { return error('Could not update packages\nerror:\n${err}') }
+		return
+	} else if platform_ == .osx {
+		exec(cmd: 'brew update') or { return error('Could not update packages\nerror:\n${err}') }
+		return
+	} else if platform_ == .alpine {
+		exec(cmd: 'apk update') or { return error('Could not update packages\nerror:\n${err}') }
+		return
+	} else if platform_ == .arch {
+		exec(cmd: 'pacman -Syu --noconfirm') or {
+			return error('Could not update packages\nerror:\n${err}')
+		}
+		return
+	}
+	return error("Only ubuntu, alpine, arch and osx are supported for now. Found \"${platform_}\"")
+}
+
+// install one or more packages, using the right command per platform
+pub fn package_install(name_ string) !
{
+	names := texttools.to_array(name_)
+
+	// if cmd_exists('nix-env') {
+	// 	// means nix package manager is installed
+	// 	names_list := names.join(' ')
+	// 	console.print_header('package install: ${names_list}')
+	// 	exec(cmd: 'nix-env --install ${names_list}') or {
+	// 		return error('could not install package using nix:${names_list}\nerror:\n${err}')
+	// 	}
+	// 	return
+	// }
+
+	name := names.join(' ')
+	console.print_header('package install: ${name}')
+	platform_ := platform()
+	cpu := cputype()
+	if platform_ == .osx {
+		if cpu == .arm {
+			exec(cmd: 'arch --arm64 brew install ${name}') or {
+				return error('could not install package: ${name}\nerror:\n${err}')
+			}
+		} else {
+			exec(cmd: 'brew install ${name}') or {
+				return error('could not install package:${name}\nerror:\n${err}')
+			}
+		}
+	} else if platform_ == .ubuntu {
+		exec(
+			cmd: '
+			export TERM=xterm
+			export DEBIAN_FRONTEND=noninteractive
+			apt install -y ${name} -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --allow-downgrades --allow-remove-essential --allow-change-held-packages
+			'
+		) or { return error('could not install package:${name}\nerror:\n${err}') }
+	} else if platform_ == .alpine {
+		exec(cmd: 'apk add ${name}') or {
+			return error('could not install package:${name}\nerror:\n${err}')
+		}
+	} else if platform_ == .arch {
+		exec(cmd: 'pacman --noconfirm -Su ${name}') or {
+			return error('could not install package:${name}\nerror:\n${err}')
+		}
+	} else {
+		return error('Only ubuntu, alpine, arch and osx are supported for now')
+	}
+}
+
+// Remove a package using the appropriate command for each platform
+pub fn package_remove(name_ string) ! {
+	names := texttools.to_array(name_)
+	name := names.join(' ')
+	console.print_header('package remove: ${name}')
+	platform_ := platform()
+	cpu := cputype()
+
+	if platform_ == .osx {
+		if cpu == .arm {
+			exec(cmd: 'arch --arm64 brew uninstall ${name}', ignore_error: true)!
+		} else {
+			exec(cmd: 'brew uninstall ${name}', ignore_error: true)!
+		}
+	} else if platform_ == .ubuntu {
+		exec(
+			cmd: '
+			export TERM=xterm
+			export DEBIAN_FRONTEND=noninteractive
+			apt remove -y ${name} --allow-change-held-packages
+			apt autoremove -y
+			'
+			ignore_error: true
+		)!
+	} else if platform_ == .alpine {
+		exec(cmd: 'apk del ${name}', ignore_error: true)!
+	} else if platform_ == .arch {
+		exec(cmd: 'pacman --noconfirm -R ${name}', ignore_error: true)!
+	} else {
+		return error('Only ubuntu, alpine, arch and osx are supported for now')
+	}
}
diff --git a/lib/osal/package_test.v b/lib/osal/package_test.v
new file mode 100644
index 00000000..09c70ba2
--- /dev/null
+++ b/lib/osal/package_test.v
@@ -0,0 +1,42 @@
+module osal
+
+fn test_package_management() {
+	platform_ := platform()
+
+	if platform_ == .osx {
+		// Check if brew is installed
+		if !cmd_exists('brew') {
+			eprintln('WARNING: Homebrew is not installed.
Please install it to run package management tests on OSX.') + return + } + } + + // First ensure wget is not installed + package_remove('wget') or { + assert true // Ignore error if package wasn't installed + } + + // Verify wget is not installed + assert !cmd_exists('wget') + + // Update package list + package_refresh() or { + assert false, 'Failed to refresh package list: ${err}' + } + + // Install wget + package_install('wget') or { + assert false, 'Failed to install wget: ${err}' + } + + // Verify wget is now installed + assert cmd_exists('wget') + + // Clean up - remove wget + package_remove('wget') or { + assert false, 'Failed to remove wget: ${err}' + } + + // Verify wget is removed + assert !cmd_exists('wget') +} diff --git a/lib/osal/platform.v b/lib/osal/platform.v new file mode 100644 index 00000000..7b4ecfd4 --- /dev/null +++ b/lib/osal/platform.v @@ -0,0 +1,148 @@ +module osal + +import os +// import freeflowuniverse.herolib.ui.console +// Returns the enum value that matches the provided string for PlatformType + +pub fn platform_enum_from_string(platform string) PlatformType { + return match platform.to_lower() { + 'osx' { .osx } + 'ubuntu' { .ubuntu } + 'alpine' { .alpine } + 'arch' { .arch } + else { .unknown } + } +} + +pub enum PlatformType { + unknown + osx + ubuntu + alpine + arch + suse +} + +// Returns the enum value that matches the provided string for CPUType +pub fn cputype_enum_from_string(cpytype string) CPUType { + return match cpytype.to_lower() { + 'intel' { .intel } + 'arm' { .arm } + 'intel32' { .intel32 } + 'arm32' { .arm32 } + else { .unknown } + } +} + +pub enum CPUType { + unknown + intel + arm + intel32 + arm32 +} + +pub fn platform() PlatformType { + mut logger := get_logger() + mut platform_ := PlatformType.unknown + platform_ = platform_enum_from_string(memdb_get('platformtype')) + if platform_ != PlatformType.unknown { + return platform_ + } + if cmd_exists('sw_vers') { + platform_ = PlatformType.osx + } else if cmd_exists('apt-get') { + platform_ = PlatformType.ubuntu + } else if cmd_exists('apk') { + platform_ = PlatformType.alpine + } else if cmd_exists('pacman') { + platform_ = PlatformType.arch + } else { + logger.error('Unknown platform') + } + if platform_ != PlatformType.unknown { + memdb_set('platformtype', platform_.str()) + } + return platform_ +} + +pub fn cputype() CPUType { + mut logger := get_logger() + mut cputype_ := CPUType.unknown + cputype_ = cputype_enum_from_string(memdb_get('cputype')) + if cputype_ != CPUType.unknown { + return cputype_ + } + sys_info := execute_stdout('uname -m') or { + logger.error('Failed to execute uname to get the cputype: ${err}') + return CPUType.unknown + } + cputype_ = match sys_info.to_lower().trim_space() { + 'x86_64' { + CPUType.intel + } + 'arm64' { + CPUType.arm + } + 'aarch64' { + CPUType.arm + } + // TODO 32 bit ones! 
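+		// assumption: 'i386'/'i686' (intel32) and 'armv6l'/'armv7l' (arm32)
+		// would belong here once 32-bit support is added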
+		else {
+			logger.error('Unknown cpu type ${sys_info}')
+			CPUType.unknown
+		}
+	}
+
+	if cputype_ != CPUType.unknown {
+		memdb_set('cputype', cputype_.str())
+	}
+	return cputype_
+}
+
+pub fn is_osx() bool {
+	return platform() == .osx
+}
+
+pub fn is_osx_arm() bool {
+	return platform() == .osx && cputype() == .arm
+}
+
+pub fn is_osx_intel() bool {
+	return platform() == .osx && cputype() == .intel
+}
+
+pub fn is_ubuntu() bool {
+	return platform() == .ubuntu
+}
+
+pub fn is_linux() bool {
+	return platform() == .ubuntu || platform() == .arch || platform() == .suse
+		|| platform() == .alpine
+}
+
+pub fn is_linux_arm() bool {
+	// console.print_debug("islinux:${is_linux()} cputype:${cputype()}")
+	return is_linux() && cputype() == .arm
+}
+
+pub fn is_linux_intel() bool {
+	return is_linux() && cputype() == .intel
+}
+
+pub fn hostname() !string {
+	res := os.execute('hostname')
+	if res.exit_code > 0 {
+		return error("can't get hostname. Error:\n${res.output}")
+	}
+	return res.output.trim_space()
+}
+
+// e.g. systemd, bash, zinit
+pub fn initname() !string {
+	res := os.execute('ps -p 1 -o comm=')
+	if res.exit_code > 0 {
+		return error("can't get process with pid 1. Error:\n${res.output}")
+	}
+	return res.output.trim_space()
+}
diff --git a/lib/osal/platform_test.v b/lib/osal/platform_test.v
new file mode 100644
index 00000000..414735fc
--- /dev/null
+++ b/lib/osal/platform_test.v
@@ -0,0 +1,9 @@
+module osal
+
+fn test_platform() {
+	assert platform() != .unknown
+}
+
+fn test_cputype() {
+	assert cputype() != .unknown
+}
diff --git a/lib/osal/ps_tool.v b/lib/osal/ps_tool.v
new file mode 100644
index 00000000..99cb8c7d
--- /dev/null
+++ b/lib/osal/ps_tool.v
@@ -0,0 +1,230 @@
+module osal
+
+import time
+import os
+import math
+// import freeflowuniverse.herolib.ui.console
+
+pub enum PMState {
+	init
+	ok
+	old
+}
+
+@[heap]
+pub struct ProcessMap {
+pub mut:
+	processes []ProcessInfo
+	lastscan  time.Time
+	state     PMState
+	pids      []int
+}
+
+@[heap]
+pub struct ProcessInfo {
+pub mut:
+	cpu_perc f32
+	mem_perc f32
+	cmd      string
+	pid      int
+	ppid     int // parent pid
+	// resident memory
+	rss int
+}
+
+// returns a freshly scanned ProcessMap of all processes on the system
+pub fn processmap_get() !ProcessMap {
+	mut pm := ProcessMap{}
+	pm.scan()!
+	return pm
+}
+
+// get process info from 1 specific process
+// returns
+//```
+// pub struct ProcessInfo {
+// pub mut:
+// 	cpu_perc f32
+// 	mem_perc f32
+// 	cmd string
+// 	pid int
+// 	ppid int
+// 	//resident memory
+// 	rss int
+// }
+//```
+pub fn processinfo_get(pid int) !ProcessInfo {
+	mut pm := processmap_get()!
+	for pi in pm.processes {
+		if pi.pid == pid {
+			return pi
+		}
+	}
+	return error('Cannot find process with pid: ${pid}, to get process info from.')
+}
+
+pub fn processinfo_get_byname(name string) ![]ProcessInfo {
+	mut pm := processmap_get()!
+	mut res := []ProcessInfo{}
+	for pi in pm.processes {
+		// console.print_debug(pi.cmd)
+		if pi.cmd.contains(name) {
+			if pi.cmd.starts_with('sudo ') {
+				continue
+			}
+			if pi.cmd.to_lower().starts_with('screen ') {
+				continue
+			}
+			res << pi
+		}
+	}
+	return res
+}
+
+pub fn process_exists_byname(name string) !bool {
+	res := processinfo_get_byname(name)!
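+	// matching in processinfo_get_byname is substring-based on the full command
+	// line, so e.g. 'redis' also matches 'redis-server'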
+ return res.len > 0 +} + +pub fn process_exists(pid int) bool { + r := os.execute('kill -0 ${pid}') + if r.exit_code > 0 { + // return error('could not execute kill -0 ${pid}') + return false + } + return true +} + +// return the process and its children +pub fn processinfo_with_children(pid int) !ProcessMap { + mut pi := processinfo_get(pid)! + mut res := processinfo_children(pid)! + res.processes << pi + return res +} + +// get all children of 1 process +pub fn processinfo_children(pid int) !ProcessMap { + mut pm := processmap_get()! + mut res := []ProcessInfo{} + pm.children_(mut res, pid)! + return ProcessMap{ + processes: res + lastscan: pm.lastscan + state: pm.state + } +} + +@[params] +pub struct ProcessKillArgs { +pub mut: + name string + pid int +} + +// kill process and all the ones underneith +pub fn process_kill_recursive(args ProcessKillArgs) ! { + if args.name.len > 0 { + for pi in processinfo_get_byname(args.name)! { + process_kill_recursive(pid: pi.pid)! + } + return + } + if args.pid == 0 { + return error('need to specify pid or name') + } + if process_exists(args.pid) { + pm := processinfo_with_children(args.pid)! + for p in pm.processes { + os.execute('kill -9 ${p.pid}') + } + } +} + +fn (pm ProcessMap) children_(mut result []ProcessInfo, pid int) ! { + // console.print_debug("children: $pid") + for p in pm.processes { + if p.ppid == pid { + // console.print_debug("found parent: ${p}") + if result.filter(it.pid == p.pid).len == 0 { + result << p + pm.children_(mut result, p.pid)! // find children of the one we found + } + } + } +} + +pub fn (mut p ProcessInfo) str() string { + x := math.min(60, p.cmd.len) + subst := p.cmd.substr(0, x) + return 'pid:${p.pid:-7} parent:${p.ppid:-7} cmd:${subst}' +} + +fn (mut pm ProcessMap) str() string { + mut out := '' + for p in pm.processes { + out += '${p}\n' + } + return out +} + +fn (mut pm ProcessMap) scan() ! { + now := time.now().unix() + // only scan if we didn't do in last 5 seconds + if pm.lastscan.unix() > now - 5 { + // means scan is ok + if pm.state == PMState.ok { + return + } + } + + cmd := 'ps ax -o pid,ppid,stat,%cpu,%mem,rss,command' + res := os.execute(cmd) + + if res.exit_code > 0 { + return error('Cannot get process info \n${cmd}') + } + + pm.processes = []ProcessInfo{} + + // console.print_debug("DID SCAN") + for line in res.output.split_into_lines() { + if !line.contains('PPID') { + mut fields := line.fields() + if fields.len < 6 { + // console.print_debug(res) + // console.print_debug("SSS") + // console.print_debug(line) + // panic("ss") + continue + } + mut pi := ProcessInfo{} + pi.pid = fields[0].int() + pi.ppid = fields[1].int() + pi.cpu_perc = fields[3].f32() + pi.mem_perc = fields[4].f32() + pi.rss = fields[5].int() + fields.delete_many(0, 6) + pi.cmd = fields.join(' ') + // console.print_debug(pi.cmd) + if pi.pid !in pm.pids { + pm.processes << pi + pm.pids << pi.pid + } + } + } + + pm.lastscan = time.now() + pm.state = PMState.ok + + // console.print_debug(pm) +} + +pub fn whoami() !string { + res := os.execute('whoami') + if res.exit_code > 0 { + return error('Could not do whoami\n${res}') + } + return res.output.trim_space() +} diff --git a/lib/osal/readme.md b/lib/osal/readme.md new file mode 100644 index 00000000..985f3a60 --- /dev/null +++ b/lib/osal/readme.md @@ -0,0 +1,200 @@ +# Operating System Abstraction Layer (OSAL) + +A comprehensive operating system abstraction layer for V that provides platform-independent system operations, process management, and network utilities. 
+ +## Features + +- Platform detection and system information +- Process execution and management +- Network utilities (ping, TCP port testing) +- Environment variable handling +- File system operations +- SSH key management +- Profile path management + +## Platform Detection + +```v +import freeflowuniverse.herolib.osal + +// Get platform type +platform := osal.platform() +if platform == .osx { + // macOS specific code +} + +// Platform-specific checks +if osal.is_linux() { + // Linux specific code +} +if osal.is_osx_arm() { + // Apple Silicon specific code +} + +// CPU architecture +cpu := osal.cputype() +if cpu == .arm { + // ARM specific code +} + +// System information +hostname := osal.hostname()! +init_system := osal.initname()! // e.g., systemd, bash, zinit +``` + +## Process Execution + +The module provides flexible process execution with extensive configuration options: + +```v +// Simple command execution +job := osal.exec(cmd: 'ls -la')! +println(job.output) + +// Execute with error handling +job := osal.exec(Command{ + cmd: 'complex_command' + timeout: 3600 // timeout in seconds + retry: 3 // retry count + work_folder: '/tmp' // working directory + environment: { // environment variables + 'PATH': '/usr/local/bin' + } + stdout: true // show output + raise_error: true // raise error on failure +})! + +// Silent execution +output := osal.execute_silent('command')! + +// Interactive shell execution +osal.execute_interactive('bash command')! + +// Debug mode execution +output := osal.execute_debug('command')! +``` + +### Job Status and Error Handling + +```v +// Check job status +if job.status == .done { + println('Success!') +} else if job.status == .error_timeout { + println('Command timed out') +} + +// Error handling with specific error types +job := osal.exec(cmd: 'invalid_command') or { + match err.error_type { + .exec { println('Execution error') } + .timeout { println('Command timed out') } + .args { println('Invalid arguments') } + else { println(err) } + } + return +} +``` + +## Network Utilities + +### Ping + +```v +// Simple ping +result := osal.ping(address: '8.8.8.8')! +assert result == .ok + +// Advanced ping configuration +result := osal.ping(PingArgs{ + address: '8.8.8.8' + count: 3 // number of pings + timeout: 2 // timeout in seconds + retry: 1 // retry attempts +})! + +match result { + .ok { println('Host is reachable') } + .timeout { println('Host timed out') } + .unknownhost { println('Unknown host') } +} +``` + +### TCP Port Testing + +```v +// Test if port is open +is_open := osal.tcp_port_test(TcpPortTestArgs{ + address: '192.168.1.1' + port: 22 + timeout: 2000 // milliseconds +}) + +if is_open { + println('Port is open') +} + +// Get public IP address +pub_ip := osal.ipaddr_pub_get()! +println('Public IP: ${pub_ip}') +``` + +## Profile Management + +Manage system PATH and other profile settings: + +```v +// Add/remove paths from system PATH +osal.profile_path_add_remove( + paths2delete: 'go/bin', + paths2add: '~/hero/bin,~/usr/local/bin' +)! +``` + +## Environment Variables + +```v +// Get environment variable +value := osal.env_get('PATH')! + +// Set environment variable +osal.env_set('MY_VAR', 'value')! 
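+
+// a missing variable surfaces as an error, so a fallback can be given with an
+// or-block (variable name here is just an illustration)
+shell := osal.env_get('SHELL') or { '/bin/bash' }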
+ +// Check if environment variable exists +exists := osal.env_exists('MY_VAR') +``` + +## Notes + +- All commands are executed from temporary scripts in `/tmp/execscripts` +- Failed script executions are preserved for debugging +- Successful script executions are automatically cleaned up +- Platform-specific behavior is automatically handled +- Timeout and retry mechanisms are available for robust execution +- Environment variables and working directories can be specified per command +- Interactive and non-interactive modes are supported +- Debug mode provides additional execution information + +## Error Handling + +The module provides detailed error information: + +- Exit codes +- Standard output and error streams +- Execution time and duration +- Process status +- Retry counts +- Error types (execution, timeout, arguments) + +## Platform Support + +- macOS (Intel and ARM) +- Ubuntu +- Alpine Linux +- Arch Linux +- SUSE (partial) + +CPU architectures: +- Intel (x86_64) +- ARM (arm64/aarch64) +- 32-bit variants (intel32, arm32) diff --git a/lib/osal/rsync/readme.md b/lib/osal/rsync/readme.md new file mode 100644 index 00000000..fc506cd5 --- /dev/null +++ b/lib/osal/rsync/readme.md @@ -0,0 +1,6 @@ +to test + +```bash +echo 'mypasswd' > /tmp/passwd +rsync -avz --password-file=/tmp/passwd /local/path/ rsync://authorizeduser@yourserver/private +``` \ No newline at end of file diff --git a/lib/osal/rsync/rsync.v b/lib/osal/rsync/rsync.v new file mode 100644 index 00000000..9fe6ee9a --- /dev/null +++ b/lib/osal/rsync/rsync.v @@ -0,0 +1,61 @@ +module rsync + +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.osal + +@[params] +pub struct RsyncArgs { +pub mut: + source string + dest string + ipaddr_src string // e.g. root@192.168.5.5:33 (can be without root@ or :port) + ipaddr_dst string + delete bool // do we want to delete the destination + ignore []string // arguments to ignore e.g. ['*.pyc','*.bak'] + ignore_default bool = true // if set will ignore a common set + stdout bool = true + fast_rsync bool + sshkey string +} + +// flexible tool to sync files from to, does even support ssh . +// args: . +// ``` +// source string +// dest string +// delete bool //do we want to delete the destination +// ipaddr_src string //e.g. root@192.168.5.5:33 (can be without root@ or :port) +// ipaddr_dst string //can only use src or dst, not both +// ignore []string //arguments to ignore +// ignore_default bool = true //if set will ignore a common set +// stdout bool = true +// ``` +// . +// see https://github.com/freeflowuniverse/crystallib/blob/development/examples/pathlib.rsync/rsync_example.v +pub fn rsync(args_ RsyncArgs) ! { + mut args := args_ + if args.ipaddr_src.len == 0 { + pathlib.get(args.source) + } + args2 := pathlib.RsyncArgs{ + source: args.source + dest: args.dest + ipaddr_src: args.ipaddr_src + ipaddr_dst: args.ipaddr_dst + delete: args.delete + ignore: args.ignore + ignore_default: args.ignore_default + fast_rsync: args.fast_rsync + sshkey: args.sshkey + } + + // TODO: is only for ssh right now, we prob need support for a real ssh server as well + cmdoptions := pathlib.rsync_cmd_options(args2)! + cmd := 'rsync ${cmdoptions}' + $if debug { + console.print_debug('rsync command (osal):\n${cmd}') + } + // console.print_debug(cmd) + osal.exec(cmd: cmd, stdout: args_.stdout)! 
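+	// note: stdout is taken from the unmodified input args_, which is identical
+	// to args.stdout here since the copy is never mutated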
+} diff --git a/lib/osal/rsync/rsyncd.v b/lib/osal/rsync/rsyncd.v new file mode 100644 index 00000000..c14d221f --- /dev/null +++ b/lib/osal/rsync/rsyncd.v @@ -0,0 +1,60 @@ +module rsync + +pub struct RsyncD { +pub mut: + configpath string = '/etc/rsyncd.conf' + sites []RsyncSite + usermanager UserManager +} + +@[params] +pub struct RsyncSite { +pub mut: + name string + path string + comment string + readonly bool + list bool + auth string + secrets string +} + +pub fn rsyncd() !RsyncD { + mut um := usermanager()! + mut self := RsyncD{ + usermanager: um + } + self.load()! + return self +} + +// add site to the rsyncd config +pub fn (mut self RsyncD) site_add(args_ RsyncSite) ! { + _ := args_ + // self.sites[args.name]=RsyncSite{name:args.name,} +} + +// get all info from existing config file, populate the sites +pub fn (mut self RsyncD) load() ! { + // TODO: check rsync is installed if not use osal package manager to install + // TODO: populate sites in the struct +} + +pub fn (mut self RsyncD) generate() ! { + // TODO: generate a new config file (based on previous info on disk as well as new one) + // TODO: make sure we can add more than 1 user to the user manager + + self.reload()! +} + +fn (mut self RsyncD) reload() ! { + _ := ' + chmod 600 /etc/rsyncd.secrets + systemctl enable rsync + systemctl start rsync + ' + + // TODO: execute, maybe we should check its on linux and there is a systemd active, also prob we need to see if we need to start or restart + + // TODO: we should do a test using rsync +} diff --git a/lib/osal/rsync/templates/rsyncd.conf b/lib/osal/rsync/templates/rsyncd.conf new file mode 100644 index 00000000..46d9ab7d --- /dev/null +++ b/lib/osal/rsync/templates/rsyncd.conf @@ -0,0 +1,27 @@ +# /etc/rsyncd.conf +#TODO: make template generate to fill in the different sites +# Global settings +uid = nobody +gid = nogroup +use chroot = yes +max connections = 50 +log file = /var/log/rsyncd.log +pid file = /var/run/rsyncd.pid +lock file = /var/run/rsync.lock + +#needs to be loop over the sites, make sure dirs exist +[public] + path = ${dirpath} + comment = Public Read-Only Share + read only = yes + list = yes + auth users = + secrets file = /etc/rsyncd.secrets + +[private] + path = ${dirpath} + comment = Private Read-Write Share + read only = no + list = yes + auth users = authorizeduser + secrets file = /etc/rsyncd.secrets diff --git a/lib/osal/rsync/templates/rsyncd.secrets b/lib/osal/rsync/templates/rsyncd.secrets new file mode 100644 index 00000000..28a45d98 --- /dev/null +++ b/lib/osal/rsync/templates/rsyncd.secrets @@ -0,0 +1,2 @@ +#TODO: make template work to fill in the items with loop +authorizeduser:yourpassword \ No newline at end of file diff --git a/lib/osal/rsync/usermgmt.v b/lib/osal/rsync/usermgmt.v new file mode 100644 index 00000000..5d2b7dac --- /dev/null +++ b/lib/osal/rsync/usermgmt.v @@ -0,0 +1,59 @@ +module rsync + +import freeflowuniverse.herolib.core.pathlib + +pub struct UserManager { +pub mut: + configpath string = '/etc/rsyncd.secrets' + users map[string]User +} + +pub struct User { +pub mut: + name string + passwd string +} + +@[params] +pub struct UserArgs { +pub mut: + name string + passwd string +} + +pub fn (mut self UserManager) user_add(args_ UserArgs) ! { + mut args := args_ + self.users[args.name] = User{ + name: args.name + passwd: args.passwd + } +} + +pub fn usermanager() !UserManager { + mut self := UserManager{} + self.load()! + return self +} + +pub fn (mut self UserManager) load(args UserArgs) ! 
{ + mut p := pathlib.get_file(path: self.configpath, create: true)! + content := p.read()! + for line in content.split('\n') { + if line.trim_space() == '' { + continue + } + if line.contains(':') { + items := line.split(':') + if items.len != 2 { + return error('syntax error in ${self.configpath}.\n${line}') + } + self.user_add(name: items[0], passwd: items[1])! + } else { + return error('syntax error in ${self.configpath}.\n${line}') + } + } +} + +// generate the secrets config file +pub fn (mut self UserManager) generate() ! { +} diff --git a/lib/osal/screen/factory.v b/lib/osal/screen/factory.v new file mode 100644 index 00000000..2271f6d5 --- /dev/null +++ b/lib/osal/screen/factory.v @@ -0,0 +1,177 @@ +module screen + +// import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.core.texttools +// import freeflowuniverse.herolib.screen +import os +import time +import freeflowuniverse.herolib.ui.console + +@[heap] +pub struct ScreensFactory { +pub mut: + screens []Screen +} + +@[params] +pub struct ScreensNewArgs { +pub: + reset bool +} + +// return screen instance +pub fn new(args ScreensNewArgs) !ScreensFactory { + mut t := ScreensFactory{} + t.scan()! + if args.reset { + t.reset()! + } + return t +} + +pub fn init_screen_object(item_ map[string]string) Screen { + mut item := Screen{} + state_item := item_['state'] or { panic('bug') } + item.state = match state_item.trim('() ').to_lower() { + 'detached' { .detached } + else { .unknown } + } + pre := item_['pre'] or { panic('bug') } + item.pid = pre.all_before('.').trim_space().int() + item.name = pre.all_after('.').trim_space() + return item +} + +// loads screen screen, populate the object +pub fn (mut self ScreensFactory) scan() ! { + self.screens = []Screen{} + os.execute('screen -wipe > /dev/null 2>&1') // make sure its all clean + res := os.execute('screen -ls') + if res.exit_code > 1 { + return error('could not find screen or other error, make sure screen is installed.\n${res.output}') + } + if res.output.contains('No Sockets found') { + return + } + // there is stuff to parses + + res1 := texttools.remove_empty_lines(res.output) + .split_into_lines() + .filter(it.starts_with(' ') || it.starts_with('\t')) + .join_lines() + mut res2 := texttools.to_list_map('pre,state', res1, '').map(init_screen_object(it)) + for mut item in res2 { + if self.exists(item.name) { + return error('duplicate screen with name: ${item.name}') + } + self.screens << item + } + // console.print_debug(self.str()) +} + +pub struct ScreenAddArgs { +pub mut: + name string @[requred] + cmd string + reset bool + start bool = true + attach bool +} + +// print list of screen screens +pub fn (mut self ScreensFactory) add(args_ ScreenAddArgs) !Screen { + mut args := args_ + if args.cmd == '' { + args.cmd = '/bin/bash' + } + if args.name.len < 3 { + return error('name needs to be at least 3 chars.') + } + if self.exists(args.name) { + if args.reset { + self.kill(args.name)! + } else { + return self.get(args.name)! + } + } + self.screens << Screen{ + name: args.name + cmd: args.cmd + } + if args.start { + self.start(args.name)! + } + mut myscreen := self.get(args.name) or { + return error('couldnt start screen with name ${args.name}, was not found afterwards.\ncmd:${args.cmd}\nScreens found.\n${self.str()}') + } + + if args.attach { + myscreen.attach()! 
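+		// attach hands the terminal over to the screen session until the user
+		// detaches again (ctrl-a d)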
+ } + return myscreen +} + +// print list of screen screens +pub fn (mut self ScreensFactory) exists(name string) bool { + for mut screen in self.screens { + if screen.name == name { + return true + } + } + return false +} + +pub fn (mut self ScreensFactory) get(name string) !Screen { + for mut screen in self.screens { + if screen.name == name { + return screen + } + } + // print_backtrace() + return error('couldnt find screen with name ${name}\nScreens found.\n${self.str()}') +} + +pub fn (mut self ScreensFactory) start(name string) ! { + mut s := self.get(name) or { + return error("can't start screen with name:${name}, couldn't find.\nScreens found.\n${self.str()}") + } + s.start_()! + for { + self.scan()! + mut s2 := self.get(name) or { + return error('couldnt start screen with name ${name}, was not found in screen scan.\ncmd:\n${s.cmd}\nScreens found.\n${self.str()}') + } + if s2.pid > 0 { + return + } + console.print_debug(s2.str()) + time.sleep(100000) + } +} + +pub fn (mut self ScreensFactory) kill(name string) ! { + if self.exists(name) { + mut s := self.get(name) or { return } + s.kill_()! + } + self.scan()! +} + +// print list of screen screens +pub fn (mut self ScreensFactory) reset() ! { + for mut screen in self.screens { + screen.kill_()! + } + self.scan()! +} + +pub fn (mut self ScreensFactory) str() string { + if self.screens.len == 0 { + return 'No screens found.' + } + mut out := '# Screens\n' + for s in self.screens { + out += '${s}\n' + } + return out +} diff --git a/lib/osal/screen/readme.md b/lib/osal/screen/readme.md new file mode 100644 index 00000000..f7ef222b --- /dev/null +++ b/lib/osal/screen/readme.md @@ -0,0 +1,13 @@ +# screen + +```bash +#to see sessions which have been created + +screen -ls + +There is a screen on: + 3230.test (Detached) + +#now to attach to this screen +screen -r test +``` \ No newline at end of file diff --git a/lib/osal/screen/screen.v b/lib/osal/screen/screen.v new file mode 100644 index 00000000..20968538 --- /dev/null +++ b/lib/osal/screen/screen.v @@ -0,0 +1,142 @@ +module screen + +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.osal +import os +import time + +@[heap] +struct Screen { +mut: + cmd string + name string + pid int + state ScreenState + // factory ?&ScreensFactory @[skip; str: skip] +} + +enum ScreenState { + unknown + detached +} + +// checks whether screen server is running +pub fn (mut t Screen) is_running() !bool { + panic('implement') + // res := osal.exec(cmd: 'screen info', stdout: false, name: 'screen_info', raise_error: false) or { + // panic('bug') + // } + // if res.error.contains('no server running') { + // // console.print_debug(" TMUX NOT RUNNING") + // return false + // } + // if res.error.contains('no current client') { + // return true + // } + // if res.exit_code > 0 { + // return error('could not execute screen info.\n${res}') + // } + return true +} + +pub enum ScreenStatus { + unknown + active + inactive +} + +// Method to check the status of a screen process +pub fn (self Screen) status() !ScreenStatus { + panic('implement') + // // Command to list screen sessions + // cmd := 'screen -ls' + // response := osal.execute_silent(cmd)! + + // // Check if the screen session exists + // if !response.contains(self.name) { + // return .inactive + // } + + // // Command to send a dummy command to the screen session and check response + // cmd_check := 'screen -S ${self.name} -X eval "stuff \\"\\003\\"; sleep 0.1; stuff \\"ps\\n\\""' + // osal.execute_silent(cmd_check)! 
+ + // // Check if the process is running in the screen session + // cmd_ps := 'screen -S ${self.name} -X hardcopy -h /tmp/screen_output; cat /tmp/screen_output | grep "${self.name}"' + // ps_response := osal.execute_silent(cmd_ps)! + + // return parse_screen_process_status(ps_response) +} + +// Function to parse screen process status output +fn parse_screen_process_status(output string) ScreenStatus { + lines := output.split_into_lines() + for line in lines { + if line.contains('SCREEN') || line.contains('PID') { + return .active + } + } + return .inactive +} + +fn (mut self Screen) kill_() ! { + // console.print_debug('kill screen: ${self}') + if self.pid == 0 || self.pid < 5 { + return error("pid was <5 for ${self}, can't kill") + } + osal.process_kill_recursive(pid: self.pid)! + res := os.execute('export TERM=xterm-color && screen -X -S ${self.name} kill > /dev/null 2>&1') + if res.exit_code > 1 { + return error('could not kill a screen.\n${res.output}') + } + time.sleep(100 * time.millisecond) // 0.1 sec wait + os.execute('screen -wipe > /dev/null 2>&1') + // self.scan()! +} + +// fn (mut self Screen) scan() ! { +// mut f:=self.factory or {panic("bug, no factory attached to screen.")} +// f.scan(false)! +// } + +pub fn (mut self Screen) attach() ! { + cmd := 'screen -r ${self.pid}.${self.name}' + osal.execute_interactive(cmd)! +} + +pub fn (mut self Screen) cmd_send(cmd string) ! { + mut cmd2 := "screen -S ${self.name} -p 0 -X stuff \"${cmd} \n\" " + if osal.is_osx() { + cmd2 = "screen -S ${self.name} -p 0 -X stuff \"${cmd}\"\$'\n' " + } + res := os.execute(cmd2) + if res.exit_code > 1 { + return error('could not send screen command.\n${cmd2}\n${res.output}') + } +} + +pub fn (mut self Screen) str() string { + green := console.color_fg(.green) + yellow := console.color_fg(.yellow) + reset := console.reset + return ' - screen:${green}${self.name:-20}${reset} pid:${yellow}${self.pid:-10}${reset} state:${green}${self.state}${reset}' +} + +fn (mut self Screen) start_() ! { + if self.pid != 0 { + return + } + if self.name.len == 0 { + return error('screen name needs to exist.') + } + if self.cmd == '' { + self.cmd = '/bin/bash' + } + cmd := 'export TERM=xterm-color && screen -dmS ${self.name} ${self.cmd}' + // console.print_debug(" startcmd:'${cmd}'") + res := os.execute(cmd) + // console.print_debug(res) + if res.exit_code > 1 { + return error('could not find screen or other error, make sure screen is installed.\n${res.output}') + } +} diff --git a/lib/osal/screen/screen_test.v b/lib/osal/screen/screen_test.v new file mode 100644 index 00000000..52df7876 --- /dev/null +++ b/lib/osal/screen/screen_test.v @@ -0,0 +1,17 @@ +module screen + +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.osal +import os +import time + +pub fn testsuite_begin() ! { + mut screen_factory := new(reset: true)! +} + +pub fn test_screen_status() ! { + mut screen_factory := new()! + mut screen := screen_factory.add(name: 'testservice', cmd: 'redis-server')! + status := screen.status()! 
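+	// whether the session reports .active depends on redis-server being
+	// available on the test host, hence the assert below stays disabled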
+ // assert status == .active +} diff --git a/lib/osal/sleep.v b/lib/osal/sleep.v new file mode 100644 index 00000000..88698540 --- /dev/null +++ b/lib/osal/sleep.v @@ -0,0 +1,8 @@ +module osal + +import time + +// sleep in seconds +pub fn sleep(duration int) { + time.sleep(time.second * duration) +} diff --git a/lib/osal/ssh.v b/lib/osal/ssh.v new file mode 100644 index 00000000..23b71882 --- /dev/null +++ b/lib/osal/ssh.v @@ -0,0 +1,86 @@ +module osal + +import freeflowuniverse.herolib.core.pathlib +import os + +@[params] +pub struct SSHConfig { +pub: + directory string = os.join_path(os.home_dir(), '.ssh') +} + +// Returns a specific SSH key with the given name from the default SSH directory (~/.ssh) +pub fn get_ssh_key(key_name string, config SSHConfig) ?SSHKey { + mut ssh_dir := pathlib.get_dir(path: config.directory) or { return none } + + list := ssh_dir.list(files_only: true) or { return none } + for file in list.paths { + if file.name() == key_name { + return SSHKey{ + name: file.name() + directory: ssh_dir.path + } + } + } + + return none +} + +// Lists SSH keys in the default SSH directory (~/.ssh) and returns an array of SSHKey structs +fn list_ssh_keys(config SSHConfig) ![]SSHKey { + mut ssh_dir := pathlib.get_dir(path: config.directory) or { + return error('Error getting ssh directory: ${err}') + } + + mut keys := []SSHKey{} + list := ssh_dir.list(files_only: true) or { + return error('Failed to list files in SSH directory') + } + + for file in list.paths { + if file.extension() == 'pub' || file.name().starts_with('id_') { + keys << SSHKey{ + name: file.name() + directory: ssh_dir.path + } + } + } + + return keys +} + +// Creates a new SSH key pair to the specified directory +pub fn new_ssh_key(key_name string, config SSHConfig) !SSHKey { + ssh_dir := pathlib.get_dir( + path: config.directory + create: true + ) or { return error('Error getting SSH directory: ${err}') } + + // Paths for the private and public keys + priv_key_path := os.join_path(ssh_dir.path, key_name) + pub_key_path := '${priv_key_path}.pub' + + // Check if the key already exists + if os.exists(priv_key_path) || os.exists(pub_key_path) { + return error("Key pair already exists with the name '${key_name}'") + } + + panic('implement shhkeygen logic') + // Generate a random private key (for demonstration purposes) + // Replace this with actual key generation logic (e.g., calling `ssh-keygen` or similar) + // private_key_content := '-----BEGIN PRIVATE KEY-----\n${rand.string(64)}\n-----END PRIVATE KEY-----' + // public_key_content := 'ssh-rsa ${rand.string(64)} user@host' + + // Save the keys to their respective files + // os.write_file(priv_key_path, private_key_content) or { + // return error("Failed to write private key: ${err}") + // } + // os.write_file(pub_key_path, public_key_content) or { + // return error("Failed to write public key: ${err}") + // } + + return SSHKey{ + name: key_name + directory: ssh_dir.path + } +} diff --git a/lib/osal/ssh_key.v b/lib/osal/ssh_key.v new file mode 100644 index 00000000..5b12bfcc --- /dev/null +++ b/lib/osal/ssh_key.v @@ -0,0 +1,41 @@ +module osal + +import freeflowuniverse.herolib.core.pathlib +import os + +@[noinit] +pub struct SSHKey { +pub: + name string + directory string +} + +// returns the public ssh key's path of the keypair +pub fn (key SSHKey) public_key_path() !pathlib.Path { + path_str := os.join_path(key.directory, '${key.name}.pub') + return pathlib.get_file(path: path_str) or { + return error('Failed to get public key path: ${err}') + } +} + +// returns 
the private ssh key's path of the keypair
+pub fn (key SSHKey) private_key_path() !pathlib.Path {
+	path_str := os.join_path(key.directory, '${key.name}')
+	return pathlib.get_file(path: path_str) or {
+		return error('Failed to get private key path: ${err}')
+	}
+}
+
+// returns the public ssh key of the keypair
+pub fn (key SSHKey) public_key() !string {
+	mut path := key.public_key_path()!
+	content := path.read()!
+	return content
+}
+
+// returns the private ssh key of the keypair
+pub fn (key SSHKey) private_key() !string {
+	mut path := key.private_key_path()!
+	content := path.read()!
+	return content
+}
diff --git a/lib/osal/sshagent/factory.v b/lib/osal/sshagent/factory.v
new file mode 100644
index 00000000..32e9b427
--- /dev/null
+++ b/lib/osal/sshagent/factory.v
@@ -0,0 +1,32 @@
+module sshagent
+
+import os
+import freeflowuniverse.herolib.core.pathlib
+
+@[params]
+pub struct SSHAgentNewArgs {
+pub mut:
+	homepath string
+}
+
+pub fn new(args_ SSHAgentNewArgs) !SSHAgent {
+	mut args := args_
+	if args.homepath.len == 0 {
+		args.homepath = '${os.home_dir()}/.ssh'
+	}
+
+	mut agent := SSHAgent{
+		homepath: pathlib.get_dir(path: args.homepath, create: true)!
+	}
+	res := os.execute('ssh-add -l')
+	if res.exit_code == 0 {
+		agent.active = true
+	}
+	agent.init()! // loads the keys known on fs and in ssh-agent
+	return agent
+}
+
+pub fn loaded() bool {
+	mut agent := new() or { panic(err) }
+	return agent.active
+}
diff --git a/lib/osal/sshagent/get.v b/lib/osal/sshagent/get.v
new file mode 100644
index 00000000..62724b88
--- /dev/null
+++ b/lib/osal/sshagent/get.v
@@ -0,0 +1,55 @@
+module sshagent
+
+import freeflowuniverse.herolib.core.texttools
+
+@[params]
+pub struct KeyGetArgs {
+pub mut:
+	pubkey string
+	// privkey string
+	// privkey_path string
+	name string
+}
+
+pub fn (mut agent SSHAgent) get(args_ KeyGetArgs) ?SSHKey {
+	mut args := args_
+	args.pubkey = args.pubkey.trim_space()
+	args.name = texttools.name_fix(args.name)
+	for mut key in agent.keys {
+		mut found := false
+		if args.name.len > 0 && key.name == args.name {
+			found = true
+		}
+		if args.pubkey.len > 0 && key.pubkey == args.pubkey {
+			found = true
+		}
+		if found {
+			return key
+		}
+	}
+	return none
+}
+
+fn (mut agent SSHAgent) pop(pubkey_ string) {
+	mut x := 0
+	mut result := 9999
+	for key in agent.keys {
+		if key.pubkey == pubkey_ {
+			result = x
+			break
+		}
+		x += 1
+	}
+	if result != 9999 {
+		if agent.keys.len > result {
+			agent.keys.delete(x)
+		} else {
+			panic('bug')
+		}
+	}
+}
+
+pub fn (mut agent SSHAgent) exists(args KeyGetArgs) bool {
+	agent.get(args) or { return false }
+	return true
+}
diff --git a/lib/osal/sshagent/interactive.v b/lib/osal/sshagent/interactive.v
new file mode 100644
index 00000000..9abcec2e
--- /dev/null
+++ b/lib/osal/sshagent/interactive.v
@@ -0,0 +1,128 @@
+module sshagent
+
+// import freeflowuniverse.herolib.ui.console
+
+// checks if there is exactly one ssh key in the ssh-agent,
+// or if not, whether there is exactly 1 ssh key in ${agent.homepath.path}/; if yes it will be loaded
+// if we were able to determine the key to use, it is returned here
+// pub fn load_interactive() ! {
+// 	mut pubkeys := pubkeys_get()
+// 	mut c := console.UIConsole{}
+// 	pubkeys.map(listsplit)
+// 	if pubkeys.len == 1 {
+// 		c.ask_yesno(
+// 			description: 'We found sshkey ${pubkeys[0]} in sshagent, want to use this one?'
+// 		)!
+// 		{
+// 			key_load(pubkeys[0])!
+// return pubkeys[0] +// } +// } +// if pubkeys.len > 1 { +// if c.ask_yesno( +// description: 'We found more than 1 sshkey in sshagent, want to use one of those!' +// )! +// { +// // keytouse := console.ask_dropdown( +// // items: pubkeys +// // description: 'Please choose the ssh key you want to use' +// // ) +// // key_load(keytouse)! +// // return keytouse +// } +// } + +// // now means nothing in ssh-agent, lets see if we find 1 key in .ssh directory +// mut sshdirpath := pathlib.get_dir(path: '${os.home_dir()}/.ssh', create: true)! + +// mut pubkeys := []string{} +// pl := sshdirpath.list(recursive: false)! +// for p in pl.paths { +// if p.path.ends_with('.pub') { +// pubkeys << p.path.replace('.pub', '') +// } +// } +// // console.print_debug(keypaths) + +// if pubkeys.len == 1 { +// if c.ask_yesno( +// description: 'We found sshkey ${pubkeys[0]} in ${agent.homepath.path} dir, want to use this one?' +// )! +// { +// key_load(pubkeys[0])! +// return pubkeys[0] +// } +// } +// if pubkeys.len > 1 { +// if c.ask_yesno( +// description: 'We found more than 1 sshkey in ${agent.homepath.path} dir, want to use one of those?' +// )! +// { +// // keytouse := console.ask_dropdown( +// // items: pubkeys +// // description: 'Please choose the ssh key you want to use' +// // ) +// // key_load(keytouse)! +// // return keytouse +// } +// } + +// will see if there is one ssh key in sshagent +// or if not, if there is 1 ssh key in ${agent.homepath.path}/ if yes will return +// if we were able to define the key to use, it will be returned here +// pub fn pubkey_guess() !string { +// pubkeys := pubkeys_get() +// if pubkeys.len == 1 { +// return pubkeys[0] +// } +// if pubkeys.len > 1 { +// return error('There is more than 1 ssh-key loaded in ssh-agent, cannot identify which one to use.') +// } +// // now means nothing in ssh-agent, lets see if we find 1 key in .ssh directory +// mut sshdirpath := pathlib.get_dir(path: '${os.home_dir()}/.ssh', create: true)! + +// // todo: use ourregex field to nly list .pub files +// mut fl := sshdirpath.list()! +// mut sshfiles := fl.paths +// mut keypaths := sshfiles.filter(it.path.ends_with('.pub')) +// // console.print_debug(keypaths) + +// if keypaths.len == 1 { +// keycontent := keypaths[0].read()! +// privkeypath := keypaths[0].path.replace('.pub', '') +// key_load(privkeypath)! +// return keycontent +// } +// if keypaths.len > 1 { +// return error('There is more than 1 ssh-key in your ${agent.homepath.path} dir, could not automatically load.') +// } +// return error('Could not find sshkey in your ssh-agent as well as in your ${agent.homepath.path} dir, please generate an ssh-key') +// } + +// if c.ask_yesno(description: 'Would you like to generate a new key?') { +// // name := console.ask_question(question: 'name', minlen: 3) +// // passphrase := console.ask_question(question: 'passphrase', minlen: 5) + +// // keytouse := key_generate(name, passphrase)! + +// // if console.ask_yesno(description:"Please acknowledge you will remember your passphrase for ever (-: ?"){ +// // key_load(keytouse)? +// // return keytouse +// // }else{ +// // return error("Cannot continue, did not find sshkey to use") +// // } +// // key_load_with_passphrase(keytouse, passphrase)! +// }! +// return error('Cannot continue, did not find sshkey to use') + +// // url_github_add := "https://library.threefold.me/info/publishtools/#/sshkey_github" + +// // osal.execute_interactive("open $url_github_add")? 
+
+// if console.ask_yesno(description:"Did you manage to add the github key to this repo ?"){
+// 	console.print_debug(" - CONGRATS: your sshkey is now loaded.")
+// }
+
+// 	return keytouse
+// }
diff --git a/lib/osal/sshagent/readme.md b/lib/osal/sshagent/readme.md
new file mode 100644
index 00000000..9e65cd4f
--- /dev/null
+++ b/lib/osal/sshagent/readme.md
@@ -0,0 +1,44 @@
+## ssh agent
+
+```v
+import freeflowuniverse.herolib.osal.sshagent
+
+mut agent := sshagent.new()!
+
+privkey := '
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACDXf9Z/2AH8/8a1ppagCplQdhWyQ8wZAieUw3nNcxsDiQAAAIhb3ybRW98m
+0QAAAAtzc2gtZWQyNTUxOQAAACDXf9Z/2AH8/8a1ppagCplQdhWyQ8wZAieUw3nNcxsDiQ
+AAAEC+fcDBPqdJHlJOQJ2zXhU2FztKAIl3TmWkaGCPnyts49d/1n/YAfz/xrWmlqAKmVB2
+FbJDzBkCJ5TDec1zGwOJAAAABWJvb2tz
+-----END OPENSSH PRIVATE KEY-----
+'
+
+// make sure the chosen name is the same as the original name of the key
+mut sshkey := agent.add('mykey', privkey)!
+
+sshkey.forget()!
+
+```
+
+### hero
+
+There is also a hero command:
+
+```js
+//will add the key and load (at this stage no support for passphrases)
+!!sshagent.key_add name:'myname'
+    privkey:'
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACDXf9Z/2AH8/8a1ppagCplQdhWyQ8wZAieUw3nNcxsDiQAAAIhb3ybRW98m
+    0QAAAAtzc2gtZWQysdsdsddsdsdsdsdsdsd8/8a1ppagCplQdhWyQ8wZAieUw3nNcxsDiQ
+    AAAEC+fcDBPqdJHlJOQJ2zXhU2FztKAIl3TmWkaGCPnyts49d/1n/YAfz/xrWmlqAKmVB2
+    FbJDzBkCJ5TDec1zGwOJAAAABWJvb2tz
+    -----END OPENSSH PRIVATE KEY-----
+    '
+
+```
+
diff --git a/lib/osal/sshagent/sshagent.v b/lib/osal/sshagent/sshagent.v
new file mode 100644
index 00000000..6cc30d90
--- /dev/null
+++ b/lib/osal/sshagent/sshagent.v
@@ -0,0 +1,186 @@
+module sshagent
+
+import os
+import freeflowuniverse.herolib.core.pathlib
+// import freeflowuniverse.herolib.ui.console
+
+@[heap]
+pub struct SSHAgent {
+pub mut:
+	keys     []SSHKey
+	active   bool
+	homepath pathlib.Path
+}
+
+// get all keys from sshagent and from the local .ssh dir
+pub fn (mut agent SSHAgent) init() ! {
+	// first get keys out of ssh-add
+	agent.keys = []SSHKey{}
+	res := os.execute('ssh-add -L')
+	if res.exit_code == 0 {
+		for line in res.output.split('\n') {
+			if line.trim(' ') == '' {
+				continue
+			}
+			if line.contains(' ') {
+				splitted := line.split(' ')
+				if splitted.len < 2 {
+					panic('bug')
+				}
+				pubkey := splitted[1]
+				mut sshkey := SSHKey{
+					pubkey: pubkey
+					agent: &agent
+					loaded: true
+				}
+				if splitted[0].contains('ed25519') {
+					sshkey.cat = .ed25519
+					if splitted.len > 2 {
+						sshkey.email = splitted[2] or { panic('bug') }
+					}
+				} else if splitted[0].contains('rsa') {
+					sshkey.cat = .rsa
+				} else {
+					panic('bug: implement other cat for ssh-key.\n${line}')
+				}
+
+				if !(agent.exists(pubkey: pubkey)) {
+					// $if debug{console.print_debug("- add from agent: ${sshkey}")}
+					agent.keys << sshkey
+				}
+			}
+		}
+	}
+
+	// now get them from the filesystem
+	mut fl := agent.homepath.list()!
+	mut sshfiles := fl.paths.clone()
+	mut pubkeypaths := sshfiles.filter(it.path.ends_with('.pub'))
+	for mut pkp in pubkeypaths {
+		mut c := pkp.read()!
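+		// a .pub file has the form '<type> <base64-key> [comment/email]'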
+		c = c.replace('  ', ' ').replace('  ', ' ') // collapse double (and triple) spaces; needs to be done 2x
+		splitted := c.trim_space().split(' ')
+		if splitted.len < 2 {
+			panic('bug')
+		}
+		mut name := pkp.name()
+		name = name[0..(name.len - 4)]
+		pubkey2 := splitted[1]
+		// the pop makes sure the key is removed from keys in agent, this means we can add later
+		mut sshkey2 := agent.get(pubkey: pubkey2) or {
+			SSHKey{
+				name: name
+				pubkey: pubkey2
+				agent: &agent
+			}
+		}
+		agent.pop(sshkey2.pubkey)
+		sshkey2.name = name
+		if splitted[0].contains('ed25519') {
+			sshkey2.cat = .ed25519
+		} else if splitted[0].contains('rsa') {
+			sshkey2.cat = .rsa
+		} else {
+			panic('bug: implement other cat for ssh-key')
+		}
+		if splitted.len > 2 {
+			sshkey2.email = splitted[2]
+		}
+		// $if debug{console.print_debug("- add from fs: ${sshkey2}")}
+		agent.keys << sshkey2
+	}
+}
+
+// returns path to sshkey
+pub fn (mut agent SSHAgent) generate(name string, passphrase string) !SSHKey {
+	dest := '${agent.homepath.path}/${name}'
+	if os.exists(dest) {
+		os.rm(dest)!
+	}
+	cmd := 'ssh-keygen -t ed25519 -f ${dest} -P ${passphrase} -q'
+	// console.print_debug(cmd)
+	rc := os.execute(cmd)
+	if rc.exit_code != 0 {
+		return error('Could not generate sshkey.\n${rc}')
+	}
+	agent.init()!
+	return agent.get(name: name) or { panic(err) }
+}
+
+// unload all ssh keys
+pub fn (mut agent SSHAgent) reset() ! {
+	if true {
+		panic('reset_ssh')
+	}
+	res := os.execute('ssh-add -D')
+	if res.exit_code > 0 {
+		return error('cannot reset sshkeys.')
+	}
+	agent.init()! // should now be empty for loaded keys
+}
+
+// load the key; the key is content (private key) .
+// a name is required
+pub fn (mut agent SSHAgent) add(name string, privkey_ string) !SSHKey {
+	mut privkey := privkey_
+	path := '${agent.homepath.path}/${name}'
+	if os.exists(path) {
+		os.rm(path)!
+	}
+	if os.exists('${path}.pub') {
+		os.rm('${path}.pub')!
+	}
+	if !privkey.ends_with('\n') {
+		privkey += '\n'
+	}
+	os.write_file(path, privkey)!
+	os.chmod(path, 0o600)!
+	res4 := os.execute('ssh-keygen -y -f ${path} > ${path}.pub')
+	if res4.exit_code > 0 {
+		return error('cannot generate pubkey ${path}.\n${res4.output}')
+	}
+	return agent.load(path)!
+}
+
+// load key starting from path to private key
+pub fn (mut agent SSHAgent) load(keypath string) !SSHKey {
+	if !os.exists(keypath) {
+		return error('cannot find sshkey: ${keypath}')
+	}
+	if keypath.ends_with('.pub') {
+		return error('can only load private keys')
+	}
+	name := keypath.split('/').last()
+	os.chmod(keypath, 0o600)!
+	res := os.execute('ssh-add ${keypath}')
+	if res.exit_code > 0 {
+		return error('cannot add ssh-key with path ${keypath}.\n${res.output}')
+	}
+	agent.init()!
+	return agent.get(name: name) or {
+		panic("can't find sshkey with name:'${name}' from agent.\n${err}")
+	}
+}
+
+// forget the specified key
+pub fn (mut agent SSHAgent) forget(name string) ! {
+	if true {
+		panic('reset_ssh')
+	}
+	mut key := agent.get(name: name) or { return }
+	agent.pop(key.pubkey)
+	key.forget()!
+} + +pub fn (mut agent SSHAgent) str() string { + mut out := []string{} + out << '\n## SSHAGENT:\n' + for mut key in agent.keys { + out << key.str() + } + return out.join_lines() + '\n' +} + +pub fn (mut agent SSHAgent) keys_loaded() ![]SSHKey { + return agent.keys.filter(it.loaded) +} diff --git a/lib/osal/sshagent/sshkey.v b/lib/osal/sshagent/sshkey.v new file mode 100644 index 00000000..3b51b278 --- /dev/null +++ b/lib/osal/sshagent/sshkey.v @@ -0,0 +1,88 @@ +module sshagent + +import os +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.ui.console + +@[heap] +pub struct SSHKey { +pub mut: + name string + pubkey string + loaded bool + email string + agent &SSHAgent @[skip; str: skip] + cat SSHKeyCat +} + +pub enum SSHKeyCat { + ed25519 + rsa +} + +pub fn (mut key SSHKey) keypath() !pathlib.Path { + if key.name.len == 0 { + return error('cannot have key name empty to get path.') + } + return key.agent.homepath.file_get_new('${key.name}')! +} + +pub fn (mut key SSHKey) keypath_pub() !pathlib.Path { + if key.name.len == 0 { + return error('cannot have key name empty to get path.') + } + mut p := key.agent.homepath.file_get_new('${key.name}.pub')! + if !(os.exists('${key.agent.homepath.path}/${key.name}.pub')) { + p.write(key.pubkey)! + } + return p +} + +pub fn (mut key SSHKey) keypub() !string { + mut p := key.keypath_pub()! + return p.read()! +} + +// load the key, they key is content, other keys will be unloaded +pub fn (mut key SSHKey) forget() ! { + if key.loaded == false { + return + } + mut keypath := key.keypath_pub() or { + return error('keypath not set or known on sshkey: ${key}') + } + if !os.exists(keypath.path) { + return error('cannot find sshkey: ${keypath}') + } + res := os.execute('ssh-add -d ${keypath.path}') + if res.exit_code > 0 { + return error('cannot forget ssh-key with path ${keypath.path}') + } + key.agent.init()! +} + +pub fn (mut key SSHKey) str() string { + patho := key.keypath_pub() or { pathlib.Path{} } + mut l := ' ' + if key.loaded { + l = 'L' + } + return '${key.name:-15} : ${l} : ${key.cat:-8} : ${key.email:-25} : ${patho.path}' +} + +pub fn (mut key SSHKey) load() ! { + $if debug { + console.print_debug(" - sshkey load: '${key}'") + } + if key.name.len == 0 { + return error('can only load keys which are on filesystem and as such have a name.') + } + patho := key.keypath() or { + return error('cannot load because privkey not on fs.\n${err}\n${key}') + } + res := os.execute('ssh-add ${patho.path}') + if res.exit_code > 0 { + return error('cannot add ssh-key with path ${patho.path}.\n${res}') + } + key.agent.init()! 
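+	// re-scan agent and filesystem so this key now shows up with loaded == true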
diff --git a/lib/osal/sshagent/tools.v b/lib/osal/sshagent/tools.v
new file mode 100644
index 00000000..451ca117
--- /dev/null
+++ b/lib/osal/sshagent/tools.v
@@ -0,0 +1,12 @@
+module sshagent
+
+// fn listsplit(key string) string {
+// 	if key.trim(' ') == '' {
+// 		return ''
+// 	}
+// 	if key.contains(' ') {
+// 		splitted := key.split(' ')
+// 		return splitted[splitted.len - 1].replace('.pub', '')
+// 	}
+// 	return key
+// }
diff --git a/lib/osal/systemd/journalctl.v b/lib/osal/systemd/journalctl.v
new file mode 100644
index 00000000..9bd7c5c9
--- /dev/null
+++ b/lib/osal/systemd/journalctl.v
@@ -0,0 +1,15 @@
+module systemd
+
+import freeflowuniverse.herolib.osal
+
+pub struct JournalArgs {
+pub:
+	service string // name of the service for which logs will be retrieved
+	limit   int = 100 // number of last log lines to be shown
+}
+
+pub fn journalctl(args JournalArgs) !string {
+	cmd := 'journalctl --no-pager -n ${args.limit} -u ${name_fix(args.service)}'
+	response := osal.execute_silent(cmd) or { return err }
+	return response
+}
diff --git a/lib/osal/systemd/readme.md b/lib/osal/systemd/readme.md
new file mode 100644
index 00000000..8ad0c60b
--- /dev/null
+++ b/lib/osal/systemd/readme.md
@@ -0,0 +1,7 @@
+# a sal to work with systemd
+
+> only the basics are implemented, as needed for our installers
+
+for an example see crystallib/examples/...
+
diff --git a/lib/osal/systemd/systemd.v b/lib/osal/systemd/systemd.v
new file mode 100644
index 00000000..9c6f0f90
--- /dev/null
+++ b/lib/osal/systemd/systemd.v
@@ -0,0 +1,184 @@
+module systemd
+
+import freeflowuniverse.herolib.osal
+import freeflowuniverse.herolib.core.pathlib
+import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.ui.console
+
+__global (
+	systemd_global []&Systemd
+)
+
+@[heap]
+pub struct Systemd {
+pub mut:
+	processes []&SystemdProcess
+	path      pathlib.Path
+	path_cmd  pathlib.Path
+	status    SystemdFactoryStatus
+}
+
+pub enum SystemdFactoryStatus {
+	init
+	ok
+	error
+}
+
+pub fn new() !&Systemd {
+	if systemd_global.len > 0 {
+		return systemd_global[0]
+	}
+	mut systemd := Systemd{
+		path:     pathlib.get_dir(path: '/etc/systemd/system', create: false)!
+		path_cmd: pathlib.get_dir(path: '/etc/systemd_cmds', create: true)!
+	}
+	systemd.load()!
+	systemd_global << &systemd
+	return systemd_global[0]
+}
+
+// check if systemd is on the system, returns true if yes
+pub fn check() !bool {
+	if !osal.cmd_exists('systemctl') {
+		return false
+	}
+
+	return osal.execute_ok('systemctl status --no-pager')
+}
+
+fn (mut systemd Systemd) load() ! {
+	if systemd.status == .ok {
+		return
+	}
+	console.print_header('Systemd load')
+	osal.execute_silent('systemctl daemon-reload')!
+	systemd.processes = []&SystemdProcess{}
+	for item in process_list()! {
+		mut sdprocess := SystemdProcess{
+			description: item.description
+			systemd:     &systemd
+			unit:        item.unit
+			info:        item
+		}
+		systemd.setinternal(mut sdprocess)
+	}
+
+	systemd.status = .ok
+}
+
+pub fn (mut systemd Systemd) reload() ! {
+	systemd.status = .init
+	systemd.load()!
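+	// reload drops the cached state (.init) so load() does a fresh
+	// `systemctl daemon-reload` and re-reads the unit list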
+} + +@[params] +pub struct SystemdProcessNewArgs { +pub mut: + name string @[required] + cmd string @[required] + description string + env map[string]string + start bool = true + restart bool = true +} + +//``` +// name string @[required] +// cmd string @[required] +// description string @[required] +//``` +pub fn (mut systemd Systemd) new(args_ SystemdProcessNewArgs) !SystemdProcess { + mut args := args_ + args.name = name_fix(args.name) + + if args.cmd == '' { + return error('cmd needs to be filled in in:\n${args}') + } + + mut sdprocess := SystemdProcess{ + name: args.name + description: args.description + cmd: args.cmd + restart: true + systemd: &systemd + info: SystemdProcessInfo{ + unit: args.name + } + } + + // TODO: maybe systemd can start multiline scripts? + if args.cmd.contains('\n') { + // means we can load the special cmd + mut pathcmd := systemd.path_cmd.file_get_new('${args.name}_cmd')! + pathcmd.write(sdprocess.cmd)! + pathcmd.chmod(0o750)! + sdprocess.cmd = '/bin/bash -c ${pathcmd.path}' + } + sdprocess.env = args.env.move() + + sdprocess.write()! + systemd.setinternal(mut sdprocess) + + if args.start || args.restart { + sdprocess.stop()! + } + + if args.start { + sdprocess.start()! + } + + return sdprocess +} + +pub fn (mut systemd Systemd) names() []string { + r := systemd.processes.map(it.name) + return r +} + +fn (mut systemd Systemd) setinternal(mut sdprocess SystemdProcess) { + sdprocess.name = name_fix(sdprocess.info.unit) + systemd.processes = systemd.processes.filter(it.name != sdprocess.name) + systemd.processes << &sdprocess +} + +pub fn (mut systemd Systemd) get(name_ string) !&SystemdProcess { + name := name_fix(name_) + if systemd.processes.len == 0 { + systemd.load()! + } + for item in systemd.processes { + if name_fix(item.name) == name { + return item + } + } + return error("Can't find systemd process with name ${name}, maybe reload the state with systemd.load()") +} + +pub fn (mut systemd Systemd) exists(name_ string) bool { + name := name_fix(name_) + for item in systemd.processes { + if name_fix(item.name) == name { + return true + } + } + return false +} + +pub fn (mut systemd Systemd) destroy(name_ string) ! { + for i, mut pr in systemd.processes { + if name_fix(pr.name) == name_fix(name_) { + pr.delete()! + systemd.processes[i] = systemd.processes[systemd.processes.len - 1] + systemd.processes.delete_last() + break + } + } +} + +fn name_fix(name_ string) string { + mut name := texttools.name_fix(name_) + if name.contains('.service') { + name = name.all_before_last('.') + } + return name +} diff --git a/lib/osal/systemd/systemd_list.v b/lib/osal/systemd/systemd_list.v new file mode 100644 index 00000000..18b63c91 --- /dev/null +++ b/lib/osal/systemd/systemd_list.v @@ -0,0 +1,94 @@ +module systemd + +import os +import json + +struct SystemdProcessInfoRaw { + unit string + load string + active string + sub string + description string +} + +pub struct SystemdProcessInfo { +pub mut: + unit string + load_state LoadState + active_state ActiveState + sub_state SubState + description string +} + +pub enum LoadState { + loaded // The unit's configuration file has been successfully loaded into memory. + not_found // The unit's configuration file could not be found. + error // There was an error loading the unit's configuration file. + masked // The unit has been masked, which means it has been explicitly disabled and cannot be started. +} + +pub enum ActiveState { + active // The unit has been started successfully and is running as expected. 
+ inactive // The unit is not running. + activating // The unit is in the process of being started. + deactivating // The unit is in the process of being stopped. + failed // The unit tried to start but failed. +} + +// This provides more detailed information about the unit's state, often referred to as the "sub-state". This can vary significantly between different types of units (services, sockets, timers, etc.) +pub enum SubState { + unknown + start + running // The service is currently running. + exited // The service has completed its process and exited. For services that do something at startup and then exit (oneshot services), this is a normal state. + failed // The service has failed after starting. + waiting // The service is waiting for some condition to be met. + autorestart + dead +} + +pub fn process_list() ![]SystemdProcessInfo { + cmd := 'systemctl list-units --type=service --no-pager --all -o json-pretty ' + res_ := os.execute(cmd) + if res_.exit_code > 0 { + return error('could not execute: ${cmd}') + } + items := json.decode([]SystemdProcessInfoRaw, res_.output) or { + panic('Failed to decode Systemd Process Info') + } + mut res := []SystemdProcessInfo{} + for item in items { + mut unit := SystemdProcessInfo{ + unit: item.unit + description: item.description + } + match item.load { + 'loaded' { unit.load_state = .loaded } + 'not-found' { unit.load_state = .not_found } + 'error' { unit.load_state = .error } + 'bad-setting' { unit.load_state = .error } + 'masked' { unit.load_state = .masked } + else { return error('could not find right load state for systemd ${unit.load_state}') } + } + match item.active { + 'active' { unit.active_state = .active } + 'inactive' { unit.active_state = .inactive } + 'activating' { unit.active_state = .activating } + 'deactivating' { unit.active_state = .deactivating } + 'failed' { unit.active_state = .failed } + else { return error('could not find right active state for systemd ${unit.load_state}') } + } + match item.sub { + 'start' { unit.sub_state = .start } + 'running' { unit.sub_state = .running } + 'exited' { unit.sub_state = .exited } + 'failed' { unit.sub_state = .failed } + 'waiting' { unit.sub_state = .waiting } + 'dead' { unit.sub_state = .dead } + 'auto-restart' { unit.sub_state = .autorestart } + else { unit.sub_state = .unknown } + } + res << unit + } + return res +} diff --git a/lib/osal/systemd/systemd_process.v b/lib/osal/systemd/systemd_process.v new file mode 100644 index 00000000..78f40ae0 --- /dev/null +++ b/lib/osal/systemd/systemd_process.v @@ -0,0 +1,143 @@ +module systemd + +// import os +import maps +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.ui.console +import os + +@[heap] +pub struct SystemdProcess { +pub mut: + name string + unit string // as generated or used by systemd + cmd string + pid int + env map[string]string + systemd &Systemd @[skip; str: skip] + description string + info SystemdProcessInfo + restart bool = true // whether process will be restarted upon failure +} + +pub fn (mut self SystemdProcess) servicefile_path() string { + return '${self.systemd.path.path}/${self.name}.service' +} + +pub fn (mut self SystemdProcess) write() ! { + mut p := pathlib.get_file(path: self.servicefile_path(), create: true)! 
+ console.print_header(' systemd write service: ${p.path}') + + envs_lst := maps.to_array[string, string, string](self.env, fn (k string, v string) string { + return 'Environment=${k}=${v}' + }) + + envs := envs_lst.join('\n') + + servicecontent := $tmpl('templates/service.yaml') + + println(self) + println(servicecontent) + + p.write(servicecontent)! +} + +pub fn (mut self SystemdProcess) start() ! { + console.print_header('starting systemd process: ${self.name}') + // self.write()! + cmd := ' + systemctl daemon-reload + systemctl enable ${self.name} + systemctl start ${self.name} + ' // console.print_debug(cmd) + + _ = osal.execute_silent(cmd)! + self.refresh()! +} + +// get status from system +pub fn (mut self SystemdProcess) refresh() ! { + self.systemd.load()! + systemdobj2 := self.systemd.get(self.name)! + self.info = systemdobj2.info + self.description = systemdobj2.description + self.name = systemdobj2.name + self.unit = systemdobj2.unit + self.cmd = systemdobj2.cmd +} + +pub fn (mut self SystemdProcess) delete() ! { + console.print_header('Process systemd: ${self.name} delete.') + self.stop()! + if os.exists(self.servicefile_path()) { + os.rm(self.servicefile_path())! + } +} + +pub fn (mut self SystemdProcess) stop() ! { + cmd := ' + set +ex + systemctl daemon-reload + systemctl disable ${self.name} + systemctl stop ${self.name} + ' + _ = osal.exec(cmd: cmd, stdout: false, debug: false, ignore_error: false)! + self.systemd.load()! +} + +pub fn (mut self SystemdProcess) restart() ! { + cmd := ' + systemctl daemon-reload + systemctl restart ${self.name} + ' + _ = osal.execute_silent(cmd)! + self.systemd.load()! +} + +enum SystemdStatus { + unknown + active + inactive + failed + activating + deactivating +} + +pub fn (self SystemdProcess) status() !SystemdStatus { + // exit with 3 is converted to exit with 0 + cmd := ' + systemctl daemon-reload + systemctl status --no-pager --lines=0 ${name_fix(self.name)} + ' + job := osal.exec(cmd: cmd, stdout: false) or { + if err.code() == 3 { + if err is osal.JobError { + return parse_systemd_process_status(err.job.output) + } + } + return error('Failed to run command to get status ${err}') + } + + return parse_systemd_process_status(job.output) +} + +fn parse_systemd_process_status(output string) SystemdStatus { + lines := output.split_into_lines() + for line in lines { + if line.contains('Active: ') { + if line.contains('active (running)') { + return .active + } else if line.contains('inactive (dead)') { + return .inactive + } else if line.contains('failed') { + return .failed + } else if line.contains('activating') { + return .activating + } else if line.contains('deactivating') { + return .deactivating + } + } + } + return .unknown +} diff --git a/lib/osal/systemd/systemd_process_test.v b/lib/osal/systemd/systemd_process_test.v new file mode 100644 index 00000000..2b2f9518 --- /dev/null +++ b/lib/osal/systemd/systemd_process_test.v @@ -0,0 +1,61 @@ +module systemd + +// import os +import maps +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.ui.console +import os + +pub fn testsuite_begin() ! { + mut systemdfactory := new()! + mut process := systemdfactory.new( + cmd: 'redis-server' + name: 'testservice' + start: false + )! + + process.delete()! +} + +pub fn testsuite_end() ! { + mut systemdfactory := new()! + mut process := systemdfactory.new( + cmd: 'redis-server' + name: 'testservice' + start: false + )! + + process.delete()! +} + +pub fn test_systemd_process_status() ! 
{
+	mut systemdfactory := new()!
+	mut process := systemdfactory.new(
+		cmd:   'redis-server'
+		name:  'testservice'
+		start: false
+	)!
+
+	process.start()!
+	status := process.status()!
+	assert status == .active
+}
+
+pub fn test_parse_systemd_process_status() ! {
+	output := 'testservice.service - testservice
+     Loaded: loaded (/etc/systemd/system/testservice.service; enabled; preset: disabled)
+     Active: active (running) since Mon 2024-06-10 12:51:24 CEST; 2ms ago
+   Main PID: 202537 (redis-server)
+      Tasks: 1 (limit: 154455)
+     Memory: 584.0K (peak: 584.0K)
+        CPU: 0
+     CGroup: /system.slice/testservice.service
+             └─202537 redis-server
+
+Jun 10 12:51:24 myhost1 systemd[1]: testservice.service: Scheduled restart job, restart counter is at 1.
+Jun 10 12:51:24 myhost1 systemd[1]: Started testservice.'
+
+	status := parse_systemd_process_status(output)
+	assert status == .active
+}
diff --git a/lib/osal/systemd/templates/service.yaml b/lib/osal/systemd/templates/service.yaml
new file mode 100644
index 00000000..dabfff6c
--- /dev/null
+++ b/lib/osal/systemd/templates/service.yaml
@@ -0,0 +1,17 @@
+[Unit]
+Description=${self.name}
+After=network.target
+
+[Service]
+Type=simple
+ExecStart=${self.cmd}
+WorkingDirectory=/tmp
+@if self.restart
+Restart=always
+@else
+Restart=no
+@end
+@{envs}
+
+[Install]
+WantedBy=multi-user.target
diff --git a/lib/osal/tmux/readme.md b/lib/osal/tmux/readme.md
new file mode 100644
index 00000000..090511c2
--- /dev/null
+++ b/lib/osal/tmux/readme.md
@@ -0,0 +1,24 @@
+# TMUX
+
+tmux is a terminal multiplexer, which we use here as a capable process manager.
+
+## Concepts
+
+- tmux = the factory, it represents the tmux process manager, linked to a node
+- session = a named set of windows, it groups windows
+- window = typically one process running (tmux also has panes, but our implementation skips those)
+
+## Structure
+
+the tmux library provides functions for managing tmux sessions
+
+- session is the top level
+- then windows (where you see the app running)
+- then panes in windows (not supported yet)
+
+## to attach to a tmux session
+
+> TODO:
\ No newline at end of file
diff --git a/lib/osal/tmux/testdata/tmux_session_test.v b/lib/osal/tmux/testdata/tmux_session_test.v
new file mode 100644
index 00000000..5be65c1d
--- /dev/null
+++ b/lib/osal/tmux/testdata/tmux_session_test.v
@@ -0,0 +1,86 @@
+module tmux
+
+import freeflowuniverse.herolib.osal
+import freeflowuniverse.herolib.installers.tmux
+
+// fn testsuite_end() {
+
+//
+// }
+
+fn testsuite_begin() {
+	mut tmux := Tmux{}
+
+	if tmux.is_running()! {
+		tmux.stop()!
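+		// the tests assume a clean tmux server, so an already running
+		// server is stopped first; the sessions created below would
+		// otherwise collide with leftover state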
+ } +} + +fn test_session_create() { + // installer := tmux.get_install( + // panic('could not install tmux: ${err}') + // } + + mut tmux := Tmux{} + tmux.start() or { panic('cannot start tmux: ${err}') } + + mut s := Session{ + tmux: &tmux + windows: map[string]&Window{} + name: 'testsession' + } + + mut s2 := Session{ + tmux: &tmux + windows: map[string]&Window{} + name: 'testsession2' + } + + // test testsession exists after session_create + mut tmux_ls := osal.execute_silent('tmux ls') or { panic("can't exec: ${err}") } + assert !tmux_ls.contains('testsession: 1 windows') + s.create() or { panic('Cannot create session: ${err}') } + tmux_ls = osal.execute_silent('tmux ls') or { panic("can't exec: ${err}") } + assert tmux_ls.contains('testsession: 1 windows') + + // test multiple session_create for same tmux + tmux_ls = osal.execute_silent('tmux ls') or { panic("can't exec: ${err}") } + assert !tmux_ls.contains('testsession2: 1 windows') + s2.create() or { panic('Cannot create session: ${err}') } + tmux_ls = osal.execute_silent('tmux ls') or { panic("can't exec: ${err}") } + assert tmux_ls.contains('testsession2: 1 windows') + + // test session_create with duplicate session + mut create_err := '' + s2.create() or { create_err = err.msg() } + assert create_err != '' + assert create_err.contains('duplicate session: testsession2') + tmux_ls = osal.execute_silent('tmux ls') or { panic("can't exec: ${err}") } + assert tmux_ls.contains('testsession2: 1 windows') + + s.stop() or { panic('Cannot stop session: ${err}') } + s2.stop() or { panic('Cannot stop session: ${err}') } +} + +// fn test_session_stop() { + +// +// installer := tmux.get_install( + +// mut tmux := Tmux { +// node: node_ssh +// } + +// mut s := Session{ +// tmux: &tmux // reference back +// windows: map[string]&Window{} +// name: 'testsession3' +// } + +// s.create() or { panic("Cannot create session: $err") } +// mut tmux_ls := osal.execute_silent('tmux ls') or { panic("can't exec: $err") } +// assert tmux_ls.contains("testsession3: 1 windows") +// s.stop() or { panic("Cannot stop session: $err")} +// tmux_ls = osal.execute_silent('tmux ls') or { panic("can't exec: $err") } +// assert !tmux_ls.contains("testsession3: 1 windows") +// } diff --git a/lib/osal/tmux/testdata/tmux_window_test.v b/lib/osal/tmux/testdata/tmux_window_test.v new file mode 100644 index 00000000..686d742e --- /dev/null +++ b/lib/osal/tmux/testdata/tmux_window_test.v @@ -0,0 +1,67 @@ +module tmux + +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.installers.tmux +import freeflowuniverse.herolib.ui.console + +// uses single tmux instance for all tests +__global ( + tmux Tmux +) + +fn init() { + tmux = get_remote('185.69.166.152')! + + // reset tmux for tests + if tmux.is_running() { + tmux.stop() or { panic('Cannot stop tmux') } + } +} + +fn testsuite_end() { + if tmux.is_running() { + tmux.stop()! 
+ } +} + +fn test_window_new() { + tmux.start() or { panic("can't start tmux: ${err}") } + + // test window new with only name arg + window_args := WindowArgs{ + name: 'TestWindow' + } + + assert !tmux.sessions.keys().contains('main') + + mut window := tmux.window_new(window_args) or { panic("Can't create new window: ${err}") } + assert tmux.sessions.keys().contains('main') + window.delete() or { panic('Cant delete window') } +} + +// // tests creating duplicate windows +// fn test_window_new0() { + +// +// installer := tmux.get_install( + +// mut tmux := Tmux { +// node: node_ssh +// } + +// window_args := WindowArgs { +// name: 'TestWindow0' +// } + +// // console.print_debug(tmux) +// mut window := tmux.window_new(window_args) or { +// panic("Can't create new window: $err") +// } +// assert tmux.sessions.keys().contains('main') +// mut window_dup := tmux.window_new(window_args) or { +// panic("Can't create new window: $err") +// } +// console.print_debug(node_ssh.exec('tmux ls') or { panic("fail:$err")}) +// window.delete() or { panic("Cant delete window") } +// // console.print_debug(tmux) +// } diff --git a/lib/osal/tmux/tmux.v b/lib/osal/tmux/tmux.v new file mode 100644 index 00000000..7dbb3686 --- /dev/null +++ b/lib/osal/tmux/tmux.v @@ -0,0 +1,116 @@ +module tmux + +import freeflowuniverse.herolib.osal +// import freeflowuniverse.herolib.session +import os +import time +import freeflowuniverse.herolib.ui.console + +@[heap] +pub struct Tmux { +pub mut: + sessions []&Session + sessionid string // unique link to job +} + +@[params] +pub struct TmuxNewArgs { + sessionid string +} + +// return tmux instance +pub fn new(args TmuxNewArgs) !Tmux { + mut t := Tmux{ + sessionid: args.sessionid + } + t.load()! + t.scan()! + return t +} + +// loads tmux session, populate the object +pub fn (mut tmux Tmux) load() ! { + isrunning := tmux.is_running()! + if !isrunning { + tmux.start()! + } + // console.print_debug("SCAN") + tmux.scan()! +} + +pub fn (mut t Tmux) stop() ! { + $if debug { + console.print_debug('Stopping tmux...') + } + + t.sessions = []&Session{} + t.scan()! + + for _, mut session in t.sessions { + session.stop()! + } + + cmd := 'tmux kill-server' + _ := osal.exec(cmd: cmd, stdout: false, name: 'tmux_kill_server', ignore_error: true) or { + panic('bug') + } + os.log('TMUX - All sessions stopped .') +} + +pub fn (mut t Tmux) start() ! { + cmd := 'tmux new-sess -d -s main' + _ := osal.exec(cmd: cmd, stdout: false, name: 'tmux_start') or { + return error("Can't execute ${cmd} \n${err}") + } + // scan and add default bash window created with session init + time.sleep(time.Duration(100 * time.millisecond)) + t.scan()! 
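+	// the 100ms sleep above gives the tmux server time to create the default
+	// 'main' session before we scan, otherwise the scan can miss it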
+} + +// print list of tmux sessions +pub fn (mut t Tmux) list_print() { + // os.log('TMUX - Start listing ....') + for _, session in t.sessions { + for _, window in session.windows { + console.print_debug(window) + } + } +} + +// get all windows as found in all sessions +pub fn (mut t Tmux) windows_get() []&Window { + mut res := []&Window{} + // os.log('TMUX - Start listing ....') + for _, session in t.sessions { + for _, window in session.windows { + res << window + } + } + return res +} + +// checks whether tmux server is running +pub fn (mut t Tmux) is_running() !bool { + res := osal.exec(cmd: 'tmux info', stdout: false, name: 'tmux_info', raise_error: false) or { + panic('bug') + } + if res.error.contains('no server running') { + // console.print_debug(" TMUX NOT RUNNING") + return false + } + if res.error.contains('no current client') { + return true + } + if res.exit_code > 0 { + return error('could not execute tmux info.\n${res}') + } + return true +} + +pub fn (mut t Tmux) str() string { + mut out := '# Tmux\n\n' + for s in t.sessions { + out += '${*s}\n' + } + return out +} diff --git a/lib/osal/tmux/tmux_scan.v b/lib/osal/tmux/tmux_scan.v new file mode 100644 index 00000000..f385262d --- /dev/null +++ b/lib/osal/tmux/tmux_scan.v @@ -0,0 +1,95 @@ +module tmux + +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.ui.console + +fn (mut t Tmux) scan_add(line string) !&Window { + // console.print_debug(" -- scan add: $line") + if line.count('|') < 4 { + return error(@FN + 'expects line with at least 5 params separated by |') + } + + line_arr := line.split('|') + session_name := line_arr[0] + window_name := line_arr[1] + window_id := line_arr[2] + pane_active := line_arr[3] + pane_id := line_arr[4] + pane_pid := line_arr[5] + pane_start_command := line_arr[6] or { '' } + + wid := (window_id.replace('@', '')).int() + + // os.log('TMUX FOUND: $line\n ++ $session_name:$window_name wid:$window_id pid:$pane_pid entrypoint:$pane_start_command') + mut s := t.session_get(session_name)! + + mut active := false + if pane_active == '1' { + active = true + } + + mut name := texttools.name_fix(window_name) + + mut w := Window{ + session: s + name: name + } + + if !(s.window_exist(name: window_name, id: wid)) { + // console.print_debug("window not exists") + s.windows << &w + } else { + w = s.window_get(name: window_name, id: wid)! + } + + w.id = wid + w.active = active + w.pid = pane_pid.int() + w.paneid = (pane_id.replace('%', '')).int() + w.cmd = pane_start_command + + return &w +} + +// scan the system to detect sessions . +pub fn (mut t Tmux) scan() ! 
{ + // os.log('TMUX - Scanning ....') + + cmd_list_session := "tmux list-sessions -F '#{session_name}'" + exec_list := osal.exec(cmd: cmd_list_session, stdout: false, name: 'tmux_list') or { + return error('could not execute list sessions.\n${err}') + } + + // console.print_debug('execlist out for sessions: ${exec_list}') + + // make sure we have all sessions + for line in exec_list.output.split_into_lines() { + session_name := line.trim(' \n').to_lower() + if session_name == '' { + continue + } + if t.session_exist(session_name) { + continue + } + mut s := Session{ + tmux: &t // reference back + name: session_name + } + t.sessions << &s + } + + console.print_debug(t) + + // mut done := map[string]bool{} + cmd := "tmux list-panes -a -F '#{session_name}|#{window_name}|#{window_id}|#{pane_active}|#{pane_id}|#{pane_pid}|#{pane_start_command}'" + out := osal.execute_silent(cmd) or { return error("Can't execute ${cmd} \n${err}") } + + // $if debug{console.print_debug('tmux list panes out:\n${out}')} + + for line in out.split_into_lines() { + if line.contains('|') { + t.scan_add(line)! + } + } +} diff --git a/lib/osal/tmux/tmux_session.v b/lib/osal/tmux/tmux_session.v new file mode 100644 index 00000000..6426f140 --- /dev/null +++ b/lib/osal/tmux/tmux_session.v @@ -0,0 +1,153 @@ +module tmux + +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.core.texttools +import os +import freeflowuniverse.herolib.ui.console + +@[heap] +struct Session { +pub mut: + tmux &Tmux @[str: skip] // reference back + windows []&Window // session has windows + name string +} + +// get session (session has windows) . +// returns none if not found +pub fn (mut t Tmux) session_get(name_ string) !&Session { + name := texttools.name_fix(name_) + for s in t.sessions { + if s.name == name { + return s + } + } + return error('Can not find session with name: \'${name_}\', out of loaded sessions.') +} + +pub fn (mut t Tmux) session_exist(name_ string) bool { + name := texttools.name_fix(name_) + t.session_get(name) or { return false } + return true +} + +pub fn (mut t Tmux) session_delete(name_ string) ! { + if !(t.session_exist(name_)) { + return + } + name := texttools.name_fix(name_) + mut i := 0 + for mut s in t.sessions { + if s.name == name { + s.stop()! + break + } + i += 1 + } + t.sessions.delete(i) +} + +@[params] +pub struct SessionCreateArgs { +pub mut: + name string @[required] + reset bool +} + +// create session, if reset will re-create +pub fn (mut t Tmux) session_create(args SessionCreateArgs) !&Session { + name := texttools.name_fix(args.name) + if !(t.session_exist(name)) { + $if debug { + console.print_header(' tmux - create session: ${args}') + } + mut s2 := Session{ + tmux: t // reference back + name: name + } + s2.create()! + t.sessions << &s2 + } + mut s := t.session_get(name)! + if args.reset { + $if debug { + console.print_header(' tmux - session ${name} will be restarted.') + } + s.restart()! + } + t.scan()! + return s +} + +pub fn (mut s Session) create() ! 
{ + res_opt := "-P -F '#\{window_id\}'" + cmd := "tmux new-session ${res_opt} -d -s ${s.name} 'sh'" + window_id_ := osal.execute_silent(cmd) or { + return error("Can't create tmux session ${s.name} \n${cmd}\n${err}") + } + + cmd3 := 'tmux set-option remain-on-exit on' + osal.execute_silent(cmd3) or { return error("Can't execute ${cmd3}\n${err}") } + + window_id := window_id_.trim(' \n') + cmd2 := "tmux rename-window -t ${window_id} 'notused'" + osal.execute_silent(cmd2) or { + return error("Can't rename window ${window_id} to notused \n${cmd2}\n${err}") + } +} + +pub fn (mut s Session) restart() ! { + s.stop()! + s.create()! +} + +pub fn (mut s Session) stop() ! { + osal.execute_silent('tmux kill-session -t ${s.name}') or { + return error("Can't delete session ${s.name} - This may happen when session is not found: ${err}") + } +} + +// get all windows as found in a session +pub fn (mut s Session) windows_get() []&Window { + mut res := []&Window{} + // os.log('TMUX - Start listing ....') + for _, window in s.windows { + res << window + } + return res +} + +pub fn (mut s Session) windownames_get() []string { + mut res := []string{} + for _, window in s.windows { + res << window.name + } + return res +} + +pub fn (mut s Session) str() string { + mut out := '## Session: ${s.name}\n\n' + for _, w in s.windows { + out += '${*w}\n' + } + return out +} + +// pub fn (mut s Session) activate()! { +// active_session := s.tmux.redis.get('tmux:active_session') or { 'No active session found' } +// if active_session != 'No active session found' && s.name != active_session { +// s.tmuxexecutor.db.exec('tmux attach-session -t $active_session') or { +// return error('Fail to attach to current active session: $active_session \n$err') +// } +// s.tmuxexecutor.db.exec('tmux switch -t $s.name') or { +// return error("Can't switch to session $s.name \n$err") +// } +// s.tmux.redis.set('tmux:active_session', s.name) or { panic('Failed to set tmux:active_session') } +// os.log('SESSION - Session: $s.name activated ') +// } else if active_session == 'No active session found' { +// s.tmux.redis.set('tmux:active_session', s.name) or { panic('Failed to set tmux:active_session') } +// os.log('SESSION - Session: $s.name activated ') +// } else { +// os.log('SESSION - Session: $s.name already activate ') +// } +// } diff --git a/lib/osal/tmux/tmux_test.v b/lib/osal/tmux/tmux_test.v new file mode 100644 index 00000000..783659f7 --- /dev/null +++ b/lib/osal/tmux/tmux_test.v @@ -0,0 +1,118 @@ +module tmux + +import freeflowuniverse.herolib.osal +// import freeflowuniverse.herolib.installers.tmux +import os +import freeflowuniverse.herolib.ui.console + +const testpath = os.dir(@FILE) + '/testdata' + +// make sure tmux isn't running prior to test +fn testsuite_begin() { + mut tmux := get_remote('185.69.166.152')! + if tmux.is_running() { + tmux.stop()! + } +} + +// make sure tmux isn't running after test +fn testsuite_end() { + mut tmux := get_remote('185.69.166.152')! + + if tmux.is_running() { + tmux.stop()! + } +} + +fn test_start() ! { + mut tmux := get_remote('185.69.166.152')! + + // test server is running after start() + tmux.start() or { panic('cannot start tmux: ${err}') } + mut tmux_ls := osal.execute_silent('tmux ls') or { panic('Cannot execute tmux ls: ${err}') } + // test started tmux contains windows + assert tmux_ls.contains('init: 1 windows') + tmux.stop() or { panic('cannot stop tmux: ${err}') } +} + +fn test_stop() ! { + mut tmux := get_remote('185.69.166.152')! 
+ + // test server is running after start() + tmux.start() or { panic('cannot start tmux: ${err}') } + assert tmux.is_running() + tmux.stop() or { panic('cannot stop tmux: ${err}') } + assert !tmux.is_running() +} + +fn test_windows_get() ! { + mut tmux := get_remote('185.69.166.152')! + + // test windows_get when only starting window is running + tmux.start()! + mut windows := tmux.windows_get() + assert windows.len == 1 + + // test getting newly created window + tmux.window_new(WindowArgs{ name: 'testwindow' })! + windows = tmux.windows_get() + unsafe { + assert windows.keys().contains('testwindow') + } + assert windows['testwindow'].name == 'testwindow' + assert windows['testwindow'].active + tmux.stop()! +} + +// TODO: fix test +fn test_scan() ! { + console.print_debug('-----Testing scan------') + mut tmux := get_remote('185.69.166.152')! + tmux.start()! + + // check bash window is initialized + mut new_windows := tmux.windows_get() + unsafe { + assert new_windows.keys() == ['bash'] + } + // test scan, should return no windows + mut windows := tmux.windows_get() + unsafe { + assert windows.keys().len == 0 + } + // test scan with window in tmux but not in tmux struct + // mocking a failed command to see if scan identifies + tmux.sessions['init'].windows['test'] = &Window{ + session: tmux.sessions['init'] + name: 'test' + } + new_windows = tmux.windows_get() + panic('new windows ${new_windows.keys()}') + unsafe { + assert new_windows.keys().len == 1 + } + new_windows = tmux.scan()! + tmux.stop()! +} + +// //TODO: fix test +// fn test_scan_add() ! { +// console.print_debug("-----Testing scan_add------") + +// +// mut tmux := Tmux { node: node_ssh } +// windows := tmux.scan_add("line")! +// } + +// remaining tests are run synchronously to avoid conflicts +fn test_tmux_window() { + res := os.execute('${os.quoted_path(@VEXE)} test ${testpath}/tmux_window_test.v') + // assert res.exit_code == 1 + // assert res.output.contains('other_test.v does not exist') +} + +fn test_tmux_scan() { + res := os.execute('${os.quoted_path(@VEXE)} test ${testpath}/tmux_window_test.v') + // assert res.exit_code == 1 + // assert res.output.contains('other_test.v does not exist') +} diff --git a/lib/osal/tmux/tmux_window.v b/lib/osal/tmux/tmux_window.v new file mode 100644 index 00000000..3bbcb817 --- /dev/null +++ b/lib/osal/tmux/tmux_window.v @@ -0,0 +1,257 @@ +module tmux + +import os +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.data.ourtime +import time +import freeflowuniverse.herolib.ui.console + +@[heap] +struct Window { +pub mut: + session &Session @[skip] + name string + id int + active bool + pid int + paneid int + cmd string + env map[string]string +} + +pub struct WindowArgs { +pub mut: + name string + cmd string + env map[string]string + reset bool +} + +// window_name is the name of the window in session main (will always be called session main) +// cmd to execute e.g. bash file +// environment arguments to use +// reset, if reset it will create window even if it does already exist, will destroy it +// ``` +// struct WindowArgs { +// pub mut: +// name string +// cmd string +// env map[string]string +// reset bool +// } +// ``` +pub fn (mut t Tmux) window_new(args WindowArgs) !Window { + mut s := t.session_create(name: 'main', reset: false)! + mut w := s.window_new(args)! + return w +} + +// is always in the main tmux +pub fn (mut t Tmux) window_delete(args WindowGetArgs) ! 
{ + mut s := t.session_create(name: 'main', reset: false)! + s.window_delete(name: args.name)! +} + +// window_name is the name of the window in session main (will always be called session main) +// cmd to execute e.g. bash file +// environment arguments to use +// reset, if reset it will create window even if it does already exist, will destroy it +// ``` +// struct WindowArgs { +// pub mut: +// name string +// cmd string +// env map[string]string +// reset bool +// } +// ``` +pub fn (mut s Session) window_new(args WindowArgs) !Window { + $if debug { + console.print_header(' start window: \n${args}') + } + namel := texttools.name_fix(args.name) + if s.window_exist(name: namel) { + if args.reset { + s.window_delete(name: namel)! + } else { + return error('cannot create new window it already exists, window ${namel} in session:${s.name}') + } + } + mut w := Window{ + session: &s + name: namel + cmd: args.cmd + env: args.env + } + s.windows << &w + w.create()! + s.window_delete(name: 'notused')! + return w +} + +pub struct WindowGetArgs { +pub mut: + name string + cmd string + id int +} + +fn (mut s Session) window_exist(args_ WindowGetArgs) bool { + mut args := args_ + s.window_get(args) or { return false } + return true +} + +pub fn (mut s Session) window_get(args_ WindowGetArgs) !&Window { + mut args := args_ + args.name = texttools.name_fix(args.name) + for w in s.windows { + if w.name == args.name { + if (args.id > 0 && w.id == args.id) || args.id == 0 { + return w + } + } + } + return error('Cannot find window ${args.name} in session:${s.name}') +} + +pub fn (mut s Session) window_delete(args_ WindowGetArgs) ! { + // $if debug { console.print_debug(" - window delete: $args_")} + mut args := args_ + args.name = texttools.name_fix(args.name) + if !(s.window_exist(args)) { + return + } + mut i := 0 + for mut w in s.windows { + if w.name == args.name { + if (args.id > 0 && w.id == args.id) || args.id == 0 { + w.stop()! + break + } + } + i += 1 + } + s.windows.delete(i) // i is now the one in the list which needs to be removed +} + +pub fn (mut w Window) create() ! { + // tmux new-window -P -c /tmp -e good=1 -e bad=0 -n koekoe -t main bash + if w.cmd.contains('\n') { + // means is multiline need to write it + // scriptpath string // is the path where the script will be put which is executed + // scriptkeep bool // means we don't remove the script + os.mkdir_all('/tmp/tmux/${w.session.name}')! + cmd_new := osal.exec_string( + cmd: w.cmd + scriptpath: '/tmp/tmux/${w.session.name}/${w.name}.sh' + scriptkeep: true + )! 
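+	// exec_string wraps the multiline command in a script under
+	// /tmp/tmux/<session>/<window>.sh and returns the one-line command that
+	// runs it; sketch of the effect (values are illustrative assumptions):
+	//   'echo start\nredis-server'  ->  '/bin/bash -c /tmp/tmux/main/mywindow.sh'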
+ w.cmd = cmd_new + } + + // console.print_debug(w) + + if w.active == false { + res_opt := "-P -F '#{session_name}|#{window_name}|#{window_id}|#{pane_active}|#{pane_id}|#{pane_pid}|#{pane_start_command}'" + cmd := 'tmux new-window ${res_opt} -t ${w.session.name} -n ${w.name} \'/bin/bash -c ${w.cmd}\'' + console.print_debug(cmd) + res := osal.exec(cmd: cmd, stdout: false, name: 'tmux_window_create') or { + return error("Can't create new window ${w.name} \n${cmd}\n${err}") + } + // now look at output to get the window id = wid + line_arr := res.output.split('|') + wid := line_arr[2] or { panic('cannot split line for window create.\n${line_arr}') } + w.id = wid.replace('@', '').int() + $if debug { + console.print_header(' WINDOW - Window: ${w.name} created in session: ${w.session.name}') + } + } else { + return error('cannot create window, it already exists.\n${w.name}:${w.id}:${w.cmd}') + } +} + +// do some good checks if the window is still active +// not implemented yet +pub fn (mut w Window) check() ! { + panic('not implemented yet') +} + +// restart the window +pub fn (mut w Window) restart() ! { + w.stop()! + w.create()! +} + +// stop the window +pub fn (mut w Window) stop() ! { + osal.exec( + cmd: 'tmux kill-window -t @${w.id}' + stdout: false + name: 'tmux_kill-window' + die: false + ) or { return error("Can't kill window with id:${w.id}") } + w.pid = 0 + w.active = false +} + +pub fn (window Window) str() string { + return ' - name:${window.name} wid:${window.id} active:${window.active} pid:${window.pid} cmd:${window.cmd}' +} + +// will select the current window so with tmux a we can go there . +// to login into a session do `tmux a -s mysessionname` +fn (mut w Window) activate() ! { + cmd2 := 'tmux select-window -t %${w.id}' + osal.execute_silent(cmd2) or { + return error("Couldn't select window ${w.name} \n${cmd2}\n${err}") + } +} + +// show the environment +pub fn (mut w Window) environment_print() ! { + res := osal.execute_silent('tmux show-environment -t %${w.paneid}') or { + return error('Couldnt show enviroment cmd: ${w.cmd} \n${err}') + } + os.log(res) +} + +// capture the output +pub fn (mut w Window) output_print() ! { + o := w.output()! + console.print_debug(o) +} + +// capture the output +pub fn (mut w Window) output() !string { + //-S is start, minus means go in history, otherwise its only the active output + // tmux capture-pane -t your-session-name:your-window-number -S -1000 + cmd := 'tmux capture-pane -t ${w.session.name}:@${w.id} -S -1000 && tmux show-buffer' + res := osal.execute_silent(cmd) or { + return error('Couldnt show enviroment cmd: ${w.cmd} \n${err}') + } + return texttools.remove_empty_lines(res) +} + +pub fn (mut w Window) output_wait(c_ string, timeoutsec int) ! { + mut t := ourtime.now() + start := t.unix() + c := c_.replace('\n', '') + for i in 0 .. 2000 { + o := w.output()! 
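+		// poll the pane output (at most ~2000 iterations of 100ms) until the
+		// expected string appears or timeoutsec is exceeded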
+		// console.print_debug(o)
+		$if debug {
+			console.print_debug(" - tmux ${w.name}: wait for: '${c}'")
+		}
+		// need to replace \n because it can be wrapped due to the size of the pane
+		if o.replace('\n', '').contains(c) {
+			return
+		}
+		mut t2 := ourtime.now()
+		if t2.unix() > start + timeoutsec {
+			return error('timeout on output wait for tmux.\n${w} .\nwaiting for:\n${c}')
+		}
+		time.sleep(100 * time.millisecond)
+	}
+}
diff --git a/lib/osal/ufw/model.v b/lib/osal/ufw/model.v
new file mode 100644
index 00000000..fb9af0d9
--- /dev/null
+++ b/lib/osal/ufw/model.v
@@ -0,0 +1,59 @@
+module ufw
+
+pub struct UFWStatus {
+pub mut:
+	active bool
+	rules  []Rule
+}
+
+@[heap]
+pub struct RuleSet {
+pub mut:
+	rules []Rule
+	ssh   bool = true // leave this on, it's your way back into the system
+	reset bool = true
+}
+
+pub struct Rule {
+pub mut:
+	ipv6  bool
+	port  int
+	from  string = 'any'
+	tcp   bool
+	udp   bool
+	allow bool // if false, the rule denies
+}
+
+@[params]
+pub struct RuleArgs {
+pub mut:
+	ipv6 bool
+	port int
+	from string = 'any'
+	tcp  bool = true
+	udp  bool
+}
+
+// Allow incoming traffic to a specific port or service
+pub fn (mut rs RuleSet) allow(args RuleArgs) {
+	rs.rules << Rule{
+		port:  args.port
+		tcp:   args.tcp
+		udp:   args.udp
+		allow: true
+		from:  args.from
+		ipv6:  args.ipv6
+	}
+}
+
+// Deny incoming traffic to a specific port or service
+pub fn (mut rs RuleSet) deny(args RuleArgs) {
+	rs.rules << Rule{
+		port:  args.port
+		tcp:   args.tcp
+		from:  args.from
+		udp:   args.udp
+		allow: false
+		ipv6:  args.ipv6
+	}
+}
diff --git a/lib/osal/ufw/play.v b/lib/osal/ufw/play.v
new file mode 100644
index 00000000..c067d698
--- /dev/null
+++ b/lib/osal/ufw/play.v
@@ -0,0 +1,46 @@
+module ufw
+
+import freeflowuniverse.herolib.core.playbook
+import freeflowuniverse.herolib.data.paramsparser
+
+pub fn play_ufw(mut plbook playbook.PlayBook) !RuleSet {
+	mut ufw_status := UFWStatus{
+		active: false
+		rules:  []
+	}
+
+	mut ruleset := RuleSet{}
+
+	// Find all UFW-related actions
+	ufw_actions := plbook.find(filter: 'ufw.')!
+	if ufw_actions.len == 0 {
+		return ruleset
+	}
+
+	for action in ufw_actions {
+		mut p := action.params
+
+		match action.name {
+			'ufw.configure' {
+				ufw_status.active = p.get_default_true('active')!
+				ruleset.ssh = p.get_default_true('ssh')!
+				ruleset.reset = p.get_default_true('reset')!
+			}
+			'ufw.add_rule' {
+				mut rule := Rule{
+					allow: p.get_default_true('allow')!
+					port:  p.get_int('port')!
+					from:  p.get_default('from', 'any')!
+					tcp:   p.get_default_true('tcp')!
+					udp:   p.get_default('udp', false)!
+					ipv6:  p.get_default('ipv6', false)!
+				}
+				ruleset.rules << rule
+			}
+			else {
+				println('Unknown action: ${action.name}')
+			}
+		}
+	}
+	return ruleset
+}
diff --git a/lib/osal/ufw/readme.md b/lib/osal/ufw/readme.md
new file mode 100644
index 00000000..375b01e0
--- /dev/null
+++ b/lib/osal/ufw/readme.md
@@ -0,0 +1,6 @@
+
+## UFW OSAL
+
+see crystallib/examples/osal/ufw.vsh for an example.
+
diff --git a/lib/osal/ufw/ufw.v b/lib/osal/ufw/ufw.v
new file mode 100644
index 00000000..56f6a832
--- /dev/null
+++ b/lib/osal/ufw/ufw.v
@@ -0,0 +1,91 @@
+module ufw
+
+import freeflowuniverse.herolib.osal
+import freeflowuniverse.herolib.ui.console
+import os
+
+pub fn reset() ! {
+	osal.execute_silent('
+	ufw --force disable
+	ufw --force reset
+	ufw allow 22
+	ufw --force enable
+	')!
+	console.print_debug('UFW Reset')
+}
+
+// ufw allow proto tcp to any port 80
+
+pub fn apply_rule(rule_ Rule) !
{ + mut rule := rule_ + mut command := 'ufw ' + + if rule.allow { + command += 'allow ' + } else { + command += 'deny ' + } + + if rule.tcp && !rule.udp { + command += 'proto tcp ' + } else if !rule.tcp && rule.udp { + command += 'proto udp ' + } + + if rule.from.trim_space() == '' { + rule.from = 'any' + } + + if rule.from != 'any' { + command += 'from ${rule.from} ' + } + + command += 'to any ' + + if rule.port == 0 { + return error('rule port cannot be 0, needs to be a port nr') + } + + command += 'port ${rule.port} ' + + result := os.execute(command) + if result.exit_code != 0 { + return error('Failed to apply rule: \n${rule}\n${command}\nError: ${result.output}') + } + console.print_debug('Rule applied: ${command}') +} + +pub fn allow_ssh() ! { + osal.execute_silent('ufw default deny incoming')! // make sure all is default denied + osal.execute_silent('ufw allow ssh')! +} + +pub fn disable() ! { + osal.execute_silent('ufw --force disable')! +} + +pub fn enable() ! { + allow_ssh()! + osal.execute_silent('ufw --force enable')! +} + +pub fn apply(ruleset RuleSet) ! { + if ruleset.reset { + reset()! + } + disable()! + console.print_debug('UFW Disabled') + for rule in ruleset.rules { + apply_rule(rule)! + } + if ruleset.ssh { + console.print_debug('SSH enable') + allow_ssh()! + } + enable()! + console.print_debug('UFW Enabled and Configured') +} + +pub fn new() RuleSet { + return RuleSet{} +} diff --git a/lib/osal/ufw/ufw_list.v b/lib/osal/ufw/ufw_list.v new file mode 100644 index 00000000..8cac8f03 --- /dev/null +++ b/lib/osal/ufw/ufw_list.v @@ -0,0 +1,94 @@ +module ufw + +import os + +pub fn ufw_status() !UFWStatus { + // Run the UFW status command + result := os.execute('sudo ufw status') + if result.exit_code != 0 { + return error('Error running UFW status: ${result.output}') + } + + // Split the output into lines + lines := result.output.split_into_lines() + + // Initialize our data structure + mut ufw_data := UFWStatus{ + rules: [] + } + mut tostart := false + // Parse the output + for line in lines { + line_trimmed := line.trim_space() + if line_trimmed.starts_with('Status:') { + status := line_trimmed.split(':')[1].trim_space() + if status.to_lower() == 'active' { + ufw_data.active = true + } + continue + } + if line_trimmed == '' { + continue + } + if line_trimmed.starts_with('--') { + tostart = true + continue + } + if tostart { + rule := parse_rule(line_trimmed)! 
+ ufw_data.rules << rule + } + } + + return ufw_data +} + +fn parse_rule(line_ string) !Rule { + mut line := line_ + line = line.replace(' (v6)', '(v6)') + parts := line.split_any(' \t').filter(it.len > 0) + + if parts.len != 3 { + return error('error in parsing rule of ufw.\n${parts}') + } + mut to := parts[0] + mut rule := Rule{ + from: parts[parts.len - 1] + } + + if parts[1].to_lower().contains('allow') { + rule.allow = true + } + + // Check for IPv6 + if to.contains('(v6)') || rule.from.contains('(v6)') { + rule.ipv6 = true + to = to.replace('(v6)', '').trim_space() + rule.from = rule.from.replace('(v6)', '').trim_space() + } + + // Check for protocol + if to.contains('/') { + proto := to.split('/')[1] + to = to.split('/')[0] + if proto == 'tcp' { + rule.tcp = true + } + if proto == 'udp' { + rule.udp = true + } + to = to.split('/')[0].trim_space() + } else { + rule.tcp = true + rule.udp = true + } + + rule.port = to.int() + + // Convert 'Anywhere' to 'any' + if rule.from.contains('Anywhere') { + rule.from = 'any' + } + + return rule +} diff --git a/lib/osal/users.v b/lib/osal/users.v new file mode 100644 index 00000000..9f5b1bff --- /dev/null +++ b/lib/osal/users.v @@ -0,0 +1,45 @@ +module osal + +import os +import freeflowuniverse.herolib.ui.console + +@[params] +pub struct UserArgs { +pub mut: + name string @[required] +} + +pub fn user_exists(username string) bool { + res := os.execute('id ${username}') + if res.exit_code > 0 { + console.print_debug(res.exit_code.str()) + // return error("cannot execute id ... code to see if username exist") + return false + } + return true +} + +pub fn user_id_get(username string) !int { + res := os.execute('id ${username}') + if res.exit_code > 0 { + return error('cannot execute id ... code to see if username exist') + } + return res.output.all_before('(').all_after_first('=').int() +} + +// add's a user if the user does not exist yet +pub fn user_add(args UserArgs) !int { + if user_exists(args.name) { + return user_id_get(args.name)! + } + mut cmd := '' + platform_ := platform() + if platform_ == .ubuntu { + cmd = 'useradd -m ${args.name} ' + } else { + panic('Unsupported platform for user_add') + } + _ := exec(cmd: cmd, timeout: 0, stdout: false)! + + return user_id_get(args.name)! 
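+	// hypothetical usage sketch (UserArgs is the params struct above):
+	//   uid := osal.user_add(name: 'testuser')!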
+}
diff --git a/lib/osal/utils.v b/lib/osal/utils.v
new file mode 100644
index 00000000..ac58cf3c
--- /dev/null
+++ b/lib/osal/utils.v
@@ -0,0 +1,41 @@
+module osal
+
+import log
+
+__global (
+	memdb shared map[string]string
+)
+
+pub fn memdb_set(key string, val string) {
+	lock memdb {
+		memdb[key] = val
+	}
+}
+
+pub fn memdb_get(key string) string {
+	lock memdb {
+		return memdb[key] or { return '' }
+	}
+	return ''
+}
+
+pub fn memdb_exists(key string) bool {
+	if memdb_get(key).len > 0 {
+		return true
+	}
+	return false
+}
+
+// Returns a logger object, the log level can be set through the environment variable OSAL_LOG_LEVEL
+pub fn get_logger() log.Log {
+	log_level := env_get_default('OSAL_LOG_LEVEL', 'info')
+	mut logger := &log.Log{}
+	logger.set_level(match log_level.to_lower() {
+		'debug' { .debug }
+		'info' { .info }
+		'warn' { .warn }
+		'error' { .error }
+		else { .info }
+	})
+	return *logger
+}
diff --git a/lib/osal/zinit/readme.md b/lib/osal/zinit/readme.md
new file mode 100644
index 00000000..ac6a43b7
--- /dev/null
+++ b/lib/osal/zinit/readme.md
@@ -0,0 +1,50 @@
+# a sal to work with zinit
+
+An easy, reliable way to work with processes.
+
+## Example
+
+```v
+import freeflowuniverse.herolib.osal.zinit
+
+fn main() {
+	do() or { panic(err) }
+}
+
+fn do() ! {
+	mut z := zinit.get()!
+
+	z.destroy()!
+
+	// name string @[required]
+	// cmd string @[required]
+	// cmd_file bool // if we want to force running it as a file which is given to bash -c (not just a cmd in zinit)
+	// cmd_stop string
+	// cmd_test string
+	// test_file bool
+	// after []string
+	// env map[string]string
+	// oneshot bool
+	p := z.new(
+		name: 'test'
+		cmd:  '/bin/bash'
+	)!
+
+	output := p.log()!
+	println(output)
+
+	p.check()! // will check the process is up and running
+
+	p.stop()!
+}
+```
+
+## protocol
+
+sal on top of https://github.com/threefoldtech/zinit/tree/master
+
+the protocol is defined in https://github.com/threefoldtech/zinit/blob/master/docs/protocol.md
\ No newline at end of file
diff --git a/lib/osal/zinit/rpc.v b/lib/osal/zinit/rpc.v
new file mode 100644
index 00000000..62c599bd
--- /dev/null
+++ b/lib/osal/zinit/rpc.v
@@ -0,0 +1,200 @@
+module zinit
+
+import net.unix
+import json
+import freeflowuniverse.herolib.ui.console
+
+// these need to be all private (non pub)
+
+pub struct Client {
+	socket_path string = '/var/run/zinit.sock'
+}
+
+enum State {
+	ok @[json: 'ok']
+	error @[json: 'error']
+}
+
+struct ZinitResponse {
+	state State
+	body  string @[raw]
+}
+
+@[params]
+struct ZinitClientArgs {
+	socket_path string = '/var/run/zinit.sock'
+}
+
+pub fn new_rpc_client(args ZinitClientArgs) Client {
+	return Client{
+		socket_path: args.socket_path
+	}
+}
+
+fn (z Client) connect() !&unix.StreamConn {
+	mut s := unix.connect_stream(z.socket_path)!
+	return s
+}
+
+fn close(sc &unix.StreamConn) {
+	unix.shutdown(sc.sock.handle)
+}
+
+// send a command over the zinit socket, return the raw response
+fn (z Client) rpc(cmd string) !string {
+	mut c := z.connect()!
+	console.print_debug("zinit rpc: '${cmd}'")
+	c.write_string(cmd + '\n')!
+	mut res := []u8{len: 5000, cap: 5000}
+	n := c.read(mut res)!
+	close(c)
+	return res[..n].bytestr()
+}
+
+fn (z Client) list() !map[string]string {
+	response := z.rpc('list')!
+	decoded_response := json.decode(ZinitResponse, response)!
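+	// the zinit protocol answers with a json envelope, e.g.
+	//   {"state":"ok","body":{"service_1":"Running"}}
+	// body is kept raw (@[raw]) and decoded per command below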
+ // println("") + // println(decoded_response) + // println("") + if decoded_response.state == .error { + $if debug { + print_backtrace() + } + return error('zinit list failed: ${decoded_response.body}') + } + return json.decode(map[string]string, decoded_response.body)! +} + +struct ServiceStatusRaw { + after map[string]string + name string + pid int + state string + target string +} + +//{"state":"ok","body":{"after":{"delay":"Success"},"name":"redis","pid":320996,"state":"Running","target":"Up"}} + +// check if the service is known +fn (z Client) isloaded(name string) bool { + // console.print_debug(" -- status rpc: '$name'") + r := z.list() or { return false } + if name !in r { + return false + } + return true +} + +fn (z Client) status(name string) !ServiceStatusRaw { + // console.print_debug(" -- status rpc: '$name'") + r := z.list()! + if name !in r { + $if debug { + print_backtrace() + } + return error("cannot ask status over rpc, service with name:'${name}' not found in rpc daemon.\nFOUND:${r}") + } + + response := z.rpc('status ${name}')! + decoded_response := json.decode(ZinitResponse, response)! + + if decoded_response.state == .error { + $if debug { + print_backtrace() + } + return error('service ${name} status failed: ${decoded_response.body}') + } + + return json.decode(ServiceStatusRaw, decoded_response.body)! +} + +fn (z Client) start(name string) ! { + response := z.rpc('start ${name}')! + decoded_response := json.decode(ZinitResponse, response)! + if decoded_response.state == .error { + $if debug { + print_backtrace() + } + return error('service ${name} start failed: ${decoded_response.body}') + } +} + +fn (z Client) stop(name string) ! { + response := z.rpc('stop ${name}')! + decoded_response := json.decode(ZinitResponse, response)! + if decoded_response.state == .error { + $if debug { + print_backtrace() + } + return error('service ${name} stop failed: ${decoded_response.body}') + } +} + +fn (z Client) forget(name string) ! { + response := z.rpc('forget ${name}')! + decoded_response := json.decode(ZinitResponse, response)! + if decoded_response.state == .error { + $if debug { + print_backtrace() + } + return error('service ${name} forget failed: ${decoded_response.body}') + } +} + +fn (z Client) monitor(name string) ! { + response := z.rpc('monitor ${name}')! + decoded_response := json.decode(ZinitResponse, response)! + if decoded_response.state == .error { + $if debug { + print_backtrace() + } + return error('service ${name} monitor failed: ${decoded_response.body}') + } +} + +fn (z Client) kill(name string, signal string) ! { + response := z.rpc('kill ${name} ${signal}')! + decoded_response := json.decode(ZinitResponse, response)! + if decoded_response.state == .error { + $if debug { + print_backtrace() + } + return error('service ${name} kill failed: ${decoded_response.body}') + } +} + +fn (z Client) shutdown() ! { + response := z.rpc('shutdown')! + decoded_response := json.decode(ZinitResponse, response)! + if decoded_response.state == .error { + $if debug { + print_backtrace() + } + return error('zinit shutdown failed: ${decoded_response.body}') + } +} + +fn (z Client) reboot() ! { + response := z.rpc('reboot')! + decoded_response := json.decode(ZinitResponse, response)! + if decoded_response.state == .error { + $if debug { + print_backtrace() + } + return error('zinit reboot failed: ${decoded_response.body}') + } +} + +fn (z Client) log(name string) !string { + response := z.rpc('log ${name}')! + decoded_response := json.decode(ZinitResponse, response)! 
+ if decoded_response.state == .error { + $if debug { + print_backtrace() + } + return error('zinit log failed: ${decoded_response.body}') + } + + return decoded_response.body +} diff --git a/lib/osal/zinit/rpc_test.v b/lib/osal/zinit/rpc_test.v new file mode 100644 index 00000000..5de89452 --- /dev/null +++ b/lib/osal/zinit/rpc_test.v @@ -0,0 +1,60 @@ +module zinit + +import os +import time + +fn test_zinit() { + // you need to have zinit in your path to run this test + spawn os.execute('zinit -s crystallib/osal/zinit/zinit/zinit.sock init -c crystallib/osal/zinit/zinit') + time.sleep(time.second) + + client := new_rpc_client('crystallib/osal/zinit/zinit/zinit.sock') + + mut ls := client.list()! + mut want_ls := { + 'service_1': 'Running' + 'service_2': 'Running' + } + assert ls == want_ls + + mut st := client.status('service_2')! + assert st.after == { + 'service_1': 'Running' + } + assert st.name == 'service_2' + assert st.state == 'Running' + assert st.target == 'Up' + + client.stop('service_2')! + st = client.status('service_2')! + assert st.target == 'Down' + + time.sleep(time.millisecond * 10) + client.forget('service_2')! + ls = client.list()! + want_ls = { + 'service_1': 'Running' + } + assert ls == want_ls + + client.monitor('service_2')! + time.sleep(time.millisecond * 10) + st = client.status('service_2')! + assert st.after == { + 'service_1': 'Running' + } + assert st.name == 'service_2' + assert st.state == 'Running' + assert st.target == 'Up' + + client.stop('service_2')! + time.sleep(time.millisecond * 10) + client.start('service_2')! + st = client.status('service_2')! + assert st.target == 'Up' + + client.kill('service_1', 'sigterm')! + time.sleep(time.millisecond * 10) + st = client.status('service_1')! + assert st.state.contains('SIGTERM') +} diff --git a/lib/osal/zinit/zinit.v b/lib/osal/zinit/zinit.v new file mode 100644 index 00000000..0aed427b --- /dev/null +++ b/lib/osal/zinit/zinit.v @@ -0,0 +1,160 @@ +module zinit + +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.data.ourtime +import os +import json + +@[heap] +pub struct Zinit { +pub mut: + processes map[string]ZProcess + path pathlib.Path + pathcmds pathlib.Path +} + +// will delete the process if it exists while starting +pub fn (mut zinit Zinit) new(args_ ZProcessNewArgs) !ZProcess { + console.print_header(' zinit process new') + mut args := args_ + + if args.cmd.len == 0 { + $if debug { + print_backtrace() + } + return error('cmd cannot be empty for ${args} in zinit.') + } + + if zinit.exists(args.name) { + mut p := zinit.get(args.name)! + p.destroy()! + } + + mut zp := ZProcess{ + name: args.name + cmd: args.cmd + cmd_test: args.cmd_test + cmd_stop: args.cmd_stop + env: args.env.move() + after: args.after + start: args.start + restart: args.restart + oneshot: args.oneshot + workdir: args.workdir + } + + zinit.cmd_write(args.name, args.cmd, '_start', {}, args.workdir)! + zinit.cmd_write(args.name, args.cmd_test, '_test', {}, args.workdir)! + zinit.cmd_write(args.name, args.cmd_stop, '_stop', {}, args.workdir)! + + mut json_path := zinit.pathcmds.file_get_new('${args.name}.json')! + json_content := json.encode(args) + json_path.write(json_content)! + + mut pathyaml := zinit.path.file_get_new(zp.name + '.yaml')! + // console.print_debug('debug zprocess path yaml: ${pathyaml}') + pathyaml.write(zp.config_content()!)! + if zp.start { + zp.start()! 
+	}
+	zinit.processes[args.name] = zp
+
+	return zp
+}
+
+fn (mut zinit Zinit) cmd_write(name string, cmd string, cat string, env map[string]string, workdir string) !string {
+	if cmd.trim_space() == '' {
+		return ''
+	}
+	mut zinitobj := new()!
+	mut pathcmd := zinitobj.pathcmds.file_get_new('${name}${cat}.sh')!
+	mut cmd_out := '#!/bin/bash\nset -e\n\n'
+
+	if cat == '_start' {
+		cmd_out += 'echo === START ======== ${ourtime.now().str()} === \n'
+	}
+	for key, val in env {
+		cmd_out += '${key}=${val}\n'
+	}
+
+	if workdir.trim_space() != '' {
+		cmd_out += 'cd ${workdir.trim_space()}\n'
+	}
+
+	cmd_out += texttools.dedent(cmd) + '\n'
+	pathcmd.write(cmd_out)!
+	pathcmd.chmod(0o770)! // rwxrwx--- (octal)
+	return '/bin/bash -c ${pathcmd.path}'
+}
+
+pub fn (mut zinit Zinit) get(name_ string) !ZProcess {
+	name := texttools.name_fix(name_)
+	// console.print_debug(zinit)
+	return zinit.processes[name] or { return error("cannot find process in zinit:'${name}'") }
+}
+
+pub fn (mut zinit Zinit) exists(name_ string) bool {
+	name := texttools.name_fix(name_)
+	if name in zinit.processes {
+		return true
+	}
+	return false
+}
+
+pub fn (mut zinit Zinit) stop(name string) ! {
+	mut p := zinit.get(name)!
+
+	p.stop()!
+}
+
+pub fn (mut zinit Zinit) start(name string) ! {
+	mut p := zinit.get(name)!
+	p.start()!
+}
+
+pub fn (mut zinit Zinit) delete(name string) ! {
+	mut p := zinit.get(name)!
+	p.destroy()!
+}
+
+pub fn (mut self Zinit) load() ! {
+	cmd := 'zinit list'
+	mut res := os.execute(cmd)
+	if res.exit_code > 0 {
+		if res.output.contains('failed to connect') {
+			res = os.execute(cmd)
+			if res.exit_code > 0 {
+				$if debug {
+					print_backtrace()
+				}
+				return error("can't do zinit list, after start of zinit.\n${res}")
+			}
+		} else {
+			$if debug {
+				print_backtrace()
+			}
+			return error("can't do zinit list.\n${res}")
+		}
+	}
+	mut state := ''
+	for line in res.output.split_into_lines() {
+		if line.starts_with('---') {
+			state = 'ok'
+			continue
+		}
+		if state == 'ok' && line.contains(':') {
+			name := line.split(':')[0].to_lower().trim_space()
+			mut zp := ZProcess{
+				name: name
+			}
+			zp.load()!
+			self.processes[name] = zp
+		}
+	}
+}
+
+pub fn (mut self Zinit) names() []string {
+	return self.processes.keys()
+}
diff --git a/lib/osal/zinit/zinit/service_1.yaml b/lib/osal/zinit/zinit/service_1.yaml
new file mode 100644
index 00000000..856129a6
--- /dev/null
+++ b/lib/osal/zinit/zinit/service_1.yaml
@@ -0,0 +1 @@
+exec: "sleep 1m"
\ No newline at end of file
diff --git a/lib/osal/zinit/zinit/service_2.yaml b/lib/osal/zinit/zinit/service_2.yaml
new file mode 100644
index 00000000..aba0a181
--- /dev/null
+++ b/lib/osal/zinit/zinit/service_2.yaml
@@ -0,0 +1,3 @@
+exec: "sleep 1m"
+after:
+  - service_1
\ No newline at end of file
diff --git a/lib/osal/zinit/zinit_factory.v b/lib/osal/zinit/zinit_factory.v
new file mode 100644
index 00000000..1d26daf8
--- /dev/null
+++ b/lib/osal/zinit/zinit_factory.v
@@ -0,0 +1,36 @@
+module zinit
+
+import freeflowuniverse.herolib.core.pathlib
+import freeflowuniverse.herolib.ui.console
+import freeflowuniverse.herolib.osal
+
+__global (
+	zinit_global_manager []Zinit
+)
+
+pub fn new() !Zinit {
+	if zinit_global_manager.len == 0 {
+		mut z := Zinit{
+			path:     pathlib.get_dir(path: '/etc/zinit', create: true)!
+			pathcmds: pathlib.get_dir(path: '/etc/zinit/cmds', create: true)!
+		}
+		// register before loading: ZProcess.load() calls new() again,
+		// so the global must already be populated to avoid recursion
+		zinit_global_manager << z
+		z.load()!
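+		// cached as a singleton: subsequent new() calls return this instance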
+	}
+	return zinit_global_manager[0]
+}
+
+pub fn check() bool {
+	if !osal.cmd_exists('zinit') {
+		return false
+	}
+	// println(osal.execute_ok('zinit list'))
+	return osal.execute_ok('zinit list')
+}
+
+// remove all services known to zinit
+pub fn destroy() ! {
+	mut zinitpath := pathlib.get_dir(path: '/etc/zinit', create: true)!
+	zinitpath.empty()!
+	console.print_header(' zinit destroyed')
+}
diff --git a/lib/osal/zinit/zinit_stateless.v b/lib/osal/zinit/zinit_stateless.v
new file mode 100644
index 00000000..3f95755e
--- /dev/null
+++ b/lib/osal/zinit/zinit_stateless.v
@@ -0,0 +1,134 @@
+module zinit
+
+import freeflowuniverse.herolib.core.texttools
+import freeflowuniverse.herolib.ui.console
+import freeflowuniverse.herolib.core.pathlib
+import freeflowuniverse.herolib.data.ourtime
+import time
+import json
+
+@[params]
+pub struct ZinitConfig {
+	path     string = '/etc/zinit'
+	pathcmds string = '/etc/zinit/cmds'
+}
+
+pub struct ZinitStateless {
+pub mut:
+	client   Client
+	path     pathlib.Path
+	pathcmds pathlib.Path
+}
+
+pub fn new_stateless(z ZinitConfig) !ZinitStateless {
+	return ZinitStateless{
+		client:   new_rpc_client()
+		path:     pathlib.get_dir(path: z.path, create: true)!
+		pathcmds: pathlib.get_dir(path: z.pathcmds, create: true)!
+	}
+}
+
+// will delete the process if it exists while starting
+pub fn (mut zinit ZinitStateless) new(args_ ZProcessNewArgs) !ZProcess {
+	console.print_header(' zinit process new')
+	mut args := args_
+
+	if args.cmd.len == 0 {
+		$if debug {
+			print_backtrace()
+		}
+		return error('cmd cannot be empty for ${args} in zinit.')
+	}
+
+	if zinit.exists(args.name)! {
+		zinit.delete(args.name)!
+	}
+
+	mut zp := ZProcess{
+		name:     args.name
+		cmd:      args.cmd
+		cmd_test: args.cmd_test
+		cmd_stop: args.cmd_stop
+		env:      args.env.move()
+		after:    args.after
+		start:    args.start
+		restart:  args.restart
+		oneshot:  args.oneshot
+		workdir:  args.workdir
+	}
+
+	zinit.cmd_write(args.name, args.cmd, '_start', {}, args.workdir)!
+	zinit.cmd_write(args.name, args.cmd_test, '_test', {}, args.workdir)!
+	zinit.cmd_write(args.name, args.cmd_stop, '_stop', {}, args.workdir)!
+
+	mut json_path := zinit.pathcmds.file_get_new('${args.name}.json')!
+	json_content := json.encode(args)
+	json_path.write(json_content)!
+
+	mut pathyaml := zinit.path.file_get_new(zp.name + '.yaml')!
+	// console.print_debug('debug zprocess path yaml: ${pathyaml}')
+	pathyaml.write(zp.config_content()!)!
+
+	zinit.client.monitor(args.name)!
+	assert zinit.exists(args.name)!
+
+	if args.start {
+		zinit.client.start(args.name)!
+	}
+
+	return zp
+}
+
+fn (mut zinit ZinitStateless) cmd_write(name string, cmd string, cat string, env map[string]string, workdir string) !string {
+	if cmd.trim_space() == '' {
+		return ''
+	}
+	mut zinitobj := new()!
+	mut pathcmd := zinitobj.pathcmds.file_get_new('${name}${cat}.sh')!
+	mut cmd_out := '#!/bin/bash\nset -e\n\n'
+
+	if cat == '_start' {
+		cmd_out += 'echo === START ======== ${ourtime.now().str()} === \n'
+	}
+	for key, val in env {
+		cmd_out += '${key}=${val}\n'
+	}
+
+	if workdir.trim_space() != '' {
+		cmd_out += 'cd ${workdir.trim_space()}\n'
+	}
+
+	cmd_out += texttools.dedent(cmd) + '\n'
+	pathcmd.write(cmd_out)!
+	pathcmd.chmod(0o770)! // rwxrwx--- (octal)
+	return '/bin/bash -c ${pathcmd.path}'
+}
+
+pub fn (zinit ZinitStateless) exists(name string) !bool {
+	return name in zinit.client.list()!
+}
+
+pub fn (mut zinit ZinitStateless) stop(name string) ! {
+	zinit.client.stop(name)!
+}
+
+pub fn (mut zinit ZinitStateless) start(name string) !
{ + zinit.client.start(name)! +} + +pub fn (mut zinit ZinitStateless) running(name string) !bool { + if !zinit.exists(name)! { + return false + } + return zinit.client.status(name)!.state == 'Running' +} + +pub fn (mut zinit ZinitStateless) delete(name string) ! { + zinit.client.stop(name)! + time.sleep(1000000) + zinit.client.forget(name)! +} + +pub fn (mut self ZinitStateless) names() ![]string { + return self.client.list()!.keys() +} diff --git a/lib/osal/zinit/zprocess.v b/lib/osal/zinit/zprocess.v new file mode 100644 index 00000000..8de6b615 --- /dev/null +++ b/lib/osal/zinit/zprocess.v @@ -0,0 +1,290 @@ +module zinit + +import os +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.data.ourtime +import freeflowuniverse.herolib.ui.console +import time + +pub struct ZProcess { +pub: + name string = 'default' +pub mut: + cmd string // command to start + cmd_stop string // command to stop (optional) + cmd_test string // command line to test service is running + workdir string // where to execute the commands + status ZProcessStatus + pid int + after []string // list of service we depend on + env map[string]string + oneshot bool + start bool = true + restart bool = true // whether the process should be restarted on failure + description string // not used in zinit +} + +pub enum ZProcessStatus { + unknown + init + ok + killed + error + blocked + spawned +} + +pub enum StartupManagerType { + unknown + zinit + systemd + screen +} + +@[params] +pub struct ZProcessNewArgs { +pub mut: + name string @[required] + cmd string @[required] + cmd_stop string // command to stop (optional) + cmd_test string // command line to test service is running + workdir string // where to execute the commands + after []string // list of service we depend on + env map[string]string + oneshot bool + start bool = true + restart bool = true // whether the process should be restarted on failure + description string // not used in zinit + startuptype StartupManagerType +} + +pub fn (zp ZProcess) cmd() !string { + mut zinitobj := new()! + mut path := zinitobj.pathcmds.file_get_new('${zp.name}_start.sh')! + if path.exists() { + return 'bash -c \'${path.path}\'' + } else { + if zp.cmd.contains('\n') { + return error('cmd cannot have \\n and not have cmd file on disk on ${path.path}') + } + if zp.cmd == '' { + return error('cmd cannot be empty') + } + } + return '${zp.cmd}' +} + +pub fn (zp ZProcess) cmdtest() !string { + mut zinitobj := new()! + mut path := zinitobj.pathcmds.file_get_new('${zp.name}_test.sh')! + if path.exists() { + return "bash -c \"${path.path}\"" + } else { + if zp.cmd_test.contains('\n') { + return error('cmd test cannot have \\n and not have cmd file on disk on ${path.path}') + } + if zp.cmd_test == '' { + return error('cmd test cannot be empty') + } + } + return '${zp.cmd_test}' +} + +pub fn (zp ZProcess) cmdstop() !string { + mut zinitobj := new()! + mut path := zinitobj.pathcmds.file_get_new('${zp.name}_stop.sh')! 
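+	// same convention as cmd() and cmdtest() above: a generated <name>_stop.sh
+	// on disk wins; otherwise cmd_stop must be a non-empty single line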
+ if path.exists() { + return "bash -c \"${path.path}\"" + } else { + if zp.cmd_stop.contains('\n') { + return error('cmd stop cannot have \\n and not have cmd file on disk on ${path.path}') + } + if zp.cmd_stop == '' { + return error('cmd stop cannot be empty') + } + } + return '${zp.cmd_stop}' +} + +// return the configuration as needs to be given to zinit +fn (zp ZProcess) config_content() !string { + mut out := " +exec: \"${zp.cmd()!}\" +signal: + stop: SIGKILL +log: ring +" + if zp.cmd_test.len > 0 { + out += "test: \"${zp.cmdtest()!}\"\n" + } + if zp.oneshot { + out += 'oneshot: true\n' + } + if zp.after.len > 0 { + out += 'after:\n' + for val in zp.after { + out += ' - ${val}\n' + } + } + if zp.env.len > 0 { + out += 'env:\n' + for key, val in zp.env { + out += ' ${key}: \'${val}\'\n' + } + } + return out +} + +pub fn (zp ZProcess) start() ! { + console.print_header(' start ${zp.name}') + mut client := new_rpc_client() + if !client.isloaded(zp.name) { + client.monitor(zp.name)! // means will check it out + } +} + +pub fn (mut zp ZProcess) stop() ! { + console.print_header(' stop ${zp.name}') + st := zp.status()! + + // QUESTION: removed error, since those can also be stopped + // otherwise fails to forget the zp when destroying + if st in [.unknown, .killed] { + return + } + mut client := new_rpc_client() + client.stop(zp.name)! + zp.status()! +} + +pub fn (mut zp ZProcess) destroy() ! { + console.print_header(' destroy ${zp.name}') + zp.stop()! + // return error('ssssa') + mut client := new_rpc_client() + client.forget(zp.name) or {} + mut zinit_obj := new()! + mut path1 := zinit_obj.pathcmds.file_get_new('${zp.name}_start.sh')! + mut path2 := zinit_obj.pathcmds.file_get_new('${zp.name}_stop.sh')! + mut path3 := zinit_obj.pathcmds.file_get_new('${zp.name}_test.sh')! + mut pathyaml := zinit_obj.path.file_get_new(zp.name + '.yaml')! + path1.delete()! + path2.delete()! + path3.delete()! + pathyaml.delete()! +} + +// how long to wait till the specified output shows up, timeout in sec +pub fn (mut zp ZProcess) output_wait(c_ string, timeoutsec int) ! { + zp.start()! + _ = new_rpc_client() + zp.check()! + mut t := ourtime.now() + start := t.unix() + c := c_.replace('\n', '') + for _ in 0 .. 2000 { + o := zp.log()! + console.print_debug(o) + $if debug { + console.print_debug(" - zinit ${zp.name}: wait for: '${c}'") + } + // need to replace \n because can be wrapped because of size of pane + if o.replace('\n', '').contains(c) { + return + } + mut t2 := ourtime.now() + if t2.unix() > start + timeoutsec { + return error('timeout on output wait for zinit.\n${zp.name} .\nwaiting for:\n${c}') + } + time.sleep(100 * time.millisecond) + } +} + +// check if process is running if yes return the log +pub fn (zp ZProcess) log() !string { + assert zp.name.len > 2 + cmd := 'zinit log ${zp.name} -s' + res := os.execute(cmd) + if res.exit_code > 0 { + $if debug { + print_backtrace() + } + return error('zprocesslog: could not execute ${cmd}') + } + mut out := []string{} + + for line in res.output.split_into_lines() { + if line.contains('=== START ========') { + out = []string{} + } + out << line + } + + return out.join_lines() +} + +// return status of process +//``` +// enum ZProcessStatus { +// unknown +// init +// ok +// error +// blocked +// spawned +// killed +// } +//``` +pub fn (mut zp ZProcess) status() !ZProcessStatus { + cmd := 'zinit status ${zp.name}' + r := osal.execute_silent(cmd)! 
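+	// parse the text output of `zinit status <name>` line by line:
+	// a 'pid:' line gives the pid, a 'state:' line maps onto ZProcessStatus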
+ for line in r.split_into_lines() { + if line.starts_with('pid') { + zp.pid = line.split('pid:')[1].trim_space().int() + } + if line.starts_with('state') { + st := line.split('state:')[1].trim_space().to_lower() + // console.print_debug(" status string: $st") + if st.contains('sigkill') { + zp.status = .killed + } else if st.contains('error') { + zp.status = .error + } else if st.contains('spawned') { + zp.status = .error + } else if st.contains('running') { + zp.status = .ok + } else { + zp.status = .unknown + } + } + } + // mut client := new_rpc_client() + // st := client.status(zp.name) or {return .unknown} + // statusstr:=st.state.to_lower() + // if statusstr=="running"{ + // zp.status = .ok + // }else if statusstr.contains("error"){ + // zp.status = .error + // }else{ + // console.print_debug(st) + // return error("status not implemented yet") + // } + return zp.status +} + +// will check that process is running +pub fn (mut zp ZProcess) check() ! { + status := zp.status()! + if status != .ok { + return error('process is not running.\n${zp}') + } +} + +// will check that process is running +pub fn (mut zp ZProcess) isrunning() !bool { + status := zp.status()! + if status != .ok { + return false + } + return true +} diff --git a/lib/osal/zinit/zprocess_load.v b/lib/osal/zinit/zprocess_load.v new file mode 100644 index 00000000..6e3a6f5d --- /dev/null +++ b/lib/osal/zinit/zprocess_load.v @@ -0,0 +1,80 @@ +module zinit + +pub fn (mut zp ZProcess) load() ! { + zp.status()! + mut zinitobj := new()! + + if !zinitobj.path.file_exists(zp.name + '.yaml') { + $if debug { + print_backtrace() + } + mut pathyaml := zinitobj.path.file_get_new(zp.name + '.yaml')! + content := zp.config_content()! + pathyaml.write(content)! + } + + // if zinitobj.pathcmds.file_exists(zp.name) { + // // means we can load the special cmd + // mut pathcmd := zinitobj.pathcmds.file_get(zp.name)! + // zp.cmd = pathcmd.read()! + // } + // if zinitobj.pathtests.file_exists(zp.name) { + // // means we can load the special cmd + // mut pathtest := zinitobj.path.file_get(zp.name)! + // zp.test = pathtest.read()! + // } + if zinitobj.pathcmds.file_exists(zp.name) { + // means we can load the special cmd + mut pathcmd := zinitobj.pathcmds.file_get(zp.name)! + zp.cmd = pathcmd.read()! + } + + mut pathyaml := zinitobj.path.file_get_new(zp.name + '.yaml')! + contentyaml := pathyaml.read()! 
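+	// shape of the yaml parsed below, as written by config_content()
+	// (values here are illustrative only):
+	//   exec: "redis-server"
+	//   after:
+	//     - service_1
+	//   env:
+	//     KEY: 'val'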
+
+	// the parsing of the file is needed to find the info which we can't get from the zinit daemon
+
+	mut st := ''
+	for line in contentyaml.split_into_lines() {
+		if line.starts_with('exec:') && zp.cmd.len == 0 {
+			zp.cmd = line.split('exec:')[1].trim('\'" ')
+		}
+		if line.starts_with('test:') && zp.cmd_test.len == 0 {
+			zp.cmd_test = line.split('test:')[1].trim('\'" ')
+		}
+		if line.starts_with('after:') {
+			st = 'after'
+			continue
+		}
+		if line.starts_with('env:') {
+			st = 'env'
+			continue
+		}
+		if st == 'after' {
+			if line.trim_space() == '' {
+				st = 'start'
+			} else if line.trim_space().starts_with('-') {
+				_, after := line.split_once('-') or {
+					panic('bug in ${pathyaml} for line ${line}')
+				}
+				zp.after << after.to_lower().trim_space()
+			}
+		}
+		if st == 'env' {
+			if line.trim_space() == '' {
+				st = 'start'
+			} else if line.contains(':') {
+				key, val := line.split_once(':') or {
+					panic('bug in ${pathyaml} for line ${line} for env')
+				}
+				zp.env[key.trim(' \'"')] = val.trim(' \'"')
+			}
+		}
+	}
+}
diff --git a/lib/ui/console/array.v b/lib/ui/console/array.v
new file mode 100644
index 00000000..dcf48178
--- /dev/null
+++ b/lib/ui/console/array.v
@@ -0,0 +1,51 @@
+module console
+
+// print a 2-dimensional array; delimiter is placed between columns
+pub fn print_array(arr [][]string, delimiter string, sort bool) {
+	if arr.len == 0 {
+		return
+	}
+
+	mut maxwidth := []int{len: arr[0].len, cap: arr[0].len, init: 3}
+	mut x := 0
+	mut y := 0
+	for y < arr.len {
+		for x < arr[y].len {
+			if maxwidth[x] < arr[y][x].len {
+				maxwidth[x] = arr[y][x].len
+			}
+			x++
+		}
+		x = 0
+		y++
+	}
+
+	x = 0
+	y = 0
+	mut res := []string{}
+	for y < arr.len {
+		mut row := ''
+		for x < arr[y].len {
+			row += expand(arr[y][x], maxwidth[x], ' ') + delimiter
+			x++
+		}
+		res << row
+		x = 0
+		y++
+	}
+	if sort {
+		res.sort_ignore_case()
+	}
+	// console.print_debug(res)
+	print_stdout(res.join_lines())
+}
+
+// pad txt to length l with the given filler string (normally ' ')
+pub fn expand(txt_ string, l int, with string) string {
+	mut txt := txt_
+	if l > txt.len {
+		extra := l - txt.len
+		txt += with.repeat(extra)
+	}
+	return txt
+}
diff --git a/lib/ui/console/chalk.v b/lib/ui/console/chalk.v
new file mode 100644
index 00000000..d0b18337
--- /dev/null
+++ b/lib/ui/console/chalk.v
@@ -0,0 +1,234 @@
+module console
+
+// ORIGINAL CODE COMES FROM https://github.com/etienne-napoleone/chalk/blob/master/chalk.v
+// CREDITS TO https://github.com/etienne-napoleone (MIT license)
+
+const prefix = '\e['
+const suffix = 'm'
+
+pub enum ForegroundColor {
+	default_color = 39 // 'default' is a reserved keyword in V
+	white         = 97
+	black         = 30
+	red           = 31
+	green         = 32
+	yellow        = 33
+	blue          = 34
+	magenta       = 35
+	cyan          = 36
+	light_gray    = 37
+	dark_gray     = 90
+	light_red     = 91
+	light_green   = 92
+	light_yellow  = 93
+	light_blue    = 94
+	light_magenta = 95
+	light_cyan    = 96
+}
+
+pub enum BackgroundColor {
+	default_color = 49 // 'default' is a reserved keyword in V
+	black         = 40
+	red           = 41
+	green         = 42
+	yellow        = 43
+	blue          = 44
+	magenta       = 45
+	cyan          = 46
+	light_gray    = 47
+	dark_gray     = 100
+	light_red     = 101
+	light_green   = 102
+	light_yellow  = 103
+	light_blue    = 104
+	light_magenta = 105
+	light_cyan    = 106
+	white         = 107
+}
+
+pub enum Style {
+	normal    = 99
+	bold      = 1
+	dim       = 2
+	underline = 4
+	blink     = 5
+	reverse   = 7
+	hidden    = 8
+}
+
+pub const reset = '${prefix}0${suffix}'
+
+// will give ansi codes to change foreground color .
+// don't forget to call reset to change back to normal +//``` +// enum ForegroundColor { +// black = 30 +// red = 31 +// green = 32 +// yellow = 33 +// blue = 34 +// magenta = 35 +// cyan = 36 +// default_color = 39 // 'default' is a reserved keyword in V +// light_gray = 37 +// dark_gray = 90 +// light_red = 91 +// light_green = 92 +// light_yellow = 93 +// light_blue = 94 +// light_magenta = 95 +// light_cyan = 96 +// white = 97 +// } +// ``` +pub fn color_fg(c ForegroundColor) string { + return '${prefix}${int(c)}${suffix}' +} + +// will give ansi codes to change background color . +// don't forget to call reset to change back to normal +//``` +// enum BackgroundColor { +// black = 40 +// red = 41 +// green = 42 +// yellow = 43 +// blue = 44 +// magenta = 45 +// cyan = 46 +// default_color = 49 // 'default' is a reserved keyword in V +// light_gray = 47 +// dark_gray = 100 +// light_red = 101 +// light_green = 102 +// light_yellow = 103 +// light_blue = 104 +// light_magenta = 105 +// light_cyan = 106 +// white = 107 +// } +// ``` +pub fn color_bg(c BackgroundColor) string { + return '${prefix}${int(c)}${suffix}' +} + +// will give ansi codes to change style . +// don't forget to call reset to change back to normal +//``` +// enum Style { +// normal = 99 +// bold = 1 +// dim = 2 +// underline = 4 +// blink = 5 +// reverse = 7 +// hidden = 8 +// } +// ``` +pub fn style(c Style) string { + return '${prefix}${int(c)}${suffix}' +} + +pub fn reset() string { + return reset +} + +pub struct PrintArgs { +pub mut: + foreground ForegroundColor + background BackgroundColor + text string + style Style + reset_before bool = true + reset_after bool = true +} + +// print with colors, reset... +//``` +// foreground ForegroundColor +// background BackgroundColor +// text string +// style Style +// reset_before bool = true +// reset_after bool = true +//``` +pub fn cprint(args PrintArgs) { + mut out := []string{} + if args.reset_before { + out << reset() + } + if args.foreground != .default_color { + out << color_fg(args.foreground) + } + if args.background != .default_color { + out << color_bg(args.background) + } + if args.style != .normal { + out << style(args.style) + } + if args.text.len > 0 { + out << args.text + } + if args.reset_after { + out << reset() + } + if !silent_get() { + print(out.join('')) + } +} + +pub fn cprintln(args_ PrintArgs) { + mut args := args_ + args.text = trim(args.text) + if !(args.text.ends_with('\n')) { + args.text += '\n' + } + cprint(args) +} + +// const foreground_colors = { +// 'black': 30 +// 'red': 31 +// 'green': 32 +// 'yellow': 33 +// 'blue': 34 +// 'magenta': 35 +// 'cyan': 36 +// 'default': 39 +// 'light_gray': 37 +// 'dark_gray': 90 +// 'light_red': 91 +// 'light_green': 92 +// 'light_yellow': 93 +// 'light_blue': 94 +// 'light_magenta': 95 +// 'light_cyan': 96 +// 'white': 97 +// } +// const background_colors = { +// 'black': 40 +// 'red': 41 +// 'green': 42 +// 'yellow': 44 +// 'blue': 44 +// 'magenta': 45 +// 'cyan': 46 +// 'default': 49 +// 'light_gray': 47 +// 'dark_gray': 100 +// 'light_red': 101 +// 'light_green': 102 +// 'light_yellow': 103 +// 'light_blue': 104 +// 'light_magenta': 105 +// 'light_cyan': 106 +// 'white': 107 +// } +// const style = { +// 'bold': 1 +// 'dim': 2 +// 'underline': 4 +// 'blink': 5 +// 'reverse': 7 +// 'hidden': 8 +// } diff --git a/lib/ui/console/console.v b/lib/ui/console/console.v new file mode 100644 index 00000000..2593a48b --- /dev/null +++ b/lib/ui/console/console.v @@ -0,0 +1,103 @@ +module console + +import 
freeflowuniverse.herolib.core.texttools + +pub fn clear() { + if !silent_get() { + print('\033[2J') + } +} + +pub fn print_header(txt string) { + txt2 := trim(texttools.indent(txt.trim_left(' -'), ' - ')) + mut c := get() + c.reset() + if !c.prev_title { + lf() + } + cprintln(foreground: .light_yellow, text: txt2) + c.prev_title = true +} + +pub fn print_item(txt string) { + mut c := get() + if c.prev_title { + lf() + } + c.prev_item = true + txt2 := trim(texttools.indent(txt, ' . ')) + cprintln(foreground: .light_green, text: txt2) + c.reset() +} + +pub interface IPrintable {} + +pub fn print_debug(i IPrintable) { + $if debug { + // to print anything + txt := '${i}'.trim_string_left("console.IPrintable('").trim_string_right("')") + mut c := get() + if c.prev_title || c.prev_item { + lf() + } + txt2 := trim(texttools.indent(txt, ' ')) + cprintln(foreground: .light_gray, text: txt2) + c.reset() + } +} + +pub fn print_debug_title(title string, txt string) { + $if debug { + print_header(title) + lf() + mut c := get() + if c.prev_title || c.prev_item { + lf() + } + txt2 := trim(texttools.indent(txt, ' ')) + cprintln(foreground: .light_gray, text: txt2) + c.reset() + lf() + } +} + +pub fn print_stdout(txt string) { + mut c := get() + c.status() + if c.prev_title || c.prev_item { + lf() + } + txt2 := trim(texttools.indent(txt, ' ')) + cprintln(foreground: .light_blue, text: txt2) + // print_backtrace() + c.reset() +} + +pub fn print_lf(nr int) { + for _ in 0 .. nr { + cprintln(text: '') + } +} + +pub fn print_stderr(txt string) { + mut c := get() + if c.prev_title || c.prev_item { + lf() + } + txt2 := trim(texttools.indent(txt, ' ')) + cprintln(foreground: .red, text: txt2) + c.reset() +} + +pub fn print_green(txt string) { + mut c := get() + if c.prev_title || c.prev_item { + lf() + } + txt2 := trim(texttools.indent(txt, ' ')) + cprintln(foreground: .green, text: txt2) + c.reset() +} + +// import freeflowuniverse.herolib.ui.console +// console.print_header() diff --git a/lib/ui/console/dropdown.v b/lib/ui/console/dropdown.v new file mode 100644 index 00000000..ac2e42f7 --- /dev/null +++ b/lib/ui/console/dropdown.v @@ -0,0 +1,180 @@ +module console + +// import freeflowuniverse.herolib.core.texttools +// import freeflowuniverse.herolib.ui.console { color_fg } +import freeflowuniverse.herolib.ui.uimodel { DropDownArgs } +import os + +fn (mut c UIConsole) ask_dropdown_internal(args DropDownArgs) !string { + if silent_get() { + panic("can't do ask_... 
when in silent mode") + } + if args.clear { + clear() // clears the screen + } + if args.description.len > 0 { + cprintln(style: .bold, text: args.description) + } + if args.warning.len > 0 { + cprintln(foreground: .red, text: args.warning + '\n') + } + print_debug('\nChoices: ${args.choice_message}\n') + mut items2 := args.items.clone() + items2.sort() + mut nr := 0 + for item in items2 { + nr += 1 + print_header(' ${nr} : ${item}') + } + if args.all { + print_header(' all : *') + } + if args.default.len > 0 { + print_debug('\n - default : ${args.default.join(',')} (press enter to select default)') + } + print_debug('') + print_debug(' - Make your choice:') + choice := os.get_raw_line().trim(' \n') + if choice.trim_space() == '*' { + // means we return all + return '999999' + } + if choice.trim_space() == '' && args.default.len > 0 { + return '999998' + } + return choice +} + +// return the dropdown as an int +// description string +// items []string +// warning string +// clear bool = true +pub fn (mut c UIConsole) ask_dropdown_int(args_ DropDownArgs) !int { + if silent_get() { + panic("can't do ask_... when in silent mode") + } + mut args := args_ + args.items.sort() + choice := c.ask_dropdown_internal(args)! + + nr := args.items.len + + if choice.contains(',') { + return c.ask_dropdown_int( + clear: true + description: args.description + all: args.all + items: args.items + default: args.default + warning: 'Choice needs to be a number larger than 0 and smaller than ${nr + 1}, and only 1 return' + )! + } + + choice_int := choice.int() + + if choice_int == 999999 { + return 1 + } else if choice_int == 999998 { + default := args.default[0] or { return 1 } + return args.items.index(default) + 1 + } + + if choice_int < 1 || choice_int > nr { + return c.ask_dropdown_int( + clear: true + description: args.description + all: args.all + items: args.items + default: args.default + warning: 'Choice needs to be a number larger than 0 and smaller than ${nr + 1}' + )! + } + return choice_int +} + +// result can be multiple, aloso can select all +// description string +// items []string +// warning string +// clear bool = true +pub fn (mut c UIConsole) ask_dropdown_multiple(args_ DropDownArgs) ![]string { + if silent_get() { + panic("can't do ask_... when in silent mode") + } + mut args := args_ + args.items.sort() + res := c.ask_dropdown_internal( + clear: args.clear + description: args.description + all: args.all + items: args.items + default: args.default + warning: args.warning + choice_message: '(multiple is possible)' + )! + if res == '999999' { + return args.items + } else if res == '999998' { + return args.default + } + + // check valid input + mut bad := false + nr := args.items.len + for item in res.split(',') { + if item.trim_space().len > 0 { + choice_int := item.int() + if choice_int < 1 || choice_int > nr { + bad = true + } + } + } + + if bad { + return c.ask_dropdown_multiple( + clear: true + description: args.description + all: args.all + items: args.items + default: args.default + warning: 'Choice needs to be a number larger than 0 and smaller than ${nr + 1}' + )! 
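+		// unreachable past this point: the recursive call above re-asks with a
+		// warning until every comma separated entry is within 1..nr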
+ } + + mut res2 := []string{} + for item in res.split(',') { + if item.trim_space().len > 0 { + i := item.int() + res2 << args.items[i - 1] or { panic('bug') } + } + } + return res2 +} + +// will return the string as given as response +// description string +// items []string +// warning string +// clear bool = true +pub fn (mut c UIConsole) ask_dropdown(args DropDownArgs) !string { + if silent_get() { + panic("can't do ask_... when in silent mode") + } + res := c.ask_dropdown_int( + clear: args.clear + description: args.description + all: args.all + items: args.items + default: args.default + warning: '' + )! + if res == 999998 { + if args.default.len > 1 { + return error('more than 1 default for single choice.\n${args}') + } + print(args) + return args.default[0] or { panic('bug in default args for ask_dropdown_string.\n') } + } + return args.items[res - 1] +} diff --git a/lib/ui/console/factory.v b/lib/ui/console/factory.v new file mode 100644 index 00000000..29d7e40c --- /dev/null +++ b/lib/ui/console/factory.v @@ -0,0 +1,79 @@ +module console + +import freeflowuniverse.herolib.core.texttools + +__global ( + consoles map[string]&UIConsole + silent bool +) + +pub fn silent_set() { + silent = true +} + +pub fn silent_unset() { + silent = false +} + +pub fn silent_get() bool { + return silent +} + +pub struct UIConsole { +pub mut: + x_max int = 80 + y_max int = 60 + prev_lf bool + prev_title bool + prev_item bool +} + +pub fn (mut c UIConsole) reset() { + c.prev_lf = false + c.prev_title = false + c.prev_item = false +} + +pub fn (mut c UIConsole) status() string { + mut out := 'status: ' + if c.prev_lf { + out += 'L ' + } + if c.prev_title { + out += 'T ' + } + if c.prev_item { + out += 'I ' + } + return out.trim_space() +} + +pub fn new() UIConsole { + return UIConsole{} +} + +fn init() { + mut c := UIConsole{} + consoles['main'] = &c +} + +fn get() &UIConsole { + return consoles['main'] or { panic('bug') } +} + +pub fn trim(c_ string) string { + c := texttools.remove_double_lines(c_) + return c +} + +// line feed +pub fn lf() { + mut c := get() + if c.prev_lf { + return + } + if !silent_get() { + print('\n') + } + c.prev_lf = true +} diff --git a/lib/ui/console/question.v b/lib/ui/console/question.v new file mode 100644 index 00000000..fce8204e --- /dev/null +++ b/lib/ui/console/question.v @@ -0,0 +1,53 @@ +module console + +import os +import freeflowuniverse.herolib.ui.uimodel { QuestionArgs } +// import freeflowuniverse.herolib.ui.console { color_fg } + +// args: +// - description string +// - question string +// - warning: string (if it goes wrong, which message to use) +// - reset bool = true +// - regex: to check what result need to be part of +// - minlen: min nr of chars +// +pub fn (mut c UIConsole) ask_question(args QuestionArgs) !string { + if silent_get() { + panic("can't do ask_... 
when in silent mode")
+	}
+	mut question := args.question
+	if args.clear {
+		clear() // clears the screen
+	}
+	if args.description.len > 0 {
+		cprintln(text: args.description)
+	}
+	if args.warning.len > 0 {
+		cprintln(foreground: .red, text: args.warning + '\n')
+	}
+	if question == '' {
+		question = 'Please provide answer'
+	}
+	if args.default.len > 0 {
+		question += ' (${args.default}) '
+	}
+	print_debug('${question}: ')
+	choice := os.get_raw_line().trim(' \n')
+	if choice.trim_space() == '' {
+		return args.default
+	}
+
+	if args.regex.len > 0 {
+		panic('need to implement regex check')
+	}
+	if args.minlen > 0 && choice.len < args.minlen {
+		return c.ask_question(
+			reset:       args.reset
+			description: args.description
+			warning:     'Min length of answer is: ${args.minlen}'
+			question:    args.question
+			minlen:      args.minlen
+			default:     args.default
+		)
+	}
+	return choice
+}
diff --git a/lib/ui/console/readme.md b/lib/ui/console/readme.md
new file mode 100644
index 00000000..09bf9da4
--- /dev/null
+++ b/lib/ui/console/readme.md
@@ -0,0 +1,48 @@
+
+## Chalk
+
+A terminal string colorizer for the [V language](https://vlang.io).
+
+Chalk offers functions:
+- `console.color_fg(c ForegroundColor) string` - returns the ANSI code that changes the foreground color.
+- `console.color_bg(c BackgroundColor) string` - returns the ANSI code that changes the background color.
+- `console.style(c Style) string` - returns the ANSI code that changes the text style.
+
+Example:
+
+```v
+import freeflowuniverse.herolib.ui.console
+
+// basic usage
+println('I am really ' + console.color_fg(.green) + 'happy' + console.color_fg(.default_color))
+
+// you can also combine color and style
+println('I am really ' + console.color_fg(.red) + console.style(.bold) + 'ANGRY' + console.reset)
+```
+
+Available colors:
+- black
+- red
+- green
+- yellow
+- blue
+- magenta
+- cyan
+- default_color
+- light_gray
+- dark_gray
+- light_red
+- light_green
+- light_yellow
+- light_blue
+- light_magenta
+- light_cyan
+- white
+
+Available styles:
+- bold
+- dim
+- underline
+- blink
+- reverse
+- hidden
\ No newline at end of file
diff --git a/lib/ui/console/time_date.v b/lib/ui/console/time_date.v
new file mode 100644
index 00000000..c6f87b39
--- /dev/null
+++ b/lib/ui/console/time_date.v
@@ -0,0 +1,11 @@
+module console
+
+import freeflowuniverse.herolib.ui.uimodel { QuestionArgs }
+
+pub fn (mut c UIConsole) ask_date(args QuestionArgs) !string {
+	panic('implement')
+}
+
+pub fn (mut c UIConsole) ask_time(args QuestionArgs) !string {
+	panic('implement')
+}
diff --git a/lib/ui/console/yesno.v b/lib/ui/console/yesno.v
new file mode 100644
index 00000000..1412636f
--- /dev/null
+++ b/lib/ui/console/yesno.v
@@ -0,0 +1,51 @@
+module console
+
+import freeflowuniverse.herolib.ui.uimodel { YesNoArgs }
+// import freeflowuniverse.herolib.ui.console { color_fg }
+import os
+
+// yes is true, no is false
+// args:
+// - description string
+// - question string
+// - warning string
+// - clear bool = true
+//
+pub fn (mut c UIConsole) ask_yesno(args YesNoArgs) !bool {
+	if silent_get() {
+		panic("can't do ask_... when in silent mode")
+	}
+	mut question := args.question
+	if args.clear {
+		clear() // clears the screen
+	}
+	if args.description.len > 0 {
+		cprintln(text: args.description)
+	}
+	if args.warning.len > 0 {
+		cprintln(foreground: .red, text: args.warning + '\n')
+	}
+	if question == '' {
+		question = 'Yes or No, default is Yes.'
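+		// (the prompt below also accepts 1/0 as aliases for y/n)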
+ } + print_debug('${question} (y/n) : ') + choice := os.get_raw_line().trim(' \n').to_lower() + if choice.starts_with('y') { + return true + } + if choice.starts_with('1') { + return true + } + if choice.starts_with('n') { + return false + } + if choice.starts_with('0') { + return false + } + return c.ask_yesno( + description: args.description + question: args.question + warning: "Please choose 'y' or 'n', then enter." + reset: true + ) +} diff --git a/lib/ui/factory.v b/lib/ui/factory.v new file mode 100644 index 00000000..cec9fa36 --- /dev/null +++ b/lib/ui/factory.v @@ -0,0 +1,27 @@ +module ui + +import freeflowuniverse.herolib.ui.generic { ChannelType, UserInterface } +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.data.paramsparser +// import freeflowuniverse.herolib.ui.telegram + +@[params] +pub struct UserInterfaceArgs { +pub mut: + channel ChannelType + params paramsparser.Params // to pass arguments to implementation +} + +pub fn new(args UserInterfaceArgs) !UserInterface { + mut ch := match args.channel { + .console { console.new() } + else { panic("can't find channel") } + } + + // .telegram { telegram.new() } + + return UserInterface{ + channel: ch + } + // return error("Channel type not understood, only console supported now.") // input is necessarily valid +} diff --git a/lib/ui/generic/dropdown.v b/lib/ui/generic/dropdown.v new file mode 100644 index 00000000..aafc92c2 --- /dev/null +++ b/lib/ui/generic/dropdown.v @@ -0,0 +1,49 @@ +module generic + +import freeflowuniverse.herolib.ui.console { UIConsole } +// import freeflowuniverse.herolib.ui.telegram { UITelegram } +import freeflowuniverse.herolib.ui.uimodel { DropDownArgs } + +// return the dropdown as an int +// description string +// items []string +// warning string +// clear bool = true +pub fn (mut c UserInterface) ask_dropdown(args DropDownArgs) !string { + match mut c.channel { + UIConsole { return c.channel.ask_dropdown(args)! } + else { panic("can't find channel") } + } +} + +// result can be multiple, aloso can select all +// description string +// items []string +// warning string +// clear bool = true +pub fn (mut c UserInterface) ask_dropdown_multiple(args DropDownArgs) ![]string { + match mut c.channel { + UIConsole { + return c.channel.ask_dropdown_multiple(args)! + } + else { + panic("can't find channel") + } + } +} + +// will return the string as given as response +// description string +// items []string +// warning string +// clear bool = true +pub fn (mut c UserInterface) ask_dropdown_int(args DropDownArgs) !int { + match mut c.channel { + UIConsole { + return c.channel.ask_dropdown_int(args)! + } + else { + panic("can't find channel") + } + } +} diff --git a/lib/ui/generic/editor.v b/lib/ui/generic/editor.v new file mode 100644 index 00000000..b5db6d6c --- /dev/null +++ b/lib/ui/generic/editor.v @@ -0,0 +1,26 @@ +module generic + +// import freeflowuniverse.herolib.ui.console +// import freeflowuniverse.herolib.ui.telegram { UITelegram } +import freeflowuniverse.herolib.ui.uimodel + +// open editor which can be used to edit content +// (not every UI has all capability, in case of console open vscode if installed) . +// . +// ``` +// args: +// content string //in specified format +// cat ... 
+// enum InfoCat { +// content string //in specified format +// cat EditorCat +// } +// ``` +// returns the editted content, idea is that formatting is used in editor +pub fn (mut c UserInterface) edit(args uimodel.EditArgs) !string { + // match mut c.channel { + // UIConsole { return c.channel.editor(args)! } + // else { panic("can't find channel") } + // } + return '' +} diff --git a/lib/ui/generic/info.v b/lib/ui/generic/info.v new file mode 100644 index 00000000..9ab1e168 --- /dev/null +++ b/lib/ui/generic/info.v @@ -0,0 +1,37 @@ +module generic + +// import freeflowuniverse.herolib.ui.console +// import freeflowuniverse.herolib.ui.telegram { UITelegram } +import freeflowuniverse.herolib.ui.uimodel + +// send info to the main pannel . +// (not every UI has all capability e.g. html) +// +// ``` +// args: +// content string //in specified format +// clear bool //means screen is reset for content above +// lf_before int //line feed before content +// lf_after int +// cat InfoCat +// components []ComponentCat +// enum InfoCat { +// txt +// html +// markdown +// } +// MORE THAN ONE COMPONENT CAN BE ADDED TO INFO +// enum ComponentCat { +// bootstrap +// htmx +// bulma +// } +// ``` +// supports images, and other html elements +// suggest to support htmx and limited js (how can we limit this) +pub fn (mut c UserInterface) info(args uimodel.InfoArgs) ! { + // match mut c.channel { + // UIConsole { return c.channel.info(args)! } + // else { panic("can't find channel") } + // } +} diff --git a/lib/ui/generic/log.v b/lib/ui/generic/log.v new file mode 100644 index 00000000..30b2274d --- /dev/null +++ b/lib/ui/generic/log.v @@ -0,0 +1,30 @@ +module generic + +// import freeflowuniverse.herolib.ui.console +// import freeflowuniverse.herolib.ui.telegram { UITelegram } +import freeflowuniverse.herolib.ui.uimodel + +// log content to the log panel (not every UI has this capability) +// ``` +// args: +// content string +// clear bool //means screen is reset for content above +// lf_before int //line feed before content +// lf_after int +// cat LogCat +// defines colors as used in the representation layer +// enum LogCat { +// info +// log +// warning +// header +// debug +// error +// } +// ``` +pub fn (mut c UserInterface) log(args uimodel.LogArgs) ! { + // match mut c.channel { + // UIConsole { return c.channel.log(args)! } + // else { panic("can't find channel") } + // } +} diff --git a/lib/ui/generic/model.v b/lib/ui/generic/model.v new file mode 100644 index 00000000..6da0d57c --- /dev/null +++ b/lib/ui/generic/model.v @@ -0,0 +1,19 @@ +module generic + +import freeflowuniverse.herolib.ui.console { UIConsole } +import freeflowuniverse.herolib.ui.template { UIExample } +// import freeflowuniverse.herolib.ui.telegram { UITelegram } + +// need to do this for each type of UI channel e.g. console, telegram, ... +type UIChannel = UIConsole | UIExample // TODO TelegramBot + +pub struct UserInterface { +pub mut: + channel UIChannel + user_id string +} + +pub enum ChannelType { + console + telegram +} diff --git a/lib/ui/generic/payment.v b/lib/ui/generic/payment.v new file mode 100644 index 00000000..66655d2c --- /dev/null +++ b/lib/ui/generic/payment.v @@ -0,0 +1,18 @@ +module generic + +// import freeflowuniverse.herolib.ui.console +// import freeflowuniverse.herolib.ui.telegram { UITelegram } +import freeflowuniverse.herolib.ui.uimodel + +// ... +// ``` +// args: +// TODO +// } +// ``` +pub fn (mut c UserInterface) pay(args uimodel.PayArgs) ! 
{ + // match mut c.channel { + // UIConsole { return c.channel.editor(args)! } + // else { panic("can't find channel") } + // } +} diff --git a/lib/ui/generic/question.v b/lib/ui/generic/question.v new file mode 100644 index 00000000..7bd5097a --- /dev/null +++ b/lib/ui/generic/question.v @@ -0,0 +1,22 @@ +module generic + +import freeflowuniverse.herolib.ui.console { UIConsole } +// import freeflowuniverse.herolib.ui.telegram { UITelegram } +import freeflowuniverse.herolib.ui.uimodel { QuestionArgs } + +// args: +// +// - description string +// - question string +// - warning: string (if it goes wrong, which message to use) +// - reset bool = true +// - regex: to check what result need to be part of +// - minlen: min nr of chars +// +pub fn (mut c UserInterface) ask_question(args QuestionArgs) !string { + match mut c.channel { + UIConsole { return c.channel.ask_question(args)! } + // UITelegram { return c.ask_question(args) } + else { panic("can't find channel") } + } +} diff --git a/lib/ui/generic/yesno.v b/lib/ui/generic/yesno.v new file mode 100644 index 00000000..39f3983a --- /dev/null +++ b/lib/ui/generic/yesno.v @@ -0,0 +1,19 @@ +module generic + +import freeflowuniverse.herolib.ui.console { UIConsole } +// import freeflowuniverse.herolib.ui.telegram { UITelegram } +import freeflowuniverse.herolib.ui.uimodel + +// yes is true, no is false +// args: +// - description string +// - question string +// - warning string +// - clear bool = true +// +pub fn (mut c UserInterface) ask_yesno(args uimodel.YesNoArgs) !bool { + match mut c.channel { + UIConsole { return c.channel.ask_yesno(args)! } + else { panic("can't find channel") } + } +} diff --git a/lib/ui/logger/logger.v b/lib/ui/logger/logger.v new file mode 100644 index 00000000..b4a49512 --- /dev/null +++ b/lib/ui/logger/logger.v @@ -0,0 +1,52 @@ +module console + +import time +import freeflowuniverse.herolib.ui.console + +pub enum LogLevel { + error + warning + info + debug +} + +pub struct Logger { +pub mut: + level LogLevel +} + +fn (log Logger) output(msg string, t LogLevel) { + if int(log.level) >= int(t) { + console.print_debug(msg) + } +} + +pub fn (log Logger) info(msg string) { + log.output('${time.now()} | ' + 'INFO' + '\t| ' + msg, .info) +} + +@[if debug] +pub fn (log Logger) debug(msg string) { + log.output('${time.now()} | ' + color_fg('DEBUG', 'blue') + '\t| ' + color_fg(msg, 'light_blue'), + .debug) +} + +pub fn (log Logger) warning(msg string) { + log.output('${time.now()} | ' + color_fg('WARNING', 'yellow') + '\t| ' + + color_fg(msg, 'light_yellow'), .warning) +} + +pub fn (log Logger) success(msg string) { + console.print_debug('${time.now()} | ' + color_fg('SUCCESS', 'green') + '\t| ' + + color_fg(msg, 'light_green')) +} + +pub fn (log Logger) error(msg string) { + print_debug('${time.now()} | ' + style(color_fg('ERROR', 'red'), 'bold') + '\t| ' + + color_fg(msg, 'light_red')) +} + +pub fn (log Logger) critical(msg string) { + print_debug('${time.now()} | ' + style(color_bg('CRITICAL', 'red'), 'bold') + '\t| ' + + style(color_bg(msg, 'light_red'), 'bold')) +} diff --git a/lib/ui/readme.md b/lib/ui/readme.md new file mode 100644 index 00000000..69ce31e0 --- /dev/null +++ b/lib/ui/readme.md @@ -0,0 +1,109 @@ +# User Interface + +```v +import freeflowuniverse.herolib.ui +import freeflowuniverse.herolib.ui.console + +//today channeltype is not used, only console supported +mut myui:=ui.new()! 
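+//params (paramsparser.Params) can carry implementation specific settings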
+
+console.clear()
+
+//ask_question(args QuestionArgs) string
+myurl:=myui.ask_question(question:"what is the url to your repo")!
+
+//ask_dropdown(args DropDownArgs) string, returns the chosen item
+mycolor:=myui.ask_dropdown(question:"which color do you like",items:["red","green"])!
+
+//ask_dropdown_multiple(args DropDownArgs) []string
+mycolors:=myui.ask_dropdown_multiple(question:"which colors do you like",items:["red","green"])!
+
+//ask_dropdown_int(args DropDownArgs) int
+//returns the index (1 based) of the chosen item
+
+//ask_yesno(args YesNoArgs) bool
+ok2delete:=myui.ask_yesno(question:"are you sure?")!
+
+
+// print with colors, reset...
+//```
+// foreground ForegroundColor
+// background BackgroundColor
+// text string
+// style Style
+// reset_before bool = true
+// reset_after bool = true
+//```
+console.cprint(foreground: .yellow, style: .bold, text: 'this is my happy text')
+
+
+```
+
+## parameters
+
+see uimodel
+
+## base capabilities
+
+can be seen in section ```generic```
+
+Not all features are possible in every UI implementation; sometimes a redirect to a webserver is needed. Editing a document from telegram, for example, probably means sending a link to a server hosting a javascript editor, with the result saved at the backend.
+
+
+## console colors
+
+```v
+
+enum ForegroundColor {
+	default_color = 39
+	white         = 97
+	black         = 30
+	red           = 31
+	green         = 32
+	yellow        = 33
+	blue          = 34
+	magenta       = 35
+	cyan          = 36
+	light_gray    = 37
+	dark_gray     = 90
+	light_red     = 91
+	light_green   = 92
+	light_yellow  = 93
+	light_blue    = 94
+	light_magenta = 95
+	light_cyan    = 96
+}
+
+enum BackgroundColor {
+	default_color = 49
+	black         = 40
+	red           = 41
+	green         = 42
+	yellow        = 43
+	blue          = 44
+	magenta       = 45
+	cyan          = 46
+	light_gray    = 47
+	dark_gray     = 100
+	light_red     = 101
+	light_green   = 102
+	light_yellow  = 103
+	light_blue    = 104
+	light_magenta = 105
+	light_cyan    = 106
+	white         = 107
+}
+
+enum Style {
+	normal    = 99
+	bold      = 1
+	dim       = 2
+	underline = 4
+	blink     = 5
+	reverse   = 7
+	hidden    = 8
+}
+
+```
\ No newline at end of file
diff --git a/lib/ui/telegram/README.md b/lib/ui/telegram/README.md
new file mode 100644
index 00000000..f0b3e3c1
--- /dev/null
+++ b/lib/ui/telegram/README.md
@@ -0,0 +1,55 @@
+spawn:
+
+- processor
+- telegramclient
+- flowsupervisor
+
+guest -> tbot -> tclient(through telegram)
+tclient -> ui.telegram.forward
+ui.telegram.forward -> flow_supervisor
+if chat_id initializes new flow
+	flow_supervisor creates newflow
+	flow_supervisor -> newflow
+else if chat_id in existing flow
+	flow_supervisor -> existingflow
+
+flow parses message and creates response
+flow -> ui.telegram.out
+ui.telegram.out -> tclient -> tbot -> guest
+
+Questions:
+- is flow_supervisor telegram specific?
+ - if not then shouldn't the telegram client parse the update into a standard format ie action, params + +Flow Supervisor: +- receives messages requesting new flows +- forwards messages into existing flows + +Entities: +- TelegramClient + - receives messages from users and sends them to FlowSupervisor + - receives messages from UIChannels and send them to users +- Processor + - acts as messaging bus +- FlowSupervisor - agnostic + - receives messages from TelegramClient and creates flows +- Flows - agnostic + - receives messages from FlowSupervisor and +- UIChannels +- Questions + + +# New Plan + +struct TelegramClient { + +} + +Contents +- 1 processor +- 1 actionrunner +- 1 telegram client +- 1 actor + - many flows + - many questions + - flow supervisor \ No newline at end of file diff --git a/lib/ui/telegram/channel.v b/lib/ui/telegram/channel.v new file mode 100644 index 00000000..89a403ea --- /dev/null +++ b/lib/ui/telegram/channel.v @@ -0,0 +1,52 @@ +module telegram + +import dariotarantini.vgram +// import freeflowuniverse.herolib.baobab.client +import freeflowuniverse.herolib.data.paramsparser + +pub struct UITelegram { +pub mut: + // baobab client.Client + user_id string +} + +pub fn new(user_id string) UITelegram { + return UITelegram{ + // baobab: client.new()! + user_id: user_id + } +} + +fn (ui UITelegram) send_question(msg string) !string { + mut j_params := paramsparser.Params{} + j_params.set('question', msg) + + // job := ui.baobab.job_new( + // // todo twinid + // action: 'ui.telegramclient.send_question' + // params: j_params + // // todo actionsource + // ) + + // response := ui.baobab.job_schedule_wait(job, 0)! + + // return response.result.get('answer') +} + +fn (ui UITelegram) send_exit_message(msg string) ! { + mut j_params := paramsparser.Params{} + j_params.set('message', msg) + + job := ui.baobab.job_new( + // todo twinid + action: 'ui.telegramclient.exit_message' + params: j_params + // todo actionsource + ) + + response := ui.baobab.job_schedule(job)! +} + +/* +needs to schedule new jobs and wait +*/ diff --git a/lib/ui/telegram/client/client.v b/lib/ui/telegram/client/client.v new file mode 100644 index 00000000..f64ed6a5 --- /dev/null +++ b/lib/ui/telegram/client/client.v @@ -0,0 +1,126 @@ +module client + +import freeflowuniverse.herolib.baobab.client +import freeflowuniverse.herolib.clients.redisclient +import dariotarantini.vgram +import json + +// client for telegram bot +struct TelegramClient { + bot vgram.Bot + baobab client.Client // Baobab client + waiting_qs map[string]RedisQueue // where string is user_id +} + +// factory for telegram client initializes baobab client and redis queues +pub fn new_client(bot_token string, supervisor_key string) !TelegramClient { + baobab := client.new()! + return TelegramClient{ + baobab: baobab + bot: vgram.new_bot(bot_token) + flow_supervisor: baobab.redis.queue_get(supervisor_key) //? Why are these necessary? + in_q: baobab.redis.queue_get('client.telegram.in') //? Why are these necessary? 
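+		// (assumption: these redis queues carry updates between this client,
+		// the flow supervisor and the baobab processor)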
+ } +} + +// listens for incoming messages, relays to flow +pub fn (mut client TelegramClient) execute() { + mut last_offset := client.clear_old_updates() + for { + updates := client.bot.get_updates(offset: last_offset, limit: 100) + for update in updates { + // make sure message is new + if last_offset < update.update_id { + last_offset = update.update_id + if update.message.text.starts_with('/') { + // todo send update to flow supervisor + } else { + // todo send update to uichannel return queue + client.handle_update(update) or { continue } // TODO log a failure + } + } + } + } +} + +pub fn (mut client TelegramClient) clear_old_updates() string { + mut last_offset := 0 + mut updates := client.bot.get_updates( + timeout: 0 + allowed_updates: json.encode([ + 'message', + ]) + offset: last_offset + limit: 100 + ) + for update in updates { + if last_offset < update.update_id { + last_offset = update.update_id + } + } + return last_offset +} + +// forwards update to telegramui for handling +// todo: implement separate handlers for separate message types +fn (mut client TelegramClient) handle_update(update vgram.Update) { + // todo check for an exit code + // todo user_id in + // [params] + // pub struct JobNewArgs { + // pub mut: + // twinid u32 + // action string + // args Params + // actionsource string + // } + + job_args := JobNewArgs{ + action: 'ui.telegram.forward' + } + // creates job in jobs db and pushes to processor's incoming queue + client.baobab.job_new_schedule()! + + user_id := update.message.from.id.str() + text := update.message.text + + // // Infinite loop to deal with incoming and outgoing messages + // for { + // select { + // output := <- ui.to_user { + // ui.send(output.message, output.user_id) + // ui.waiting_qs[output.user_id] = output.response_channel + // } + // else { + // updates := ui.bot.get_updates(timeout: 0, allowed_updates: json.encode(["message"]), offset: last_offset, limit: 100) + // for update in updates { + // if last_offset < update.update_id { + // last_offset = update.update_id + // ui.handle_update(update) // ? Should this line be in the if statement? 
+ // } + // } + // } + // } + // } +} + +fn (mut ui UITelegram) handle_update(update vgram.Update) { + user_id := update.message.from.id.str() + text := update.message.text + if user_id in ui.waiting_qs.keys() && text[0].ascii_str() != '/' { + ui.waiting_qs[user_id].response_channel <- update.message.text + } else { + match text.trim_string_left('/').split(' ')[0] { + 'register' {} + 'order' {} + } + } +} + +fn (ui UITelegram) send(msg string, user_id string) { + _ := ui.bot.send_message( + chat_id: user_id + text: msg + parse_mode: 'MarkdownV2' + ) +} diff --git a/lib/ui/telegram/client_test.v b/lib/ui/telegram/client_test.v new file mode 100644 index 00000000..43afef82 --- /dev/null +++ b/lib/ui/telegram/client_test.v @@ -0,0 +1,74 @@ +module telegram + +fn test_run() { + client := new_client() + for { + updates := client.get_updates(offset: p.last_offset, limit: 100) + for update in updates { + if p.last_offset < update.update_id { + if update.message.text == '/start' { + // todo: do something for new chat + } + p.last_offset = update.update_id + p.handle_update(update) or { continue } + } + } + } +} + +fn (mut client TelegramClient) handle_update(update vgram.Update) { + user_id := update.message.from.id.str() + + // todo: implement separate handlers for separate message types + text := update.message.text + bot.client.new() + + if user_id in ui.waiting_qs.keys() && text[0].ascii_str() != '/' { + ui.waiting_qs[user_id].response_channel <- update.message.text + } else { + match text.trim_string_left('/').split(' ')[0] { + 'register' {} + 'order' {} + } + } + + // // Infinite loop to deal with incoming and outgoing messages + // for { + // select { + // output := <- ui.to_user { + // ui.send(output.message, output.user_id) + // ui.waiting_qs[output.user_id] = output.response_channel + // } + // else { + // updates := ui.bot.get_updates(timeout: 0, allowed_updates: json.encode(["message"]), offset: last_offset, limit: 100) + // for update in updates { + // if last_offset < update.update_id { + // last_offset = update.update_id + // ui.handle_update(update) // ? Should this line be in the if statement? 
+ // } + // } + // } + // } + // } +} + +fn (mut ui UITelegram) handle_update(update vgram.Update) { + user_id := update.message.from.id.str() + text := update.message.text + if user_id in ui.waiting_qs.keys() && text[0].ascii_str() != '/' { + ui.waiting_qs[user_id].response_channel <- update.message.text + } else { + match text.trim_string_left('/').split(' ')[0] { + 'register' {} + 'order' {} + } + } +} + +fn (ui UITelegram) send(msg string, user_id string) { + _ := ui.bot.send_message( + chat_id: user_id + text: msg + parse_mode: 'MarkdownV2' + ) +} diff --git a/lib/ui/telegram/flow_supervisor.v b/lib/ui/telegram/flow_supervisor.v new file mode 100644 index 00000000..1538d649 --- /dev/null +++ b/lib/ui/telegram/flow_supervisor.v @@ -0,0 +1,27 @@ +module telegram + +import dariotarantini.vgram +import json +// TODO +// need to create something here that: +// gets updates +// creates flows +// passes text into flow channels +// receives text from to_user channel +// sends messages to users + +// pub fn (mut ui UITelegram) run() { +// } + +// fn (mut ui UITelegram) handle_update(update vgram.Update) { +// user_id := update.message.from.id.str() +// text := update.message.text +// if user_id in ui.waiting_qs.keys() && text[0].ascii_str() != '/' { +// ui.waiting_qs[user_id].response_channel <- update.message.text +// } else { +// match text.trim_string_left('/').split(' ')[0] { +// 'register' {} +// 'order' {} +// } +// } +// } diff --git a/lib/ui/telegram/questions.v b/lib/ui/telegram/questions.v new file mode 100644 index 00000000..a3bd3340 --- /dev/null +++ b/lib/ui/telegram/questions.v @@ -0,0 +1,114 @@ +module telegram + +import os +import freeflowuniverse.herolib.ui.uimodel +// import freeflowuniverse.herolib.timetools + +// // args: +// // - description string +// // - question string +// // - warning: string (if it goes wrong, which message to use) +// // - clear bool = true +// // - regex: to check what result need to be part of +// // - minlen: min nr of chars +// // + +// // ! 
struct Output { +// // message string +// // response_channel chan string +// // } + +// pub fn (mut ui UITelegram) ask_dropdown(args DropDownArgs) !string { +// mut description := '${args.description} \n\nChoices: \n' +// mut count := 1 +// for item in args.items { +// description += '${count} - ${item}\n' +// count += 1 +// } + +// question := 'Please send your choice by entering a number from 1 to ${count}:' + +// q_args := QuestionArgs{ +// question: question +// description: description +// warning: args.warning +// clear: args.clear +// user_id: args.user_id +// } +// return ui.ask_question(q_args) or { return error('Failed to ask dropdown: ${err}') } +// } + +// pub fn (mut ui UITelegram) ask_yesno(args YesNoArgs) !string { +// q_args := QuestionArgs{ +// question: args.question +// description: args.description +// warning: args.warning +// clear: args.clear +// user_id: args.user_id +// } +// return ui.ask_question(q_args) or { return error('Failed to ask yesno: ${err}') } +// } + +// pub fn (mut ui UITelegram) ask_question(args QuestionArgs) !string { +// mut message := '' + +// mut warning := args.warning + +// for { +// if args.description.len > 0 { +// message += '${make_safe(args.description)}\n' +// } +// if args.warning.len > 0 { +// message += '__${make_safe(args.warning)}__\n' +// } +// mut question := 'Please provide an answer:' +// if args.question != '' { +// question = args.question +// } +// message += '*bold *${make_safe(question)}*\n' + +// warning = args.warning + +// answer := ui.send_question(message)! + +// if args.validation(answer) { +// return answer +// } else { +// warning += '\n ${err}' +// } +// } +// } + +// pub fn (mut ui TelegramBot) ask_date(args QuestionArgs) !map[string]int { +// mut warning := args.warning +// for { +// date_string := ui.ask_question(args) +// args.warning = warning +// if date := timetools.parse_date(date_string) { +// return date +// } +// args.warning = warning + +// "\n Failed to parse date, please input a date of the format: '28 feb'" +// } +// } + +// pub fn (mut ui TelegramBot) ask_time(args QuestionArgs) !map[string]int { +// mut warning := args.warning +// for { +// time_string := ui.ask_question(args) +// args.warning = warning +// if time := timetools.parse_time(time_string) { +// return time +// } +// args.warning = warning + +// "\n Failed to parse time, please input a time of the format: 'HH:MM'" +// } +// } + +// fn make_safe(text string) string { +// mut new_text := '' +// for character in text { +// new_text += '\\${character.ascii_str()}' +// } +// return new_text +// } diff --git a/lib/ui/template/console.v b/lib/ui/template/console.v new file mode 100644 index 00000000..68c71166 --- /dev/null +++ b/lib/ui/template/console.v @@ -0,0 +1,7 @@ +module template + +import freeflowuniverse.herolib.ui.console + +pub fn clear() { + console.print_debug('\033[2J') +} diff --git a/lib/ui/template/dropdown.v b/lib/ui/template/dropdown.v new file mode 100644 index 00000000..250259f0 --- /dev/null +++ b/lib/ui/template/dropdown.v @@ -0,0 +1,31 @@ +module template + +// import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.ui.uimodel { DropDownArgs } + +// return the dropdown as an int +// description string +// items []string +// warning string +// clear bool = true +pub fn (mut c UIExample) ask_dropdown(args DropDownArgs) !string { + return '' +} + +// result can be multiple, also can select all +// description string +// items []string +// warning string +// clear bool = true +pub fn (mut c UIExample) 
ask_dropdown_multiple(args DropDownArgs) ![]string { + return []string{} +} + +// will return the string as given as response +// description string +// items []string +// warning string +// clear bool = true +pub fn (mut c UIExample) ask_dropdown_int(args DropDownArgs) !int { + return 1 +} diff --git a/lib/ui/template/factory.v b/lib/ui/template/factory.v new file mode 100644 index 00000000..e5d1b52b --- /dev/null +++ b/lib/ui/template/factory.v @@ -0,0 +1,11 @@ +module template + +pub struct UIExample { +pub mut: + x_max int = 80 + y_max int = 60 +} + +pub fn new() UIExample { + return UIExample{} +} diff --git a/lib/ui/template/question.v b/lib/ui/template/question.v new file mode 100644 index 00000000..25688928 --- /dev/null +++ b/lib/ui/template/question.v @@ -0,0 +1,17 @@ +module template + +// import os +import freeflowuniverse.herolib.ui.uimodel { QuestionArgs } +// import freeflowuniverse.herolib.ui.console + +// args: +// - description string +// - question string +// - warning: string (if it goes wrong, which message to use) +// - reset bool = true +// - regex: to check what result need to be part of +// - minlen: min nr of chars +// +pub fn (mut c UIExample) ask_question(args QuestionArgs) !string { + return '' +} diff --git a/lib/ui/template/time_date.v b/lib/ui/template/time_date.v new file mode 100644 index 00000000..efe1a068 --- /dev/null +++ b/lib/ui/template/time_date.v @@ -0,0 +1,11 @@ +module template + +import freeflowuniverse.herolib.ui.uimodel { QuestionArgs } + +pub fn (mut c UIExample) ask_date(args QuestionArgs) !string { + panic('implement') +} + +pub fn (mut c UIExample) ask_time(args QuestionArgs) !string { + panic('implement') +} diff --git a/lib/ui/template/yesno.v b/lib/ui/template/yesno.v new file mode 100644 index 00000000..05e0f970 --- /dev/null +++ b/lib/ui/template/yesno.v @@ -0,0 +1,14 @@ +module template + +import freeflowuniverse.herolib.ui.uimodel { YesNoArgs } + +// yes is true, no is false +// args: +// - description string +// - question string +// - warning string +// - clear bool = true +// +pub fn (mut c UIExample) ask_yesno(args YesNoArgs) !bool { + return true +} diff --git a/lib/ui/uimodel/uimodel.v b/lib/ui/uimodel/uimodel.v new file mode 100644 index 00000000..c588eed1 --- /dev/null +++ b/lib/ui/uimodel/uimodel.v @@ -0,0 +1,116 @@ +module uimodel + +@[params] +pub struct DropDownArgs { +pub mut: + description string + question string + items []string + default []string + warning string + clear bool + all bool + choice_message string + validation fn (string) bool = fn (s string) bool { + return true + } +} + +@[params] +pub struct QuestionArgs { +pub mut: + description string + question string + warning string + clear bool + regex string + minlen int + reset bool + default string + validation fn (string) bool = fn (s string) bool { + return true + } +} + +// validation responds with either true or an error message + +@[params] +pub struct YesNoArgs { +pub mut: + description string + question string + warning string + clear bool + reset bool + default bool + validation fn (string) bool = fn (s string) bool { + return true + } +} + +@[params] +pub struct LogArgs { +pub mut: + content string + clear bool // means screen is reset for content above + lf_before int // line feed before content + lf_after int + cat LogCat +} + +// defines colors as used in the representation layer +pub enum LogCat { + info + log + warning + header + debug + error +} + +@[params] +pub struct InfoArgs { +pub mut: + content string // in specified format + clear bool 
// means screen is reset for content above + lf_before int // line feed before content + lf_after int + cat InfoCat + components []ComponentCat +} + +// defines colors as used in the representation layer +pub enum InfoCat { + txt + html + markdown +} + +// MORE THAN ONE COMPONENT CAN BE ADDED TO INFO +pub enum ComponentCat { + bootstrap + htmx + bulma +} + +@[params] +pub struct EditArgs { +pub mut: + content string // in specified format + cat EditorCat +} + +// defines colors as used in the representation layer +pub enum EditorCat { + txt + markdown + heroscript +} + +@[params] +pub struct PayArgs { +pub mut: + amount f64 + currency string = 'USD' // use currency module to do conversions where needed, + // TODO: what else do we need +} diff --git a/manual/best_practices/osal/silence.md b/manual/best_practices/osal/silence.md new file mode 100644 index 00000000..241731ae --- /dev/null +++ b/manual/best_practices/osal/silence.md @@ -0,0 +1,21 @@ +# silence + +the following code shows how we can surpress all output, errors should still go to stderr (to be tested) + +the example is a .vsh script note the arguments to v, this also makes sure there are no notices shown. + +```go +#!/usr/bin/env -S v -n -w -enable-globals run + +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.ui.console + +console.silent_set() +mut job2 := osal.exec(cmd: 'ls /',debug:true)! +println("I got nothing above") + +console.silent_unset() +println("now I will get output") + +osal.exec(cmd: 'ls /',debug:true)! +``` \ No newline at end of file diff --git a/manual/best_practices/scripts/scripts.md b/manual/best_practices/scripts/scripts.md new file mode 100644 index 00000000..e76c3fd9 --- /dev/null +++ b/manual/best_practices/scripts/scripts.md @@ -0,0 +1,61 @@ +# Scripts + +Lets stop using bash files and use v for everything + +example would be + + +```go +#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run + +fn sh(cmd string) { + println('❯ ${cmd}') + print(execute_or_exit(cmd).output) +} + +//super handy trick to go to where the file is +abs_dir_of_script := dir(@FILE) + + +sh(' +set -ex +cd ${abs_dir_of_script} + +') + +//the $ shows its a compile time argument, will only put it compiled if linux +$if !linux { + println('AM IN LINUX') +} + +``` + +## argument parsing + +```v +#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run + +import os +import flag + +mut fp := flag.new_flag_parser(os.args) +fp.application('compile.vsh') +fp.version('v0.1.0') +fp.description('Compile hero binary in debug or production mode') +fp.skip_executable() + +prod_mode := fp.bool('prod', `p`, false, 'Build production version (optimized)') +help_requested := fp.bool('help', `h`, false, 'Show help message') + +if help_requested { + println(fp.usage()) + exit(0) +} + +additional_args := fp.finalize() or { + eprintln(err) + println(fp.usage()) + exit(1) +} + +``` diff --git a/manual/best_practices/scripts/shebang.md b/manual/best_practices/scripts/shebang.md new file mode 100644 index 00000000..3b107534 --- /dev/null +++ b/manual/best_practices/scripts/shebang.md @@ -0,0 +1,14 @@ +# Shebang + +is the first line of a script, your os will use that one to get started. + +for V we use + +```bash +#!/usr/bin/env -S v -n -w -gc none -no-retry-compilation -cc tcc -d use_openssl -enable-globals run +``` + +- -w no warnings +- -n ??? 
+ +This one is the fastest way how to run scripts, but not meant to run long running items diff --git a/manual/best_practices/using_args_in_function.md b/manual/best_practices/using_args_in_function.md new file mode 100644 index 00000000..4dc65b7b --- /dev/null +++ b/manual/best_practices/using_args_in_function.md @@ -0,0 +1,32 @@ +# example how to use args in a function + +Below you can see a snippet which demonstrates how you have a params struct with path and git_url. + +- the params, makes sure you can call the function with + - path:... + - or git_url:... +- if git_url is used it will get the code from git and bring it in args.path +- notice the trick where args is give immutable but then made mutable inside so we can change it later +- git_pull means we will pull the code if the directory would already exist +- git_reset means we ignore the files which are changed in the repo and will discard those local changes (dangerous) +- reload: gets the git cache to be reloaded + +```go +[params] +pub struct TreeScannerArgs { +pub mut: + name string = 'default' // name of tree + path string + git_url string + git_reset bool + git_root string + git_pull bool +} +pub fn scan(args_ TreeScannerArgs) ! { + mut args := args_ + if args.git_url.len > 0 { + args.path=gittools.code_get(coderoot:args.git_root,url:args.git_url, + pull:args.git_pull,reset:args.git_reset,reload:false)! + } + +``` \ No newline at end of file diff --git a/manual/core/base.md b/manual/core/base.md new file mode 100644 index 00000000..aeb55f11 --- /dev/null +++ b/manual/core/base.md @@ -0,0 +1,87 @@ + +# Base Context & Session + +Important section about how to create base objects which hold context an config mgmt. + +## Context + +A context is sort of sandbox in which we execute our scripts it groups the following + +- filesystem key value stor +- logs +- multiple sessions +- gittools: gitstructure +- redis client + +> more info see [context](context.md) + +## Session + +- each time we execute something with a client or other sal we do this as part of a session +- a session can have a name as given by the developer or will be autocreated based on time + +> more info see [session](session.md) + +## Config Mgmt + +is done per instance of an object which inherits from BaseConfig. 
+ +- see [base](base.md) +- see [config](config.md) + +## KVS = FSDB + +there is a KVS attached to each context/session + +- see [kvs](kvs.md) + + +# BaseConfig + +Clients, DALs, SAL's can inherit base + + +```golang + +pub struct BaseConfig { +pub mut: + session_ ?&Session + instance string +} + +//how to use + +import freeflowuniverse.herolib.core.base + +pub struct B2Client { + base.BaseConfig +pub mut: + someprop string +} + + + +``` + +## BaseConfig Methods + +This will give some super powers to each base inheritted class + + +```v + +// return a session which has link to the actions and params on context and session level +// the session also has link to dbfs (filesystem key val stor and gitstructure if relevant) +//``` +// context ?&Context @[skip; str: skip] +// session ?&Session @[skip; str: skip] +// context_name string = 'default' +// session_name string //default will be based on a date when run +// interactive bool = true //can ask questions, default on true +//``` +pub fn (mut self BaseConfig) session(args PlayArgs) &Session + +pub fn (mut self BaseConfig) context() &Context + + +``` \ No newline at end of file diff --git a/manual/core/concepts/global_ids.md b/manual/core/concepts/global_ids.md new file mode 100644 index 00000000..e69de29b diff --git a/manual/core/concepts/name_registry.md b/manual/core/concepts/name_registry.md new file mode 100644 index 00000000..527bec19 --- /dev/null +++ b/manual/core/concepts/name_registry.md @@ -0,0 +1,75 @@ + +we are building a decentralized DNS system + +- the DNS system is stored in a filedb, which is a directory with subdirs where directory structure + file name, defines the key +- we store pubkeys (32 bytes) and names (like dns names)- +- the public key gets mapped to a unique id per filedb dir +- each person can register 1 or more names (max 12 characters, min 3) +- these names are unique per repo and linked to the id of the public key +- a name can be owned by min 1, max 5 public keys (names can be co-owned) = means max 4 bytes x 5 to identify which users own a name +- we represent database for public keys and names as directory structures +- database for public keys + - each pubkey as remembered in the file database in the repo on $repodir/keys gets a unique incremental key = int + - we have max 256 dirs and 256 files, where the name is first byte expressed as hex + - e.g. hex(999999) = 'f423f', this results in $repodir/keys/f4/23.txt + - in each txt file we \n separate the entries (each line is pubkey\n) + - 999999 -> f4/23 then f (remainder) gets converted back to int and this is the element in the list in 23.txt (the Xe line) + - this means max nr of dirs:65536, max nr of elements in file = 152 items, line separated + - this means it goes fast to process one txt file to retrieve relation between id and pubkey + - this db allows gast retrieval of pubkey based on int (unique per file db dir) + - the order of the lines is never changed, new data always added so we keep unique id (int) +- database for names + - names are ascii only with ofcourse '.' as separator + - names are 2 levels e.g. kristof.belgium + - we hash the name md5, take first 2 chars as identifier for directory, the next 2 chars as text file with the names + - e.g. 
/data/repo1/names/aa/bb.txt (aa would be first 2 chars of md5, bb next 2 chars of md5) + - names are in that bb.txt file (example), they are added as they come in + - the linenr is the unique id in that file, which means each name has unique id as follows + - aabb1 (position 1) would result to: aabb -> int + 1, e.g. position 999 would be hex2int(aabb)+999 + - this would be the unique int + - per line we store the following: $name(lowercase, ascii):f423f,a4233:signature + - this means 2 pub keys linked to the name + - the link is done by an id (as described above, which can then be mapped back to pubkey) + - the signature is secp256k1 signature which can be verified by everyone who reads this file, only 1 of users need to sign + - the signature is on name+the id's of who owns the name (so we verify ownership) + - the order of the lines is never changed, new data always added so we keep unique id (int) + +now create the following python functions and implement above + +```python +#register pubkey in the pubkey db, return the int +def key_register(pubkey) -> int + +class NamePreparation: + name str + pubkeys []int #position of each pubkey in the pubkey db + signature []u8 #bytestr of the secp256k1 signature + + #sign name + int's (always concatenated in same way) with given privkey + #the result is stored in signature on class + def sign(privkey): + #need to check that priv key given is part of the pubkeys + + #return str representation which is $name:f423f,a4233,...:$signature + def str() -> str: + ... + +#name will be lowercased, trimmed space +#max 1 dot in name (2 levels in DNS, top and 1 down) +#signature is secp256k and will be verified in this function against all given pubkeys +#the first pubkey need to have signed the name + +#returns the inique id of the name in this filedb repo +def name_register(name:str,pubkeys:[]str,privkey:...) -> int: + #will use NamePreparation functionality + #str() will give the right str which is added as newline to the right file in the filedb + +#find the name, NotFound exception when name not found, +#if verify on then will check the signature vs the first pubkey of the list +def name_get(id:int,verify:bool=True) -> str: + +def key_get(id:int) -> PubKey: + + + +``` + diff --git a/manual/core/concepts/objects.md b/manual/core/concepts/objects.md new file mode 100644 index 00000000..cbd4534f --- /dev/null +++ b/manual/core/concepts/objects.md @@ -0,0 +1,32 @@ + + +## rootobject with config + +example of 3 methods each of such rootobjects need to have + +```golang + +pub fn (mut c Context) str() string { + return c.heroscript() or {"BUG: can't represent the object properly, I try raw.\n$c"} +} + +fn (mut c Context) str2() string { + return "cid:${c.cid} name:${c.name} " or {"BUG: can't represent the context properly, I try raw"} +} + +//if executed needs to redefine this object +pub fn (mut c Context) heroscript() !string { + mut out:="!!core.context_define ${c.str2()}\n" + mut params:=c.params()! + if ! 
params.empty(){ + out+="\n!!core.context_params guid:${c.guid()}\n" + out+=params.heroscript()+"\n" + } + return out +} + +//needs to be unique for universe +pub fn (mut c Context) guid() string { + return "${c.cid}:${c.name}" +} +``` \ No newline at end of file diff --git a/manual/core/concepts/sid.md b/manual/core/concepts/sid.md new file mode 100644 index 00000000..bdcc745c --- /dev/null +++ b/manual/core/concepts/sid.md @@ -0,0 +1,69 @@ + +# sid = Smart ID + +- format: + - smart id, is 3 to 6 letters, 0...z +- the rid,cid and id are all smart id's +- sid's are unique per circle +- sid's can be converted to int easily + +## gid = Global ID + +Identifies an object in unique way on global level, normally not needed in heroscript, because heroscript most often executes in context of a circle + +- gid = rid.cid.oid + - rid = region id (regional identifier on which circle is defined), often not used today + - cid = circle id + - id = object id +- each of above id's are smart id's + + +The following are valid representations + +- '$rid.$cid.$id' +- '$cid.$id' if rid is known +- '$id' if rid and cid are known + +## automatically fill in + +```golang +!circle_role.define + id:'***' //means will be filled in automatically, unique per circle + name:'vpsales' + circle:'tftech' //can be id of circle or name + role:'stakeholder' +``` + +## code + +```golang +pub struct SmartId { +pub mut: + rid string //regional id + cid string //link to circle + id string //content id +} +``` + +## sid's can address the world + +- each object can be addressed by means of 3 smart id's + - $smartid_region (e.g. regional internet) + - $smartid_circle + - $smartid_object +- object is any of the object types e.g. issue, story, ... +- each object is identified as + - $smartid_region.$smartid_circle.$smartid_object + - $smartid_circle.$smartid_object (will enherit the id from the region we are operating on) + - $smartid_object (will enherit region and circle from the circle we are operating on) +- smart id is + - 2 to 6 times [a...z|0...9] + - size to nr of objects + - 2 -> 26+10^2 = 1,296 + - 3 -> 26+10^3 = 46,656 + - 4 -> 26+10^4 = 1,679,616 + - 5 -> 26+10^5 = 60,466,176 + - 6 -> 26+10^6 = 2,176,782,336 +- a circle can be owned by 1 person or by a group (e.g. company, or administrators for e.g. blockchain DB) +- e.g. 1a.e5q.9h would result to globally unique identifier 1a would be the region, e5q the circle, 9h is id of the obj in my circle + diff --git a/manual/core/context.md b/manual/core/context.md new file mode 100644 index 00000000..323fe676 --- /dev/null +++ b/manual/core/context.md @@ -0,0 +1,100 @@ + + +# Context + +## Get a context + + +```js +cid string // rid.cid or just cid +name string // a unique name in cid +params paramsparser.Params +redis &redisclient.Redis +dbcollection &dbfs.DBCollection +``` + +- cid is the unique id for a circle. +- the default context is "default" +- each context can have params attached to it, as can be set by the heroscripts +- each context has a redis client (can be a different per context but normally not) +- context db is a fs db (key value stor) + + +```golang +import freeflowuniverse.herolib.core.base + + +struct ContextGetArgs { + name string = "default" // a unique name in cid + interactive bool = true +} + +//get context based on name, can overrule interactivity +play.context_get(args_ ContextGetArgs) !Context + + +``` + +## Work with a context + +E.g. 
gitstructure is linked to a context + +```golang + +//return the gistructure as is being used in context +fn (mut self Context) gitstructure() !&gittools.GitStructure + +//reload gitstructure from filesystem +fn (mut self Context) gitstructure_reload() + +//return the coderoot as is used in context +fn (mut self Context) coderoot() !string + +// load the context params from redis +fn (mut self Context) load() ! + +// save the params to redis +fn (mut self Context) save() ! + +``` + +## get a custom DB from context + +```golang + +//get a unique db with a name per context +fn (mut self Context) db_get(dbname string) !dbfs.DB + +//get configuration DB is always per context +fn (mut self Context) db_config_get() !dbfs.DB + +``` + +## configure context through heroscript + +```js +!!context.configure + name:'test' + coderoot:'' + interactive:true +``` + + +## Configure a context + +A context can get certain configuration e.g. params, coderoot, ... (in future encryption), configuration is optional. + +```golang + +// configure a context object +// params: +// ``` +// cid string = "000" // rid.cid or cid allone +// name string // a unique name in cid +// params string +// coderoot string +// interactive bool +// ``` +fn context_configure(args_ ContextConfigureArgs) ! + +``` \ No newline at end of file diff --git a/manual/core/context_session_job.md b/manual/core/context_session_job.md new file mode 100644 index 00000000..0236b844 --- /dev/null +++ b/manual/core/context_session_job.md @@ -0,0 +1,26 @@ +# Circle + + +- has a unique CID = circle id (is a SID) +- has following components + - context + - manages a state for one specific context + - has a name and unique cid, and is linked to 1 circle (there can be more than 1 in a circle) + - has params + - has todo checklist + - session + - linked to 1 context + - has unique id (int) linked to context + - can have a name (optional) + - is like a chat session, can be any series of actions + - each action once in needs to be executed becomes a job + - a job is linked to a heroscript, which is the physical representation of all the jobs (actions) which need to be executed, the heroscript is in order. + - each action done on session is stateless in memory (no mem usage), in other words can pass Session around without worrying about its internal state + - we use redis as backend to keep the state + - job + - linked to a session + - has incremental id, in relation to session + - is the execution of 1 specific action (heroscript action) + - it results in logs being collected + - it results in params being set on session level (only when WAL) + - TODO: needs to be implemented on job (Kristof) \ No newline at end of file diff --git a/manual/core/play.md b/manual/core/play.md new file mode 100644 index 00000000..9464e5f2 --- /dev/null +++ b/manual/core/play.md @@ -0,0 +1,35 @@ +# Play + +Important section about how to create base objects which hold context an config mgmt. + +## Context + +A context is sort of sandbox in which we execute our scripts it groups the following + +- filesystem key value stor +- logs +- multiple sessions +- gittools: gitstructure +- redis client + +> more info see [context](context.md) + +## Session + +- each time we execute a playbook using heroscript we do it in a session +- a session can have a name as given by the developer or will be autocreated based on time + +> more info see [session](session.md) + +## Config Mgmt + +is done per instance of an object which inherits from BaseConfig. 
+ +- see [base](base.md) +- see [config](config.md) + +## KVS + +there is a KVS attached to each context/session + +- see [kvs](kvs.md) diff --git a/manual/core/session.md b/manual/core/session.md new file mode 100644 index 00000000..c4ff176a --- /dev/null +++ b/manual/core/session.md @@ -0,0 +1,82 @@ +## play.session + +```js +name string // unique id for session (session id), can be more than one per context +plbook playbook.PlayBook //is how heroscripts are being executed +interactive bool = true +params paramsparser.Params +start ourtime.OurTime +end ourtime.OurTime +context Context //link back to the context +``` + +### **The PlayArgs:** + +- context ?&Context +- session ?&Session +- context_name string = 'default' +- session_name string //default will be based on a date when run +- interactive bool = true //can ask questions, default on true +- coderoot string //this will define where all code is checked out +- playbook_url string //url of heroscript to get and execute in current context +- playbook_path string //path of heroscript to get and execute +- playbook_text string //heroscript to execute + +```golang +import freeflowuniverse.herolib.core.base +import freeflowuniverse.herolib.develop.gittools + +mut session:=play.session_new( + coderoot:'/tmp/code' + interactive:true +)! + +//THE next could be in a module which we call + +pub fn play_git(mut session Session) ! { + for mut action in session.plbook.find(filter:'gittools.*')! { + mut p := action.params + mut repo := p.get_default('repo', '')! + ... do whatever is required to + } +} + + +``` + + +### use playbook + +```golang + +// add playbook heroscript (starting from path, text or git url) +//``` +// path string +// text string +// prio int = 99 +// url string +//``` +fn (mut session Session) playbook_add(args_ PLayBookAddArgs) ! + +//show the sesstion playbook as heroscript +fn (mut session Session) heroscript() + +// add priorities for the playbook, normally more internal per module +fn (mut self Session) playbook_priorities_add(prios map[int]string) + + +``` + +### use the kvs database + +is stored on filesystem + +```golang + +// get db of the session, is unique per session +fn (mut self Session) db_get() !dbfs.DB { + +// get the db of the config, is unique per context +fn (mut self Session) db_config_get() !dbfs.DB { + +``` \ No newline at end of file diff --git a/manual/documentation/docextractor.md b/manual/documentation/docextractor.md new file mode 100644 index 00000000..b5a18482 --- /dev/null +++ b/manual/documentation/docextractor.md @@ -0,0 +1,17 @@ +# doc extractor + +is a python tool to help us to get .md files into our manual + +copies all readme.md files from the different lib directors to + +- e.g. 
$crystallib/manual/libreadme/installers_sysadmintools_actrunner.md +- note the name has the location inside of where info came from + +this allows us to make manual and to copy information from the readme's which are in library + +to run + +```bash +~/code/github/freeflowuniverse/crystallib/tools/doc_extractor/extractor.sh +``` + diff --git a/v_install.sh b/v_install.sh new file mode 100755 index 00000000..081d7471 --- /dev/null +++ b/v_install.sh @@ -0,0 +1,331 @@ + +#!/bin/bash -ex + +# Help function +print_help() { + echo "V & HeroLib Installer Script" + echo + echo "Usage: $0 [options]" + echo + echo "Options:" + echo " -h, --help Show this help message" + echo " --reset Force reinstallation of V" + echo " --remove Remove V installation and exit" + echo " --analyzer Install/update v-analyzer" + echo " --herolib Install our herolib" + echo + echo "Examples:" + echo " $0" + echo " $0 --reset " + echo " $0 --remove " + echo " $0 --analyzer " + echo " $0 --herolib " + echo " $0 --reset --analyzer # Fresh install of both" + echo +} + +# Parse arguments +RESET=false +REMOVE=false +INSTALL_ANALYZER=false +HEROLIB=false + +for arg in "$@"; do + case $arg in + -h|--help) + print_help + exit 0 + ;; + --reset) + RESET=true + ;; + --remove) + REMOVE=true + ;; + --herolib) + HEROLIB=true + ;; + --analyzer) + INSTALL_ANALYZER=true + ;; + *) + echo "Unknown option: $arg" + echo "Use -h or --help to see available options" + exit 1 + ;; + esac +done + +# Function to check if command exists +command_exists() { + command -v "$1" >/dev/null 2>&1 +} + +export DIR_BASE="$HOME" +export DIR_BUILD="/tmp" +export DIR_CODE="$DIR_BASE/code" + +function sshknownkeysadd { + mkdir -p ~/.ssh + touch ~/.ssh/known_hosts + if ! grep github.com ~/.ssh/known_hosts > /dev/null + then + ssh-keyscan github.com >> ~/.ssh/known_hosts + fi + if ! grep git.ourworld.tf ~/.ssh/known_hosts > /dev/null + then + ssh-keyscan git.ourworld.tf >> ~/.ssh/known_hosts + fi + git config --global pull.rebase false + +} + +function package_check_install { + local command_name="$1" + if command -v "$command_name" >/dev/null 2>&1; then + echo "command '$command_name' is already installed." 
+ else + package_install '$command_name' + fi +} + +function package_install { + local command_name="$1" + if [[ "${OSNAME}" == "ubuntu" ]]; then + apt -o Dpkg::Options::="--force-confold" -o Dpkg::Options::="--force-confdef" install $1 -q -y --allow-downgrades --allow-remove-essential + elif [[ "${OSNAME}" == "darwin"* ]]; then + brew install $command_name + elif [[ "${OSNAME}" == "alpine"* ]]; then + apk add $command_name + elif [[ "${OSNAME}" == "arch"* ]]; then + pacman --noconfirm -Su $command_name + else + echo "platform : ${OSNAME} not supported" + exit 1 + fi +} + + +function os_update { + echo ' - os update' + if [[ "${OSNAME}" == "ubuntu" ]]; then + if is_github_actions; then + echo "github actions" + else + rm -f /var/lib/apt/lists/lock + rm -f /var/cache/apt/archives/lock + rm -f /var/lib/dpkg/lock* + fi + export TERM=xterm + export DEBIAN_FRONTEND=noninteractive + dpkg --configure -a + apt update -y + if is_github_actions; then + echo "** IN GITHUB ACTIONS, DON'T DO UPDATE" + else + set +e + echo "** UPDATE" + apt-mark hold grub-efi-amd64-signed + set -e + apt upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes + apt autoremove -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes + fi + #apt install apt-transport-https ca-certificates curl software-properties-common -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --force-yes + package_install "apt-transport-https ca-certificates curl wget software-properties-common tmux" + package_install "rclone rsync mc redis-server screen net-tools git htop ca-certificates lsb-release binutils pkg-config" + + elif [[ "${OSNAME}" == "darwin"* ]]; then + if command -v brew >/dev/null 2>&1; then + echo 'homebrew installed' + else + export NONINTERACTIVE=1 + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + unset NONINTERACTIVE + fi + set +e + brew install mc redis curl tmux screen htop wget rclone tcc + set -e + elif [[ "${OSNAME}" == "alpine"* ]]; then + apk update screen git htop tmux + apk add mc curl rsync htop redis bash bash-completion screen git rclone + sed -i 's#/bin/ash#/bin/bash#g' /etc/passwd + elif [[ "${OSNAME}" == "arch"* ]]; then + pacman -Syy --noconfirm + pacman -Syu --noconfirm + pacman -Su --noconfirm arch-install-scripts gcc mc git tmux curl htop redis wget screen net-tools git sudo htop ca-certificates lsb-release screen rclone + + # Check if builduser exists, create if not + if ! id -u builduser > /dev/null 2>&1; then + useradd -m builduser + echo "builduser:$(openssl rand -base64 32 | sha256sum | base64 | head -c 32)" | chpasswd + echo 'builduser ALL=(ALL) NOPASSWD: ALL' | tee /etc/sudoers.d/builduser + fi + + if [[ -n "${DEBUG}" ]]; then + execute_with_marker "paru_install" paru_install + fi + fi + echo 'os_update done' +} + + +function hero_lib_pull { + pushd $DIR_CODE/github/freeflowuniverse/herolib 2>&1 >> /dev/null + if [[ $(git status -s) ]]; then + echo "There are uncommitted changes in the Git repository herolib." 
+ return 1 + fi + git pull + popd 2>&1 >> /dev/null +} + +function hero_lib_get { + + mkdir -p $DIR_CODE/github/freeflowuniverse + if [[ -d "$DIR_CODE/github/freeflowuniverse/herolib" ]] + then + hero_lib_pull + else + pushd $DIR_CODE/github/freeflowuniverse 2>&1 >> /dev/null + git clone --depth 1 --no-single-branch https://github.com/freeflowuniverse/herolib.git + popd 2>&1 >> /dev/null + fi +} + + +remove_all() { + echo "Removing V installation..." + # Set reset to true to use existing reset functionality + RESET=true + # Call reset functionality + sudo rm -rf ~/code/v + sudo rm -rf ~/_code/v + sudo rm -rf ~/.config/v-analyzer + if command_exists v; then + echo "Removing V from system..." + sudo rm -f $(which v) + fi + if command_exists v-analyzer; then + echo "Removing v-analyzer from system..." + sudo rm -f $(which v-analyzer) + fi + + # Remove v-analyzer path from rc files + for RC_FILE in ~/.zshrc ~/.bashrc; do + if [ -f "$RC_FILE" ]; then + echo "Cleaning up $RC_FILE..." + # Create a temporary file + TMP_FILE=$(mktemp) + # Remove lines containing v-analyzer/bin path + sed '/v-analyzer\/bin/d' "$RC_FILE" > "$TMP_FILE" + # Remove empty lines at the end of file + sed -i.bak -e :a -e '/^\n*$/{$d;N;ba' -e '}' "$TMP_FILE" + # Replace original file + mv "$TMP_FILE" "$RC_FILE" + echo "Cleaned up $RC_FILE" + fi + done + + echo "V removal complete" +} + + +# Handle remove if requested +if [ "$REMOVE" = true ]; then + remove_all + exit 0 +fi + +# Handle reset if requested +if [ "$RESET" = true ]; then + remove_all + echo "Reset complete" +fi + +# Create code directory if it doesn't exist +mkdir -p ~/code + +os_update + +sshknownkeysadd + + +# Check if v needs to be installed +if [ "$RESET" = true ] || ! command_exists v; then + # Only clone and install if directory doesn't exist + if [ ! -d ~/code/v ]; then + echo "Installing V..." + cd ~/code + git clone --depth=1 https://github.com/vlang/v + cd v + make + sudo ./v symlink + fi + + # Verify v is in path + if ! command_exists v; then + echo "Error: V installation failed or not in PATH" + echo "Please ensure ~/code/v is in your PATH" + exit 1 + fi + echo "V installation successful!" +fi + +# Install v-analyzer if requested +if [ "$INSTALL_ANALYZER" = true ]; then + echo "Installing v-analyzer..." + v download -RD https://raw.githubusercontent.com/vlang/v-analyzer/main/install.vsh + + # Check if v-analyzer bin directory exists + if [ ! -d "$HOME/.config/v-analyzer/bin" ]; then + echo "Error: v-analyzer bin directory not found at $HOME/.config/v-analyzer/bin" + echo "Please ensure v-analyzer was installed correctly" + exit 1 + fi + + echo "v-analyzer installation successful!" +fi + +# Add v-analyzer to PATH if installed +if [ -d "$HOME/.config/v-analyzer/bin" ]; then + V_ANALYZER_PATH='export PATH="$PATH:$HOME/.config/v-analyzer/bin"' + + # Function to add path to rc file if not present + add_to_rc() { + local RC_FILE="$1" + if [ -f "$RC_FILE" ]; then + if ! grep -q "v-analyzer/bin" "$RC_FILE"; then + echo "" >> "$RC_FILE" + echo "$V_ANALYZER_PATH" >> "$RC_FILE" + echo "Added v-analyzer to $RC_FILE" + else + echo "v-analyzer path already exists in $RC_FILE" + fi + fi + } + + # Add to both .zshrc and .bashrc if they exist + add_to_rc ~/.zshrc + if [ "$(uname)" = "Darwin" ] && [ -f ~/.bashrc ]; then + add_to_rc ~/.bashrc + fi +fi + +# Final verification +if ! 
command_exists v; then + echo "Error: V is not accessible in PATH" + echo "Please add ~/code/v to your PATH and try again" + exit 1 +fi + +if [ "$HEROLIB" = true ]; then + hero_lib_get +fi + + +if [ "$INSTALL_ANALYZER" = true ]; then + echo "Run 'source ~/.bashrc' or 'source ~/.zshrc' to update PATH for v-analyzer" +fi + + +echo "Installation complete!"