From e34a770cc6417ae2c74469bb5922fd0501e5dc96 Mon Sep 17 00:00:00 2001 From: mariobassem Date: Mon, 30 Dec 2024 12:22:36 +0200 Subject: [PATCH] wip: fix doctree example --- .../mdbook_markdown/doctree_export.vsh | 2 +- lib/biz/biz.v | 1 + lib/biz/bizmodel/factory.v | 39 ++ lib/biz/bizmodel/macros.v | 110 +++ lib/biz/bizmodel/model.v | 56 ++ lib/biz/bizmodel/play.v | 89 +++ lib/biz/bizmodel/play_cost.v | 85 +++ lib/biz/bizmodel/play_costcenter.v | 23 + lib/biz/bizmodel/play_funding.v | 41 ++ lib/biz/bizmodel/play_hr.v | 148 ++++ lib/biz/bizmodel/play_product_revenue.v | 345 ++++++++++ lib/biz/bizmodel/templates/departments.md | 16 + lib/biz/bizmodel/templates/employee.md | 28 + lib/biz/bizmodel/templates/employee2.md | 21 + lib/biz/bizmodel/templates/intro.md | 54 ++ .../bizmodel/templates/revenue_overview.md | 74 ++ lib/biz/bizmodel/templates/rows_overview.md | 6 + lib/biz/bizmodel/templates/summary.md | 24 + lib/biz/investortool/company.v | 34 + lib/biz/investortool/employee.v | 64 ++ lib/biz/investortool/factory.v | 139 ++++ lib/biz/investortool/investment_share.v | 55 ++ lib/biz/investortool/investor.v | 33 + lib/biz/investortool/investortool.v | 70 ++ lib/biz/investortool/investortool2.v | 30 + lib/biz/investortool/investortool3.v | 31 + lib/biz/investortool/readme.md | 4 + lib/biz/investortool/simulator/captable.v | 4 + lib/biz/investortool/simulator/play.v | 53 ++ lib/biz/investortool/simulator/simulator.v | 50 ++ .../simulator/templates/investor.md | 7 + .../investortool/simulator/templates/user.md | 9 + lib/biz/investortool/user.v | 37 + lib/biz/spreadsheet/calc_test.v | 132 ++++ lib/biz/spreadsheet/cell.v | 35 + lib/biz/spreadsheet/extrapolate.v | 116 ++++ lib/biz/spreadsheet/factory.v | 60 ++ lib/biz/spreadsheet/number.v | 33 + lib/biz/spreadsheet/playmacro.v | 116 ++++ lib/biz/spreadsheet/readme.md | 63 ++ lib/biz/spreadsheet/row.v | 184 +++++ lib/biz/spreadsheet/row_actions.v | 159 +++++ lib/biz/spreadsheet/row_copy.v | 50 ++ 
lib/biz/spreadsheet/row_recurring.v | 47 ++ lib/biz/spreadsheet/sheet.v | 293 ++++++++ lib/biz/spreadsheet/sheet_getters.v | 185 +++++ lib/biz/spreadsheet/tools.v | 17 + lib/biz/spreadsheet/wiki.v | 86 +++ lib/biz/spreadsheet/wiki_charts.v | 188 +++++ lib/data/doctree/collection/collection.v | 48 ++ lib/data/doctree/collection/data/error.v | 29 + lib/data/doctree/collection/data/file.v | 102 +++ lib/data/doctree/collection/data/page.v | 164 +++++ .../doctree/collection/data/process_aliases.v | 49 ++ .../collection/data/process_aliases_test.v | 40 ++ .../collection/data/process_def_pointers.v | 34 + .../data/process_def_pointers_test.v | 23 + .../doctree/collection/data/process_link.v | 59 ++ .../collection/data/process_link_test.v | 20 + .../doctree/collection/data/process_macros.v | 24 + lib/data/doctree/collection/error.v | 64 ++ lib/data/doctree/collection/export.v | 129 ++++ lib/data/doctree/collection/export_test.v | 47 ++ lib/data/doctree/collection/getters.v | 45 ++ lib/data/doctree/collection/scan.v | 250 +++++++ lib/data/doctree/collection/scan_test.v | 121 ++++ .../doctree/collection/template/errors.md | 11 + .../doctree/collection/testdata/.gitignore | 1 + .../export_expected/src/col1/.collection | 1 + .../export_expected/src/col1/.linkedpages | 1 + .../export_expected/src/col1/errors.md | 9 + .../export_expected/src/col1/file1.md | 1 + .../export_expected/src/col1/file2.md | 1 + .../export_expected/src/col1/img/image.png | 0 .../export_test/mytree/dir1/.collection | 1 + .../export_test/mytree/dir1/dir2/file1.md | 1 + .../testdata/export_test/mytree/dir1/file2.md | 1 + .../export_test/mytree/dir1/image.png | 0 lib/data/doctree/error.v | 45 ++ lib/data/doctree/export.v | 92 +++ lib/data/doctree/export_test.v | 82 +++ lib/data/doctree/getters.v | 72 ++ lib/data/doctree/getters_test.v | 35 + lib/data/doctree/pointer/pointer.v | 106 +++ lib/data/doctree/pointer/pointer_test.v | 139 ++++ lib/data/doctree/process_defs.v | 83 +++ 
lib/data/doctree/process_defs_test.v | 26 + lib/data/doctree/process_includes.v | 153 ++++ lib/data/doctree/process_includes_test.v | 56 ++ lib/data/doctree/process_macros.v | 54 ++ lib/data/doctree/scan.v | 244 +++++++ lib/data/doctree/testdata/.gitignore | 1 + lib/data/doctree/testdata/actions/.collection | 1 + lib/data/doctree/testdata/actions/actions1.md | 7 + .../actions/functionality/actions2.md | 15 + .../export_expected/col1/.collection | 1 + .../export_expected/col1/.linkedpages | 1 + .../export_expected/col1/errors.md | 9 + .../export_test/export_expected/col1/file1.md | 1 + .../export_test/export_expected/col1/file2.md | 1 + .../export_expected/col1/img/image.png | 0 .../export_expected/col2/.collection | 1 + .../export_expected/col2/.linkedpages | 0 .../export_test/export_expected/col2/file3.md | 0 .../export_test/mytree/dir1/.collection | 1 + .../export_test/mytree/dir1/dir2/file1.md | 1 + .../testdata/export_test/mytree/dir1/file2.md | 1 + .../export_test/mytree/dir1/image.png | 0 .../export_test/mytree/dir3/.collection | 1 + .../testdata/export_test/mytree/dir3/file3.md | 0 .../testdata/process_defs_test/col1/page1.md | 1 + .../testdata/process_defs_test/col2/page2.md | 3 + .../process_includes_test/col1/page1.md | 1 + .../process_includes_test/col2/page2.md | 1 + .../process_includes_test/col2/page3.md | 1 + lib/data/doctree/testdata/rpc/.collection | 1 + lib/data/doctree/testdata/rpc/eth.md | 130 ++++ lib/data/doctree/testdata/rpc/rpc.md | 12 + lib/data/doctree/testdata/rpc/stellar.md | 342 +++++++++ lib/data/doctree/testdata/rpc/tfchain.md | 251 +++++++ lib/data/doctree/testdata/rpc/tfgrid.md | 651 ++++++++++++++++++ .../testdata/tree_test/fruits/.collection | 1 + .../testdata/tree_test/fruits/apple.md | 9 + .../testdata/tree_test/fruits/banana.txt | 0 .../fruits/berries/img/digital_twin.png | Bin 0 -> 125580 bytes .../tree_test/fruits/berries/strawberry.md | 5 + .../testdata/tree_test/fruits/intro.md | 3 + 
.../testdata/tree_test/vegetables/.collection | 1 + .../testdata/tree_test/vegetables/cabbage.txt | 0 .../vegetables/cruciferous/broccoli.md | 3 + .../testdata/tree_test/vegetables/intro.md | 3 + .../testdata/tree_test/vegetables/tomato.md | 3 + lib/data/doctree/tree.v | 72 ++ lib/data/doctree/tree_test.v | 79 +++ lib/threefold/deploy/deployment.v | 258 +++++++ lib/threefold/grid/README.md | 5 + lib/threefold/grid/deployer.v | 319 +++++++++ lib/threefold/grid/deployment_state.v | 35 + lib/threefold/grid/factory.v | 69 ++ lib/threefold/grid/graphql.v | 156 +++++ lib/threefold/grid/models/computecapacity.v | 16 + lib/threefold/grid/models/deployment.v | 188 +++++ lib/threefold/grid/models/gw_fqdn.v | 35 + lib/threefold/grid/models/gw_name.v | 41 ++ lib/threefold/grid/models/ip.v | 37 + lib/threefold/grid/models/qsfs.v | 52 ++ lib/threefold/grid/models/workload.v | 166 +++++ lib/threefold/grid/models/zdb.v | 61 ++ lib/threefold/grid/models/zlogs.v | 29 + lib/threefold/grid/models/zmachine.v | 139 ++++ lib/threefold/grid/models/zmount.v | 32 + lib/threefold/grid/models/znet.v | 117 ++++ lib/threefold/grid/rmb.v | 45 ++ lib/threefold/grid/vm.v | 97 +++ lib/threefold/grid/vm_test.v | 22 + lib/threefold/grid/zdb.v | 90 +++ lib/threefold/grid4/cloudslices/loader.v | 17 + lib/threefold/grid4/cloudslices/model.v | 95 +++ .../grid4/cloudslices/model_aggregated.v | 75 ++ lib/threefold/grid4/cloudslices/play.v | 123 ++++ .../grid4/farmingsimulator/factory.v | 96 +++ .../grid4/farmingsimulator/model_capacity.v | 48 ++ .../grid4/farmingsimulator/model_nodesbatch.v | 63 ++ .../farmingsimulator/model_nodetemplate.v | 109 +++ .../grid4/farmingsimulator/model_params.v | 121 ++++ .../farmingsimulator/model_regionalinternet.v | 132 ++++ .../grid4/farmingsimulator/model_simulator.v | 37 + lib/threefold/grid4/farmingsimulator/play.v | 126 ++++ .../grid4/farmingsimulator/playmacro.v | 44 ++ .../templates/node_template.md | 33 + .../templates/regionalinternet_template.md | 10 + 
.../farmingsimulator/token_cultivation.v | 32 + .../grid4/farmingsimulator/token_farming.v | 29 + lib/threefold/grid4/farmingsimulator/wiki.v | 23 + lib/threefold/grid4/gridsimulator/factory.v | 96 +++ .../grid4/gridsimulator/manual/.collection | 1 + .../grid4/gridsimulator/manual/home.md | 1 + .../grid4/gridsimulator/manual/macros.md | 0 lib/threefold/grid4/gridsimulator/play.v | 238 +++++++ lib/threefold/grid4/gridsimulator/readme.md | 6 + lib/threefold/griddriver/client.v | 12 + lib/threefold/griddriver/rmb.v | 33 + lib/threefold/griddriver/substrate.v | 111 +++ lib/threefold/griddriver/utils.v | 38 + lib/threefold/gridproxy/README.md | 93 +++ lib/threefold/gridproxy/gridproxy_core.v | 489 +++++++++++++ lib/threefold/gridproxy/gridproxy_factory.v | 111 +++ lib/threefold/gridproxy/gridproxy_highlevel.v | 169 +++++ lib/threefold/gridproxy/gridproxy_test.v | 247 +++++++ lib/threefold/gridproxy/model/contract.v | 52 ++ lib/threefold/gridproxy/model/farm.v | 22 + lib/threefold/gridproxy/model/filter.v | 575 ++++++++++++++++ lib/threefold/gridproxy/model/iterators.v | 101 +++ lib/threefold/gridproxy/model/model.v | 106 +++ lib/threefold/gridproxy/model/node.v | 128 ++++ lib/threefold/gridproxy/model/stats.v | 44 ++ lib/threefold/gridproxy/model/twin.v | 8 + lib/threefold/main.v | 6 + lib/threefold/nodepilot/nodepilot.v | 108 +++ lib/threefold/nodepilot/readme.md | 6 + lib/threefold/rmb/model_rmb.v | 32 + lib/threefold/rmb/readme.md | 34 + lib/threefold/rmb/rmb_calls_zos.v | 30 + lib/threefold/rmb/rmb_calls_zos_statistics.v | 29 + .../rmb/rmb_calls_zos_storagepools.v | 42 ++ lib/threefold/rmb/rmb_client.v | 80 +++ lib/threefold/rmb/rmb_request.v | 23 + lib/threefold/rmb/rmb_test.v | 13 + lib/threefold/tfgrid3deployer/.heroscript | 8 + lib/threefold/tfgrid3deployer/_todo/base.v | 44 ++ lib/threefold/tfgrid3deployer/_todo/client.v | 16 + lib/threefold/tfgrid3deployer/_todo/k8s.v | 20 + .../tfgrid3deployer/_todo/machines.v | 264 +++++++ 
lib/threefold/tfgrid3deployer/contracts.v | 39 ++ lib/threefold/tfgrid3deployer/deployment.v | 555 +++++++++++++++ .../tfgrid3deployer/deployment_setup.v | 290 ++++++++ lib/threefold/tfgrid3deployer/kvstore.v | 29 + lib/threefold/tfgrid3deployer/network.v | 324 +++++++++ lib/threefold/tfgrid3deployer/readme.md | 27 + .../tfgrid3deployer_factory_.v | 106 +++ .../tfgrid3deployer/tfgrid3deployer_model.v | 63 ++ lib/threefold/tfgrid3deployer/utils.v | 55 ++ lib/threefold/tfgrid3deployer/vmachine.v | 166 +++++ lib/threefold/tfgrid3deployer/webnames.v | 27 + lib/threefold/tfgrid3deployer/zdbs.v | 32 + lib/threefold/tfgrid_actions/README.md | 22 + .../tfgrid_actions/blockchain/blockchain.v | 15 + .../tfgrid_actions/blockchain/factory.v | 11 + .../tfgrid_actions/clients/clients.v | 16 + lib/threefold/tfgrid_actions/factory.v | 94 +++ lib/threefold/tfgrid_actions/nostr/channel.v | 59 ++ lib/threefold/tfgrid_actions/nostr/direct.v | 34 + lib/threefold/tfgrid_actions/nostr/handler.v | 35 + .../tfgrid_actions/stellar/account.v | 47 ++ .../tfgrid_actions/stellar/handler.v | 30 + .../tfgrid_actions/tfgrid/contracts.v | 64 ++ lib/threefold/tfgrid_actions/tfgrid/core.v | 17 + .../tfgrid_actions/tfgrid/discourse.v | 54 ++ lib/threefold/tfgrid_actions/tfgrid/farms.v | 61 ++ .../tfgrid_actions/tfgrid/funkwhale.v | 48 ++ .../tfgrid_actions/tfgrid/gateway_fqdn.v | 40 ++ .../tfgrid_actions/tfgrid/gateway_name.v | 38 + lib/threefold/tfgrid_actions/tfgrid/handler.v | 49 ++ lib/threefold/tfgrid_actions/tfgrid/helpers.v | 31 + lib/threefold/tfgrid_actions/tfgrid/k8s.v | 130 ++++ lib/threefold/tfgrid_actions/tfgrid/network.v | 88 +++ lib/threefold/tfgrid_actions/tfgrid/nodes.v | 112 +++ .../tfgrid_actions/tfgrid/peertube.v | 48 ++ .../tfgrid_actions/tfgrid/presearch.v | 50 ++ lib/threefold/tfgrid_actions/tfgrid/stats.v | 24 + lib/threefold/tfgrid_actions/tfgrid/taiga.v | 50 ++ lib/threefold/tfgrid_actions/tfgrid/twins.v | 43 ++ lib/threefold/tfgrid_actions/tfgrid/vm.v | 75 ++ 
lib/threefold/tfgrid_actions/tfgrid/zdb.v | 42 ++ lib/threefold/tfgrid_actions/web3gw/handler.v | 38 + lib/threefold/tfgrid_actions/web3gw/keys.v | 47 ++ lib/threefold/tfgrid_actions/web3gw/money.v | 152 ++++ lib/threefold/tfrobot/README.md | 3 + lib/threefold/tfrobot/cancel.v | 46 ++ lib/threefold/tfrobot/cancel_test.v | 69 ++ lib/threefold/tfrobot/deploy.v | 184 +++++ lib/threefold/tfrobot/deploy_test.v | 64 ++ lib/threefold/tfrobot/factory.v | 80 +++ lib/threefold/tfrobot/factory_test.v | 5 + lib/threefold/tfrobot/job.v | 153 ++++ lib/threefold/tfrobot/job_test.v | 35 + lib/threefold/tfrobot/templates/config.json | 57 ++ lib/threefold/tfrobot/templates/config.yaml | 40 ++ lib/threefold/tfrobot/tfrobot_redis.v | 47 ++ lib/threefold/tfrobot/vm.v | 214 ++++++ lib/threefold/tfrobot/vm_deploy.v | 102 +++ lib/threefold/tfrobot/vm_deploy_test.v | 32 + lib/threefold/tokens/readme.md | 2 + lib/threefold/tokens/tokens_fetch.v | 422 ++++++++++++ lib/threefold/zerohub/flist.v | 176 +++++ lib/threefold/zerohub/readme.md | 18 + lib/threefold/zerohub/zerohub.v | 42 ++ lib/threefold/zerohub/zerohub_test.v | 68 ++ 278 files changed, 19517 insertions(+), 1 deletion(-) create mode 100644 lib/biz/biz.v create mode 100644 lib/biz/bizmodel/factory.v create mode 100644 lib/biz/bizmodel/macros.v create mode 100644 lib/biz/bizmodel/model.v create mode 100644 lib/biz/bizmodel/play.v create mode 100644 lib/biz/bizmodel/play_cost.v create mode 100644 lib/biz/bizmodel/play_costcenter.v create mode 100644 lib/biz/bizmodel/play_funding.v create mode 100644 lib/biz/bizmodel/play_hr.v create mode 100644 lib/biz/bizmodel/play_product_revenue.v create mode 100644 lib/biz/bizmodel/templates/departments.md create mode 100644 lib/biz/bizmodel/templates/employee.md create mode 100644 lib/biz/bizmodel/templates/employee2.md create mode 100644 lib/biz/bizmodel/templates/intro.md create mode 100644 lib/biz/bizmodel/templates/revenue_overview.md create mode 100644 
lib/biz/bizmodel/templates/rows_overview.md create mode 100644 lib/biz/bizmodel/templates/summary.md create mode 100644 lib/biz/investortool/company.v create mode 100644 lib/biz/investortool/employee.v create mode 100644 lib/biz/investortool/factory.v create mode 100644 lib/biz/investortool/investment_share.v create mode 100644 lib/biz/investortool/investor.v create mode 100644 lib/biz/investortool/investortool.v create mode 100644 lib/biz/investortool/investortool2.v create mode 100644 lib/biz/investortool/investortool3.v create mode 100644 lib/biz/investortool/readme.md create mode 100644 lib/biz/investortool/simulator/captable.v create mode 100644 lib/biz/investortool/simulator/play.v create mode 100644 lib/biz/investortool/simulator/simulator.v create mode 100644 lib/biz/investortool/simulator/templates/investor.md create mode 100644 lib/biz/investortool/simulator/templates/user.md create mode 100644 lib/biz/investortool/user.v create mode 100644 lib/biz/spreadsheet/calc_test.v create mode 100644 lib/biz/spreadsheet/cell.v create mode 100644 lib/biz/spreadsheet/extrapolate.v create mode 100644 lib/biz/spreadsheet/factory.v create mode 100644 lib/biz/spreadsheet/number.v create mode 100644 lib/biz/spreadsheet/playmacro.v create mode 100644 lib/biz/spreadsheet/readme.md create mode 100644 lib/biz/spreadsheet/row.v create mode 100644 lib/biz/spreadsheet/row_actions.v create mode 100644 lib/biz/spreadsheet/row_copy.v create mode 100644 lib/biz/spreadsheet/row_recurring.v create mode 100644 lib/biz/spreadsheet/sheet.v create mode 100644 lib/biz/spreadsheet/sheet_getters.v create mode 100644 lib/biz/spreadsheet/tools.v create mode 100644 lib/biz/spreadsheet/wiki.v create mode 100644 lib/biz/spreadsheet/wiki_charts.v create mode 100644 lib/data/doctree/collection/collection.v create mode 100644 lib/data/doctree/collection/data/error.v create mode 100644 lib/data/doctree/collection/data/file.v create mode 100644 lib/data/doctree/collection/data/page.v create mode 
100644 lib/data/doctree/collection/data/process_aliases.v create mode 100644 lib/data/doctree/collection/data/process_aliases_test.v create mode 100644 lib/data/doctree/collection/data/process_def_pointers.v create mode 100644 lib/data/doctree/collection/data/process_def_pointers_test.v create mode 100644 lib/data/doctree/collection/data/process_link.v create mode 100644 lib/data/doctree/collection/data/process_link_test.v create mode 100644 lib/data/doctree/collection/data/process_macros.v create mode 100644 lib/data/doctree/collection/error.v create mode 100644 lib/data/doctree/collection/export.v create mode 100644 lib/data/doctree/collection/export_test.v create mode 100644 lib/data/doctree/collection/getters.v create mode 100644 lib/data/doctree/collection/scan.v create mode 100644 lib/data/doctree/collection/scan_test.v create mode 100644 lib/data/doctree/collection/template/errors.md create mode 100644 lib/data/doctree/collection/testdata/.gitignore create mode 100644 lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/.collection create mode 100644 lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/.linkedpages create mode 100644 lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/errors.md create mode 100644 lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/file1.md create mode 100644 lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/file2.md create mode 100644 lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/img/image.png create mode 100644 lib/data/doctree/collection/testdata/export_test/mytree/dir1/.collection create mode 100644 lib/data/doctree/collection/testdata/export_test/mytree/dir1/dir2/file1.md create mode 100644 lib/data/doctree/collection/testdata/export_test/mytree/dir1/file2.md create mode 100644 lib/data/doctree/collection/testdata/export_test/mytree/dir1/image.png create mode 100644 
lib/data/doctree/error.v create mode 100644 lib/data/doctree/export.v create mode 100644 lib/data/doctree/export_test.v create mode 100644 lib/data/doctree/getters.v create mode 100644 lib/data/doctree/getters_test.v create mode 100644 lib/data/doctree/pointer/pointer.v create mode 100644 lib/data/doctree/pointer/pointer_test.v create mode 100644 lib/data/doctree/process_defs.v create mode 100644 lib/data/doctree/process_defs_test.v create mode 100644 lib/data/doctree/process_includes.v create mode 100644 lib/data/doctree/process_includes_test.v create mode 100644 lib/data/doctree/process_macros.v create mode 100644 lib/data/doctree/scan.v create mode 100644 lib/data/doctree/testdata/.gitignore create mode 100644 lib/data/doctree/testdata/actions/.collection create mode 100644 lib/data/doctree/testdata/actions/actions1.md create mode 100644 lib/data/doctree/testdata/actions/functionality/actions2.md create mode 100644 lib/data/doctree/testdata/export_test/export_expected/col1/.collection create mode 100644 lib/data/doctree/testdata/export_test/export_expected/col1/.linkedpages create mode 100644 lib/data/doctree/testdata/export_test/export_expected/col1/errors.md create mode 100644 lib/data/doctree/testdata/export_test/export_expected/col1/file1.md create mode 100644 lib/data/doctree/testdata/export_test/export_expected/col1/file2.md create mode 100644 lib/data/doctree/testdata/export_test/export_expected/col1/img/image.png create mode 100644 lib/data/doctree/testdata/export_test/export_expected/col2/.collection create mode 100644 lib/data/doctree/testdata/export_test/export_expected/col2/.linkedpages create mode 100644 lib/data/doctree/testdata/export_test/export_expected/col2/file3.md create mode 100644 lib/data/doctree/testdata/export_test/mytree/dir1/.collection create mode 100644 lib/data/doctree/testdata/export_test/mytree/dir1/dir2/file1.md create mode 100644 lib/data/doctree/testdata/export_test/mytree/dir1/file2.md create mode 100644 
lib/data/doctree/testdata/export_test/mytree/dir1/image.png create mode 100644 lib/data/doctree/testdata/export_test/mytree/dir3/.collection create mode 100644 lib/data/doctree/testdata/export_test/mytree/dir3/file3.md create mode 100644 lib/data/doctree/testdata/process_defs_test/col1/page1.md create mode 100644 lib/data/doctree/testdata/process_defs_test/col2/page2.md create mode 100644 lib/data/doctree/testdata/process_includes_test/col1/page1.md create mode 100644 lib/data/doctree/testdata/process_includes_test/col2/page2.md create mode 100644 lib/data/doctree/testdata/process_includes_test/col2/page3.md create mode 100644 lib/data/doctree/testdata/rpc/.collection create mode 100644 lib/data/doctree/testdata/rpc/eth.md create mode 100644 lib/data/doctree/testdata/rpc/rpc.md create mode 100644 lib/data/doctree/testdata/rpc/stellar.md create mode 100644 lib/data/doctree/testdata/rpc/tfchain.md create mode 100644 lib/data/doctree/testdata/rpc/tfgrid.md create mode 100644 lib/data/doctree/testdata/tree_test/fruits/.collection create mode 100644 lib/data/doctree/testdata/tree_test/fruits/apple.md create mode 100644 lib/data/doctree/testdata/tree_test/fruits/banana.txt create mode 100644 lib/data/doctree/testdata/tree_test/fruits/berries/img/digital_twin.png create mode 100644 lib/data/doctree/testdata/tree_test/fruits/berries/strawberry.md create mode 100644 lib/data/doctree/testdata/tree_test/fruits/intro.md create mode 100644 lib/data/doctree/testdata/tree_test/vegetables/.collection create mode 100644 lib/data/doctree/testdata/tree_test/vegetables/cabbage.txt create mode 100644 lib/data/doctree/testdata/tree_test/vegetables/cruciferous/broccoli.md create mode 100644 lib/data/doctree/testdata/tree_test/vegetables/intro.md create mode 100644 lib/data/doctree/testdata/tree_test/vegetables/tomato.md create mode 100644 lib/data/doctree/tree.v create mode 100644 lib/data/doctree/tree_test.v create mode 100644 lib/threefold/deploy/deployment.v create mode 100644 
lib/threefold/grid/README.md create mode 100644 lib/threefold/grid/deployer.v create mode 100644 lib/threefold/grid/deployment_state.v create mode 100644 lib/threefold/grid/factory.v create mode 100644 lib/threefold/grid/graphql.v create mode 100644 lib/threefold/grid/models/computecapacity.v create mode 100644 lib/threefold/grid/models/deployment.v create mode 100644 lib/threefold/grid/models/gw_fqdn.v create mode 100644 lib/threefold/grid/models/gw_name.v create mode 100644 lib/threefold/grid/models/ip.v create mode 100644 lib/threefold/grid/models/qsfs.v create mode 100644 lib/threefold/grid/models/workload.v create mode 100644 lib/threefold/grid/models/zdb.v create mode 100644 lib/threefold/grid/models/zlogs.v create mode 100644 lib/threefold/grid/models/zmachine.v create mode 100644 lib/threefold/grid/models/zmount.v create mode 100644 lib/threefold/grid/models/znet.v create mode 100644 lib/threefold/grid/rmb.v create mode 100644 lib/threefold/grid/vm.v create mode 100644 lib/threefold/grid/vm_test.v create mode 100644 lib/threefold/grid/zdb.v create mode 100644 lib/threefold/grid4/cloudslices/loader.v create mode 100644 lib/threefold/grid4/cloudslices/model.v create mode 100644 lib/threefold/grid4/cloudslices/model_aggregated.v create mode 100644 lib/threefold/grid4/cloudslices/play.v create mode 100644 lib/threefold/grid4/farmingsimulator/factory.v create mode 100644 lib/threefold/grid4/farmingsimulator/model_capacity.v create mode 100644 lib/threefold/grid4/farmingsimulator/model_nodesbatch.v create mode 100644 lib/threefold/grid4/farmingsimulator/model_nodetemplate.v create mode 100644 lib/threefold/grid4/farmingsimulator/model_params.v create mode 100644 lib/threefold/grid4/farmingsimulator/model_regionalinternet.v create mode 100644 lib/threefold/grid4/farmingsimulator/model_simulator.v create mode 100644 lib/threefold/grid4/farmingsimulator/play.v create mode 100644 lib/threefold/grid4/farmingsimulator/playmacro.v create mode 100644 
lib/threefold/grid4/farmingsimulator/templates/node_template.md create mode 100644 lib/threefold/grid4/farmingsimulator/templates/regionalinternet_template.md create mode 100644 lib/threefold/grid4/farmingsimulator/token_cultivation.v create mode 100644 lib/threefold/grid4/farmingsimulator/token_farming.v create mode 100644 lib/threefold/grid4/farmingsimulator/wiki.v create mode 100644 lib/threefold/grid4/gridsimulator/factory.v create mode 100644 lib/threefold/grid4/gridsimulator/manual/.collection create mode 100644 lib/threefold/grid4/gridsimulator/manual/home.md create mode 100644 lib/threefold/grid4/gridsimulator/manual/macros.md create mode 100644 lib/threefold/grid4/gridsimulator/play.v create mode 100644 lib/threefold/grid4/gridsimulator/readme.md create mode 100644 lib/threefold/griddriver/client.v create mode 100644 lib/threefold/griddriver/rmb.v create mode 100644 lib/threefold/griddriver/substrate.v create mode 100644 lib/threefold/griddriver/utils.v create mode 100644 lib/threefold/gridproxy/README.md create mode 100644 lib/threefold/gridproxy/gridproxy_core.v create mode 100644 lib/threefold/gridproxy/gridproxy_factory.v create mode 100644 lib/threefold/gridproxy/gridproxy_highlevel.v create mode 100644 lib/threefold/gridproxy/gridproxy_test.v create mode 100644 lib/threefold/gridproxy/model/contract.v create mode 100644 lib/threefold/gridproxy/model/farm.v create mode 100644 lib/threefold/gridproxy/model/filter.v create mode 100644 lib/threefold/gridproxy/model/iterators.v create mode 100644 lib/threefold/gridproxy/model/model.v create mode 100644 lib/threefold/gridproxy/model/node.v create mode 100644 lib/threefold/gridproxy/model/stats.v create mode 100644 lib/threefold/gridproxy/model/twin.v create mode 100644 lib/threefold/main.v create mode 100644 lib/threefold/nodepilot/nodepilot.v create mode 100644 lib/threefold/nodepilot/readme.md create mode 100644 lib/threefold/rmb/model_rmb.v create mode 100644 lib/threefold/rmb/readme.md create mode 
100644 lib/threefold/rmb/rmb_calls_zos.v create mode 100644 lib/threefold/rmb/rmb_calls_zos_statistics.v create mode 100644 lib/threefold/rmb/rmb_calls_zos_storagepools.v create mode 100644 lib/threefold/rmb/rmb_client.v create mode 100644 lib/threefold/rmb/rmb_request.v create mode 100644 lib/threefold/rmb/rmb_test.v create mode 100644 lib/threefold/tfgrid3deployer/.heroscript create mode 100644 lib/threefold/tfgrid3deployer/_todo/base.v create mode 100644 lib/threefold/tfgrid3deployer/_todo/client.v create mode 100644 lib/threefold/tfgrid3deployer/_todo/k8s.v create mode 100644 lib/threefold/tfgrid3deployer/_todo/machines.v create mode 100644 lib/threefold/tfgrid3deployer/contracts.v create mode 100644 lib/threefold/tfgrid3deployer/deployment.v create mode 100644 lib/threefold/tfgrid3deployer/deployment_setup.v create mode 100644 lib/threefold/tfgrid3deployer/kvstore.v create mode 100644 lib/threefold/tfgrid3deployer/network.v create mode 100644 lib/threefold/tfgrid3deployer/readme.md create mode 100644 lib/threefold/tfgrid3deployer/tfgrid3deployer_factory_.v create mode 100644 lib/threefold/tfgrid3deployer/tfgrid3deployer_model.v create mode 100644 lib/threefold/tfgrid3deployer/utils.v create mode 100644 lib/threefold/tfgrid3deployer/vmachine.v create mode 100644 lib/threefold/tfgrid3deployer/webnames.v create mode 100644 lib/threefold/tfgrid3deployer/zdbs.v create mode 100644 lib/threefold/tfgrid_actions/README.md create mode 100644 lib/threefold/tfgrid_actions/blockchain/blockchain.v create mode 100644 lib/threefold/tfgrid_actions/blockchain/factory.v create mode 100644 lib/threefold/tfgrid_actions/clients/clients.v create mode 100644 lib/threefold/tfgrid_actions/factory.v create mode 100644 lib/threefold/tfgrid_actions/nostr/channel.v create mode 100644 lib/threefold/tfgrid_actions/nostr/direct.v create mode 100644 lib/threefold/tfgrid_actions/nostr/handler.v create mode 100644 lib/threefold/tfgrid_actions/stellar/account.v create mode 100644 
lib/threefold/tfgrid_actions/stellar/handler.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/contracts.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/core.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/discourse.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/farms.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/funkwhale.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/gateway_fqdn.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/gateway_name.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/handler.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/helpers.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/k8s.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/network.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/nodes.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/peertube.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/presearch.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/stats.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/taiga.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/twins.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/vm.v create mode 100644 lib/threefold/tfgrid_actions/tfgrid/zdb.v create mode 100644 lib/threefold/tfgrid_actions/web3gw/handler.v create mode 100644 lib/threefold/tfgrid_actions/web3gw/keys.v create mode 100644 lib/threefold/tfgrid_actions/web3gw/money.v create mode 100644 lib/threefold/tfrobot/README.md create mode 100644 lib/threefold/tfrobot/cancel.v create mode 100644 lib/threefold/tfrobot/cancel_test.v create mode 100644 lib/threefold/tfrobot/deploy.v create mode 100644 lib/threefold/tfrobot/deploy_test.v create mode 100644 lib/threefold/tfrobot/factory.v create mode 100644 lib/threefold/tfrobot/factory_test.v create mode 100644 lib/threefold/tfrobot/job.v create mode 100644 lib/threefold/tfrobot/job_test.v create mode 100644 lib/threefold/tfrobot/templates/config.json 
create mode 100644 lib/threefold/tfrobot/templates/config.yaml create mode 100644 lib/threefold/tfrobot/tfrobot_redis.v create mode 100644 lib/threefold/tfrobot/vm.v create mode 100644 lib/threefold/tfrobot/vm_deploy.v create mode 100644 lib/threefold/tfrobot/vm_deploy_test.v create mode 100644 lib/threefold/tokens/readme.md create mode 100644 lib/threefold/tokens/tokens_fetch.v create mode 100644 lib/threefold/zerohub/flist.v create mode 100644 lib/threefold/zerohub/readme.md create mode 100644 lib/threefold/zerohub/zerohub.v create mode 100644 lib/threefold/zerohub/zerohub_test.v diff --git a/examples/webtools/mdbook_markdown/doctree_export.vsh b/examples/webtools/mdbook_markdown/doctree_export.vsh index 11215d15..795b16bc 100755 --- a/examples/webtools/mdbook_markdown/doctree_export.vsh +++ b/examples/webtools/mdbook_markdown/doctree_export.vsh @@ -19,7 +19,7 @@ for project in 'projectinca, legal, why, web4,tfgrid3'.split(',').map(it.trim_sp } tree.export( - dest: '/tmp/test' + destination: '/tmp/test' reset: true keep_structure: true exclude_errors: false diff --git a/lib/biz/biz.v b/lib/biz/biz.v new file mode 100644 index 00000000..0d2c150f --- /dev/null +++ b/lib/biz/biz.v @@ -0,0 +1 @@ +module biz diff --git a/lib/biz/bizmodel/factory.v b/lib/biz/bizmodel/factory.v new file mode 100644 index 00000000..c3118da3 --- /dev/null +++ b/lib/biz/bizmodel/factory.v @@ -0,0 +1,39 @@ +module bizmodel + +import freeflowuniverse.herolib.biz.spreadsheet + +__global ( + bizmodels shared map[string]&BizModel +) + +pub fn get(name string) !&BizModel { + rlock bizmodels { + if name in bizmodels { + return bizmodels[name] or { panic('bug') } + } + } + return error("cann't find biz model:'${name}' in global bizmodels") +} + +// get bizmodel from global +pub fn getset(name string) !&BizModel { + lock bizmodels { + if name !in bizmodels { + mut sh := spreadsheet.sheet_new(name: 'bizmodel_${name}')! 
+ mut bizmodel := BizModel{ + sheet: sh + name: name + // currencies: cs + } + bizmodels[bizmodel.name] = &bizmodel + } + return bizmodels[name] or { panic('bug') } + } + panic('bug') +} + +pub fn set(bizmodel BizModel) { + lock bizmodels { + bizmodels[bizmodel.name] = &bizmodel + } +} diff --git a/lib/biz/bizmodel/macros.v b/lib/biz/bizmodel/macros.v new file mode 100644 index 00000000..36aaef56 --- /dev/null +++ b/lib/biz/bizmodel/macros.v @@ -0,0 +1,110 @@ +module bizmodel + +import freeflowuniverse.herolib.core.playbook +import freeflowuniverse.herolib.data.paramsparser +import freeflowuniverse.herolib.data.markdownparser.elements +import freeflowuniverse.herolib.ui.console + +pub fn playmacro(action playbook.Action) !string { + p := action.params + + bizname := action.params.get('bizname') or { + return error("Can't find param:'bizname' for action: ${action.name}, please specify as bizname: ...") + } + + mut sim := get(bizname)! + + if action.name == 'employee_wiki' { + return employee_wiki(p, sim)! + } else if action.name == 'employees_wiki' { + return employees_wiki(p, sim)! + } else if action.name == 'department_wiki' { + return department_wiki(p, sim)! + } else if action.name == 'revenues_wiki' { + return revenues_wiki(p, mut sim)! + } + + return error("couldn't find macro '${action.name}' for bizmodel.") +} + +fn employee_wiki(p paramsparser.Params, sim BizModel) !string { + console.print_green('playmacro employee_wiki') + mut id := p.get_default('id', '')! + if id !in sim.employees { + id = p.get_default('name', '')! 
+ } + + if id !in sim.employees { + println(id) + println(sim.employees) + panic('sss') + return error('employee with name \'${id}\' not found') + } + + employee := sim.employees[id] or { panic('bug') } + + println(employee) + + // OUTPUTS: + // &bizmodel.Employee{ + // name: 'despiegk' + // description: 'CTO' + // department: 'engineering' + // cost: '1:12000EUR,60:21258.73200000001' + // cost_percent_revenue: 0.0 + // nrpeople: '1' + // indexation: 0.1 + // cost_center: 'default_costcenter' + // page: 'cto.md' + // } + + // if true{panic("s")} + + // theme := 'light' + // theme := 'dark' // Removed unused variable + mut t := $tmpl('./templates/employee.md') + return t +} + +fn employees_wiki(p paramsparser.Params, sim BizModel) !string { + mut deps := []Department{} + for _, dep in sim.departments { + deps << dep + } + deps.sort(a.order < b.order) + + mut employee_names := map[string]string{} + for _, empl in sim.employees { + employee_names[empl.name] = empl.name + if empl.page.len > 0 { + employee_names[empl.name] = '[${empl.name}](${empl.page})' + } + } + mut t := $tmpl('./templates/departments.md') + + return t +} + +fn department_wiki(p paramsparser.Params, sim BizModel) !string { + return '' +} + +fn revenues_wiki(p paramsparser.Params, mut sim BizModel) !string { + // mut revs := map[string]string{} // Removed unused variable + + // for name,_ in sim.products{ + // myrow:=sim.sheet.row_get('${name}_rev_total') or { panic("bug in revenues_wiki macro") } + // println(myrow) + // } + + // if true{ + // panic("s") + // } + + panic('fix template below') + // mut t:=$tmpl('./templates/revenue_overview.md') + + // title:'REVENUE FOR ${name1.to_lower().replace("_"," ")}' + + // return t +} diff --git a/lib/biz/bizmodel/model.v b/lib/biz/bizmodel/model.v new file mode 100644 index 00000000..ca311d02 --- /dev/null +++ b/lib/biz/bizmodel/model.v @@ -0,0 +1,56 @@ +module bizmodel + +import freeflowuniverse.herolib.biz.spreadsheet + +pub struct BizModel { +pub mut: + 
name string + sheet &spreadsheet.Sheet + employees map[string]&Employee + departments map[string]&Department + costcenters map[string]&Costcenter + products map[string]&Product +} + +pub struct Employee { +pub: + name string + description string + title string + department string + cost string + cost_percent_revenue f64 + nrpeople string + indexation f64 + cost_center string + page string + fulltime_perc f64 +} + +pub struct Department { +pub: + name string + description string + page string + title string + order int +} + +pub struct Costcenter { +pub: + name string + description string + department string +} + +pub struct Product { +pub mut: + name string + title string + description string + order int + has_revenue bool + has_items bool + has_oneoffs bool + nr_months_recurring int +} diff --git a/lib/biz/bizmodel/play.v b/lib/biz/bizmodel/play.v new file mode 100644 index 00000000..21756966 --- /dev/null +++ b/lib/biz/bizmodel/play.v @@ -0,0 +1,89 @@ +module bizmodel + +import freeflowuniverse.herolib.core.playbook { PlayBook } +import freeflowuniverse.herolib.ui.console +// import freeflowuniverse.herolib.core.texttools +// import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.biz.spreadsheet + +pub fn play(mut plbook PlayBook) ! { + // first make sure we find a run action to know the name + mut actions4 := plbook.actions_find(actor: 'bizmodel')! + + if actions4.len == 0 { + return + } + + knownactions := ['revenue_define', 'employee_define', 'department_define', 'funding_define', + 'costcenter_define', 'cost_define'] + + for action in actions4 { + // biz name needs to be specified in the the bizmodel hero actions + bizname := action.params.get('bizname') or { + return error("Can't find param: 'bizname' for ${action.actor}.${action.name} macro, is a requirement argument.") + } + mut sim := getset(bizname)! 
+ + if action.name !in knownactions { + return error("Can't find macro with name: ${action.name} for macro's for bizmodel.") + } + + console.print_debug(action.name) + match action.name { + 'revenue_define' { + sim.revenue_action(action)! + } + 'funding_define' { + sim.funding_define_action(action)! + } + 'costcenter_define' { + sim.costcenter_define_action(action)! + } + else {} + } + } + + console.print_debug('TOTALS for bizmodel play') + // now we have processed the macro's, we can calculate the totals + rlock bizmodels { + for _, mut sim in bizmodels { + // sim.hr_total()! + sim.cost_total()! + sim.revenue_total()! + sim.funding_total()! + } + } + + for action in actions4 { + console.print_debug(action.name) + // biz name needs to be specified in the the bizmodel hero actions + bizname := action.params.get('bizname') or { + return error("Can't find param: 'bizname' for bizmodel macro, is a requirement argument.") + } + + mut sim := get(bizname)! + + if action.name !in knownactions { + return error("Can't find macro with name: ${action.name} for macro's for bizmodel.") + } + + match action.name { + 'cost_define' { + sim.cost_define_action(action)! + } + 'department_define' { + sim.department_define_action(action)! + } + 'employee_define' { + sim.employee_define_action(action)! + } + else {} + } + } + + // mut sim:=get("test")! + // //println(sim.sheet.rows.keys()) + // //println(spreadsheet.sheets_keys()) + // println(spreadsheet.sheet_get('bizmodel_test')!) + // if true{panic("sss")} +} diff --git a/lib/biz/bizmodel/play_cost.v b/lib/biz/bizmodel/play_cost.v new file mode 100644 index 00000000..ee015d65 --- /dev/null +++ b/lib/biz/bizmodel/play_cost.v @@ -0,0 +1,85 @@ +module bizmodel + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.core.texttools + +fn (mut m BizModel) cost_define_action(action Action) ! { + mut name := action.params.get_default('name', '')! + mut descr := action.params.get_default('descr', '')! 
+ if descr.len == 0 { + descr = action.params.get('description')! + } + if name.len == 0 { + // make name ourselves + name = texttools.name_fix(descr) // TODO:limit len + } + mut cost := action.params.get_default('cost', '0.0')! // is extrapolated + mut cost_one := action.params.get_default('cost_one', '')! + + department := action.params.get_default('department', 'unknown department')! + cost_percent_revenue := action.params.get_percentage_default('cost_percent_revenue', + '0%')! + + indexation := action.params.get_percentage_default('indexation', '0%')! + + if indexation > 0 { + if cost.contains(':') { + return error('cannot specify cost growth and indexation, should be no : inside cost param.') + } + // TODO: need to be able to go from e.g. month 6 and still do indexation + mut cost_ := cost.int() + cost2 := cost_ * (1 + indexation) * (1 + indexation) * (1 + indexation) * (1 + indexation) * ( + 1 + indexation) * (1 + indexation) // 6 years, maybe need to look at months + cost = '0:${cost},59:${cost2}' + // console.print_debug(cost) + } + + mut extrap := false + if cost_one != '' { + // if cost!=""{ + // return error("Cannot specify cost:'${cost}' and cost_one:'${cost_one}'.") + // } + extrap = false + cost = cost_one + } else { + // if cost_one!=""{ + // return error("Cannot specify cost:'${cost}' and cost_one:'${cost_one}'.") + // } + extrap = true + } + + mut cost_row := m.sheet.row_new( + name: 'cost_${name}' + growth: cost + tags: 'department:${department} ocost' + descr: 'cost overhead for department ${department}' + extrapolate: extrap + )! + cost_row.action(action: .reverse)! + + if cost_percent_revenue > 0 { + mut revtotal := m.sheet.row_get('revenue_total')! + mut cost_min := revtotal.action( + action: .multiply + val: cost_percent_revenue + name: 'tmp3' + aggregatetype: .avg + )! + cost_min.action(action: .forwardavg)! // avg out forward looking for 12 months + cost_min.action(action: .reverse)! + cost_row.action( + action: .min + rows: [cost_min] + )! 
+ m.sheet.row_delete('tmp3') + } +} + +fn (mut sim BizModel) cost_total() ! { + sim.sheet.group2row( + name: 'hr_cost_total' + include: ['hrcost'] + tags: 'pl' + descr: 'total cost for hr' + )! +} diff --git a/lib/biz/bizmodel/play_costcenter.v b/lib/biz/bizmodel/play_costcenter.v new file mode 100644 index 00000000..a3cb25c7 --- /dev/null +++ b/lib/biz/bizmodel/play_costcenter.v @@ -0,0 +1,23 @@ +module bizmodel + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.core.texttools + +fn (mut m BizModel) costcenter_define_action(action Action) ! { + mut name := action.params.get_default('name', '')! + mut descr := action.params.get_default('descr', '')! + if descr.len == 0 { + descr = action.params.get('description')! + } + mut department := action.params.get_default('department', '')! + if name.len == 0 { + // make name ourselves + name = texttools.name_fix(descr) // TODO:limit len + } + mut cc := Costcenter{ + name: name + description: descr + department: department + } + m.costcenters[name] = &cc +} diff --git a/lib/biz/bizmodel/play_funding.v b/lib/biz/bizmodel/play_funding.v new file mode 100644 index 00000000..71644dc9 --- /dev/null +++ b/lib/biz/bizmodel/play_funding.v @@ -0,0 +1,41 @@ +module bizmodel + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.core.texttools + +// populate the params for hr . +// !!hr.funding_define . +// - name, e.g. for a specific person . +// - descr: description of the funding . +// - investment is month:amount,month:amount, ... . +// - type: loan or capital . +fn (mut m BizModel) funding_define_action(action Action) ! { + mut name := action.params.get_default('name', '')! + mut descr := action.params.get_default('descr', '')! + if descr.len == 0 { + descr = action.params.get('description')! 
+ } + if name.len == 0 { + // make name ourselves + name = texttools.name_fix(descr) // TODO:limit len + } + mut investment := action.params.get_default('investment', '0.0')! + fundingtype := action.params.get_default('type', 'capital')! + + m.sheet.row_new( + name: 'funding_${name}' + growth: investment + tags: 'funding type:${fundingtype}' + descr: descr + extrapolate: false + )! +} + +fn (mut sim BizModel) funding_total() ! { + sim.sheet.group2row( + name: 'funding_total' + include: ['funding'] + tags: 'pl' + descr: 'total funding' + )! +} diff --git a/lib/biz/bizmodel/play_hr.v b/lib/biz/bizmodel/play_hr.v new file mode 100644 index 00000000..97abbf92 --- /dev/null +++ b/lib/biz/bizmodel/play_hr.v @@ -0,0 +1,148 @@ +module bizmodel + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.core.texttools +// import freeflowuniverse.herolib.data.paramsparser +// import freeflowuniverse.herolib.core.pathlib +// import rand + +// populate the params for hr +// !!hr.employee_define +// descr:'Junior Engineer' +// nrpeople:'1:5,60:30' +// cost:'4000USD' +// indexation:'5%' +// department:'engineering' +// cost_percent_revenue e.g. 4%, will make sure the cost will be at least 4% of revenue + +fn (mut m BizModel) employee_define_action(action Action) ! { + mut name := action.params.get_default('name', '')! + mut descr := action.params.get_default('descr', '')! + if descr.len == 0 { + descr = action.params.get('description')! + } + if name.len == 0 { + // make name ourselves + name = texttools.name_fix(descr) // TODO:limit len + } + mut cost := action.params.get_default('cost', '0.0')! + // mut cost_year := action.params.get_currencyfloat_default('cost_year', 0.0)! + // if cost_year > 0 { + // cost = cost_year / 12 + // } + // mut cost_growth := action.params.get_default('cost_growth', '')! + // growth := action.params.get_default('growth', '1:1')! + department := action.params.get_default('department', '')! 
+ page := action.params.get_default('page', '')! + + cost_percent_revenue := action.params.get_percentage_default('cost_percent_revenue', + '0%')! + nrpeople := action.params.get_default('nrpeople', '1')! + + indexation := action.params.get_percentage_default('indexation', '0%')! + + cost_center := action.params.get_default('costcenter', 'default_costcenter')! + + // // cost per person + // namecostperson := 'nr_${name}' + // if cost_growth.len > 0 && cost > 0 { + // return error('cannot specify cost and cost growth together, chose one please.') + // } + if indexation > 0 { + if cost.contains(':') { + return error('cannot specify cost growth and indexation, should be no : inside cost param.') + } + mut cost_ := cost.int() + cost2 := cost_ * (1 + indexation) * (1 + indexation) * (1 + indexation) * (1 + indexation) * ( + 1 + indexation) * (1 + indexation) // 6 years, maybe need to look at months + cost = '1:${cost},60:${cost2}' + } + + mut costpeople_row := m.sheet.row_new( + name: 'hr_cost_${name}' + growth: cost + tags: 'department:${department} hrcost' + descr: 'Department ${department}' + subgroup: 'HR cost per department.' + )! + costpeople_row.action(action: .reverse)! + + // multiply with nr of people if any + if nrpeople != '1' { + mut nrpeople_row := m.sheet.row_new( + name: 'nrpeople_${name}' + growth: nrpeople + tags: 'hrnr' + descr: '# people for ${descr}' + aggregatetype: .avg + )! + _ := costpeople_row.action(action: .multiply, rows: [nrpeople_row])! + } + if cost_percent_revenue > 0 { + mut revtotal := m.sheet.row_get('revenue_total')! + mut cost_min := revtotal.action( + action: .multiply + val: cost_percent_revenue + name: 'tmp3' + aggregatetype: .avg + )! + cost_min.action(action: .forwardavg)! // avg out forward looking for 12 months + cost_min.action(action: .reverse)! + costpeople_row.action( + action: .min + rows: [cost_min] + )! 
+ m.sheet.row_delete('tmp3') + } + employee := Employee{ + name: name + description: descr + department: department + cost: cost + cost_percent_revenue: cost_percent_revenue + nrpeople: nrpeople + indexation: indexation + cost_center: cost_center + page: page + fulltime_perc: action.params.get_percentage_default('fulltime', '100%')! + } + + // println(employee) + + // todo: use existing id gen + + if name != '' { + // sid = smartid.sid_new('')! + // // TODO: this isn't necessary if sid_new works correctly + // // but lets keep it in here for now until we test smartid + // for (sid in m.employees) { + // sid = smartid.sid_new('')! + // } + m.employees[name] = &employee + } +} + +fn (mut m BizModel) department_define_action(action Action) ! { + mut name := action.params.get_default('name', '')! + mut descr := action.params.get_default('descr', '')! + if descr.len == 0 { + descr = action.params.get_default('description', '')! + } + + department := Department{ + name: name + description: descr + title: action.params.get_default('title', '')! + page: action.params.get_default('page', '')! + } + + // println(department) + + if name != '' { + m.departments[name] = &department + } +} + +// fn (mut sim BizModel) hr_total() ! { + +// } diff --git a/lib/biz/bizmodel/play_product_revenue.v b/lib/biz/bizmodel/play_product_revenue.v new file mode 100644 index 00000000..2a3015fb --- /dev/null +++ b/lib/biz/bizmodel/play_product_revenue.v @@ -0,0 +1,345 @@ +module bizmodel + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.core.texttools + +// - name, e.g. 
for a specific project +// - descr, description of the revenue line item +// - revenue_setup, revenue for 1 item '1000usd' +// - revenue_setup_delay +// - revenue_monthly, revenue per month for 1 item +// - revenue_monthly_delay, how many months before monthly revenue starts +// - maintenance_month_perc, how much percent of revenue_setup will come back over months +// - cogs_setup, cost of good for 1 item at setup +// - cogs_setup_delay, how many months before setup cogs starts, after sales +// - cogs_setup_perc: what is percentage of the cogs (can change over time) for setup e.g. 0:50% + +// - cogs_monthly, cost of goods for the monthly per 1 item +// - cogs_monthly_delay, how many months before monthly cogs starts, after sales +// - cogs_monthly_perc: what is percentage of the cogs (can change over time) for monthly e.g. 0:5%,12:10% + +// - nr_sold: how many do we sell per month (is in growth format e.g. 10:100,20:200) +// - nr_months_recurring: how many months is recurring, if 0 then no recurring +// +fn (mut m BizModel) revenue_action(action Action) ! { + mut name := action.params.get_default('name', '')! + mut descr := action.params.get_default('descr', '')! + if descr.len == 0 { + descr = action.params.get_default('description', '')! + } + if name.len == 0 { + // make name ourselves + name = texttools.name_fix(descr) + } + + name = texttools.name_fix(name) + if name.len == 0 { + return error('name and description is empty for ${action}') + } + name2 := name.replace('_', ' ').replace('-', ' ') + descr = descr.replace('_', ' ').replace('-', ' ') + + mut product := Product{ + name: name + title: action.params.get_default('title', name)! + description: descr + } + m.products[name] = &product + + mut nr_months_recurring := action.params.get_int_default('nr_months_recurring', 60)! 
+ + if nr_months_recurring == 0 { + nr_months_recurring = 1 + } + + product.nr_months_recurring = nr_months_recurring + + mut revenue := m.sheet.row_new( + name: '${name}_revenue' + growth: action.params.get_default('revenue', '0:0')! + tags: 'rev name:${name}' + descr: 'Revenue for ${name2}' + extrapolate: false + )! + + mut revenue_setup := m.sheet.row_new( + name: '${name}_revenue_setup' + growth: action.params.get_default('revenue_setup', '0:0')! + tags: 'rev name:${name}' + descr: 'Setup Sales price for ${name2}' + aggregatetype: .avg + )! + + mut revenue_setup_delay := action.params.get_int_default('revenue_setup_delay', 0)! + + mut revenue_monthly := m.sheet.row_new( + name: '${name}_revenue_monthly' + growth: action.params.get_default('revenue_monthly', '0:0')! + tags: 'rev name:${name}' + descr: 'Monthly Sales price for ${name2}' + aggregatetype: .avg + )! + + mut revenue_monthly_delay := action.params.get_int_default('revenue_monthly_delay', + 1)! + + mut cogs := m.sheet.row_new( + name: '${name}_cogs' + growth: action.params.get_default('cogs', '0:0')! + tags: 'rev name:${name}' + descr: 'COGS for ${name2}' + extrapolate: false + )! + + if revenue.max() > 0 || cogs.max() > 0 { + product.has_oneoffs = true + } + + _ := m.sheet.row_new( + name: '${name}_cogs_perc' + growth: action.params.get_default('cogs_perc', '0')! + tags: 'rev name:${name}' + descr: 'COGS as percent of revenue for ${name2}' + aggregatetype: .avg + )! + + mut cogs_setup := m.sheet.row_new( + name: '${name}_cogs_setup' + growth: action.params.get_default('cogs_setup', '0:0')! + tags: 'rev name:${name}' + descr: 'COGS for ${name2} Setup' + aggregatetype: .avg + )! + + mut cogs_setup_delay := action.params.get_int_default('cogs_setup_delay', 1)! + + mut cogs_setup_perc := m.sheet.row_new( + name: '${name}_cogs_setup_perc' + growth: action.params.get_default('cogs_setup_perc', '0')! 
+ tags: 'rev name:${name}' + descr: 'COGS as percent of revenue for ${name2} Setup' + aggregatetype: .avg + )! + + mut cogs_monthly := m.sheet.row_new( + name: '${name}_cogs_monthly' + growth: action.params.get_default('cogs_monthly', '0:0')! + tags: 'rev name:${name}' + descr: 'Cost of Goods (COGS) for ${name2} Monthly' + aggregatetype: .avg + )! + + mut cogs_monthly_delay := action.params.get_int_default('cogs_monthly_delay', 1)! + + mut cogs_monthly_perc := m.sheet.row_new( + name: '${name}_cogs_monthly_perc' + growth: action.params.get_default('cogs_monthly_perc', '0')! + tags: 'rev name:${name}' + descr: 'COGS as percent of revenue for ${name2} Monthly' + aggregatetype: .avg + )! + + // if true{ + // println(cogs_setup_perc) + // println(cogs_monthly_perc) + // panic("sdsd") + // } + + mut nr_sold := m.sheet.row_new( + name: '${name}_nr_sold' + growth: action.params.get_default('nr_sold', '0')! + tags: 'rev name:${name}' + descr: 'nr of items sold/month for ${name2}' + aggregatetype: .avg + )! + + if nr_sold.max() > 0 { + product.has_items = true + } + + // CALCULATE THE TOTAL (multiply with nr sold) + + mut revenue_setup_total := revenue_setup.action( + name: '${name}_revenue_setup_total' + descr: 'Setup sales for ${name2} total' + action: .multiply + rows: [nr_sold] + delaymonths: revenue_setup_delay + )! + + mut revenue_monthly_total := revenue_monthly.action( + name: '${name}_revenue_monthly_total' + descr: 'Monthly sales for ${name2} total' + action: .multiply + rows: [nr_sold] + delaymonths: revenue_monthly_delay + )! + + mut cogs_setup_total := cogs_setup.action( + name: '${name}_cogs_setup_total' + descr: 'Setup COGS for ${name2} total' + action: .multiply + rows: [nr_sold] + delaymonths: cogs_setup_delay + )! + + mut cogs_monthly_total := cogs_monthly.action( + name: '${name}_cogs_monthly_total' + descr: 'Monthly COGS for ${name2} total' + action: .multiply + rows: [nr_sold] + delaymonths: cogs_monthly_delay + )! 
+ + // DEAL WITH RECURRING + + if nr_months_recurring > 0 { + revenue_monthly_total = revenue_monthly_total.recurring( + name: '${name}_revenue_monthly_recurring' + descr: 'Revenue monthly recurring for ${name2}' + nrmonths: nr_months_recurring + )! + cogs_monthly_total = cogs_monthly_total.recurring( + name: '${name}_cogs_monthly_recurring' + descr: 'COGS recurring for ${name2}' + nrmonths: nr_months_recurring + )! + + _ := nr_sold.recurring( + name: '${name}_nr_sold_recurring' + descr: 'Nr products active because of recurring for ${name2}' + nrmonths: nr_months_recurring + aggregatetype: .max + )! + // if true{ + // println(nr_sold_recurring) + // panic('sd') + // } + } + + // cogs as percentage of revenue + mut cogs_setup_from_perc := cogs_setup_perc.action( + action: .multiply + rows: [revenue_setup_total] + name: '${name}_cogs_setup_from_perc' + )! + mut cogs_monthly_from_perc := cogs_monthly_perc.action( + action: .multiply + rows: [revenue_monthly_total] + name: '${name}_cogs_monthly_from_perc' + )! + + // if true{ + // println(revenue_setup_total) + // println(cogs_setup_perc) + // println(cogs_setup_from_perc) + // println("montlhy") + // println(revenue_monthly_total) + // println(cogs_monthly_perc) + // println(cogs_monthly_from_perc) + // panic("sdsd") + // } + + // mut cogs_from_perc:=cogs_perc.action(action:.multiply,rows:[revenue],name:"cogs_from_perc")! + + // DEAL WITH MAINTENANCE + + // make sum of all past revenue (all one off revenue, needed to calculate maintenance) + mut temp_past := revenue.recurring( + nrmonths: nr_months_recurring + name: 'temp_past' + // delaymonths:4 + )! + + mut maintenance_month_perc := action.params.get_percentage_default('maintenance_month_perc', + '0%')! + + mut maintenance_month := m.sheet.row_new( + name: '${name}_maintenance_month' + growth: '0:${maintenance_month_perc:.2f}' + tags: 'rev name:${name}' + descr: 'maintenance fee for ${name2}' + )! + + maintenance_month.action(action: .multiply, rows: [temp_past])! 
+ + // temp_past.delete() + + // TOTALS + + mut revenue_total := m.sheet.row_new( + name: '${name}_revenue_total' + growth: '0:0' + tags: 'rev revtotal name:${name}' + descr: 'Revenue total for ${name2}.' + )! + + mut cogs_total := m.sheet.row_new( + name: '${name}_cogs_total' + growth: '0:0' + tags: 'rev cogstotal name:${name}' + descr: 'COGS total for ${name2}.' + )! + + if revenue_total.max() > 0.0 || cogs_total.max() > 0.0 { + product.has_revenue + } + + revenue_total = revenue_total.action( + action: .add + rows: [revenue, revenue_monthly_total, revenue_setup_total, maintenance_month] + )! + + if revenue_total.max() > 0 { + product.has_revenue = true + } + + cogs_total = cogs_total.action( + action: .add + rows: [cogs, cogs_monthly_total, cogs_setup_total, cogs_setup_from_perc, + cogs_monthly_from_perc] + )! + + // if true{ + // //println(m.sheet) + // println(revenue_total) + // println(cogs_total) + // println(cogs) + // println(cogs_monthly_total) + // println(cogs_setup_total) + // println(cogs_setup_from_perc) + // println(cogs_monthly_from_perc) + // panic("sdsd") + + // } +} + +// revenue_total calculates and aggregates the total revenue and cost of goods sold (COGS) for the business model +fn (mut sim BizModel) revenue_total() ! { + // Create a new row in the sheet to represent the total revenue across all products + sim.sheet.group2row( + name: 'revenue_total' + tags: '' + descr: 'total revenue.' + )! + + // Create a new row in the sheet to represent the total COGS across all products + sim.sheet.group2row( + name: 'cogs_total' + tags: '' + descr: 'total cogs.' + )! 
+ + // Note: The following commented-out code block seems to be for debugging or future implementation + // It demonstrates how to create a smaller version of the sheet with specific filters + // if true{ + // // name string + // // namefilter []string // only include the exact names as specified for the rows + // // includefilter []string // matches for the tags + // // excludefilter []string // matches for the tags + // // period_months int = 12 + // mut r:=sim.sheet.tosmaller(name:"tmp",includefilter:["cogstotal"],period_months:12)! + // println(r) + // panic("sdsd") + // } +} diff --git a/lib/biz/bizmodel/templates/departments.md b/lib/biz/bizmodel/templates/departments.md new file mode 100644 index 00000000..c574d1d9 --- /dev/null +++ b/lib/biz/bizmodel/templates/departments.md @@ -0,0 +1,16 @@ + +@for dept in deps + +@if dept.title.len>0 +## @{dept.title} +@else +## @{dept.name} +@end + +| Name | Title | Nr People | +|------|-------|-------| +@for employee in sim.employees.values().filter(it.department == dept.name) +| @{employee_names[employee.name]} | @{employee.title} | @{employee.nrpeople} | +@end + +@end \ No newline at end of file diff --git a/lib/biz/bizmodel/templates/employee.md b/lib/biz/bizmodel/templates/employee.md new file mode 100644 index 00000000..d74425e6 --- /dev/null +++ b/lib/biz/bizmodel/templates/employee.md @@ -0,0 +1,28 @@ +# @{employee.name} + + +`@{employee.description}` + +> department: `@{employee.department}` + +**Cost To The Company:** + +`@{employee.cost}` + + +@if employee.cost_percent_revenue > 0.0 + +**Cost Percent Revenue:** + +`@{employee.cost_percent_revenue}%` + +@end + + +@if employee.nrpeople.len > 1 + +**Number of People in this group** + +`@{employee.nrpeople}` + +@end diff --git a/lib/biz/bizmodel/templates/employee2.md b/lib/biz/bizmodel/templates/employee2.md new file mode 100644 index 00000000..ec7f2c12 --- /dev/null +++ b/lib/biz/bizmodel/templates/employee2.md @@ -0,0 +1,21 @@ +# Employee Wiki + + + +
+ + + + + + + + + + + + + + + +
diff --git a/lib/biz/bizmodel/templates/intro.md b/lib/biz/bizmodel/templates/intro.md new file mode 100644 index 00000000..db32a3be --- /dev/null +++ b/lib/biz/bizmodel/templates/intro.md @@ -0,0 +1,54 @@ +# This is our business model planner + +## FUNDING + +!!bizmodel.sheet_wiki includefilter:'funding' + +## REVENUE vs COGS + +!!bizmodel.sheet_wiki includefilter:rev + +#### Revenue Lines + +!!bizmodel.sheet_wiki title:'Revenue Total' includefilter:'revtotal' + +#### COGS Lines + +!!bizmodel.sheet_wiki title:'COGS' includefilter:'cogstotal' + +## HR +!!bizmodel.sheet_wiki title:'HR Teams' includefilter:'hrnr' + +!!bizmodel.sheet_wiki title:'HR Costs' includefilter:'hrcost' + +## Operational Costs + +!!bizmodel.sheet_wiki title:'COSTS' includefilter:'ocost' + + +## P&L Overview + + + +!!bizmodel.sheet_wiki title:'P&L Overview' includefilter:'pl' + + +!!bizmodel.graph_bar_row rowname:revenue_total unit:million title:'A Title' title_sub:'Sub' + +Unit is in Million USD. + +!!bizmodel.graph_bar_row rowname:revenue_total unit:million + +!!bizmodel.graph_line_row rowname:revenue_total unit:million + +!!bizmodel.graph_pie_row rowname:revenue_total unit:million size:'80%' + + +## Some Details + +> show how we can do per month + +!!bizmodel.sheet_wiki includefilter:'pl' period_months:1 + + + diff --git a/lib/biz/bizmodel/templates/revenue_overview.md b/lib/biz/bizmodel/templates/revenue_overview.md new file mode 100644 index 00000000..a7dc26da --- /dev/null +++ b/lib/biz/bizmodel/templates/revenue_overview.md @@ -0,0 +1,74 @@ + +# Revenue Overview + +@for name1,product in sim.products + +@if product.has_revenue + +## ${product.title} + +${product.description} + +#### parameters for the product + +@if product.has_oneoffs + +Product ${name1} has revenue events (one offs) + +!!!spreadsheet.sheet_wiki + namefilter:'${name1}_revenue,${name1}_cogs,${name1}_cogs_perc,${name1}_maintenance_month_perc' sheetname:'bizmodel_tf9 + +- COGS = Cost of Goods Sold (is our cost to deliver 
the product/service) +- maintenance is fee we charge to the customer per month in relation to the revenue we charged e.g. 1% of a product which was sold for 1m EUR means we charge 1% of 1 m EUR per month. + +@end //one offs + +@if product.has_items + +Product sold and its revenue/cost of goods + +!!!spreadsheet.sheet_wiki + namefilter:'${name1}_nr_sold,${name1}_revenue_setup,${name1}_revenue_monthly,${name1}_cogs_setup,${name1}_cogs_setup_perc,${name1}_cogs_monthly,${name1}_cogs_monthly_perc' + sheetname:'bizmodel_tf9 + +- nr sold, is the nr sold per month of ${name1} +- revenue setup is setup per item for ${name1}, this is the money we receive. Similar there is a revenue monthly. +- cogs = Cost of Goods Sold (is our cost to deliver the product) + - can we as a setup per item, or per month per item + +@if product.nr_months_recurring>1 + +This product ${name1} is recurring, means customer pays per month ongoing, the period customer is paying for in months is: **${product.nr_months_recurring}** + +@end //recurring + +@end + +#### the revenue/cogs calculated + + +!!!spreadsheet.sheet_wiki + namefilter:'${name1}_nr_sold_recurring' + sheetname:'bizmodel_tf9 + +This results in following revenues and cogs: + +!!!spreadsheet.sheet_wiki + namefilter:'${name1}_revenue_setup_total,${name1}_revenue_monthly_total,${name1}_cogs_setup_total,${name1}_cogs_monthly_total,${name1}_cogs_setup_from_perc,${name1}_cogs_monthly_from_perc,${name1}_maintenance_month, + ${name1}_revenue_monthly_recurring,${name1}_cogs_monthly_recurring' + sheetname:'bizmodel_tf9 + +resulting revenues: +!!!spreadsheet.sheet_wiki + namefilter:'${name1}_revenue_total,${name1}_cogs_total' + sheetname:'bizmodel_tf9 + + +!!!spreadsheet.graph_line_row rowname:'${name1}_cogs_total' unit:million sheetname:'bizmodel_tf9' + +!!!spreadsheet.graph_line_row rowname:'${name1}_revenue_total' unit:million sheetname:'bizmodel_tf9' + + +@end //product has_revenue + +@end //loop \ No newline at end of file diff --git 
a/lib/biz/bizmodel/templates/rows_overview.md b/lib/biz/bizmodel/templates/rows_overview.md new file mode 100644 index 00000000..7912240e --- /dev/null +++ b/lib/biz/bizmodel/templates/rows_overview.md @@ -0,0 +1,6 @@ +# Overview of the rows in the biz model sheet + +!!bizmodel.wiki_row_overview + + + diff --git a/lib/biz/bizmodel/templates/summary.md b/lib/biz/bizmodel/templates/summary.md new file mode 100644 index 00000000..8cfef969 --- /dev/null +++ b/lib/biz/bizmodel/templates/summary.md @@ -0,0 +1,24 @@ +- [bizmodel](bizmodel_example:bizmodel.md) + - [Revenue](bizmodel_example:revenue.md) +- [parameters](bizmodel_example:rows_overview.md) + - [revenue_params](bizmodel_example:params/revenue_params.md) + - [funding_params](bizmodel_example:params/funding_params.md) + - [hr_params](bizmodel_example:params/hr_params.md) + - [costs_params](bizmodel_example:params/costs_params.md) + - [rows overview](bizmodel_example:rows_overview.md) +- [manual](bizmodel_manual:configuration.md) + - [widgets](bizmodel_manual:widgets.md) + - [graph_bar_row](bizmodel_manual:graph_bar_row.md) + - [sheet_tables](bizmodel_manual:sheet_tables.md) + - [widget_args](bizmodel_manual:widget_args.md) + - [params](bizmodel_manual:configuration.md) + - [revenue params](bizmodel_manual:revenue_params.md) + - [funding params](bizmodel_manual:funding_params.md) + - [hr params](bizmodel_manual:hr_params.md) + - [costs params](bizmodel_manual:costs_params.md) +- [employees](bizmodel_example:employees.md) + - [CTO](bizmodel_example:cto.md) +- [concepts](bizmodel_manual:concepts.md) + + + diff --git a/lib/biz/investortool/company.v b/lib/biz/investortool/company.v new file mode 100644 index 00000000..2829f45a --- /dev/null +++ b/lib/biz/investortool/company.v @@ -0,0 +1,34 @@ +module investortool + +import freeflowuniverse.herolib.core.playbook + +@[heap] +pub struct Company { +pub mut: + oid string + short_code string + name string + current_nr_shares int + current_share_value string + description 
string + admins []string + comments []string +} + +fn play_company(mut investortool InvestorTool, mut plbook playbook.PlayBook) ! { + for mut action in plbook.find(filter: 'investortool.company_define')! { + mut p := action.params + mut company := Company{ + oid: p.get_default('oid', '')! + short_code: p.get_default('short_code', '')! + name: p.get_default('name', '')! + current_nr_shares: p.get_int_default('current_nr_shares', 0)! + current_share_value: p.get_default('current_share_value', '')! + description: p.get_default('description', '')! + admins: p.get_list_default('admins', [])! + comments: p.get_list_default('comments', [])! + } + println(company) + investortool.company_add(company)! + } +} diff --git a/lib/biz/investortool/employee.v b/lib/biz/investortool/employee.v new file mode 100644 index 00000000..7dc5cf19 --- /dev/null +++ b/lib/biz/investortool/employee.v @@ -0,0 +1,64 @@ +module investortool + +import freeflowuniverse.herolib.core.playbook +import freeflowuniverse.herolib.data.ourtime +import freeflowuniverse.herolib.data.currency + +// TODO add currency and ourtime types +@[heap] +pub struct Employee { +pub mut: + oid string + user_ref string + company_ref string + status string + start_date ?ourtime.OurTime + end_date ?ourtime.OurTime + salary ?currency.Amount + salary_low ?currency.Amount + outstanding ?currency.Amount + tft_grant f64 + reward_pool_points int + salary_low_date ?ourtime.OurTime + comments string +} + +fn play_employee(mut investortool InvestorTool, mut plbook playbook.PlayBook) ! { + for mut action in plbook.find(filter: 'investortool.employee_define')! { + mut p := action.params + mut employee := Employee{ + oid: p.get_default('oid', '')! + user_ref: p.get_default('user_ref', '')! + company_ref: p.get_default('company_ref', '')! + status: p.get_default('status', '')! + start_date: if p.exists('start_date') { p.get_time('start_date')! } else { none } + end_date: if p.exists('end_date') { p.get_time('end_date')! 
} else { none } + salary: if p.exists('salary') && p.get('salary')!.trim(' ').len > 0 { + p.get_currencyamount('salary')! + } else { + none + } + salary_low: if p.exists('salary_low') && p.get('salary_low')!.trim(' ').len > 0 { + p.get_currencyamount('salary_low')! + } else { + none + } + outstanding: if p.exists('outstanding') + && p.get('outstanding')!.trim(' ').len > 0 { + p.get_currencyamount('outstanding')! + } else { + none + } + tft_grant: p.get_float_default('tft_grant', 0.0)! + reward_pool_points: p.get_int_default('reward_pool_points', 0)! + salary_low_date: if p.exists('salary_low_date') { + p.get_time('salary_low_date')! + } else { + none + } + comments: p.get_default('comments', '')! + } + println(employee) + investortool.employee_add(employee)! + } +} diff --git a/lib/biz/investortool/factory.v b/lib/biz/investortool/factory.v new file mode 100644 index 00000000..c40afe7d --- /dev/null +++ b/lib/biz/investortool/factory.v @@ -0,0 +1,139 @@ +module investortool + +import freeflowuniverse.herolib.core.playbook + +// TODO: need to do a global +__global ( + investortools shared map[string]&InvestorTool +) + +@[heap] +pub struct InvestorTool { +pub mut: + companies map[string]&Company + employees map[string]&Employee + investments map[string]&InvestmentShares + investors map[string]&Investor + users map[string]&User +} + +// Factory methods +pub fn new() &InvestorTool { + return &InvestorTool{} +} + +pub fn get() !&InvestorTool { + if 'default' in investortools { + return investortools['default'] + } + return error("can't find default investor tool") +} + +// Factory methods +pub fn (mut it InvestorTool) user_new() &User { + return &User{} +} + +pub fn (mut it InvestorTool) company_new() &Company { + return &Company{} +} + +pub fn (mut it InvestorTool) employee_new() &Employee { + return &Employee{} +} + +pub fn (mut it InvestorTool) investment_shares_new() &InvestmentShares { + return &InvestmentShares{} +} + +pub fn (mut it InvestorTool) investor_new() 
&Investor { + return &Investor{} +} + +// Add methods +pub fn (mut it InvestorTool) user_add(user &User) ! { + it.users[user.oid] = user +} + +pub fn (mut it InvestorTool) company_add(company &Company) ! { + it.companies[company.oid] = company +} + +pub fn (mut it InvestorTool) employee_add(employee &Employee) ! { + it.employees[employee.oid] = employee +} + +pub fn (mut it InvestorTool) investment_shares_add(investment &InvestmentShares) ! { + it.investments[investment.oid] = investment +} + +pub fn (mut it InvestorTool) investor_add(investor &Investor) ! { + it.investors[investor.oid] = investor +} + +pub fn play(mut plbook playbook.PlayBook) !&InvestorTool { + mut it := new() + play_company(mut it, mut plbook)! + play_employee(mut it, mut plbook)! + play_investmentshares(mut it, mut plbook)! + play_investor(mut it, mut plbook)! + play_user(mut it, mut plbook)! + + investortools['default'] = it + return it +} + +pub fn (mut it InvestorTool) check() ! { + // TODO: walk over all objects check all relationships + // TODO: make helpers on e.g. employee, ... 
to get the related ones + + for _, cmp in it.companies { + for admin in cmp.admins { + if admin !in it.users { + return error('admin ${admin} from company ${cmp.oid} is not found') + } + } + } + + for _, emp in it.employees { + if emp.user_ref !in it.users { + return error('user ${emp.user_ref} from employee ${emp.oid} is not found') + } + + if emp.company_ref !in it.companies { + return error('company ${emp.company_ref} from employee ${emp.oid} is not found') + } + } + + for _, inv in it.investments { + if inv.company_ref != '' && inv.company_ref !in it.companies { + return error('company ${inv.company_ref} from investment ${inv.oid} is not found') + } + + if inv.investor_ref !in it.investors { + return error('investor ${inv.investor_ref} from investment ${inv.oid} is not found') + } + } + + for _, inv in it.investors { + for user in inv.user_refs { + if user !in it.users { + return error('user ${user} from investor ${inv.oid} is not found') + } + } + + for admin in inv.admins { + if admin !in it.users { + return error('admin ${admin} from investor ${inv.oid} is not found') + } + } + } + + for _, user in it.users { + for inv in user.investor_ids { + if inv !in it.investors { + return error('investor ${inv} from user ${user.oid} is not found') + } + } + } +} diff --git a/lib/biz/investortool/investment_share.v b/lib/biz/investortool/investment_share.v new file mode 100644 index 00000000..b84bf8a0 --- /dev/null +++ b/lib/biz/investortool/investment_share.v @@ -0,0 +1,55 @@ +module investortool + +import freeflowuniverse.herolib.core.playbook +import freeflowuniverse.herolib.data.ourtime +import freeflowuniverse.herolib.data.currency + +@[heap] +pub struct InvestmentShares { +pub mut: + oid string + company_ref string + investor_ref string + nr_shares f64 + share_class string + investment_value ?currency.Amount + interest ?currency.Amount + description string + investment_date ?ourtime.OurTime + type_ string + comments []string +} + +fn play_investmentshares(mut 
investortool InvestorTool, mut plbook playbook.PlayBook) ! { + for mut action in plbook.find(filter: 'investortool.investment_shares_define')! { + mut p := action.params + mut investment_shares := InvestmentShares{ + oid: p.get_default('oid', '')! + company_ref: p.get_default('company_ref', '')!.trim(' ') + investor_ref: p.get_default('investor_ref', '')! + nr_shares: p.get_float_default('nr_shares', 0)! + share_class: p.get_default('share_class', '')! + investment_value: if p.exists('investment_value') + && p.get('investment_value')!.trim(' ').len > 0 { + p.get_currencyamount('investment_value')! + } else { + none + } + interest: if p.exists('interest') && p.get('interest')!.trim(' ').len > 0 { + p.get_currencyamount('interest')! + } else { + none + } + description: p.get_default('description', '')! + investment_date: if p.exists('investment_date') { + p.get_time('investment_date')! + } else { + none + } + type_: p.get_default('type', '')! + comments: p.get_list_default('comments', [])! + } + println(investment_shares) + investortool.investment_shares_add(investment_shares)! + } +} diff --git a/lib/biz/investortool/investor.v b/lib/biz/investortool/investor.v new file mode 100644 index 00000000..65ecb0e1 --- /dev/null +++ b/lib/biz/investortool/investor.v @@ -0,0 +1,33 @@ +module investortool + +import freeflowuniverse.herolib.core.playbook + +@[heap] +pub struct Investor { +pub mut: + oid string + name string + code string + description string + user_refs []string + admins []string + comments []string +} + +fn play_investor(mut investortool InvestorTool, mut plbook playbook.PlayBook) ! { + for mut action in plbook.find(filter: 'investortool.investor_define')! { + mut p := action.params + mut investor := Investor{ + oid: p.get_default('oid', '')! + name: p.get_default('name', '')! + code: p.get_default('code', '')! + description: p.get_default('description', '')! + user_refs: p.get_list_default('user_refs', [])! + admins: p.get_list_default('admins', [])! 
+ comments: p.get_list_default('comments', [])! + } + // println(investor) + + investortool.investor_add(investor)! + } +} diff --git a/lib/biz/investortool/investortool.v b/lib/biz/investortool/investortool.v new file mode 100644 index 00000000..c671828b --- /dev/null +++ b/lib/biz/investortool/investortool.v @@ -0,0 +1,70 @@ +module investortool + +// struct Investor { +// id string +// name string +// code string +// description string +// user_refs string +// admins string +// comments string +// } + +// struct InvestmentShares { +// id string +// company_ref string +// investor_ref string +// nr_shares f64 +// share_class string +// investment_value string +// interest string +// description string +// investment_date string +// type string +// comments string +// } + +// struct InvestorTool { +// mut: +// investors []Investor +// investments []InvestmentShares +// } + +// fn new_investor_tool() InvestorTool { +// return InvestorTool{ +// investors: [] +// investments: [] +// } +// } + +// fn (mut it InvestorTool) investor_define(params string) ! { +// mut p := paramsparser.new(params)! +// investor := Investor{ +// id: p.get_default('id', '')! +// name: p.get_default('name', '')! +// code: p.get_default('code', '')! +// description: p.get_default('description', '')! +// user_refs: p.get_default('user_refs', '')! +// admins: p.get_default('admins', '')! +// comments: p.get_default('comments', '')! +// } +// it.investors << investor +// } + +// fn (mut it InvestorTool) investment_shares_define(params string) ! { +// mut p := paramsparser.new(params)! +// investment := InvestmentShares{ +// id: p.get_default('id', '')! +// company_ref: p.get_default('company_ref', '')! +// investor_ref: p.get_default('investor_ref', '')! +// nr_shares: p.get_f64('nr_shares')! +// share_class: p.get_default('share_class', '')! +// investment_value: p.get_default('investment_value', '')! +// interest: p.get_default('interest', '')! 
+// description: p.get_default('description', '')! +// investment_date: p.get_default('investment_date', '')! +// type: p.get_default('type', '')! +// comments: p.get_default('comments', '')! +// } +// it.investments << investment +// } diff --git a/lib/biz/investortool/investortool2.v b/lib/biz/investortool/investortool2.v new file mode 100644 index 00000000..fc00e6c6 --- /dev/null +++ b/lib/biz/investortool/investortool2.v @@ -0,0 +1,30 @@ +// struct User { +// id string +// usercode string +// name string +// investor_ids string +// status string +// info_links string +// telnrs string +// emails string +// secret string +// } + +// struct InvestorTool { +// mut: +// users []User +// // ... other fields like investors and investments +// } + +// fn (mut it InvestorTool) user_define(params string) ! { +// mut p := paramsparser.new(params)! +// user := User{ +// id: p.get_default('id', '')! +// usercode: p.get_default('usercode', '')! +// name: p.get_default('name', '')! +// investor_ids: p.get_default('investor_ids', '')! +// status: p.get_default('status', '')! +// info_links: p.get_default('info_links', '')! +// telnrs: p.get_default('telnrs', '')! +// emails: p.get_default('emails', '')! +// secret: p.get_ diff --git a/lib/biz/investortool/investortool3.v b/lib/biz/investortool/investortool3.v new file mode 100644 index 00000000..f9cd201f --- /dev/null +++ b/lib/biz/investortool/investortool3.v @@ -0,0 +1,31 @@ +// struct Company { +// id string +// short_code string +// name string +// current_nr_shares int +// current_share_value string +// description string +// admins string +// comments string +// } + +// struct InvestorTool { +// mut: +// companies []Company +// // ... other fields like users, investors, and investments +// } + +// fn (mut it InvestorTool) company_define(params string) ! { +// mut p := paramsparser.new(params)! +// company := Company{ +// id: p.get_default('id', '')! +// short_code: p.get_default('short_code', '')! 
+// name: p.get_default('name', '')! +// current_nr_shares: p.get_int('current_nr_shares')! +// current_share_value: p.get_default('current_share_value', '')! +// description: p.get_default('description', '')! +// admins: p.get_default('admins', '')! +// comments: p.get_default('comments', '')! +// } +// it.companies << company +// } diff --git a/lib/biz/investortool/readme.md b/lib/biz/investortool/readme.md new file mode 100644 index 00000000..045c5db5 --- /dev/null +++ b/lib/biz/investortool/readme.md @@ -0,0 +1,4 @@ + + +use by ThreeFold to work with our administration around Shares, ... + diff --git a/lib/biz/investortool/simulator/captable.v b/lib/biz/investortool/simulator/captable.v new file mode 100644 index 00000000..20760ff8 --- /dev/null +++ b/lib/biz/investortool/simulator/captable.v @@ -0,0 +1,4 @@ +module investorsimulator + +pub struct CapTable { +} diff --git a/lib/biz/investortool/simulator/play.v b/lib/biz/investortool/simulator/play.v new file mode 100644 index 00000000..beb2c4d4 --- /dev/null +++ b/lib/biz/investortool/simulator/play.v @@ -0,0 +1,53 @@ +module investorsimulator + +import freeflowuniverse.herolib.core.playbook { PlayBook } +import freeflowuniverse.herolib.biz.investortool + +pub fn (mut s Simulator) play(mut plbook PlayBook) ! { + for mut action in plbook.find(filter: 'investorsimulator.user_view_add')! { + /* + !!!investorsimulator.user_view_add + view: view1 + oid: abc + */ + mut p := action.params + view := p.get_default('view', 'default')! + user_oid := p.get('oid')! + + user := if user_oid in s.it.users { + s.it.users[user_oid] + } else { + return error('user with oid ${user_oid} is not found') + } + + mut v := if view in s.user_views { + s.user_views[view] + } else { + s.user_views[view] = [] + s.user_views[view] + } + + v << user + } + + for mut action in plbook.find(filter: 'investorsimulator.investor_view_add')! { + mut p := action.params + view := p.get_default('view', 'default')! + investor_oid := p.get('oid')! 
+ + investor := if investor_oid in s.it.investors { + s.it.investors[investor_oid] + } else { + return error('investor with oid ${investor_oid} is not found') + } + + mut v := if view in s.investor_views { + s.investor_views[view] + } else { + s.investor_views[view] = [] + s.investor_views[view] + } + + v << user + } +} diff --git a/lib/biz/investortool/simulator/simulator.v b/lib/biz/investortool/simulator/simulator.v new file mode 100644 index 00000000..bc219634 --- /dev/null +++ b/lib/biz/investortool/simulator/simulator.v @@ -0,0 +1,50 @@ +module investorsimulator + +import freeflowuniverse.herolib.core.playbook { PlayBook } +import freeflowuniverse.herolib.biz.investortool + +__global ( + simulators map[string]Simulator +) + +@[params] +pub struct NewSimulatorArgs { +pub mut: + name string @[required] + data_path string @[requried] +} + +pub struct Simulator { +pub mut: + name string + it &investortool.InvestorTool + user_views map[string][]&investortool.User + investor_views map[string][]&investortool.Investor + // captable_views map[string]CapTable +} + +pub fn new(args NewSimulatorArgs) !Simulator { + mut plbook := playbook.new(path: args.data_path)! + mut it := investortool.play(mut plbook)! + + return Simulator{ + name: args.name + it: it + user_views: map[string][]&investortool.User{} + investor_views: map[string][]&investortool.Investor{} + } +} + +pub fn play(mut plbook PlayBook) ! { + for mut action in plbook.find(filter: 'investorsimulator.run')! { + name := action.params.get_default('name', 'default')! + data_path := action.params.get('data_path')! + mut sim := new(name, data_path)! + + lock simulators { + simulators[name] = sim + } + + sim.play(mut plbook)! 
+ } +} diff --git a/lib/biz/investortool/simulator/templates/investor.md b/lib/biz/investortool/simulator/templates/investor.md new file mode 100644 index 00000000..9b7e701d --- /dev/null +++ b/lib/biz/investortool/simulator/templates/investor.md @@ -0,0 +1,7 @@ +# Investors + +@for investor in it.investors +## @{investor.name} +- Investor Code: @{investor.code} +- Admins IDs: (x@{investor.admins}) +@end \ No newline at end of file diff --git a/lib/biz/investortool/simulator/templates/user.md b/lib/biz/investortool/simulator/templates/user.md new file mode 100644 index 00000000..a6734bd7 --- /dev/null +++ b/lib/biz/investortool/simulator/templates/user.md @@ -0,0 +1,9 @@ +# Users + +@for user in it.users +## @{user.name} +- User Code: @{user.code} +- Status: @{user.status} +- Telephone Numbers: @{user.telnrs} +- Emails: @{user.emails} +@end \ No newline at end of file diff --git a/lib/biz/investortool/user.v b/lib/biz/investortool/user.v new file mode 100644 index 00000000..e285f530 --- /dev/null +++ b/lib/biz/investortool/user.v @@ -0,0 +1,37 @@ +module investortool + +import freeflowuniverse.herolib.core.playbook + +@[heap] +pub struct User { +pub mut: + oid string + usercode string + name string + investor_ids []string + status string + info_links []string + telnrs []string + emails []string + secret string +} + +fn play_user(mut investortool InvestorTool, mut plbook playbook.PlayBook) ! { + for mut action in plbook.find(filter: 'investortool.user_define')! { + mut p := action.params + mut user := User{ + oid: p.get_default('oid', '')! + usercode: p.get_default('usercode', '')! + name: p.get_default('name', '')! + investor_ids: p.get_list_default('investor_ids', [])! + status: p.get_default('status', '')! + info_links: p.get_list_default('info_links', [])! + telnrs: p.get_telnrs_default('telnrs', [])! + emails: p.get_emails_default('emails', [])! + secret: p.get_default('secret', '')! + } + // println(user) + investortool.user_add(user)! 
+ // TODO: now we need to do some mapping to make sure telnr's and emails are normalized (no . in tel nr, no spaces ...) + } +} diff --git a/lib/biz/spreadsheet/calc_test.v b/lib/biz/spreadsheet/calc_test.v new file mode 100644 index 00000000..e0f74f47 --- /dev/null +++ b/lib/biz/spreadsheet/calc_test.v @@ -0,0 +1,132 @@ +module spreadsheet + +import freeflowuniverse.herolib.data.currency +import freeflowuniverse.herolib.ui.console + +fn test_sheets() { + mut sh := sheet_new() or { panic(err) } + + mut nrnodes := sh.row_new( + name: 'nrnodes' + growth: '5:100,55:1000' + tags: 'cat:nodes color:yellow urgent' + )! + mut curtest := sh.row_new(name: 'curtest', growth: '1:100EUR,55:1000AED,56:0')! + + mut nrnodes2 := sh.row_new( + name: 'nrnodes2' + growth: '5:100,55:1000,60:500' + tags: 'cat:nodes delay color:green' + )! + + mut nrnodes3 := sh.row_new( + name: 'nrnodes3' + growth: '0:100' + )! + + mut incrementalrow := sh.row_new(name: 'incrementalrow', growth: '0:0,60:59')! + + mut smartrow := sh.row_new(name: 'oem', growth: '10:1000USD,40:2000', extrapolate: false)! + + assert smartrow.cells[8].val == 0.0 + assert smartrow.cells[10].val == 1000.0 + assert smartrow.cells[40].val == 2000.0 + + console.print_debug('${nrnodes}') + + console.print_debug('${incrementalrow}') + + mut toincrement := sh.row_new(name: 'incr2', growth: '0:0,60:59')! + inc1row := toincrement.recurring(name: 'testrecurring1', delaymonths: 0)! + inc2row := toincrement.recurring(name: 'testrecurring2', delaymonths: 3)! + + console.print_debug('${toincrement}') + + a1 := toincrement.look_forward_avg(50, 20)! + a2 := toincrement.look_forward_avg(12, 12)! 
+ + // console.print_debug(a1) + // console.print_debug(a2) + + // if true{panic("sss")} + + console.print_debug(inc1row) + console.print_debug(inc2row) + + assert inc1row.cells[4].val == 10.0 + assert inc2row.cells[7].val == 10.0 + + // if true{panic("sds")} + + // SUM + + mut res := []Row{} + + res << nrnodes.action(name: 'sum', action: .add, val: 100)! + assert res.last().cells[1].val == nrnodes.cells[1].val + 100.0 + assert res.last().cells[30].val == nrnodes.cells[30].val + 100.0 + + res << nrnodes.action(name: 'minus', action: .substract, val: 100)! + assert res.last().cells[1].val == nrnodes.cells[1].val - 100.0 + assert res.last().cells[30].val == nrnodes.cells[30].val - 100.0 + + res << nrnodes.action(name: 'sum2', action: .add, rows: [incrementalrow])! + assert res.last().cells[20].val == nrnodes.cells[20].val + 20.0 + + res << nrnodes.action(name: 'minus2', action: .substract, rows: [incrementalrow])! + assert res.last().cells[20].val == nrnodes.cells[20].val - 20.0 + + res << nrnodes.action(name: 'minus3', action: .substract, rows: [incrementalrow, incrementalrow])! + assert res.last().cells[20].val == nrnodes.cells[20].val - 40.0 + + res << nrnodes.action(name: 'max1', action: .max, rows: [incrementalrow])! + assert res.last().cells[2].val == 2.0 + + res << nrnodes3.action(name: 'max2', action: .max, val: 3.0)! + assert res.last().cells[20].val == 100.0 + + res << nrnodes3.action(name: 'max3', action: .max, val: 300.0)! + assert res.last().cells[20].val == 300.0 + + res << nrnodes3.action(name: 'min1', action: .min, val: 1.0)! + assert res.last().cells[20].val == 1.0 + + res << incrementalrow.action(name: 'aggr1', action: .aggregate, val: 1.0)! + assert res.last().cells[3].val == 6.0 + + console.print_debug(res.last()) + + incrementalrow.delay(3)! + assert incrementalrow.cells[6].val == 3 + + // mut nrnodessum := nrnodes.add('nrnodessum', nrnodes2)! + + mut shyear := sh.toyear(name: 'shyear', includefilter: ['cat:nodes'])! 
+ mut shq := sh.toquarter(name: 'nrnodesq', includefilter: ['cat:nodes'])! + + console.print_debug(shyear) + console.print_debug(shq) + // r:=shq.json()! + // console.print_debug(r) + wiki := sh.wiki(description: 'is my description.')! + console.print_debug(wiki) + // panic('test1') +} + +fn test_curr() { + mut sh := sheet_new(name: 'test2') or { panic(err) } + + currency.set_default('AED', 0.25)! + currency.set_default('EUR', 0.9)! + + mut pricetft := sh.row_new(name: 'something', growth: '0:100aed,55:1000eur')! + + console.print_debug(sh.rows['something'].cells[0]) + assert sh.rows['something']!.cells[0].val == 25.0 + assert sh.rows['something']!.cells[60 - 1].val == 900.0 + + // TODO: we need to create tests for it + + console.print_debug(sh) + panic('test1') +} diff --git a/lib/biz/spreadsheet/cell.v b/lib/biz/spreadsheet/cell.v new file mode 100644 index 00000000..aa5b81c6 --- /dev/null +++ b/lib/biz/spreadsheet/cell.v @@ -0,0 +1,35 @@ +module spreadsheet + +import freeflowuniverse.herolib.data.currency + +pub struct Cell { +pub mut: + val f64 + row &Row @[skip; str: skip] + empty bool = true +} + +pub fn (mut c Cell) set(v string) ! { + // means we insert a currency so need to do the exchange + mut amount := currency.amount_get(v)! + assert amount.currency.name != '' + mut amount2 := amount.exchange(c.row.sheet.currency)! 
// do the exchange to the local currency + c.val = amount2.val + c.empty = false +} + +pub fn (mut c Cell) add(v f64) { + c.val += v + c.empty = false +} + +pub fn (mut c Cell) repr() string { + if c.empty { + return '-' + } + return float_repr(c.val, c.row.reprtype) +} + +pub fn (mut c Cell) str() string { + return c.repr() +} diff --git a/lib/biz/spreadsheet/extrapolate.v b/lib/biz/spreadsheet/extrapolate.v new file mode 100644 index 00000000..22d1c471 --- /dev/null +++ b/lib/biz/spreadsheet/extrapolate.v @@ -0,0 +1,116 @@ +module spreadsheet + +import freeflowuniverse.herolib.ui.console + +// smartstring is something like 3:2,10:5 means end month 3 we start with 2, it grows to 5 on end month 10 . +// the cells out of the mentioned ranges are not filled if they are already set . +// the cells which are empty at start of row will become 0 . +// the cells which are empty at the back will just be same value as the last one . +// currencies can be used e.g. 3:10usd,20:30aed (so we can even mix) . +// first cell is 1, the start is 0 (month 0) . +// if the smartstr, is empty then will use existing values in the row to extra/intra polate, the empty values will be filled in +pub fn (mut r Row) extrapolate(smartstr string) ! { + // put the values in the row + // console.print_debug("extrapolate: ${smartstr}") + for mut part in smartstr.split(',') { + part = part.trim_space() + if part.contains(':') { + splitted := part.split(':') + if splitted.len != 2 { + return error("smartextrapolate needs '3:2,10:5' as format, now ${smartstr} ") + } + mut x := splitted[0].int() + if x < 0 { + return error('Cannot do smartstr, because the X is out of scope.\n${smartstr}') + } + if x > r.sheet.nrcol - 1 { + x = r.sheet.nrcol - 1 + } + r.cells[x].set(splitted[1])! 
+ } + } + + mut xlast := 0 // remembers where there was last non empty value + mut has_previous_value := false + mut xlastval := 0.0 // the value at that position + mut xlastwidth := 0 // need to know how fast to go up from the xlast to xnew + mut xnewval := 0.0 + // console.print_debug(r) + for x in 0 .. r.cells.len { + // console.print_debug("$x empty:${r.cells[x].empty} xlastwidth:$xlastwidth xlastval:$xlastval xlast:$xlast") + if r.cells[x].empty && !has_previous_value { + continue + } + has_previous_value = true + if r.cells[x].empty == false && xlastwidth == 0 { + // we get new value, just go to next + xlast = x + xlastval = r.cells[x].val + xlastwidth = 0 + // console.print_debug(" lastval:$xlastval") + continue // no need to do anything + } + // if we get here we get an empty after having a non empty before + xlastwidth += 1 + if r.cells[x].empty == false { + // now we find the next one not being empty so we need to do the interpolation + xnewval = r.cells[x].val + // now we need to walk over the inbetween and set the values + yincr := (xnewval - xlastval) / xlastwidth + mut yy := xlastval + // console.print_debug(" yincr:$yincr") + for xx in (xlast + 1) .. x { + yy += yincr + r.cells[xx].set('${yy:.2f}')! + } + xlast = x + xlastval = xnewval + xlastwidth = 0 + xnewval = 0.0 + } + } + // console.print_debug("ROW1:$r") + + // now fill in the last ones + xlastval = 0.0 + for x in 0 .. r.cells.len { + if r.cells[x].empty == false { + xlastval = r.cells[x].val + continue + } + r.cells[x].set('${xlastval:.2f}')! + } + + // console.print_debug("ROW:$r") + // if true{panic("s")} +} + +// something like 3:2,10:5 means end month 3 we set 2, month 10 5 +// there i no interpolation, all other fields are set on 0 +pub fn (mut r Row) smartfill(smartstr string) ! 
{ + // console.print_debug("smartfill: ${smartstr}") + for mut part in smartstr.split(',') { + part = part.trim_space() + if part.contains(':') { + splitted := part.split(':') + if splitted.len != 2 { + return error("smartextrapolate needs '3:2,10:5' as format, now ${smartstr} ") + } + x := splitted[0].int() + if x < 0 { + return error('Cannot do smartstr, because the X is out of scope.\n${smartstr}') + } + if x > r.sheet.nrcol { + return error('Cannot do smartstr, because the X is out of scope, needs to be 1+.\n${smartstr}') + } + r.cells[x].set(splitted[1])! + } else { + r.cells[0].set(part)! + } + } + for x in 0 .. r.cells.len { + if r.cells[x].empty { + r.cells[x].set('0.0')! + } + } +} diff --git a/lib/biz/spreadsheet/factory.v b/lib/biz/spreadsheet/factory.v new file mode 100644 index 00000000..29e6a8bb --- /dev/null +++ b/lib/biz/spreadsheet/factory.v @@ -0,0 +1,60 @@ +module spreadsheet + +import freeflowuniverse.herolib.data.currency + +__global ( + sheets shared map[string]&Sheet +) + +@[params] +pub struct SheetNewArgs { +pub mut: + name string = 'main' + nrcol int = 60 + visualize_cur bool = true // if we want to show e.g. $44.4 in a cell or just 44.4 + curr string = 'usd' // preferred currency to work with +} + +// get a sheet +// has y nr of rows, each row has a name +// each row has X nr of columns which represent months +// we can do manipulations with the rows, is very useful for e.g. business planning +// params: +// nrcol int = 60 +// visualize_cur bool //if we want to show e.g. $44.4 in a cell or just 44.4 +pub fn sheet_new(args SheetNewArgs) !&Sheet { + mut sh := Sheet{ + nrcol: args.nrcol + params: SheetParams{ + visualize_cur: args.visualize_cur + } + currency: currency.get(args.curr)! 
+ name: args.name + } + sheet_set(&sh) + return &sh +} + +// get sheet from global +pub fn sheet_get(name string) !&Sheet { + rlock sheets { + if name in sheets { + return sheets[name] or { return error('Sheet ${name} not found') } + } + } + return error("cann't find sheet:'${name}' in global sheets") +} + +// remember sheet in global +pub fn sheet_set(sh &Sheet) { + lock sheets { + sheets[sh.name] = sh + } +} + +pub fn sheets_keys() []string { + rlock sheets { + return sheets.keys() + } + panic('bug') +} diff --git a/lib/biz/spreadsheet/number.v b/lib/biz/spreadsheet/number.v new file mode 100644 index 00000000..9f07df69 --- /dev/null +++ b/lib/biz/spreadsheet/number.v @@ -0,0 +1,33 @@ +module spreadsheet + +import math + +pub enum ReprType { + number // will use k, m, ... + currency +} + +// represent a +pub fn float_repr(nr_ f64, reprtype ReprType) string { + mut out := '' + mut nr := nr_ + mut nr_pos := math.abs(nr) + mut ext := '' + if reprtype == .number || reprtype == .currency { + if nr_pos > 1000 * 1000 { + nr = nr / 1000000 + ext = 'm' + } else if nr_pos > 1000 { + ext = 'k' + nr = nr / 1000 + } + if nr > 1000 { + out = '${nr:.0}${ext}' + } else if nr > 100 { + out = '${nr:.1}${ext}' + } else { + out = '${nr:.2}${ext}' + } + } + return out +} diff --git a/lib/biz/spreadsheet/playmacro.v b/lib/biz/spreadsheet/playmacro.v new file mode 100644 index 00000000..cf349844 --- /dev/null +++ b/lib/biz/spreadsheet/playmacro.v @@ -0,0 +1,116 @@ +module spreadsheet + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.ui.console + +pub fn playmacro(action Action) !string { + console.print_green('playmacro for worksheet') + + sheet_name := action.params.get('sheetname') or { + return error("can't find sheetname from spreadsheet macro's, define it as sheetname:... 
.") + } + mut sh := sheet_get(sheet_name) or { + return error("Couldn't find sheetname: ${sheet_name} \nerror:\n${err}") + } + + // sheet_name := action.params.get('sheetname') or {return error("can't find sheetname from sheet.chart macro.")} + // mut sh:= sheet_get(sheet_name)! + // console.print_debug(sh) + + supported_actions := ['sheet_wiki', 'graph_pie_row', 'graph_line_row', 'graph_bar_row', + 'graph_title_row', 'wiki_row_overview'] + + if action.name !in supported_actions { + return error("Couldn't find macro ${action.name} for spreadsheet:${sheet_name}.") + } + // rowname string // if specified then its one name + // namefilter []string // only include the exact names as secified for the rows + // includefilter []string // to use with tags filter e.g. ['location:belgium_*'] //would match all words starting with belgium + // excludefilter []string + // period_type PeriodType // year, month, quarter + // aggregate bool = true // if more than 1 row matches should we aggregate or not + // aggregatetype RowAggregateType = .sum // important if used with include/exclude, because then we group + // unit UnitType + // title string + // title_sub string + // size string + // rowname_show bool = true // show the name of the row + // description string + + mut p := action.params + + rowname := p.get_default('rowname', '')! + namefilter := p.get_list_default('namefilter', [])! + includefilter := p.get_list_default('includefilter', [])! + excludefilter := p.get_list_default('excludefilter', [])! + size := p.get_default('size', '')! + title_sub := p.get_default('title_sub', '')! + title := p.get_default('title', '')! + unit := p.get_default('unit', 'normal')! + unit_e := match unit { + 'thousand' { UnitType.thousand } + 'million' { UnitType.million } + 'billion' { UnitType.billion } + else { UnitType.normal } + } + period_type := p.get_default('period_type', 'year')! 
+ if period_type !in ['year', 'month', 'quarter'] { + return error('period type needs to be in year,month,quarter') + } + period_type_e := match period_type { + 'year' { PeriodType.year } + 'month' { PeriodType.month } + 'quarter' { PeriodType.quarter } + else { PeriodType.error } + } + if period_type_e == .error { + return error('period type needs to be in year,month,quarter') + } + + rowname_show := p.get_default_true('rowname_show') + descr_show := p.get_default_true('descr_show') + + args := RowGetArgs{ + rowname: rowname + namefilter: namefilter + includefilter: includefilter + excludefilter: excludefilter + period_type: period_type_e + unit: unit_e + title_sub: title_sub + title: title + size: size + rowname_show: rowname_show + descr_show: descr_show + } + + mut content := '' + + match action.name { + // which action is associated with wiki() method + 'sheet_wiki' { + content = sh.wiki(args) or { panic(err) } + } + 'graph_title_row' { + content = sh.wiki_title_chart(args) + } + 'graph_line_row' { + content = sh.wiki_line_chart(args)! + } + 'graph_bar_row' { + content = sh.wiki_bar_chart(args)! + } + 'graph_pie_row' { + content = sh.wiki_pie_chart(args)! + } + 'wiki_row_overview' { + content = sh.wiki_row_overview(args)! + } + else { + return error('unexpected action name ${action.name} for sheet macro.') + } + } + + content += '\n
\n' + return content +} diff --git a/lib/biz/spreadsheet/readme.md b/lib/biz/spreadsheet/readme.md new file mode 100644 index 00000000..ea45fc17 --- /dev/null +++ b/lib/biz/spreadsheet/readme.md @@ -0,0 +1,63 @@ +# Sheet + +The idea is to have a module which allows us to make software representation of a spreadsheet. + +The spreadsheet has a currency linked to it and also multi currency behavior, it also has powerful extra/intrapolation possibilities. + +A sheet has following format + +If we have 60 months representation (5 year), we have 60 columns + +- rows, each row represent something e.g. salary for a person per month over 5 years +- the rows can be grouped per tags +- each row has 60 cols = cells, each cell has a value +- each row has a name + +A sheet can also be represented per year or per quarter, if per year then there would be 5 columns only. + +There is also functionality to export a sheet to wiki (markdown) or html representation. + +## offline + +if you need to work offline e.g. for development do + +```bash +export OFFLINE=1 +``` + +## Macro's + + + +```js +!!sheet.graph_pie_row sheetname:'tfgridsim_run1' + rowname:'revenue_usd' + period_type:quarter + title:'a title' +``` + +- supported_actions: + - 'sheet_wiki' + - 'graph_pie_row' = pie chart for 1 row + - 'graph_line_row' + - 'graph_bar_row' + - 'graph_title_row' + - 'wiki_row_overview' + + +Properties to use in heroscript + +- rowname string - if specified then its one name +- namefilter []string - only include the exact names as specified for the rows +- includefilter []string - to use with tags filter e.g. 
['location:belgium_*'] //would match all words starting with belgium +- excludefilter []string +- period_type PeriodType - year, month, quarter +- aggregate bool = true - if more than 1 row matches should we aggregate or not +- aggregatetype RowAggregateType = .sum - important if used with include/exclude, because then we group +- unit UnitType +- title string +- title_sub string +- size string +- rowname_show bool = true - show the name of the row +- descr_show bool = false - show the description of the row, if this is on then rowname_show will be put on 0 +- description string diff --git a/lib/biz/spreadsheet/row.v b/lib/biz/spreadsheet/row.v new file mode 100644 index 00000000..12952551 --- /dev/null +++ b/lib/biz/spreadsheet/row.v @@ -0,0 +1,184 @@ +module spreadsheet + +import freeflowuniverse.herolib.data.paramsparser +// import freeflowuniverse.herolib.ui.console + +@[heap] +pub struct Row { +pub mut: + name string + alias string + description string + cells []Cell + sheet &Sheet @[skip; str: skip] + aggregatetype RowAggregateType + reprtype ReprType // how to represent it + tags string + subgroup string +} + +// pub enum RowType{ +// cur +// integer +// float +// } + +pub enum RowAggregateType { + unknown + sum + avg + max + min +} + +@[params] +pub struct RowNewParams { +pub mut: + name string + growth string + aggregatetype RowAggregateType + tags string + descr string + subgroup string + extrapolate bool = true +} + +// get a row with a certain name +// you can use the smart extrapolate function to populate the row +// params: +// name string +// growth string (this is input for the extrapolate function) +// aggregatetype e.g. sum,avg,max,min is used to go from months to e.g. year or quarter +// tags []string e.g. 
["hr","hrdev"] attach a tag to a row, can be used later to group +// smart exptrapolation is 3:2,10:5 means end month 3 we start with 2, it grows to 5 on end month 10 +pub fn (mut s Sheet) row_new(args_ RowNewParams) !&Row { + mut args := args_ + if args.aggregatetype == .unknown { + args.aggregatetype = .sum + } + name := args.name.to_lower() + if name.trim_space() == '' { + return error('name cannot be empty') + } + mut r := Row{ + sheet: &s + name: name + aggregatetype: args.aggregatetype + tags: args.tags + description: args.descr + subgroup: args.subgroup + } + s.rows[name] = &r + for _ in 0 .. s.nrcol { + r.cells << Cell{ + row: &r + } + } + assert r.cells.len == s.nrcol + if args.growth.len > 0 { + if args.extrapolate { + if !args.growth.contains(',') && !args.growth.contains(':') { + args.growth = '0:${args.growth}' + } + r.extrapolate(args.growth)! + } else { + r.smartfill(args.growth)! + } + } + return &r +} + +pub fn (mut r Row) cell_get(colnr int) !&Cell { + if colnr > r.cells.len { + return error("Cannot find cell, the cell is out of bounds, the colnr:'${colnr}' is larger than nr of cells:'${r.cells.len}'") + } + return &r.cells[colnr] +} + +pub fn (mut r Row) values_get() []f64 { + mut out := []f64{} + for cell in r.cells { + out << cell.val + } + return out +} + +// starting from cell look forward for nrcolls +// make the average +pub fn (r Row) look_forward_avg(colnr_ int, nrcols_ int) !f64 { + mut colnr := colnr_ + mut nrcols := nrcols_ + if colnr > r.cells.len { + return error("Cannot find cell, the cell is out of bounds, the colnr:'${colnr}' is larger than nr of cells:'${r.cells.len}'") + } + if colnr + nrcols > r.cells.len { + colnr = r.cells.len - nrcols_ + } + mut v := 0.0 + for i in colnr .. 
colnr + nrcols { + v += r.cells[i].val + } + avg := v / f64(nrcols) + return avg +} + +pub fn (r Row) min() f64 { + mut v := 9999999999999.0 + for cell in r.cells { + // console.print_debug(cell.val) + if cell.val < v { + v = cell.val + } + } + return v +} + +pub fn (r Row) max() f64 { + mut v := 0.0 + for cell in r.cells { + // console.print_debug(cell.val) + if cell.val > v { + v = cell.val + } + } + return v +} + +// apply the namefilter, include & exclude filter, if match return true +pub fn (row Row) filter(args_ RowGetArgs) !bool { + mut ok := false + mut args := args_ + + if args.rowname != '' { + if args.rowname !in args.namefilter { + args.namefilter << args.rowname + } + } + + if args.namefilter.len == 0 && args.includefilter.len == 0 && args.excludefilter.len == 0 { + // this means we match all + return true + } + + if args.includefilter.len > 0 || args.excludefilter.len > 0 { + tagstofilter := paramsparser.parse(row.tags)! + ok = tagstofilter.filter_match( + include: args.includefilter + exclude: args.excludefilter + )! 
+ } + for name1 in args.namefilter { + if name1.to_lower() == row.name.to_lower() { + ok = true + } + } + if ok == false { + return false + } + + return ok +} + +pub fn (mut row Row) delete() { + row.sheet.delete(row.name) +} diff --git a/lib/biz/spreadsheet/row_actions.v b/lib/biz/spreadsheet/row_actions.v new file mode 100644 index 00000000..67b83416 --- /dev/null +++ b/lib/biz/spreadsheet/row_actions.v @@ -0,0 +1,159 @@ +module spreadsheet + +pub enum RowAction { + add // add rows + substract + divide + multiply + aggregate + difference + roundint + max + min + reverse //+1 becomes -1 + forwardavg // try to find 12 forward looking cells and do avg where we are +} + +@[params] +pub struct RowActionArgs { +pub mut: + name string + action RowAction + val f64 + rows []&Row + tags string + descr string + subgroup string + aggregatetype RowAggregateType = .sum + delaymonths int // how many months should we delay the output +} + +// add one row to the other +// +// ''' +// name string optional: if not used then row will be modified itself +// action RowAction +// val f64 optional: if we want to e.g. 
multiply every cell with same val +// rows []Row optional: a row if we want to add each val of item of row, can be more than 1 +// tags string how to recognize a row (selection) +// aggregatetype RowAggregateType is unknown, sum, avg, max, min +// delaymonths int //how many months should we delay the output +// descr string +// subgroup string +// ''' +// row action is +// ''' +// add // add rows +// substract +// divide +// multiply +// aggregate +// difference +// roundint +// max +// min +// reverse //+1 becomes -1 +// forwardavg // try to find 12 forward looking cells and do avg where we are +// ''' +// +pub fn (mut r Row) action(args_ RowActionArgs) !&Row { + mut args := args_ + if args.name == '' { + args.name = r.name + r.sheet.delete(r.name) + } + + mut row_result := r.copy( + name: args.name + tags: args.tags + descr: args.descr + subgroup: args.subgroup + aggregatetype: args.aggregatetype + )! + + mut prevval := 0.0 + for x in 0 .. r.sheet.nrcol { + row_result.cells[x].empty = false + row_result.cells[x].val = r.cells[x].val + if args.rows.len > 0 { + for r2 in args.rows { + if args.action == .add { + row_result.cells[x].val = row_result.cells[x].val + r2.cells[x].val + } else if args.action == .substract { + row_result.cells[x].val = row_result.cells[x].val - r2.cells[x].val + } else if args.action == .multiply { + row_result.cells[x].val = row_result.cells[x].val * r2.cells[x].val + } else if args.action == .divide { + row_result.cells[x].val = row_result.cells[x].val / r2.cells[x].val + } else if args.action == .max { + if r2.cells[x].val > row_result.cells[x].val { + row_result.cells[x].val = r2.cells[x].val + } + } else if args.action == .min { + if r2.cells[x].val < row_result.cells[x].val { + row_result.cells[x].val = r2.cells[x].val + } + } else { + return error('Action wrongly specified for ${r} with\nargs:${args}') + } + } + } + if args.val > 0.0 { + if args.action == .add { + row_result.cells[x].val = row_result.cells[x].val + args.val + } else 
if args.action == .substract { + row_result.cells[x].val = row_result.cells[x].val - args.val + } else if args.action == .multiply { + row_result.cells[x].val = row_result.cells[x].val * args.val + } else if args.action == .divide { + row_result.cells[x].val = row_result.cells[x].val / args.val + } else if args.action == .aggregate { + row_result.cells[x].val = row_result.cells[x].val + prevval + prevval = row_result.cells[x].val + } else if args.action == .difference { + row_result.cells[x].val = row_result.cells[x].val - r.cells[x - 1].val + } else if args.action == .roundint { + row_result.cells[x].val = int(row_result.cells[x].val) + } else if args.action == .max { + if args.val > row_result.cells[x].val { + row_result.cells[x].val = args.val + } + } else if args.action == .min { + if args.val < row_result.cells[x].val { + row_result.cells[x].val = args.val + } + } else { + return error('Action wrongly specified for ${r} with\nargs:${args}') + } + } + + if args.action == .reverse { + row_result.cells[x].val = -row_result.cells[x].val + } + if args.action == .forwardavg { + a := row_result.look_forward_avg(x, 6)! + row_result.cells[x].val = a + } + } + if args.delaymonths > 0 { + row_result.delay(args.delaymonths)! + } + return row_result +} + +// pub fn (mut r Row) add(name string, r2 Row) !&Row { +// return r.action(name:name, rows:[]r2, tags:r.tags) +// } +pub fn (mut r Row) delay(monthdelay int) ! { + mut todelay := []f64{} + for x in 0 .. r.sheet.nrcol { + todelay << r.cells[x].val + } + for x in 0 .. 
r.sheet.nrcol { + if x < monthdelay { + r.cells[x].val = 0.0 + } else { + r.cells[x].val = todelay[x - monthdelay] + } + } +} diff --git a/lib/biz/spreadsheet/row_copy.v b/lib/biz/spreadsheet/row_copy.v new file mode 100644 index 00000000..d1af6d53 --- /dev/null +++ b/lib/biz/spreadsheet/row_copy.v @@ -0,0 +1,50 @@ +module spreadsheet + +import math + +@[params] +pub struct RowCopyArgs { +pub mut: + name string + tags string + descr string + subgroup string + aggregatetype RowAggregateType = .sum +} + +pub fn (mut r Row) copy(args_ RowCopyArgs) !&Row { + mut row_result := r + mut args := args_ + if args.name == '' { + return error('name cannot be empty for copy, args:${args} \non ${r}') + } + if args.tags == '' { + args.tags = r.tags + } + if args.descr == '' { + args.descr = r.description + } + if args.subgroup == '' { + args.subgroup = r.subgroup + } + if args.aggregatetype == .unknown { + args.aggregatetype = r.aggregatetype + } + if args.name.len > 0 { + mut r3 := r.sheet.row_new( + name: args.name + aggregatetype: args.aggregatetype + descr: args.descr + subgroup: args.subgroup + tags: args.tags + )! + row_result = *r3 + for x in 0 .. 
r.sheet.nrcol { + row_result.cells[x].empty = false + row_result.cells[x].val = r.cells[x].val + } + } else { + return error('name need to be specified:\n${args_}') + } + return &row_result +} diff --git a/lib/biz/spreadsheet/row_recurring.v b/lib/biz/spreadsheet/row_recurring.v new file mode 100644 index 00000000..1a8b21c3 --- /dev/null +++ b/lib/biz/spreadsheet/row_recurring.v @@ -0,0 +1,47 @@ +module spreadsheet + +import math + +@[params] +pub struct RowRecurringArgs { + RowCopyArgs +pub mut: + nrmonths int = 60 + delaymonths int // how many months should we delay the output +} + +pub fn (mut r Row) recurring(args_ RowRecurringArgs) !&Row { + mut args := args_ + if args.name == '' { + args.name = r.name + r.sheet.delete(r.name) + } + + if args.nrmonths < 5 { + return error('nrmonths should be at least 5 for recurring, args:${args} \non ${r}') + } + + mut row_result := r.copy( + name: args.name + tags: args.tags + descr: args.descr + subgroup: args.subgroup + aggregatetype: args.aggregatetype + )! + + for x in 0 .. r.sheet.nrcol { + mut aggregated := 0.0 + startnr := math.max(0, x - args.nrmonths) + + for x2 in startnr .. x + 1 { + // println("${startnr}-${x} ${x2}:${r.cells[x2].val}") + aggregated += r.cells[x2].val // go back max nrmonths months and aggregate it all + } + row_result.cells[x].empty = false + row_result.cells[x].val = aggregated + } + if args.delaymonths > 0 { + row_result.delay(args.delaymonths)! + } + return row_result +} diff --git a/lib/biz/spreadsheet/sheet.v b/lib/biz/spreadsheet/sheet.v new file mode 100644 index 00000000..4e02238f --- /dev/null +++ b/lib/biz/spreadsheet/sheet.v @@ -0,0 +1,293 @@ +module spreadsheet + +import freeflowuniverse.herolib.data.currency +import freeflowuniverse.herolib.data.paramsparser +import freeflowuniverse.herolib.ui.console + +@[heap] +pub struct Sheet { +pub mut: + name string + rows map[string]&Row + nrcol int = 60 + params SheetParams + currency currency.Currency = currency.get('USD')! 
+} + +pub struct SheetParams { +pub mut: + visualize_cur bool // if we want to show e.g. $44.4 in a cell or just 44.4 +} + +// find maximum length of a cell (as string representation for a colnr) +// 0 is the first col +// the headers if used are never counted +pub fn (mut s Sheet) cells_width(colnr int) !int { + mut lmax := 0 + for _, mut row in s.rows { + if row.cells.len > colnr { + mut c := row.cell_get(colnr)! + ll := c.repr().len + if ll > lmax { + lmax = ll + } + } + } + return lmax +} + +// walk over all rows, return the max width of the name and/or alias field of a row +pub fn (mut s Sheet) rows_names_width_max() int { + mut res := 0 + for _, mut row in s.rows { + if row.name.len > res { + res = row.name.len + } + if row.alias.len > res { + res = row.alias.len + } + } + return res +} + +// walk over all rows, return the max width of the description field of a row +pub fn (mut s Sheet) rows_description_width_max() int { + mut res := 0 + for _, mut row in s.rows { + if row.description.len > res { + res = row.description.len + } + } + return res +} + +@[params] +pub struct Group2RowArgs { +pub mut: + name string + include []string // to use with params filter e.g. ['location:belgium_*'] //would match all words starting with belgium + exclude []string + tags string + descr string + subgroup string + aggregatetype RowAggregateType = .sum +} + +// find all rows which have one of the tags +// aggregate (sum) them into one row +// returns a row with the result +// useful to e.g. make new row which makes sum of all salaries for e.g. dev and engineering tag +pub fn (mut s Sheet) group2row(args Group2RowArgs) !&Row { + name := args.name + if name == '' { + return error('name cannot be empty') + } + mut rowout := s.row_new( + name: name + tags: args.tags + descr: args.descr + subgroup: args.subgroup + aggregatetype: args.aggregatetype + )! + for _, row in s.rows { + tagstofilter := paramsparser.parse(row.tags)! 
+ matched := tagstofilter.filter_match(include: args.include, exclude: args.exclude)! + if matched { + // console.print_debug("MMMMMAAAAATCH: \n${args.include} ${row.tags}") + // console.print_debug(row) + // if true{panic("SDSD")} + mut x := 0 + for cell in row.cells { + rowout.cells[x].val += cell.val + rowout.cells[x].empty = false + x += 1 + } + } + } + return rowout +} + +@[params] +pub struct ToYearQuarterArgs { +pub mut: + name string + namefilter []string // only include the exact names as specified for the rows + includefilter []string // matches for the tags + excludefilter []string // matches for the tags + period_months int = 12 +} + +// internal function used by to year and by to quarter +pub fn (s Sheet) tosmaller(args_ ToYearQuarterArgs) !&Sheet { + mut args := args_ + mut sheetname := args.name + if sheetname == '' { + sheetname = s.name + '_year' + } + // console.print_debug("to smaller for sheet: ${s.name} rows:${s.rows.len}") + nrcol_new := int(s.nrcol / args.period_months) + // println("nr cols: ${s.nrcol} ${args.period_months} ${nrcol_new} ") + if f64(nrcol_new) != s.nrcol / args.period_months { + // means we can't do it + panic('is bug, can only be 4 or 12') + } + mut sheet_out := sheet_new( + name: sheetname + nrcol: nrcol_new + visualize_cur: s.params.visualize_cur + curr: s.currency.name + )! + for _, row in s.rows { + // QUESTION: how to parse period_months + ok := row.filter( + rowname: args.name + namefilter: args.namefilter + includefilter: args.includefilter + excludefilter: args.excludefilter + period_type: .month + )! + // console.print_debug("process row in to smaller: ${row.name}, result ${ok}") + if ok == false { + continue + } + // means filter not specified or filtered + mut rnew := sheet_out.row_new( + name: row.name + aggregatetype: row.aggregatetype + tags: row.tags + growth: '0:0.0' + descr: row.description + )! + for x in 0 .. nrcol_new { + mut newval := 0.0 + for xsub in 0 .. 
args.period_months { + xtot := x * args.period_months + xsub + // console.print_debug("${row.name} $xtot ${row.cells.len}") + // if row.cells.len < xtot+1{ + // console.print_debug(row) + // panic("too many cells") + // } + if row.aggregatetype == .sum || row.aggregatetype == .avg { + newval += row.cells[xtot].val + } else if row.aggregatetype == .max { + if row.cells[xtot].val > newval { + newval = row.cells[xtot].val + } + } else if row.aggregatetype == .min { + if row.cells[xtot].val < newval { + newval = row.cells[xtot].val + } + } else { + panic('not implemented') + } + } + if row.aggregatetype == .sum || row.aggregatetype == .max || row.aggregatetype == .min { + // console.print_debug("sum/max/min ${row.name} $x ${rnew.cells.len}") + rnew.cells[x].val = newval + } else { + // avg + // console.print_debug("avg ${row.name} $x ${rnew.cells.len}") + rnew.cells[x].val = newval / args.period_months + } + } + } + // console.print_debug("to smaller done") + return sheet_out +} + +// make a copy of the sheet and aggregate on year +// params +// name string +// rowsfilter []string +// tagsfilter []string +// tags if set will see that there is at least one corresponding tag per row +// rawsfilter is list of names of rows which will be included +pub fn (mut s Sheet) toyear(args ToYearQuarterArgs) !&Sheet { + mut args2 := args + args2.period_months = 12 + return s.tosmaller(args2) +} + +// make a copy of the sheet and aggregate on quarter +// params +// name string +// rowsfilter []string +// tagsfilter []string +// tags if set will see that there is at least one corresponding tag per row +// rawsfilter is list of names of rows which will be included +pub fn (mut s Sheet) toquarter(args ToYearQuarterArgs) !&Sheet { + mut args2 := args + args2.period_months = 3 + return s.tosmaller(args2) +} + +// return array with same amount of items as cols in the rows +// +// for year we return Y1, Y2, ... +// for quarter we return Q1, Q2, ... +// for months we returm m1, m2, ... 
+pub fn (mut s Sheet) header() ![]string { + // if col + 40 = months + if s.nrcol > 40 { + mut res := []string{} + for x in 1 .. s.nrcol + 1 { + res << 'M${x}' + } + return res + } + // if col + 10 = quarters + if s.nrcol > 10 { + mut res := []string{} + for x in 1 .. s.nrcol + 1 { + res << 'Q${x}' + } + return res + } + + // else is years + mut res := []string{} + for x in 1 .. s.nrcol + 1 { + res << 'Y${x}' + } + return res +} + +pub fn (mut s Sheet) json() string { + // TODO: not done yet + // return json.encode_pretty(s) + return '' +} + +// find row, report error if not found +pub fn (mut s Sheet) row_get(name string) !&Row { + mut row := s.rows[name] or { return error('could not find row with name: ${name}') } + return row +} + +pub fn (mut s Sheet) values_get(name string) ![]f64 { + mut r := s.row_get(name)! + vs := r.values_get() + return vs +} + +pub fn (mut s Sheet) row_delete(name string) { + if name in s.rows { + s.rows.delete(name) + } +} + +// find row, report error if not found +pub fn (mut s Sheet) cell_get(row string, col int) !&Cell { + mut r := s.row_get(row)! 
+ mut c := r.cells[col] or { + return error('could not find cell from col:${col} for row name: ${row}') + } + return &c +} + +// find row, report error if not found +pub fn (mut s Sheet) delete(name string) { + if name in s.rows { + s.rows.delete(name) + } +} diff --git a/lib/biz/spreadsheet/sheet_getters.v b/lib/biz/spreadsheet/sheet_getters.v new file mode 100644 index 00000000..eb0cf1b0 --- /dev/null +++ b/lib/biz/spreadsheet/sheet_getters.v @@ -0,0 +1,185 @@ +module spreadsheet + +import freeflowuniverse.herolib.ui.console +import math + +fn remove_empty_line(txt string) string { + mut out := '' + for line in txt.split_into_lines() { + if line.trim_space() == '' { + continue + } + out += '${line}\n' + } + return out +} + +@[params] +pub struct RowGetArgs { +pub mut: + rowname string // if specified then its one name + namefilter []string // only include the exact names as secified for the rows + includefilter []string // to use with params filter e.g. ['location:belgium_*'] //would match all words starting with belgium + excludefilter []string + period_type PeriodType // year, month, quarter + aggregate bool = true // if more than 1 row matches should we aggregate or not + aggregatetype RowAggregateType = .sum // important if used with include/exclude, because then we group + unit UnitType + title string + title_sub string + size string + rowname_show bool = true // show the name of the row + descr_show bool + description string +} + +pub enum UnitType { + normal + thousand + million + billion +} + +pub enum PeriodType { + year + month + quarter + error +} + +// find rownames which match RowGetArgs +pub fn (s Sheet) rownames_get(args RowGetArgs) ![]string { + mut res := []string{} + for _, row in s.rows { + if row.filter(args)! { + res << row.name + } + } + return res +} + +// get one rowname, if more than 1 will fail, if 0 will fail +pub fn (s Sheet) rowname_get(args RowGetArgs) !string { + r := s.rownames_get(args)! 
+ if r.len == 1 { + return r[0] + } + if r.len == 0 { + return error("Didn't find rows for ${s.name}.\n${args}") + } + return error('Found too many rows for ${s.name}.\n${args}') +} + +// return e.g. "'Y1', 'Y2', 'Y3', 'Y4', 'Y5', 'Y6'" if year, is for header +pub fn (mut s Sheet) header_get_as_list(period_type PeriodType) ![]string { + str := s.header_get_as_string(period_type)! + return str.split(',') +} + +// return e.g. "'Y1', 'Y2', 'Y3', 'Y4', 'Y5', 'Y6'" if year, is for header +pub fn (mut s Sheet) data_get_as_list(args RowGetArgs) ![]string { + str := s.data_get_as_string(args)! + return str.split(',') +} + +// return e.g. "'Y1', 'Y2', 'Y3', 'Y4', 'Y5', 'Y6'" if year, is for header +pub fn (mut s Sheet) header_get_as_string(period_type PeriodType) !string { + err_pre := "Can't get header for sheet:${s.name}\n" + nryears := int(s.nrcol / 12) + mut out := '' + match period_type { + .year { + for i in 1 .. (nryears + 1) { + out += "'Y${i}', " + } + } + .quarter { + for i in 1 .. (nryears * 4 + 1) { + out += "'Q${i}', " + } + } + .month { + for i in 1 .. (12 * nryears + 1) { + out += "'M${i}', " + } + } + else { + return error('${err_pre}Period type not well specified') + } + } + out = out.trim_space().trim(',').trim_space() + return out +} + +// return the values +pub fn (mut s Sheet) data_get_as_string(args RowGetArgs) !string { + if args.rowname == '' { + return error('rowname needs to be specified') + } + nryears := 5 + err_pre := "Can't get data for sheet:${s.name} row:${args.rowname}.\n" + mut s2 := s + + if args.period_type == .year { + s.toyear( + name: args.rowname + namefilter: args.namefilter + includefilter: args.includefilter + excludefilter: args.excludefilter + )! + } + if args.period_type == .quarter { + s.toquarter( + name: args.rowname + namefilter: args.namefilter + includefilter: args.includefilter + excludefilter: args.excludefilter + )! + } + mut out := '' + + // console.print_debug(s2.row_get(args.rowname)!) 
+ mut vals := s2.values_get(args.rowname)! + if args.period_type == .year && vals.len != nryears { + return error('${err_pre}Vals.len need to be 6, for year.\nhere:\n${vals}') + } + if args.period_type == .quarter && vals.len != nryears * 4 { + return error('${err_pre}vals.len need to be 6*4, for quarter.\nhere:\n${vals}') + } + if args.period_type == .month && vals.len != nryears * 12 { + return error('${err_pre}vals.len need to be 6*12, for month.\nhere:\n${vals}') + } + + for mut val in vals { + if args.unit == .thousand { + val = val / 1000.0 + } + if args.unit == .million { + val = val / 1000000.0 + } + if args.unit == .billion { + val = val / 1000000000.0 + } + out += ',${math.round_sig(val, 1)}' + } + return out.trim(',') +} + +// use RowGetArgs to get to smaller version of sheet +pub fn (mut s Sheet) filter(args RowGetArgs) !&Sheet { + period_months := match args.period_type { + .year { 12 } + .month { 1 } + .quarter { 3 } + else { panic('bug') } + } + + tga := ToYearQuarterArgs{ + namefilter: args.namefilter + includefilter: args.includefilter + excludefilter: args.excludefilter + period_months: period_months + } + + return s.tosmaller(tga)! 
+} diff --git a/lib/biz/spreadsheet/tools.v b/lib/biz/spreadsheet/tools.v new file mode 100644 index 00000000..88d7763b --- /dev/null +++ b/lib/biz/spreadsheet/tools.v @@ -0,0 +1,17 @@ +module spreadsheet + +pub fn array2float(list []int) []f64 { + mut list2 := []f64{} + for i in list { + list2 << f64(i) + } + return list2 +} + +pub fn array2int(list []f64) []int { + mut list2 := []int{} + for i in list { + list2 << int(i) + } + return list2 +} diff --git a/lib/biz/spreadsheet/wiki.v b/lib/biz/spreadsheet/wiki.v new file mode 100644 index 00000000..bc119778 --- /dev/null +++ b/lib/biz/spreadsheet/wiki.v @@ -0,0 +1,86 @@ +module spreadsheet + +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.ui.console +// format a sheet properly in wiki format + +pub fn (mut s Sheet) wiki(args_ RowGetArgs) !string { + mut args := args_ + + _ := match args.period_type { + .year { 12 } + .month { 1 } + .quarter { 3 } + else { panic('bug') } + } + + // console.print_debug("wiki with args:${args}") + mut sheet := s.filter(args)! // this will do the filtering and if needed make smaller + + mut out := '' + if args.title.len > 0 { + out = '## ${args.title}\n\n' + } + if args.title != '' { + out += args.title + '\n\n' + } + + mut colmax := []int{} + for x in 0 .. sheet.nrcol { + colmaxval := sheet.cells_width(x)! + colmax << colmaxval + } + + header := sheet.header()! + + // get the width of name and optionally description + mut names_width := sheet.rows_names_width_max() + + mut header_wiki_items := []string{} + mut header_wiki_items2 := []string{} + if args.rowname_show && names_width > 0 { + header_wiki_items << texttools.expand('|', names_width + 1, ' ') + header_wiki_items2 << texttools.expand('|', names_width + 1, '-') + } + for x in 0 .. 
sheet.nrcol { + colmaxval := colmax[x] + headername := header[x] + item := texttools.expand(headername, colmaxval, ' ') + header_wiki_items << '|${item}' + item2 := texttools.expand('', colmaxval, '-') + header_wiki_items2 << '|${item2}' + } + header_wiki_items << '|' + header_wiki_items2 << '|' + header_wiki := header_wiki_items.join('') + header_wiki2 := header_wiki_items2.join('') + + out += header_wiki + '\n' + out += header_wiki2 + '\n' + + for _, mut row in sheet.rows { + mut wiki_items := []string{} + mut rowname := row.name + if row.description.len > 0 { + names_width = sheet.rows_description_width_max() + rowname = row.description + } + if args.rowname_show && names_width > 0 { + if names_width > 60 { + names_width = 60 + } + wiki_items << texttools.expand('|${rowname}', names_width + 1, ' ') + } + for x in 0 .. sheet.nrcol { + colmaxval := colmax[x] + val := row.cells[x].str() + item := texttools.expand(val, colmaxval, ' ') + wiki_items << '|${item}' + } + wiki_items << '|' + wiki2 := wiki_items.join('') + out += wiki2 + '\n' + } + + return out +} diff --git a/lib/biz/spreadsheet/wiki_charts.v b/lib/biz/spreadsheet/wiki_charts.v new file mode 100644 index 00000000..b064e9c0 --- /dev/null +++ b/lib/biz/spreadsheet/wiki_charts.v @@ -0,0 +1,188 @@ +module spreadsheet + +import freeflowuniverse.herolib.data.markdownparser.elements +import freeflowuniverse.herolib.ui.console + +pub fn (mut s Sheet) wiki_title_chart(args RowGetArgs) string { + if args.title.len > 0 { + titletxt := " + title: { + text: '${args.title}', + subtext: '${args.title_sub}', + left: 'center' + }, + " + return titletxt + } + return '' +} + +pub fn (mut s_ Sheet) wiki_row_overview(args RowGetArgs) !string { + mut s := s_.filter(args)! 
+ + rows_values := s.rows.values().map([it.name, it.description, it.tags]) + mut rows := []elements.Row{} + for values in rows_values { + rows << elements.Row{ + cells: values.map(&elements.Paragraph{ + content: it + }) + } + } + header_items := ['Row Name', 'Description', 'Tags'] + table := elements.Table{ + header: header_items.map(&elements.Paragraph{ + content: it + }) + // TODO: need to use the build in mechanism to filter rows + rows: rows + alignments: [.left, .left, .left] + } + return table.markdown() +} + +// produce a nice looking bar chart see +// https://echarts.apache.org/examples/en/editor.html?c=line-stack +pub fn (mut s Sheet) wiki_line_chart(args_ RowGetArgs) !string { + mut args := args_ + + rownames := s.rownames_get(args)! + header := s.header_get_as_string(args.period_type)! + mut series_lines := []string{} + + for rowname in rownames { + data := s.data_get_as_string(RowGetArgs{ + ...args + rowname: rowname + })! + series_lines << '{ + name: \'${rowname}\', + type: \'line\', + stack: \'Total\', + data: [${data}] + }' + } + + // TODO: need to implement the multiple results which can come back from the args, can be more than 1 + + // header := s.header_get_as_string(args.period_type)! + // data := s.data_get_as_string(args)! + // console.print_debug('HERE! ${header}') + // console.print_debug('HERE!! 
${data}') + + template := " + ${s.wiki_title_chart(args)} + tooltip: { + trigger: 'axis' + }, + legend: { + data: ${rownames} + }, + grid: { + left: '3%', + right: '4%', + bottom: '3%', + containLabel: true + }, + toolbox: { + feature: { + saveAsImage: {} + } + }, + xAxis: { + type: 'category', + boundaryGap: false, + data: [${header}] + }, + yAxis: { + type: 'value' + }, + series: [${series_lines.join(',')}] + " + out := remove_empty_line('```echarts\n{${template}\n};\n```\n') + return out +} + +// produce a nice looking bar chart see +// https://echarts.apache.org/examples/en/index.html#chart-type-bar +pub fn (mut s Sheet) wiki_bar_chart(args_ RowGetArgs) !string { + mut args := args_ + args.rowname = s.rowname_get(args)! + header := s.header_get_as_string(args.period_type)! + data := s.data_get_as_string(args)! + bar1 := " + ${s.wiki_title_chart(args)} + xAxis: { + type: 'category', + data: [${header}] + }, + yAxis: { + type: 'value' + }, + series: [ + { + data: [${data}], + type: 'bar', + showBackground: true, + backgroundStyle: { + color: 'rgba(180, 180, 180, 0.2)' + } + } + ] + " + out := remove_empty_line('```echarts\n{${bar1}\n};\n```\n') + return out +} + +// produce a nice looking bar chart see +// https://echarts.apache.org/examples/en/index.html#chart-type-bar +pub fn (mut s Sheet) wiki_pie_chart(args_ RowGetArgs) !string { + mut args := args_ + args.rowname = s.rowname_get(args)! + header := s.header_get_as_list(args.period_type)! + data := s.data_get_as_list(args)! 
+ + mut radius := '' + if args.size.len > 0 { + radius = "radius: '${args.size}'," + } + + if header.len != data.len { + return error('data and header lengths must match.\n${header}\n${data}') + } + + mut data_lines := []string{} + for i, _ in data { + data_lines << '{ value: ${data[i]}, name: ${header[i]}}' + } + data_str := '[${data_lines.join(',')}]' + + bar1 := " + ${s.wiki_title_chart(args)} + tooltip: { + trigger: 'item' + }, + legend: { + orient: 'vertical', + left: 'left' + }, + series: [ + { + name: 'Access From', + type: 'pie', + ${radius} + data: ${data_str}, + emphasis: { + itemStyle: { + shadowBlur: 10, + shadowOffsetX: 0, + shadowColor: 'rgba(0, 0, 0, 0.5)' + } + } + } + ] + + " + out := remove_empty_line('```echarts\n{${bar1}\n};\n```\n') + return out +} diff --git a/lib/data/doctree/collection/collection.v b/lib/data/doctree/collection/collection.v new file mode 100644 index 00000000..e59e178a --- /dev/null +++ b/lib/data/doctree/collection/collection.v @@ -0,0 +1,48 @@ +module collection + +import freeflowuniverse.herolib.core.pathlib { Path } +import freeflowuniverse.herolib.data.doctree.collection.data +import freeflowuniverse.herolib.core.texttools + +@[heap] +pub struct Collection { +pub mut: + name string @[required] + path Path @[required] + fail_on_error bool + heal bool = true + pages map[string]&data.Page + files map[string]&data.File + images map[string]&data.File + errors []CollectionError +} + +@[params] +pub struct CollectionNewArgs { +pub mut: + name string @[required] + path string @[required] + heal bool = true // healing means we fix images, if selected will automatically load, remove stale links + load bool = true + fail_on_error bool +} + +// get a new collection +pub fn new(args_ CollectionNewArgs) !Collection { + mut args := args_ + args.name = texttools.name_fix(args.name) + + mut pp := pathlib.get_dir(path: args.path)! 
// will raise error if path doesn't exist + mut collection := Collection{ + name: args.name + path: pp + heal: args.heal + fail_on_error: args.fail_on_error + } + + if args.load { + collection.scan() or { return error('Error scanning collection ${args.name}:\n${err}') } + } + + return collection +} diff --git a/lib/data/doctree/collection/data/error.v b/lib/data/doctree/collection/data/error.v new file mode 100644 index 00000000..8a33dfcd --- /dev/null +++ b/lib/data/doctree/collection/data/error.v @@ -0,0 +1,29 @@ +module data + +import freeflowuniverse.herolib.core.pathlib { Path } + +pub enum PageErrorCat { + unknown + file_not_found + image_not_found + page_not_found + def +} + +pub struct PageMultiError { + Error +pub mut: + errs []PageError +} + +pub fn (err PageMultiError) msg() string { + return 'Failed in processing page with one or multiple errors: ${err.errs}' +} + +pub struct PageError { + Error +pub mut: + path Path + msg string + cat PageErrorCat +} diff --git a/lib/data/doctree/collection/data/file.v b/lib/data/doctree/collection/data/file.v new file mode 100644 index 00000000..b6f4b254 --- /dev/null +++ b/lib/data/doctree/collection/data/file.v @@ -0,0 +1,102 @@ +module data + +import freeflowuniverse.herolib.core.pathlib +import os + +pub enum FileStatus { + unknown + ok + error +} + +pub enum FileType { + file + image +} + +@[heap] +pub struct File { +pub mut: + collection_path pathlib.Path + name string // received a name fix + ext string + path pathlib.Path + pathrel string + state FileStatus + pages_linked []&Page // pointer to pages which use this file + ftype FileType + collection_name string +} + +@[params] +pub struct NewFileArgs { +pub: + name string // received a name fix + collection_path pathlib.Path + pathrel string + path pathlib.Path + collection_name string @[required] +} + +pub fn new_file(args NewFileArgs) !File { + mut f := File{ + name: args.name + path: args.path + collection_path: args.collection_path + pathrel: args.pathrel + 
collection_name: args.collection_name + } + + f.init()! + + return f +} + +pub fn (file File) file_name() string { + return '${file.name}.${file.ext}' +} + +// parses file name, extension and relative path +pub fn (mut file File) init() ! { + if file.path.is_image() { + file.ftype = .image + } + + file.name = file.path.name_fix_no_ext() + file.ext = file.path.path.all_after_last('.').to_lower() + + path_rel := file.path.path_relative(file.collection_path.path) or { + return error('cannot get relative path.\n${err}') + } + + file.pathrel = path_rel.trim('/') +} + +fn (mut file File) delete() ! { + file.path.delete()! +} + +// TODO: what if this is moved to another collection, or outside the scope of the tree? +fn (mut file File) mv(dest string) ! { + mut destination := pathlib.get_dir(path: dest)! // will fail if dir doesn't exist + + os.mv(file.path.path, destination.path) or { + return error('could not move ${file.path.path} to ${destination.path} .\n${err}\n${file}') + } + + // need to get relative path in, in relation to collection + file.pathrel = destination.path_relative(file.collection_path.path)! + file.path = destination +} + +fn (mut file File) exists() !bool { + return file.path.exists() +} + +pub fn (file_ File) copy(dest string) ! 
{ + mut file := file_ + mut dest2 := pathlib.get(dest) + file.path.copy(dest: dest2.path, rsync: false) or { + return error('Could not copy file: ${file.path.path} to ${dest} .\n${err}\n${file}') + } +} diff --git a/lib/data/doctree/collection/data/page.v b/lib/data/doctree/collection/data/page.v new file mode 100644 index 00000000..a33b02a6 --- /dev/null +++ b/lib/data/doctree/collection/data/page.v @@ -0,0 +1,164 @@ +module data + +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.data.markdownparser.elements { Action, Doc, Element } +import freeflowuniverse.herolib.data.markdownparser + +pub enum PageStatus { + unknown + ok + error +} + +@[heap] +pub struct Page { +mut: + doc &Doc @[str: skip] + element_cache map[int]Element + changed bool +pub mut: + name string // received a name fix + alias string // a proper name for e.g. def + path pathlib.Path + collection_name string +} + +@[params] +pub struct NewPageArgs { +pub: + name string @[required] + path pathlib.Path @[required] + collection_name string @[required] +} + +pub fn new_page(args NewPageArgs) !Page { + if args.collection_name == '' { + return error('page collection name must not be empty') + } + + if args.name == '' { + return error('page name must not be empty') + } + mut doc := markdownparser.new(path: args.path.path, collection_name: args.collection_name) or { + return error('failed to parse doc for path ${args.path.path}\n${err}') + } + children := doc.children_recursive() + mut element_cache := map[int]Element{} + for child in children { + element_cache[child.id] = child + } + mut new_page := Page{ + element_cache: element_cache + name: args.name + path: args.path + collection_name: args.collection_name + doc: &doc + } + return new_page +} + +// return doc, reparse if needed +fn (mut page Page) doc() !&Doc { + if page.changed { + content := page.doc.markdown()! + page.reparse_doc(content)! 
+ } + + return page.doc +} + +// return doc, reparse if needed +fn (page Page) doc_immute() !&Doc { + if page.changed { + content := page.doc.markdown()! + doc := markdownparser.new(content: content, collection_name: page.collection_name)! + return &doc + } + return page.doc +} + +// reparse doc markdown and assign new doc to page +fn (mut page Page) reparse_doc(content string) ! { + doc := markdownparser.new(content: content, collection_name: page.collection_name)! + page.element_cache = map[int]Element{} + for child in doc.children_recursive() { + page.element_cache[child.id] = child + } + + page.doc = &doc + page.changed = false +} + +pub fn (page Page) key() string { + return '${page.collection_name}:${page.name}' +} + +pub fn (page Page) get_linked_pages() ![]string { + doc := page.doc_immute()! + return doc.linked_pages +} + +pub fn (page Page) get_markdown() !string { + mut doc := page.doc_immute()! + return doc.markdown()! +} + +pub fn (mut page Page) set_content(content string) ! { + page.reparse_doc(content)! +} + +fn (mut page Page) get_element(element_id int) !Element { + return page.element_cache[element_id] or { + return error('no element found with id ${element_id}') + } +} + +// TODO: this should not be allowed (giving access to modify page content to any caller) +pub fn (mut page Page) get_all_actions() ![]&Action { + mut actions := []&Action{} + mut doc := page.doc()! + for element in doc.children_recursive() { + if element is Action { + actions << element + } + } + + return actions +} + +pub fn (page Page) get_include_actions() ![]Action { + mut actions := []Action{} + // TODO: check if below is necessary + // mut doc := page.doc_immute()! + for element in page.doc.children_recursive() { + if element is Action { + if element.action.actor == 'wiki' && element.action.name == 'include' { + actions << *element + } + } + } + return actions +} + +pub fn (mut page Page) set_action_element_to_processed(element_id int) ! 
{ + mut element := page.element_cache[element_id] or { + return error('page ${page.path} doc has no element with id ${element_id}') + } + + if mut element is Action { + element.action_processed = true + page.changed = true + return + } + + return error('element with id ${element_id} is not an action') +} + +pub fn (mut page Page) set_element_content_no_reparse(element_id int, content string) ! { + mut element := page.element_cache[element_id] or { + return error('page ${page.path} doc has no element with id ${element_id}') + } + + element.content = content + page.changed = true +} diff --git a/lib/data/doctree/collection/data/process_aliases.v b/lib/data/doctree/collection/data/process_aliases.v new file mode 100644 index 00000000..4b683d60 --- /dev/null +++ b/lib/data/doctree/collection/data/process_aliases.v @@ -0,0 +1,49 @@ +module data + +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.data.markdownparser.elements + +// returns !!wiki.def actions +pub fn (mut page Page) get_def_actions() ![]elements.Action { + mut doc := page.doc()! + mut def_actions := doc.actionpointers(actor: 'wiki', name: 'def') + mut ret := []elements.Action{} + for def in def_actions { + ret << *def + } + + return ret +} + +// returns page aliases, and removes processed action's content +pub fn (mut page Page) process_def_action(element_id int) ![]string { + mut action_element := page.get_element(element_id)! + + mut doc := page.doc()! + if mut action_element is elements.Action { + mut aliases := map[string]bool{} + def_action := action_element.action + page.alias = def_action.params.get_default('name', '')! + if page.alias == '' { + page.alias = doc.header_name()! + } + + action_element.action_processed = true + action_element.content = '' + page.changed = true + for alias in def_action.params.get_list('alias')! 
{ + mut processed_alias := alias + if processed_alias.to_lower().ends_with('.md') { + // remove the .md at end + processed_alias = processed_alias[0..page.collection_name.len - 3] + } + + processed_alias = texttools.name_fix(processed_alias).replace('_', '') + aliases[processed_alias] = true + } + + return aliases.keys() + } + + return error('element with id ${element_id} is not an action') +} diff --git a/lib/data/doctree/collection/data/process_aliases_test.v b/lib/data/doctree/collection/data/process_aliases_test.v new file mode 100644 index 00000000..79a86f13 --- /dev/null +++ b/lib/data/doctree/collection/data/process_aliases_test.v @@ -0,0 +1,40 @@ +module data + +import freeflowuniverse.herolib.core.pathlib + +fn test_get_def_actions() { + mut page1_path := pathlib.get_file(path: '/tmp/page1', create: true)! + page1_content := "!!wiki.def alias:'tf-dev,cloud-dev,threefold-dev' name:'about us'" + page1_path.write(page1_content)! + mut page1 := new_page(name: 'page1', path: page1_path, collection_name: 'col1')! + def_actions := page1.get_def_actions()! + + assert def_actions.len == 1 + + action := def_actions[0].action + assert action.params.get('name')! == 'about us' + mut aliases := action.params.get_list('alias')! + aliases.sort() + assert ['cloud-dev', 'tf-dev', 'threefold-dev'] == aliases +} + +fn test_process_def_action() { + // create page with def action + // get actions + // process def action + // processed page should have action removed and alias set + mut page1_path := pathlib.get_file(path: '/tmp/page1', create: true)! + page1_content := "!!wiki.def alias:'tf-dev,cloud-dev,threefold-dev' name:'about us'" + page1_path.write(page1_content)! + mut page1 := new_page(name: 'page1', path: page1_path, collection_name: 'col1')! + def_actions := page1.get_def_actions()! + + assert def_actions.len == 1 + + mut aliases := page1.process_def_action(def_actions[0].id)! + assert page1.get_markdown()! 
== '' + assert page1.alias == 'about us' + + aliases.sort() + assert ['clouddev', 'tfdev', 'threefolddev'] == aliases +} diff --git a/lib/data/doctree/collection/data/process_def_pointers.v b/lib/data/doctree/collection/data/process_def_pointers.v new file mode 100644 index 00000000..8e11f660 --- /dev/null +++ b/lib/data/doctree/collection/data/process_def_pointers.v @@ -0,0 +1,34 @@ +module data + +// returns all page def elements (similar to *DEF) +pub fn (mut page Page) get_def_names() ![]string { + mut defnames := map[string]bool{} + mut doc := page.doc()! + for defitem in doc.defpointers() { + defname := defitem.nameshort + defnames[defname] = true + } + + return defnames.keys() +} + +// removes the def content, and generates a link to the page +pub fn (mut page Page) set_def_links(def_data map[string][]string) ! { + mut doc := page.doc()! + for mut defitem in doc.defpointers() { + defname := defitem.nameshort + + v := def_data[defname] or { continue } + if v.len != 2 { + return error('invalid def data length: expected 2, found ${v.len}') + } + + defitem.pagekey = v[0] + defitem.pagename = v[1] + + defitem.process_link()! + } + + doc.process()! + page.changed = true +} diff --git a/lib/data/doctree/collection/data/process_def_pointers_test.v b/lib/data/doctree/collection/data/process_def_pointers_test.v new file mode 100644 index 00000000..2a013a02 --- /dev/null +++ b/lib/data/doctree/collection/data/process_def_pointers_test.v @@ -0,0 +1,23 @@ +module data + +import freeflowuniverse.herolib.core.pathlib +import rand + +fn test_process_def_pointers() { + // create a page with def pointers to two different pages + // set def links on page. + // processed page should have links to the other two pages + mut page1_path := pathlib.get_file(path: '/tmp/page1', create: true)! + alias1, alias2 := rand.string(5).to_upper(), rand.string(5).to_upper() + page1_content := '*${alias1}\n*${alias2}' + page1_path.write(page1_content)! 
+ mut page1 := new_page(name: 'page1', path: page1_path, collection_name: 'col1')! + + mut defs := map[string][]string{} + defs['${alias1.to_lower()}'] = ['col2:page2', 'page2 alias'] + defs['${alias2.to_lower()}'] = ['col3:page3', 'my page3 alias'] + + page1.set_def_links(defs)! + + assert page1.get_markdown()! == '[page2 alias](col2:page2.md)\n[my page3 alias](col3:page3.md)' +} diff --git a/lib/data/doctree/collection/data/process_link.v b/lib/data/doctree/collection/data/process_link.v new file mode 100644 index 00000000..d41154fe --- /dev/null +++ b/lib/data/doctree/collection/data/process_link.v @@ -0,0 +1,59 @@ +module data + +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.data.markdownparser.elements +import freeflowuniverse.herolib.data.doctree.pointer + +// Note: doc should not get reparsed after invoking this method +pub fn (page Page) process_links(paths map[string]string) ![]string { + mut not_found := map[string]bool{} + mut doc := page.doc_immute()! + for mut element in doc.children_recursive() { + if mut element is elements.Link { + if element.cat == .html || (element.cat == .anchor && element.url == '') { + // is external link or same page anchor, nothing to process + // maybe in the future check if exists + continue + } + mut name := texttools.name_fix_keepext(element.filename) + mut site := texttools.name_fix(element.site) + if site == '' { + site = page.collection_name + } + pointerstr := '${site}:${name}' + + ptr := pointer.pointer_new(text: pointerstr, collection: page.collection_name)! 
+ mut path := paths[ptr.str()] or { + not_found[ptr.str()] = true + continue + } + + if ptr.cat == .page && ptr.str() !in doc.linked_pages { + doc.linked_pages << ptr.str() + } + + if ptr.collection == page.collection_name { + // same directory + path = './' + path.all_after_first('/') + } else { + path = '../${path}' + } + + if ptr.cat == .image && element.extra.trim_space() != '' { + path += ' ${element.extra.trim_space()}' + } + + mut out := '[${element.description}](${path})' + if ptr.cat == .image { + out = '!${out}' + } + + element.content = out + element.processed = false + element.state = .linkprocessed + element.process()! + } + } + + return not_found.keys() +} diff --git a/lib/data/doctree/collection/data/process_link_test.v b/lib/data/doctree/collection/data/process_link_test.v new file mode 100644 index 00000000..4aff89fc --- /dev/null +++ b/lib/data/doctree/collection/data/process_link_test.v @@ -0,0 +1,20 @@ +module data + +import freeflowuniverse.herolib.core.pathlib + +fn test_process_link() { + mut page1_path := pathlib.get_file(path: '/tmp/page1', create: true)! + page1_content := '[some page description](col1:page1.md)\n![some other page desc](col2:img.png)' + page1_path.write(page1_content)! + mut page1 := new_page(name: 'page1', path: page1_path, collection_name: 'col1')! + + paths := { + 'col1:page1.md': 'col1/page1.md' + 'col2:img.png': 'col2/img/img.png' + } + + notfound := page1.process_links(paths)! + assert notfound.len == 0 + + assert page1.get_markdown()! 
== '[some page description](./page1.md)\n![some other page desc](../col2/img/img.png)' +} diff --git a/lib/data/doctree/collection/data/process_macros.v b/lib/data/doctree/collection/data/process_macros.v new file mode 100644 index 00000000..d5fbe38f --- /dev/null +++ b/lib/data/doctree/collection/data/process_macros.v @@ -0,0 +1,24 @@ +module data + +import freeflowuniverse.herolib.core.playmacros +import freeflowuniverse.herolib.data.markdownparser.elements { Action } + +pub fn (mut page Page) process_macros() ! { + mut mydoc := page.doc()! + for mut element in mydoc.children_recursive() { + if mut element is Action { + if element.action.actiontype == .macro { + content := playmacros.play_macro(element.action)! + page.changed = true + if content.len > 0 { + element.content = content + } + } + } + } + + if page.changed { + page.reparse_doc(page.doc.markdown()!)! + page.process_macros()! + } +} diff --git a/lib/data/doctree/collection/error.v b/lib/data/doctree/collection/error.v new file mode 100644 index 00000000..763c2a11 --- /dev/null +++ b/lib/data/doctree/collection/error.v @@ -0,0 +1,64 @@ +module collection + +import freeflowuniverse.herolib.core.pathlib { Path } +import freeflowuniverse.herolib.ui.console + +pub enum CollectionErrorCat { + unknown + image_double + file_double + file_not_found + image_not_found + page_double + page_not_found + sidebar + circular_import + def + summary + include +} + +pub struct CollectionError { + Error +pub mut: + path Path + msg string + cat CollectionErrorCat +} + +pub fn (e CollectionError) msg() string { + return 'collection error:\n\tPath: ${e.path.path}\n\tError message: ${e.msg}\n\tCategory: ${e.cat}' +} + +pub fn (mut collection Collection) error(args CollectionError) ! 
{ + if collection.fail_on_error { + return args + } + + collection.errors << args + console.print_stderr(args.msg) +} + +pub struct ObjNotFound { + Error +pub: + name string + collection string + info string +} + +pub fn (err ObjNotFound) msg() string { + return 'Could not find object with name ${err.name} in collection ${err.collection}: ${err.info}' +} + +// write errors.md in the collection, this allows us to see what the errors are +pub fn (collection Collection) errors_report(dest_ string, errors []CollectionError) ! { + // console.print_debug("====== errors report: ${dest_} : ${collection.errors.len}\n${collection.errors}") + mut dest := pathlib.get_file(path: dest_, create: true)! + if errors.len == 0 { + dest.delete()! + return + } + c := $tmpl('template/errors.md') + dest.write(c)! +} diff --git a/lib/data/doctree/collection/export.v b/lib/data/doctree/collection/export.v new file mode 100644 index 00000000..16d90ae8 --- /dev/null +++ b/lib/data/doctree/collection/export.v @@ -0,0 +1,129 @@ +module collection + +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.core.texttools.regext +import os +import freeflowuniverse.herolib.data.doctree.pointer +import freeflowuniverse.herolib.data.doctree.collection.data + +@[params] +pub struct CollectionExportArgs { +pub mut: + destination pathlib.Path @[required] + file_paths map[string]string + reset bool = true + keep_structure bool // wether the structure of the src collection will be preserved or not + exclude_errors bool // wether error reporting should be exported as well + replacer ?regext.ReplaceInstructions +} + +pub fn (c Collection) export(args CollectionExportArgs) ! { + dir_src := pathlib.get_dir(path: args.destination.path + '/' + c.name, create: true)! + + mut cfile := pathlib.get_file(path: dir_src.path + '/.collection', create: true)! // will auto save it + cfile.write("name:${c.name} src:'${c.path.path}'")! 
+ + mut errors := c.errors.clone() + errors << export_pages(c.path.path, c.pages.values(), + dir_src: dir_src + file_paths: args.file_paths + keep_structure: args.keep_structure + replacer: args.replacer + )! + + c.export_files(dir_src, args.reset)! + c.export_images(dir_src, args.reset)! + c.export_linked_pages(dir_src)! + + if !args.exclude_errors { + c.errors_report('${dir_src.path}/errors.md', errors)! + } +} + +@[params] +pub struct ExportPagesArgs { +pub mut: + dir_src pathlib.Path + file_paths map[string]string + keep_structure bool // wether the structure of the src collection will be preserved or not + replacer ?regext.ReplaceInstructions +} + +// creates page file, processes page links, then writes page +fn export_pages(col_path string, pages []&data.Page, args ExportPagesArgs) ![]CollectionError { + mut errors := []CollectionError{} + for page in pages { + dest := if args.keep_structure { + relpath := page.path.path.trim_string_left(col_path) + '${args.dir_src.path}/${relpath}' + } else { + '${args.dir_src.path}/${page.name}.md' + } + + not_found := page.process_links(args.file_paths)! + + for pointer_str in not_found { + ptr := pointer.pointer_new(text: pointer_str)! + cat := match ptr.cat { + .page { + CollectionErrorCat.page_not_found + } + .image { + CollectionErrorCat.image_not_found + } + else { + CollectionErrorCat.file_not_found + } + } + errors << CollectionError{ + path: page.path + msg: '${ptr.cat} ${ptr.str()} not found' + cat: cat + } + } + + mut dest_path := pathlib.get_file(path: dest, create: true)! + mut markdown := page.get_markdown()! + if mut replacer := args.replacer { + markdown = replacer.replace(text: markdown)! + } + + dest_path.write(markdown)! + } + return errors +} + +fn (c Collection) export_files(dir_src pathlib.Path, reset bool) ! { + for _, file in c.files { + mut d := '${dir_src.path}/img/${file.name}.${file.ext}' + if reset || !os.exists(d) { + file.copy(d)! 
+ } + } +} + +fn (c Collection) export_images(dir_src pathlib.Path, reset bool) ! { + for _, file in c.images { + mut d := '${dir_src.path}/img/${file.name}.${file.ext}' + if reset || !os.exists(d) { + file.copy(d)! + } + } +} + +fn (c Collection) export_linked_pages(dir_src pathlib.Path) ! { + collection_linked_pages := c.get_collection_linked_pages()! + mut linked_pages_file := pathlib.get_file(path: dir_src.path + '/.linkedpages', create: true)! + linked_pages_file.write(collection_linked_pages.join_lines())! +} + +fn (c Collection) get_collection_linked_pages() ![]string { + mut linked_pages_set := map[string]bool{} + for _, page in c.pages { + for linked_page in page.get_linked_pages()! { + linked_pages_set[linked_page] = true + } + } + + return linked_pages_set.keys() +} diff --git a/lib/data/doctree/collection/export_test.v b/lib/data/doctree/collection/export_test.v new file mode 100644 index 00000000..4c539f69 --- /dev/null +++ b/lib/data/doctree/collection/export_test.v @@ -0,0 +1,47 @@ +module collection + +import freeflowuniverse.herolib.core.pathlib +import os + +const test_dir = '${os.dir(@FILE)}/testdata/export_test' +const tree_dir = '${test_dir}/mytree' +const export_dir = '${test_dir}/export' +const export_expected_dir = '${test_dir}/export_expected' + +fn testsuite_begin() { + pathlib.get_dir( + path: export_dir + empty: true + )! +} + +fn testsuite_end() { + pathlib.get_dir( + path: export_dir + empty: true + )! +} + +fn test_export() { + mut col := Collection{ + name: 'col1' + path: pathlib.get('${tree_dir}/dir1') + } + col.scan()! + + path_dest := pathlib.get_dir(path: '${export_dir}/src', create: true)! + col.export( + destination: path_dest + file_paths: { + 'col2:file3.md': 'col2/file3.md' + } + )! + + col1_path := '${export_dir}/src/col1' + expected_col1_path := '${export_expected_dir}/src/col1' + assert os.read_file('${col1_path}/.collection')! == os.read_file('${expected_col1_path}/.collection')! 
+ assert os.read_file('${col1_path}/.linkedpages')! == os.read_file('${expected_col1_path}/.linkedpages')! + assert os.read_file('${col1_path}/errors.md')! == os.read_file('${expected_col1_path}/errors.md')! + assert os.read_file('${col1_path}/file1.md')! == os.read_file('${expected_col1_path}/file1.md')! + assert os.read_file('${col1_path}/file2.md')! == os.read_file('${expected_col1_path}/file2.md')! +} diff --git a/lib/data/doctree/collection/getters.v b/lib/data/doctree/collection/getters.v new file mode 100644 index 00000000..f3b38438 --- /dev/null +++ b/lib/data/doctree/collection/getters.v @@ -0,0 +1,45 @@ +module collection + +import freeflowuniverse.herolib.data.doctree.collection.data + +// gets page with specified name from collection +pub fn (collection Collection) page_get(name string) !&data.Page { + return collection.pages[name] or { + return ObjNotFound{ + collection: collection.name + name: name + } + } +} + +pub fn (collection Collection) page_exists(name string) bool { + return name in collection.pages +} + +// gets image with specified name from collection +pub fn (collection Collection) get_image(name string) !&data.File { + return collection.images[name] or { + return ObjNotFound{ + collection: collection.name + name: name + } + } +} + +pub fn (collection Collection) image_exists(name string) bool { + return name in collection.images +} + +// gets file with specified name form collection +pub fn (collection Collection) get_file(name string) !&data.File { + return collection.files[name] or { + return ObjNotFound{ + collection: collection.name + name: name + } + } +} + +pub fn (collection Collection) file_exists(name string) bool { + return name in collection.files +} diff --git a/lib/data/doctree/collection/scan.v b/lib/data/doctree/collection/scan.v new file mode 100644 index 00000000..46b0a438 --- /dev/null +++ b/lib/data/doctree/collection/scan.v @@ -0,0 +1,250 @@ +module collection + +import 
freeflowuniverse.herolib.conversiontools.imagemagick +import freeflowuniverse.herolib.core.pathlib { Path } +import freeflowuniverse.herolib.data.doctree.pointer +import freeflowuniverse.herolib.data.doctree.collection.data + +// walk over one specific collection, find all files and pages +pub fn (mut collection Collection) scan() ! { + collection.scan_directory(mut collection.path)! +} + +// path is the full path +fn (mut collection Collection) scan_directory(mut p Path) ! { + mut entry_list := p.list(recursive: false)! + for mut entry in entry_list.paths { + if collection.should_skip_entry(mut entry) { + continue + } + + if !entry.exists() { + collection.error( + path: entry + msg: 'Entry ${entry.name()} does not exists' + cat: .unknown + )! + continue + } + + if mut entry.is_link() { + link_real_path := entry.realpath() // this is with the symlink resolved + collection_abs_path := collection.path.absolute() + if entry.extension_lower() == 'md' { + // means we are linking pages,this should not be done, need or change + collection.error( + path: entry + msg: 'Markdown files (${entry.path}) must not be linked' + cat: .unknown + ) or { return error('Failed to collection error ${entry.path}:\n${err}') } + continue + } + + if !link_real_path.starts_with(collection_abs_path) { + // means we are not in the collection so we need to copy + entry.unlink()! // will transform link to become the file or dir it points too + } else { + // TODO: why do we need this? + entry.relink()! 
// will check that the link is on the file with the shortest path + } + } + + if entry.is_dir() { + collection.scan_directory(mut entry) or { + return error('Failed to scan directory ${entry.path}:\n${err}') + } + continue + } + + if entry.extension_lower() == '' { + continue + } + + match entry.extension_lower() { + 'md' { + collection.add_page(mut entry) or { + return error('Failed to add page ${entry.path}:\n${err}') + } + } + else { + collection.file_image_remember(mut entry) or { + return error('Failed to remember image ${entry.path}:\n${err}') + } + } + } + } +} + +fn (mut c Collection) should_skip_entry(mut entry Path) bool { + entry_name := entry.name() + + // entries that start with . or _ are ignored + if entry_name.starts_with('.') || entry_name.starts_with('_') { + return true + } + + // TODO: why do we skip all these??? + + if entry.cat == .linkfile { + // means we link to a file which is in the folder, so can be loaded later, nothing to do here + return true + } + + if entry.is_dir() && entry_name.starts_with('gallery_') { + return true + } + + if entry_name.to_lower() == 'defs.md' { + return true + } + + if entry_name.contains('.test') { + return true + } + + if entry.path.starts_with('sidebar') { + return true + } + + return false +} + +// remember the file, so we know if we have duplicates +// also fixes the name +fn (mut collection Collection) file_image_remember(mut p Path) ! { + if collection.heal { + p.path_normalize()! + } + mut ptr := pointer.pointer_new( + collection: collection.name + text: p.name() + )! + + if ptr.is_file_video_html() { + collection.add_file(mut p)! + return + } + + if ptr.is_image() { + if collection.heal && imagemagick.installed() { + mut image := imagemagick.image_new(mut p) + + imagemagick.downsize(path: p.path)! + // after downsize it could be the path has been changed, need to set it on the file + if p.path != image.path.path { + p.path = image.path.path + p.check() + } + } + + // TODO: what are we trying to do? 
+ if !collection.image_exists(ptr.name) { + collection.add_image(mut p)! + } + + mut image_file := collection.get_image(ptr.name)! + mut image_file_path := image_file.path.path + if p.path.len <= image_file_path.len { + // nothing to be done, because the already existing file is shortest or equal + return + } + // file double is the one who already existed, need to change the path and can delete original + // TODO: this is clearly a bug + image_file.path = image_file.path + image_file.init()! + if collection.heal { + p.delete()! + } + + return + } + + return error('unsupported file type: ${ptr.extension}') +} + +// add a page to the collection, specify existing path +// the page will be parsed as markdown +pub fn (mut collection Collection) add_page(mut p Path) ! { + if collection.heal { + p.path_normalize() or { return error('Failed to normalize path ${p.path}\n${err}') } + } + + mut ptr := pointer.pointer_new( + collection: collection.name + text: p.name() + ) or { return error('Failed to get pointer for ${p.name()}\n${err}') } + + // in case heal is true pointer_new can normalize the path + if collection.page_exists(ptr.name) { + collection.error( + path: p + msg: 'Can\'t add ${p.path}: a page named ${ptr.name} already exists in the collection' + cat: .page_double + ) or { return error('Failed to report collection error for ${p.name()}\n${err}') } + return + } + + new_page := data.new_page( + name: ptr.name + path: p + collection_name: collection.name + ) or { return error('Failed to create new page for ${ptr.name}\n${err}') } + + collection.pages[ptr.name] = &new_page +} + +// add a file to the collection, specify existing path +pub fn (mut collection Collection) add_file(mut p Path) ! { + if collection.heal { + p.path_normalize()! + } + mut ptr := pointer.pointer_new( + collection: collection.name + text: p.name() + )! 
+ + // in case heal is true pointer_new can normalize the path + if collection.file_exists(ptr.name) { + collection.error( + path: p + msg: 'Can\'t add ${p.path}: a file named ${ptr.name} already exists in the collection' + cat: .file_double + )! + return + } + + mut new_file := data.new_file( + path: p + collection_path: collection.path + collection_name: collection.name + )! + collection.files[ptr.name] = &new_file +} + +// add a image to the collection, specify existing path +pub fn (mut collection Collection) add_image(mut p Path) ! { + if collection.heal { + p.path_normalize()! + } + mut ptr := pointer.pointer_new( + collection: collection.name + text: p.name() + )! + + // in case heal is true pointer_new can normalize the path + if collection.image_exists(ptr.name) { + collection.error( + path: p + msg: 'Can\'t add ${p.path}: a file named ${ptr.name} already exists in the collection' + cat: .image_double + )! + return + } + + mut image_file := &data.File{ + path: p + collection_path: collection.path + } + image_file.init()! + collection.images[ptr.name] = image_file +} diff --git a/lib/data/doctree/collection/scan_test.v b/lib/data/doctree/collection/scan_test.v new file mode 100644 index 00000000..7c20e837 --- /dev/null +++ b/lib/data/doctree/collection/scan_test.v @@ -0,0 +1,121 @@ +module collection + +import freeflowuniverse.herolib.core.pathlib + +fn test_add_page_success() { + /* + create collection + add page + check page in collection + */ + + mut col := Collection{ + name: 'col1' + path: pathlib.get('/tmp/col1') + } + + mut page1_path := pathlib.get_file(path: '/tmp/col1/page1.md', create: true)! + col.add_page(mut page1_path)! + assert col.page_exists('page1') + + mut page2_path := pathlib.get_file(path: '/tmp/col1/page:hamada.md', create: true)! + col.add_page(mut page2_path)! 
+ assert col.page_exists('page_hamada') +} + +fn test_add_page_already_exists() { + /* + create collection + add page with path /tmp/col1/page1.md + add page with path /tmp/col1/dir/page1.md + second add should fail and error reported to collection errors + */ + + mut col := Collection{ + name: 'col1' + path: pathlib.get('/tmp/col1') + } + + mut page1_path := pathlib.get_file(path: '/tmp/col1/page1.md', create: true)! + col.add_page(mut page1_path)! + assert col.page_exists('page1') + + mut page2_path := pathlib.get_file(path: '/tmp/col1/dir1/page1.md', create: true)! + col.add_page(mut page2_path)! + + assert col.errors.len == 1 + assert col.errors[0].msg == "Can't add /tmp/col1/dir1/page1.md: a page named page1 already exists in the collection" +} + +fn test_add_image_success() { + mut col := Collection{ + name: 'col1' + path: pathlib.get('/tmp/col1') + } + + mut page1_path := pathlib.get_file(path: '/tmp/col1/image.png', create: true)! + col.add_image(mut page1_path)! + assert col.image_exists('image') + + mut page2_path := pathlib.get_file(path: '/tmp/col1/image:2.jpg', create: true)! + col.add_image(mut page2_path)! + assert col.image_exists('image_2') +} + +fn test_add_file_success() { + mut col := Collection{ + name: 'col1' + path: pathlib.get('/tmp/col1') + } + + mut page1_path := pathlib.get_file(path: '/tmp/col1/file1.html', create: true)! + col.add_file(mut page1_path)! + assert col.file_exists('file1') + + mut page2_path := pathlib.get_file(path: '/tmp/col1/file:2.mp4', create: true)! + col.add_file(mut page2_path)! + assert col.file_exists('file_2') +} + +fn test_file_image_remember() { + mut col := Collection{ + name: 'col1' + path: pathlib.get('/tmp/col1') + } + + mut file1_path := pathlib.get_file(path: '/tmp/col1/image.png', create: true)! + col.file_image_remember(mut file1_path)! + assert col.image_exists('image') + + mut file2_path := pathlib.get_file(path: '/tmp/col1/file.html', create: true)! + col.file_image_remember(mut file2_path)! 
+ assert col.file_exists('file') + + mut file3_path := pathlib.get_file(path: '/tmp/col1/file2.unknownext', create: true)! + col.file_image_remember(mut file3_path)! + assert col.file_exists('file2') +} + +fn test_scan_directory() { + mut file := pathlib.get_file(path: '/tmp/mytree/dir1/.collection', create: true)! + file.write('name:col1')! + file = pathlib.get_file(path: '/tmp/mytree/dir1/file1.md', create: true)! + file = pathlib.get_file(path: '/tmp/mytree/dir1/file2.html', create: true)! + file = pathlib.get_file(path: '/tmp/mytree/dir1/file3.png', create: true)! + file = pathlib.get_file(path: '/tmp/mytree/dir1/dir2/file4.md', create: true)! + file = pathlib.get_file(path: '/tmp/mytree/dir1/.shouldbeskipped', create: true)! + file = pathlib.get_file(path: '/tmp/mytree/dir1/_shouldbeskipped', create: true)! + + mut col := Collection{ + name: 'col1' + path: pathlib.get('/tmp/mytree/dir1') + } + + col.scan()! + assert col.page_exists('file1') + assert col.file_exists('file2') + assert col.image_exists('file3') + assert col.page_exists('file4') + assert !col.file_exists('.shouldbeskipped') + assert !col.file_exists('_shouldbeskipped') +} diff --git a/lib/data/doctree/collection/template/errors.md b/lib/data/doctree/collection/template/errors.md new file mode 100644 index 00000000..b687f189 --- /dev/null +++ b/lib/data/doctree/collection/template/errors.md @@ -0,0 +1,11 @@ +# Errors + +@for error in collection.errors + +## @error.cat + +path: @error.path.path + +msg: @error.msg + +@end diff --git a/lib/data/doctree/collection/testdata/.gitignore b/lib/data/doctree/collection/testdata/.gitignore new file mode 100644 index 00000000..b22bcd28 --- /dev/null +++ b/lib/data/doctree/collection/testdata/.gitignore @@ -0,0 +1 @@ +export_test/export diff --git a/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/.collection b/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/.collection new file mode 100644 index 
00000000..5cc5d469 --- /dev/null +++ b/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/.collection @@ -0,0 +1 @@ +name:col1 src:'/Users/timurgordon/code/github/freeflowuniverse/crystallib/crystallib/data/doctree/collection/testdata/export_test/mytree/dir1' \ No newline at end of file diff --git a/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/.linkedpages b/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/.linkedpages new file mode 100644 index 00000000..a0814882 --- /dev/null +++ b/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/.linkedpages @@ -0,0 +1 @@ +col2:file3.md \ No newline at end of file diff --git a/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/errors.md b/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/errors.md new file mode 100644 index 00000000..802abedf --- /dev/null +++ b/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/errors.md @@ -0,0 +1,9 @@ +# Errors + + +## page_not_found + +path: /Users/timurgordon/code/github/freeflowuniverse/crystallib/crystallib/data/doctree/collection/testdata/export_test/mytree/dir1/dir2/file1.md + +msg: page col3:file5.md not found + diff --git a/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/file1.md b/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/file1.md new file mode 100644 index 00000000..2d8d0af0 --- /dev/null +++ b/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/file1.md @@ -0,0 +1 @@ +[not existent page](col3:file5.md) \ No newline at end of file diff --git a/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/file2.md b/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/file2.md new file mode 100644 index 00000000..71f48c9b --- /dev/null +++ 
b/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/file2.md @@ -0,0 +1 @@ +[some page](../col2/file3.md) \ No newline at end of file diff --git a/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/img/image.png b/lib/data/doctree/collection/testdata/export_test/export_expected/src/col1/img/image.png new file mode 100644 index 00000000..e69de29b diff --git a/lib/data/doctree/collection/testdata/export_test/mytree/dir1/.collection b/lib/data/doctree/collection/testdata/export_test/mytree/dir1/.collection new file mode 100644 index 00000000..bf5be63f --- /dev/null +++ b/lib/data/doctree/collection/testdata/export_test/mytree/dir1/.collection @@ -0,0 +1 @@ +name:col1 \ No newline at end of file diff --git a/lib/data/doctree/collection/testdata/export_test/mytree/dir1/dir2/file1.md b/lib/data/doctree/collection/testdata/export_test/mytree/dir1/dir2/file1.md new file mode 100644 index 00000000..2d8d0af0 --- /dev/null +++ b/lib/data/doctree/collection/testdata/export_test/mytree/dir1/dir2/file1.md @@ -0,0 +1 @@ +[not existent page](col3:file5.md) \ No newline at end of file diff --git a/lib/data/doctree/collection/testdata/export_test/mytree/dir1/file2.md b/lib/data/doctree/collection/testdata/export_test/mytree/dir1/file2.md new file mode 100644 index 00000000..09f30c32 --- /dev/null +++ b/lib/data/doctree/collection/testdata/export_test/mytree/dir1/file2.md @@ -0,0 +1 @@ +[some page](col2:file3.md) \ No newline at end of file diff --git a/lib/data/doctree/collection/testdata/export_test/mytree/dir1/image.png b/lib/data/doctree/collection/testdata/export_test/mytree/dir1/image.png new file mode 100644 index 00000000..e69de29b diff --git a/lib/data/doctree/error.v b/lib/data/doctree/error.v new file mode 100644 index 00000000..a8f306cc --- /dev/null +++ b/lib/data/doctree/error.v @@ -0,0 +1,45 @@ +module doctree + +import freeflowuniverse.herolib.data.doctree.pointer + +pub struct ObjNotFound { + Error +pub: + name string + 
collection string + info string +} + +pub fn (err ObjNotFound) msg() string { + return '"Could not find object with name ${err.name} in collection:${err.collection}.\n${err.info}' +} + +pub struct CollectionNotFound { + Error +pub: + pointer pointer.Pointer + msg string +} + +pub fn (err CollectionNotFound) msg() string { + if err.msg.len > 0 { + return err.msg + } + return '"Cannot find collection ${err.pointer} in tree.\n}' +} + +// the next is our custom error for objects not found +pub struct NoOrTooManyObjFound { + Error +pub: + tree &Tree + pointer pointer.Pointer + nr int +} + +pub fn (err NoOrTooManyObjFound) msg() string { + if err.nr > 0 { + return 'Too many obj found for ${err.tree.name}. Pointer: ${err.pointer}' + } + return 'No obj found for ${err.tree.name}. Pointer: ${err.pointer}' +} diff --git a/lib/data/doctree/export.v b/lib/data/doctree/export.v new file mode 100644 index 00000000..e45f0692 --- /dev/null +++ b/lib/data/doctree/export.v @@ -0,0 +1,92 @@ +module doctree + +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.data.doctree.collection { Collection } +import freeflowuniverse.herolib.data.doctree.collection.data +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.core.texttools.regext + +@[params] +pub struct TreeExportArgs { +pub mut: + destination string @[required] + reset bool = true + keep_structure bool // wether the structure of the src collection will be preserved or not + exclude_errors bool // wether error reporting should be exported as well + toreplace string + concurrent bool = true +} + +// export all collections to chosen directory . +// all names will be in name_fixed mode . +// all images in img/ +pub fn (mut tree Tree) export(args TreeExportArgs) ! { + console.print_header('export tree: name:${tree.name} to ${args.destination}') + if args.toreplace.len > 0 { + mut ri := regext.regex_instructions_new() + ri.add_from_text(args.toreplace)! 
+ tree.replacer = ri + } + + mut dest_path := pathlib.get_dir(path: args.destination, create: true)! + if args.reset { + dest_path.empty()! + } + + tree.process_defs()! + tree.process_includes()! + tree.process_actions_and_macros()! // process other actions and macros + + file_paths := tree.generate_paths()! + + console.print_green('exporting collections') + + if args.concurrent { + mut ths := []thread !{} + for _, col in tree.collections { + ths << spawn fn (col Collection, dest_path pathlib.Path, file_paths map[string]string, args TreeExportArgs) ! { + col.export( + destination: dest_path + file_paths: file_paths + reset: args.reset + keep_structure: args.keep_structure + exclude_errors: args.exclude_errors + // TODO: replacer: tree.replacer + )! + }(col, dest_path, file_paths, args) + } + for th in ths { + th.wait() or { panic(err) } + } + } else { + for _, mut col in tree.collections { + col.export( + destination: dest_path + file_paths: file_paths + reset: args.reset + keep_structure: args.keep_structure + exclude_errors: args.exclude_errors + replacer: tree.replacer + )! 
+ } + } +} + +fn (mut t Tree) generate_paths() !map[string]string { + mut paths := map[string]string{} + for _, col in t.collections { + for _, page in col.pages { + paths['${col.name}:${page.name}.md'] = '${col.name}/${page.name}.md' + } + + for _, image in col.images { + paths['${col.name}:${image.file_name()}'] = '${col.name}/img/${image.file_name()}' + } + + for _, file in col.files { + paths['${col.name}:${file.file_name()}'] = '${col.name}/img/${file.file_name()}' + } + } + + return paths +} diff --git a/lib/data/doctree/export_test.v b/lib/data/doctree/export_test.v new file mode 100644 index 00000000..e9439f74 --- /dev/null +++ b/lib/data/doctree/export_test.v @@ -0,0 +1,82 @@ +module doctree + +import freeflowuniverse.herolib.core.pathlib +import os + +const test_dir = '${os.dir(@FILE)}/testdata/export_test' +const tree_dir = '${test_dir}/mytree' +const export_dir = '${test_dir}/export' +const export_expected_dir = '${test_dir}/export_expected' + +fn testsuite_begin() { + pathlib.get_dir( + path: export_dir + empty: true + )! +} + +fn testsuite_end() { + pathlib.get_dir( + path: export_dir + empty: true + )! +} + +fn test_export() { + /* + tree_root/ + dir1/ + .collection + dir2/ + file1.md + file2.md + image.png + dir3/ + .collection + file3.md + + export: + export_dest/ + src/ + col1/ + .collection + .linkedpages + errors.md + img/ + image.png + file1.md + file2.md + col2/ + .collection + .linkedpages + file3.md + + .edit/ + + test: + - create tree + - add files/pages and collections to tree + - export tree + - ensure tree structure is valid + */ + + mut tree := new(name: 'mynewtree')! + tree.add_collection(path: '${tree_dir}/dir1', name: 'col1')! + tree.add_collection(path: '${tree_dir}/dir3', name: 'col2')! + + tree.export(destination: '${export_dir}')! + + col1_path := '${export_dir}/col1' + expected_col1_path := '${export_expected_dir}/col1' + assert os.read_file('${col1_path}/.collection')! == os.read_file('${expected_col1_path}/.collection')! 
+ assert os.read_file('${col1_path}/.linkedpages')! == os.read_file('${expected_col1_path}/.linkedpages')! + assert os.read_file('${col1_path}/errors.md')! == os.read_file('${expected_col1_path}/errors.md')! + assert os.read_file('${col1_path}/file1.md')! == os.read_file('${expected_col1_path}/file1.md')! + assert os.read_file('${col1_path}/file2.md')! == os.read_file('${expected_col1_path}/file2.md')! + + col2_path := '${export_dir}/col2' + expected_col2_path := '${export_expected_dir}/col2' + assert os.read_file('${col2_path}/.linkedpages')! == '' + assert os.read_file('${col2_path}/.collection')! == os.read_file('${expected_col2_path}/.collection')! + assert os.read_file('${col2_path}/file3.md')! == '' +} diff --git a/lib/data/doctree/getters.v b/lib/data/doctree/getters.v new file mode 100644 index 00000000..3b11987b --- /dev/null +++ b/lib/data/doctree/getters.v @@ -0,0 +1,72 @@ +module doctree + +import freeflowuniverse.herolib.data.doctree.collection +import freeflowuniverse.herolib.data.doctree.collection.data +import freeflowuniverse.herolib.data.doctree.pointer + +pub fn (tree Tree) get_collection(name string) !&collection.Collection { + col := tree.collections[name] or { return error('collection ${name} not found') } + + return col +} + +pub fn (tree Tree) get_collection_with_pointer(p pointer.Pointer) !&collection.Collection { + return tree.get_collection(p.collection) or { + return CollectionNotFound{ + pointer: p + msg: '${err}' + } + } +} + +// get the page from pointer string: $tree:$collection:$name or +// $collection:$name or $name +pub fn (tree Tree) page_get(pointerstr string) !&data.Page { + p := pointer.pointer_new(text: pointerstr)! + return tree.get_page_with_pointer(p)! +} + +fn (tree Tree) get_page_with_pointer(p pointer.Pointer) !&data.Page { + col := tree.get_collection_with_pointer(p)! + new_page := col.page_get(p.name)! 
+ + return new_page +} + +// get the page from pointer string: $tree:$collection:$name or +// $collection:$name or $name +pub fn (tree Tree) get_image(pointerstr string) !&data.File { + p := pointer.pointer_new(text: pointerstr)! + col := tree.get_collection_with_pointer(p)! + image := col.get_image(p.name)! + + return image +} + +// get the file from pointer string: $tree:$collection:$name or +// $collection:$name or $name +pub fn (tree Tree) get_file(pointerstr string) !&data.File { + p := pointer.pointer_new(text: pointerstr)! + col := tree.get_collection_with_pointer(p)! + new_file := col.get_file(p.name)! + + return new_file +} + +pub fn (tree Tree) page_exists(pointerstr string) bool { + p := pointer.pointer_new(text: pointerstr) or { return false } + col := tree.get_collection_with_pointer(p) or { return false } + return col.page_exists(p.name) +} + +pub fn (tree Tree) image_exists(pointerstr string) bool { + p := pointer.pointer_new(text: pointerstr) or { return false } + col := tree.get_collection_with_pointer(p) or { return false } + return col.image_exists(p.name) +} + +pub fn (tree Tree) file_exists(pointerstr string) bool { + p := pointer.pointer_new(text: pointerstr) or { return false } + col := tree.get_collection_with_pointer(p) or { return false } + return col.file_exists(p.name) +} diff --git a/lib/data/doctree/getters_test.v b/lib/data/doctree/getters_test.v new file mode 100644 index 00000000..a2b0cef1 --- /dev/null +++ b/lib/data/doctree/getters_test.v @@ -0,0 +1,35 @@ +module doctree + +import freeflowuniverse.herolib.core.pathlib +import os + +fn test_page_get() { + mut file1_path := pathlib.get_file(path: '/tmp/mytree/dir1/file2.md', create: true)! + file1_path.write('[some page](col2:file3.md)')! + mut file2_path := pathlib.get_file(path: '/tmp/mytree/dir1/image.png', create: true)! + mut file3_path := pathlib.get_file(path: '/tmp/mytree/dir1/dir2/file1.md', create: true)! + file3_path.write('[not existent page](col3:file5.md)')! 
+ mut file4_path := pathlib.get_file(path: '/tmp/mytree/dir1/.collection', create: true)! + file4_path.write('name:col1')! + + mut file5_path := pathlib.get_file(path: '/tmp/mytree/dir3/.collection', create: true)! + file5_path.write('name:col2')! + mut file6_path := pathlib.get_file(path: '/tmp/mytree/dir3/file3.md', create: true)! + + mut tree := new(name: 'mynewtree')! + tree.add_collection(path: file1_path.parent()!.path, name: 'col1')! + tree.add_collection(path: file6_path.parent()!.path, name: 'col2')! + + mut page := tree.page_get('col1:file2.md')! + assert page.name == 'file2' + + mut image := tree.get_image('col1:image.png')! + assert image.file_name() == 'image.png' + + // these page pointers are faulty + + apple_ptr_faulty0 := 'col3:file1.md' + if p := tree.page_get('col3:file1.md') { + assert false, 'this should fail: faulty pointer ${apple_ptr_faulty0}' + } +} diff --git a/lib/data/doctree/pointer/pointer.v b/lib/data/doctree/pointer/pointer.v new file mode 100644 index 00000000..a44df025 --- /dev/null +++ b/lib/data/doctree/pointer/pointer.v @@ -0,0 +1,106 @@ +module pointer + +import freeflowuniverse.herolib.core.texttools + +pub enum PointerCat { + page + image + video + file + html +} + +// links to a page, image or file +pub struct Pointer { +pub mut: + collection string // is the key of a collection + name string // is name without extension, all namefixed (lowercase...) + cat PointerCat + extension string // e.g. jpg +} + +@[params] +pub struct NewPointerArgs { +pub: + // pointer string (e.g. col:page.md) + text string + // used if text does not have collection information + collection string +} + +// will return a clean pointer to a page, image or file +//``` +// input is e.g. 
mycollection:filename.jpg +// or filename.jpg +// or mypage.md +// +//``` +pub fn pointer_new(args NewPointerArgs) !Pointer { + mut txt := args.text.trim_space().replace('\\', '/').replace('//', '/') + + // take colon parts out + split_colons := txt.split(':') + if split_colons.len > 2 { + return error("pointer can only have 1 ':' inside. ${txt}") + } + + mut collection_name := args.collection + mut file_name := '' + if split_colons.len == 2 { + collection_name = texttools.name_fix_keepext(split_colons[0].all_after_last('/')) + file_name = texttools.name_fix_keepext(split_colons[1].all_after_last('/')) + } + + if collection_name == '' { + return error('provided args do not have collection information: ${args}') + } + + if split_colons.len == 1 { + file_name = texttools.name_fix_keepext(split_colons[0].all_after_last('/')) + } + + split_file_name := file_name.split('.') + file_name_no_extension := split_file_name[0] + mut extension := 'md' + if split_file_name.len > 1 { + extension = split_file_name[1] + } + + mut file_cat := PointerCat.page + match extension { + 'md' { + file_cat = .page + } + 'jpg', 'jpeg', 'svg', 'gif', 'png' { + file_cat = .image + } + 'html' { + file_cat = .html + } + 'mp4', 'mov' { + file_cat = .video + } + else { + file_cat = .file + } + } + + return Pointer{ + name: file_name_no_extension + collection: collection_name + extension: extension + cat: file_cat + } +} + +pub fn (p Pointer) is_image() bool { + return p.cat == .image +} + +pub fn (p Pointer) is_file_video_html() bool { + return p.cat == .file || p.cat == .video || p.cat == .html +} + +pub fn (p Pointer) str() string { + return '${p.collection}:${p.name}.${p.extension}' +} diff --git a/lib/data/doctree/pointer/pointer_test.v b/lib/data/doctree/pointer/pointer_test.v new file mode 100644 index 00000000..6f6f5a1b --- /dev/null +++ b/lib/data/doctree/pointer/pointer_test.v @@ -0,0 +1,139 @@ +module pointer + +import freeflowuniverse.herolib.ui.console + +// import 
freeflowuniverse.herolib.core.pathlib +// import freeflowuniverse.herolib.core.texttools + +// fn test_pointerpath() { +// p1 := pointerpath_new(path: '/tmp/A file.md') or { panic(err) } +// console.print_debug(p1) +// p1_compare := PointerPath{ +// pointer: Pointer{ +// collection: '' +// name: 'a_file' +// cat: .page +// extension: 'md' +// error: '' +// state: .unknown +// } +// path: pathlib.Path{ +// path: '/tmp/A file.md' +// cat: .unknown +// exist: .no +// } +// } +// assert p1 == p1_compare + +// p2 := pointerpath_new(path: '/tmp/ss/A__file.jpeg') or { panic(err) } +// p2_compare := PointerPath{ +// pointer: Pointer{ +// collection: '' +// name: 'a_file' +// cat: .image +// extension: 'jpeg' +// error: '' +// state: .unknown +// } +// path: pathlib.Path{ +// path: '/tmp/A__file.jpeg' +// cat: .unknown +// exist: .no +// } +// } + +// // assert p2==p2_compare +// } + +fn test_pointer() { + // p := pointer_new('Page__.md') or { panic(err) } + // console.print_debug(p) + // p_compare := Pointer{ + // collection: '' + // name: 'page' + // cat: .page + // extension: 'md' + // error: '' + // state: .unknown + // } + // assert p == p_compare +} + +// fn test_pointer2() { +// p := pointer_new('collectionAAA:Page__.md') or { panic(err) } +// console.print_debug(p) +// p_compare := Pointer{ +// name: 'page' +// cat: .page +// extension: 'md' +// collection: 'collectionaaa' +// error: '' +// state: .unknown +// } +// assert p == p_compare +// } + +// fn test_pointer3() { +// p := pointer_new('MY_Book:collection_AAA:Page__.md') or { panic(err) } +// console.print_debug(p) +// p_compare := Pointer{ +// name: 'page' +// cat: .page +// extension: 'md' +// collection: 'collection_aaa' +// book: 'my_book' +// error: '' +// state: .unknown +// } +// assert p == p_compare +// } + +// fn test_pointer4() { +// p := pointer_new('MY_Book:collection_AAA:aImage__.jpg') or { panic(err) } +// console.print_debug(p) +// p_compare := Pointer{ +// name: 'aimage' +// cat: .image +// 
extension: 'jpg' +// collection: 'collection_aaa' +// book: 'my_book' +// error: '' +// state: .unknown +// } +// assert p == p_compare +// } + +// fn test_pointer5() { +// p := pointer_new('MY_Book::aImage__.jpg') or { panic(err) } +// console.print_debug(p) +// p_compare := Pointer{ +// name: 'aimage' +// cat: .image +// extension: 'jpg' +// collection: '' +// book: 'my_book' +// error: '' +// state: .unknown +// } +// assert p == p_compare +// } + +// fn test_pointer6() { +// p := pointer_new('MY_Book::aImage__.jpg') or { panic(err) } +// assert p.str() == 'my_book::aimage.jpg' + +// p2 := pointer_new('ddd:aImage__.jpg') or { panic(err) } +// assert p2.str() == 'ddd:aimage.jpg' + +// p3 := pointer_new('aImage__.jpg') or { panic(err) } +// assert p3.str() == 'aimage.jpg' + +// i := 40 +// p4 := pointer_new('collectionAAA:Page__${i}.md') or { panic(err) } +// assert p4.str() == 'collectionaaa:page_40.md' +// } + +// fn test_pointer7() { +// r := texttools.name_fix_keepext('page_40.md') +// assert r == 'page_40.md' +// } diff --git a/lib/data/doctree/process_defs.v b/lib/data/doctree/process_defs.v new file mode 100644 index 00000000..9003bb39 --- /dev/null +++ b/lib/data/doctree/process_defs.v @@ -0,0 +1,83 @@ +module doctree + +import freeflowuniverse.herolib.data.doctree.collection { CollectionError } +import freeflowuniverse.herolib.data.doctree.collection.data +import freeflowuniverse.herolib.ui.console + +// process definitions (!!wiki.def actions, elements.Def elements) +// this must be done before processing includes. +pub fn (mut tree Tree) process_defs() ! { + console.print_green('Processing tree defs') + + for _, mut col in tree.collections { + for _, mut page in col.pages { + mut p := page + mut c := col + tree.process_page_def_actions(mut p, mut c)! + } + } + + for _, mut col in tree.collections { + for _, mut page in mut col.pages { + mut p := page + errors := tree.replace_page_defs_with_links(mut p)! 
+ // report accrued errors when replacing defs with links + for err in errors { + col.error(err)! + } + } + } +} + +fn (mut tree Tree) process_page_def_actions(mut p data.Page, mut c collection.Collection) ! { + def_actions := p.get_def_actions()! + if def_actions.len > 1 { + c.error( + path: p.path + msg: 'a page can have at most one def action' + cat: .def + )! + } + + if def_actions.len == 0 { + return + } + + aliases := p.process_def_action(def_actions[0].id)! + for alias in aliases { + if alias in tree.defs { + c.error( + path: p.path + msg: 'alias ${alias} is already used' + cat: .def + )! + continue + } + + tree.defs[alias] = p + } +} + +fn (mut tree Tree) replace_page_defs_with_links(mut p data.Page) ![]CollectionError { + defs := p.get_def_names()! + + mut def_data := map[string][]string{} + mut errors := []CollectionError{} + for def in defs { + if referenced_page := tree.defs[def] { + def_data[def] = [referenced_page.key(), referenced_page.alias] + } else { + // accrue errors that occur + errors << CollectionError{ + path: p.path + msg: 'def ${def} is not defined' + cat: .def + } + continue + } + } + + p.set_def_links(def_data)! + // return accrued collection errors for collection to handle + return errors +} diff --git a/lib/data/doctree/process_defs_test.v b/lib/data/doctree/process_defs_test.v new file mode 100644 index 00000000..a19aec9f --- /dev/null +++ b/lib/data/doctree/process_defs_test.v @@ -0,0 +1,26 @@ +module doctree + +import os +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.data.doctree.collection.data + +const test_dir = '${os.dir(@FILE)}/testdata/process_defs_test' + +fn test_process_defs() { + /* + 1- use files with def actions and elements from testdata + 2- create tree + 3- invoke process defs + 4- check pages markdown + */ + mut tree := new(name: 'mynewtree')! + tree.add_collection(path: '${test_dir}/col1', name: 'col1')! + tree.add_collection(path: '${test_dir}/col2', name: 'col2')! 
+ tree.process_defs()! + + mut page1 := tree.page_get('col1:page1.md')! + assert page1.get_markdown()! == '' + + mut page2 := tree.page_get('col2:page2.md')! + assert page2.get_markdown()! == '[about us](col1:page1.md)\n[about us](col1:page1.md)\n[about us](col1:page1.md)' +} diff --git a/lib/data/doctree/process_includes.v b/lib/data/doctree/process_includes.v new file mode 100644 index 00000000..1fbe8bcb --- /dev/null +++ b/lib/data/doctree/process_includes.v @@ -0,0 +1,153 @@ +module doctree + +// import freeflowuniverse.herolib.data.doctree.collection.data +import freeflowuniverse.herolib.data.doctree.pointer +import freeflowuniverse.herolib.data.doctree.collection { CollectionError } +import freeflowuniverse.herolib.data.doctree.collection.data +import freeflowuniverse.herolib.core.playbook +import freeflowuniverse.herolib.ui.console + +pub fn (mut tree Tree) process_includes() ! { + console.print_green('Processing page includes') + graph := tree.generate_pages_graph()! + + mut indegree := map[string]int{} + for _, c in tree.collections { + for _, p in c.pages { + indegree[p.key()] = 0 + } + } + + for _, children in graph { + for child in children.keys() { + indegree[child] += 1 + } + } + + mut queue := []string{} + for key, degree in indegree { + if degree == 0 { + queue << key + } + } + + for queue.len > 0 { + front := queue[0] + queue = queue[1..] + + mut page := tree.page_get(front)! + mut col := tree.get_collection(page.collection_name)! + + // process page + for element in page.get_include_actions()! { + page_pointer := get_include_page_pointer(col.name, element.action) or { continue } + + mut include_page := tree.get_page_with_pointer(page_pointer) or { continue } + + page.set_element_content_no_reparse(element.id, include_page.get_markdown()!)! 
+ } + + // update indegree + for child in graph[page.key()].keys() { + indegree[child] -= 1 + if indegree[child] == 0 { + queue << child + } + } + } + + for key, degree in indegree { + if degree == 0 { + continue + } + + mut page := tree.page_get(key)! + mut col := tree.get_collection(page.collection_name)! + col.error( + path: page.path + msg: 'page ${key} is in an include cycle' + cat: .circular_import + )! + } +} + +fn get_include_page_pointer(collection_name string, a playbook.Action) !pointer.Pointer { + mut page_pointer_str := a.params.get('page')! + + // handle includes + mut page_pointer := pointer.pointer_new(collection: collection_name, text: page_pointer_str)! + if page_pointer.collection == '' { + page_pointer.collection = collection_name + } + + return page_pointer +} + +fn (mut tree Tree) generate_pages_graph() !map[string]map[string]bool { + mut graph := map[string]map[string]bool{} + mut ths := []thread !map[string]map[string]bool{} + for _, mut col in tree.collections { + ths << spawn fn (mut tree Tree, col &collection.Collection) !map[string]map[string]bool { + return tree.collection_page_graph(col)! + }(mut tree, col) + } + for th in ths { + col_graph := th.wait()! + for k, v in col_graph { + graph[k] = v.clone() + } + } + return graph +} + +fn (mut tree Tree) collection_page_graph(col &collection.Collection) !map[string]map[string]bool { + mut graph := map[string]map[string]bool{} + _ := []thread !GraphResponse{} + for _, page in col.pages { + resp := tree.generate_page_graph(page, col.name)! + for k, v in resp.graph { + graph[k] = v.clone() + } + } + + return graph +} + +pub struct GraphResponse { +pub: + graph map[string]map[string]bool + errors []CollectionError +} + +fn (tree Tree) generate_page_graph(current_page &data.Page, col_name string) !GraphResponse { + mut graph := map[string]map[string]bool{} + mut errors := []CollectionError{} + + include_action_elements := current_page.get_include_actions()! 
+ for element in include_action_elements { + page_pointer := get_include_page_pointer(col_name, element.action) or { + errors << CollectionError{ + path: current_page.path + msg: 'failed to get page pointer for include ${element.action.heroscript()}: ${err}' + cat: .include + } + continue + } + + include_page := tree.get_page_with_pointer(page_pointer) or { + // TODO + // col.error( + // path: current_page.path + // msg: 'failed to get page for include ${element.action.heroscript()}: ${err.msg()}' + // cat: .include + // )! + continue + } + + graph[include_page.key()][current_page.key()] = true + } + return GraphResponse{ + graph: graph + errors: errors + } +} diff --git a/lib/data/doctree/process_includes_test.v b/lib/data/doctree/process_includes_test.v new file mode 100644 index 00000000..6b44d989 --- /dev/null +++ b/lib/data/doctree/process_includes_test.v @@ -0,0 +1,56 @@ +module doctree + +import os +import freeflowuniverse.herolib.core.pathlib + +const test_dir = '${os.dir(@FILE)}/testdata/process_includes_test' + +fn test_process_includes() { + /* + 1- use 3 pages in testdata: + - page1 includes page2 + - page2 includes page3 + 2- create tree + 3- invoke process_includes + 4- check pages markdown + */ + mut tree := new(name: 'mynewtree')! + tree.add_collection(path: '${test_dir}/col1', name: 'col1')! + tree.add_collection(path: '${test_dir}/col2', name: 'col2')! + tree.process_includes()! + + mut page1 := tree.page_get('col1:page1.md')! + mut page2 := tree.page_get('col2:page2.md')! + mut page3 := tree.page_get('col2:page3.md')! + + assert page1.get_markdown()! == 'page3 content' + assert page2.get_markdown()! == 'page3 content' + assert page3.get_markdown()! == 'page3 content' +} + +fn test_generate_pages_graph() { + /* + 1- use 3 pages in testdata: + - page1 includes page2 + - page2 includes page3 + 2- create tree + 3- invoke generate_pages_graph + 4- check graph + */ + mut tree := new(name: 'mynewtree')! 
+ tree.add_collection(path: '${test_dir}/col1', name: 'col1')! + tree.add_collection(path: '${test_dir}/col2', name: 'col2')! + mut page1 := tree.page_get('col1:page1.md')! + mut page2 := tree.page_get('col2:page2.md')! + mut page3 := tree.page_get('col2:page3.md')! + + graph := tree.generate_pages_graph()! + assert graph == { + '${page3.key()}': { + '${page2.key()}': true + } + '${page2.key()}': { + '${page1.key()}': true + } + } +} diff --git a/lib/data/doctree/process_macros.v b/lib/data/doctree/process_macros.v new file mode 100644 index 00000000..a6cdecba --- /dev/null +++ b/lib/data/doctree/process_macros.v @@ -0,0 +1,54 @@ +module doctree + +import freeflowuniverse.herolib.data.doctree.collection { Collection } +import freeflowuniverse.herolib.data.markdownparser.elements +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.core.playbook +import freeflowuniverse.herolib.core.playmacros + +@[params] +pub struct MacroGetArgs { +pub mut: + actor string + name string +} + +// adds all action elements to a playbook, calls playmacros.play on the playbook, +// which processes the macros, then reprocesses every page with the actions' new content +pub fn (mut tree Tree) process_actions_and_macros() ! { + console.print_green('Processing actions and macros') + + // first process the generic actions, which can be executed as is + mut plbook := playbook.new()! + for element_action in tree.get_actions()! { + plbook.actions << &element_action.action + } + + playmacros.play_actions(mut plbook)! + + // now get specific actions which need to return content + mut ths := []thread !{} + for _, mut col in tree.collections { + ths << spawn fn (mut col Collection) ! { + for _, mut page in col.pages { + page.process_macros()! // calls play_macro in playmacros... + } + }(mut col) + } + + for th in ths { + th.wait()! 
+ } +} + +fn (mut tree Tree) get_actions(args_ MacroGetArgs) ![]&elements.Action { + // console.print_green('get actions for tree: name:${tree.name}') + mut res := []&elements.Action{} + for _, mut collection in tree.collections { + // console.print_green("export collection: name:${name}") + for _, mut page in collection.pages { + res << page.get_all_actions()! + } + } + return res +} diff --git a/lib/data/doctree/scan.v b/lib/data/doctree/scan.v new file mode 100644 index 00000000..6d628d56 --- /dev/null +++ b/lib/data/doctree/scan.v @@ -0,0 +1,244 @@ +module doctree + +import freeflowuniverse.herolib.core.pathlib { Path } +import freeflowuniverse.herolib.data.paramsparser +import freeflowuniverse.herolib.data.doctree.collection { Collection } +import freeflowuniverse.herolib.develop.gittools +import os +import freeflowuniverse.herolib.core.texttools + +@[params] +pub struct TreeScannerArgs { +pub mut: + path string + heal bool = true // healing means we fix images + git_url string + git_reset bool + git_root string + git_pull bool + load bool = true // means we scan automatically the added collection +} + +// walk over directory find dirs with .book or .collection inside and add to the tree . +// a path will not be added unless .collection is in the path of a collection dir or .book in a book +// ``` +// path string +// heal bool // healing means we fix images, if selected will automatically load, remove stale links +// git_url string +// git_reset bool +// git_root string +// git_pull bool +// ``` +pub fn (mut tree Tree) scan(args_ TreeScannerArgs) ! { + mut args := args_ + if args.git_url.len > 0 { + mut gs := gittools.get(coderoot: args.git_root)! + mut repo := gs.get_repo( + url: args.git_url + pull: args.git_pull + reset: args.git_reset + reload: false + )! + args.path = repo.get_path_of_url(args.git_url)! + } + + if args.path.len == 0 { + return error('Path needs to be provided.') + } + + mut path := pathlib.get_dir(path: args.path)! 
+ if !path.is_dir() { + return error('path is not a directory') + } + + if path.file_exists('.site') { + move_site_to_collection(mut path)! + } + + if is_collection_dir(path) { + collection_name := get_collection_name(mut path)! + + tree.add_collection( + path: path.path + name: collection_name + heal: args.heal + load: true + fail_on_error: tree.fail_on_error + )! + + return + } + + mut entries := path.list(recursive: false) or { + return error('cannot list: ${path.path} \n${error}') + } + + for mut entry in entries.paths { + if !entry.is_dir() || is_ignored_dir(entry)! { + continue + } + + tree.scan(path: entry.path, heal: args.heal, load: args.load) or { + return error('failed to scan ${entry.path} :${err}') + } + } +} + +pub fn (mut tree Tree) scan_concurrent(args_ TreeScannerArgs) ! { + mut args := args_ + if args.git_url.len > 0 { + mut gs := gittools.get(coderoot: args.git_root)! + mut repo := gs.get_repo( + url: args.git_url + pull: args.git_pull + reset: args.git_reset + reload: false + )! + args.path = repo.get_path_of_url(args.git_url)! + } + + if args.path.len == 0 { + return error('Path needs to be provided.') + } + + path := pathlib.get_dir(path: args.path)! + mut collection_paths := scan_helper(path)! + mut threads := []thread !Collection{} + for mut col_path in collection_paths { + mut col_name := get_collection_name(mut col_path)! + col_name = texttools.name_fix(col_name) + + if col_name in tree.collections { + if tree.fail_on_error { + return error('Collection with name ${col_name} already exits') + } + // TODO: handle error + continue + } + + threads << spawn fn (args CollectionNewArgs) !Collection { + mut args_ := collection.CollectionNewArgs{ + name: args.name + path: args.path + heal: args.heal + load: args.load + fail_on_error: args.fail_on_error + } + return collection.new(args_)! 
+ }( + name: col_name + path: col_path.path + heal: args.heal + fail_on_error: tree.fail_on_error + ) + } + + for _, t in threads { + new_collection := t.wait() or { return error('Error executing thread: ${err}') } + tree.collections[new_collection.name] = &new_collection + } +} + +// internal function that recursively returns +// the paths of collections in a given path +fn scan_helper(path_ Path) ![]Path { + mut path := path_ + if !path.is_dir() { + return error('path is not a directory') + } + + if path.file_exists('.site') { + move_site_to_collection(mut path)! + } + + if is_collection_dir(path) { + return [path] + } + + mut entries := path.list(recursive: false) or { + return error('cannot list: ${path.path} \n${error}') + } + + mut paths := []Path{} + for mut entry in entries.paths { + if !entry.is_dir() || is_ignored_dir(entry)! { + continue + } + + paths << scan_helper(entry) or { return error('failed to scan ${entry.path} :${err}') } + } + return paths +} + +@[params] +pub struct CollectionNewArgs { +mut: + name string @[required] + path string @[required] + heal bool = true // healing means we fix images, if selected will automatically load, remove stale links + load bool = true + fail_on_error bool +} + +// get a new collection +pub fn (mut tree Tree) add_collection(args_ CollectionNewArgs) ! { + mut args := args_ + args.name = texttools.name_fix(args.name) + + if args.name in tree.collections { + if args.fail_on_error { + return error('Collection with name ${args.name} already exits') + } + return + } + + mut pp := pathlib.get_dir(path: args.path)! // will raise error if path doesn't exist + mut new_collection := collection.new( + name: args.name + path: pp.path + heal: args.heal + fail_on_error: args.fail_on_error + )! 
+ + tree.collections[new_collection.name] = &new_collection +} + +// returns true if directory should be ignored while scanning +fn is_ignored_dir(path_ Path) !bool { + mut path := path_ + if !path.is_dir() { + return error('path is not a directory') + } + name := path.name() + return name.starts_with('.') || name.starts_with('_') +} + +// gets collection name from .collection file +// if no name param, uses the directory name +fn get_collection_name(mut path Path) !string { + mut collection_name := path.name() + mut filepath := path.file_get('.collection')! + + // now we found a collection we need to add + content := filepath.read()! + if content.trim_space() != '' { + // means there are params in there + mut params_ := paramsparser.parse(content)! + if params_.exists('name') { + collection_name = params_.get('name')! + } + } + + return collection_name +} + +fn is_collection_dir(path Path) bool { + return path.file_exists('.collection') +} + +// moves .site file to .collection file +fn move_site_to_collection(mut path Path) ! { + collectionfilepath1 := path.extend_file('.site')! + collectionfilepath2 := path.extend_file('.collection')! + os.mv(collectionfilepath1.path, collectionfilepath2.path)! 
+} diff --git a/lib/data/doctree/testdata/.gitignore b/lib/data/doctree/testdata/.gitignore new file mode 100644 index 00000000..6eaa8eff --- /dev/null +++ b/lib/data/doctree/testdata/.gitignore @@ -0,0 +1 @@ +export_test/export \ No newline at end of file diff --git a/lib/data/doctree/testdata/actions/.collection b/lib/data/doctree/testdata/actions/.collection new file mode 100644 index 00000000..d86fceff --- /dev/null +++ b/lib/data/doctree/testdata/actions/.collection @@ -0,0 +1 @@ +actions \ No newline at end of file diff --git a/lib/data/doctree/testdata/actions/actions1.md b/lib/data/doctree/testdata/actions/actions1.md new file mode 100644 index 00000000..fdbf8df2 --- /dev/null +++ b/lib/data/doctree/testdata/actions/actions1.md @@ -0,0 +1,7 @@ +# actions 2 + +```js +!!payment3.add account:something description:'TF Wallet for TFT' + name:'TF Wallet' //comment for name + blockchain:stellar //holochain maybe? +``` diff --git a/lib/data/doctree/testdata/actions/functionality/actions2.md b/lib/data/doctree/testdata/actions/functionality/actions2.md new file mode 100644 index 00000000..9f254988 --- /dev/null +++ b/lib/data/doctree/testdata/actions/functionality/actions2.md @@ -0,0 +1,15 @@ +# web3gw_proxy server functionality + +- [stellar](./stellar.md) + + +```js +!!payment.add account:something description:'TF Wallet for TFT' person:fatayera preferred:false + name:'TF Wallet' //comment for name + blockchain:stellar //holochain maybe? 
+``` + +!!payment.add2 + name:'TF Wallet' //comment for name + blockchain:stellar + diff --git a/lib/data/doctree/testdata/export_test/export_expected/col1/.collection b/lib/data/doctree/testdata/export_test/export_expected/col1/.collection new file mode 100644 index 00000000..872927d0 --- /dev/null +++ b/lib/data/doctree/testdata/export_test/export_expected/col1/.collection @@ -0,0 +1 @@ +name:col1 src:'/Users/timurgordon/code/github/freeflowuniverse/crystallib/crystallib/data/doctree/testdata/export_test/mytree/dir1' \ No newline at end of file diff --git a/lib/data/doctree/testdata/export_test/export_expected/col1/.linkedpages b/lib/data/doctree/testdata/export_test/export_expected/col1/.linkedpages new file mode 100644 index 00000000..a0814882 --- /dev/null +++ b/lib/data/doctree/testdata/export_test/export_expected/col1/.linkedpages @@ -0,0 +1 @@ +col2:file3.md \ No newline at end of file diff --git a/lib/data/doctree/testdata/export_test/export_expected/col1/errors.md b/lib/data/doctree/testdata/export_test/export_expected/col1/errors.md new file mode 100644 index 00000000..1716bdb6 --- /dev/null +++ b/lib/data/doctree/testdata/export_test/export_expected/col1/errors.md @@ -0,0 +1,9 @@ +# Errors + + +## page_not_found + +path: /Users/timurgordon/code/github/freeflowuniverse/crystallib/crystallib/data/doctree/testdata/export_test/mytree/dir1/dir2/file1.md + +msg: page col3:file5.md not found + diff --git a/lib/data/doctree/testdata/export_test/export_expected/col1/file1.md b/lib/data/doctree/testdata/export_test/export_expected/col1/file1.md new file mode 100644 index 00000000..2d8d0af0 --- /dev/null +++ b/lib/data/doctree/testdata/export_test/export_expected/col1/file1.md @@ -0,0 +1 @@ +[not existent page](col3:file5.md) \ No newline at end of file diff --git a/lib/data/doctree/testdata/export_test/export_expected/col1/file2.md b/lib/data/doctree/testdata/export_test/export_expected/col1/file2.md new file mode 100644 index 00000000..71f48c9b --- /dev/null +++ 
b/lib/data/doctree/testdata/export_test/export_expected/col1/file2.md @@ -0,0 +1 @@ +[some page](../col2/file3.md) \ No newline at end of file diff --git a/lib/data/doctree/testdata/export_test/export_expected/col1/img/image.png b/lib/data/doctree/testdata/export_test/export_expected/col1/img/image.png new file mode 100644 index 00000000..e69de29b diff --git a/lib/data/doctree/testdata/export_test/export_expected/col2/.collection b/lib/data/doctree/testdata/export_test/export_expected/col2/.collection new file mode 100644 index 00000000..27908d9b --- /dev/null +++ b/lib/data/doctree/testdata/export_test/export_expected/col2/.collection @@ -0,0 +1 @@ +name:col2 src:'/Users/timurgordon/code/github/freeflowuniverse/crystallib/crystallib/data/doctree/testdata/export_test/mytree/dir3' \ No newline at end of file diff --git a/lib/data/doctree/testdata/export_test/export_expected/col2/.linkedpages b/lib/data/doctree/testdata/export_test/export_expected/col2/.linkedpages new file mode 100644 index 00000000..e69de29b diff --git a/lib/data/doctree/testdata/export_test/export_expected/col2/file3.md b/lib/data/doctree/testdata/export_test/export_expected/col2/file3.md new file mode 100644 index 00000000..e69de29b diff --git a/lib/data/doctree/testdata/export_test/mytree/dir1/.collection b/lib/data/doctree/testdata/export_test/mytree/dir1/.collection new file mode 100644 index 00000000..bf5be63f --- /dev/null +++ b/lib/data/doctree/testdata/export_test/mytree/dir1/.collection @@ -0,0 +1 @@ +name:col1 \ No newline at end of file diff --git a/lib/data/doctree/testdata/export_test/mytree/dir1/dir2/file1.md b/lib/data/doctree/testdata/export_test/mytree/dir1/dir2/file1.md new file mode 100644 index 00000000..2d8d0af0 --- /dev/null +++ b/lib/data/doctree/testdata/export_test/mytree/dir1/dir2/file1.md @@ -0,0 +1 @@ +[not existent page](col3:file5.md) \ No newline at end of file diff --git a/lib/data/doctree/testdata/export_test/mytree/dir1/file2.md 
b/lib/data/doctree/testdata/export_test/mytree/dir1/file2.md new file mode 100644 index 00000000..09f30c32 --- /dev/null +++ b/lib/data/doctree/testdata/export_test/mytree/dir1/file2.md @@ -0,0 +1 @@ +[some page](col2:file3.md) \ No newline at end of file diff --git a/lib/data/doctree/testdata/export_test/mytree/dir1/image.png b/lib/data/doctree/testdata/export_test/mytree/dir1/image.png new file mode 100644 index 00000000..e69de29b diff --git a/lib/data/doctree/testdata/export_test/mytree/dir3/.collection b/lib/data/doctree/testdata/export_test/mytree/dir3/.collection new file mode 100644 index 00000000..13847f7e --- /dev/null +++ b/lib/data/doctree/testdata/export_test/mytree/dir3/.collection @@ -0,0 +1 @@ +name:col2 \ No newline at end of file diff --git a/lib/data/doctree/testdata/export_test/mytree/dir3/file3.md b/lib/data/doctree/testdata/export_test/mytree/dir3/file3.md new file mode 100644 index 00000000..e69de29b diff --git a/lib/data/doctree/testdata/process_defs_test/col1/page1.md b/lib/data/doctree/testdata/process_defs_test/col1/page1.md new file mode 100644 index 00000000..23a012ad --- /dev/null +++ b/lib/data/doctree/testdata/process_defs_test/col1/page1.md @@ -0,0 +1 @@ +!!wiki.def alias:'tf-dev,cloud-dev,threefold-dev' name:'about us' \ No newline at end of file diff --git a/lib/data/doctree/testdata/process_defs_test/col2/page2.md b/lib/data/doctree/testdata/process_defs_test/col2/page2.md new file mode 100644 index 00000000..85e06ea4 --- /dev/null +++ b/lib/data/doctree/testdata/process_defs_test/col2/page2.md @@ -0,0 +1,3 @@ +*TFDEV +*CLOUDDEV +*THREEFOLDDEV \ No newline at end of file diff --git a/lib/data/doctree/testdata/process_includes_test/col1/page1.md b/lib/data/doctree/testdata/process_includes_test/col1/page1.md new file mode 100644 index 00000000..1f3fb4b1 --- /dev/null +++ b/lib/data/doctree/testdata/process_includes_test/col1/page1.md @@ -0,0 +1 @@ +!!wiki.include page:'col2:page2.md' \ No newline at end of file diff --git 
a/lib/data/doctree/testdata/process_includes_test/col2/page2.md b/lib/data/doctree/testdata/process_includes_test/col2/page2.md new file mode 100644 index 00000000..81e7d4c3 --- /dev/null +++ b/lib/data/doctree/testdata/process_includes_test/col2/page2.md @@ -0,0 +1 @@ +!!wiki.include page:'col2:page3.md' \ No newline at end of file diff --git a/lib/data/doctree/testdata/process_includes_test/col2/page3.md b/lib/data/doctree/testdata/process_includes_test/col2/page3.md new file mode 100644 index 00000000..1c9c075b --- /dev/null +++ b/lib/data/doctree/testdata/process_includes_test/col2/page3.md @@ -0,0 +1 @@ +page3 content \ No newline at end of file diff --git a/lib/data/doctree/testdata/rpc/.collection b/lib/data/doctree/testdata/rpc/.collection new file mode 100644 index 00000000..0157676e --- /dev/null +++ b/lib/data/doctree/testdata/rpc/.collection @@ -0,0 +1 @@ +name:rpc \ No newline at end of file diff --git a/lib/data/doctree/testdata/rpc/eth.md b/lib/data/doctree/testdata/rpc/eth.md new file mode 100644 index 00000000..a49f253b --- /dev/null +++ b/lib/data/doctree/testdata/rpc/eth.md @@ -0,0 +1,130 @@ +# Eth + +TODO + +## Remote Procedure Calls + +In this section you'll find the json rpc requests and responses of all the remote procedure calls. The fields params can contain text formated as . These represent json objects that are defined further down the document in section [Models](#models). + +### Load + +****Request**** + +``` +{ + "jsonrpc": "2.0", + "method": "eth.Load", + "params": { + "url": string, + "secret": string + }, + "id": "" +} +``` + +**Response** + +``` +{ + "jsonrpc": "2.0", + "result": "", + "id": "" +} +``` + +### Balance + +****Request**** + +``` +{ + "jsonrpc": "2.0", + "method": "eth.Balance", + "params": "
", + "id": "" +} +``` + +**Response** + +``` +{ + "jsonrpc": "2.0", + "result": i64, + "id": "" +} +``` + +### Height + +****Request**** + +``` +{ + "jsonrpc": "2.0", + "method": "eth.Height", + "params": "", + "id": "" +} +``` + +**Response** + +``` +{ + "jsonrpc": "2.0", + "result": u64, + "id": "" +} +``` + +### Transfer + +Transaction id is returned + +****Request**** + +``` +{ + "jsonrpc": "2.0", + "method": "eth.transfer", + "params": { + "destination": string, + "amount": u64 + }, + "id": "" +} +``` + +**Response** + +``` +{ + "jsonrpc": "2.0", + "result": string, + "id": "" +} +``` + +### EthTftSpendingAllowance + +****Request**** + +```json +{ + "jsonrpc": "2.0", + "method": "eth.EthTftSpendingAllowance", + "params": "", + "id": "" +} +``` + +**Response** + +```json +{ + "jsonrpc": "2.0", + "result": string, + "id": "" +} +``` diff --git a/lib/data/doctree/testdata/rpc/rpc.md b/lib/data/doctree/testdata/rpc/rpc.md new file mode 100644 index 00000000..10aebaa2 --- /dev/null +++ b/lib/data/doctree/testdata/rpc/rpc.md @@ -0,0 +1,12 @@ +# RPC methods + +You can find OpenRPC descriptions of RPC methods in the playground pages below: +- [All clients](playground/?schemaUrl=../openrpc/openrpc.json) +- [Bitcoin](playground/?schemaUrl=../openrpc/btc/openrpc.json) +- [Ethereum](playground/?schemaUrl=../openrpc/eth/openrpc.json) +- [Explorer](playground/?schemaUrl=../openrpc/explorer/openrpc.json) +- [IPFS](playground/?schemaUrl=../openrpc/ipfs/openrpc.json) +- [Nostr](playground/?schemaUrl=../openrpc/nostr/openrpc.json) +- [Stellar](playground/?schemaUrl=../openrpc/stellar/openrpc.json) +- [TFChain](playground/?schemaUrl=../openrpc/tfchain/openrpc.json) +- [TFGrid](playground/?schemaUrl=../openrpc/tfgrid/openrpc.json) \ No newline at end of file diff --git a/lib/data/doctree/testdata/rpc/stellar.md b/lib/data/doctree/testdata/rpc/stellar.md new file mode 100644 index 00000000..3edb9af4 --- /dev/null +++ b/lib/data/doctree/testdata/rpc/stellar.md @@ -0,0 +1,342 @@ + 
+# Stellar + +## Creating an account + +Json RPC 2.0 request: + +- network: the network you want to create the account on (public or testnet) + + +```json +{ + "jsonrpc":"2.0", + "method":"stellar.CreateAccount", + "params":[ + "public" + ], + "id":"a_unique_id_here" +} +``` + +Json RPC 2.0 response: + +- seed: the seed of the account that was generated + +```json +{ + "jsonrpc":"2.0", + "result":"seed_will_be_here", + "id":"id_send_in_request" +} +``` + +## Loading your key + +Json RPC 2.0 request: + +- network: the network you want to connect to (public or testnet) +- secret: the secret of your stellar account + +```json +{ + "jsonrpc":"2.0", + "method":"stellar.Load", + "params":[{ + "network":"public", + "secret":"SA33FBB67CPIMHWTZYVR489Q6UKHFUPLKTLPG9BKAVG89I2J3SZNMW21" + }], + "id":"a_unique_id_here" +} +``` + +Json RPC 2.0 response will be empty: + +```json +{ + "jsonrpc":"2.0", + "id":"id_send_in_request" +} +``` + +## Asking your public address + +Json RPC 2.0 request (no parameters): + +```json +{ + "jsonrpc":"2.0", + "method":"stellar.Address", + "params":[], + "id":"a_unique_id_here" +} +``` + +Json RPC 2.0 response: + +- address: the public address of the loaded account + +```json +{ + "jsonrpc":"2.0", + "result":"public_address_will_be_here", + "id":"id_send_in_request" +} +``` + +## Transfer tokens from one account to another + +Json RPC 2.0 request: + +- amount: the amount of tft to transfer (string) +- destination: the public address that should receive the tokens +- memo: the memo to add to the transaction + +```json +{ + "jsonrpc":"2.0", + "method":"stellar.Transfer", + "params":[{ + "amount": "1520.0", + "destination": "some_public_stellar_address", + "memo": "your_memo_comes_here" + }], + "id":"a_unique_id_here" +} +``` + +Json RPC 2.0 response: + +- hash: the hash of the transaction that was executed + +```json +{ + "jsonrpc":"2.0", + "result":"hash_will_be_here", + "id":"id_send_in_request" +} +``` + +## Swap tokens from one asset to the 
other + +Json RPC 2.0 request: + +- amount: the amount of tokens to swap (string) +- source_asset: the source asset to swap (should be tft or xlm) +- destination_asset: the asset to swap to (should be tft or xlm) + +```json +{ + "jsonrpc":"2.0", + "method":"stellar.Swap", + "params":[{ + "amount": "5.0", + "source_asset": "tft", + "destination_asset": "xlm" + }], + "id":"a_unique_id_here" +} +``` + +Json RPC 2.0 response: + +- hash: the hash of the transaction that was executed + +```json +{ + "jsonrpc":"2.0", + "result":"hash_will_be_here", + "id":"id_send_in_request" +} +``` + +## Get the balance of an account + +Json RPC 2.0 request: + +- address: the public address of an account to get the balance from (leave empty for your own account) + +```json +{ + "jsonrpc":"2.0", + "method":"stellar.Balance", + "params":[ + "you_can_pass_public_address_here" + ], + "id":"a_unique_id_here" +} +``` + +Json RPC 2.0 response: + +- balance: the balance of the account (string) + +```json +{ + "jsonrpc":"2.0", + "result":"balance_will_be_here", + "id":"id_send_in_request" +} +``` + +## Bridge stellar tft to ethereum + +Json RPC 2.0 request: + +- amount: the amount of tft to bridge (string) +- destination: the ethereum public address that should receive the tokens + +```json +{ + "jsonrpc":"2.0", + "method":"stellar.BridgeToEth", + "params":[{ + "amount": "298.0", + "destination": "eth_public_address_here" + ], + "id":"a_unique_id_here" +} +``` + +Json RPC 2.0 response: + +- hash: the hash of the transaction that was executed + +```json +{ + "jsonrpc":"2.0", + "result":"hash_will_be_here", + "id":"id_send_in_request" +} +``` + +## Bridge stellar tft to tfchain + +Json RPC 2.0 request: + +- amount: the amount of tft on stellar to bridge to tfchain +- twin_id: the twin id that should receive the tokens + +```json +{ + "jsonrpc":"2.0", + "method":"stellar.BridgeToTfchain", + "params":[{ + "amount": "21.0", + "twin_id": 122 + ], + "id":"a_unique_id_here" +} +``` + +Json RPC 2.0 
response: + +- hash: the hash of the transaction that was executed + +```json +{ + "jsonrpc":"2.0", + "result":"hash_will_be_here", + "id":"id_send_in_request" +} +``` + +## Waiting for a transaction on the Ethereum bridge + +Json RPC 2.0 request: + +- memo: the memo to look for in the transactions + +```json +{ + "jsonrpc":"2.0", + "method":"stellar.AwaitTransactionOnEthBridge", + "params":[ + "provide_the_memo_here" + ], + "id":"a_unique_id_here" +} +``` + +Json RPC 2.0 response: empty result + +```json +{ + "jsonrpc":"2.0", + "id":"id_send_in_request" +} +``` + +## Listing transactions + +Json RPC 2.0 request: + +- account: a public stellar address to get the transactions for (leave empty for your own account) +- limit: how many transactions you want to get (default 10) +- include_failed: include the failed transactions in the result (default is false) +- cursor: where to start listing the transactions from (default is the top) +- ascending: whether to sort the transactions in ascending order (default is false, so in descending order) + +```json +{ + "jsonrpc":"2.0", + "method":"stellar.Transactions", + "params":[{ + "account": "some_account_here_or_leave_empty", + "limit": 12, + "include_failed": false, + "cursor": "leave_empty_for_top", + "ascending": false + ], + "id":"a_unique_id_here" +} +``` + +Json RPC 2.0 response: + +- a list of transactions (see [here](https://github.com/stellar/go/blob/01c7aa30745a56d7ffcc75bb8ededd38ba582a58/protocols/horizon/main.go#L484) for the definition of a transaction) + +```json +{ + "jsonrpc":"2.0", + "result":[ + { + "id": "some_id", + // many more attributes + } + ], + "id":"id_send_in_request" +} +``` + +## Showing the data related to an account + +Json RPC 2.0 request: + +- address: the stellar public address to get the account data for (leave empty for your own account) + +```json +{ + "jsonrpc":"2.0", + "method":"stellar.AccountData", + "params":[ + "account_or_leave_empty_for_your_account" + ], + 
"id":"a_unique_id_here" +} +``` + +Json RPC 2.0 response: + +- account data (see [here](https://github.com/stellar/go/blob/01c7aa30745a56d7ffcc75bb8ededd38ba582a58/protocols/horizon/main.go#L33) for the definition of account data) + +```json +{ + "jsonrpc":"2.0", + "result": { + "id": "some_id", + // many more attributes + }, + "id":"id_send_in_request" +} +``` diff --git a/lib/data/doctree/testdata/rpc/tfchain.md b/lib/data/doctree/testdata/rpc/tfchain.md new file mode 100644 index 00000000..0ca187ca --- /dev/null +++ b/lib/data/doctree/testdata/rpc/tfchain.md @@ -0,0 +1,251 @@ + +# TFChain +TODO: intro + +## Remote Procedure Calls + +### Load + +****Request**** +``` +{ + "jsonrpc": "2.0", + "method": "tfchain.Load", + "params": { + "passphrase": string, + "network": string + }, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": "", + "id": "" +} +``` + +### Transfer + +****Request**** +``` +{ + "jsonrpc": "2.0", + "method": "tfchain.Transfer", + "params": { + "destination": string, + "memo": string, + "amount": u64 + }, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": "", + "id": "" +} +``` + +### Balance + +****Request**** +``` +{ + "jsonrpc": "2.0", + "method": "tfchain.Balance", + "params": "
", + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": i64, + "id": "" +} +``` + +### GetTwin + +****Request**** +``` +{ + "jsonrpc": "2.0", + "method": "tfchain.TwinGet", + "params": , + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### GetNode + +****Request**** +``` +{ + "jsonrpc": "2.0", + "method": "tfchain.NodeGet", + "params": , + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### GetFarm + +****Request**** +``` +{ + "jsonrpc": "2.0", + "method": "tfchain.FarmGet", + "params": , + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +## Models + +### MODEL_TWIN +``` +{ + "id": u32, + "account": string, + "relay": string, + "entities": [MODEL_ENTITYPROOF], + "pk": string +} +``` + +### MODEL_ENTITYPROOF +``` +{ + "entityid": u32, + "signature": string +} +``` + +### MODEL_NODE +``` +{ + "id": u32, + "farmid": u32, + "twinid": u32, + "resources": , + "location": , + "public_config": { + "ip": , + "ip6": , + "domain": string + }, + "created": u64, + "farmingpolicy": u32, + "interfaces": [MODEL_INTERFACE], + "certification": "string", + "secureboot": bool, + "virtualized": bool, + "serial": string, + "connectionprice": u32 +} +``` +### MODEL_RESOURCES +``` +{ + "hru": u64, + "sru": u64, + "cru": u64, + "mru": u64 +} +``` +### MODEL_LOCATION + +``` +{ + "city": string, + "country": string, + "latitude": string, + "longitude": string +} +``` + +### MODEL_IP + +``` +{ + "ip": string, + "gw": string +} +``` +### MODEL_INTERFACE + +``` +{ + "name": string, + "mac": string, + "ips": [string] +} +``` + +### MODEL_FARM + +``` +{ + "id": u32, + "name": string, + "twinid": u32, + "pricingpolicyid": u32, + "certificationtype": string, + "publicips": [MODEL_PUBLICIP], + "dedicated": bool, + "farmingpolicylimit": +} +``` + +### MODEL_PUBLICIP + +``` +{ + "ip": string, + "gateway": string, + "contractid": u64 +} +``` + 
+### MODEL_FARMINGPOLICYLIMIT +``` +{ + "farmingpolicyid": u32, + "cu": u64, + "su": u64, + "end": u64, + "nodecount": u32, + "nodecertification": bool +} +``` \ No newline at end of file diff --git a/lib/data/doctree/testdata/rpc/tfgrid.md b/lib/data/doctree/testdata/rpc/tfgrid.md new file mode 100644 index 00000000..6b0e5260 --- /dev/null +++ b/lib/data/doctree/testdata/rpc/tfgrid.md @@ -0,0 +1,651 @@ + +# TFgrid +TFgrid is one of the clients that web3 proxy opens up. Below you can find the remote procedure calls it can handle. We use the json rpc 2.0 protocol. All possible json rpc request are shown below with the corresponding response that the web3 proxy will send back. + +## Remote Procedure Calls +In this section you'll find the json rpc requests and responses of all the remote procedure calls. The fields params can contain text formated as . These represent json objects that are defined further down the document in section [Models](#models). + +### Login +This rpc is used to login. It requires you to pass your menmonic and the network you want to deploy on. + +****Request**** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.Load", + "params": [ + "", + "" + ], + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": "", + "id": "" +} +``` + +### Gateway Name Deploy +This rpc allows you to deploy a gateway name. It requires you to pass the information required for a gateway name. Upon success it will return you that same information extended with some extra useful data. + +****Request**** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.gateway.name.deploy", + "params": [], + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### GatewayNameDelete +This rpc allows you to delete a deployed gateway name. You should send the name in the params field. The operation succeeded if you receive a valid json rpc 2.0 result. 
+ +****Request**** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.GatewayNameDelete", + "params": [""], + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": "", + "id": "" +} +``` + +### GatewayNameGet +You can always ask for information on a gateway name via the rpc shown below. Just set the name in the params field of the json rpc 2.0 request. The response will contain the requested information. + +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.GatewayNameGet", + "params": "", + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### GatewayFQDNDeploy +If you wish for a fully qualified domain name you should use the rpc shown below. It requires the data shown in [this model](#model_gatewayfqdn) and returns that same data augmented with [some extra fields](#model_gatewayfqdnresult). + +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.GatewayFQDNDeploy", + "params": , + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### GatewayFQDNDelete +You can delete your requested fully qualified domain name with the rpc shown below. Just fill in the name in the json rpc request. + +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.GatewayFQDNDelete", + "params": "", + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": "", + "id": "" +} +``` + +### GatewayFQDNGet +Once created you can always retrieve the [data](#model_gatewayfqdnresult) related to your fully qualified domain name via the rpc method *tfgrid.GatewayFQDNget*. 
+ +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.GatewayFQDNGet", + "params": "", + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### K8sDeploy + + +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.K8sDeploy", + "params": , + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### K8sDelete +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.K8sDelete", + "params": string, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": "", + "id": "" +} +``` + +### K8sGet +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.K8sGet", + "params": string, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### K8sGet +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.K8sAddnode", + "params": { + "name": string, + "node": + }, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + + +### K8sRemoveNode +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.K8sRemovenode", + "params": { + "name": string, + "nodename": string + }, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### MachinesDeploy +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.MachinesDeploy", + "params": , + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### MachinesDelete +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.MachinesDelete", + "params": string, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": "", + "id": "" +} +``` + +### MachinesGet +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.MachinesGet", + "params": string, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### MachineAdd +**Request** +``` +{ + "jsonrpc": "2.0", + "method": 
"tfgrid.MachinesAdd", + "params": { + "project_name": string, + "machine": + }, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### MachineRemove +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.MachinesRemove", + "params": { + "machine_name": string, + "project_name": string + }, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### DeploymentDeploy +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.DeploymentCreate", + "params": , + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### DeploymentUpdate +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.DeploymentUpdate", + "params": , + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### DeploymentCancel +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.DeploymentCancel", + "params": i64, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": "", + "id": "" +} +``` + +### DeploymentGet +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.DeploymentGet", + "params": i64, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### ZDBDeploy +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.ZdbDeploy", + "params": , + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +### ZDBDelete +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.ZdbDelete", + "params": string, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": "", + "id": "" +} +``` + +### ZDBGet +**Request** +``` +{ + "jsonrpc": "2.0", + "method": "tfgrid.ZdbGet", + "params": string, + "id": "" +} +``` +**Response** +``` +{ + "jsonrpc": "2.0", + "result": , + "id": "" +} +``` + +## Models + +### MODEL_CREDENTIALS +``` +{ + "mnemonics": string, + "network": string +} +``` + +### 
MODEL_GATEWAYNAME +``` +{ + "nodeid": U32, + "name": string, + "backends": [string], + "tlspassthrough": bool, + "description": string +} +``` + +### MODEL_GATEWAYNAMERESULT +``` +{ + "nodeid": U32, + "name": string, + "backends": [string], + "tlspassthrough": bool, + "description": string, + "fqdn": string, + "namecontractid": u64, + "contractid": u64 +} +``` + +### MODEL_GATEWAYFQDN +``` +{ + "nodeid": U32, + "backends": [string], + "fqdn": string, + "name": string, + "tlspassthrough": bool, + "description": string +} +``` + +### MODEL_GATEWAYFQDNRESULT +``` +{ + "nodeid": U32, + "backends": [string], + "fqdn": string, + "name": string, + "tlspassthrough": bool, + "description": string, + "contractid": u64 +} +``` + +### MODEL_K8SCLUSTER +``` +{ + "name": string, + "master": MODEL_K8SNODE, + "workers": [MODEL_K8SNODE], + "token": string, + "ssh_key": string, +} +``` +### MODEL_K8SCLUSTER_RESULT +``` +{ + "name": string, + "master": MODEL_K8SNODE, + "workers": [MODEL_K8SNODE], + "token": string, + "ssh_key": string, + "node_deployment_id": map[u32]u64 +} +``` +### MODEL_K8SNODE +``` +{ + "name": string, + "nodeid": string, + "public_ip": bool, + "public_ip6": bool, + "planetary": bool, + "flist": string, + "cpu": u32, + "memory": u32, //in MBs + "disk_size": u32 // in GB, monted in /mydisk +} +``` +### MODEL_K8SNODERESULT +``` +{ + "name": string, + "nodeid": string, + "public_ip": bool, + "public_ip6": bool, + "planetary": bool, + "flist": string, + "cpu": u32, + "memory": u32, //in MBs + "disk_size": u32, // in GB, monted in /mydisk + "computed_ip4": string, + "computed_ip6": string, + "wg_ip": string, + "planetary_ip": string +} +``` + +### MODEL_DEPLOYMENT +``` +{ + "version": int, + "twin_id": u32, + "contract_id": u64, + "expiration": i64, + "metadata": string, + "description": string, + "workloads": [MODEL_WORKLOAD], + "signature_requirement": SignatureRequirement +} +``` + +### MODEL_ZDB +``` +{ + "node_id": u32, + "name": string, + "password": string, + 
"public": bool, + "size": u32, // in GB + "description": string, + "mode": string +} +``` + +### MODEL_ZDBRESULT +``` +{ + "node_id": u32, + "name": string, + "password": string, + "public": bool, + "size": u32, // in GB + "description": string, + "mode": string, + "namespace": string, + "port": u32, + "ips": [string] +} +``` \ No newline at end of file diff --git a/lib/data/doctree/testdata/tree_test/fruits/.collection b/lib/data/doctree/testdata/tree_test/fruits/.collection new file mode 100644 index 00000000..87eb9c37 --- /dev/null +++ b/lib/data/doctree/testdata/tree_test/fruits/.collection @@ -0,0 +1 @@ +name:fruits \ No newline at end of file diff --git a/lib/data/doctree/testdata/tree_test/fruits/apple.md b/lib/data/doctree/testdata/tree_test/fruits/apple.md new file mode 100644 index 00000000..b8b94fb0 --- /dev/null +++ b/lib/data/doctree/testdata/tree_test/fruits/apple.md @@ -0,0 +1,9 @@ +# Apple + +An apple a day keeps the doctor away! + +## Fun Fact + +The apple can be the same color as the following: +- Red as [strawberry](berries/strawberry.md) +- Green as [broccoli](vegetables/tomato.md) diff --git a/lib/data/doctree/testdata/tree_test/fruits/banana.txt b/lib/data/doctree/testdata/tree_test/fruits/banana.txt new file mode 100644 index 00000000..e69de29b diff --git a/lib/data/doctree/testdata/tree_test/fruits/berries/img/digital_twin.png b/lib/data/doctree/testdata/tree_test/fruits/berries/img/digital_twin.png new file mode 100644 index 0000000000000000000000000000000000000000..ced6c2720d253368eb4453a620d20041f1ec2480 GIT binary patch literal 125580 zcmd42WmlHn+BQsgNlAAJ2ugQ~bVzp#h;(;%hk$fQOLzB4cSv`ql!VkXueH{7kM}3M z3_sZ8WFGt6_N|U^B?U=z6e1KTC@6GksSnCfP_SK4P%tD&FyMC@gq4_~p!}euKZvTj z>7Qf{tI=VXl5Ie2#5%jEZ)o_hSX~1>;=L`Mp>)(j)?t_cQTac~p$z9)6D*?o|% zeILWz$ll(}CW|#Am`_Zea0D+DY9D$$L`3y>aAn0>SS%be$A69@tf2Xxm&Jfb!F=8c zeITW-sYvy^B<|5)^RIWNVv`tyT&N?ivW&4TmL(??(F)ZI0?b3-WN2EQdJzn=}jTGl#+%8=ls;hstR*~63~Ao0rSEYUWGJadqRC> 
zZsuAc%fif3(;SeH487`?@j@6R(*CRBZh}t~v&^JjnJIj`t~r==-A%*`<$n)tVV60% z9&@!ms+n$j;7@3ma5Qy98X{b6?ey*`7LS*amHo2qUrMIZ26d=tpS|z1f?`^@oPt=b zxc`1imlIdQqs2R*YjxpH}7ekFl>#?T?Q1Ud5`o7H6^&zFNC9esjt#Ra$GGS zud*?)E&elAF8sefNpunh*%TuZu^G%@)I_rsvMrRCm+P``3S3s-|p+T`f?FgPTnMw9u_p^=+nQrkx>ts@WW6tQ@I39qvd$6=nTpGv@Bk&~9W z(Z|Z_q`B16#pPz6n;v;9ki!4tf38C#EQ~ddo&aQHQuDEYZ7L{1eLmk|dr|=X_vmY~ zFB5{(v+eBYfsUCtBPNmB$1NX=(J&MlIAR%`Vp!A79AIE!&el7#lae6&R|S!4{eB|H z@OyCBmiGav7hhc5mkmGY|M1og_fd*z(FjTNOiMFwDP-;DU|wghXDqL&&E)CcxExZ6 zniopl_>X zho1>+9L3D+KJOiMMy7us7=JE<1-5qxM~^A?L3&QFqQ^|CH46*PN=r)v0|WUjq$eKv@2+7M2)vA0hR?mOb?JbEw8jrpa;kA>eZF~sCu z&8!t=K1-?^MM_#G`52y=wBGrSQW>}x2v*D5g)zbV>+5QkRaU0-PEJo>{`qryaPU1f zHQp!Hx*smO{7_uUMPAKCq4^8`#VF+z4aWq1Iafn5ZFxwyf+gn>yZjK*6_8a&<73AleT38T}k_xi11$=ZmLHPe1XG1g# zQ2`rYC_{-FX;w*6VXixvX_@gi4j9Oaq}aHbI0hbt#PIvgv>G%?jrHgXa_o!vM)%{$ zpD8AlV2Z(iTO1h~SyFPCNUumH;I^^9&&|nMpJ|Ov-t$VNxPXlp-FxTz7Jg9>TVR(J z=dBZb(N56~vO^5=FpG4hH``b*>Lc6F+G7ud{zAloA`WjhJAVW^dU`!Qz08b^>+5SU zQOicou}f z?t_Das_JU@>w`~DPS=l*ZP&+?9aR4wu_yE?>|AXGbAqcJ_~B(p`Y_GjD^x*4Rmof1 zQdFF(xhAB#_Fi>`O{(}Xn$f~phC$_a+{=QrXe+^Mov~j?og+xRRL)Lj@?c?Mp-3^i zu%N)Ym+&lSSdI<{PP><0Wt{TUIOS%%bw;v5vW|m<)h8VnJp<`uirJPj@8YWP@9K-B z88a4mYWZ8qf4dmf7isUq^#s;n->RLgtgI|7d~a>#S8c1PP_Sg$`U>;EjnIlq7Wl9SjRLH`}zB4B}>n6#|K++CrdTz zTknF6h|-Sh1cFRx4U5gp%!r7He*E~sq<4i1HU&zfk)I9v*&gZx0Ot7VdAZyp^2MRTmF>kXytMVNo?es&^y)7hk5j){%(_ z*lCngt@uJE({gB&>IJL9>to&7!H3;QSzKInGdO7=!a+k1xw~@%jH37-tg4Z9(D!C_ zPm?geDn`3m;BX8RBPS^mEzdnun)$@&m@uAZY+tCgqA;CrVD^V}&BV`BvzwlboZ z-hc}#e6AjL!OksY;%Oac7SI@?C2YHDkf`$>^$f8KJ3Ks$Pf4N2LRvOY=B`?ckALM= z8i!2uQ17}&z03n&^g-w`FJb6xOv<*Bno^@7(@ zSkC?Yw1RcvII;fv$aVNKva-L|JJ*+%7~9b=>iC6cIchvbx;hZHL>nAle=ZBlq>uaZ6 zC5Cpw3$_3I^inD+Gwq(YjEsyTcV#~!c1W`q@BAwZhg$-Kf93 zynK3ka>L{xaFpN=7l05Iy$@xm>!>bqnKo5<(FCWRo4q1N@|!3ZYoO?330komc`A|G z!ebYoSu~kNv6+p{*=nonrkbv46w>V-tHDQTU_*T!lgls zon2kcO-(Hf{c3zK=F}FfTvS!RB_v=6i$VLV-wwCg;uRgGm6CryPSj7-T2))+UE$(c zQBBiB8h~%WBv;6p&;jEgg<>q5#;;Sl}DDIW|Nvfid6?*3~$^5S@5*gw7 z-7u3%Z;Bz-H~<0Y-eZ3a;=%O 
ztGlnS&u?A<%BjIdB-v3+%YH^gNj=lqsoKXdCWL#IPh8a=v$(Z*%Z{w>5s5ChnI(J| znbta3-n+7Grq_`eKUrO`j^qew44WugT~qV!WUT4R+9-w- zqO|X+-n~m)i*s{-w9I#~TrV&%Flsc*T)8{ouiFa>;NkdaX@iB?byhj8u)bX_@y`e~ z&{Qd5TW&J%b&$!fjL&B z!T~YDqQVq9!tW%784$^wA8YHLYOezw-cqBsDKphtwmuB0uv7VG;2^=}K)$-Z-DY?2 zYpB?~CYSg1?aa)~bUJ!~S|lYVW@lq#d;i{uEmdIC>S4o8MxsbZxmnAt!^aR`Rrhr% z;rHlyZB`1&0anx_8-fILVm;gs#c#jRCKP=~MMb3;Q2JW@K-9~=kpVDcrP;A4H&@hy z*qHv`+fj+4#OLYx@MooBes=cy`f#SJON1*O{ocJNwfc+M{JVk%^ZX`vil2q_1#Rrr zb#HU)I1Adu>uOwQM4adC=)b_AVY3Aa1WE=7p}afq(6o0gGwo#V159kQet3BJm!sja zsi`TPFP>WvY}{HKiU1iaHMO{c0t-7k4u(DhZjF175lXwaU0%!A;;Iti+7|xiHtOm+ z&XT&fa+%g4|ykceZgp3%m6q8l_eHuS`OvA#%-S5sfCnw{0UR~h* z3$l7B#rc($l{GbN{vtV37PYmtJ%p5QxprnDtXSv4>~a{f<2zl{a9k-9+o*;!RC_TS z-A&xv@r1}C1ENfn0kOwWfb{`s#L3Bth=?f4T@br%V(;XXk(QRA@PuUtWw>^C0glqH zS+{IBF9J14te8jfU9X|zAu~o~uerS&Nmg;TJHpSAlZ*pgQSS>B8FgKnElqy7F8=M! z>ef~ruw_K2B^V(# z=Se-48=Lm^>uW~A>nZW1ERyVH8t2cI668yxgv~7OA8AJXJ-}p`QNg#o)5RV z5fBjg`1tZwxRFQ%NRr~?<2yQhjEpD~hx)z-X_$V7Tu8pN?RE$hMboz*gkr?p7k>M; zyt9+sQjm*FT~`Y4Gj%+baZZR z?y8~OTuP$o{!Kd(m+lY-x`#S$CAT?k=^6>D0^Rob|pri!(d4F^BmEU_d zHW=+8e_j>*rkuz^8AK~+`n-Z{9s-iC#R4G+uM&BQjC zyW!bg#I8NHi;O_iG?kaX<>ApJhW$1_GJJ3@C=MR zlEoJh%FWG9@El=C6%`c`Nkb9B8ymSZf|8T*O(zU!|Fs0V{ho=4i7VgRFMB|x2Vtjy zAxlW;%IeMqX9W~zXh_J>@o{2e;-VFoP#Pg(KCiHFXI>u62Y?ARm6e#P#AO$|BPm>5 zTsb3gtsig_~p!^6X; zr>9R&tS-<4A{UkzCNlm39W!Bm;o zgkVHel#~dFiJ2G}-f?r!&&>rFEiEmHii!dT*pDN ze|=piLg9xyNQj3AKZZPe`#AS6v}O)$ZRG)F7X2}Ct!gn0zNNEMn3}qGe*P8^Ex?-3 zrzr;hkqIru#bFb#-yVz^R|6T?&31ZvDq3%1V#3YW5cGj6I4DS}$cES0fA7NW?c29l zp+tf{k4+U7F|lLf0jL}TTkJ;SwcWpd;WOHR$7^U9mz~Y3N40hQuYy8~k5?&~&B`LP z>t0;cvTxDMxe)CFs@B25!P?sT?!Z(xr<)WABSZv*F3xOWUyLmD22y-{1Q@8467BfC?WOG_iCkM*m3mn$?y z@ROr+F@tIba~Mt3#Mjs`=Vg2i52xOxiuc6L=QEr0hcY1^p2VTOK7Pybn1$R#9%5gtn>n1HKWO&-{Ne0T^n*UtWadTOd@ zJ4JO{n*axgYK)C`a%!> z|GwJ|ZaF#?zCSZiv2cML0K}La?CgL^LqtLnq(MkVrXVAekeH|=pK}rZCggpWAEk|@ zB?|2aNlEc9h7Mm>HZ--gz6^6Yopqil{z*x1%F>($ey3nnB-8J z=F^M=_H{#{dB`{RcXn&3-EClGqZqOPKXrK)!hjwl98 
z`CL0tI#9^P$w|aaNtwsLrKQEl$cUKqv7Ku&%3p;s3CT7{6$PpVE$eM+0Qt}o4Eh+9 z{okjD+lxWZk=L-~*aBiy@q?z9t;C%^|ANbMu5t+wGdZ;aWMGm1zStg6Q&Th4pos79 z@2~peX-{8#W_vdagViZx$ zC1lNQzcn{E7Z%b@8he?tGqBC##xw)yoi&h@lvGs2tlS$NMWeB_uqfj?AQ{(jacRiR zBxT@nW8|R8cl06xkO)v&Is7-?is$W_G`&%oK@Pc~N08*2P6q)GvC`MWSv}Z)A28I~ z+FEvHcC12CAU@z6frPJdas7BXxwymxfK;TRfg^(y@^f+~Wo5DO>T05#pBx^3!4B^; zWz*Q(bZDl~|D9y(H@>$&`-cdAf3-TSqk`B;jTqD4mP&i8$X3~B3f0||*qm5Jca?J# z$Hr|KgC{B`218A4Zgy6+*a~>Ck*`_9J{XIO!_E>p;b`N!;q6t|o3XL8&ntbLya1ab zH#e8_(hCJJYycWLhv#B$5j$8hAmpGu@mAH)@HKDz{EK?hii=ptY-ni64Pl6q1Qr%H z;Q{8H*B|X?dy;|Rzxxgv4HL8SdTI=YgW>)ADX{0Krluw*Cs$WVKM-acz#vZiLm(P} z_7|<}j-6+3B1H7kF5CFZprJQW+~T@7!Ph-<)K&pSWR0iA@s7?rav8B(Z+(5;0D$xL z?7{*MAD@Sr*+*Ub$Dm&7jOe8$?P4CJ+kFDR4uy>Z;6vK1wS`X@HYw+9@_@Zv;M%LH zljHk*Z(jUdqEfVBLN{I&A|g)B$i{}PuPQH(9z-^tG}{zkNk%idjC6;yYj-F3^urx8 zM%esk4!wOl(a9f}d9NA%vDm$=trkzU&c%TFv!MwN0l+uYXHtPT^z;L>v$$x)s>HaK zPqn1tsmEF}?`m5f{GC^1B%Tf34&(Lb1$*|Y=6$7jl*Yh6tgEZ5s;UBLfe=WU1WiMX zEn4>H`Z{MTIXO8gDe2gj*9+tC-@j$$odWSSoU`XGY4dDVJ$Y3d*he-5c z5dx9O*lge3_T%1u#jpv9d~J6W5fQOAc9|3i=h0f#)FeU^KXyv3XfV(31v$T~{8i)2 zx2Ad-sZg=y(5Y$i`k%zf0H2<|==s^_aCRc_;6@Pb5S;)QWf#y5eSLj!Av-!cCbEU; z@!SW58E4Gj)A^mec^( z)QQniDU8^V;k6cLR91Bdw7Q=^zXCo>=4*S;1HQK{Hw}$>?scy#c#ZaZDhmHKmdOKV zmGsGW`s~vrHGUV!-6;A%_L+xX8~ORfRaF>`#Yu%nuowkaSh*-Op{;<)A{WgJWW!)| zx}++Sr>5=CDJsR<01+%%16Xfl?@9qP$yHkTuSAN8iQ&YE9Wo8Dlr5@2Mydk=lcI+_ zRPMMyn^G^o$`tQ?ytM!WTiB(1ZL!YYlxw+h)&hj7wb1h6^YRBg$sJC<{uH9nojE6r z&%l->CfwcIv#BHZK<~S<75VBvu>A@NF77ba?pl$CgJZV4I~Op$ap4SiFM(0dqU!X_ zOmOM@-JBMjV8ZlbUB8{L zAIV|o;^3zP6K+Lh>+Nb}wCE!|=KYHQ>5|}wl6*Vs`EDL!9v44Ef1&%KPzhp?5YzU<)kD{1tqDO5}CTydaEh1RIjFlZS z+R{5G(1p?+uJ+raCi3tZs<(=4Jo^UdNWT210?))Gj0q%o8BrQGN zH{NIxu)3Tnz^S%6);RH%{eNMYDo9DirD2+ab-@Tkl6ZkKBz$r$u1y$k3Hbw+{h>H& zN!QO_SrM|w!=0V{S+TbU+~7EnBemS;xX;pupKbpBG~S5b&*e4<5t@wgGQ;xL5R@BW z2^8qq^!z6Q+QNySUP&swK9uV0GIt9nmC@EF3jY8kVqRA(V4jAirul*KnVE01Y>a(; z^hpNh@9ua+a&}1*XIEwf6PRWg`G4|-E+NwY{a7SYD9I4@iqYh|7?}^zi3J 
z1lUPeBZ9}ms=Yhb5ghGb$EbC5Ra&1MJxEQCb*Y9+(9Wjm%3FfK^(OytQls+*B>|&7 zsWMVi80Z3VadE>qirWMn0bog7TsmjNAk{S(B&aEe_$J$kG)TDRn&_73C9-)r zkm6ceS{mS+wzf7<3jmH)R8-QPO90t({U|_ zIbOl~h17~(9!+6#P^Ao+FTH0&iC{$_BjZGL^@G^z(`ckvBCm@(1LyWcIq01)V(LlU;?6;SDh3p1Meu zzZ=JXD}@r%v#>GI)Hjh*2=q0})KM+~u3&@jFQ!VT+DR`x-vIw&L=V*#-4kj!Yiv(X zPZN{vU%!53IwZVWCjVz`B1FDUR%SKNC_Pd#C7IqxSjyDo6w9xgA{T)1zgw{~S6gjB zYzY7G;e*uJYOD1ZTifja7%d!tWdo6BZT%LhPE^T8K@amwfcmmvg7y>$^X#Ko>!TPr zkjJ=DhS-tEYNME)q?u8NO6X^u>e1pT%|GY|+9Pox{Pp(Be2s@!^7H2a1mex3h*H{I z7RUXk+)>*NaY{cvbV+^~1M9|W9|ZtdOKYoakqx%e1%OOoOh-hZcp-&0kW*HdmAf0F z?YL$1=dtn5FrRTma*yqWlW7Ss2C{RF(ZnbQnuhLj6*m>O-S|vkVZ7gP2Zs%mi!x62 zsV?9J0O`+pg2mj=H@dMKRKZ$A$wF#*yj}O*CT9fJK}!o?7y|=?lzdm{Z=|TGD2p#& z4v&sxY~aa+eVFf^q6^eqh^2BmdGdfGh&a?N5xqvq8#|ZrnT7F8_-W%H*u19GV$E&O zj9gWFmS;&c?-SwUeXvBeH2}mu`kKH#Cq&YtknbR$X?D535f#Y1T3I3Ka2VK!s$&%p zSn2A@$<2j^`Qd9qk)yMw)q*S5`L;_$>K;A`_+HA+Itc0wseX=x~5wWld)UyKJ zgYmDnJ*cr!`(%f#f-{`nNEE+7kUbUr7LSx>tItW!DDjKMGZ73rIvx-xAuB?kF?s|4+2DiOp%Ym--icz zIk|wqz*IRpq`D08yiayE`rWE>(_!#i<|#v62nQ-VJSq+!gKfY2BDb1{UNyP8*05>8J?M<=F(5i9I|2^YH|g?tTg5(1kO84*FleaaOl z;`<&c`0cHJQ0Bvc%8f6^KxHaPM^61q)KbN@ADVqmT}&{x#W zasODNp)nYul^Eic9)B-7AdCvuqQ{`v25<)j}naG!n}AKCJtAp9aad%=9WP957J__(p$i`kjz-thZ6lKS-@#YbcBy^xXSbh3S0 zPdy)!=UT5v*sy_>*K z1Ze0p|^kE>LOtGg-XB!os{Lkz)CYoqQ=E4eHLa@Yz1b#4m$%&((SXZ6=qf zu}%OOe%F;|v}6gfaaQC*sPb}hUDf>YK|BQvD1$aPXrJaEhz?Tdn_clFJ-G!1bIZ$- z1GMl&Zugh8nF_vtuirf5)({dB#wYvVL;*XJhyA~oA2Rsnw5|Ne_qygeD-mU#tmEZ0 z_u)HV@3vr=7d>_3ZQW8XDj(bYsKTR};}-V_PG?$n=dSavbWofAbQgtI3l9)`nut{B zdYa$Yc{kA`sXqJc54!Fto-dC*Tx|PGVwJHvTip>@7`mA@HF$myW|$W$%&#yH>e;Ds z^TIIb+KK454O9P=>$n-;zg62LhM_pPbvaO&DlrBdBD#3B2sh@b}d&?{zDEuIwM!K^MUxNY}GgKofM2_VS9Vy|rVw7IMVV&MM!i0KM7 z3Gc@~HoWg#eJ|joe%AWq_I-2*JxVCK59^V?I{$Uz%@or|HsSQaM=QHC#qA>PTo%e* z$eg#p8vo4XKUE?=XvEC)^j;0`Wof^6ouAC7YI)I$Bc1*FsT8K;kye#X=DW%h3Tam8BJf z=;`YADT1GiW~L z4=gl-^O9Xu^Dyzm;9dDw$nX`=Uy>^>LR>y+CA0dKgTMYR+8z9Uwx{!DF}RZ!5ndC^<01r)8(jslYm!aHZqDxD7hcKp;2glr?*?^Bui 
zGNGU0E@FAwnmK*T*6CE$)M)AGWQuIwg5kN+xjZg#OZp9BfI_Y3f=H?m! zvR7OVcB`?U;)LM_YVgJ@sUXQV&R}6k{0eH$%;C(;3<(C}mgBHjQRg-y1T$rV2SC9aKYG60-P|tTwq8=dloHF?slal<&faJO%w4!2n0FM^wgr3 zii(bs()WZkd^}Pde6m-Bc<2|PFtWFYKiFkfTi~i6tW~!Dsiiaw&#!y?Hql#s(6k1) zdQL}wp<08JL1nA|c)k*BmJN2Uz~-8TaEMtH{bdS-2$3rsLKHf%4HyXUnu9|_f#Z#d zfsqWfH!-oW&m#{V9fGqkwa_b?Nq)bmD73rn6a@*ePn>I;>S+3COfsu9nW^G6bu?0v z5@CIg4|h*D4orV<9BgeJ9q*Hq;-n{Ks(kviIK70jC{j|*B7P`lXE@W>_rY94LrcqD zPOdCEIy0eW*tYY<{&My(Ff@jQ$L*lYU;Nzd?tIE|->2yvQb8}a{qdGc>I%b_`W<_N2PPy3i&<@FfpMnvj0<6 zY=TTiYHv5x1)w2h5YzM)IBx z?}yvp!gl8yn?EE!vlw*zJ-EOb+#|=5qA*R;Q2jN}U02so-{=Zl=!SY%4o*%DwfQbO zOhs0_x&0It19;gGv73`cRHNT(-fmy0Zpi(R@iVS^>@nhVH40iNSn7TlanO zKK}JK?hs&e9|RIFm0w-`-fiIG{aBvp^n!?a)&3W>ySzT%R=Yq0^@c|NB|)18iZ{tHAhVhV#3DFl9N>+7-Rf!9g4 zx}YpO+)1|p0hVb*z^%l}&B4jZH?OI;7NyqQaU#EIa*#V@3?dJV*|_-!_QN~m`?wXQ z_JQtbO^~p03 z{w;cbi29m!{qFd;WNg+T6T=%u#t}i4Cdb?7xv%rN41=+RSNd`tm}GoTnthbaBtle# zhB)71TmMYV``SA@BEB}S)ndX_V;dc7zwK!v2K@!Y=F%^Pq=LNqBtpI-D&iSZwAcik@0i7#*u%dSgRKm=)AX*kpWi%>)ms z`nVg)6j)DNV>|V_adXZfL--pL1g;}( zV~=~2^D+L~9}MzwAT9++wQ;xpAVGtrY~cVR*GQGTygVvuYKS5&v_HP={TUqcHHNqs z9+pJaF={&G_>3$({0-M6>eMf^$vKRg>L^A=3!20duMWE(8t8++H#ebEh!4?$W*&g? 
zofjZWX{1ZUjfky$8B(b+Y^SIUwX^jZ$a-H7K_|dj7tiw_u~nv_^-OJilMvj7J^}<$ zx$VdR3<>PuZTt_mqe@O4ZJqAWd`u$PX;DIr=d;HGD6M-hSbBA8kl~*wW{pc4&{s~ej zb>-!!GWGDbER2jj)W-g1wToM7t(&W=s;WU@ph(IgcHRf3yX=OA+JCJc=B+kg~!8=CZrXfWuY(JF~i+m4M9z;`_J#r-ZR>$dYRU z9jlCj2(Yo_$(M%VeR$9~PWEw2yXQ6H*#hpv!%JpPPO5*KQlO*PFf_{Fc4)zKct2hB zd4GpU(1p&g2q%A9-A&Z4u-Plai7VKPr~4guKFg;J8o_IC4ngA9K;F$(GfCghSD#ng zVIDNT$6EG0R?{2?uT6pF_aUIYTP4oy?y@H>Odt&8`v7zU1=&+aK^sF*upLktOiWDB z&!{wsJL~H50~p%U;-R9FXxfr50+gQVZnn6!b$MgsEjHbxomfO< zn=f>4ui?HOKF`Ly$_OKrdlGDcGLC7~`ud2{%HtkB!w=N$pbWFOZw_R>N&@4x6Aa0T zF%#nw9X*K4uWdE$(6J5993348`uk}bsj$K-5ptCVwGs>W_L$~<)|hI{trq$n+&&`mi%@Tkmn_JvOZW9a9fyOHs+eI>}}T8}`LFomL8%J` zen5q#XcyJO43p*#W^dJNvXV{r=pQPh_wuLDp_Pnr8xClb{1>*U#{T`SL54VP>33PY zx;eJ%9Z#T;2;5tce@IFWgoPo!I|PaYcvhfYqh>?3us0Vp&)WqBee;*Rpf~Ea9idI_D+1vGo5~bbJ7c zQK&L5WZO4U{8#p=+XJ6X6-WSC8+aEJVXA?$;wSOao89qqd_*D2{^a0Os>y(9%#a~1 zdFWvEkAV-C9b!Ln;drXVLovQueo2(dPa44VAKC4K$hR!%jN_wG`~MWwq^={(=k{WZ ze1n(Sh^KGhdm0{o$tS+?8^I{N;8eHUtU(`~Kv`K?mauO{UY_xnFN;e{5xxV`|Cm@U zx%nc2kNyniCLjhBF?%OqG2La3umU1dESHt3E8DUUKt_|K;94l{BsI#4BCNPV#CbM8 z+~3`8=nR=&{_cE2?GwhrLFXjuPa(7KV7574W#-f z-Tt#ni`&HUw0GzFZlD-W>3smJFP=WKE~}gF+Ts4oiHyv|3n;W+eNx$oy7SDmjcPRO6H06MLadL1-75O0awFiRg zb2>wSy}dmxEiG1cG|RKR@O7!nIf|@4Kim@Jn_O)Z8iI*R42siRW@a?FmYSLxu-D-D zsHk8J@C1Z}^krqga{BA{?VWX*hxE^@y$Hda=%z|JaSrSwlW zKXu8TF8dAUg5o|DY#WV%~bMQwg!Jn@YXWf6sN{53cD`Q?e3B|OBmE6GnT zi^f!)m=GbIC^RXl!GXt1YM|%glu(jmLmcdUvPu+0QMM9&L87%5Hat5(lbs zUyn>b{&Izw@SZzesE=mF4h{}x2-suGG(kQ7J&yH_91bcj9>t-Tz=*~?aC5zVdce#8 zYoU88T)|vQ1IlFA_LmN9u-Ib2=N3@a0v=tyig2>OKlBCM94J}6HaITMYfXOd%53a2 zhpJ^X^c#8JS)AepJrN1K$AOZWl$weRb9-n)v*89B!vNpAVh^2wMc&%j=n5=Bfzpza z63~&NXDn1M*$4pc!_P6ko=v;1b5KVBT>-#8gr62VctC!If`TF>B$QJ%`Hp~$4Bs{Z z*8(by1XR(0s zrz9Mh*DNqz(-5Z3VPUgQMg&2QVbzAQc{^4v)GYpSEX^n%z4R^v5A9X@yF@8pXb_DYI_7`dkK5q zcZ1!qh_~cCcb7?j&(8;kwMF7iY#;cbD&c1!+u1C>N-zhn$;gR`zgk;+yS7T@{DE~8 zb0nh7+Z0!FQ1CG&b{$~bE7Y1@r;GPZ{rGG`fkk7uAS-oCNC(ff$hL?ay{>Y#)ARmlj=g9?gv@I)*li(1(|x@Ua%Dxr3-cM^ 
zBg`oAaNFbECNK3WPDO)cd!|CmQWp1Y^W$Cj?pKY8EMC;lWW9X3w6gq8=abJ*h-893 z(~}DfdWeO^gir=II&Y+LQ6QiP!{@95U1aa#vR0OjBFZtJ#R%P|HNg6_BvvlV_)oO* zPd+I^waib!W1?Dro(VZSQtSP-oa67Kza(Yup60K;VswvK6ci}gl(mau$~1ETwiD%gwg@xC*CA7rd$8K z#PxwC{Z}))SHo>iS~uUAclyx&mPba0hyq{Y_$G5JLHUsH9RY=PPWo2^OzspQ3P3&j zaCR)Gsp-2+0fmno=V z(lgV2aads_1N!w|(m%B{pM?uQxx9yPa49n2f{ZUfe1B&Ln4M_o=-vF6_VPqLjX4jX z(f}R`6;o>+D1m}rcvlsuT7zyE;AUO9jhCyE6v-|{A``5m1EySzX$69{qf-~<4T=Xy zgVgqJYt6iA{{(UBAzWp4UK3lqZ2B07WUPUv?C#fn2SGu1kcOgbMuGl@z7L~ z=J`+9?QUHKQ&WE%jbapB6;auqOgmzMHmJ*2`REayB*6@tZN@23_+kdZrAI_@I#V9B zO{HW#eLk7xGJfph8HXruds>)l5kLbb<3zf zD=Cqn=JrBXxY%9tNo5t7zJ{p*ZyT;AFnUdwKCykJyvx^UrPWyK|8{`52eSK_^C2fi z7T>e9BLWQ+7<2=Ez&N`t2kVNWJKLm;g5#aQyA3?>z82tnKp891a~MZ1 zM#G@{EDFA}IZAz*j7RkXR6>9N)fv!Fz6Y{!Sm@=VS6m;C{G2o6_NXv4r5js5ffDHTz z=o}#Qy^TD>U?am|!w(UDzTiiylX6){xlZ>d*~;rvyl6fj&*JsG8827)$TzTh3a(Y) zw`|nwrj2cn@sazc^i^r<%-Y6h-d5&GW9k8q9En=d7so3XQKNV#(svCXa{#V`$>naZ ztp)FUGBeBZIa7BLVzK%nDcrw;RTj+w@7lG5z6r#+K04c+9gf*3<4fs7Nk1^YI)_2} z^(h#{q|iZ2JC`@Z3fT4;TpaJq$>Vl$W05(yHh4hWoR^mubf!SDQB)L~qouyyd4`k- zZVBDQ$_h;(v$@$F)NGzfMjvlz8~LL`TL;*k>_+&7(5DvDvh1LPf2u zLi}g$T?1~XI#QB&fEv{<~HG)qHkteChk}9go4ei_reQ z%k=tb+s{>d$&o9@2w}bUzh`!l+|(j#E)ph+>o+3esWnBuEzei(YLM+4{FY z0sMgXrA0EMqoU;GN*eMI`^=iw~dMe}vzP5r}OA>wG6HAV7v2_arm6Ji}gJQNhnz**(ah zt4Lp!lLI~L{yFiV`hWE@lz*0f@aAUxjQO~~2zdfQrI+b_NGJJ3_RIa!L`pUjscRkI zz9rM~F3t9-)A;mVO~PZ6fe)|mS%!s}#S{z!)f@y-o7q*M^VGASJT?Mfk=NyNXXt8w z!r);y1$826eBpbROD+HADO{|1xjaiTgFar8yFN$r!22nhnUU7WVF9=E4S!g~?4Rui zho?yuun1ov95Crf1e=A5?jY1r(tp8C+wpsqA&h=hzB9u@}Pd%6EM6DHzC(khXY%B*0*^q#Y5ioI^Ki{-*U z?RTodpAO;YgO{7M)A#yu z31jdH(WhO2g_9E}B@Wcc2FO4%N^Nc2o z6tY^*XM(dU4PGvqF9}tia9sA-xC%9PvUFR0o{Fx7JK6)4tXd@3oJ>01`+6O8IjZZT zqE6l{X8Sx|9NBj2D|^v>E%Y7j7W*~2k?=Uf)cK5ZSW~26v^p_j^8YwGtFWxPEea#u z-Q6A1-5mnb4N8Y}H_|O2ol1#xH%NDhba!|6S^v54#)k{|zO~n$bIdW`DUqVM)cF=T zdV!TP%L8-~aN^a(i$zMsf^$Lh`$d6lj~bTrEy7X9K4=Ar!{ZUMyT+( z@3J(WPxrUZ;)h6t?hgNv&1&aBzdbPC#QArb)~cbT{{Hj5Vf@{2cef9*E0eB%-r6?C 
z3Re>shs4dza75Igu|e8E&4xJP|3Bv7KC`bL6_~Wg$0TqK8afzJh3@qMDk^p(Hxck7bHr5(eGZ z93uv?(%ZgVrLR5ci&(xn3qNGeR5QWCO_ZJqmt3z@7X#Nu%U;N3*A?MFWoB;cf&Y%v zK3i0EfRIY_j~q9}=yhy^aTEWI=u00MJuYN#fAQbqq5=XtM@a-Ex)4Ntkv~xny{5d^ zHvfaHfJ40PZ0Cj;9TiU|`tWim8dwbGJL@PNG;A*gE)(-4O;mZzol5(FRPl zV^DL{*QiP_gJYgOWOICd?l%JBgyxj3aO*f1S)$M9Jic!$XlpNb zPiy{9R6MNyW_?|f5Mp_ve&rp>e_*btyrZ=tNwYv&D*KTF6laJ?NQLUWIGB;w=Nmns zfCv&ZA<;C$(d6TkwVYKy-XPiSD{pg~i?x>vLBIkXWR9)Lj;Tg;NZf6UTU@7=3%mtDt7&akGB&-c2!WJ{D6?v>Tsg$ zRJmkdj`d8g{FwUREjBM2*L55w&^2=$jjr{tdH9`s-9Kr=*9EvfB>SAoR^p!~Yd)Or zn||AMh!=XiJr8z3U3zoE}NMSuTLu;;L3l=0rKO!ZnU*C zj8L-G5MiIyc^29xAE^bY^ngIajwj{s#*7hvDK1HHrBtfV+v(dF0-DVdXxKXGwFpV2 zoF*UNe6LQ1l4^JwZiQEbnb1hl$R{&xEiySw<|ZZ_@}tXvV|aIad;LO%Z0%VbR265d z?Moo?!OKghC)Y39#hI*P1MZssVh?l{ROB#?@x;?+&SYAm)F=%FTY zMQE=tRh|_pnFxYjU@99vaDR3IxngiCbPUyr<}ql2O=x;MJCSs$ zA0Q9fL9XGCz-^>q+fSFjz#{v9?_$(;&20`wl*z7oKs1<_gJ-GY3#kTg?CVa&PQn%9 z8d>|@2?hLJr*Moks%S6tW8X$}3sAaAJMI0CGrA5$mEOLgsd6ZwL{@&P;W;l&`X%2g z)GLAguw4p`T~pPhYOs8Kn5bqrGq&Nlxb*1f_g7tQxTyHx>TH#ko@Mgq3mz`s-h;2d zq+esWkF$k0P5c0uR~1le>ge{CfoTZM%H+$g&WGwiAUo0I1%DqJ>Y6z?Bb%K3`*8Xs z{Pr->2oe(~jcfjQYj6JKVx-K^ct{SLdFwyPqkN-FF~8wY0v#Lr7hk>aqRA4~i;sg~|raoH=|I93vex1#i~4to`myl z1CESe)%P~Z^zdGbT@KiKavk(vZwD_XEKlF)y_W+{D?Z^8rf5uYzZVG}i$jRrU0pRg zkXQ@1^4@LM6*2x};;dsgK6*R~$>UP%+Tm79PuL}-t*TmCaZ1Iuhl%}-Q`&qi8kW1W z*kfB5vK`so#04^_s;)^>g=zf;$(VCX4mX;K$6p>~AVJzqW78+d?}ttkAqPG)Zl`7Sd|MmTi|t2!?SQ9Kzscpdr@79zm$SFPwy#R28=3V`X8k>#uQv|!cWI-9V$`r0PjZ*npYP z6gocM)XS?M#t1y~U+#k}^Kf^Mdw5$AM0=x+d;auBcD9qa{K6WFi3wluO_Aotd8jZv zWb%UpEOJ9N`#k=#PY;OhCHXiwC-^jOQ|f9}d!O~St*vlN48puC!ry&BevFZAt*vF^ zdOEg>QcC!)^V(<0N~a=qcdJ&PLBnYvlZWszj4^Qliq&#>q z5MgdV%7^Op(Mf3gTmO*itnR=uBLB6tbLB8(lkn<+K(n#{9<-mHGN=KA;#+fDcI z@F?KvC0^Ue@1+mV*#Dp~piQcY?MGi4!5YFjZo@lVqp(TH0FE5WM4i2kd z(fFSC*U0>z&(K7@1CHP_8{4k>!+4GTPTVFx&)=W1bwDrm#A&@735WlNWq01aX8gSO z_HvZ3BGSI;SKr>=(~ze%X~o5p*>08D4F$n!pJ@iCN&tE|%$dud$yX94wVamYgJv8{ zyjtHu`v(XBOBzh;2xfcZ*@cCLdf7bX4IY)oM)sy;+LUwqC<4LarPDnm>hyExg^chL 
z>3S7^tzBW;yM9d{!El&69i7|fmen1ShRWDZMDlWR9j~;eD$$BA40qP~+3nSrg+}>K zI5H;wz0l-g+v6Ka6ea& zkFQXzM9c`!`$QsQS&24Zp`4VUXl}+eZo0^_`DanlE0eaG3Ng~ekc~<0gFyCd3<^S< z=|f`*u2roUs+4ZwUt|;|nEPE~0rZS-sys60<5ke*newdNS9MlO>8>A-fTN3Ag z34VAAl*jepZd>q@@Z)cIG1NDkzvr%B{1lK^sj@q{{+&>8%_ogZ2o{7CMb6_3K!m6x zu(Cpxp12QRmm?;Ii@Tm!*d3A@AN#?9?fE4X&*=GS&A7WKnr+!XR0=-eX=zQ|MZ9dT z%<%5iM?c{CA@z#20^{RUy6!@u`A)st4NB~Xb2Y7x>Diec!mO_~d&!2acm9_L4Q@wk zhzFR5ABOZ%uwcf1Reh-dr_6;SHo$+9JLj{1DP8#qk~RE z<#y#m^pRxM++Zw8zmDf52r&024V5cAT zY}8O>VXq?9Ek>!azFt#9gE`<6#srjH-WgOtKmcp1Z8l^F(&6Y9!bbJW{Kk8SiSHdo zzxB$wGOktefT(Qgfb!Bs-?NtTS3o)0BOZMx~o6Hsce0aPV#-++?lHT{2V=r|K)so zR2UyvY1`Y|8%mJY9r;`ZvLk}0Dl|&3h*w#{BfyhNO-G02RIOt-p;83>Z77zA*Zp)E zA)bsrFjxGDRaJ!+OrH}}q6Ow9)~7@T^eigey75WtKi-G%e;>yhJlo|o>pk$A(or)i{(H>xc=Xgt*O2~638HI$%#BuN0tIZ}@*S@VU zm&nH{!+DnvC`5<~eUmG=y>&0m!hr8Jg|rR9g@*+u!pEmnG%*k)G4cl@+Qq|eXs8bszYWG?8~l_Gu>=4jJUrZcySv8mv1rJs zH&b(47!N0XjpfcSC2y>gY0X4cyEIEv^U^)Z6hZS)=oOMD0@r{|=$mX1G)Y zhA@_g1we&yz&)+&=7u-tdgnrEW4HxDFZbcUSr&M^?t%f+E%MFCtUYUKdj?FSr&8m zQRCI=;C=(t2M~epj$~fjjPq;|hkPN&kC6$IlcB695ykYs=pIu>{7^m{_*rj(6ZT`g zm^73oa&T8~r|>KQqDwj6}@HYdIJN*YpAKVE)Rf25{OvVFgg z_vMWi(S(CQCX8;y5NpN2L5<#&uzdV)3<_N6=z|2vDLzH!e;l{0^l(Q$a6QM*>)Cl_ zjlk@A7#}^~`mEsownIj$HkfyH9)+XH1>cJW+i|$XrH2Mz=Jx}rHl?MYb{WB3SAqD8 zXa8*Squ57;+1EB5A~0g&eLBAKtav2;d{cs^C`-MWPM5QW5`FvJ8=v2ym;(mQ&skY+Y8q=3cAf~SKq<1L)I)^eL# zlud_HUuLHb_{x&C9${tIMV~as+eLx)Ay|`6)79`psUUjV9=7_Te0GrS-^Im4kjC2D zf_igVmk%2<>yBI^g}o>XhF)ZK9O(^oh|?&duiO`M=URU7^|f81|HtfE%eso)YQnaO zlN$wfl*!KBhk(W+w2kg890u*wj><^WzM7YtVFm}c4Gp)gZq1xO^|yM1)u%zZ_wU|r z-mHy1fzTQDo+roe-!(3t5f~)8iHRD*+(Rv-hQ3w-s#2a5rWsJ)LTk!x+il*S1RpFC z@_ciY2Gk+~BknrVsz_d)b`SBHKC|<^}|*xC?K$FZR_3@`;GxszjENGO|+P#^I_HTv&?s0Nb%B3nG5C#i(lF4l)zb+8Cfe`9GrT`me zM(}J2+%MJF`!wE|YcCz`Vtx3Sm=edR=B2-WRn;CEVTeut7v>=o@!g;N0dRp`UE-wi zbOS|nnt>=-Uq*4IX&2Mp9XBclcO@#fF(wa#PAnJ>?X-GxLi7#9KaS4M<`(AuzRA4{ za&Iw8(Ogq8N03c>o=rG#9@bIb5_8s6`RrxsOF&cole4~vuF|^dcQkCjk~xA*k!3#O 
zkUauUn%EZEED*JTMFxng&d!%W=LPQ7)b#YB9cQw=;xNw|@}LI1d#!(39)54ecRk?} ztu3-#g!NM(? zXclA4OTXI=57W1LJPGEi;1^fN%22J=6WTI2xb`>E%@^V8gg5Bsyl4Eqzrr$bcZ|A! zlz&v`orH08Bjed{lA|bQXUBza+5Fj4<9@w;TDyJvY_1!;%Z4G>6q=@kBO4NjO4+Y( zZZ98vmug!AQqB}1>`^4_GNUB9xX>?v_tVsl0h!8QWK!Wg@%5Rr*VxB)T zoI*mx*x1k=t)>HU06;_52QV`rBmzJxA#CDe%M=k2MwNSxd?xdh2@zINfJ=q~4?hnS zi`?8b5M38C1oVXOfH)w~W4e?iAc!>IRxU#YUneQHIux-v_>)I%NoQ&B8r{@Akt%0b zUKf%;kv}2JDY(0}-L?nL> zOStw#z_`qt8^79~LYn327A8lCfCT@W&k)su^B%ZeR1ohGgC7H9czC(Xe@^~gUY=e- z>9$e9sl!&h>8c7+F4or6TqxQETt6-@uK6NXB%PYV%wuq(z_c;)Uj3$3bg)q3M<7;Z zNr}9&GAzS?26)SI5EVoJ?D>;?Q3`(jf{KmB~KysOCjd%2pGxB=1+>5Zog4&aiGPfu5;r>}Y^urEBt zAivPLgow$8;HZ}<;p(Gf8P+&78hctz{lgv*(?E&#v)r9NcmEuzp=Ea-(J;&3lrTWo&n}Or_e02s7QnBg!0?Bb-PFGB`fmr zyV=c?9Zy0#@}v^ELJ~OQui4}Mch^dC=FV#Jt$RBfwJR)}pchPlq*b6z>Vi4ahKh|v z0n;Tk(YMq8Ub?i@^$<9C^jkkie8C54ZfUs$K5@C^!e}`I1LAG5Ub3dlfvqk(p5V*h z8jLWg-*KTI2NS5shfRh+{0P9i$fITBNmuG^@qY2pGqeH46rQNAu`xJ+DOus?(lwTY za|3LSCNR>`VoL6>3Yhi|4miKN#(bJMxMh4UO8ZI1Fx{IuC9tCFHWy11USme82e6yNVbYRglW@l zRaMWztzUn3E8@GNib$NYa`y%lab_sNmS|{c{dGT9O)GWr^%;QgFd*Y0Wfy5+W~f$R zHBQI1DxZBXj8Y>Qjbh^~cTNHLkM1X(_jQ7=-&M6A*^}hAnze%5Nvd$-$(b6+^pSF#$N>zRG)IpnSq4A?h1zM ziZ3lK1vVR0FeUSQatH2%Zy)cas4<5&dcrqM|5|Z*+v!38o@OA>^?=U=(BBMVi#LMa zi5TCjpiU?Q#w2KH|FqSz(+th5d~qeMiiCsFE-*nTz_Js6qi!v- z6bk$2&sVT+=W30&3g`V1h-n}s%@$3Kki-J2#4L3I9<_VPf$ICZFr%(w^{S~)ktNTb(_AlEX%e5p&8U2vJ z80Vr6a_yvKXlPc;87boLS0gMHA+eH{))otks)HW8Xn#>~c$t_uN>#@g1FY7?l$6JcEXrWjVu2IFQNUCDRH3s7@dr0??f zTeR15FP^zI|94Y7bG<@;7R6|DGh*YYSOziE;amH-e8bTg^1A}YFOLN7P>X#YNdLxe04YOt!h=Cf(_ zubOJ(Lu{~KX?g*|e|bt3PF3^RxHwEIq#+g-6-g`GFNLnIG1FFzf?7izbl`LqJ%yX2 zQ>5j(F)>)J2|vmA)Ypu%!a0iqg6Q2-Ddwcex6a2$?zmlpkTayP?C5J4N?-Qv&5+|f*}fb9&HP5hbn)jy8}Dt7 z<^{Vyk2Gs9ts7BD*T-di+=T+7js83Od1ts*&%P*xTNrg``Ab{kOQN;7|E3Ze?Z$X0uS0zARu@Pdr3o zW2RJ_CEzOx&_;CH1K^?+z8ZWD!EmbTDHKqvR9hFAo1M++;dk1UQd3JT?1`xt>&XRM zAoTJ!9s}v|HYX3Rf&PDf0H6<^@bW4u;br*9{ZxntH)>rz| zh};@=I6G5D@*s?JvRKikVzMs*F0^x@SkZQrnL6<&Iyc(#hC5Hssr3ECifWox$H07W 
zxVxw4Kuiq^5)#zNIrlMh5weJwR)V_9c^8fBJz1O+1lM74an)F|RO(5k=4zn?`Y&i? zUmEisb)15D(noEQWePPa|J1(LK*kPS)SHf4lxwX?{{W3F4xYa9vwwJ_DmogHM>fZQ zR;Bv987m!5&KEyF!|d$eop`dLd_D>(;v7-j{znR;PO*4G7zOVUtRIC=vD9*2n4MaTyqVi3X*!6_GMb9nLaY>^ zV#DAfr;Am9?9sgyav~F93IE^ah!#ZqXT>S557X#Esj}oy&xZ3W4xZIuar>9D(r6p+ zUPHdJphkS;oVEI_OOD}7L)rfx#ZuHz2@%d7Vp7sW*jUh876huO|M{iOh)y5@P`iW# zq#(hiu=B#A>ZStjV>d|?z|`B{r^kT92$`nbR80m{R4k`#WsiqTbIRA}_kx1|C1SzB z;q&wKhcLCbx2H`Sa%51>5%Ss|#F!66-ya&1_xzQoRuUbDn$=?|S5{g&0`O?W;LHyD zgVtC`swkL3gNh`WmS z5M`N>EM}?|>t^*ny-JMv%h!pm>dRpGEk+JbLM@ZRL>S1}a=c(TERkw>r=t5Xmud1z z5q_MVY(+y{$p2gvhcH57lo+)OWln@{d7Yiba50V3w61Py8T!TfoOp}T*c(;V zX3<}Uub+2p6ixAmGYljVz{^&Sy2cOfqAU=4QTZ6t3KBOOec$UxuiL_VuJ<@NMh0rj zjJT#R0;K6;sg^}^EXG2uI^>giZ>kii*B_)paJTokbqSl#{sPijX}LR!GG=qrsmz#FjjTp;j&)* zvrK5V!p$hv1O_cYDFLZ2&=TTC*IQ9ttHTQsf|(7!pdgUbt*^rxrmQRfElPI5D6^Y) z@v~UZv=@G%0#zT2B=Y6^S}(_z2Fstn-1tgrMtEyCG^8#PzCBm@a-`K^WX9U~M;)2J zLIv~6N=t)e())C!r91Qqum$M^+GP+9%j&anxy;g(h<6vw!E(Bl!a&43{U7pVWcu!= zJJ5Q}Y8v0&X6A0o2G7h@SrUh!>{-btDXT0|WzlgydIC|!-6CJEpvvGKIBC$E4A9u& zI#0p(Sa$t2tas<*8J^JvtykWxeyT9bKj^MI%vcro2&Rcs#w zN&LtX4G>{qKo~~$kZ;x#eBU=_zkfGj&*#y8Z6&u6^r!9RUGvk6%leI&>S}cr8zOHuTT;~;Q0c3}iKzH+gTcfc5D z!f;22!axegPnm%?lWD#yZnP*D-^>g@De*TCs%i;P;NGHfiWI1n&Xh~xp#tl#J{%~~ z>Js3F#$F6q9b7}YJ|Q9Za)hLyoayJO|92}z$V%z!`zZRbVRSn0C^$K>W9(gc&R*3} zqvlkVc`ZtsATz3Dohe1JBl&5o|t&Z4j(aSd+ECH2iULARl@GDCAn3 znwHzVt_VooqW){5t{_44$jiZjg7%|HjLm3-nP&2wJLC+<7vTmLs?c=w!C<*KHwy`w z89h?#bI2vJ&?TY^2hoTe$$4Lg`a4#m{++HvmBE6BioKC9I1v-dbEO? 
zTkY#r@~REDo!#8sw`l(VohMw(jjse83(T{}-OR#F)H27b1xfa1I^y=X0{CoeYb2i( z^A=JyDnp}wW%<@LpH$YEg+6UM?mD^?_jva`BTzs7c6Ggj##Sx>mkK}@gHuu}4H}qQ zj~>%x;7Rt~S1ZxTpW1hf;Kq*ZAV`UT9f**$lZ0^4y@kB@eFa1*Ichb=b!BzOu66Ul7nI(7_pp41nN{Ro6{jRwq`G(o9yT{!B0nGu#%D3toaQ_&R%*p1vn% z>5js9Zv&OuRNYaubh-*SzI1LTbF@hpBF~t)AQzxTq0PgCoaGU<_PR9V=krxU(wUAn z4fc2{9lzE}J4|ZB1P|T3UWNr-o{>@T_V{w`p;*OT~i|TQ(a1~ z^bQ4>-B#>3N>89+VKEK)z!ni-Utck@RO@R0#Xvz~19tk(q$Df@Bi2+TOK7@ZT^|+~ zW9s=bmOg(FvonoVqGe)XY4^PN45$kGmw*01m?3@j2XO!HZiyqsaL-$C(AehL_+W!I zC>?Bw9Ys>NU1>Xt8uTkvS+k}rK+Q=k6Gq@K!cP|`T~s`)%sGG0{PDkrQ}L9S;Y+>^ zcLT@o8>0~j{jXl2`!RHscNM_W*A!3B#`(H4(5FEvib+CXq?Egv5#wPos*}U->vnNq zZFGEtslCUiFz`#O7z43yEY|Pa8<6jJaIl;IHVEh_=Z(3jl6)Mnc=mI-JwuRJsWf#7 zA>`{Se2S$2Yzs+=owChzu9U>NLbKE^6P>UU0LapT_V)Cg$AmKn2wz=%2N0#Pw${am zqxuiFk3TU!4m(vmWd*QBdlR|fIt$DX)Y5YRw2x}59L40WmaJK)qpO-YHaC*8%4GWm z8i)-ZUOvA4qobqsb;pChg}{{QIS-b0Z8-8tF>!3?w}1JI`?25K0UvpmWwI(=hq6AW(BZpKyM}UN~wkvg&E4SI0oO)@Q{*xPH)5Y)L#$Pg(Fy#{AF?_F4Fwf4&^G zt<}DVo4k-8V@pK^8tGg$?8B1!m{9OpB!*26OxxV>d`!=oX z1lqd%H{`*W1S;U4Y9-8oQiW=G{el1{?6db&19JU}#J%5@ zXxUdW^L?&@Wy4B*Yq zUbo~#3wqF2G&||!&#ux;>L78scbxxvFKrnfx=XfTFKhy~v-FTD6rzR)k5D|gV zAc&X4uOg=q6B4$7lbV)>?|3SGRvuvqSj=d!voVNKC36JOzPi!`eFv`B(v1bTv!Ch} zMlvuweTp=--3U-#7Mn=gcTmsGChE8mV^q+eT`{=*Pyah5R@; zsQElTCz?|W$e22$i`BHYmT0%Bjb}wM58C$s(=9e`vnB|z=OO%{5NhH04TDh6aZ|_a zGM124)~ug!B-S&)5)0quBm2XB^*O{_7CYmWdp<`s@!d)Xux-R@(_lL8xLE$o&i<@F z6b#%UUF(le7TVv{V_;d5x}xK#Nfybli3|n!hq|bV1lJyrPlwUL*q>CGt71c+Z1t`~ zD9Cpv#!XU)!HR1Ubgh6kBqJ*e0=B@eS^*y3^*=!;OO3%|ocJ+-wFWo|v`Gj~sNs_M zJX05@I4DI@W$S?o-YtS>JS%5cnbV80zNpjTGQ*?)AZdzU_l8rcLMxyB z6BFd(ZWVekhLrBYk{|~5cb82aDLoq&4=sB1VOm-~PR_OpRz|sIDMrruMrb^zj`h<* z8WKyA%GQ?Ny&WzuuOd3??>&;;KcF@oJGclHOEd^RL_fQS{Wsylx>_zT)6#_XGPgjI z7Fq%qr=uhKKQDk4>U@0!T_>nyn5NW2(XBqG;GhSB#1)`sXA63e@;if_2|phCa+$t~ ziDFABvRF*fCTZnwIRY}|HdVMpLx;y>_?d7%l{{4sa;Skh7dq9$>6g}!1k(kASnq69 zuDY_iu?w^9itG;A_>jWl8TO(kMw+5=Pm87NOEfHTaWoH`=}k6o`a`IPhOhqYo+Symf4zU%nOtv+MBv$I zZOCMWvv0GgRV%T#Irb33xOuSL8yD|T0#Pk<$SJ1EZee|xzDY(lWy 
z1fYHaSvFglm!BWu0wgH~14g8Up#J|G&svEXWBRn0)w`XGaC@+y_isHuu7Y<*)^W405HRSt~Oi(%Q9UdGw0ZNCX<9wsTS|}3Uy2_?~Q=1r3tPJ{&1uNOfk|JPd$iB$^^Z6(|mxZ%$(075ZBIT+O%4T7bI5G^{BzZ>7-x@ZK#!Gjn zWcOOUT-UN8cz3r-!)8&h9S9M*A~-&rB^hIz=}H2%3r)XDOvlD)gZ0V}Pl;O(lfgQa zo1c)E$VqfbjB6r;A(Qucm~x7WfH68wV2L^avC3*+q= zC=yPn zTdeS|OcO(bso_u?FnPP*YF<teezsq!luz8YQ0ExK|xbXgxGM93VZk^1aI+#wy!<$DOY%7y626zOD_)pdqAxVRRUmRJ`6u>jTy?QDSsHMmZ7agrpK34Mi|{{lP;eKDv|%|EjdI{q<7x^6rl=F;c2~>*CLE z7ESi$VDfAo?VVGQk?g1Q3>8XUfW1(69e=HmV3>h&N`qG4sUq(XilI z$6b8D^I9}8-yR~n+aa)ZKBG~#U1MSx^*45PBLxItXb`S?Uya2A0sZjsG)!TX=Z?k` z`HRnY1Vi(;O_bOOOJtB|L-l({Cn`!-kUnXAL?-m77_1`=|Bp53p{|Z)vo1D&4*t^T z4}WLbRKC3a-Ra5o6|+!ohVDp_dnF zhIGEqgg&S@B z*9JZNW118!S-m}R(W$jK5jA?<6McIKKz}`Zdkuw&FxfDE>xE45r+YRP33Tjyz7%z8 z49k8yJ96k?VWhR@B3v?M4nxSiU2Wje7uRTM7L|l~YwowrKd@Vq6mm(cwl4YZOB})M z3l`0_f+;^()(zNDz~~7&QXn3OW>Vx>YPE|68yj7yJZy(YuaPJ}9!Xq?pIxd4Y2y7P zV(sH5zF-`sKS&UE4+jeyTDCJRYQKi>DYH*Y-2oJ+B3{hzcjxP^;QUof{n2ctKH4V|+}SU$Hv@NQXG`9rpN>8ewKTj=k z+ftNu_1IQ+yn^rU;r=nGY@XEmop)Pf-IpmcVN9Igv*2C{0wjya=Z7ocV;A&$yap%; zz@j0$ae(;c&oY}>n}KwZEe?|}Uw@r-o9_GGfv5u8ZLNBg1!CN2YKM zqzj!+OT|YlW%_TAwv`SGm-f!iQF`SK!joU~rjJNF#iFry*S^9~6guZy4@Kc!lu;%P z(PJwBA3v~I1GE&OR7BMV0lJkEI|I};XGh_a@JKf0kce(DJI?F4z}{lc+@7m zYuAsKY0Ep(v&uRCL0XJlFYQ}Ov~$n%4i50WS0FQo{j2uNp#o4K>-1uU-|e5Aygg>V zmFc&hj-fa}I)hXFdUU)zX21DgjeERNbmPLW1w4)4t=TWP z;4C4nIgwPkzcjmy=jWSdz`*I@y9%6a;qBi#t)RX7PAtnhFMtp2_bzx1isjEdNhP^A zn`I%FT`3$~j2tr*Pf9zZtja!CJ3jG&>JHd6Y`#*hM~^Ypd>hVc{!z_F*=CiOD`IJc z5j30nO|Q~OUi-J!?xPi#z$3OSb)t!=!x)RY7hZ*SNWCo;C z#@~C%1e5;7%SJKvGNBs;qjZMk!|&UNl5i52R~hCUp+jamj6FLr8T1s(3X{WT+bUyZkId#mG~7&-X2B!ozK5 zUjBTp_BdI+f}IV3nzmeyyl~$^aj^Hb9PwESSnxKPn1yO}=saCLtU~fxxtrUa5TVHO zd3ogJaqyyJ6%l7nncwYzUZ0HL0F+GlknA$8AgvPaE%jwwL~j1#1*QVWyP>qZPABi1 z6H#Nms}s@Lx2D%%D&rR%Z;>Y24$lxN3pE)S*EoZsf9Yhga#*Y-8diivkHW%g!0z#K znUvbIN}lS1aBxu1$=q@A8sNp19%B%f*O>~$3Ak`tb@fsz#Ob zl}oXyy1GqM?dPk{FraC@L-J(;&Ye(+A16dih!G`fg*Bq|*u$lBz71_xtPOg1Cq~3q 
zkT0jLXJ?OFYjmi3;b?JNm9n=Eub+vgKDXPiv^cyy8CvV6W9oc+`zc!wF_;I=ZlNp(`uJC?}@kH)hn;4 z$W>Ro{~BMxvvb3BWi$$^w*ImyeJIKnamRB9mlFZ}7S~p6FJp31y?O5l=gJ6Y{r7*X zvxpb}va(>EmrSgzI!YBK+})|dsVH~ZX)=7i)b>!+aAkgg3@Uiv**C_`z}^uvlr?jB zjal;9`Ku(O{Z-p{=P|+jWFh26?AnEhN~dVItfu-Vu`P2h0)AqEBua{Qk>w0(`4SoQ}H?*Qdc~SEr)29esz{-^CWp=!yRfob(v%TC@DSJ zaA{VAkkrWY)XrB+CP|f?CllEs34?8P(+kvBK0jwFMs-5X1AikBYnEut69n3dbcBda zcx?PJ=CzxzY7)b$1pkOC!BzBh_NHz0vHJa0Au#c5ok6wJvmPYA8EnzaFnw7A-{AyS zsaoDq##84r=W~+iXVGWwIKH*TBnW-t-6y?n^u`CP~!Uq3gurM$Sy9%uUoz=G1{l&TMq3goLdWI zGhr*t5Tb;S&^025!tS)T(&B8_Us@I_mKTS%ayB*Tft@VZlN{4WnS^l71_{c%YeLAp z{DZz(Jn9-^x0iCaS3?D={r_xOR?Q(fcR`iPYZ`s&qPJ{-DH(j3M@<|jR@!G#L(TDB z!3@d|Ya#uHrjNx@Jw$(>ckh_0Y?Z5JPx9an@J7!j* zVUziUX~mTx9pL5j44R{HYs}Ey5_)7H9jcWn1l=MsJh=Ld&XPek`tDrN{<@mYw{0Ny z{W86(&2#GSHl4$LM!I@Fl|E){__!YaN5wz>)lPO9&nK~yKkX`4(koTcwitgo+#0=4 zb5@Ag@~`!cp&^%XZkZzK5DZK?7omR1A40kHFYnjmZa)cRf9YRx{iIkmzuO;__vbj; zUcOJ-3@pCt&a^<_412=srNQ*_BIp|sEHC=4PMH!d?OYD4zPD6J)6l22tU0L3wWuU+ zG_n&t^v-|5Fk2&559(H!lPV&TRbHjM&)YOp39_TE>D#eC1p_^KW3v~8x@GKm%S-0q zqzOcaVRqZ>*7xhNF?&^+8g1yuB8IFX`x{ zaSsloqx#cH%T0JZaiOdGBB=?&B&8#mzLtE}M66SBoe39(>wGB`{B;E$jSj@aHtRbk zqPNGnev(SN#fL{hxV8Jqi<60!wU=8$x!YN?XVGVx>nlUv&edEmh63-XuIjN<|MwH= z+$>*srn?*IlJ0I1knV1zySrONP(VOxBO)R#-QC?FEuEY0_?FN6{q={( zaj{p-HRl-T5Kxpag@i>Cl0ECL^- z;!K$bw~-ne#ah3*ds)8b^iPK)cQdzxH1rerv9%f?!LBXx&evkLy6V_D2T-Q7ZA7K; z9TCa4%h{&3$H)YH&-nxdi|Mt_ik-2M+)*r=ZhyBD!#2Mmkq+uS9@Z~MT!jBY9*G-& zi=w-a9dS{KQ3_7O`z&wpu*x5mpNqsWR-HfeLxTUo%gqewR zZsTvl;bHG5b0vBp=jLz9(k|xDq`kTt{P}C2chy7>-lea{-}dJWDT7DtkH*C@#;GaV zD6MIr>NGKyG9MR07xwwup;^rOqshogi5G1nh;y^6OEV+Gi-=>z)9tNjFza1^hpTGh zyO6d+z`Y}3poDF3txmUmkc9Kt1`q1?jd(_e1;;PYIvY`*{MNim34q7}caR8PfL41*tNEqI=^|tIDZwWGLLiW?U3+MXe8n@{EZYt+}4({d&b~o7krqc-s}7c zkUk(Z60wGTsUTKLyg!_0lRGylu zUE8M@3NbxctQ}XyU^p+;6zB%WRVA*rGbn#Mf8pDX*Xg(N^3}OL7lk@WWCg+EEi)yA z&aV1m_=7xmj}jA|7rgxs1j;BM*KYU5=4v1PkBTMA4t?H#9I++N-rSe+E~V2bo*CS7 zrvU+rJ?pFsf(a-GOGY+3#JhW^`Jc44jivMOHFtVaw3t%p8NPT`W3h3*QHmeY^Lc%_ 
zHa@KlyU1<577k#pt!z~d^NG>ptqTbA+Q*LH0~83%>GkNotHB3@0~5BY<6q9B?)L@8}D`` zdlq-I%@cb)-)PgfNEzZ8@JEETx$-FiI0Zqvv3tGIV%HKz)*5#gk7K$ETYbXO;&o@v z%hKO{={Tg`k!RPkDASEd#l4{5-?#Kay*MI8)ly>B)hvAD)WxHXZy@z(=B=&jt?eT9 z=&HZy8^lqS)|*N2>M}ZD3}Iqn;Z;k^;2hApgb$K_v@0t)6h0t6@a5hPR}7R2#!F=E zoLwpb>$~=g+*`|mcZ&cFWCEHR>5N=Ijtn`|-F;cJ36pX}+`R;n zeVUl_7Q^yNCo0OCFOyvUmkNLE`qFu9EJVd7N6s;3fc7;1`*(PXH7Cs@xK^u*a+R0_ z^k8BtN@-}%H4P6-zyZ=T;&sbWM>@94-N~wp%RN4sc>U4Th-qN$ZM>5qRbYeDkZ!I^ zZzmD9S!P0JtoZmj=D%kLSilX9CdBy~PFR)*DGCLRO~S_LB_DhHLWo@=5)kw`$HZ{@ zO@w|XN=z<(9r+|<7F?s7#N%fhYr7fG(mC{(BD z)yrwgwm_Hi;pXNq>+3~H(B|$)g$yODg;S}H3vyG90C!un!ALK)-Y7swm1Q9vD23Y@ zHGv(IJCwlCuwFI$Qa4cLvSQ1-iL|kt-&WN%{#TJ;P)gma?_a;l#AJJXOfoj;SRcq` zCDkPgIW+?3Y;H8YbP8&eo~O&*9TI?*TszE_(r#B&@k+PVxZ$m2!kK<{1z|dw{5b1u zqRl9AE+Yb>t9D_*8wsGN5a4f|@onng9}f9KOb&Z=9j*kxfu0ygDFs6wFVnyr3W(OY z5OR8g@}gs${-tdG#@k~6Qx-Q1i*FGTT|}mVl}?Mb0F6R%MV~wnsnq4*>dJXYfQt(e zhywXNL+T~eA6I~-PLB_tj5`t^WO&kwC%g(4X~zVqhHo5)t*bu021-O6y73_}um*7R z)7@rZS|FOR<$*>|eOS2r)4J9!s+ySc8P`H)?eBqOSB~1cw@(5B(x!jFkDZ;`59mB} z78xpq3Gr--3MHS1W@_(6vHi2dMsVHdA;o3*Sc6Vst!tm#tYArkob_?->&6uqb!O4deKFCr>ze*XA3S8|Gf!63%ZkI0DJi(XS%Sjb4%SYHp! z3OAaCQr~wN2}hQBfWINw@!-;Ha51ea_9zJ@VE8Ai$q)%q%Q%DZogTC7Unx z0-B|U*ze@(n&j(XVbNJH{J8)O< z(~nV;uO9uY{BmAlNj5=taq%@8#^`lp)|YIc{F6^pNMQ&h&l#{a(hu7{w?gpS96bAY zyTC~iy$Ctl{@TCs{^XpmD*=&$%>GwXIK+d)k1j1$z1u>_?RPg~MY3!u$zfp(;*MWE zqm>OGI@ZMLfI!8$cF{E8?I5FpuAoKbJpYWdWHU%!anhuO+&cn9@sgb4cX?j%hZQbTh=!#z~A`pn;NU z-0X{^k(HGN8vgXS1Hcmc{B$iLAGXZH%?=m@$flJi@|EZv{RlQiz;R^yP+1f+^TRdCL{)_LSMQ!`!m@ zk?>8x$brKNhKXsF_42u&|~LQ45^7OGC|hED%`!r|g9QOAjmixE>>-qrWiEJoOUt;Lxk! 
zR(&uF=iwpkltyc2r!Op+zVJ?rk4#rsasH8APOobOZ6H^7x6pRh>B`5meR%2k!Xs5d zBrMyduq~uE&Bnl6N7GZQGG;O1Vj2in+$!ar_lQ(n|G7%s&@vA@mzN)@3we(Z4yv?h zSjL3Cq<81*2S zxC6t9SsmaWfV>#0ZeX@edIFpz=`cXO7|Rwx3ZW43M%VJSwMCvpX+fNcPB2enC;#?_ z1Qn$ft-Me*H+n=ROHe9L?aqw~fFO3F7-+;k0 z<|&AjiIbthjUz^v)z>GX9(NT44f|&`$G*^&`ryY#gBI@8XpEB)gf2_&jHzq5UC@h$%Y!F!w6{krEcsOi zY)`Bl3qfdg{5=S&o1BQdrEV| zORmC7__`7phpA7SV67~#UcF*ti`oeO2!}xXUdHi1peOdV)&G@^0K^(KK);_X6afFV zLWxk(w}%IGD5idsP|rfYGgM9d{?Pv(jW>~TV6=ph;8PlV&N7hb&S#h%xUH5QaHZw^ zpB_C$)0u723;us!+R6a-GpOiMxKc*=y$UkA$nqE&8G7xi8OhS8rmh`>TdnBFscIr6 z?{97Z%r|Teyje1odMzs3O;HT#KgokV15O=m2|#giaw>iE4iIDd`uaLj&C@_69u3ZX zYmf*NU$zP({9Sh(9c`0fMG(Uzorf*XAkRqt2oefhcR>i# zkX|piM3{DfQvlF?KS=YD(Q#8qh$Wp4XMFu+#NYy63FtMmko~p zVUx$mWDN72_9MVxTSHylJo66uKT0d^!Xh)j{CrjBP)RiSz<^)Bqaa5I2O`|uojd2+ zg3z{=m6fEYE-Z;q63{`$Wk!bpi=nZm=J`gVL}xAt>P1I{Ga`g1d_-VwV0{L(>P4Q_JROut62mX5`R9iBn!UVW1P1F`XC93 zTzM%-f@F!tKVwxFYPB9+DO0}D`RIo@x{04WvK8tP;_S7&uPQAH;J=QgZEx{@NvZyt z=7drkF9ONt93miA26;3Zz}b3xHqzbAO_~}9{*v{KP&cStZBg-y6P1&lJVqqJ;@Q@` zFK7sV`udR2Q?YI3VY|CGAXT((Jpzpg)e{8vO-PZ{hWT`azM;X1?*aNodVe1f7*P&w zJ}FF)#LIDF8wVEaBuFa-4vthkP}ki9oA4T2g-0x2Pyg`n;lTkgxKt6^W~TE&!ha*Bzkb=<+2yG* zePJi>=1)?&C=X==xwK%L1I4*K-RI=wDgHpn9$@*p-bFz$YL)XRN)Q0*xb~ORSrcXD z-cPA)aLl8~m4ozFbntpm*&JV3$Vg6pjqK^}&YkjtwGE6H3k!F0Jg%t0*#!^IE=aE} zGY1SEg_-zeHh-p)hvIW>y&BXdLiYl-Mx*E$ijH(=^ZYb4b22VnNo+U+J7$9=3Ms)x zEGoY>_=Qe#IdYS1S9Anor89n6%vvxT7nr_&ysJtBEqD{>2Q-wkj9kv{9MY7Ga_8-t@#1c?oA9ktGs?ASvk7x=bS_huKl{ z!opTzSyeC1_!p^2{=lc(9U#&~h@#)N`*%^J(>MZilY>LV90;4dnetX82T=NYWU3Lu zFp5qQJr4Zc)6>(83=+VFZY2SJVdyqhUy?++Fg_JCGwNj9&EI%{QXd{xDvnq(2UU8` zM;6d+jQdxlOF~D)S~e&sUWL8z3u?i$zxFD=HSt&3U3j3|91~_lCEkr*Lzf)`9vQ8CoVLvrh+OIU$dGhDOsr zCRl@?!Jh;Jkv3*zOraPEUD;_{W*@M@B`o^5AhWF(HHQfJbX`g1HN*GtfO=pChudgrgU-MwjpZE78XW_&iX7JbmvO_FM_B!+( zH;EEB0G7MD;FRt__8!$>5}CZKgTq04dpj^T0r2ZPGEj<1Ojy z^Mo%Q{&3Lc$)|Z=%-m=@O~Q~2SO}lLYVdG%OpHBmnLW5HK_`$TBZEN%bSab)CVSeP58-25bW`e%&| z4?Z4-_q>9VeGJ(?v1aR85$ZM>fxJrMZ)~aR>>}4>l_duatP_6QXz~a`yH-3?f@*u 
zi=na+u6W5}cxWFK>2QTtr`CDpX7-`iIi@tSp*KF@QBA{<)2P3!@O@h@|BNg()VJx*k|_7XLL-= zvQn6}l~ow>XGH4d$Y93=Zrh)+V78Y}QR z!mRQHcbY_;RLz4wlMP6~RI=q<%^;{k&F};ah#ZbR)PJYCU02sS#vwTEh$95? zJ*ZI-a?(v=9n`J0K{X6Iij=S6a0z%h7$OLu1i;%5{T-o)#{x+xD=%l9l$V4`9hmc& zp{t_qHxZ#BE9;$My1R#&`O+jS&VEuS0wKB$h`LeJqNoy%u zWfa~oiXImtBE3t)uS-mvAb4N+Bt;lD&kTx=@$qp!iOzLclF@}U*Izf=lfm9-#H8Jt zpz4}h6;)MeR8FbJtjtjE*O%5h-&oLA8W&+s;Cijwopw6{X0Z%0rzd zUC~8uX5o6M%_LtsRp#nRy254B^`l;E@sxf-PHE?1g%n48^$@JchQcanI9)8f*8Wyd z4T52LEiiTFqD82ePuQoU2V1@larCT=ad6kL&^9)He%W{Pq5A9OB&JLGhtSaGZ{PUU z-Zm&nb${0t2C=EksnH5nx;oxFJ9X_EGa9HzFO|;h0D9X^Zjjkh7LJ z@O$zn4 z7NWq-$fN3Pf_p;>1Y)c|pq1vF>_3K=`RK)eRR8)tHzUNyR`cWcx2Z0%nN^O$pA|ul zp0d=s7BB1%A!Y^qII*Suf2&Kl%<3M@CAoImlzcc7XI@PGU4D~eK7ka8Md^6N+R`v-@bc4!X zo{6)zt$y7`Y#;4JP-2yhoAE&?XEiubR6cZLmAd}OF7Vj556Gug1aXqVH^-wHQ>{-= zLR1}HDI^Qh(&bz+$fl;4rPDHT5S5C=Aj+nE=it z+b1ToUffqW#c-n=yrhiG-)^qq%8;&_WI#K7kDT?^w+LORNNZx5Z+2C{oGwwO-t1PP z;8#mf_qO%soast+J^9jvEOmHy_rk;Jo19Rb18o^B({mo4lEE{T`PJ&`KOG%*MOtNY zZ4xxKA~&W}f3SQj>7QKJKKzUBWSs@Yg@gW}MXI6#~9R(xvobHMfGj}c5|Lu$C9 z=|?NE_|!uuFxcj?(az)7OOq2~4C+>daQM503Hus=J7b99&r^4JBV9!9qj|=yia}WL zv+TIxD1Gf9`Q>iz#zC^S`by7E-06gdXFWN`e|*}^e4SQeO8rIPkB@(mEUot%RuX%o!cS>Z;=cvLcfFS&>q<%=uDyLC-uhJ6ls@b_x=`-I3N;FE16!r@^x~&BOo>ldpvow?iWUmhoQapX zHu5z077gyWGS%}B1$M9giI`;(Gue9NXT}^#xAaA&Wh*b z<7-;ZD*0_L-k~nuPTF;9f^}zUNjPIQ>0d2={Lu3A|K2N;Iz4*irB zY%Jg{Jh=J!d(Ale3mlM`_7Jl)X0a>e3p8F4#tS*}d-?mPNRtrn!VY?5IS%)FDS}#B zT>p^viI{D>HGFPp;7b%9+h7#-UB#63f?+t`^FzG`VwAVrqrQv(bfsyP%hSSL=#513 zQ9%nvJ{HWLFVtXLx{^z&Q#xxyvyXYx_`}z|;XzB9Lur+RVrq@|gS+{0!Dev5e)-~4 zEL+C9qB)`H4Q`YjA=s(bRADQJYmj9k@^pc52?Rzj6mPjTEpMHcTo)ZRN$1zb&slob zM!Q*~qTb);>^*sz8F^Yp52XCHWYo2_1%jfBXUu#C@bRnbS5#An<}{#asHk+fC=K{w z^LN9eqclhcB;zuv3J70N7{i1r_2_WTpV#zR`MuudndUCi#Le^gK7=g&x^ z6VPYy!kyxa@cb4b#J0lI8&~iAt=ZlzkDQL!JBH~wzXl7u=q+=oNXk?smB zU-X=csCHY}xs1tJ`?r7}SKchxV*@Hf)AN!PPUX3p#9PO@CCx@wMo-<(uJ2;HGbrYK z-bwP0v=UVFodfeYm|~n*y=8wxRVywfCABubIaw1iv;`i}$%;6|U7^?5jHwFo!VDS* 
zh<^u#Y0xsC4|M2n4&=&G&HXS=xPN?St7@dJY_^p0n`#1eL_K#udkn{oskgSwZEClQ zcrI4j3r&-R<0o?quhbfc*Kc;4acVBxfdbF`{6(1Kb-*0Ze{U#?`uL8jF z4rF;aRZLn8)zRTVRa^^Vs(Ccj)GD}I<#BCky3+f{@Z#TzEpO}<@jZ#_gZw9_Qd$oS zp@@#5At#VLg^tF?%5s#IV_~`P_~m__3TVVoE?Ys z%%{7W0F+Ry>F|VY$^Ot)<=X07*wN6gFwWN6rfLye3m zDBh~&jJ=P+kC?TGPta&!ZUc6!q)zW|5YeYXW9>d-I3toFv>kVcM%mailRk)Z?~+)s;{Rf4;wb zKf7XX(cGT@iA~L25cpw#ydfTXj>gBp{u^) zWJ87loU;nLX0M|*EK%7l9^pw_y|S{i2MUn8XCwPIKE=;|1sYudPH}(r*N#dh)U|{5 zle{4LC?H$-_y|xpt@@_S&ddOuz3SS<<)vQ%+veMVoob7)O}qW)+P*UZ2(E{?J9}8% zxjM#4oLbQ|mYDtemW?w37^eXG$T;T2wrVgR(CmON9rmByI#SeTrhWNFw7g7J1`dfs0@jfP96}jjofEMu147q z%^t-04PVV;3oXIS-Mj4qRq!rj(ji zps)u_C-=R^7iG0}eM$?`*qEgqF=yNE{{rk}8hcE}&H(lo+lmRkF50Ilui@yqA+G4C z{6)=LhZ}(yCx&%aYN{1Z7SdGtN*Oshx#J$i18lC+ULc=^l#~?Y5x3_lLof&=2U{k= zyEaTkQLzfSYme0fIl#wMU;fDU=;ikL?Y|5VCluUWZ8P5A^UcuMW>~P#?Iuy$ z7?yvChv3OYLS*xA_9S)pts3hZNJ4@7Sy$J4jnBW7eDK%@8c}&IltWdM_`A+d6z_Kf z?gg%w{CAM*u!2vYE**UX{Ufw$uQq(Bl!iw}0J-9mB2Fg7-&$-Ml7G`hm``Q>nU4v9 zn3x!7p`y~`q9hN%pFZ~Dc^rh)72CSN1aaRNn3H2MkByC~4i7Va7b+;}VJW_CTlYb- zPSuJK1Nh^*>gsf~YCm{k9v(cx=MJ*Wdx5%e{Z`oP>OMT;5N9p z_KKJq-S)}pu0Gve#oV`u-_!bzRsOV`ORjn^JS##||BqHeh!!RNZ{cB&4N%+^JUq7W zln1#YU^XOP1q40G^{2^wyE6Td__oOO_8izCWpLRVJeL7N8}z(3Cy&BUClXD4gBOzZ zf8P4ij)mgDBJ`P4CvAC?C9f|pEA?0#jLHDg57M!_`%{yfq4gxTzm)p;75dmOD&JKE z7709^npH}kHDeV5U>||5L5DzvN*Bi94t-uOYpy3As%B?m2UGhJNHs~;zef4|)4y*k zefe5$tfBQ`c0hNv>0ws4uHCxzfyZ}j;>6|S8zFz92$N(YJxwLZ@@ic)WO;x8w+RS1 zWCYj3rbhSF_$0sY&G2K|ity=vK-$W~;b3Iz_>I;+PDZ*wgN5wR6B32p%7zKz7{~IjpQWWv zy$ys3Y?r;u?o5$!Xy#k4%w!Z4Kh&5&;j<0Fz=k}66ZakPK1$QZ`4K!%5f;Cmj+FDb zruuhaHi6n~AVs(4*?)L%8Ri`1L9^NCp?=TN_h6Upy2QUXKI2Pv4Ph{pG%-`=`U_VC zkFW)dI_y5CR2_H-UvG}X%Gm;`Qd4SP%J*cQow=th+>#R)A2ed-xcWWEh)86`uY}OA zL#-avGh=MXHL(mC^%^!ONDhKM7F8^xB#K{#4=5{-q9QuDo2yCQ#OpApntjvLb>b%B zHyd0c=Q04JzyFjs&wYJ=aLvBu7EM<|Ozton9_^Gk3=zChL5`7Rpk8=!G8eO#DnagzkSerizZZ(_CKC^hYMMzCU*gByu` zdP}$c<(oV-aziNSe>pC3R4wwPm%$)ChsKbA zR2Ub;MT}uKJA}fk6^&rdAuB6eVo9jMy0)IHfqE5F9OwjzL$C|+( 
zzeLie{!yfiE}SjRSIRs^7*lVYG(Q*I8JLJva3C_LUUF70ol6_Dka6UlPJlr=F;r^S*1m&$bMJCChfnNC82A3!-u5! zl5_=!A2v0kDPAM;+6VsqM^xYe%ty93<>BRZDa&BUfpYwHzNUI0vxd|WS!^kb%ZmXK z{*ydi_ak~BMPk=_Mf$M5QkI=r<64ttehQ9KmQ+N?*CwfS5#c|Jiw59vXkZv>Jq2gd z3AbHcQzMP>6FE8o%~OIE3nC`Y&mVP3E`Tv2@!N*NdQ6&nAcxDVS5us#%I0K84#yVT zcHUjh1RRXl*LV<&RP$H1uJ-oKh3Wqa^9#HwZUYvtf!6Ll}5kC@1|>9qU9T zd!@qUbU$CZAFyhmhq~Xs2*=j7Z!yJEUT+h;uZ5l}av!5E0oJxR0$AH%<0Kg42{gY~ zU16)mw0BoL!eYsmcZTwPJlov&!umVP5%`vPE#O^rFB{w52V^$%i=olclH9M>Qh9di z&K_Y5gkc+kAR)gNB|{+k0?zaubzw%5WmOXrN*_IcM0>T9Y%={u;#TMzXb^v< zn5gIyc5mAwguSHMl#wuI^bWgE-%9<6iI9rJ$qM5q9e*q&yWV zOO{YoB`OV#jQXmU>*}9mTw$IBdwL4~`QK2ucEsIlA$k64#3(K1 z)F?PK6(yy1MT$hEDa z9mRsPoSaIa0|U^O$olu&z0~5WN?F|_mO^AGf$E>)u~{IxQdWFUjHPg@!atTj+Pl4X zI#abL&UdSfSbGx~Zul}x()D1rl*s*rmLFaEQ>p^JAXiKm_20g=8dnITxIdiuJSr34 zFhl~e13^gVZyhlt^0g%jGb^Dj*E|o7PYmifCf0#=gDI|BO4sL#9i#jwDjz4J$Y)8}>1O?w$%f5UrZ zo6kawpMRaGUC0ItSP~X!*!A}9H3(sJNcmBB+)jKd@FB;82{X{qYa!fhb@!yq%w4-R z&^!!9+%$O8*16i@qarjQn4VF^ zLtvDDQUt#B^}#WoX{=?8TQJ2S#<|lxcc%rP2;E`=nL8I4 zrT{?7&y;3~@|J#JioLF{EDwmuDA^9{VM zc$${^X?QoQMnhccqNykv=(sQ@NpieM~f~#jsCK%f(^OYY*E*b5a&2 ze)pTJ-nq9seP3XKI~P6c6_gxSUUpo@C@sG~beB^EHYnNHSir z+7|uVx2S~oXEldY^;sbgv%y=t*re%x5Y41TeqVI6p|F%go0t2r{XD^u1Kw}f31cOu z5wD%+irS9E<6pW5z=E#i`mc91uMG*FoDGBjIv?@De#{@Llsa2tHL&%7FrDshc<@WZ zaqhuBV|%2PRNS4>7vq$0{`({>@JUk2Q{yNH-Kv;~A@!@*%dqU#yFtv0txF${=CiA! 
zv{mujeUY8#)6*K`)umh8wz0IB{8q8cll$|%`!d4AI_AcG4QU%!)07HQ$^@EP`q5>P zW)?2o)XNCQ63SqAs+$PV~p`gm)!1-~*a%gU)>7;Ji#>DTSThWCgqK~C%?BTJi*d=K1WCiP(@|pBxFE45A z3Hkm`YjVXSL#}Mc>+0jD%tgsq;(=`<%^HL3q$KFVJQpiJPe#vjoAHMr7-M2x>+k3L z7!c%ky~NU_()Qt{yIdvs>)RbB-FH4M!;B^Vz^(^6psH91FriQN*N(l^Ls_+DIPM-I zIh~!IL?W?RC1NZr@ka0GU!V`$ekR*GQOW@>r+Qh+N$ss zH#=ATAbIqTu6J(m*mSpmksCJjNs?F?+=&F#1vA3y0kuC9xVsJj)8M6Kli1oQQ~Es-A{Xp zWR{f8ZEt44Q$a_JFlwv>vI=(`3&t=SP>*1~K)JvL1%rE(zggs$9Nis~6z3gWhjr}W zC)x2=EYEm^!HleMV@8SfyGgE>Ci}{BADYDV!zz2;%(UR~vNvWQkVszpj%+jtFxPxh z*qH^m4jWx0S)zKxcPm#)l?}OONBA~JDP|jA{f9*QUj4k0lJT>(@RBw6@R++{HLRWx zD0O!JuJNkD)^g(D(&u<-Va*)#e~a=IaFbA!9C(Fu^i-_kVv3e>)MfjyOM{YTg+vzx zMk#)r6{cI{=;ck|Tpu}I4R0ual%b&}qQ+JQ59NQ6rcccw|1eYkju~2h1gI2n{4=G& zomPhmg9L|`lS3N|NeuT_|3Fl=ujHY1B(g0lk~XdZh(GRPe)xq8Ezb@xb3tFY;1nZ% zB;?OjW)E^pNlV+=+)VnvJ)4_jpj1 z2@8asO_aChK2kJ4OJ}*f+d_{}Eo;#S{Yt=~wr%g+f=<{n=?7|taSLE?Lrny+V_!mW zcaL71+ntxGay0)w7+#fVy&Cml&!xC{-j|=(m7@u1!8Tmv{xv~EzpT^$?zh$S2q;uM zg)lvV-sUNO*H4is=;)~_`AoihwB%cV;`0=2gYP-QJ5=DVo<$)q%W zSSt)LeK@{9ic1RGp~ceg?b_($;S#cHac_9v+`SFR@Zi#*EE;g%+}_sF(fO>nvjZd` zYPY6jJ^D8B5Fc6ooTy5iIz$>=DfCIk(_p&SD1`QvScHRKnR)+$W)A8=0 zeDu86El$C^R$vd%Sw=7O_~_)O=-9gXS=UH{M)TLekfZ;uCvWeHLV6Z)CMW5oe~Z>H zpcxzK|LN0A;Nn3$*uj3)dfqj7hAEt#uLjsod3nKVYU9@R(RTLsxbZ!klSG44tr_45 z44YR0KLbO1T0^PkAy%REM?pgU8@}Z@*yRXWLeW
`g@2M45<#kKCJNV#ogLQ8tl zaETlg-^cxt3av>*YYdvAF8a+lZEFmQWW>4nGqY!q_YaT1{#kgd&Xfw;(ROba*_7ht15&CL=H#p(Ihz3eWk5n{xQO};jCLSi5DW{jk-J9$7qPZuewr(= zm-_MN>nDQTyQb5Y7YLSdUe$+~vLjTPY&!cGL#R^>as?Vf!op>Sz9>sk zgDhfA;4vY-z*16D9O9pR{azORQ zM|R@hqZ+s@ya}kMpPK<)I7sSy@`PxC1y2_XKR==50p8sEZ}Y=-ip=WfOkJLxl%10k> z1zYF+zBAr6g*|m^q|q* z#7GIrz{!A}ne>uz9C%Q=x+2m+X%0|mQ78>Y!Pm#en%Mvh3uN9Fq^)fn#*Mt9qUckk z2@K-mIkE2nDeaA%9Nsb&095N47~r#PlLm^TOAUuE?DJYH>c`D#M>AscRbi2b%O zThS$&H{d%~WSR=T4^IPbs`>5eK^^u9CJ_-)xsyv1Xj#Un2)6t_n0BDlYks`UP7Jxc zJ#+2Xm7@(7#ly>){yCo#w_(&=+K6nYRQ8q=Q7yStDESu;nHj6UYH1Hz&jh3PheGnZ_YuIl4u> zbrj#pTW`k)%X_!X>l`c$-|SSwAB0#=#r5<2N5qMMfZ%YzuJ2b95K^`Jn6PXU)7&aV z_^`1M3rFbIW=Q|wPCZLo@}6T!>lJ)DVpnGSJNF?4sq77yVDHTAg+c);Qy(Q-Qd(CB zjHml+C<`{fTzZa9z_RdCA7fR11P$(6Gq*Dc2Qo!s-GZzqN{2FWV zrsDiJUO@ZlKkK$+ysA+jJgn#AQ_jlwT?G4{dSyHff0IL_c}Xwl1(!FZ{ydpx(R99_ z-XFnrh;bsLq-0D3DgyXvEYQ(;J^u%Cw6-Q06d4~MU$itclbX(n#g7*%8FD$JEA!q% zQnDTNB*6zN+j7O7oSaNdsB5f$hG5_UuSXtz_trr z4k&B6n9Y5D`jZIKv*Df}<}F~SP%?4tFcP>|shZl^!(ng3)V}dLE|8(3q$toE(X#P; z=-2!YChe(l+Yz4^BH$uRA-WcLE}O1&7cRm>h#K`h z0Fj>zK=Qly0H@)_|6L=7hEO(ER(ncwiW-QEin21u6)_Q=gnt#aF__A7&VpJEkncrH zGRn#rftg>^u;D(BaL|Jk!}Z<-GBxuNgs&qwGW5EXEu`lIFILAwu$~T~tM?MZEvFeX z#jvcP+tQ7bhu$3l@w*uZ*h05yInkqE@m-2y%WHW<|6Am2dXwXrr&37T5IvC?UY8}0 z&trVzpRKIxpWdNs0L;zsr^qa1QY7kgl zq^0nG*T=_q4rsi3u+Q1k?3kjNR45lf&Ag+FzYFw;OG`#mtn`?%{lGFfETVIMdb)RT zkZGy_eSsOXad_Cg4awG@zgu|mtQvy#Y}*D4SX~VXNPLu#s0}WmS zI}Iq`q04G374VH!tD=7YMUPoZgho9c_S7Jjy@A-0Ge)rUPGG4TGi2@4pFjLG`0~{T z7Tr=F$-TI_>#^F*GgVd1lat<2C?s{c@zmzF@N3}9R-=W~WiQVP0KiHnQV z&~$cmF4pImso z*TI*)t*>jlsn@4rYk(_vazzmn5*&D0bIsA(ct6bIlI^=UV#(3w^>@aH!`O*f-^O*$(9N;Y51)6-tPxGWavuZ)T)8$jH!lJk$a~$G``EtSO)pC; zKKrMAUs?NBd8l(IX!si?V3Yw~xLu;I$-!d9mTEYC1m}B!j-g@Ho$n7HFO5h@r9mk- zDN*k+Kk)@>LHNL$c8b?ZE5U{tnX%~@Pp>LB> z1<>n~eqUdU`5v$|>8^@hpBeT^eKqwzm| z#9-EUw?uUpY0d37@Ro0ml@y3Sra_2&PiohqhLv}88k ziJK_{6&M?dA9`U37J$_Mud=K~ka@wSe!RYp1K{I8Qcg%%n-D@(W=kw?M^LjfC-Ut3 z+&$#-bUH~EW9q-r55DB7TmaKjXgpogu;ntf>$pV)^e3th4DVG#ydIAH)!3x7!s{c`RWutkTYnHiQCdKZrey 
zK5695I^}qt_TcQcZLjSyeb9Hd2#iX7V;dUcZFn#5_0Gbl@*L z!z2%}lO$ctl@#7_lFDfG2pUiG^_IXgn4Ny$^Pv;+VW!r6oVijnjM1m%4Kw?HOnqfk zR$J7rbV+x2mvpCeNH?N%cSwqqlt_1XcOxN<gx-fONo zpZUbQ0G~@Nyv8xzDIDd|G5Pw)<*F5h=Uu0D%TQ=Bq5XdwqVvt2?e`Cl_+VP@Tl2_# z-lNw(ee(352;cv}a`{|SE$m!i8U&#e!D5)>F<6b!?a_}>XB%pQ1OIXOA$ZZ!3@?dHBs&Jz>Cc{q1` zKK-}fbYwf|?`QM$;Dc|p=>Cw{tYQoTiCvyDJKklJAl^NAuH5cT-Ms(x+rM%f5{1=# zCK!7McWcsAAZ}_6@eTs96#|j85#el+QEWWqCKxEHN3~F0%Cw%_pMzla*^+@0t>TZ| zF=C$uj5HuAfN{{O4h&5h;*4d9jw)tKUAoclle$7yUe+9=dVw_<}U$gM8+Lv++;2 zYioP=&lBjPcl}3^y_0Jh3ay8la)kcIB2^fG(J+~#GnDxLy=m30hJ#^rStT)q_!;>m z4vy%k;_ZZqOMXuT3ma^4@tm^&bca#7cKN(P4kM1j$Bz`PZrHsWPD&l>RR0YzG=SZ) zjNNMZ4D=}dPcCk#Q+DsUJ_~L(9bKaM`#jc^9%0#Sj+y!|`+g-e-e z1EC`YMi?tPc(dcs+wT~XlIL#(ylBa~j%!bcGCL5$R>5TachO^_y7~6`^S|P))yD}~ zGJbO3Z3Rf9k8^D*YS5Mvs_4)MW8)35m1&(U{Q1Ljjwz9;R+pDPwv>kmX4RFebyefz z%#k}Ps^8q4v8Bpm2pqn1tE;zgX{w>Ke86YeNyi3-LI>Nz1*7P8FV?ANi{lj%-iF~z zJ2DoOF(4O-wQ_KHH}!j3gr8ros-cyTjEtAu2nbNF?IQ)^=hts_HcLCj8R@|&DY$2!%$rIu|~ZVHo&N^8y7YW z!#u`^OeH2kvvE&xnak3bF~1(txhS`;*wBjO-;qibmDIg%Q8aHPNWSiNYy4TFe|`Pz z(2;QE^=?{9O4vi?+m~V7=D+9v(#ylm9iuo8C8^ZyHfw{LPCGhm)zE^oxp`I7Rz!bp ze7_S3Mdn~FB9b%7nfb1M!8W-sOg3@{8O}Ud|NWu_Vd}06=s01}h1IJKU(0#vN*B9o zG8IDopoM&N=m0D0ZYx;Dq!Hi?uPK&&>}vU!ss}}ufg3((ZB9^KYnB!#EF69;Ss$Jy zsdD~khKVP#e`3s`%}sP+mA4|-nS!-)chtK2aa+uxOpcq)i@2kp7C#m1@{QMjU2PICXWa-1=Ju znq5Vu)Rg0nC~80$Ckn|8M-S-A@+C_Niy$!2E)$v8QQsKSGIx5Ky7jkRq+Kr|V@GQ9 z)zV*}j_@{5n4wsE6(sgAFVl@LkL^voeFlI*{J@}Wqdsml^E>4#0r#HeF&=lbVC4B% zkv@}I>FLJEQ?#uuDXL+z1ZjC$q%h}sX4ywe}ilL4Yzkj#{l#Hr$YKW}W< zyWaNqY!Wr5O<*S`HGF6#v2qp3tM_}&Mn=~7Rfn@@Cn8`37dRkg@gJV-r1Em`maCUt zdoM_4VkRF11_lC5qCV-vYaC=+O3Hx$eiICUukv!8QhQ)<0w>6;6)ShaPN%AZg0W$$ zzHZ*8LN6&coi8S7^qQmCOSXM7!ho>tCMYDBx4pTT^Qy}bIiyIlB+R*oytU=~yS;&d z1YN_|!%F2kwz*)zbRxrjB>mh3&~Tmz4j8o(q{L7E+zHQtqoaeE*QynQ%fC=l zS(y`j%M;lb=rNjg^LLk`xDr;<~%eWx&fq{+L zw#!@Ks?A)^)-_v+3D_vc{Th0o@bPWaIw3Vn(ksp9g={j>tW=G7yGH#+?PQLZ4oJNH zzn_!}2RX#B!3IR1Lk`?F1?0V?lM~JN$B1LEQG+;Jp~(e_(fir-BRuI|$rny80Qms^ 
z_ybV>e2bpcVrrDh*S4MEW@6^2m>;1w_4Je}pB9ZcU}I@1(V|W&Fy>}TJe+I7M0g~K z9FdR?sqB5*Fmx6-1Ti_}rlu>~|4T?NwpLMkpr?B(nc=y(4111u4PUE^{AXcV3Ln48 zy3wY_2{?)2$TO9DSL=N>FhZ?zBv37HJToFd9RR(eG_cs3``b>&^o1ixM)oT{>Ekpj zM9ea6000ImU2kRMp)zP{R z+bejW-{DRVl4r`go_jFw=GRwOicYt{@uK%;zO&P}DzgSvR0{v?HkL zAauVv3o?~$zd8N!={Z5HD^u*rBX0VvNzCmr=I3Vew0Z5iGNCeK7ME|KFU6kr z9~P%Slm5Ypg1@u~oaHu$F6)ICPj^WztE=0aVLY|fHcr57dj|j!JjMZD$HpE7yH`n* z#*oXqo|KfhPoE6_-iI1k?gcB9&RM&g#4<25qpS^SNpK^gT=3_mluX@v7_54HK!!oMC8{s&BqGtFURnIhGNw z1W`0QJG}4P2&^oz&HY$e-n>y^F-lLrU9@xdH8l6NI~aZ%ti*EKKgpWZpl|nW=Tsr{ za#~|_a7!rCJ~~q~)NbrjmCv6I@0-foOqwq4U}XvP^Q*JB&&yiC{pI~Cy9GoQ0H6mv zU9HW3&-T#~sO-0==<6vU5&`!?Fw;tSVsvfCql}K^)fKgqdMuyqbeWKdqA|Aq^|r-h zN1FFb+v6oe|FO?o)~|U)!^UEkl)UKAYiu3EB%cfBQ`{q{T_()Nm&~cG?Ahl!4*Y9^ z;4_acE8Ia=p!99J5#>>+%dLilJ?K`ENh>MIJyYoD(->!ay{i&aVq(&#rRDw<2bC2$ zeSLk^X5uh+=>KizsWAvtrqRzoK#Oq8_12NNwSnuLh2`Ij^9TPmLx@6S+R7MLmRiO1 zL4&Jn+y&S~k4W%2N^$d|GMu618V~gLl|f=8F>Egjvj~A4ldHE-Ep=coT70Wn_~z>S zY7Lqt7u>>}O(P4Pm|))VKKV&!mi2VJQl*9SAjmt&PgF4D(fWfwasR%9WMzf3pkHG` zi^<)~y7>24$!u{#1#4szUVtvK>I+8)?8LaC1Kb@gO-<`z2L zth2=0{>C-0Tf{?yrSuJNUIh$z7)86g7tfX*aIHWfdAx z*Va&R^U(NFGH0xw$IYA6{VOR(%;VmvgVW+GT=5EXPHT;~|7J&W_r~w$G;hnaY^I(& zF#^8w5wgY4<+D4z6y0O)mvih7<*~55tjI=NA16-C@q5O$VJSRZkNou8baDI1%6CxA zVaaw1NnK5l<^)LRbgoPZdu`bu!dWkAx3r}zAY34Yxt&<5eKDa&0?R68j@Zp6%Edck z=P2|1Q3Ai%2fdOdXd?5flI2u&DDlkVBj&@P$a4tdmGI|fhE`yp02<_C1n}<8@i|jj zsHisOTS=IaxJ8M%l=7j7gR9GJSC%-1IrHRL;y#jU3lm|7Gz_Joa91eis{S&M*gFV; zY2u=zlcB+>Vt{;sTKfRvNl5pLjoL6sNHi(pN5atrYsRnfew(y(LwDSv0>A?ZoV;Gh zpu(&w7*^0Lq)|#mkZEdav1;p98(8p@sh8Gu_*E2|>;6;fa1u^p{(Gk!tcY|4{U)*> ztL;Y-{&sUkhG1Pc^n2jAqpB+YA07d_srTY-!4jZEhk}HpM~Bm)AMn)@V;F%9pfXXm zJfG(&Z*;>Zt9q4#?bL)lu#NAzzG;H&5&&yGA4;2nacrx99en|cl(%g|QaPn}0Isbj z$Gx>VF_rdcF1Lc0TVt-1152pnWvK=L`;aMbtz%(8xps{ z_DPzyE+lZron-!5{`5!Bwg82XSfC0Klz~cgFdOX<%1H};#a9^WO zj1s@T99he6lFPw|95rHvo7pAsa`loC7Bx0UeTCf#4cV?v6eoTS->3kS)%JjcwvYul zTP_E}NoPuk4TaJlZAZlY?9tPENy+j!En4*}(SC4DX`WldEzxbT8ZJBc%U# zSo^Rc8$o-iwIDHeRj;EXenHiLWKE|zLpqQ 
zn~5;ztPxBLOWtiWUhBrRuB*?b_Bdc1tW7a|z^|!a5Q405Hh`(-EkXwNCdlFp(Jl|H z{J_1mX#-2Nj2I%B82{VK$MrWH(SL{I8uSa$wM^o82Kqly!i6oKW?-jh1^O>soYX$o ze_@Bl~LHinhc ziQ4eDXq~ylNC_n8of+$|N_56=T8wk=(V8a5P+^c;u~7b1p_MS|l;|7CI0*IWy+>tb zv)OTi)rcQ$*&kNid;8b7j-H+%XTIDHV@;Se2qc%WwHlKy&G47Zl}J&k*kn2vqEhxR=b+?ww~$8R4W+Y{bJOcoMt3imKo_O;iS4RfJ`U zdPMFU_44U4$YOUuYw>q4>0mr1mZ3T8P8 zgcFGr-HbAJlwe0CnEOrS-L87q*Z+>-2jx+Z5Y+p^%%yz~GgG9Qii>MvPKNnysPXk{ zX>{N)%G-{ZbZ!D3BGzpBs;a}LRx%rV!X?$UV(l_Lmd;QEB8HZC#TMfi8&n&|NYybv z1H+Nv=@u8BHzTW6*7b^~F})5E`y&6p9&ZA$a5$8=x=r)-D>s~iY5VPY=P;W|H|y%s zPFCb!BaKH#Fg3N1kbux!ety=(A^hv(S{%=R&I*NrGNug;uhkhe1v5pI&41x4SZxRR zEX|H|GyY1vA}msXW2^dL)@t?=WRy0(-m&a^4bF#ieXGBIk}~nNZVNK{4uZVLZ!6ukinS;*jIdW>xO? zu4+0BD=iywl_=?B#dHZzPHrMD={!qNH0gPZ(<(bJtiKAv0#;k8fzL=-8Ae*v>SchscvD_G{&v=}-|9d=r z|MdBkRc!4WCbBYJBK9Y?G|R*a1bch?k`c>3i&y^(`ND24a;TuuK@FFvm%<|yI|xXD zgu!w7ZSSYLL>(C263JW^)&qX;yY5c%6K_#DV$woer!`s~KN7*rlRtl+_Hh?jlF})Y znbXq|strQ?uKUNzbtDOtI^Tt+cH9T5z%`VrmVAXTws({XTv4R0+gn*H&g<@xc&Dy_#cB$*@6O3va4i4?1Kf{N0ir$DQ5e}7*DckNx#{G&6= z`SuaL+Tg31B`?I1KTbeT+ADdh(z8_Qe%>YvmwK)`+lVD;itlD2@UQPc=jg55r703Yt~ zxvi_K3-e%NVgdF{Z8Y4LoX95jO#WuJyZN9YyFc)UG*yuVL<-VEEq5S0S#HMV3S zVPPfTx*`~BH#eN6R@XhWbF42mH54q8ByzyvFC#09j&*i<*>>q$zHGE4(qC3_KYpul zNq#hNV2z>2KlM=oo2(E;-xM%&r+;W*FGvPv3NQe{NH^Ygf-7Bg|9--#LyBE-QnY$L zt}<$qTHwOVng0`Z!PQy9M+d>&9@hJ_L=K3G_y*shosSnu`{ajBDIuV(6SS45Xn9~b z?Vvz|6)PtNID<>i0oLfZZi zZj{NQeB4A)80IPb&YQulTY(iRMS$G_%*~Y89amb6)97CH%PT2ycLaYjU`p(f^7L#e zrN##Vobi=(G&JIVl7q@Ad?>r9CicA8t~-Z^HYQ6m{@F@&^^J|RMY+x?XsD>PM&F~O zgQiEtR#|*?0pGePIY$)|cwla7N_`x5Kx^{!C1}x23NxGTAO{*7_^kzo136vGE3QOz zRMgg%7L4g4>Gg-p2#bgy z@z7K7G@6Yd_ThoxE)xUFM8?3L`mV*(v)w&Dk3Su^-Rwr$WUv2TOIE0u&E9bntC;G_ zt`m=d?3NW$0lV026>VqZOD4w=1437@+IHc61Ns?Vnu9;7h*u*W9y}|9%t?}j^9K#y zq`v4T&0J{clfLcs6)bP#bvXf6UkiyNAd7v zu#=an~zJ2nF4tolKs@< zY%MGV-cnCLe@>lec}q^G`8{e(p}?i`r1q0I$hNN)ua`z&;fjdRpEvW@`DkZ1x}9l@ zCZ8iARrR5(YVzX~Qw0U2>)*4``G~u4)-#l@o?hy18Vocllc9kDapYT^z;5xNH&A%c 
z!?tBgSXRJ=)dctH=}F2M7zW#AF}@F_prb3Qu12Q=+VH(|-f-&anI-V6G_YF22M3o1 zQoiTqz44{XoSnToozD?V0R8fA(CmR?^Spvm(ifjxA1si;+kfMHe__s)sIQ^1wZG3V zB4S@Aj%B5bGHC&P!PF43eE%#pe*5-qRyGHy!NZklzmvC|1a3`hM$tgTp6~MSJ4u8+ zA>1ed3lT46nHfu6LF{SVd!B4L`u2yv`@+A+Y2z-V2mvSBlfin9jbF;HSS*tD92g>l z@-)L2apS8eweAt(*!^D&*#`3_i8940tR0EZZe+DmHoU65I=0Og~h-*VW zj=|}37BjG(knCq0#y`D>x=Z6r4r&acfm|+x#VJW<>R{~mml-<<f~!D;Zr~|Aaqt_XGp^X)z^J z1q*}$`e{_yUkq~g2`H8nQy30%IHkYiqi`EiqoSkt_Wg>d-&i_cX?5Kt-~3AzKqsA} z%+1Z6^3SF2t^{86ejA%#PFPqN$jsMi|KfW3_-Ht@8MT8OI?M*(@(Zgkzbx{f>>@Dg zT?_tup{|pmq!;7A*;^0aZ(!*Au-DyOJhiuEpiRmkafgk7jNdcS40p)@U8-1u?3u+C z8(o>4T1P{04r+Ckl*(sqh7rdm_=MiPF%ae}?)5P-vYWU(qJY$}1&Fi6EAY}GcxURe zKrUNcxA*qM62(9K@QC6zL91pE(AHU~1>j0vxpuoBFL=ApaN6;cPp??e`gCcxmuevA?inWRv@)lB;|#H`7oW zR4L@YF+18E8-Oq2H3KxGyoHFadd{9R}SjPf1 zG-fU?Qwhhh2^f5I$gt3H;*XDyA3A*(uQ=J*PWC7Bi@?vyM+wM~QYL^f zOy%O3guM?c*yXYcH5HZ8r|C4jfVUxcPXU^uT?yj7iu}PJq0x$W97GK3oE#j+h3!pO zSp>iTzp}&Kg#N*OT+}4n=U4q4N7FK&BX`x;v_)QnxxoxG#RlS4OoYtw#*PCcT}TU^ zq&yY~n}q$4q4+gPk3~(%P0!BlVEE|0Yrw9Plo2{-WaK&LS22?YGUIIi5w33ke_*Uiz1@^* zt69!OiLJZTm{@$~|AWgDzU+z}N@j?<_T#oZ7UXTR^LEx@n9b#6y>U}vXTq{-Xg!Y&x03~ zgBMyhw~;5l0S_v$L;9aRz&X{+zel(4CK#(L$TdPrkSov|txvhew=gH5tbeaX;bnYWWB7hU~KUap22KE-eUhd$FA>83`Yi-kID-(+o6Zt|bRex_{&Cw+Jvy#BIfB`Sw!%fW@&g^Xc6~;k-O@JPh&k>bbAq zI>{#e^mJCU)5(7FZ|^sA^|8?7(zDpw-|g|o&`R|8h|hlL z7oPIWAbk_9-Zn0-OKZkNc<%r*2P7DnEvi`0tRMXRKgZ>&Ik=FxxX^$V;STG?jte6< zT{!k|BAmY8cg}D#_`U3peT zph~)7apP!C%`px1z2Rr9Ehx537}*I>K}S~sfE5yKPk6ZXnf0eVGpDhIk~pL}7Q}s< zIAq@TK75voBw>-ifiTmN$mx14XbW6kEEt9{yU(2*PPSZIswnkm5fL6ONvZ-3ODV}H z+`@C)+ja^jw8Cvc_Da^)%#l2_s>#FZWQef>DJ1Nf|E>l}1Yx%u9mial)) zY_^ed9SEO)g`4~NnWCT=dZxV%!q%9sht3V_&#J0AIdf8O`!vYMROV!n#+fi4C3M6Nqv{FIkFM*@sZi)oK0bC$oM#+ zozN&C)PHnvmy#~Z>EBnLfSZQ+|qKZl-0|NsQu{jDHlly0vrh-an zvcTq+nKizwI74F-91C+yz4x<7OO-Jrixq_aU8AqWW~G>Zz79>#(l;-`x3VP#2Jph7 zqO2HU`&(<Lv%<+pk-xRoRkj7BwgX^af-^TNm9wX4m<* zovPBsvpaeHe&37aTp0aY(w@ci(8dQIUhi`!1%$AkYvp*fO%|S#S9=ofJ}N&yUQw}! 
zM_o4IFn;?ySzrGV8oFLd`K@9^`ufT`_xf6#>cW!X{x-qwS-=M+H)owe$ln#{t^2kg zg}Xa*%sA1ZQ5&_eXTF0)!^8TS$Ji&fk2Lg2<72F4Cprngbi`0#CA7a_?E~DrT&Ftj z?=A=_iBJlcY`q5A*`;3UL))2}$f4=!2IAtc{=_iZ+1u08(Iu$B5{bh8tN|gf3|Qgf zIi*_&aB#6VT~9Z}?!IxTlIr{*sR>X9ex30K9^1Vf2j$ORd*NN}q7M?MVy9U?JC$2L ztHP~&mD4^qSr79D#W%-KFt86@@89w(TOLY zqzYh0mY0{;75xK`2?@Ama&G-}z|*c2+UQA%)5z5a(3 zvRON64RQ8ahRuBJEM=TEWQ(TBc!T)*H4RrPC1TH);Q)Z^6rWrgh?2DnOUmQ`pZC+% z4#lcHMqqSa0%gsFAV^IOBMO^v=zL;H9sqzFx}Z(Ox-E$SnI=BuLX)`OCTBADy_=H$VssbQ#ZR>hPTuV6L}t z`cn8VTDyYpeTXM-$^Fn9z`Q$8?xousU)BMHukyN6e>!p8pb zF>u8x+(yIXbdzDHut(3Jg0h1Z+n zGNFzJ){X{|^19-h&n0#C?e<@|1O}7kW`l}s+h}}IRTHy-_fGQfz9RMgz`!De+rQnp z?(Tn0@=)|DtW%ZB3@tj&oO0$-3|BQID^w%9B-tc zkQp)oO`=VOd3*i+2S06VD-9bP%g=E80y3^+1{|cw2;%5xhq7YgSH(oQSz;XBYK(P< zdL0KD00J@7KC1UcO|rw?R2~EmKH0TRlW>23n|pMfJ4)l$P<)W3sT$&R+{I z(Wes04h$qUG@^QX6?nH4CnY|K;50zt_;%Jbn%A_}suW&JEo76!krd?CKF%pOKP;Wp z#o?jP<8*{}*K*Km#Jk!ES+><39!x#II!oA40e+8;rlk4R#2DlsrWR(BF+Mz;b^a?F zb^1^FvMU~CT( zvu2e_KTDMnWOHQX7X#4qeRBhkcy&vHcb?kmcJez z5dp*A9?8p$OcOwnJ-B#U3F}wCxf@*=!Wm+AUG`Q>e2n4c^;qlJ;t}PATPrd(jnA2M z7?*!&dqzSsC;48McVklwOkWHP9!TxCuF2s0r0_v5B`Du#o_{kc+Cp!iX1d@hhQ=kP zsUe7gc9^O;m}VWqcvgJ#IFJt^f*%_zSv(q zvq4gyJv(n?{`jlpAiU_Z(=To=-}oTt3i72HsT*;rhH@}k?qaz$^?rS`0#L$2Om#Vx z?yB#7-||SJ<9&yYq5zNi;lIa(zgF~&5$k3#Jr8DAM4{igYFCnEzHu7UU}SRM z9f+dDW4m?-2y_SNrPD!_DIlxZ6@*M;Fj@TB>;xnqcXnqNh58PT5Q+-5#>Xxt^7>8G zKJA5t8bmz5{TEgfxVsAlu`Ny5Z?yT4s~Y_OKP~|34q4z;rS7pl@bmM>#%oww;@cXN zLBSuKB-vds&~P&pcUFaRjcIB9&=>-x5twrQ>UwNKS-Hn>2}So>}HgOVRbX{T|)nr(~}@xZU?nR|KDL_N&bVhLzxzwL%tY ztg5guw*WVP?dCyc^ekN3oD|QViOPhWi%WlHI`9PJK)ue?&>#WtOQ62i~J_nGP3QcL*>V>-P$HTF*-^>K)`Za=?{$L-9JTpc%V%l za)_~)BXeJqkXgD7nZ<2v$iMA{?C(>RO3~=V2>jIHzyHkw0W`mjjg2cg1B2zgZ2v0F zPcT40^+2c2VrORdLN1qTF+7P$Q%q)yg{#@??v_ADM}PPZo-RxV+=4OJd7gLybTzv5 zyxJU$zjie1pPxk&R?+H3X-Q`dg1YtIQSaM8!AmM72hgV&_T)+VN?H} zrVePaiEwaOniFOw>6aRE-Fow(fIFlSz;jRLz`?Y!p+QqLU7U*-9J^*T-4GZF4Wt>-Ax+wo;pZVUI53dUZe~=}jb&wZQGEj1z;s zpWWfx`P2Qa)<(-#KN-Ci|GF=2#ra}G!r^8Q-616zN`thUAYy|{<>2of=&=o_lG1ff 
zE-pm{1&R`>fK^PZHZ<-)3Ii~S!|Jgg;q5P^tbk}gPIZ(&dwVTj0@oC24s7zIf8MH# zQ=y^!y1tT2&Hi9UCOLL|%t9~~ae%?h$yFbdp_>$=N@d_mbC;Tf#BJ+~JmclyASzOM zIGHa_p54Hvm4t|VB}h>P}-v--vXSxf&vh}-P813y!}MkoOvM80*TS7A5|PT5gVG4WPOf+ zuHxs+ww<1}qOsca*&71}4E1It(R}eFjM%!lDA`$`B#)SxvEf=U7uL14Esc#uQbJTzf4dIuA$hhJ ze*Nlz@VF2!%W7YdCy;TEi!RL}CPq#|(k?@+MOSBtMrCBQs%7+fWuiBw8TEt$T@I>!%Q)x{c_dLp&&N27rOr(u_A?pLWNf} z-eoqlIjvH1bh-tKT&`p>>5N5C8R_b>FqDWr{Py07rENeys=Olj#w22r z5y8_7BOsG~fJ_MzkZRf4WA5z4F3l`7_umqmm z^on=%wFoJ!_4vq0d1-0xl%o_CX02tLc>D@_pelnI=zy65DG>9l!H2#r00pzFG%-Kx zAzj)yHNpidLN+@_;3DuCkK=tEpgmGmM#Sx7LMPA)y}kUKmXZEPRhPeSI6KIcdvP%s zdkP2;nA4tI)N6#!-7gI-=-zOW3%3$y!MeVU3JKZPPn8Do#|epzjh|lcTFhG(=v?2R zTAI(bw9@DqNEH=py;GjRin1U(VO*yv?0sv8o@|D&yq|G=Xx-==RIM^GIzM+KC4-}{ zC)L(U$IEvU9&GZ^*c}1QnmEiqy)u)az_sPXmmHjNywusLmixufvBHlIR7}J=+?SHM zCtcW!Tq?yYQaJ@gP;w}ja$;+vH#d|4!)RJtRV;I_F|B6HhBLcc_c*z1>?%!HGUDqw zZ66;s5C3}F>1L9%0Dpw)rL>c(@z`fkZE3GT2!;U_W_h&up=XJHPgfyrrxXhrk(sOv zj&7Rl-J?Z9Vnf{kzX=CHW7t@hajm8_*PXAiCKZ|dnBtMWB<_KS>SadE4Y_eSo0(r#=P8K`(cG}A=m zSz}otw&4vk3bPnZN?NDh9u}0$_2VlnJ+q7)Lgi>#)v6R!{v-{SW_tlz{1|4*UqCHM z|Dr_q$-`r8m=jrzsZYcyV@obl9y*$xqE-)F3A zE)f|198D@x;>>U1Y;{c$fCNoIo=p`MZmmcYw~-s3GDO9EDQC;P))P1o8!JB*W`OKm{zHpJ*lY^zp5HJBVF7h zM&znb4v6$v^#DZjS{H%n|6W#{;?V zgw-|TA~Q1#Lwbsf#_aE(MFR-3b#MfxSKf#tSyRWWN1Pq*;(Ris6s|}*jQPS zis|+M`aP_;yty@;mj2LsDeUcX88I|_i~sK3Xn=hv?4Ev_?8Bp30#0)pp{$I|Ha;Hr zms=shzT!gdPoGfZ)jY1@e(DcO#r#40E>1nf_R^hXkvpk?exWo$t}L)XCyX`@7!gcO zC5iuI@%#NV+y9NI&)oPVqbOr*UWch?Y;(c84AtcWt|1z3%m^ATE)*`V72xe-`3Q@! 
ztjio}g?>A;a=aAq>>MCoqW#{bWl&agOo||dcspBKCdV3qI)75p-W~=iywJnL*%68V^6XX_a=6^$@xfHw?yIgV(82pZtnU96@bP(EC@8pS zx}hW^bk`<7Tm0T80#;|hQwyNQz*L)=0sMGipeQddu+ti> z8bLn-I6%LD``Oqil!{u&%OlNF(9;7BIm0ZzH89SbvjL8nLvz5Gp={@3HYbPTfvJ&4 zL?n|8c+XR#2ejkh;o}3J+;wwz4-YaDl7dZ0zTFI1*s6AesJMsMXTcztZ2-C-B&ix1 zk+Fzp7mx#cH()>2EBW?qFL12G4Ly;iQ_4(D4N%r>uc^Tp?;9Rw5)erJtVRmL2~SLc zO)TH%w7@{Tgl+;1G5*ge|Na@}Xnf zP;0S!Pei`osCX}~__d0^on79WuI^YwqTzdGAIjG++aKVH`m+v`f41(f#Ef;9QjQFAy(aHAb|{t-A;7Br&kG4+jJU zZ11rsDvkh#!4$kV&J}i@-N%ow&>qZA2MxL&4w>uzPRf!86ihV-Ca0&QFwE|vVkz}+ zaH3bOeC(c>I0b>93FDHFi4|uu|lrZRmH-@Q7_L5ly?eM{l{#1$%*) z4KqVBlADPM6(MwilZOY8_XmV9>r|EelTwjDa+3?<9Qqg);)^9U(_avO01A~LZvt;I zihRvtz$%EggK}HZ)&1<*7p9p(raW>^`}_V_~-bb4a3~T=m7A{_(E; zxQ%dJm?y44b1w;Uqmiq+r~evV1^Bm=M#XZ7=wkha=Rlw_w(Q!7UyO3(NYK zZAUnpc@`I631g zhzJON(ocyYbGJW$1WU6cmw?ZAXB#g4I%O(Z-$(!itcL$*SnGkNA@{RbF2zJvj^y^%uiCJPNnn0bG$BEEcN6DyGetFJja%$TNz6!55gjTj zYIs}e*N`1f)xF8sgWx-LgLEtM(}wny2QVUn))z0*cu`y|c+gd9!%b9UFEguSwEWih$riST@ z2ljhy;>@#z_)c(I>gv!_N^tXdR)r7rk-&Rz2f9OlEhs;BE69kLkWlR$m|2lU67dx0 zh*9svGYh_~N&`T3n?Uw;=lC2;4;U3apU}C=pu)s|@YD0m+?LYe=-_`h(p_(CxT5b1 zlA&)Cq))`*aoxl3-yGzskt?j5tStqRvO?(ujuQ>XqeSZAsrx)(?nY#YZfBcC$ZlR< z_ahxso=^}C{seH02%+r(NgSf_CjIWt6Y?E*C;S^=k(h$=5vLpoZV=|@mfF0ESF+zp za!+<1&Iic}4H(TqBns3YnSfQdtPbSCh%VNjk&;~jDy8oNC`u=0Y!UFqfXTgCP8!J3 z4zaMcRjm46LUG5IhzSdZV@$CkSfT_{Z$uU@*ri{;tgWoVmSQ)7ACgn}yN8AO5B|ze ztBoevFPQR58*&8tJd(oFueTE*@lFl|>zMmNTV3)y^jC2Kp zK}9eLg$dGPrMy(P!MtiSy}ex!mjt6s()K|_k1wUFzMgAVohmlQi%lm1*NOxi`z;(u z)cpn7atY8b^B3&$_(OzzwR>^wo&$`0_ESHeNs6OGwYEEN!HbZh7zs^@l+0H(Hz#jn zOb5+KN=d;cYD)xyrS8@0Ch*2;!=r_WgXqwOH*a$!1_4@7LYjE^U66wV>iaus@xjt% zb)DmQkl)wVq5Jn17tYP)f*PrVPjcSx?u-)bY@=R=S^u8^<4{XKHm}VS*BCOdyT1=9 z`kIxpPemUJs~CyMRiEQ?tE8^Z)b>L*Ns|wUQzGEALkSKVNDqv8ul^Yg_5s8@o0)yr zbp#5Rr^j1AU*Dfe9|OW7)iVX`LA*LLn(Ql5%1z1Y9w=luRPpY_!wb@NJSZqA$mq~e z*o?idnl_oRr#K(3Q}T~br|JfYD$FQn;2`CRJF-oPLZhWmuKb)~#+jrMm^DyIx7K(B1UcH;yxb)Y>kb`z7a-u0C6sIl=LJ)F z{V_EyEp4V5e{(ABU&>us=Van+q+rF^4&?xA2S~~e5E?KXx%+1bb8ApsEb<{9__u+x 
zVc0MQ%Q(-KCjnmw4fM>!;QD=ky@Ch)rq1)FjqgL-)8S6CcQ@3-d|c*te@-2>GwpfZ zxIZ$KioJ8q$WC|o`wLQ9tjLSIo(b~HL}4>F{>TnMfzT^}^nzwh;iXwCP-%KvB)1xv z;{Uh4$MJN(QCio=e7laj^fju<%cb{b`uME>r=)b0Fn^uER5rh$G}7h615d*iiXgZm zM~oMQo8$RMgZkRE$42AGhx^f%rxR_rw(ZXD7tn z2Vp?WO^kx{<0vVZ`+%>x!!immU9x^hxR-icx3<|xNX}&IFaO9JlF7&2w7#gMN6!`# zYa#CKG?{Lj(sgyJ&w6wttK_*j`>41{KN}lKt~`7Wag2B3s0(E&aK6}H9H1ckayXG= z!~Q^yI3fXi5F8j>5PwLsPMK63-ekGXoerF@NyCeSvh?e#%!7&O#8%t^jOw!$3le*phPG&ONgi z?CRzWEOK<{g;_8L3Z|w1A-7=9W*e5uu#9wj>^N;u0wK=iI-Ez%Ond4-kJ&%Z-VXIV znMABja|=w+BvO*oOk%kJU%dMDf%%q$nfYXMbL%((>{oHI z^7J_6ALmwfnORwDLxoPUW zW81RNkHN~L#1Ovg9I3L74Wm3YFG`F*x3`L(o_=js`C$sWiXxuMd(jd!5bFF7m!=SU z<^H{f=7R$n{xy4}g6`n6hm@4jh6b2_2Mc7Wq5y{gU@!QkxuaP z38KM*VSU-wzunE8AIVz*RCA9eKOT}SeB@+X6l|64|5@*G-oPMlPRf3+lz2x%qHAO{ zd*@ov)bOsaFY)>&LKuy`2zrqb37Vi8UqxdZ=scm}48iy;0Qvw;ONb|7Q1wfHS4`zsm1R}5cO3RbNnG5N&B@}z&OcQ>K@fj9YrRY62$;p6a z*?=csGF4RPl|7_W_(GEi6Y8-#G%SUR+Nk4cCZnwrA2^Bvdr)TImkq<04lK|G>@(-) ztgNhBGoJErAtsJCS;f^|oahp(IQJE@gIn#@%m>|vwKruCha-B=&(EIEH{7^u_Y>QN zO5GlN%tO&QQQar?z8!%l2r=8DKSiD+xW-gz(5|i`^WG`XFZF0Q9JwEdDtKEd32ITK z_QXh1o?X@{mQ{y^A&n{oAAPIx zI>i55cHH@p;pWzrOYqX$6o~9?pUf0$U4r+z3YBa@FXRnh)T_JdE9YwN#xReQ+BqoWXzMhk0%T z5Bj;;YpZn39+imMHBq-@|nJeB{a7-HqSa=Il#=ECn8!BLicIS!r8yK^3t?uf;pto3jL=4Z3iV8FI!usk+FPES?w^-^6!8VLl)sw6!)T~ z#YN^^^$g%0$wUq~4M0;iO$!fq`$09I;O6=oaI9r5a&mLawOS;T76E^dLOc?<3%6GR z#x+<8wFvKkwg?)Bi<_HKqdqz&M$m$pgCpp7J!? z%QrYOE7rDqqv<1e&|=Z_NT7hT^Zx2J5>euV64ot0`Y<`(?Y?)Ws4(Viac?cXWUbj} ztZ*vQH*U{w=II*Am^e7S+H5ziTJ+lpC=d)4q0IXjS9YjuKsLw>x#HHsf-3Wo_$wbv z6O(Lz*v9CxGD{#>foG*NGcl2)N6311a>fO7@#+56n;9+AEEd}7)YO6=8+P9}Tfi6m-2J>9F zKfxm87{K5cj0nD)i$fP9VcL?S<9X0jdMpw|~uoc>VcJ|wf9FY5!cUR|d_Lh9S{ang^UXun3!zWSV9Vm;p z7yU4doypEGTpb}hV84K72ocrD@;~I0qYeX?IWWR_5^Q^tY;>d{Jl99DfPsguf2s+Y& zGx+x&%-$bMQY_mB_i)~oY4-6+K2A2?w!eQ@fHVQ5JsR3qzo(}MG7DX?I@&tR6#`d? 
z1bsccGi@%4h(s@ovjuQ{1ClBHZ{ooMU57AA(BiK)l!_DJkMQ4x{F-cVIwfgN5y|?F zeN7qL(hFZB|0vU7y#ZkfHK%3FLS;oaCW*RP?||Bg!zFGUKbOA=XEMyYJHUeGU}1?1 zZD(6rs5aVQHfQq*9Hqhu+mHpQ%OzRqz?#6DpsxsqSb~n^FPuMEz8kP7^ur^bb0GGd z!7oiv{5{t^W#MQUGlN@U4ZlXDC&J5!AINy#bGsd7c>n&pm=nj4^d_+5EfJO2dYLGy z`w2i3;0z89dfp#%#M;O}t2G>XBL?_pbPD=fe~(K~Zvh1CEX*At);=UcA6P*A0g!2b z7^)+nCyH9JGc#8$O2dS^eFb(&uyAm5XvVjsMBqQA5|pZ}9wU_Wpo0+u=>EB*8~z2r z1+JcY*$VlAa2$+s*wzwupmc7!|)$JFOk&%&?m)FzN z1HoSME25xp&9eGMKR!sUItsY2mt=l3fMa$6PO#sU;5~r67FgD0ft z$Fu7WD{1~u2bVi0V~k(#pKK{Vmegy~dAhSa*+F@EfzlIvO`pGf0oMq=CcO35;poQj zB%l!kTg(quIt3fot%{0|*|HLez_uQ=VYMN$N=m&uwga}};&2WEgB1(r?8ay_B+FdP z%%D*$0U8gB2k7E7e2oiPoSmJW04G-qg?$I*_yZ(*Q+$R(^O^QOij|Dl_4`$IbwPFX zfBeCIb?5Ep=LbsO_I7Xi6%#;~mdFHo&e@JXoz#2;UI7#`>w`RTxK_JmPaTx+kp!q& zED#D`y>yQ;eKgA+n{V7bw^~R^JRFxd#y>@6mfey}A@Y@BKTPF$7sfY=u2ShYuOCq_ zrNqX@M(lM{!22Gly)YrW0gP_6H5hQs3AkG0>_q4=c`#Wq6D;c^FqS`^faxm6&4^HF%KROn2}Sw~_(L9%6WZ$kXS zD$EN zW!8(dNTpK!g}I^k^jSE1WdLkSK46~x3KW^Jx3{w1oH#D;KS3(sT z803o{@Ir;*}57GurIH#Pe)XoWnE%+rG3!v10x4> zD7l{<9SmNNK^nBF+c(vl!1kt3*DHV{o0X^ zfq_JR%>8vM1_s9F!GYw7eo%QDXyd@t1W-{_0m6c5L1qjeEt%iw6 zmq5_%F9dw%@>*Gf1)Llle8RdQG>j4~?|&f;LbR2Erul3ImfHQ{_HSZhBEZr)&4E8Q zPYeLLfs}n7iM{ssZ`v7!DKP7KQ9^TcwSwEQw^+pTC$B70SpIXLt%eGdpM8b%eI|M4 z(Jsta`aPtDov>kjp#u*epXrHt{1YjkyF`>gU)CI)|H_S^-vYvG0P~|Dl?CO-|B6(U{-6yo6q{$k z{`txW6&YDy3~c?r;2YCySK|0_humtr(HTI_2Ig`kvsm##rS`D{IU?w{B+ zbFkSB%2@mr#r3y|ZKmp^U%!4e%|L>v$f&B~vvIpBDdA{L$W7_DUy*2BeSU{wYH8gl zz`@2Q_ox~IX)#lg%i5qg1P9DnjL?C*g55V}fUj)mlCUELfU%%3#%rI^k&ytIrO8Pl z@@?Va)m2Y>`*@N47pMaT)Wv462bY$Yhr{_8fq(j?92er5?QJUp5|W*hlhM2D*X!NW zJXmw(+EJmQ@_gvV<5Pg`;~yB<842-u)vC9DG2pZ8Eua9l=R0UGiyvtOv;g1 zK#OW$-)IU=8ME?(ln!dAX&08AO7|DqU60LFhVy#PJg##%X}jD{Ki`~^KA#e{ftHSN zEzPDuFLeuJ3L|DIQ+>$v&zALW(l$YTw(Jx(%lbg)4KGXMj?Wa3g0y-)^TDIxhfLtn7=H6ieqZ}=Ys_|o^4WzPgJkL%WA zcd!|^5=?yp+4Z$M!N_lb>cfW*u@-Lx&z~qEC`jXapZ(!4fhfHQYVx(u6Ijy&-;4ga zqUpoE>`?RqE`p5gHYvuvU$^~%>Vb`l8QLwsJeCppJYX6?=%fZ47ysUsDMlsvEsVnu 
zw2JnhW`pDQ1ERfz1Ypn+hG;a}htTnmu6O##1?*YEdALshsn)YRdv@bbX}wxV_iD5I zli*>b&2Rm;^y{hTS;=vB*jw?MDVYP@2NSjP@lsmyYCUUnN6hC%`*!#NjSd0>?))$i zTLiih+RXa%{e2pZsDJnO!lyaDWPN>+#1#A)0&V1?I{omh4WW9=KLa2wUsPW(D2DN2=02Gs)f&%!_OYE}l%mVKWFc{Sl z0=qq$e^5nLmFt7^n5eDNaX}lmm+kU>7e(Z$r`6N7va6Nn&#P0XvgW(%%<_}Avs0&M z*Tbp%(+8cWKTJ_pqd`yBl)Go8+uF6&GJFzmq)kDN3PMtsVRg|14vL(Tl9;8=qGL4C zThj`BhP^^m0blS&$KQ}Dv*HnuxAiN4h*8v_?-DfQP5xktbcg)eBXA5;-fnBQR9(>n zIdleSjk&?4PAt&)z=GL6Sa2Z%bF;IPn7IjGy9vNCYysApx-S6Mtu+i4_2w5A^e}g?YAMnqo&5i!I`wn0=3^hvp=Lo)cN%{PCN)oQVcpex_94HucQq8_(Y> zHwh<)r&=yM$JJybZr6{MMyG$gj(M*JTluxPo&R}}uDINP@%yhlXvK|R-Rm}W3fIUN zRWrEenih|E&U~X9a6u1{w1k0B(x?s5RTWeNt}ZShi+E;sHWvQdUvE_CjpO!x&9fr` zV=^sm?XNIkwHFz+KfTX}MLZu#+RuejNc0mV(TgX`F;!m}$9^xcu!skgw|>V4jc zkCim4r%}~&^hsY^3TXq5`_wziPXz&e+PWl`3ZMu}b=KPt1=NS~Lm_xUR)0`R0K}Xe zJa)!7z_SM$)^PGe@iJK{8=TE4+EYTe%zFK&Ed)QS$bHYNuu)w8wdaSkuJTAc52wz} z0=vjIEqh+qp5tL@WzEC>AD4?AOp%IrLU5LB@qb!7Y=d4<%PI!2=9AjNB#&ThS7>Kw z5c|AARB&xx!{bfKPm_;LRUiH<&u9YjjPHfT=`&0}i?yh~JzhTNTx4uG%-M(RZ)|)& zQUiZ0I3HlIS`g4kFI7e|R@tzo|3TFZ%cu`|!N5~M-6|N38zDlj$~Sg)otK+S0a)G) z43W#lqTU9<0%=;vgoy*)iT*$_4ywDD81%apT#QTskG=B@`4CVB-?VE&cT?_y{m_gozk1$VdnX0J%YOv8ldZwz~Vn%mpJ?GgoG;0r` zNGNV!i>^+9dB8t{g{jj3c3j}Zut*F5Jr`8)#Bt%0X>kNuQSjawA0pMOy&TB#ub`kI zuZw!V!~m0ue0R7eX>g{<$WYmkk1Mi>(Hog24DF;OC&%o7IV(Rue}!GKH}U+1X-|dV zY|-bf^Ye3nLS-z58^8Jm`h*wJut>ExD=rq+$6WXvz$yXE3YOfZAoV&+CSgefU^?Id ziwH?ZBY`H+pIcmXH8z&`Hw-}$_tT3t05?h!rL3hTL+1|Q^GsdWDb;ff()ZWMXz)0L8|}c7|J$h=k!o!@@FTz72SB(Y$;q zVV37`_CW{$_|_z}wKK!DKFqkt0`O5kne}9UUq!Fg{$=Qo=?y|&0K8<vV z&;^a5D^&pLsXd&$|A3C(;>n#srv%I{hxd2E*miGsH#WKP4>*r;K-)n_PY>7vRi?VS z%)sr4900&T6ujNq4k}D29w>mV0v|*MR`2B?4r) zSskCA79`@AoS^|UGrW=fCnvFLAU^>QPE9t)%Q%>y=+oDO3X$BGE_v@2jG6*+M8$Kn z<;7h`*z=>nVf)*yC{MfFho5`6Mh`N}h|(0V_!MF+(z&3`5#f!FYL}fY%~0d{!uS2 zM}q}+n@b-C=!Ghv*8tLh)XIy9oFDfgtJ12h0&%beYSbBzelk+?t(O?>D zJw^UE3N9w{wdfR!#xBVc4%$9IuSuf%mc7XcbwFfX1jJ%&1az`UKXhOgYX5HzVg_w3 zNtpEGbaej7ljFKr^N(;Fh}A(I8*W|ie0^WI*#B$Sr4>z$$!W>bIOhb<7fqje0knB6 
zjV@VY6e+L%w-!p@W3Uu6@P%{!Bm0|T5#uS~CoHe5)1jkrr6NCK?7=Ti6T|E@ZwcJf z063s0ioOk#6_T^IVFVvWc z(_H$2MaUxqjL!0JB0Q8`rRU|KG zR!EkB8DfCEoW@eFMcm&4n_k`<+md~p(_gKHRU3&hE+OWGYgThJGkGDdjqp{l1f^;q zDC9@>hz^K@0=ogA8pM~t+Z!A2gT1+M@16)$JY&E^^H>_Exw-kt@@3VUgV{ZS{%5lD zP?|QKc-yYleK~7CJ4t0jD6`lP^Ro+Mpd%%C+#nQG^6T58&%CzslgN_?Oik>9!zHp3 zgqrh#npMyifV#oUi=S=t$sPO#bzyi!go&A%o(mBGn54ZSr3k^KAGQ%)WJS)42vL=# zFvFor>{l)xHfhBcswD%2uB_CVfRO<=_Ym0lkn~MWY3b=B@K=g?!UVuB83(;zR>rUc zLZ1RafgZaFbjkEsh`@#F9+af6XfLxPI@v4^VwlV}#kFNj9Oc__o`=IV6fJ(|gXf2v z`{(=J4FF~AH1QKjw}ej81`^zJDqH}-Nw+Cn48(42CNWj{fi=>t%MmsMtwx&w>~}vX zllo8%85$azv)?zsxc0!GL)vMH{xfh}<-rQ=*sy_%iq}(a@oHSZ>4I5f{K`qf^Ydoy zru<~V?GAK4Pd5`IZ&1}Ao#`LKzn@^IN?Igw-Jk7NF2_60cg+&f6kT=v+5s~(ea1=aO=lKO!7sX_WAW4);oTI@7g zjHTRH(47#Ik{XGjZv&V1p*qjHpGd6lmEjJ|4Gj|$5=03A?@I!o`{k%ouy_XT<1{Ve zKfz;9o8NgSBC7VrOYgFS z5j8XY?CBsXOBk2jvp(^5>gp#Pd&8sOuhQh9?2yzFH2rFjeUX}xGPYq2h?2>2;KPB= z_JOpR)I(!?cr0$MI~iJe+TN^zD*LLvM|qVGsPIXF>U*{Nbl41z;qgYe)It+|zu~inGv1eei;nnQ84(6xLwKqthlSyg4@ayq8 z4S(~bb3VWnCF~V=uw+n0%_<*q{SVv>IJW^U=4D_`_Q4n2$S@<_x9OXOdP-tGz51K2 zu<{fBi}E@dAg;xFa5GpV1wIZxG4{tCR-byUpWk*t0Y$alPDWGX8}h zb2uBT_$yO;E#m~=?|{U39Z?c0Q8GqrVrEQ?N3utE27Nx#^w*0507KCWLumR-Ku5yP zdkR@a=R{zC*@wVXfbpB93ew{J@ZYT0Py?$zhPnSC;^4EbH_G$0{6@3&ja~~Fuvx*z$+5y zB$FplUV%)3g98cx<)F>iraChi-unAPZbkstU3CchOD7=5TG@Q2iWAgV5NiWg z4?o_5dO9M>b0zF$Hv|eiZ7OhV0JvT(O{`URaEFO>>-6_qtL-N%)EI;82#6>j<}e#Yv_Sys3@= z5PFVkJ!rMJaRY(kRKYDArtFTr7P23pBs@5Qn36GG* z#%2F^ufL!Mf@HCfQ7gK2LD!-XR;57|CnYm#XgJL|-_zLeF26we>N5QN9|VXK5PX0i zz=`$XfBgWUP*73jN6_@004`nKpd5Sl*Vk#29JvB05sxq&eFeVS?T$VhwA^eRTDjZa ztUKkCKJMNwAdFd!!R@P1);dJ^3V=cYQrNA^fEgmXr5BgiGxFLqc{WjQ_U+us>KetiB~WLpt&Vf|%bu<*|zAyfqH02KQ zH%Xb9q%5IFme$ZO5*Kd4tgX1P)#vlx4_4Z@V{mnE+|AAhPGz(+t{)C+#%{dss0}#~ z0@M%uqy(QnQ(~~2H8%gE;XNL& z#v`X~*Y-bd-;GWJh+7Hp5>>i1=Y{z5-p3cmBcD77Iec>iY^eYj5m^Bx<-LzgI*7ZL zU%lxMylA6HNcY}#C#k85HZ)KISx)r=i$0ASd5rl^H#W4(AF!B#u^dY-q4$WP0=~k_ zva{4#LSmiY*7PVJ^JKY~=HAT_ZT)SQETE8F`s4b6$C zIfJANqvJS?A@K6qTmlN7tE~c&$N{KIX10s=n=$cHfR4gA?n^Wp^2$H*h}m 
zy;bgy!x#jzzSX9~k#7pP)Lm#__x6oeG7QaM7wV zU1Ehw+T`Tyt7keOxCFBB$ZK@QC|0jEDIc9L25uDnN=DzmXScRy;IF(gFfaf+>VI#3 z>*XTi2Mh!eW^j??7+)$*r6p>6=xvm4rAWt09Fa+fJh5i@V1avABe3RnnE!2eo7>7a zB#8Pqsmp6wK8~;Z%aXY=+uZ0SD;4k#0fryIcdK%EX+ADZ(t!ZCrgA3@@r8Iq!^f$cz;Kp4<$0f&^Jpn2asO4Ad|N)aBWj-s-IS0?NnNN+BO zj!uYh5JdG$WNFSXMlNrL$lnuzSCDjaVhLPHzzBZ!&Fsse(CLV4yxE6XDWbc9;cy=xsI_tqgLt8g!000H|zjUhkSx6~p z4_ctMD8C(}Bjiytq_*9UTt#_K^4XtH)D@K0jBLdsHL1ZF_U5~|ZwxBLHDhJq81JeM z=ugIMg!G?5?53=N#9+E+aJ<6Z?n1(VaEl8;8| zUVa>q^~rz(BmPTI6J4;72;pVtGx(F@`rx`=(5J!o>EL_(#N{ zi_;V!7Oe7{KVJQ7@2ZQ9{ImV=UD0`{Y?-YpyCFE@Wxf!HHzqxNq|E@BUH2>X@ zuI0`2x;o~6emv2(@^F0y1t012$@R=a)+vM7lQZu|h0>kev0`Rn%0fdAHrr)DN579J z&n{gY5{&M^6lIBx{<|wT~e5(#a9jduPvwry4F>ddH92ge}k2#GIGZ zeY~xlTVDIR({)Lk!^?=UK?~~QS%PVK7z@isy6T6rE~?yNY{FwRhVV6uH3pbR3kSs~ zySq=x$uLEVRLcD`21hUSU=}b~0Ug-zK5VpP-E6qb%LXepp9nd|k-vQUPFS%whpOcc z^V5Z~@ZzPseVSWUC_lR2s`X2HzL_`%B`)d1>Sjx(#p=I_2S4p2KK|q9;{bT|(@q=q zAeVYvBy?k|@^Bwd_S*{}e|L3FXfUY{a#OQftJg><{foB?UsJ3SWbYija~k*E5+f#F zwpUmBvq5YgT*t^hs}e-`>&@9 zZl(bHy=jJd_7ljYgtUNq_%SFP9Er^U^2P0=8(WL|ctb-|c7P40@MFqsxl?zkS5iN3Z!0CsWha{8sK}rX)#?*@wE?MWZto9r+dd8gW z+K8?V*J4l_5ypcDhj`YrXH~KO-v~Z16g~kE1Tb3rn0pvB*4NqI1o;IOLDuwg!DKFW z8lEP3Q87}P+K_s|VO=&HlwJ1Iw_~2OZWXtEYbQmi$t_`~ZhNCFvxx7ME7em}C zaToGeD?9*|1NG#vUzxuY6((W;*k3Fx{J^x6Y>jZ*cwYG(PC($UM)UgZ+_yPkvDNe3 zx2nq79kY#eTkd>%vbu1xs+H^H?(#zok?JFD&j{xoTpMR;Z1AT0qGT)xg zt$g%BK$D;v(EnrQMh(Cv>A8=ZSKs6`>0>*Z=#uv^AQ8Tw%EWTzXyUOA(#tqDzdl>@ zw=;gFiPddv`}j9SfZD(RJ=X5{`ZiWFL);LG!ut9z{w$1-;jZ%2rnN+Ezn;d|DaJWD z5B-zAWnH}v(GcB}=q+mQ_$)RDpXU*ctUd={>y`FN2!M*@*&;=vO693DW;8Jp09kue zt)8MLcA27T(A*CJ-?v94+$g=|t(ZPj}3m~-Y=shHoExu-}e7j3Jhi{;Ktnxt+2q=mcRk6!o9 z-~Gs0mo>GRXJh#PQG-uaHe>pqv+LIS}McmZwV_x*%Ac`e&0T97iXNGkV}e#XfCS#z=4QUl(V4r3XG*UOKry20awewE++9+w*B_eVqpuWXNh9bmTA+-xceWsR?*>ciyRs zSA4|z7~Cfisb;<2`CtRGiKXHk=*L~%0%-|Fl16rK>@jmGz8wFe&?EnnpO3dcsBpJP zRJ57BS|H2$*9QCAabn}U=y7S66qRz3;-5XHu^i!1u2L}uajLI3j+j5}Wt{qqzigoe 
z(L+zAdz@MGA-oIJCqC}F;hnT>paL205s^|z(7N`lu8C)|S#RAU6vzgIQU>SH>x}+rOoNA&Fk?8l02@RN?K|BJ7_9{S50dMp1Ehnd!GZi)U z)YKG;-W~q3T$Rlf&6qMLlMsH}H6|p)mYZDQ8+=Kb>2{rXBnVMio#oox zN_Ux}%&s#M$`uSu+Y8dfF@Tp-E0v^*tF1+Kt1no#mF^!f!6QsX57W=h1w5K0hk>6} zCd|3{jY~r;UB_ZVpXCUHy5@{y>fCVL{>{E$M%bG|cJZwn(IH?$ z@PG-cic@uO@;VqDP)ew=`#gx`1j=14+fA@>4bZxKJ(<8Jb zP|NF|wcn!5hHAb3l|E%!E14HNhMSO3Bg}N#P@vh0weP@hXKgK6q)6{>Ib z2#uq`bqd5}+AMeVl#zv&fDd^WZEDlFY_smv!KSPI8*Wz&*D10;_ zJhx(1AQgi*A8%aKsI3)uFuN%X|QTMM-yG1}A-=4%Zm5dn*I+fJed77DwQqZTX zh&8c5d@)2%X;umS>dFt9OZlxb`KPi*)lRT&O`GGi<+?)p|KYDTMb%5lr1B6UEh4 z2j;&zp&o6C&2xl&ZMhf4RiyZsQCk`dx_|>Wj{5kDba*IjWp&v3rBvH#GCom!TZ*2I z^;3Ir&n7UW>YA_3QltfZw23BXX(UWJBi?+y3ou(gOuJ$j_woFP44Wjm(=C#qs_HN1 zk9~8A_UR?yS5;oUvQfQc0wmn{yq1@;FB-WzD-4b;`rN3IZEg33rk^84U`QWv5-WXV zx71d|ozl=qFsr)y(4(382l5GPGCHZcr4=DIiQ4$gr{3>K)BD8HU&51eK~Uq)@o8pw z$osnL2(YbX{K#TbZQhPgiC%{^Gj*x%MGtI)Da^|?&zP1J7fT(;Gpl}07k$PuOxX(r zHbI^$rx4=xq%qyUMlpnlGgJD8&MH0$qWX#=CRT5JMm9=j!l>}UvL(2H~2VMFNTJKstQ zsPLI$3>;_CW4?>>YC@x?mdn%Mxn0s#MY6>!bB^<8plO?05rPNJA@-vqBY1QNQ6aWO z81fh0m?({(;Kxsb-V}@CM07!6cE>-Zxh}8`g9syN zNC=JxQHW9rCVUS?*U#=-#1CN=?cNW-ms5Z7^164lTsSGZ zvArs9gk&BOX+c0x(!3fV9wl7A31_4!SE7kWej`musWiVNc7MMMPN$L?$;Z&SdV#HV zJP;lk8y5#o3P^%vL`VozxJP8Oi0)lg8a22|QVfkObRW-(dK}nVqRcF~+dYjs_^7B7 zJc4&jc*~V?wzl#+4q>wVYf|IJdGaQ*PJZr$%30J@#;w5uxnIfMC6S?B|C*!Js_}~b zKtLU>s|*bedrwF}Hc|L1W@BJ_+tdw6=*x$*JKjs^$!ck2*;pfcn}P&L!rn5N`tkNo z6unGb#u#2bvVuGL^p7tW1gnvigtou%?^_OnzsX0LFtfL1qX@f6el4d)s3=mj*yjtX z`rV7NO%e3tinJh}vAD&-TFK}LeZL11k*J)jNGm3upS@%~!ZSbdws(+u_)zOkV69%V z$iJok$CYkW9}L)9KCS0~%(lSFe3blADn7d{xr!vK`K>V@EfAU_7xQdrlpyKgV33($pl>M? 
zCm9f4rDuVuwk#^z`=4I}lzd?!AyCB$0l-E~Eb#kLwZ7xiZgUdqPd6*(I_%9u@Sc8Q zTKj0JWieGOy4c2=xUkt<>rA$jp^W3HXQ|Q&e*SFq-2U_Lo&Y17v2a30`thHPv%iBT zrbHSbr;Gtq?H{s4PisAN6=@SdGaK${*NEI=Vg;)cqc)w5^5u%WT^v0AS%i84 zezB|#2iw^1-$_8x%T%5skw+g#TW{_oLf&Or?ANGnarBL|x6g!6Ys?gtHD0@9LNX8N zwP|p^RL15YF!_vHns$Qw+A{&IDIf_2R+_z*x>MEGRYA%3aQXe|quXqsSqyKWK^Ndb zxv;N8T4DwChLJ8gsm6`2*yx)XG_+6-gi5sA1y*yeC z4UI2f!oL5Ko5-$nH`ZpbpATHP=rZoA+E@owBq4aX)dG5mrHaLiOJZrnTb4~dm3sAw&+v2vOq`*?c8+vA**VRJ>$Ck=&kKn{_ z;uZx?L$$w=$BpUEu{7Vsl1+WWso^NWHAWTxk){~IKS&5?#D@IlO@;l0JEnbt?NtwK%VM`#AOsQn@53;zK(%W$fPszMcfh_+(EHSZoyzkyb zaC=dx(-`(#D<2){ot>S>jaj?q3!qG4u`j2kVL9;|gnXU^ztYAHV98GW`g=*n%NGL$ z2e9heU)F3B5I9a{0ESsWc~4u7xll zQIaeB1B*6wWVeeUqWA0{#Ru=S1hEh3LlSW{m(yHBX5T?va_S=q7DgIy0A)Z6p6Oo? z%|2)7d-(JI6A?*zNB+1949v)`GNfGH9~5`;@6C-fYt%+^#p$WX=S4mT2z6vtmwez( zC*DdU(ravjum0L50gpP`_}Y}jM?eSfp8SWXWyE;lEUF?zw-QgXW9#pCKW~Y5kd1FS ztxZ(xXdxnC3RI+c@GgXvm1lWzR_mLlf|i`Bg8JOdiZB_*cic=yZ@#vTjv?>aIL2Sf zQRSUJe$_H61(3&AGlIqgS(L_>1XY!xzpF#um+xn>G)U%UpC7=ue&_Me&Vl_>R&?M{ z6INRtQe1k19pZ0VtLvI#bMLrGT=#Rg_jR_cujD_y1l)#94F=`tf@D?vt2z44lZJxp zt4P18xX;7EBmGDEMN$=v;-})22HIvRU~<`pklMZmw>?ZAgAF8>3&O2M~jUXeX|xzbrx%t zc5@kT_#XBT*uuJhZa+P}?pF#ly&Ejl+9zXeMP+>{l^+pV4?=^3NBPDF5(XC=wZ@g; zn)ZtaEn(B4*xH^9Eh&>Ij0;JvZG&rTLksCnF%Y}^rC{lV(`Cre_V(Bp zR03udozDLWRjP#Z#dljL!WTF6qkOb&2$%f6yUVJg3cJ>tFux+INONhXJYrgt5`rLU zt+UE@v0fNF3C{cwl^mDtg+;vRJjo)guytO_Z{Jo%BW>`DKd2BsgxjJ-G!f(M{JAA&08Vs=RsiA(c ze*B2WI$_;MAV|wxsuU@Cvc(=CLYtnPJv}?K(z@k&5XR_qvRdtMcs-*2+j6l6e}Ua< zp$dBfkhgB0_onx766c`&d^;-Of&!_iKTl1uU?E0`<#vhXO|FXvAMGE7ewBbW;rn-T zZ@u0tox0}f))k0C$YHq*jc@6LEXQuOT4mEE794bQcUkQPn1gSSQ4#z*`_kB_GZ-z= z2>w?)Yx`)yb7_>s3~tZN@Zu=@r4wx)nBFBLldyA@fPMg@=o}~)4jb5Y}8TyM4n_Z<$u0&C!(>_c4cYuqvx%qn^PKxx@#z(YLC2F+KO>V(9HrTQ_ zmC%Fl6wI<%K-{wez6JM;PN!laBu@>)|QSenT#2tV(EL#!ea>F+(5{dF9M!Uft4j&Z0>}O zl`NQwQ;?4$;(lG$gplTRIIGF+WT8z~AAEMQ9MVnpH~6{p13XjCG$BWaAR#-MW(gxE2Hf*5=xv9LXN+@%2gYmJnD&aB=i9(YA#hzNRCtfc&zcnj;S z?b$b^oOoz>tisuN8JCcF$sN;T6M}hz1ZMJMVu#ys-(Rx>` 
znAl<8FN)+|Ii>jvo>W^VJ>0FYz$WfE!tl9#Y0}UfD~irlO8@fjgLnkBoIU+(#`pSB$dCQ2)#7Z6)C~8F|L-iyyUv>?DKy z6?~0N6hZ%jX!rKG-Y#ngATL@SZ?}qGx?HM;gBS#Ze!PF8K?_X+EQ^3hD!8YxeXG}P zrm*Gar{*F#_S_DM8btL)hS6dfOI|XeoE#kF{5QEAHy?WXG}(m5f4${Ni0il5IFP1M(QE3=beBmyNRP!e^2FQJmRP>6RIV-J@$isZvkoV z&#;~hSGx}C#csxK1`I`Txq7AB!r+eRl5&*h2(Ik1RADXQqd4-iz~95wt+>FJUah9W z8Ms}p!ntd1lvs-evDqF;vOQ`zy!}Lx8!ZlYS#FN2vr|&v$3y(4e$LdHnbfoPrl+1p z_nsc4fqn}r)y33;BV(|NM_a7!rfKoAEB8v)ZV7repW?_lL{QW!gLPbVN*m_lTHIBS zzCCUjpn&PY20qcB=g-8^;6eF_fdCjTA=NSOpH*uFpmVD7%TcLZ-+!AxW^^0RbU#^w z6(+BwZDEyL3y&6IQH+e%WBI!k%-uRzGKD&#l6Cd_t}lA2-E?`*E{)sn^<;GM-!I%9 zw}X5Du2oJWz$MyCVTIzSa(m3erUe701)$okytXA)yew%mpL9-?$&D&!pY z)Ts8nH;!m2P}_~HXDu)#C-@O#NugR@WuE7NSnXo9d`C^XYg?!fDf*XJ#ReK%*6040 z0lL=?dzpE0BxfRcPm@JC14m;0Y%OS_G;?;(F415tEDp(i2FlCQu_1c|WV(`y!>hZA zHXk~2iW(1}K$;{5m+UF$_ib{9>eA1Q%|4_-0OV_3!;0?as%T}qkA683g~ak}D9X`P z=+Eak>9J8OeBxv=3xWUjwpnJr-C!ki60`I1eZzm^1&3z|3PUF>x!xV?n#8D+kf^i< z`^6GG{kFbS$rHD;%rsXE==ey!E~?o?2;>mwuX(G*1|O``0^6({3ut>tP(ch0P}&7<`Cy5hsk z$5Zj;eRNyW4qr2(D~EwU9Cx*d*ta94{}JbKR5~|qupe7ziZs>bgX?}KZtP5YdjENq$zLk*T$Pw8 z1#fS2qjr2^JOj2bzlV2KqcrPxh;r1jaO7zki?SacXUQ!GlIQ*z6L{e%m`TW}n-xD7 zgD#+#*SC5}`b1L(C;U@TQ#~!f>X|@e6EV=6x1#t6b+osCzsvLmXO6$nyc6vOVNLg5$UQ@&DIc802V)`vfTkEe(NF z>NmeBVYYUs)KN~4WMsZtv60+RxZI7_r`yxzV8YuK-fF`nyoIZ7(f!YqQt-8RXUqQglR4W1 z(UhKkD4y-^$NN)3@aTa_MUH2hzuvp%xzZsMEm3U9Eu&kOgbTD*d#wLic_#LRfeFT5(*7J4QbO(qW`N* zS=1@lEhi-=Z(5ga^I@aqYWm6EqdbTMo~V4)t3SVgUfMH^I1(L&fzyAueEYdPHc~z& zEaCo9#`V>ZwKXZHM*w80{PIl(~P5XLDmouoz9m)SDw> zrJ1Jvx&xk8ey^QD@3W4#u6ZMps*d)f%%h9-34f16;f?)Jbdt3~cdHi>Uc!ZD^P>hk zejlIbWch!&`R{fv3)`5?6s+oMr?@_Zn`CIFkxz1OQd>a5{>teC#jV`|>k3kz!+yxK zfV_;y^C81@#J?#CGkv~HnU;}>5fCRb_bNweuN5XMLvamI>03AApTb|ZkzcC~jt^Q6 z=8PSsMMbZaK`y1G#oOKePEN~_SPgrBbS1SMe!R8m>G)sIuP31<-(#W{h3YbNLuO~^E=%SK9TvH54o?O+_pYMpRJN~87_;pD(R;BI~~pp zVSPety&7K=^zO3X#4!6D4O==zu)x9R`96|31r=KIwF{oC^=)J#RG--G$=eqFX6055-Fbt#$|dMSq*uq-EwT<$E8L7$mnPS-1obmkzK5s?oHhb)ach82Oi(S zEOGzd@$(Xlq9mqbi_n$oM$&bl(bNgXF}j^-*@$=U;<=L*X;<*5_LjACv9>h-^xZZ@ 
zVHcqS<{$hXcj@fma-;pr<54AfLWoWpH~D1r)Gpm@n;Be(sS8c1U#D=((C3D3ioAXI z4t?7DFCF@h<^%ieXQLqrCGGu?Vno75zpsCpYwD^zJ%Ew9Yi$kabR;MUETT-8lCwo7 z)~;k$oHn|1CeB))a^F5#iYXcVd|sp2Y_Yk>%7|btA&ecu4v`Wt{Q<3`V#p9Q?&k&# zu6xEiG~Mv@iOnrGqeDWEwJ{mYMkxQg2#4n_h^vrVaK=o*rYdf8aiJ*26Ueob{O^s~ zL1Y#O;e7TKTl^M$(wI-$NZe_l&(Y+%9L=QHFg{TuLgjI1zjNCTs&t0W-6~j$+&m{q z66{(#d+;Vjy!ec<<-BrrEBa~qPFUFWdQc_SR@u%r;9;|q!zIj2Zu!tlq$ohTpV?ddS0ISRGYuk=YkgYr4dYR(YDFh)|d!_PSKW zc1t!8Gz4^GcE*5qDS4c;#NV~=y2|!fbcH`QS}pDYEJeZKkK@$Z8mX61NK+c?3)VHm z>%)8?60)u&4S_(rc0ZO}iVvm>)np)Y_Qs^r*K-NtoCKC%YP{X4nTYd$hHLL?k@P|~ z^>hske0P89xVY5G)}LCuw$3D{iv&>{w*GTIUmrIznz%o)NNIA9^0mtK7HhGTUEW>x zOe}{<+4A)7sng|(VNjF6*#V#Md4JalnjSWUXe zNtY=PMzZe_f$LH@NC87EW?65)1JqDY5x^V$V}T9o8XX!F&l+ztw;zV#1Ihcn3g4$& z5+RrS-gzVYt7hNMyVHuIfOum+_+03_FYqKccPoU4wd>=Ty)9>?9($dq1}$;HC%EVW z6R`i1wmT(@?@m^|cP2L}g?(Dq$G%Hd@f2NcI=;F84vG$bJVm})-95XAPJD=)5EAs* z=mhWSAxAl1_IzvXx+6N9f-Y0zG?j9h7#SZA zODjS2rM0ci*~y6(d)Nsz>);)!6w~baDxorP(tB7c3Y+xu@%x(jTWj~PIGt5SeHUY6<3gn@H&<6wM8x8f65x0$ zFmfc0$FjQrtKB5wd#N|5#Pf6PIn`k6$>(aoq(sJgY}yzWT-z9TosFV{*n3>vA_++{ z^~c@kd+A{bZ!yBuE1z@{aSRwD%BEnwtiI9<)MeM2n(f~8ffLbO0MNRL^(<)Ab|+Y^ zu}>aE9;dH>b`C^b7~9C~pm{aym{ic|IrOaIVV(cqNxLw`My_(lcARJ^1@zk0)e3=& z{{&MZ)>#KUbekp5ATzgDn-ywHhVzKn#Oi;4h%onkrO?Sfd;CjsxBq;CddWw6>`bxp z=BPgD{joh%%;)xDZTZZ~cjrnC9Jp3$Ez!9xy~ao4(iFz z*1Z|0%g^0YaZDP+eDo*F@QhH)n8?tCAaL}K3!S+Ng{dln(KVv-a_pOQ*|9x7IVK$= z$6IT5EAA*>lYT`@%hPI$I|(uIcm_X!>*;7~-;4dC)EC^xj;{If)1LL9doHf3d(NA# zp2uW3hu;`e--=+0FXU%0|b7LbOk{>{k zFff!x;IW8h*Vnr$poevOzW(pE6KuGNe6dJ+jTe8#IrEyoUkd;rMv~7hLAt`DTVdGE z=6P3(7sU1y`X4Cb{_Un9LbqK$5gKG)q#=BFlt;~19s0_OVyXqBYYrloDe@M7-op~* z`eM_>>*%XC+dV~(rE!}UL@d(ZpMu979q>RdYbS1-_=qo?Yx%p$IPVKhq`0@rks32BNK4hn(07aGFr#k_|7}{ zB%QntDH35lc(`VNw=N(cy2p0E1?ru|9uTd@rvy>`+rV~C9;Hy$#*o=X_}Deb$xvK##E|r3W92g> z1PvX0CRB^5J|Loe5V#yo;Q*nCf@gVR46E7zpQ_E&wQ#y$0pnN`RWR``%({+qli1E- zql6j8QEH#fXtMfo#;SttD5nh%T=&{)Ss`#cPM@*U#d1J23*fxW}QdT_`@7z2!)nr*a1R_F|&&=%$IX%lj zIUlpLBTdifYw6(SrRnPI6mfE@F+SebHikilNo~QloGmS1aW| 
zoY4e@i{E>Gxede(BF^IVdyZFo`)uprI(T&3mHqT2=8N{^O?=*p=;?PpHL%cn_yt)k zm$1$CsG)qhVYSuO+sCB43ZIq%rIRT5QpnRKs+ZO61u804no6~3CRrp6c1I@!+beHw zK+6TE5t9(EfUlo+RYDgpZjM{F_=IwoCq^>SWkt56olx76iS}(dZK&&tauOLrN}eKr zdvWmqq`!b8NUCt&4dM8KMvqVj3f$t8&OgY5TU>s-P81zin>k5ufhfD?xgvDExkq!2 zH$|==D{W;TUBTA3udmeW$8znTlSNoYu_ARS;sB@!{BEE5`LV)#gT24YBdK9q{UL1$ z9mF$mo>*b5N3(8962jNyHk^~%lTBdR`2a0WLP~)ME0!y7txZ1ZGK~##YqAtm9s)v_ zW)h%XokWHdY=ZF~ddx87^%W8We)*F|9!7OT7YwhxSKp^j_jT{PwwSl8tRA#E__L;y zi?)`LftPI~CI9Em(194zAece@hzD_HZ%HF}M%`{t=2qW<(^YSgKXKkzO6kx05fd>+ z9nI@Kc1i22L5&Wr?zfHTLN7b_)uv~omB)~^@q1Qy<>$7i_3*Gr6Ly^t^RKm_#o>}N z<(}Iazpv<7Bjjdz8=rM&_n8LPVgh*Qo$`mdTX*c8?C>D}Y0Q(wPWyZ+9%d^T<^B!R z`^aaT(>J%-cfK99;B=R;H2M6m``6~$_a&US^z7C-Y9jZr^vp~lkRM;jD^}PESOZ*L z4`*i%?5>X`j3s|^zXv4fFK)*#dT`|Y6)KsbY3)T~;?i_tCzO7nO88I41tJ2o_p=-+5cJ6?Vx{vyBD`k-2P&=A|E*n=pK0SNWrP)U zHau4!%Nu#f(N#qY9c`ryRVXEiy@a{ZUb>#hh?^YU-dwNTfr%pF5nME@Y$h_>mK%-# zwwm%gEsWbOH*{4mPnA^x2CvQ!v?w6lx??tT8ti%xA5F%O=1D4aw=Bc}=&e34Yu;xh zpM5;uo_}EMoZBr-EpINA>owXfR9$qe`)KLv_JbNc(rFe5@ch1g+AazuHM&_R;*;tkcJ$Z4 zVW^mAI0A^IJq70|;g`bczW#my?rQ{tO~-ZQ;64+Oo;Ei#yN;T!fW-;mY>ZJ$1_K=&3$)@5Plf-Z;WYXhVR5bKe)XeVktR(y)8THQeqN%Joq=?@#N<>e&*(AZ8~vHrO=Dp=~`m&kHM0- z)5(?3j07c0b8HuZq9;P=aYIsC`D4`ZlvM(sivmiIDQT!??({d;Sn4bmw~qMdqk|{v zK0Asy?NSZ#H>V*KE!=$_vwXCA)#CkwkNRBbu5EDw3KrH}u_>CNHD;M3L4O3z2F)CE zDP-TCc0pLcg+386kqwcxs~T{VDhwK)tJ@@ zJUwJg(6+YRQaK)LHklRTd8)@vCG2>P6GLg4&2S1s`oHXuUBKelXK2+@? 
z3o#(pDbvD;0u)ekax&}x|DAt`9M$pBL};zD;%7~{@D%lSbKQ*8zP7Bj-S&^4874ODWGAk@RFqLq!uy{F3g09dD;hDLK=#yKgZ?6$4 zojL5vDdulf8_&84M_?f(P9;}TS<(xY3XuU>XWYK?;_|ujT&BIN=fi-+?Sg^#Ls7jE=gvx%plX6~1}BL;)=g zq}D!gUO|sr)N$HE!i>j36uvO-Gfs=!$DW?$#uJer_^S~`Ck0H7h&Gjf8<&5pvO9=taEh<- z4@2h=-{A0apoA#lnu{rstO|ZY8K-D-xOGU^hXpn zJpv4KJUu;uxfsCZIO`7>c=2gIPVNvC_^U78CFREWoOmCnb>D#b-Ddha z?;W6ePdEtM`|itC#*EYWS?k=liO*KryEZ_|W%|U;2>qK_)^)ogu}Hc!kb}35NB>O= zue&?T?4PB*yKr?1J0T)Ri;A^xrhdI0dD+SPMiW%1LRp+dbkxTVb2D?uxkJ}n>5fb5 zA?EWrjVVB!Q#VSPmWj^3!-uYOj;|!&8o}VZO!@3Lj1Gwp}^DmE3Xd0i0EgxK$XOho2!*&mYRBD;N z#3`UL%b~I)N^4KVU{A(yL^X4*P=}^;qcBNhl`g~A*zEosx=hCqBLdXX{&%bHV3`TP zsK8n`L!OU)XCs4<&{|iQ@i(MlF)4@CvJ;X*g}B4Q8?3I^INfaD|CbRXV7#w}!oIbW7&TzGc>e*VRP z_mXjJy0k21?v#(4!Vbw}!`Rnxsh8fu@7wd6d*E18!R(th3z5z5wXiUCDAZ)IEJ#T? z;L^acgZ6N!#;3r`nim&oX%Y}sw0}9ndrm*j<>Qw$b4t+!+kpw>cTT}#W~KRa47z7ZL2 zWlSY&P8-ehKsm`|SBWwV$qS|QWo0`{(UqNk|E7V*R$a4aD0^}<+0CN=;g^1}IJ%Dt z;MR6!0ubiL=_%TodGc0M&sSD(Is+i`Kl;oG7qTL+4w*VUCiH<KofdBIy>^SvF*=xD9pw)9x7di&dutLZJ|DCjZ# zwC^7j1ssR^2$GIDI?Zm@{<>@UT3 zukK$M!A_RFXM(J%$-rVdzB+%t^3zRx4zEVC!`-mt(%ICxni|UWDkh7R zk_xg6sBqs^(jIn2;M0o0*eXRHVh%f5lwQ#9$DRUL5xGm7G~(?Y&SUUlW3`r@5UkFj zk$6fu>v_E+1s%5s^7tX}&ZC%;!L!RnO^_Lpnh7rbS0gSJ{ql15=5bT+|8ith`F3RF zFzTS~F*N^W*{1U4XjaSU{qD=2u+anf_hmphRturya@YprZONvRW$m?o!`1lfsT41g zqQ!Ip6IwKnuHG`rga?#5{p`N~sgiB1&+)j6Y~R_w*PsbRb6EnLe?Zf09{t{uYfZM~ zdqky6RH;iuWpkpkD?>FQcjFKG`Fr{#kFc`Vc>VWl;M3;P#lz3rsMGjt3SXu9-{7zd{Tg|hLGrH8R51OnYYp={>3|M%}-YD@2}R$5S|&R`Nx-|pH-KH06Wd7qAD zgmHFMU+wX@J>Ne@lWhz%-We=-^*YUa^W6G+-DRj{KE0jz-7~Dm3lI3Z&`|T{K)Sq2 zgb0fABG59P z15fZYXWn^9NexO$Y+}&J<5?HLI~iI4#aF-}Ptp}&Ae$J-%Z7!Ke6){0`Hc?}r!wCYK@Z*w_k=?vm^x0ng>OLLfl6Os zFD-4l6E7^TR@>{D1Uy2Y*?4)A0ht#}k1eJwc_OjGO-DF5ITJGD9>yW!Z+HIwPfzQj z-bUWLvwcLkx&9f;7%H#J>#wi=Z+G(@&%2e56B>NYEC-^zM^KG76mxhPKXmRI=K zDpqM-POjoua=@STYO$MVZhrm_ z{MUllD>vIeiM>S=qrv|S2u6evla(Frk&pw+mcYaoOGms@9A{l1r=M@6xjl*P8`*t| zpH_TI!19e`sDw=Z3v#CJYLW_%@}z+7A{W{&2@Qv2q0T|Q@4Ps&jrV{Sh84;^4OX~B z>O{D;`egt-mFJJQWU-)6iq}9~ 
zz3@=clNqye>vP=vB;K<9*7o#LPo)?0-_oz@;0Q8+)BwCpp;340SYyi+F%^wj_k+O7 zM00J^ZLpRO+b1!Qhv%CkqsBN=y7}@V(FSjd=rZ`5kg!;OZi(I2hQ#>RTiMM*+(kmg z*d8lr95W&hDHy4^S7pVuhxR;~)0c6!3hVZ`3cc-Gb1fmEfDU$g&anNom1 z8iX}hy{J>CT6BG2hHIZl%p_QqE^)Yt*jUC`PhQm*{wpYTu3Rr5=outWCt902 zfzsV3h$STSIV$QsV5$-ZS+lp6Yv$raZ9~4xNh1%cHd4qoGC9 zy)Ti-os!fvbNouq(s922_CSn%J3-#+|3#kA2XFQD;qE~z3LgAIyXRXRN zLzKr?*@c7BO+34v)%VUnzdaVoLo6=b(b}gUy=q%qeS%X|7hEC&yMMXIEG{id#9&&e z5At68UH?iGPez>1@mW;o@+WM$dg+YXG>X*IzW7tcvVIKVewI2NUerNFxQ^MmwxGE2 z7YS_tiw6a*q#t0ZI7L2Mv4LB@RW{p{m0PlX6WjYdAh_NYhL5>o^6(e0OxVW{bOZ$T zK|%3yscaJ6)mZU${O+nkJ`-qp`dQ%;6jKxY#Ds+ZjYM$>f)Z%S6>tg|K!p9w==x2L zV`yODh=x{Hg^F^`(^%)_;YA|ocbTYV^py4Xf@S;m)ZzcSF8o5EPXPUPCG7v0_4Z&C zyE^}R(|;=XF)9tdRr)^-bQt>hejaAST763qDr%Fs_P8UdG8{KE!D_~J0;f_yC%vXkL zYiQ(ajn)H?BuMnz+1X8nVt&&N9b(HjI342RiCaY0IF;eLn?Cg6AbPGz`+7BGdo5kw z_TD>=fFuz^5fm>fHa72Hxn*Vlk>8O|0w#oZ%lixwh)BT6)`bV)b7|xM+y)kc;I5F| zJ%`5NTG)sFZl(9>j_&S_QBex&>Iv%frifID2ECJk4g4~*%IC}B8h8KMwEjKvsU^&r zY1dI$BS5`VwQ=ZK>n-7z(EziwU?$4RB#}M6lAM`yZ=FpCnvLs?7jR~AtS3C}@%#>s zfJdnEJp2r?w1X~Pzi{NO+Uf4`EU zsEVvuDGqbLu8!~AV5HR8(*H(YseIEBW)Rg=x_|%e$`aPEQklM<&=$|@oG}%KAaNEU z_`kBq5M;|2ef!>*>zB5&QLk$0$3fcLC6NNUpv!30Rsr@A*`=z!Laof&YWAW8hUveh zEC!dg^@wjT>F%;7I1zo~@Pm8(9o1&F`^VUcvgInK@eO$9&@X7DS_w(hE)T!o5*b9J zK^7SjeE#t(;s%m~m6f&!0%&PDzyBuiLho7%L+C&s3{CU2fLfOeU^sl-Ys3Ag5tZ#= z8oEDj;O%y@=6!rW#M1HF|D3w(%aRp2%BXw-P;}BO^UF_GG;;Q_H2@iun_F8~MnS_( zdini(i)+u~&0IzcMD(N1ViWiyt!`|Uj>Jln;~(9MT3Pm9nrHko2Z(>b_R;3$;;OHx zKtrBBaEW8xhx7@Y+=c!&gJnNu+N!6^n~t} z$_p(!HvFxLvF%S`Tt7+hEeD))&|SXo?pcOV1xVg0{k8ELa884Zr}zFX$Jc|jls#h% zTOQX(f{3%Ka}$JP&`U}YJd8=NK1fOKGI%@|3SG`p52eqHC;#$zbEM?h_

$hYIE~ z1$uoDjAQpc%`nb3%gEJU#t+E9go71YS}tM5d}mW_GlSVfB1>y zw9IUT02b`*?T9unepE0c!akIaGsHJFL{BbayD1UO-kT2jkfN=tLw+Uo1e^Yd|h2)PkDW5C;NkdaY!Hzlsw(Y0A6 zDwIL8KeyO4Cm>pLz?q6kFYl7jOR41XF?|HHl9T)!Ux3M>HyYSXendo^Od%j7^hzFI z=+p`uvjBiyFpRstz3rYyRM*be6y+#UFE&HUm#El5qPA^EFnpjWmPSe~Q_rb|uTIF0 z0UxtTN%13;j7}hd=0XBOKn{+ecV^(c2<#u)jB}vFM*d-K@LV91-_Zo1O2L@V(9lqV z0#g>MugfCV6gD&(Mv8nBp;sVIo&1-KnX)}KyA28noK)7`T3XWMGU$?eAg! zgKKMN_wxLF-f@(Q?e7DY_4W6F`^m~tG5h_Ca|)x7&k41Oj>FxbTD9jsXTPS;H{=of z`^UMu;bHX>6b#I^VD)Npmad}QRe~0yI05Mo66ihQVAm1IW#c%_a;EedUnQmK)xiCO zXdC-938kyIk|7w}_i2Wzwf^8cib7FkEeN3=jd*Ne<>)kmTV)LkKpZ zWlS5Blc_)xh4Bi{1uy_ZY*1a<*w{j~09@96E_6Y;x=pwpQ8daebg^bl#Pz;$RVGVb4i3x`dc zeGh1F7y%x<;-5Z!vXw=o0@ZckZ-D>&%!~-Mw7#Jsnz5~ngEabTKv-DVbfJ>?dMB-K zULm~DPItGM@^U~x0Py`(`IJd4gt5<2cY+WwXhF@J2od1MP>X-68q8S+RtBgxRRob@ z0u)J8)7;R|(5NU>pB~hScYDD#?J!7jufY#v zKA8w48DGd^qvWr&BKrQj{L6hc(uH^ z*5@T%J)=WA4=ZWfUV5Hwa*fds`!vQAU$tELNRVTqqa`IJO*R3=96YF@2ZqGTDW>SS z)s2l4gM*IWzZ+L;pC1Jbzum3-56EMt^U~2F*2*9Rkq=K1Ou+etp9EWM9nO{lfP0V| z;1Pf1<_7C}v!kOZ-I6JuCfHR|cV%UawD({F&~*$1M!dw)0GFGykE3J63AjG2BZowxm`Z(+@IHmLy$QsCLiN0u1$Vjcm)0{Fy>#T zy<+If&(H9Iz-*bTesR@^3HD45vXeJZU`ZR3I_(iBBM#G8)HOv0VcxNM{?g`}tFB9A zvx;A#&>8u7`|$f4@v_BRPOzv^FFfhnqhnxGNbf<6K)Ozws<`+NgpKNIqXXV0F-U4= zW;)yKEJ(o;`erq;s>1{2gf{*m=Dw1t@xE$de!j7Bk(H0HZ#8RjU1A~*ZisB}`uf|{ zwX(yZUPSOi+MpE-T68f~(kQ*dawsyADmM>TTb`bS68g+1$+|r}H0;8%G8ANSgTf?> zBE_t|eRjB+iN^_UYU(dWyN1m!GXDN?F)>O2xg{bJ06Zw_|3Ihm>t9d-fTl}1O1zvo zQ8ov&>)+?EYIs#-#wbht!dTSj=m z3shlA$>Ca?_uSlE_LSpz_RK~Xcop@rAD$CX$~Lq`AQhhPIu!1;Hx7fw`_Io=Io%o> z8YVRYlqPz5wt*2Y*3QL+)8!Mk&B2Fl0WuIEpqkw8P*x##u9Xm@v&t`5n7OgZ+05zF z8Vj;5?`9a`Uu4YG<>EnVscPqL7Kx`h_z3mjIx8rd-fc`ixzFtK5wkvI0B@cLWySGl z{U2PyjVwzCBI}@p#)uxAB8>$vl1C+L9SAql`KB5zHY3Wcxw|_LZit2W8;@*Jbk5JN zZjiTEUN$i<+Rs_*>Yju?kZRx5F50eQosj^05y+;qJh=T`4?$20HLr`SF|fK9;GdAk z$0Yk8`oJ-Zjyw|Ua?MgQlYTn zBO$wT{hSL~Z44NVWUhiW?|7o|m_`Asmh30GOSU znHd6odvkMuEEFN6g1=NeH>U|yC~cbH@3M@%ZhYU##Ke?y4+NLz5D_U8pjwfqNz=`O zl0s*mjDT5s7pyw8dwF@K=l42A$6{Fy0b1sD}nOBQV*(o|ex<<)^A!t(6M 
zOtkSsL*w6?oj!94wWs%)00;Ui`vkqe8}tjO$XEqgrp_OescxKT+=_2?j`g|0#)MJ0 zWMnbHc%;alhj<@US+qqF;rGx3Wb*^t%|SYOY~+11{C$ebv1ep~pO7no&p4psYvU7N zgN<-)eCOfNgGi+7=!7eq1!@|a!*v8(4dJlZ|Ni^!Zi4aW)04X8)Gp@ciAXBg+*w}A zR!9J=C}}AvqAw_lpCh5YL7W?i4w2S*SO-};odS|2Jj%5?>txUcLSI z0jJ_|z@0$;?iSq~5fwEU9Y)_17_fK#0Eh<_RqyI5Nqh4-Yy&Wab&FXdw!?u@(D{Sa zq3hld|f5ZZQWMF3HiI@Z0ZSSa{<)lfx6MUx|J+Npk* zoL0p9B)F%?$1@|>&hLf|RaGyqCWOT&awYOH#fIQ&j4vF5^zlE zD&OQm2+vog<77Br4e_v2Q@<(=-AtzWBT`8 zt6*X4)29PbsL%C-*wKlK4jPPUlNWfh!!*I(@ z^y|((>=LqG@Oyf?9o&s}LsfnGFKRsjehBb*`*_H8Esyz^IXE*~*%-w}3$B2!M@^P~Lcl0Fu*QX6g1#lCApY3UJSzx!tQ%pRPerLX;D%iR2X$uR`xwegKHBpRb$^Mg(aKPGV;y$_v7`MR?Of_Oiy2#oO}m=dw1u5HO2$hH^9jn z3{*VAD9Mqb zp(ei9fh-LlUaKgTZCYmlF z5@~c?jA+7)6@=P=8rtxFZfy*KpB0Z z|9M*m;5jnl{Zuh>cIJhI8<2cgEu0OD@hw}4e{I${b zGo-l}p$@GWGg~oFMca<4N1o52%HPB!-K&Rf&I(%aNhEjfXIvroP}J9=WD(0*?=@h6 z1fRUr^c0KRYYZA0&TxqmQ8Fa1EJqD!)FEDCW7!YPll~aENNh0Ks6E{q@ZdI~?cw7? zd<=yw{#59%E~TUtBP77i{#&W`T$?+syyM|R(peI6(a{WjV!%WeJ{hpGSlQTlp6VTm z_zNeBb!9r;`Ov2W^Bi0dBpBSHt{W+Xwkyt0z)j7I?uTKK-wwV+Q*A94a$`foA^&f3 zRV7wUyBcY#|A1x}Y`BU{5Vuaw&T25?_Bf!nxSGL2;1Xe$X|liakY@^dbAS8@<@E{- ztQ&EUP1~h#?Nqh8t7Y01dCp(3I8RwqW`&3L2@1RB_(jR6*buO9IU^>ph7zvOojuq$ zpdm(eX=z01B>*XR4fO#FD+l{r5b}s2^m7-6a19$;y9?+(yV5}Q-E4lHd{C!lPbxxOA&vFsf0|H~1>%kkghbCg-2uzTXY)aHGGwo6QWn^^O_=Sta$c|(6#y*u@G z*E=q)Xg)k$T{~U%UH#osYGZ(m%ZNY`7)aiR2X)W}j16a8*Q8=%v>$`76`y?no3b>; zz$N-U`oz_5%L%|_-q(NV7m7Pp| z5{((n6}}Y(s4?7$$}*_Zck@arDsd?(1nJm5Ti_H36ldAvj)QVJ_kxB~s;;o;Yua^FD_;m$dl_S$&Hcsfn` z3@yxk>YFbtT7zk+@F>^5N*PtisN-p=CL^;WuHP&Th$7@7#Nz(^uG!91*=T%YZ}aF9 z6wD9#++**qodm5|a`_TW9V%=6ygs z-d$XgVxjdmf)Dq(kVM!^K8iH*_y)|xe`{x77i_tM#Q=?+mI&edmoKnC!P*c8vKXjr zn9j5Q(guT-?>|_PkYsFanbf_zaR8f;Mw0jTU`MyPsR^R+`;25KZ^{b9_e!sKd(7d^ zSh1l?cXSXlrFEx78)BG*ZL-eWbb2w>u)DJ)Nd?{tt$USXOi+AkOyy)uBXxF1l2FJe zsi~>Kxy{iNtWtwbMKK$widewf;yoIOA=}&C%{kPgPZXO*W~M5Cs72h!fD{&~)9e3W z*zK|<<}Md0jb%vS_)&F|^n}Uxi9qY<^a& zIEv}_O_FDf;3!+myEi6u2CPg?eGgV``1RYUO;n??LYuLo^r*HbvbHV^5oMH%Bg(~D 
z!S%Z$I~T~Hmzmdk#3E~aDs|9wjg7dH%{Q^Nu1vyyD!XFV=2sXiiJ5GIqe6%@Qyd2dveH_xA?}&nGuJH^>p(jEo$Vz^f+EO&0s^mpo03GHtHQ z8 z*(e3#7ehJTTUx%Fs;DvlgBYTAgvmkR2LUzZ_WIiAdLL6?c(RN_k$90KU8rso5W{k0 z(1T}3&}F-9dAaLFSc?B;szq@WDANFt87k`Ys($>_qZNCTm>QB1?WF83I2HZa+jiM* zfm~FZi;Hjjv}1e#(@Qh;x^n*4z zQA*QRku@Jw-RB2KQA*nz2Kv~hj{ShTizKZ_?>^Uv18+7c)26hge3bJ23&+zFEqn3d zQ(3wi(vPjUxMHxAHvRO`o>|5q!>sA~ZtV^av5sCPM;Q%W;r_vlXdN{=oSu|0@6rRh zi1+PWnl%|Ux&R~S8S~0t{6}(Q3y?!-YZ0KN;qR3bgbDywIxX7imsZ>cnMlulj4!l(bcYJ(O)hu98 z{(UgJwx)Qk)bU#9LWUnv;%&MBF7Vmo=LKw8sNd&cgdRbnG8CPRkbq!uX-Ta1Hp>50 z<;zLo@rUT-_H+^Psz}<8N;Q2SsxPSvKLy$lRp9>d$bJC^wmP35aOsq-M09j?1qB7* z2#$_|@?&c^DvQ@5WAPZeynR}Pi(HrLzW~~S0j;$*tc{aIL^#Hzp_zztCS}@b>1>VF zs*+UE&)j@~)NJR(LrYWQ;ut`K5%zDkv4OFKrcSNAjlD2^@2XcFDX3?&krNg0IijX5 zv*A?S9Vn5t-1$nA<7M)h#@?g3UErekIM)za%@7o8*V~-y(m3O``WWJ9jkErl)mT-G z9b+u3%Jy{o6>~`XyFTo+qXY~78dhzH5ab$6AcX* zs{YJfJ#*gpr@{*mCbLNmDI3`gx|2Ho$#l?`Oh6b3OU>)29 z|9yOvS98W%IA(d;-8qzHIUu)c%Q%}G$9RfD|3(zuJ~cFR3_(fN=X6|)5g<5r1c=pQbq zODnezwPok^s{TWPH`l~tD@oH@S`Nt|`Hyjwg8i?Jv6NVx*=dT|^npAtKr*et-WYf( zQ;7}JqmAtFEKQJ|Q}o&6TI}B*-o4Lv zpkrb#7#*crndi50jID@e*o=(@qyeF-b&SKr)?u&UPfHMY|-ksRd;$7Xz#f4k)R zQDYWZJnE1vOf?KQkW~cAc3$;P(7~WZ!&OA#|38whI;iTcYr~cxPb$cC7WRCrSVx&8L2$;G>`LH)W=8 zTUT7xh|9d|98i$-Jbi2gLeLn~R6r74_c0ED5v2)G#e+uJX5Q{8l(w}!UhMwnCBxb) z1Wy+MqjKvhD*@u&d!0BH8Gx>)wyS`FR_zK!yz)GKMpcZ69npQ1)`8=`;P`$Gb_9u` zdZqWrWgHgNVM|^dPN&F9QnD;GEHn_r24*$B!9hdY=OA^p{QLJ*MFs;+G&D3^<7KsI z&yJsvM~Wzxjc*(0Z-Rqs`}-kzB2TZMZvUE7y4&3ok<^7Cl8~-opkuKf>=v9f1_|*7 ztnG5ou~s$aYmSbLqRY{Nq!g5I;yiq4p+8hP-nqEOG`AF{q=yyJy^Hy*h#Hzb>by74 zpUJ{`Q`=x6<7p{ES6ZJhd5KV>Nt>1NAvx*mfsNUBwmcS=W<(#Z1K`^wMV?;q{m9ae z>;so4`sL5hiJMf_*Z%7g(O{5lu#@@-BEih3{)(VyGS)dMISd;?hZr0N2{o z%?;OjJxN=Y$+Vs9rr#*1$cQp3%*J*Arg!!a?nJn-sFU?DqrHpr zFQT0l`!9yd_aoS>&d01Sl*`T)%e=p;QD@9%s7~lk@=uBhiw$hW@-$JE@X?HKqG`Ko zB~BRr2jbP%5xji!)h!(zPeJJj{JK~50ZZcO@83Fk@e3SMwsB4qwYG8^CNmJL;{ofe zW6k`8LzX3Rj8B*$yGK)FajP(W@?1xpjauMjL>q^OxZd4Z`ibFv*xeJ3VplvqOXt+B 
z&)T0q7)KBE@kMRa772;9oX2m%{!36WcM;C3DxROD9fXdp%)MgbX#UJ4#!CG5?-`Lu zk1^_|)+lq6fswm_kSMS5cI#rpv)9R@eZ|w$Jl!N}dhjSoaHk1BXR&JNWMg$TUgCfv z!}&lwGT2ebM!`A+70Al+7awr2uzWpYkX)@56^HArXD==dr514Q@}?tuAlT6d!S6mDM4R3k50C~g^_Rk!Ph%I z*&5~S(T$4HiG$C=>i4hGdo)HxUv4h?zYr0jpWIcP_Q1bftx10Tn5&`9v`K;8 z5YMarW%kbkcmK!p<=%@jQcUFYOh{->axzKU4Mn7^qz}NS2=MVUhwc_%u!bmknYS^cg!^#vA0k*L3&^EHnvC7e@$9+dA8? zMxs2A%2m3$zm1A5b8wZfeHA-AWDDWT%Fr5YH%4EDbQ^boatU4(_DUVI$_6(PfCo<6Z)F1wcZ* z+XXr=E_l^QK;{^J-Zs!Bhns%}v8&6gRZG_-xUlqM^~wHj!T4ET!Azq?f`s2YUscVC zbghW>ZfNpYmp7*=28o>dZg5eOzMl3>w%(MEM)~iCDhi*f9$c{6ek9swGpUr7VY zvU@Mg9)UkyX^ISF(aQaJ`4s(E*xDE5^=_)kC7SKh%EhTjoG~g7-7x6G<)E z-QVTQv9|Rjr;-02-|OVO(=+^l$6Qd#2y`=LITCI4obBa2uZnuaD1440b0rU3Ra6N)%L7#5BIU`=0x99P?6hurz|b+mtza zkcq>}xhkrmA-b~S03S7^d)y+`U}FBwl~0D1C0^oJO}=qitugpB7dg!ROcvcoD4sukX*_7)ncg2`{yhW|wD2&F}Nsav^=vICT+!wlUVT~!aF{ZjBII;Z~ zmGxoDV#>`uR{4iVZuTFU37;6Y`kw=Z`@y%)1PR zelbZ~ZSdX7BTR=aPrV2OBaOnw47dXBTX{FtbS*DeGcLb~9nQliC`Cr@y;ULMx_!KE zYybd1IA5N+J3tNrD|XxC22E4==^!W-{h3zwkx^ZIeZ?Y!*Fj8V-p#=Q$V;S+WF?Fg z5cfRa*Dx|V+T7$76~%=3b_%_)X|y!S7$AtqcZx88f6b;5K9wgZ%^fOTm{6+z!6(Ay zLlC90a`SrTM)4T;GenoT9Ze!tG zPl9!H;0&S_1<=ofJUy&AkDsjs|BfU`{M^&k3JbkO%7Lou|Gw|1Wns=SP5Mu@6xn8& zATO8C(;G5k@+$o3lY0-aXo~Rg^=+H!&QK^g164*EYY51C<0x6#M8ToTP5_Wde}6yy zrCtjdJE!4EgA7qSJC0LE@VSGb=UOa@AT5p~`O{3JQIxhHDX744S(_`7JeM?N2Yr$%EGkdtQG4tSYsPz74$S$OgnIZP0g_Gyr?I+^P|woFB=9J4GY; zvscYUQ-3gq5#BtFaNXAWt~1|vJ7Y&A8>nAoe$ac97)De(eW_;Q5mfc?v7viU3(?Ee z6oRH9MFr`3ui;#vud?)C8IBVXUWY8xKZ4+;85pS=>;|^^4RDWhnLd)ka zQO`;H&&|(BNTP2O4Y0)QJU;5YNhqfs2e}mBK?cqkGZPc?N=o#Fe|v58`17EgY{Tx? 
zanZxU9XCg7%l~N`Q#e1KHE#E`#Lf56#oc_zk=8e;fyRDT6mwHyWwrHx-^bIbPqgD6 zlLY+~5~4Z{?rAwLety3-(T5X;l}1N|AD35GX=!P>#(%W&VAj_A$={b>rQncHPf2hR zZEb@K!1nOK@AhwZvhkxCa7fdyx4GX3#Q1HA=f5iccjwRdzn_b{z@J%97ZP92t~>l| zT^Fy$Bv!5uJD#0y2dBr!U!Cm~8{~{HZdVa|Gp(JQW1;pr8FliX@#3KZN-#W&u2#bG{NwX<-K6~&SWAmo=|_s zs!)Lcd1YzoalI#MobG2`FM|aSIty1n ztaZLTTAtl*BY;uGUSp>(=s^xwS5*s;qsc-nBFJ-Zef&UAm!7`(^-k&O>HhdXT3QZ+ zM(CfCnnwEf@SF=5VD-~;Q9vTO zd0~eLP?kh|`yoIN1QEjLW6$ua|B;fC5)!=@V~S?7aeP~PK=sXvk|LJL%9~KF{9f?! zv+ufN*<2P|Zme$i!-p5=Wl9fkwi8;Ogkm+yFw#(V+3aAid0HbydyEX~x(qcrnQk@* z2teq4E5-G{1#_``z2x4fBysz&aK(9iBw~7gDL?jXgevI7y!p%#CXQW3h}yHj>5(T& zu+zmgSZhTblh{#4E}achVT}Y8O?`EIQDyb?(q`BLz&z$HnUo;df#D+S@9uJ#muz*~cU4=`cw*s%k+ZN0qiZnp*Sz)c2d2ab-81{?xK)z4S)2RrN9 z!cmMGEPoXWq#3r+>3Iw$==23Ui4~ngY}%!n;)XUA0PX)vzm*qLnBTnkZe6_1lU@d? z62`aGRA%B1!}mXJKJe#2IT`YksD$1>T{6#RUF0g0^=7t_IM0i!&2pa}EWxh7I5F%W z#@vN+T4-w<%E>+L>;RhE@1_jneroiKT(SsF;yYqu^?90q7g+?z>9wvfHu?CmKL6!7 z6c8N^4VNYdA%`0q8hUyP4-uUvKa~=#dnYINeycP*JRVc~K1?uD`#rKtM-nJiNr0nW?`9Pkg znF8pJ|Dl7d&|9CU^D+Nb5JwG|d*BIVNo4sl7+)&<@8r2F)}wO9uvl^0Q}0rwQe>J$ zkDS1G9wwOW!0~I3O*2|Nj8pgBaJWLY(0`S)TJkljN>K*PV=xfgZ@s zsj7+oUbg?*eGInk(YptN2R!}!0wq&_G^n6ruzwv8|5?x)926ApVQ5#HAR<7WZEsU24gyCtw=a!nBLB9B>PbggHl*qxa1H_nS0C!Q?W#_Hx8nD~4 z3KQ#~czie*@ZDBE5-CH^%6ily8qK(QsAV}2bM-kM*bwP>{5TXtxw^mwhr_R)Xz+`z9j;Q@R21aQbbbr73D>N^$4dB!aj5-7I)1dzM^P3kF)BIvN zV)L&lwF+<=m<3hke8Vx^&heG)Vnmhg;uS*jDCJ~}+fMz_@$2J-&Xr$U9-6DT zz=Sgr0=s6U4wmsxOG)V7KCip3#Z=9>*30GtrdgJ@w$sYKw%I^o$SLbsqM4SN3Fs@- zgkT6pOia952e?InD6Aw9Z+E9c7ranJDSI&!J-4HAI|4}s#R&@ z2el+Dn6?v~_fD@*^mu*I_X;G*XUo66Gy;PL7%TkA9r#KMaCm8@OH zts|ceUTwz0xssCUtNLpACm}vQLz7MQU8`^NV2x^Irpy!6k%_Cr;OJwIF3Gd}n$5Rk zzLlPQ4o>Ezn3;W?Ty19ADeGF>4yxEYotYlPru@_H`$@PMF7RnRN(xMzOd~vQ+iQai zPEk5)I%LOTwE}!OiwZzWwH|}V5HBBoiGpB&32=FL5bt_akA~vRWB8&>W-HE2kB zeBrs}?^i3{K54ad-NR#542Zpav)L`Ihmy#erC;>)xu*x|W|BL#HEFA}_=Hx!Y^OLJ zWIW}Us+jxvso73~98kIJmo=p?%lF*0M_&o*>Vg8|)USPhTFa2YrBG4Km+y(p>{W_D zmj)oI%;E>Ef1Ui+BEHyedvDPq?mbvBcIE5;;aNq3>gIMQ&^X4=@nMdngW`VuM#VS- 
zy0(qL=qK!ePvyVtyZW=jFzZmg2H2Y%F1qRHFM_v%V`btj`z8P}4Q|;7=xiIrB*>zA ztnBP$96cI9t&-*c5~bX`LO;73a(M%I!FaRuR;heO|dmO;ep#f*@q z;7>_U-v^X*0|QKT<)Yu#R#pcG2X=oNg<#M=$60Zd<~m9_X0@1t zo%JWDwSHhgcVO>)w$(AaJYk6Fx5hzX?^FK?t#|s(+|HyLh)`H7S$kgxO|A_V`jzbS z@gu)LQRn>2iDP*;T}yIv8;}YdtXFyb5-bu9MIAKQO>TyC&L*aM;1O7{9R6vtzP4z2 z86bH)-(W8g@_2tmAy{>NvexzqN6cHa_If-=45eevd51<6c+r!zeV83J1a91;l9G}dlX%Vq+CPvpe!S8F&I&0p zu?%e-*veUBBtRbZo2FYX4(%NSFBvp~+tq?cRPt$W&k6_=fuec2)l_#o z@-A-!UXdq_FpTdw8D|)$#3~GSILuxbN+sETs4;iY`k;;RtMPv%i7eyz|%fE%YvRmHvR^hjsYJzD{%?BDp+^7Nv^u`U(Lm0<( z8@}~GaHr2nG5M8Kgmh~!oXhov-(#gc@NuuGUmF|GE0yw^wV)YC8Os!PIW7Mv=<#&X ziD7}ADbjSkN|IG_emvK1_WXpf`yw@CUbk-N!4d8}HOs%Op}f^hPfss5H@8+}g`usj zts9-8>Uv;cU^~knC!mbPpsA&05paXA4i`a31&*KR$tt{ugN6n{w2WEJJnTZWIT;n| zJPH-A{R53s!{KeknV6U`8P?VYgA*+>7 zeCud3I=~v!^6Y7QpR;o1<9oU4&QN5B85z~yOAQ$#_zqF_2#L*g(DiTmuZl$ z#u5=>%DxH7_FjgUti;br(ZOlv`Flqhwf7jQqA&5~HKht>ug7Pwx%U55oAr(Jl7NmE zu>HCfowrB7N$o;Ee4xu0^?MK$5lNqeBq#we-`CfdX>9G+C$;?CT+`k8tEHu-mwzV< z-jZ+_1caBBS;Js36&0LnJODoC?iBsr1$rhh>H~K*z;VDd0)|J7*y?6#8YGzmrScg3 z^M|7JOJk$p99zB*F!_NU8*qTeo3sZWlo1}b=H~Q-gWZL=?<$Q#f9SK~1$)NA@xcAU zjp(t)0p74l%eJ_%L`(^yoh7;KYbxQJztKa+OHDa1+Idwa_6(Qqr<05OynuIqUu+i4 z(vm2mR-j$|ODRf~74;(uq7QDjPRu1WJ6yATd5@un27>u2=;UG0a3P9OP!j{{lqRpZ zxHw?$0q!l}7qdjg#o7^PB(=CZvPC;0S!sQ2IpS{Z>sTi>TRnC2>BNyU_3@MK_PaY{ zN_Qbo0r|0l!Ksdsm!%@27FD(;dKCt(LcV`k2g2sx$?8o_|XX$c?Dbej^fqXA~6`rADE^ z3u^LtnK&$|&u!4fWm;1Bv@$q1GxaO;@q*O2g1KJ)iX98ic(L2z1C)hDuP18IfM5#bY zw=ti(1Ml{*DZoGT;_1v{_Hjk`=x%%IJ4%Ir^YPvcf`G*R!cA0O%nHaY@nZcu>+NYV zqEIoXy>wkV#B}≥Y#P>Qm+?sWk~3zPbWT8^;{g>HYG%E4!QHW5Bv&Vr2zusGr%M zPYh$q1Sosr9$$m8Qg&cPMFr4rc_be#wG_=u1spNFd@@iccZ0#0HbcJUW`F~gM)g@I z3M#m5xdLy9+m2aNQzP;0^DvJaQ?#~=3reD^=bI(?X{=C{V`kBl zq3WWwBI;`j!GK=xpVl3>TPul)TCm1FJ=NI2Wj5cy?o>%3`J&%h{!WH~eEs^6m`U<= zd9r0eDI{HA2UqUHXWf^!VQ(`xF(-9{H- z2n`U}0(}4g5CBe=%wY%#l-U_3BG1}$cclq=L?mLh>ZMEU407Z>p_M!oM8Gz z2>F&WeG_GOBcXgpc{gvnf>*Zm%suZoXR`mapLNg%dA}d1-|}SH`W!y;j?9VH0f_^TWfYVjgkyk>O8A?VGeRrC4rw3R+98gFo?gEMe)01Q_>L?T48QM^*b 
z4?rV7{&p{p9L%t~vA9r~&VJ@li+W_IzEI~)Rt@}tKd8i{GXcd5NrGBquL1Xmw^o66 zmX^RBG)NEeX?PGYjO{qs*t-9>?gXuvO--KOS3d1#eac;#*Hog7vPA83-aD(y{yw2y z+r~L3f0sFNKoRz9hoNgse6h`WyMj{0T}Y_qi>z#2!xQZkgxx=OuvY*nS|<3a!)>nx z$`B98tvQX-v;Z&+jg?}Gi({OReyp9^<5(H?TKq>23w8H#wv!lWK8&~B$+9NaTO~E7 zN?A-WT$$w;l}}Gi{Jz2276O$z73bzw*Vg_Bic&yh@$cfpAtBO41fO@DCPd;T7ZX$e z=qLv*Eo3rh5K1qB96Uz|IV3T>Tq?-sj1fM3>UFtaThyPT9LqYGzV#UEiM=*j`0;!< zTKQT0c`Pqj*0BBVazH}m_G#Z>_1Q4u-bmmw#_;v=&y=gJ8KUVb?892DVFd`*8!agG z=h;Pp1q3TDC@ARR;b9gJ#?%c+`K773@;*MIT(tQE_MD`_ZYiv=S_AFkXbENPo2bj~LFe`{rKk&h zC=aft0AA*8Vq*j@6?mtg4jZ0F2M5Ejtd7lItd2K|Q~`yBri#d=A#mF{KRcTf#e-E( z#7}Kp|A|crd(P5D6fKqNjzhtn!n@3RE{e&hji`xXDcQ>nH+MQ@Z>MsAS5Z=Wefwv6 z8YvlFEgHno`AvoH_5S(OnxBtIW_H46a1OTqKRzsNTW&7I$j;n6F+LthPBaV+v4r;b z_V~w&Uk+3MU`7bK|xTbN=Q)CR~Tl) zo2KEhji=xQyt!0#`%{NUMpbP-q@8=Odx;A1)u11T)kjwz{V}i`!^PguSOf>Y#`0M6 z{eAu6Cv3{m;lYnTbQW4Xsp8~9j^6gNmB?y9xyrsZxA!MHe|#Bg#TY~Pa8sOfUoFDH zgw606T)n~_7;`hqM$6P<`Q8y8YtP#kpa_+fmFemFU`nU0O&rb%;|lKFIX?cxvI<;v z!QDpJQIszj3o7uD3v>~$v~q=B~tj2>1Fcbtc1%rsDWeOU9kdzcc{rpFT#w1{vG zcZgHWw-Zg*$n3WWEw(aF`u|%R6&Rz>?`ACJ7XDtJ9W*tsVm5-l?WliF&&;1u9RyM*i4Yja7AGDDlYDh zVM}I3ZcvEFO5F3&^J$B(1Vv!qqin|~ht)Zp(vx_WgXi78$3DNChxE$pgU1Yy0lQ1N z9niGgq96i{vd;ZnbXA~{^9vl->ltN3K%(u?>7i}X8lQ# zA{sUtWzp|C_dNL6A>W)+lYrJ%(Gl?|R|Lcb-u#pII;ZiU!xmM3a5U#<)~O&$AQwE% zb0f38fJn#a_hi;aOIU1r3^4rZmk?m#tP6~x5)ufxQ$K&2GBYA(8z;}`0Y-2+ai@1%f%P%(N6St8p6V2c!5~uiKbjIS zHEu!%@R*obTMWgm9k5TQ={#w4-sY&cRWf@{(|(zv|@}_GKCPO0HW6 zO(3#vLM`$Nn>Rq}tK1aE4^!KxNOur~iCtJ<;`THGKqk^+Xl}9VU;N`Lf`k`1%WRI`fID6GVoSR0)gg zUo~whQv3(#b^G{wURE5G>Zkd;1_#v$$9%* zo2W&*m69O`zqhaen}2zB7WmBGt#$?j%s=_W_44zqui59Wt0B!!^ByFSu=hano#5FWD78zV_dwu>cN9`z<~p_%$r>bpvHIt0&6q^7cX}e6-P&T7njd9 zHBoDZc_1u6(CcDOV8zFK=Op?5n)=3PrTy`~L-65m@`~U6abBOFu>V@_+LHI^XocFu z#7Sx9GB7B<0q*Na4tEWQLDWQ96}5mil>&eIHFKg=N<0e<6AeegAh@8+o*OlM zZq?4i1Imja-!jh zLD&C;p%+#|j=Vn9;!~40{40v1sqn#y(qc2qdw_mkqGdPKjD58R^vYs>4<0}!afVq3 z%oh0gn8byIm~mUXacZjSz&l>d2{IYLKH1yb0|`QIg^7S)C}K|DVG#HHN=GK)vvT#R 
za2hr8ztd79St!yHSaucq-*x%shLWOB4P|$P)yZA=br% z8^;xL>jgocYCz7ie<@1v^ zR6RO6I%!$i{hgicI%T7EH(Y?=bC()#o2#RngCOZu%CNY#H9!@pd+v|9l>+4`D+|lN z-;~J`Ob)>{Z|?N>aWyEu-Vx0_AFUqBC4iz-*%ay--cP6yVLJy({uI3Q47=#>&QPj;AjdHVJ3AXeqDH~j5Y_1|jxL+v>F(NiLt@wNBw z!F>TBt!t@{S(Cu!?4R!KZ5En+Y`5!q22#g*_l+FSL2)+=r^EFFTX^-Vr3U))KX!kL zjgtE-H|)X8T1yi> zhbNk_*KeiO^nE;K-K30_uIJ!v!$#M!CQ_Br5@Q^R^5PGA!@@v82R9iTBo>~53B z12yBv8RxO`HY8x>6sFggO$=}VEG!_ZML-Dd?v`$BWhO*V9`Yp@`v&&?eA=chN<>1rcSQUpXDkHs`KYBS{Gb$!2N|2r$F~hN-F>A3P|MJ+H8n1 zRzw=b!R#p|h4gTW2yUB|l@%e7ijp#sb?XR)pqX)6M~09bX;U1N-Q{y7AVg#I+hLAcV~^Tn58Iv%*f_kcsE%ag z!yay-PB>1~Mu;t?+F>4@%(bEtBOyuFZ?FRo?)=ow$UA ze3i_B6;;4_IJmi8?fxzTmMvEFlDM#<1fHviZOV91Amv3aMC!|?q4at8l1jPE(0rTc z6P_Md8L_289kt{}V-YpWa_AHSv=a(0oC}P=wiA!m=4Rn>B_Le`Fr*fD2U}HLRn;p( za1~f*WFaEnsd+d`QM-Qbw53iRwx{~cZhJjs<)BjG7+DFH#6iCNLZK3C#9qE~x8#tL zJkvzuMMmF%mh!1ppmu{8fFReFqGDr#P%pGT@E=e^G~)r65)B7lwZ8TBAXT~}>RhcU zH(5GuAO8iH75|HNq0`mADC`wuhnh>~Vw*gy$;QvR)doV9dM#+6NSZ;yB_WP_1DZ%1 zn9is1g-JRSpX840R%yT!B&%`C9(EuyqfH{%n|#8YsE4i zw9$^OIKO4)TQJVH)JKf~Hv1_W_8uQaFPan^TZJ{S#5q^hJHFVzjbzp&*1o zWtccz2!bX{V~&x}_K*La&*k~JG3(_B!KvBB^CL)BlrWBTY6>cS8!$|oY)8TLw;PoxvS%EL5M zjwL}ZRa8w`YDGSMZg?T{&1Ki(f`~^ZP<(7|ZfX=Mw{ z*7qwXwg*Y zLVs#Wl+sF~MKR{rcmT%bS}422oi-4y!hm+#*3|TCQc{?#WCzK=bs$|V^7mXHAdwJ0 zdfMu|`gNz@(OCW0S!?CI&zr;V$MxAvuveC^um7{VoAJ~7?WN^&dr2bjXV=Vq^{NewvM9UO4m$v|Db z@ZT$=2r%od7#%Gs0kW3%r?3!Dr0kvjg9B0bBLg`(RKg+dRK=VO0y)k90vP9f&gfdw zy8!JTc2LB3S@tpbh%!A#(UD>PLAPCpE|GP6JI5sJ9YSU380DbVv`LIbR{NGYoMSPK za)w|vHaLL@eOo}_I!%$NfE37oGQfW^G;t~S`+VR>G&6Vz3?s%(d-C4@Y%0Pn2_;j$dBPS=P zqs|!vvP-ZWG!n4YG5&JLVZqmf5Io!b4Vv}rq2k}Pf`X)xRIgl_cK`V&R7u@}2rpD^ zrPkKcQuI=vb(N7{VA2C)*qxYAn?vGy$3%Dp2GMYg3{G9Q;GN5h551FeUS}M^GJQqO zf8rWk_@56F+1|b}4-jc|k>IL?-!H!;j>=hW^SO1hv#S8HAuo}MWVqQAOMUd{lF-+( z6giUmyYZRna*^=|OGX(&3Bz%j;S$c7@Rl@DZgX_n{T`d^>Mk+tkG&TGR}G}C&~S6( zV`CyNh}UOGXC4%d1hArP^3F>qlOb*HiVd_MrIzfcdJeIk$Jmel8i%+~TX7qX1aA=7 z*g>^}ttBsarXn$B0Nx)A1xgZL0fCK(F#&Ntr6Wgl{ht=c;ZL+*LpacIq&>W}=bUK!XIW*>b6wcIRD{(hM0;O4! 
zhC;c8RM^sq6f8pVE93zGu$#L{j%6c~Zj;$fL8;+rqjvg}1JrkWch~cBZyLBefL^~- z03vcBgN$*3EGZ-)F_6N}X75jta`uzqV#c?FHEfy0-aDgZ&Fd=c{&rD?gB;0NAgbN= zh5s1G#KM0J zpsJDvOQEizk=6e3g-$kCIxa55D@L-!g_T)ZfG678J^dO7Wa!^ZdUZ z&j){6pcI&!n5+Vr9Sq?B{0f?iY+ZVF1cvW>ZZoPM2w#(7y~B=+p<4R~KfS9kXaYn2 zxVV4$!hrb?{RYN5i1a5dX@9@h)oH1yROaSN=6nf;uAB_F9wfPSGSB}MnEkJSp}oAO z%CoA_yWG;G)!d=AJyuVKOulj-YuJ(-hqe76$!q$*%OK~kwuhrZvXNhT455Uucpty33%_aQR!`tAyQmBfqnR3f(z;^qUEKk9ufx zyC(|PDG5Z<#>@<%E7H$EYShZo60HZm-%&vUJq{Hb7*0!52Q%T?Z1OxWY-tg($pQ01 zv6c(kz_3Yz-Ev|O{+Gb6A4!b9Qebf(N*TD&mEPJ61zo*|rzZe@@&Bs_qzj2S3mPVeG0qt@@@fo6ip2D`arE;W)`xt_A1xo zt2E{=E>=!XZb|?B62S1oE#4CN@JiwB+Z;`l(>CvGj)WxO8~`{FY{VEBdI0I9Dz0C3 zw6nvG_70PD_43jyq^b(^%FE#Z=m?Uxo3k@`L52ZkBO(G+7eTb{pmIA`PH`;+$obEo z*;Q4lEQ!*@b&Yh|ZeJPYe_e!}Dfiu=OLb(h+F638V|0N&qx!Ar{uhr4`pd$1NuC~R zP&b7B3&D8ugxQ@;O+Mj~r(Z}Et8Hi3*PpI&%P9re1lYj-vi}OanVIgBGKE;9WiT)? z5$oO%(J34p9ksh1k~NQDN512&BE5u4@dJy*k#tBh#>FJM*!Y+49UaCzND8(<EE~{un8v``oSX!75pVU3~$X7HY!1NMRAC$WKQUxch$p^_}`Q0cj2po5zc7E}94=h99Qip&~JYs|#PnhWASeF7phWr19* z(4xw=;B8BL`wsVGBN>^nEtQrOEF@ysFnK^RDp1FNImRDFAK3xAM?%?%SSqo}0~Q&6 zMa(rgu~7Bkxm66L1;CNiOt~Q-Dp&2dN@Z;}^pf8&2>=~A3d$!5LRc{}(1PXMghl5W zS#-Ets(<768`+#F#_{^S5u`+S%C|8BrD2 z01rYS;LnG$?srEJ0L~Ji&18QD-owRGdKH-oQr`Q=s7#ySzrnmQJCsI^iD%Eth1vq_ zl2>RfsH3(VFftfb_NDrZUj!L(@}oZmTY^QkCghcicN`)s8xPW)dO1dU>cR$BheyUb z$st9^mr+;Zye6+mff@rayrkb@f6NO6!~SL{7s_Bd;4Sp5e-m;48IYXmt__hRL=OcA zDlK9>n=wu<%`6*~T}yxds6K>3u)Gm5{LeP!!88jPw1IY%31Zfp-nM{$$fsSki)vZ5lbagz3(J-pawA=A$G^LL zVhOvtP3nAYm}yliv~lI_R+!(EKb2yoz0e~lHRZ?tcdo(!&kvBZas{De-;wBY$z^#7 zwzsyjyEUIz@9%N6K(6h=BfHw*`dcN9>a_&_(lK)T zH50Ck#~VmO7@GDXsRI#o-~8-=F)}a0#WnHgPeww*1&BGbY4ONYB@9o~dE>#^xFCeh#oV#E%Pva5) zW$x4?8DWrUGio>lcC%;9dJaJ=_9Y zHPDEWkQ4z!gXzPwihy9?tqETlx)DMk?Hm?>mFM|Jbrw@v)2}>DPdX7*B`2LhjmlIK ze_abMd58!-6vl%8VR3vISQEo(93LMW7bKIN+ktun{4}6@a)nnaFD|YESFf6efx&05 z7(URkea`{sAPvHfA3-}U{pQWY?gglWfuV3PG}sRC&J0@IY&R~tg6P2)0{qqA;58qW z1lEXCzfWE=CP^R^(7Xw&Nh)LPl~Ku&z9?4|+i(TCl=yf88@CT1LKF&sn-dsQb?Jim 
z7Mm|g2wR~hfgmFBA3I(zfY@1(0^FU{)LEdD86Q^#bAMzhzGDTjo;o^nED%6r81q98 zXNcn??Zw%dWJ6lHU&oR#f}Sz3K(;6aj(q$3i<6Vwe0)6{ryu~1Yqzct`rp+ONl=BBBT6oF1$?J%{dP1W|ZVX@Y)M? z9KMJ6SOLbV#aO1TogMPZ;r_me>mH#fsNUul7lZAV+kMk3D95^{Tf|07Ls>Xq} zCNSy4#|P@&8ufa2)E&-L#W0yJV2=t|4k$+$Mb7&(-vNl_L68j(;Chlw6m>uah5mS5 zH$nD~>;MlB58Q--lcFVqDPbVfOs@minl9H3_>*-0-iHaKVkb~hcr4kx)E!*ol z{xIU=ZCg9#ThT}hT>r8lb=R!KJ-nRi7b9)BEezOF_e@;-qq%om5?|z)3iN^4BsUiz zOTr-8qL(fqF>!ikhC{T41Ymo_1{(EeKQZUX!Wl@nSv;579h4+Pxw*LD8NUJuv>dRt zP&McJ`YaqARL}21_@N-j*vg6}RPyxfY;b4@!Mnbu2EjW_211u>ZFIgRdHDFAp9;`1gaszibMtXWY&X?K*(l___dCa*0L~;s`^jDLL z+eUq%z0b)tLx3CAQ%av63h1-|d2b4z*Tt57EE9c;6fcAjgroq9@733ony;qeGs8r` zP#Ph)^?dt#)>yjI4L?`9m+V-ba$`;r&GcqHn%5g1(fg^S5=PZ=74d$V;Us5sdm9MR zUZ(=n&u@u|1)A=8_{rmwJY%E4QXnb{p*n2roj2<-5yoCYe`o$g?q3JKuX6dopC?EL z%-Dcwa;IgG$_gla067?h(F!kt-1axwwB#>d1iC|W+H-K*-Ytmyv@!YrPd$Wm2eyqK zQZ%SOB5_l5l&3Qs$iQB$!Pi@EI5bo|li537NK`JVgv-4^YUeiMIe`$h*6H3)R10Sw|RC8jzH2fvPW`2~3w7j(%qVAr#@ zA}f-GcxQJQACCXGI=;cXu-P5r^?w1S1X}wI@EIh~;2#>dYDqMEqkp$&wLs&}ym!b- zZ$6|$>*)OX^LcrBBO@a*F)>%JTzTI+p|v0c8d{sv)6?(;xOeYfZf-8rP;mP22m+my zlmr(uT&Elk$J^T*QW6srot&H;9UX^;hP1bc*{EQF#%y#hU%pI?fri2tN+nB6ORYT4 z&d%tD?qkP}g@lB_BaKGW%zy3D=T#zV`vR-p0qr zYin!4mEkHSu02Aa(RT_e8gp~=($Z4YwTuK>M@Q$*ojU~u1<<&J>a4i9n8)LRYC}%g z;QU6Kas;yqSfDW*o#jI~3=IvnQtsZpi^?yQ8Jhc9Sy`GHut5L+cJ8hvkuVIv@b9gl zU_yw>JSijzO9Bb{prX7*MkxfNKZOu}(s99*6nqyX@%+~hPCs|XqKICWA zaC7~6xLNgaYa%-;S%vb8M+P-KqiGF5YnnDOF_BCr1A&0lq!))4B9X{uvr^T4eSKYBT~gBOp~-G60*S2DbzM69;hn(@ zY3Td=ds#0l-&=$BL_h-o0DP8I@A>(8G#VWq9+r$wBoeo`w=c6t&o8T+rn#}PF*P+c zKR;h66yAIh8s0@f0{{TH&}cM@#bQ36molzYDz7h{A-Noj#U!sMCnp1eKqwTty1Me{ z$O_&>Kmz~(xb{I1OI}MiFE1}kH)k>#pU=0uyDQo4TnEo0paB2?zLH@WgM)+dc-%D2 z last_state_ok { + last_state_ok = cur_state_ok + start = time.now() + } + + if cur_state_ok == num_workloads { + dl.workloads = new_workloads + return + } + + if (time.now() - start).minutes() > 5 { + return error('failed to deploy deployment: contractID: ${contract_id}, some workloads are not ready after wating 5 
minutes') + } else { + d.logger.info('Waiting for deployment to become ready') + time.sleep(500 * time.millisecond) + } + } +} + +pub fn (mut d Deployer) get_deployment(contract_id u64, node_id u32) !models.Deployment { + twin_id := d.client.get_node_twin(node_id)! + payload := { + 'contract_id': contract_id + } + res := d.rmb_deployment_get(twin_id, json.encode(payload)) or { + return error('failed to get deployment with contract id ${contract_id} due to: ${err}') + } + return json.decode(models.Deployment, res) +} + +pub fn (mut d Deployer) delete_deployment(contract_id u64, node_id u32) !models.Deployment { + twin_id := d.client.get_node_twin(node_id)! + payload := { + 'contract_id': contract_id + } + res := d.rmb_deployment_delete(twin_id, json.encode(payload))! + return json.decode(models.Deployment, res) +} + +pub fn (mut d Deployer) deployment_changes(node_id u32, contract_id u64) ![]models.Workload { + twin_id := d.client.get_node_twin(node_id)! + + res := d.rmb_deployment_changes(twin_id, contract_id)! + return json.decode([]models.Workload, res) +} + +pub fn (mut d Deployer) batch_deploy(name_contracts []string, mut dls map[u32]&models.Deployment, solution_provider ?u64) !(map[string]u64, map[u32]&models.Deployment) { + mut batch_create_contract_data := []griddriver.BatchCreateContractData{} + for name_contract in name_contracts { + batch_create_contract_data << griddriver.BatchCreateContractData{ + name: name_contract + } + } + + mut hash_map := map[u32]string{} + for node, dl in dls { + public_ips := dl.count_public_ips() + hash_hex := dl.challenge_hash().hex() + hash_map[node] = hash_hex + batch_create_contract_data << griddriver.BatchCreateContractData{ + node: node + body: dl.metadata + hash: hash_hex + public_ips: public_ips + solution_provider_id: solution_provider + } + } + + contract_ids := d.client.batch_create_contracts(batch_create_contract_data)! 
+ mut name_contracts_map := map[string]u64{} + mut threads := []thread !{} + for idx, data in batch_create_contract_data { + contract_id := contract_ids[idx] + if data.name != '' { + name_contracts_map[data.name] = contract_id + continue + } + + mut dl := dls[data.node] or { return error('Node ${data.node} not found in dls map') } + dl.contract_id = contract_id + threads << spawn d.handle_deploy(data.node, mut dl, hash_map[data.node]) + } + + for th in threads { + th.wait() or { + console.print_stderr('Rolling back: cancling the depolyed contracts: ${contract_ids} due to ${err}') + d.client.batch_cancel_contracts(contract_ids) or { + return error('Faild to cancel contracts dut to: ${err}') + } + return error('Deployment failed: ${err}') + } + } + + return name_contracts_map, dls +} diff --git a/lib/threefold/grid/deployment_state.v b/lib/threefold/grid/deployment_state.v new file mode 100644 index 00000000..45979dcf --- /dev/null +++ b/lib/threefold/grid/deployment_state.v @@ -0,0 +1,35 @@ +module grid + +import freeflowuniverse.herolib.clients.redisclient + +struct DeploymentStateDB { + redis redisclient.Redis + secret string // to encrypt symmetric +} + +struct DeploymentState { + name string + vms []VMDeployed + zdbs []ZDBDeployed +} + +pub fn (db DeploymentStateDB) set(deployment_name string, key string, val string) ! { + // store e.g. \n separated list of all keys per deployment_name + // encrypt +} + +// pub fn (db DeploymentStateDB) get(deployment_name string, key string)!string { + +// } + +// pub fn (db DeploymentStateDB) delete(deployment_name string, key string)! 
{ + +// } + +// pub fn (db DeploymentStateDB) keys(deployment_name string)![]string { + +// } + +// pub fn (db DeploymentStateDB) load(deployment_name string)!DeploymentState { + +// } diff --git a/lib/threefold/grid/factory.v b/lib/threefold/grid/factory.v new file mode 100644 index 00000000..48c2feaa --- /dev/null +++ b/lib/threefold/grid/factory.v @@ -0,0 +1,69 @@ +module grid + +import freeflowuniverse.herolib.core.base +import freeflowuniverse.herolib.core.playbook +import freeflowuniverse.herolib.ui +import freeflowuniverse.herolib.ui.console + +pub struct TFGridClient[T] { + base.BaseConfig[T] +} + +@[params] +pub struct Config { +pub mut: + mnemonics string @[secret] + network string +} + +pub fn get(instance string, cfg Config) !TFGridClient[Config] { + mut self := TFGridClient[Config]{} + if cfg.mnemonics.len > 0 { + // first the type of the instance, then name of instance, then action + self.init('tfgridclient', instance, .set, cfg)! + } else { + self.init('tfgridclient', instance, .get)! + } + return self +} + +pub fn heroplay(mut plbook playbook.PlayBook) ! { + for mut action in plbook.find(filter: 'tfgridclient.define')! { + mut p := action.params + instance := p.get_default('instance', 'default')! + mut cl := get(instance)! + mut cfg := cl.config_get()! + cfg.mnemonics = p.get('mnemonics')! + cfg.network = p.get('network')! + cl.config_save()! + } +} + +pub fn (mut self TFGridClient[Config]) config_interactive() ! { + mut myui := ui.new()! + // console.clear() + console.print_debug('\n## Configure TFGrid client') + console.print_debug('==========================') + console.print_debug('## Instance: ${self.instance}') + console.print_debug('==========================\n\n') + + mut cfg := self.config()! + + // self.instance = myui.ask_question( + // question: 'name for configuration instance' + // default: self.instance + // )! 
+ + cfg.mnemonics = myui.ask_question( + question: 'please enter your mnemonics here' + minlen: 24 + default: cfg.mnemonics + )! + + cfg.network = myui.ask_dropdown( + question: 'choose environment' + items: envs.values() + )! + + self.config_save()! +} diff --git a/lib/threefold/grid/graphql.v b/lib/threefold/grid/graphql.v new file mode 100644 index 00000000..fa36b97a --- /dev/null +++ b/lib/threefold/grid/graphql.v @@ -0,0 +1,156 @@ +module grid + +import net.http +import json +import x.json2 +import log +import freeflowuniverse.herolib.threefold.grid.models + +pub struct GraphQl { + url string +pub mut: + logger log.Log +} + +pub struct Contract { +pub: + contract_id string @[json: contractID] + deployment_data string @[json: deploymentData] + state string + node_id u32 @[json: nodeID] + name string +} + +pub struct Contracts { +pub mut: + name_contracts []Contract @[json: nameContracts] + node_contracts []Contract @[json: nodeContracts] + rent_contracts []Contract @[json: rentContracts] +} + +// contractsList, err := c.ListContractsByTwinID([]string{"Created, GracePeriod"}) +pub fn (mut g GraphQl) list_twin_contracts(twin_id u32, states []string) !Contracts { + state := '[${states.join(', ')}]' + + options := '(where: {twinID_eq: ${twin_id}, state_in: ${state}}, orderBy: twinID_ASC)' + name_contracts_count := g.get_item_total_count('nameContracts', options)! + node_contracts_count := g.get_item_total_count('nodeContracts', options)! + rent_contracts_count := g.get_item_total_count('rentContracts', options)! 
+ contracts_data := g.query('query getContracts(\$nameContractsCount: Int!, \$nodeContractsCount: Int!, \$rentContractsCount: Int!){ + nameContracts(where: {twinID_eq: ${twin_id}, state_in: ${state}}, limit: \$nameContractsCount) { + contractID + state + name + } + nodeContracts(where: {twinID_eq: ${twin_id}, state_in: ${state}}, limit: \$nodeContractsCount) { + contractID + deploymentData + state + nodeID + } + rentContracts(where: {twinID_eq: ${twin_id}, state_in: ${state}}, limit: \$rentContractsCount) { + contractID + state + nodeID + } + }', // map[string]u32{} + { + 'nodeContractsCount': node_contracts_count + 'nameContractsCount': name_contracts_count + 'rentContractsCount': rent_contracts_count + })! + + return json.decode(Contracts, contracts_data.str())! +} + +// GetItemTotalCount return count of items +fn (g GraphQl) get_item_total_count(item_name string, options string) !u32 { + count_body := 'query { items: ${item_name}Connection${options} { count: totalCount } }' + request_body := { + 'query': count_body + } + json_body := json.encode(request_body) + + resp := http.post_json(g.url, json_body)! + query_data := json2.raw_decode(resp.body)! + query_map := query_data.as_map() + + errors := query_map['errors'] or { '' }.str() + if errors != '' { + return error('graphQl query error: ${errors}') + } + + data := query_map['data']! as map[string]json2.Any + items := data['items']! as map[string]json2.Any + count := u32(items['count']!.int()) + return count +} + +struct QueryRequest { + query string + variables map[string]u32 +} + +// Query queries graphql +fn (g GraphQl) query(body string, variables map[string]u32) !map[string]json2.Any { + mut request_body := QueryRequest{ + query: body + variables: variables + } + json_body := json.encode(request_body) + resp := http.post_json(g.url, json_body)! + + query_data := json2.raw_decode(resp.body)! 
+ data_map := query_data.as_map() + result := data_map['data']!.as_map() + return result +} + +pub fn (mut g GraphQl) get_contract_by_project_name(mut deployer Deployer, project_name string) !Contracts { + mut contracts := Contracts{} + + g.logger.debug('Getting user twin') + twin_id := deployer.client.get_user_twin()! + g.logger.debug('Getting twin ${twin_id} contracts...') + + contract_list := g.list_twin_contracts(twin_id, ['Created', 'GracePeriod'])! + + g.logger.debug('filtering contract with project name: ${project_name}') + for contract in contract_list.node_contracts { + data := json.decode(models.DeploymentData, contract.deployment_data)! + if data.project_name == project_name { + contracts.node_contracts << contract + } + } + g.logger.debug('filtering name contracts related to project name: ${project_name}') + gw_workload := name_gw_in_node_contract(mut deployer, contracts.node_contracts)! + contracts.name_contracts << filter_name_contract(contract_list.name_contracts, gw_workload)! 
+ return contracts +} + +fn name_gw_in_node_contract(mut deployer Deployer, node_contracts []Contract) ![]models.Workload { + mut gw_workloads := []models.Workload{} + for contract in node_contracts { + dl := deployer.get_deployment(contract.contract_id.u64(), contract.node_id) or { + return error("Couldn't get deployment workloads: ${err}") + } + for wl in dl.workloads { + if wl.type_ == models.workload_types.gateway_name { + gw_workloads << wl + } + } + } + return gw_workloads +} + +fn filter_name_contract(name_contract []Contract, gw_workload []models.Workload) ![]Contract { + mut contracts := []Contract{} + for contract in name_contract { + for wl in gw_workload { + if wl.name == contract.name { + contracts << contract + } + } + } + return contracts +} diff --git a/lib/threefold/grid/models/computecapacity.v b/lib/threefold/grid/models/computecapacity.v new file mode 100644 index 00000000..ac2ba646 --- /dev/null +++ b/lib/threefold/grid/models/computecapacity.v @@ -0,0 +1,16 @@ +module models + +pub struct ComputeCapacity { +pub mut: + // cpu cores + cpu u8 + // memory in bytes, minimal 100 MB + memory i64 +} + +pub fn (mut c ComputeCapacity) challenge() string { + mut out := '' + out += '${c.cpu}' + out += '${c.memory}' + return out +} diff --git a/lib/threefold/grid/models/deployment.v b/lib/threefold/grid/models/deployment.v new file mode 100644 index 00000000..6e92abce --- /dev/null +++ b/lib/threefold/grid/models/deployment.v @@ -0,0 +1,188 @@ +module models + +import crypto.md5 +import json + +pub struct SignatureRequest { +pub mut: + // unique id as used in TFGrid DB + twin_id u32 + // if put on required then this twin_id needs to sign + required bool + // signing weight + weight int +} + +// Challenge computes challenge for SignatureRequest +pub fn (request SignatureRequest) challenge() string { + mut out := []string{} + out << '${request.twin_id}' + out << '${request.required}' + out << '${request.weight}' + + return out.join('') +} + +pub struct 
Signature { +pub mut: + // unique id as used in TFGrid DB + twin_id u32 + // signature (done with private key of the twin_id) + signature string + signature_type string +} + +pub struct SignatureRequirement { +pub mut: + // the requests which can allow to get to required quorum + requests []SignatureRequest + // minimal weight which needs to be achieved to let this workload become valid + weight_required int + signatures []Signature + signature_style string +} + +// Challenge computes challenge for SignatureRequest +pub fn (requirement SignatureRequirement) challenge() string { + mut out := []string{} + + for request in requirement.requests { + out << request.challenge() + } + + out << '${requirement.weight_required}' + out << '${requirement.signature_style}' + return out.join('') +} + +// deployment is given to each Zero-OS who needs to deploy something +// the zero-os'es will only take out what is relevant for them +// if signature not done on the main Deployment one, nothing will happen +@[heap] +pub struct Deployment { +pub mut: + // increments for each new interation of this model + // signature needs to be achieved when version goes up + version u32 = 1 + // the twin who is responsible for this deployment + twin_id u32 + // each deployment has unique id (in relation to originator) + contract_id u64 + // when the full workload will stop working + // default, 0 means no expiration + expiration i64 + metadata string + description string + // list of all worklaods + workloads []Workload + + signature_requirement SignatureRequirement +} + +@[params] +pub struct DeploymentArgs { +pub: + version ?u32 + twin_id u32 + contract_id u64 + expiration ?i64 + metadata DeploymentData + description ?string + workloads []Workload + signature_requirement SignatureRequirement +} + +pub fn (deployment Deployment) challenge() string { + // we need to scape `"` with `\"`char when sending the payload to be a valid json but when calculating the challenge we should remove `\` so we 
don't get invlaid signature + metadata := deployment.metadata.replace('\\"', '"') + mut out := []string{} + out << '${deployment.version}' + out << '${deployment.twin_id}' + out << '${metadata}' + out << '${deployment.description}' + out << '${deployment.expiration}' + for workload in deployment.workloads { + out << workload.challenge() + } + out << deployment.signature_requirement.challenge() + ret := out.join('') + return ret +} + +// ChallengeHash computes the hash of the challenge signed +// by the user. used for validation +pub fn (deployment Deployment) challenge_hash() []u8 { + return md5.sum(deployment.challenge().bytes()) +} + +pub fn (mut d Deployment) add_signature(twin u32, signature string) { + for mut sig in d.signature_requirement.signatures { + if sig.twin_id == twin { + sig.signature = signature + return + } + } + + d.signature_requirement.signatures << Signature{ + twin_id: twin + signature: signature + signature_type: 'sr25519' + } +} + +pub fn (mut d Deployment) json_encode() string { + mut encoded_workloads := []string{} + for mut w in d.workloads { + encoded_workloads << w.json_encode() + } + + workloads := '[${encoded_workloads.join(',')}]' + return '{"version":${d.version},"twin_id":${d.twin_id},"contract_id":${d.contract_id},"expiration":${d.expiration},"metadata":"${d.metadata}","description":"${d.description}","workloads":${workloads},"signature_requirement":${json.encode(d.signature_requirement)}}' +} + +pub fn (dl Deployment) count_public_ips() u8 { + mut count := u8(0) + for wl in dl.workloads { + if wl.type_ == workload_types.public_ip { + count += 1 + } + } + return count +} + +pub fn new_deployment(args DeploymentArgs) Deployment { + return Deployment{ + version: args.version or { 0 } + twin_id: args.twin_id + contract_id: args.contract_id + expiration: args.expiration or { 0 } + metadata: args.metadata.json_encode() + description: args.description or { '' } + workloads: args.workloads + signature_requirement: 
args.signature_requirement + } +} + +pub struct DeploymentData { +pub: + type_ string @[json: 'type'] + name string + project_name string @[json: 'projectName'] +} + +pub fn (data DeploymentData) json_encode() string { + return "{\\\"type\\\":\\\"${data.type_}\\\",\\\"name\\\":\\\"${data.name}\\\",\\\"projectName\\\":\\\"${data.project_name}\\\"}" +} + +pub fn (mut dl Deployment) add_metadata(type_ string, project_name string) { + mut data := DeploymentData{ + type_: type_ + name: project_name + project_name: '${type_}/${project_name}' // To be listed in the dashboard. + } + dl.metadata = data.json_encode() +} + +pub fn (mut d Deployment) parse_metadata() !DeploymentData { + return json.decode(DeploymentData, d.metadata)! +} diff --git a/lib/threefold/grid/models/gw_fqdn.v b/lib/threefold/grid/models/gw_fqdn.v new file mode 100644 index 00000000..7575b10f --- /dev/null +++ b/lib/threefold/grid/models/gw_fqdn.v @@ -0,0 +1,35 @@ +module models + +import json + +pub struct GatewayFQDNProxy { +pub: + tls_passthrough bool + backends []string // The backends of the gateway proxy. must be in the format ip:port if tls_passthrough is set, otherwise the format should be http://ip[:port] + network ?string // Network name to join, if backend IP is private. + fqdn string // The fully qualified domain name of the deployed workload. 
+} + +pub fn (g GatewayFQDNProxy) challenge() string { + mut output := '' + output += g.fqdn + output += '${g.tls_passthrough}' + for b in g.backends { + output += b + } + output += g.network or { '' } + + return output +} + +pub fn (g GatewayFQDNProxy) to_workload(args WorkloadArgs) Workload { + return Workload{ + version: args.version or { 0 } + name: args.name + type_: workload_types.gateway_fqdn + data: json.encode(g) + metadata: args.metadata or { '' } + description: args.description or { '' } + result: args.result or { WorkloadResult{} } + } +} diff --git a/lib/threefold/grid/models/gw_name.v b/lib/threefold/grid/models/gw_name.v new file mode 100644 index 00000000..42568f46 --- /dev/null +++ b/lib/threefold/grid/models/gw_name.v @@ -0,0 +1,41 @@ +module models + +import json + +pub struct GatewayNameProxy { +pub: + tls_passthrough bool + backends []string // The backends of the gateway proxy. must be in the format ip:port if tls_passthrough is set, otherwise the format should be http://ip[:port] + network ?string // Network name to join, if backend IP is private. + name string // Domain prefix. The fqdn will be .. This has to be unique within the deployment. Must contain only alphanumeric and underscore characters. 
+} + +pub fn (g GatewayNameProxy) challenge() string { + mut output := '' + output += g.name + output += '${g.tls_passthrough}' + for b in g.backends { + output += b + } + output += g.network or { '' } + + return output +} + +// GatewayProxyResult results +pub struct GatewayProxyResult { +pub mut: + fqdn string +} + +pub fn (g GatewayNameProxy) to_workload(args WorkloadArgs) Workload { + return Workload{ + version: args.version or { 0 } + name: args.name + type_: workload_types.gateway_name + data: json.encode(g) + metadata: args.metadata or { '' } + description: args.description or { '' } + result: args.result or { WorkloadResult{} } + } +} diff --git a/lib/threefold/grid/models/ip.v b/lib/threefold/grid/models/ip.v new file mode 100644 index 00000000..80bb7ab9 --- /dev/null +++ b/lib/threefold/grid/models/ip.v @@ -0,0 +1,37 @@ +module models + +import json + +pub struct PublicIP { +pub: + v4 bool + v6 bool +} + +pub fn (p PublicIP) challenge() string { + mut output := '' + output += '${p.v4}' + output += '${p.v6}' + + return output +} + +// PublicIPResult result returned by publicIP reservation +struct PublicIPResult { +pub mut: + ip string + ip6 string + gateway string +} + +pub fn (p PublicIP) to_workload(args WorkloadArgs) Workload { + return Workload{ + version: args.version or { 0 } + name: args.name + type_: workload_types.public_ip + data: json.encode(p) + metadata: args.metadata or { '' } + description: args.description or { '' } + result: args.result or { WorkloadResult{} } + } +} diff --git a/lib/threefold/grid/models/qsfs.v b/lib/threefold/grid/models/qsfs.v new file mode 100644 index 00000000..1fa7faed --- /dev/null +++ b/lib/threefold/grid/models/qsfs.v @@ -0,0 +1,52 @@ +module models + +pub struct QuantumSafeFS { + cache u64 + config QuantumSafeFSConfig +} + +pub struct QuantumSafeFSConfig { + minimal_shards u32 + expected_shards u32 + redundant_groups u32 + redundant_nodes u32 + max_zdb_data_dir_size u32 + encryption Encryption + meta 
QuantumSafeMeta + goups []ZDBGroup + compression QuantumCompression +} + +pub struct Encryption { + algorithm string = 'AES' // configuration to use for the encryption stage. Currently only AES is supported. + key []u8 // 64 long hex encoded encryption key (e.g. 0000000000000000000000000000000000000000000000000000000000000000). +} + +pub struct QuantumSafeMeta { + type_ string = 'ZDB' @[json: 'type'] // configuration for the metadata store to use, currently only ZDB is supported. + config QuantumSafeConfig +} + +pub struct ZDBGroup { + backends []ZDBBackend +} + +pub struct ZDBBackend { + address string // Address of backend ZDB (e.g. [300:a582:c60c:df75:f6da:8a92:d5ed:71ad]:9900 or 60.60.60.60:9900). + namespace string // ZDB namespace. + password string // Namespace password. +} + +pub struct QuantumCompression { + algorithm string = 'snappy' // configuration to use for the compression stage. Currently only snappy is supported. +} + +pub struct QuantumSafeConfig { + prefix string // Data stored on the remote metadata is prefixed with. 
+ encryption Encryption + backends []ZDBBackend +} + +pub fn (qsfs QuantumSafeFS) challenge() string { + return '' +} diff --git a/lib/threefold/grid/models/workload.v b/lib/threefold/grid/models/workload.v new file mode 100644 index 00000000..d87aa7d6 --- /dev/null +++ b/lib/threefold/grid/models/workload.v @@ -0,0 +1,166 @@ +module models + +import json +import crypto.md5 + +pub struct WorkloadTypes { +pub: + zmachine string = 'zmachine' + zmount string = 'zmount' + network string = 'network' + zdb string = 'zdb' + public_ip string = 'ip' + qsfs string = 'qsfs' + gateway_name string = 'gateway-name-proxy' + gateway_fqdn string = 'gateway-fqdn-proxy' + zlogs string = 'zlogs' +} + +pub const workload_types = WorkloadTypes{} + +type WorkloadType = string + +pub struct ResultStates { +pub: + error ResultState = 'error' + ok ResultState = 'ok' + deleted ResultState = 'deleted' +} + +pub const result_states = ResultStates{} + +type ResultState = string + +pub fn challenge(data string, type_ string) !string { + match type_ { + workload_types.zmount { + mut w := json.decode(Zmount, data)! + return w.challenge() + } + workload_types.network { + mut w := json.decode(Znet, data)! + return w.challenge() + } + workload_types.zdb { + mut w := json.decode(Zdb, data)! + return w.challenge() + } + workload_types.zmachine { + mut w := json.decode(Zmachine, data)! + return w.challenge() + } + workload_types.qsfs { + mut w := json.decode(QuantumSafeFS, data)! + return w.challenge() + } + workload_types.public_ip { + mut w := json.decode(PublicIP, data)! + return w.challenge() + } + workload_types.gateway_name { + mut w := json.decode(GatewayNameProxy, data)! + return w.challenge() + } + workload_types.gateway_fqdn { + mut w := json.decode(GatewayFQDNProxy, data)! + return w.challenge() + } + workload_types.zlogs { + mut w := json.decode(ZLogs, data)! 
+ return w.challenge() + } + else { + return '' + } + } +} + +pub enum Right { + restart + delete + stats + logs +} + +// Access Control Entry +pub struct ACE { + // the administrator twin id + twin_ids []int + rights []Right +} + +pub struct WorkloadResult { +pub mut: + created i64 + state ResultState + error string + data string @[raw] // also json.RawMessage + message string +} + +pub struct Workload { +pub mut: + version u32 + // unique name per Deployment + name string + type_ WorkloadType @[json: 'type'] + // this should be something like json.RawMessage in golang + data string @[raw] // serialize({size: 10}) ---> "data": {size:10}, + metadata string + description string + // list of Access Control Entries + // what can an administrator do + // not implemented in zos + // acl []ACE + + result WorkloadResult +} + +pub fn (workload Workload) challenge() string { + mut out := []string{} + out << '${workload.version}' + out << '${workload.name}' + out << '${workload.type_}' + out << '${workload.metadata}' + out << '${workload.description}' + out << challenge(workload.data, workload.type_) or { return out.join('') } + + return out.join('') +} + +pub fn (workload Workload) challenge_hash() []u8 { + return md5.sum(workload.challenge().bytes()) +} + +pub fn (mut w Workload) json_encode() string { + return '{"version":${w.version},"name":"${w.name}","type":"${w.type_}","data":${w.data},"metadata":"${w.metadata}","description":"${w.description}"}' +} + +type WorkloadData = GatewayFQDNProxy + | GatewayNameProxy + | PublicIP + | QuantumSafeFS + | ZLogs + | Zdb + | Zmachine + | Zmount + | Znet +type WorkloadDataResult = GatewayProxyResult + | PublicIPResult + | ZdbResult + | ZmachineResult + | ZmountResult + +// pub fn(mut w WorkloadData) challenge() string { +// return w.challenge() +// } + +@[params] +pub struct WorkloadArgs { +pub: + version ?u32 + name string + description ?string + metadata ?string + result ?WorkloadResult +} diff --git 
a/lib/threefold/grid/models/zdb.v b/lib/threefold/grid/models/zdb.v new file mode 100644 index 00000000..98d1f899 --- /dev/null +++ b/lib/threefold/grid/models/zdb.v @@ -0,0 +1,61 @@ +module models + +import json + +pub type ZdbMode = string + +pub struct ZdbModes { +pub: + seq string = 'seq' + user string = 'user' +} + +pub const zdb_modes = ZdbModes{} + +type DeviceType = string + +pub struct DeviceTypes { +pub: + hdd string = 'hdd' + ssd string = 'ssd' +} + +pub const device_types = DeviceTypes{} + +pub struct Zdb { +pub mut: + // size in bytes + size u64 + mode ZdbMode + password string + public bool +} + +pub fn (mut z Zdb) challenge() string { + mut out := '' + out += '${z.size}' + out += '${z.mode}' + out += z.password + out += '${z.public}' + + return out +} + +pub struct ZdbResult { +pub mut: + namespace string @[json: 'Namespace'] + ips []string @[json: 'IPs'] + port u32 @[json: 'Port'] +} + +pub fn (z Zdb) to_workload(args WorkloadArgs) Workload { + return Workload{ + version: args.version or { 0 } + name: args.name + type_: workload_types.zdb + data: json.encode(z) + metadata: args.metadata or { '' } + description: args.description or { '' } + result: args.result or { WorkloadResult{} } + } +} diff --git a/lib/threefold/grid/models/zlogs.v b/lib/threefold/grid/models/zlogs.v new file mode 100644 index 00000000..a0b4b46c --- /dev/null +++ b/lib/threefold/grid/models/zlogs.v @@ -0,0 +1,29 @@ +module models + +import json + +pub struct ZLogs { +pub: + zmachine string // zmachine name to stream logs of + output string // the `target` location to stream the logs to, it must be a redis or web-socket url +} + +pub fn (z ZLogs) challenge() string { + mut output := '' + output += z.zmachine + output += z.output + + return output +} + +pub fn (z ZLogs) to_workload(args WorkloadArgs) Workload { + return Workload{ + version: args.version or { 0 } + name: args.name + type_: workload_types.zlogs + data: json.encode(z) + metadata: args.metadata or { '' } + 
description: args.description or { '' } + result: args.result or { WorkloadResult{} } + } +} diff --git a/lib/threefold/grid/models/zmachine.v b/lib/threefold/grid/models/zmachine.v new file mode 100644 index 00000000..26789fb9 --- /dev/null +++ b/lib/threefold/grid/models/zmachine.v @@ -0,0 +1,139 @@ +module models + +import json + +pub struct Zmachine { +pub mut: + flist string // if full url means custom flist meant for containers, if just name should be an official vm + network ZmachineNetwork + size u64 // size of the rootfs disk in bytes + compute_capacity ComputeCapacity + mounts []Mount + entrypoint string // how to invoke that in a vm? + env map[string]string // environment for the zmachine + corex bool + gpu []string +} + +pub struct ZmachineNetwork { +pub mut: + public_ip string // PublicIP optional public IP attached to this machine. If set it must be a valid name of a PublicIP workload in the same deployment + interfaces []ZNetworkInterface // Interfaces list of user znets to join + planetary bool // Planetary support planetary network + mycelium ?MyceliumIP +} + +pub struct ZNetworkInterface { +pub mut: + network string // Network name (znet name) to join + ip string // IP of the zmachine on this network must be a valid Ip in the selected network +} + +pub struct MyceliumIP { +pub mut: + network string + hex_seed string +} + +pub fn (mut n ZmachineNetwork) challenge() string { + mut out := '' + out += n.public_ip + out += n.planetary.str() + + for iface in n.interfaces { + out += iface.network + out += iface.ip + } + + if m := n.mycelium { + out += m.challenge() + } + return out +} + +pub fn (m MyceliumIP) challenge() string { + mut out := '' + out += m.network + out += m.hex_seed + return out +} + +pub struct Mount { +pub mut: + name string + mountpoint string // the path to mount the disk into e.g. 
'/disk1' +} + +pub fn (mut m Mount) challenge() string { + mut out := '' + out += m.name + out += m.mountpoint + return out +} + +pub fn (mut m Zmachine) challenge() string { + mut out := '' + + out += m.flist + out += m.network.challenge() + out += '${m.size}' + out += m.compute_capacity.challenge() + + for mut mnt in m.mounts { + out += mnt.challenge() + } + out += m.entrypoint + + mut keys := m.env.keys() + keys.sort() + for key in keys { + out += key + out += '=' + out += m.env[key] + } + return out +} + +// response of the deployment +pub struct ZmachineResult { +pub mut: + // name unique per deployment, re-used in request & response + id string + ip string + planetary_ip string + mycelium_ip string + console_url string +} + +pub fn (z Zmachine) to_workload(args WorkloadArgs) Workload { + return Workload{ + version: args.version or { 0 } + name: args.name + type_: workload_types.zmachine + data: json.encode(z) + metadata: args.metadata or { '' } + description: args.description or { '' } + result: args.result or { WorkloadResult{} } + } +} + +// VM struct used to deploy machine in a high level manner +pub struct VM { +pub: + name string = 'myvm' + flist string = 'https://hub.grid.tf/tf-official-apps/base:latest.flist' + entrypoint string = '/sbin/zinit init' + env_vars map[string]string + cpu int = 1 + memory int = 1024 + rootfs_size int +} + +pub fn (vm VM) json_encode() string { + mut env_vars := []string{} + for k, v in vm.env_vars { + env_vars << '"${k}": "${v}"' + } + + return '{"name":"${vm.name}","flist":"${vm.flist}","entrypoint":"${vm.entrypoint}","env_vars":{${env_vars.join(',')}},"cpu":${vm.cpu},"memory":${vm.memory}, "rootfs_size": ${vm.rootfs_size}}' +} diff --git a/lib/threefold/grid/models/zmount.v b/lib/threefold/grid/models/zmount.v new file mode 100644 index 00000000..5228cf55 --- /dev/null +++ b/lib/threefold/grid/models/zmount.v @@ -0,0 +1,32 @@ +// ssd mounts under zmachine + +module models + +import json + +// ONLY possible on SSD +pub 
struct Zmount { +pub mut: + size i64 // bytes +} + +pub fn (mut mount Zmount) challenge() string { + return '${mount.size}' +} + +pub struct ZmountResult { +pub mut: + volume_id string +} + +pub fn (z Zmount) to_workload(args WorkloadArgs) Workload { + return Workload{ + version: args.version or { 0 } + name: args.name + type_: workload_types.zmount + data: json.encode(z) + metadata: args.metadata or { '' } + description: args.description or { '' } + result: args.result or { WorkloadResult{} } + } +} diff --git a/lib/threefold/grid/models/znet.v b/lib/threefold/grid/models/znet.v new file mode 100644 index 00000000..6b9e53b2 --- /dev/null +++ b/lib/threefold/grid/models/znet.v @@ -0,0 +1,117 @@ +module models + +import json +import rand +// wg network reservation (znet) + +pub struct Znet { +pub mut: + // unique nr for each network chosen, this identified private networks as connected to a container or vm or ... + // corresponds to the 2nd number of a class B ipv4 address + // is a class C of a chosen class B + // IPV4 subnet for this network resource + // this must be a valid subnet of the entire network ip range. 
+ // for example 10.1.1.0/24 + subnet string + // IP range of the network, must be an IPv4 /16 + // for example a 10.1.0.0/16 + ip_range string + // wireguard private key, curve25519 + wireguard_private_key string // can be generated using `wg genkey` command + wireguard_listen_port u16 + peers []Peer + mycelium ?Mycelium +} + +pub struct Mycelium { +pub mut: + hex_key string + peers []string +} + +pub fn (mut n Znet) challenge() string { + mut out := '' + out += n.ip_range + out += n.subnet + out += n.wireguard_private_key + out += n.wireguard_listen_port.str() + for mut p in n.peers { + out += p.challenge() + } + + if m := n.mycelium { + out += m.challenge() + } + + return out +} + +pub fn (m Mycelium) challenge() string { + mut out := '' + out += m.hex_key + for p in m.peers { + out += p + } + + return out +} + +// is a remote wireguard client which can connect to this node +pub struct Peer { +pub mut: + subnet string // IPV4 subnet of the network resource of the peer + // WGPublicKey of the peer (driven from its private key) + wireguard_public_key string // can be generated by `echo | wg pubkey` command + // is ipv4 or ipv6 address from a wireguard client who connects + // this should be the node's subnet and the wireguard routing ip that should start with `100.64` + // then the 2nd and 3rd part of the node's subnet + // e.g. ["10.20.2.0/24", "100.64.20.2/32"] + allowed_ips []string + // Entrypoint of the peer; ipv4 or ipv6, + // can be empty, one of the 2 need to be filled in though + // e.g. [2a10:b600:0:9:225:90ff:fe82:7130]:7777 + endpoint string +} + +pub struct PublicConfig { +pub: + type_ string // Type define if we need to use the Vlan field or the MacVlan + ipv4 string + ipv6 string + gw4 string + gw6 string + domain string // Domain is the node domain name e.g. 
gent01.devnet.grid.tf +} + +pub fn (mut p Peer) challenge() string { + mut out := '' + out += p.wireguard_public_key + out += p.endpoint + out += p.subnet + + for ip in p.allowed_ips { + out += ip + } + return out +} + +pub fn (z Znet) to_workload(args WorkloadArgs) Workload { + return Workload{ + version: args.version or { 0 } + name: args.name + type_: workload_types.network + data: json.encode(z) + metadata: args.metadata or { '' } + description: args.description or { '' } + result: args.result or { WorkloadResult{} } + } +} + +pub fn rand_port(takenPorts []u16) !u16 { + mut port := u16(rand.u32n(u32(6000))! + 2000) + + for takenPorts.any(it == port) { + port = u16(rand.u32n(u32(6000))! + 2000) + } + return port +} diff --git a/lib/threefold/grid/rmb.v b/lib/threefold/grid/rmb.v new file mode 100644 index 00000000..81084d44 --- /dev/null +++ b/lib/threefold/grid/rmb.v @@ -0,0 +1,45 @@ +module grid + +import json +import freeflowuniverse.herolib.threefold.grid.models + +// TODO: decode/encode the params/result here +pub fn (mut d Deployer) rmb_deployment_changes(dst u32, contract_id u64) !string { + payload := json.encode({ + 'contract_id': contract_id + }) + res := d.client.rmb_call(dst, 'zos.deployment.changes', payload)! + return res +} + +pub fn (mut d Deployer) rmb_deployment_get(dst u32, data string) !string { + res := d.client.rmb_call(dst, 'zos.deployment.get', data)! + return res +} + +pub fn (mut d Deployer) rmb_deployment_deploy(dst u32, data string) !string { + return d.client.rmb_call(dst, 'zos.deployment.deploy', data)! +} + +pub fn (mut d Deployer) rmb_deployment_update(dst u32, data string) !string { + return d.client.rmb_call(dst, 'zos.deployment.update', data)! +} + +pub fn (mut d Deployer) rmb_deployment_delete(dst u32, data string) !string { + return d.client.rmb_call(dst, 'zos.deployment.delete', data)! +} + +pub fn (mut d Deployer) get_node_pub_config(node_id u32) !models.PublicConfig { + node_twin := d.client.get_node_twin(node_id)! 
+ data := json.encode('') + res := d.client.rmb_call(node_twin, 'zos.network.public_config_get', data)! + public_config := json.decode(models.PublicConfig, res)! + return public_config +} + +pub fn (mut d Deployer) assign_wg_port(node_id u32) !u16 { + node_twin := d.client.get_node_twin(node_id)! + taken_ports := d.client.list_wg_ports(node_twin)! + port := models.rand_port(taken_ports) or { return error("can't assign wireguard port: ${err}") } + return port +} diff --git a/lib/threefold/grid/vm.v b/lib/threefold/grid/vm.v new file mode 100644 index 00000000..bf1a199c --- /dev/null +++ b/lib/threefold/grid/vm.v @@ -0,0 +1,97 @@ +module grid + +import json +import log +import freeflowuniverse.herolib.builder +import freeflowuniverse.herolib.threefold.grid.models + +struct VMSpecs { + deployment_name string + name string + nodeid u32 + pub_sshkeys []string + flist string // if any, if used then ostype not used + size u32 // size of the rootfs disk in bytes + compute_capacity models.ComputeCapacity + ostype OSType +} + +enum OSType { + ubuntu_22_04 + ubuntu_24_04 + arch + alpine +} + +struct VMDeployed { + name string + nodeid u32 + guid string + yggdrasil_ip string + mycelium_ip string +} + +pub fn (vm VMDeployed) builder_node() !&builder.Node { + mut factory := builder.new()! + return factory.node_new( + ipaddr: vm.mycelium_ip + )! +} + +// only connect to yggdrasil and mycelium +fn (mut deployer Deployer) vm_deploy(args_ VMSpecs) !VMDeployed { + mut args := args_ + + if args.pub_sshkeys.len == 0 { + return error('at least one ssh key needed to deploy vm') + } + // deploymentstate_db.set(args.deployment_name,"vm_${args.name}",json.encode(VMDeployed))! 
+ + _ := models.VM{ + name: 'vm1' + env_vars: { + 'SSH_KEY': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTwULSsUubOq3VPWL6cdrDvexDmjfznGydFPyaNcn7gAL9lRxwFbCDPMj7MbhNSpxxHV2+/iJPQOTVJu4oc1N7bPP3gBCnF51rPrhTpGCt5pBbTzeyNweanhedkKDsCO2mIEh/92Od5Hg512dX4j7Zw6ipRWYSaepapfyoRnNSriW/s3DH/uewezVtL5EuypMdfNngV/u2KZYWoeiwhrY/yEUykQVUwDysW/xUJNP5o+KSTAvNSJatr3FbuCFuCjBSvageOLHePTeUwu6qjqe+Xs4piF1ByO/6cOJ8bt5Vcx0bAtI8/MPApplUU/JWevsPNApvnA/ntffI+u8DCwgP' + } + } + + mut env_vars := { + 'SSH_KEY': args.pub_sshkeys[0] + } + // QUESTION: how to implement multiple ssh keys + for i, key in args.pub_sshkeys[0..] { + env_vars['SSH_KEY${i}'] = key + } + + machine := models.Zmachine{ + flist: args.flist + size: args.size + compute_capacity: args.compute_capacity + env: env_vars + } + + mut deployment := models.new_deployment( + // twin_id: + workloads: [machine.to_workload()] + metadata: models.DeploymentData{ + name: args.deployment_name + } + ) + + contract_id := deployer.deploy(args.nodeid, mut deployment, args.name, 0)! + deployed := deployer.get_deployment(contract_id, args.nodeid)! + if deployed.workloads.len < 1 { + panic('deployment should have at least one workload for vm') + } + vm_workload := deployed.workloads[0] + zmachine := json.decode(models.Zmachine, vm_workload.data)! + mycelium_ip := zmachine.network.mycelium or { panic('deployed vm must have mycelium ip') } + vm_deployed := VMDeployed{ + name: vm_workload.name + nodeid: args.nodeid + guid: vm_workload.name + // yggdrasil_ip: zmachine.network. + mycelium_ip: '${mycelium_ip.network}${mycelium_ip.hex_seed}' + } + + return vm_deployed +} diff --git a/lib/threefold/grid/vm_test.v b/lib/threefold/grid/vm_test.v new file mode 100644 index 00000000..b9ddf4ed --- /dev/null +++ b/lib/threefold/grid/vm_test.v @@ -0,0 +1,22 @@ +module grid + +import freeflowuniverse.herolib.installers.threefold.griddriver +import os + +fn testsuite_begin() ! { + griddriver.install()! +} + +fn test_vm_deploy() ! 
{ + mnemonics := os.getenv('TFGRID_MNEMONIC') + ssh_key := os.getenv('SSH_KEY') + + chain_network := ChainNetwork.main // User your desired network + mut deployer := new_deployer(mnemonics, chain_network)! + deployer.vm_deploy( + name: 'test_vm' + deployment_name: 'test_deployment' + nodeid: 24 + pub_sshkeys: [ssh_key] + )! +} diff --git a/lib/threefold/grid/zdb.v b/lib/threefold/grid/zdb.v new file mode 100644 index 00000000..e70de8f5 --- /dev/null +++ b/lib/threefold/grid/zdb.v @@ -0,0 +1,90 @@ +module grid + +import freeflowuniverse.herolib.clients.redisclient + +struct ZDBSpecs { + deployment_name string + nodeid string + namespace string + secret string +} + +struct ZDBDeployed { + nodeid string + namespace string + secret string + yggdrasil_ip string + mycelium_ip string +} + +// //only connect to yggdrasil and mycelium +// fn (mut deployer Deployer) vm_deploy(args_ VMSpecs) !VMDeployed { +// mut args := args_ + +// if args.pub_sshkeys.len == 0 { +// return error('at least one ssh key needed to deploy vm') +// } +// // deploymentstate_db.set(args.deployment_name,"vm_${args.name}",json.encode(VMDeployed))! + +// vm := models.VM { +// name: 'vm1' +// env_vars: { +// 'SSH_KEY': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTwULSsUubOq3VPWL6cdrDvexDmjfznGydFPyaNcn7gAL9lRxwFbCDPMj7MbhNSpxxHV2+/iJPQOTVJu4oc1N7bPP3gBCnF51rPrhTpGCt5pBbTzeyNweanhedkKDsCO2mIEh/92Od5Hg512dX4j7Zw6ipRWYSaepapfyoRnNSriW/s3DH/uewezVtL5EuypMdfNngV/u2KZYWoeiwhrY/yEUykQVUwDysW/xUJNP5o+KSTAvNSJatr3FbuCFuCjBSvageOLHePTeUwu6qjqe+Xs4piF1ByO/6cOJ8bt5Vcx0bAtI8/MPApplUU/JWevsPNApvnA/ntffI+u8DCwgP' +// } +// } + +// mut env_vars := {'SSH_KEY': args.pub_sshkeys[0]} +// // QUESTION: how to implement multiple ssh keys +// for i, key in args.pub_sshkeys[0..] 
{ +// env_vars['SSH_KEY${i}'] = key +// } + +// machine := models.Zmachine{ +// flist: args.flist +// size: args.size +// compute_capacity: args.compute_capacity +// env: env_vars +// } + +// mut deployment := models.new_deployment( +// // twin_id: +// workloads: [machine.to_workload()] +// metadata: models.DeploymentData{ +// name: args.deployment_name +// } +// ) + +// contract_id := deployer.deploy(args.nodeid, mut deployment, '', 0)! +// deployed := deployer.get_deployment(contract_id, args.nodeid)! +// if deployed.workloads.len < 1 { +// panic('deployment should have at least one workload for vm') +// } +// vm_workload := deployed.workloads[0] +// zmachine := json.decode(models.Zmachine, vm_workload.data)! +// mycelium_ip := zmachine.network.mycelium or {panic('deployed vm must have mycelium ip')} +// vm_deployed := grid.VMDeployed{ +// name: vm_workload.name +// nodeid: args.nodeid +// guid: vm_workload.name +// // yggdrasil_ip: zmachine.network. +// mycelium_ip: '${mycelium_ip.network}${mycelium_ip.hex_seed}' +// } + +// return vm_deployed +// } + +// test zdb is answering +pub fn (zdb ZDBDeployed) ping() bool { + panic('implement') +} + +pub fn (zdb ZDBDeployed) redisclient() !redisclient.Redis { + redis_addr := '${zdb.mycelium_ip}:6379' + return redisclient.new(redis_addr)! +} + +// //only connect to yggdrasil and mycelium +// // +// fn zdb_deploy(args_ ZDBSpecs) ZDBDeployed{ + +// } diff --git a/lib/threefold/grid4/cloudslices/loader.v b/lib/threefold/grid4/cloudslices/loader.v new file mode 100644 index 00000000..cdab6cd4 --- /dev/null +++ b/lib/threefold/grid4/cloudslices/loader.v @@ -0,0 +1,17 @@ +module cloudslices + +import json +import freeflowuniverse.herolib.core.pathlib + +// load the cloudboxes from a path +pub fn load(path string) ![]Node { + mut p := pathlib.get_dir(path: path, create: false)! + mut items := p.list(regex: [r'.*\.json$'])! + mut r := []Node{} + for mut item in items.paths { + d := item.read()! + mynode := json.decode(Node, d)! 
+ r << mynode + } + return r +} diff --git a/lib/threefold/grid4/cloudslices/model.v b/lib/threefold/grid4/cloudslices/model.v new file mode 100644 index 00000000..54824040 --- /dev/null +++ b/lib/threefold/grid4/cloudslices/model.v @@ -0,0 +1,95 @@ +module cloudslices + +import time + +pub struct Node { +pub mut: + id int + name string + cost f64 + deliverytime time.Time + description string + cpu_brand string + cpu_version string + inca_reward int + image string + mem string + hdd string + ssd string + url string + reputation int + uptime int + continent string + country string + passmark int + cloudbox []CloudBox + aibox []AIBox + storagebox []StorageBox + vendor string + grant NodeGrant +} + +pub struct NodeGrant { +pub mut: + grant_month_usd string + grant_month_inca string + grant_max_nrnodes int +} + +pub struct CloudBox { +pub mut: + amount int + description string + storage_gb f64 + passmark int + vcores int + mem_gb f64 + price_range []f64 = [0.0, 0.0] + price_simulation f64 + ssd_nr int +} + +pub struct AIBox { +pub mut: + amount int + gpu_brand string + gpu_version string + description string + storage_gb f64 + passmark int + vcores int + mem_gb f64 + mem_gb_gpu f64 + price_range []f64 = [0.0, 0.0] + price_simulation f64 + hdd_nr int + ssd_nr int +} + +pub struct StorageBox { +pub mut: + amount int + description string + price_range []f64 = [0.0, 0.0] + price_simulation f64 +} + +fn (mut n Node) validate_percentage(v int) ! { + if v < 0 || v > 100 { + return error('Value must be between 0 and 100') + } +} + +pub fn preprocess_value(v string) string { + // Implement the preprocessing logic here + return v +} + +pub fn (mut n Node) preprocess_location(v string) ! { + n.continent = preprocess_value(v) + n.country = preprocess_value(v) +} + +// pub fn (mut n Node) parse_deliverytime(v string) ! { +// n.deliverytime = time.parse(v, 'YYYY-MM-DD')! 
+// } diff --git a/lib/threefold/grid4/cloudslices/model_aggregated.v b/lib/threefold/grid4/cloudslices/model_aggregated.v new file mode 100644 index 00000000..f4386415 --- /dev/null +++ b/lib/threefold/grid4/cloudslices/model_aggregated.v @@ -0,0 +1,75 @@ +module cloudslices + +import time + +pub struct NodeTotal { +pub mut: + id int + name string + cost f64 + deliverytime time.Time + description string + cpu_brand string + cpu_version string + inca_reward int + image string + mem string + hdd string + ssd string + url string + reputation int + uptime int + continent string + country string + + storage_gb f64 + mem_gb f64 + mem_gb_gpu f64 + price_simulation f64 + passmark int + vcores int +} + +pub fn (n Node) node_total() NodeTotal { + mut total := NodeTotal{ + id: n.id + name: n.name + cost: n.cost + deliverytime: n.deliverytime + description: n.description + cpu_brand: n.cpu_brand + cpu_version: n.cpu_version + inca_reward: n.inca_reward + image: n.image + mem: n.mem + hdd: n.hdd + ssd: n.ssd + url: n.url + reputation: n.reputation + uptime: n.uptime + continent: n.continent + country: n.country + } + for box in n.cloudbox { + total.storage_gb += box.storage_gb * f64(box.amount) + total.mem_gb += box.mem_gb * f64(box.amount) + total.price_simulation += box.price_simulation * f64(box.amount) + total.passmark += box.passmark * box.amount + total.vcores += box.vcores * box.amount + } + + for box in n.aibox { + total.storage_gb += box.storage_gb * f64(box.amount) + total.mem_gb += box.mem_gb * f64(box.amount) + total.mem_gb_gpu += box.mem_gb_gpu * f64(box.amount) + total.price_simulation += box.price_simulation * f64(box.amount) + total.passmark += box.passmark * box.amount + total.vcores += box.vcores * box.amount + } + + for box in n.storagebox { + total.price_simulation += box.price_simulation * f64(box.amount) + } + + return total +} diff --git a/lib/threefold/grid4/cloudslices/play.v b/lib/threefold/grid4/cloudslices/play.v new file mode 100644 index 
00000000..1970ab02 --- /dev/null +++ b/lib/threefold/grid4/cloudslices/play.v @@ -0,0 +1,123 @@ +module cloudslices + +import freeflowuniverse.herolib.core.playbook { PlayBook } + +// this play script should never be called from hero directly its called by gridsimulator +pub fn play(mut plbook PlayBook) !map[string]&Node { + mut actions2 := plbook.actions_find(actor: 'tfgrid_simulator')! + + mut nodesdict := map[string]&Node{} + for action in actions2 { + if action.name == 'node_define' { + mut name := action.params.get_default('name', '')! + mut node := Node{ + grant: NodeGrant{} + } + + nodesdict[name] = &node + + node.cpu_brand = action.params.get_default('cpu_brand', '')! + node.cpu_version = action.params.get_default('cpu_version', '')! + // node.deliverytime = action.params.get_default('deliverytime', '')! + node.description = action.params.get_default('description', '')! + node.hdd = action.params.get_default('hdd', '')! + node.image = action.params.get_default('image', '')! + node.inca_reward = action.params.get_int('inca_reward')! + node.mem = action.params.get_default('mem', '')! + node.passmark = action.params.get_int_default('passmark', 0)! + node.cost = action.params.get_float('cost')! // This is required + node.ssd = action.params.get_default('ssd', '')! + node.url = action.params.get_default('url', '')! + node.vendor = action.params.get_default('vendor', '')! + + // get the grants + node.grant.grant_month_usd = action.params.get('grant_month_usd') or { '' } + node.grant.grant_month_inca = action.params.get('grant_month_inca') or { '' } + node.grant.grant_max_nrnodes = action.params.get_int('grant_max_nrnodes') or { 0 } + } + } + // now all nodes are defined lets now do the sub parts + for action in actions2 { + if action.name == 'cloudbox_define' { + mut node_name := action.params.get('node')! 
// needs to be specified + mut node := nodesdict[node_name] or { + return error("can't find node with name: ${node_name}") + } + + mut subobj := CloudBox{ + amount: action.params.get_int_default('amount', 1)! + description: action.params.get_default('description', '')! + ssd_nr: action.params.get_int_default('ssd_nr', 1)! + storage_gb: action.params.get_float('storage_gb')! // required + passmark: action.params.get_int_default('passmark', 1)! + vcores: action.params.get_int('vcores')! + mem_gb: action.params.get_float('mem_gb')! + price_range: action.params.get_list_f64('price_range')! + price_simulation: action.params.get_float('price_simulation')! + } + + if subobj.price_range.len != 2 { + return error('price range needs to be 2 elements for \n${subobj}') + } + if subobj.price_simulation == 0.0 { + return error('price_simulation needs to be specified for \n${subobj}') + } + + node.cloudbox << subobj + } + + if action.name == 'storagebox_define' { + mut node_name := action.params.get('node')! // needs to be specified + mut node := nodesdict[node_name] or { + return error("can't find node with name: ${node_name}") + } + + mut subobj := StorageBox{ + amount: action.params.get_int_default('amount', 1)! + description: action.params.get_default('description', '')! + price_range: action.params.get_list_f64('price_range')! + price_simulation: action.params.get_float('price_simulation')! + } + + if subobj.price_range.len != 2 { + return error('price range needs to be 2 elements for \n${subobj}') + } + if subobj.price_simulation == 0.0 { + return error('price_simulation needs to be specified for \n${subobj}') + } + node.storagebox << subobj + } + + if action.name == 'aibox_define' { + mut node_name := action.params.get('node')! // needs to be specified + mut node := nodesdict[node_name] or { + return error("can't find node with name: ${node_name}") + } + + mut subobj := AIBox{ + amount: action.params.get_int_default('amount', 1)! 
+ description: action.params.get_default('description', '')! + ssd_nr: action.params.get_int_default('ssd_nr', 1)! + storage_gb: action.params.get_float('storage_gb')! // required + mem_gb_gpu: action.params.get_float('mem_gb_gpu')! + passmark: action.params.get_int_default('passmark', 1)! + vcores: action.params.get_int('vcores')! + mem_gb: action.params.get_float('mem_gb')! + price_range: action.params.get_list_f64('price_range')! + price_simulation: action.params.get_float('price_simulation')! + gpu_brand: action.params.get_default('gpu_brand', '')! + gpu_version: action.params.get_default('gpu_version', '')! + } + + if subobj.price_range.len != 2 { + return error('price range needs to be 2 elements for \n${subobj}') + } + if subobj.price_simulation == 0.0 { + return error('price_simulation needs to be specified for \n${subobj}') + } + + node.aibox << subobj + } + } + return nodesdict +} diff --git a/lib/threefold/grid4/farmingsimulator/factory.v b/lib/threefold/grid4/farmingsimulator/factory.v new file mode 100644 index 00000000..06d3e2bb --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/factory.v @@ -0,0 +1,96 @@ +module farmingsimulator + +import freeflowuniverse.herolib.core.playbook +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.develop.gittools +import freeflowuniverse.herolib.biz.spreadsheet +import freeflowuniverse.herolib.ui.console + +__global ( + farmingsimulators shared map[string]&Simulator +) + +@[params] +pub struct SimulatorArgs { +pub mut: + name string = 'default' // name of simulation + path string + git_url string + git_reset bool + git_pull bool +} + +// is called from the play +pub fn new(args_ SimulatorArgs) !Simulator { + mut args := args_ + + if args.name == '' { + return error('simulation needs to have a name') + } + args.name = texttools.name_fix(args.name) + + console.print_header('farming simulator \'${args.name}\'') + + // if args.mdbook_name == '' { + // args.mdbook_name = args.name + // } + 
+ // mut cs := currency.new() + mut sh := spreadsheet.sheet_new(name: 'tffarmingsim_${args.name}')! + mut sim := Simulator{ + name: args.name + sheet: sh + args: args + // params: args + // currencies: cs + } + + if args.git_url.len > 0 { + mut gs := gittools.new()! + mut repo := gs.get_repo( + url: args.git_url + pull: args.git_pull + reset: args.git_reset + reload: false + )! + + args.path = repo.get_path()! + } + + if args.path.len > 0 { + sim.load()! + } + + simulator_set(sim) + + return sim +} + +// get sheet from global +pub fn simulator_get(name string) !&Simulator { + rlock farmingsimulators { + if name in farmingsimulators { + return farmingsimulators[name] or { + return error('Farming simulator ${name} not found') + } + } + } + return error("cann't find tfgrid gridsimulator:'${name}' in global farmingsimulators") +} + +// remember sheet in global +pub fn simulator_set(sim Simulator) { + lock farmingsimulators { + farmingsimulators[sim.name] = &sim + } + spreadsheet.sheet_set(sim.sheet) +} + +// load the mdbook content from path or git +fn (mut self Simulator) load() ! { + console.print_header('farming simulator load from ${self.args.path}') + + mut plbook := playbook.new(path: self.args.path)! + + self.play(mut plbook)! 
+} diff --git a/lib/threefold/grid4/farmingsimulator/model_capacity.v b/lib/threefold/grid4/farmingsimulator/model_capacity.v new file mode 100644 index 00000000..915fbae2 --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/model_capacity.v @@ -0,0 +1,48 @@ +module farmingsimulator + +import math + +// https://library.threefold.me/info/threefold/#/tfgrid/resource_units +pub struct ResourceUnits { +pub mut: + cru f64 // 1 logical core + mru f64 // 1 GB of memory + hru f64 // 1 GB of HD + sru f64 // 1 GB of SSD +} + +// cu = min((mru - 1) / 4, cru * 4 / 2, sru / 50) +// su = hru / 1200 + sru * 0.8 / 200 +// https://library.threefold.me/info/threefold/#/tfgrid/farming/cloudunits +pub struct CloudUnits { +pub mut: + cu f64 + su f64 + nu f64 // GB per month +} + +// this is the calculation as result of defining the node template +@[heap] +pub struct FarmingCapacity { +pub mut: + resourceunits ResourceUnits + cloudunits CloudUnits + cost f64 + // consumption for 1 node in watt + power f64 + // expressed in U, there are 44 in 1 rack + rackspace f64 +} + +fn cloudunits_calc(ru ResourceUnits) CloudUnits { + mut cu := 0.0 + mut su := 0.0 + cu = math.min((ru.mru - 1) / 4, ru.cru * 4 / 2) + cu = math.min(cu, ru.sru / 50) // make sure that we have enough SSD + su = ru.hru / 1200 + ru.sru * 0.8 / 200 + cloudunits := CloudUnits{ + cu: cu + su: su + } + return cloudunits +} diff --git a/lib/threefold/grid4/farmingsimulator/model_nodesbatch.v b/lib/threefold/grid4/farmingsimulator/model_nodesbatch.v new file mode 100644 index 00000000..b22c70f1 --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/model_nodesbatch.v @@ -0,0 +1,63 @@ +module farmingsimulator + +// import freeflowuniverse.herolib.calc + +// X nr of nodes who are added in 1 month +struct NodesBatch { +pub mut: + node_template &NodeTemplate @[str: skip] + nrnodes int + start_month int + nrmonths int + hw_cost f64 + regional_internet &RegionalInternet @[str: skip] +} + +struct NBCalc { +pub mut: + power_kwh int 
+ tokens_farmed f64 + rackspace f64 + power_cost f64 + rackspace_cost f64 + hw_cost f64 + support_cost f64 + nrnodes f64 +} + +fn (mut nb NodesBatch) calc(month int) !NBCalc { + mut ri := nb.regional_internet + + power_kwh := nb.node_template.capacity.power * 24 * 30 / 1000 * nb.nrnodes + rackspace := nb.node_template.capacity.rackspace * nb.nrnodes + _ := ri.simulator.params + tokens_farmed := ri.token_farming(nb.node_template, month)! + + if month < nb.start_month { + return NBCalc{} + } + if month > nb.start_month + nb.nrmonths { + return NBCalc{} + } + + mut cost_power_unit_row := ri.sheet.row_get('cost_power')! + mut rackspace_cost_unit_row := ri.sheet.row_get('rackspace_cost_unit')! + mut support_cost_node_row := ri.sheet.row_get('support_cost_node')! + + cost_power_unit := cost_power_unit_row.cells[month].val + rackspace_cost_unit := rackspace_cost_unit_row.cells[month].val + support_cost_node := support_cost_node_row.cells[month].val + + nbc := NBCalc{ + power_kwh: int(power_kwh) + power_cost: power_kwh * cost_power_unit + rackspace: rackspace + rackspace_cost: rackspace * rackspace_cost_unit + hw_cost: nb.hw_cost / 6 / 12 * nb.nrnodes // over 6 years + support_cost: support_cost_node + nb.node_template.capacity.cost * 0.02 / 12 * nb.nrnodes // 2% of HW has to be replaced + tokens_farmed: tokens_farmed * nb.nrnodes + nrnodes: nb.nrnodes + } + + return nbc +} diff --git a/lib/threefold/grid4/farmingsimulator/model_nodetemplate.v b/lib/threefold/grid4/farmingsimulator/model_nodetemplate.v new file mode 100644 index 00000000..90c709e6 --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/model_nodetemplate.v @@ -0,0 +1,109 @@ +module farmingsimulator + +// is a template which can be used for deploying threefold nodes +// a component group describes which components make up the node template +@[heap] +pub struct NodeTemplate { +pub mut: + name string + components []ComponentGroup + capacity FarmingCapacity // the result for this node template +} + +// as 
used in node template +pub struct ComponentGroup { +pub mut: + name string + nr int // nr of components + component Component +} + +// is a component as used to create a node template +// see https://library.threefold.me/info/threefold/#/tfgrid/resource_units +pub struct Component { +pub mut: + name string + description string + cost f64 // cost always in USD + rackspace f64 // expressed in U, typical rack has 44 units + power f64 // expressed in watt + cru f64 // 1 logical core + mru f64 // 1 GB of memory + hru f64 // 1 GB of HD + sru f64 // 1 GB of SSD +} + +// a node template, holds the construction of a node as used in a grid +pub fn node_template_new(name string) NodeTemplate { + return NodeTemplate{ + capacity: FarmingCapacity{} + } +} + +pub struct ComponentGroupArgs { +pub mut: + nr int // nr of components + component Component +} + +pub fn (mut nt NodeTemplate) components_add(cg ComponentGroupArgs) { + nt.components << ComponentGroup{ + name: cg.component.name + nr: cg.nr + component: cg.component + } + nt.calc() +} + +// recalculate the totals of the template +fn (mut nt NodeTemplate) calc() { + mut fc := FarmingCapacity{} + for cg in nt.components { + fc.cost += cg.component.cost * cg.nr + fc.rackspace += cg.component.rackspace * cg.nr + fc.power += cg.component.power * cg.nr + fc.resourceunits.cru += cg.component.cru * cg.nr + fc.resourceunits.mru += cg.component.mru * cg.nr + fc.resourceunits.hru += cg.component.hru * cg.nr + fc.resourceunits.sru += cg.component.sru * cg.nr + } + fc.cloudunits = cloudunits_calc(fc.resourceunits) // calculate the cloudunits + nt.capacity = fc +} + +// //define a template node +// cpu_amd_gr9 := sim.Component{ +// name: "AMD32" +// description: "powerful amd cpu" +// cost:250.0 +// power:70 +// cru:32 +// } +// case1u := sim.Component{ +// name: "case_1u" +// description: "1U rack mountable case" +// cost:150.0 +// rackspace:1 +// power:20 +// } +// mem32 := sim.Component{ +// name: "32GB" +// description: "memory 32 GB" 
+// cost:90.0 +// power:20 +// mru:32 +// } +// ssd1 := sim.Component{ +// name: "ssd2gb" +// description: "SSD of 1 GB" +// cost:120.0 +// power:5 +// sru:2000 +// } + +// //lets populate our template +// mut node_1u_template := sim.node_template_new("1u") +// node_1u_template.components_add(nr:1,component:case1u) //add case +// node_1u_template.components_add(nr:1,component:cpu_amd_gr9) //add CPU +// node_1u_template.components_add(nr:4,component:mem32) //add mem +// node_1u_template.components_add(nr:2,component:ssd1) //add ssd diff --git a/lib/threefold/grid4/farmingsimulator/model_params.v b/lib/threefold/grid4/farmingsimulator/model_params.v new file mode 100644 index 00000000..88f9c175 --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/model_params.v @@ -0,0 +1,121 @@ +module farmingsimulator + +import freeflowuniverse.herolib.core.playbook + +pub struct ParamsCultivation { +pub mut: + utilization_nodes string = '1:0,24:70' + revenue_per_cu_usd string = '1:5,60:4' + revenue_per_su_usd string = '1:5,60:3' + revenue_per_nu_usd string = '1:0.01,60:0.005' + cost_per_cu_usd string = '1:0' + cost_per_su_usd string = '1:0' + cost_per_nu_usd string = '1:0.005,60:0.0025' +} + +pub struct ParamsEnvironment { +pub mut: + power_cost string = '1:0.06,60:0.15' + rackspace_cost string = '1:10,60:5' +} + +pub struct ParamsFarming { +pub mut: + farming_lockup int = 24 + farming_min_utilizaton int = 30 + price_increase_nodecost string = '1:1,60:0.4' + support_cost_node string = '1:20' +} + +pub struct ParamsTokens { +pub mut: + chi_total_tokens_million int = 1000 + chi_price_usd string = '1:0.1' +} + +pub struct Params { +pub mut: + wiki_path string = '/tmp/simulatorwiki' + cultivation ParamsCultivation + env ParamsEnvironment + farming ParamsFarming + tokens ParamsTokens +} + +// TODO: check carefully + +pub fn params_new(parser playbook.PlayBook) !Params { + mut p := Params{} + + for action in parser.actions { + if action.name == 'cultivation_params_define' { + mut pc 
:= ParamsCultivation{} + if action.params.exists('utilization_nodes') { + pc.utilization_nodes = action.params.get('utilization_nodes')! + } + if action.params.exists('revenue_per_cu_usd') { + pc.revenue_per_cu_usd = action.params.get('revenue_per_cu_usd')! + } + if action.params.exists('revenue_per_su_usd') { + pc.revenue_per_su_usd = action.params.get('revenue_per_su_usd')! + } + if action.params.exists('revenue_per_nu_usd') { + pc.revenue_per_nu_usd = action.params.get('revenue_per_nu_usd')! + } + if action.params.exists('cost_per_cu_usd') { + pc.cost_per_cu_usd = action.params.get('cost_per_cu_usd')! + } + if action.params.exists('cost_per_su_usd') { + pc.cost_per_su_usd = action.params.get('cost_per_su_usd')! + } + if action.params.exists('cost_per_nu_usd') { + pc.cost_per_nu_usd = action.params.get('cost_per_nu_usd')! + } + p.cultivation = pc + } + if action.name == 'cultivation_params_define' { + mut pe := ParamsEnvironment{} + if action.params.exists('power_cost') { + pe.power_cost = action.params.get('power_cost')! + } + if action.params.exists('rackspace_cost') { + pe.rackspace_cost = action.params.get('rackspace_cost')! + } + p.env = pe + } + + if action.name == 'farming_params_define' { + mut pf := ParamsFarming{} + if action.params.exists('farming_lockup') { + pf.farming_lockup = action.params.get_int('farming_lockup')! + } + if action.params.exists('farming_min_utilizaton') { + pf.farming_min_utilizaton = action.params.get_int('farming_min_utilizaton')! + } + if action.params.exists('price_increase_nodecost') { + pf.price_increase_nodecost = action.params.get('price_increase_nodecost')! + } + if action.params.exists('support_cost_node') { + pf.support_cost_node = action.params.get('support_cost_node')! + } + p.farming = pf + } + + if action.name == 'token_params_define' { + mut pt := ParamsTokens{} + if action.params.exists('chi_price_usd') { + pt.chi_price_usd = action.params.get('chi_price_usd')! 
+ } + if action.params.exists('chi_total_tokens_million') { + pt.chi_total_tokens_million = action.params.get_int('chi_total_tokens_million')! + } + p.tokens = pt + } + if action.name == 'simulator_params_define' { + if action.params.exists('wiki_path') { + p.wiki_path = action.params.get('wiki_path')! + } + } + } + return p +} diff --git a/lib/threefold/grid4/farmingsimulator/model_regionalinternet.v b/lib/threefold/grid4/farmingsimulator/model_regionalinternet.v new file mode 100644 index 00000000..e58fc222 --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/model_regionalinternet.v @@ -0,0 +1,132 @@ +module farmingsimulator + +import freeflowuniverse.herolib.biz.spreadsheet + +pub struct RegionalInternet { +pub mut: + name string + batches []NodesBatch + simulator &Simulator @[str: skip] + sheet spreadsheet.Sheet +} + +pub struct RegionalInternetNew { +pub mut: + name string +} + +pub fn (mut sim Simulator) regionalinternet_add(name string) !&RegionalInternet { + mut sh := spreadsheet.sheet_new(name: 'regionalinternet_${name}', nrcol: sim.nrmonths) or { + panic(err) + } + mut ri := RegionalInternet{ + name: name + simulator: &sim + sheet: sh + } + + mut params := ri.simulator.params + ri.sheet.row_new(name: 'nrnodes', aggregatetype: .max)! + ri.sheet.row_new(name: 'powerusage')! + ri.sheet.row_new(name: 'chi_total_tokens', growth: '1:0.0', aggregatetype: .max)! + ri.sheet.row_new(name: 'rackspace', aggregatetype: .max)! + + // power and rackspace cost + ri.sheet.row_new(name: 'cost_power', growth: params.env.power_cost)! + ri.sheet.row_new(name: 'cost_power', growth: '1:0', tags: 'cost')! + ri.sheet.row_new(name: 'rackspace_cost_unit', growth: params.env.rackspace_cost)! + ri.sheet.row_new(name: 'rackspace_cost', growth: '1:0', tags: 'cost')! + + // how does the cost price increase per node + ri.sheet.row_new(name: 'price_increase_nodecost', growth: params.farming.price_increase_nodecost)! 
+ // how does the support cost increase per node + ri.sheet.row_new(name: 'support_cost_node', growth: params.farming.support_cost_node)! + + ri.sheet.row_new(name: 'cost_network', tags: 'cost')! + ri.sheet.row_new(name: 'cost_hardware', tags: 'cost')! + ri.sheet.row_new(name: 'cost_support', tags: 'cost')! + + ri.sheet.row_new(name: 'chi_price_usd', growth: params.tokens.chi_price_usd, aggregatetype: .max)! + + ri.sheet.row_new(name: 'chi_farmed_month', aggregatetype: .max)! + + // mut utilization_nodes := ri.sheet.row_new(name:'utilization_nodes',growth:ri.simulator.params.utilization_nodes,aggregatetype:.max)! + + sim.regional_internets[ri.name] = &ri + + return &ri +} + +pub struct RegionalInternetNodesAddArgs { +pub mut: + template NodeTemplate + growth string = '3:0,4:50,12:100,24:1000,60:5000' +} + +// add nodes to a regional internet: +// args: +// nodetemplate NodeTemplate +// nodegrowth string = '3:0,4:50,12:5000,24:50000,60:1000000' +pub fn (mut ri RegionalInternet) nodes_add(args RegionalInternetNodesAddArgs) ! { + mut price_increase_nodecost_row := ri.sheet.row_get('price_increase_nodecost')! + + mut sh := spreadsheet.sheet_new(name: 'temp') or { panic(err) } + mut nrnodes_add := sh.row_new(name: 'nrnodes_added', growth: args.growth)! + // nrnodes_add.int() + // mut nrnodes := nrnodes_add.aggregate('nrnodes')! + // println(nrnodes_add) + // println(nrnodes) + mut month := 0 + for cell in nrnodes_add.cells { + // mut sh_nb := calc.sheet_new(name: "nb",nrcol:ri.simulator.params.nrmonths) or { panic(err) } + price_increase_nodecost := price_increase_nodecost_row.cell_get(month)! 
+ hw_cost := args.template.capacity.cost * price_increase_nodecost.val + mut nb := NodesBatch{ + node_template: &args.template + hw_cost: hw_cost + nrnodes: int(cell.val) + start_month: month + nrmonths: ri.simulator.nrmonths + regional_internet: &ri + // sheet: sh_nb + } + // nb.node_template.calc()//not needed done in each component step + ri.batches << nb + month += 1 + } +} + +// calculate how a regional internet will expand in relation to the arguments given +pub fn (mut ri RegionalInternet) calc() ! { + mut nrnodes := ri.sheet.row_get('nrnodes')! + mut powerusage := ri.sheet.row_get('powerusage')! + mut rackspace := ri.sheet.row_get('rackspace')! + mut cost_power := ri.sheet.row_get('cost_power')! + mut rackspace_cost := ri.sheet.row_get('rackspace_cost')! + mut cost_hardware := ri.sheet.row_get('cost_hardware')! + mut cost_support := ri.sheet.row_get('cost_support')! + mut chi_farmed_month := ri.sheet.row_get('chi_farmed_month')! + mut chi_total_tokens := ri.sheet.row_get('chi_total_tokens')! + + for x in 0 .. ri.simulator.nrmonths { + for mut nb in ri.batches { + res := nb.calc(x)! + nrnodes.cells[x].add(res.nrnodes) + powerusage.cells[x].add(res.power_kwh) + rackspace.cells[x].add(res.rackspace) + cost_power.cells[x].add(res.power_cost) + rackspace_cost.cells[x].add(res.rackspace_cost) + cost_hardware.cells[x].add(res.hw_cost) + cost_support.cells[x].add(res.support_cost) + chi_farmed_month.cells[x].add(res.tokens_farmed) + if x > 0 { + chi_total_tokens.cells[x].val = chi_farmed_month.cells[x].val + + chi_total_tokens.cells[x - 1].val + } else { + chi_total_tokens.cells[x].val = chi_farmed_month.cells[x].val + } + } + } + // chi_farmed_month.divide('chi_farmed_month_node', nrnodes)! + // mut chi_total_tokens := chi_farmed_month.aggregate("chi_total_tokens")! 
+} diff --git a/lib/threefold/grid4/farmingsimulator/model_simulator.v b/lib/threefold/grid4/farmingsimulator/model_simulator.v new file mode 100644 index 00000000..265cad5b --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/model_simulator.v @@ -0,0 +1,37 @@ +module farmingsimulator + +import freeflowuniverse.herolib.biz.spreadsheet +// import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.develop.gittools +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.core.playbook +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.threefold.grid4.cloudslices + +@[heap] +pub struct Simulator { +pub mut: + name string + sheet &spreadsheet.Sheet + args SimulatorArgs + params Params + nrmonths int = 6 * 12 + regional_internets map[string]&RegionalInternet + node_templates map[string]&NodeTemplate + components map[string]&Component + // params Params +} + +pub fn (mut s Simulator) regionalinternet_get(name_ string) !&RegionalInternet { + name := name_.to_lower() + return s.regional_internets[name] or { + return error('Cannot find regional internet with name: ${name}') + } +} + +pub fn (mut s Simulator) nodetemplate_get(name_ string) !&NodeTemplate { + name := name_.to_lower() + return s.node_templates[name] or { + return error('Cannot find note template with name: ${name}') + } +} diff --git a/lib/threefold/grid4/farmingsimulator/play.v b/lib/threefold/grid4/farmingsimulator/play.v new file mode 100644 index 00000000..e8190094 --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/play.v @@ -0,0 +1,126 @@ +module farmingsimulator + +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.core.playbook { PlayBook } +// import freeflowuniverse.herolib.threefold.grid4.farmingsimulator + +pub fn play(mut plbook PlayBook) ! 
{ + // mut sheet_name := '' + // first make sure we find a run action to know the name + + mut my_actions := plbook.actions_find(actor: 'tfgridsimulation_farming')! + + if my_actions.len == 0 { + return + } + + _ := '' + // console.print_header("AAAA") + // console.print_debug(plbook) + // console.print_header("BBBB") + + for mut action in my_actions { + if action.name == 'run' { + mut sim := new( + name: action.params.get_default('name', 'default')! + path: action.params.get_default('path', '')! + git_url: action.params.get_default('git_url', '')! + git_reset: action.params.get_default_false('git_reset') + git_pull: action.params.get_default_false('git_pull') + )! + console.print_header('run the grid farming simulator') + sim.play(mut plbook)! + simulator_set(sim) + console.print_debug('done') + } + } +} + +pub fn (mut s Simulator) play(mut plbook PlayBook) ! { + mut actions2 := plbook.actions_find(actor: 'tfgridsimulation_farming')! + + if actions2.len == 0 { + // means nothing to do return quickly + return + } + + for action_nt in actions2 { + // ADD THE NODE TEMPLATES + if action_nt.name == 'component_define' { + mut c_name := action_nt.params.get_default('name', '')! + c_name = c_name.to_lower() + mut c_description := action_nt.params.get_default('description', '')! + mut c_cost := action_nt.params.get_float('cost')! + mut rackspace := action_nt.params.get_float_default('rackspace', 0)! + mut power := action_nt.params.get_float_default('power', 0)! + mut cru := action_nt.params.get_float_default('cru', 0)! + mut mru := action_nt.params.get_float_default('mru', 0)! + mut hru := action_nt.params.get_float_default('hru', 0)! + mut sru := action_nt.params.get_float_default('sru', 0)! 
+ mut component := Component{ + name: c_name + description: c_description + cost: c_cost + rackspace: rackspace + power: power + cru: cru + mru: mru + hru: hru + sru: sru + } + s.components[c_name] = &component + } + if action_nt.name == 'node_template_define' { + mut nt_name := action_nt.params.get('name')! + nt_name = nt_name.to_lower() + mut node_template := node_template_new(nt_name) + s.node_templates[nt_name] = &node_template + } + if action_nt.name == 'node_template_component_add' { + mut comp_templ_name := action_nt.params.get('name')! + mut comp_name := action_nt.params.get('component')! + mut comp_nr := action_nt.params.get_int('nr')! + comp_templ_name = comp_templ_name.to_lower() + comp_name = comp_name.to_lower() + mut node_template := s.node_templates[comp_templ_name] or { + return error("Cannot find node template: '${comp_templ_name}', has it been defined?") + } + component := s.components[comp_name] or { + return error("Cannot find component: '${comp_name}', has it been defined?") + } + node_template.components_add(nr: comp_nr, component: component) + } + } + + // NOW ADD THE REGIONAL INTERNETS + mut actions3 := plbook.actions_find(actor: 'tfgridsimulation_farming')! + for action_ri in actions3 { + if action_ri.name == 'regional_internet_add' { + mut iname := action_ri.params.get('name')! + s.regionalinternet_add(iname)! + } + if action_ri.name == 'regional_internet_nodes_add' { + mut ri_name := action_ri.params.get('name')! + mut ri_template := action_ri.params.get('template')! + mut ri_t_growth := action_ri.params.get('growth')! + mut ri := s.regionalinternet_get(ri_name)! + mut template := s.nodetemplate_get(ri_template)! + ri.nodes_add(template: template, growth: ri_t_growth)! + } + } + + // now do the simulation, run it + mut actions4 := plbook.actions_find(actor: 'tfgridsimulation_farming')! + for action_ri in actions4 { + if action_ri.name == 'regional_internet_add' { + mut iname := action_ri.params.get('name')! 
+ s.regionalinternet_add(iname)! + } + } + + for _, mut ri in s.regional_internets { + ri.calc()! + } + + simulator_set(s) +} diff --git a/lib/threefold/grid4/farmingsimulator/playmacro.v b/lib/threefold/grid4/farmingsimulator/playmacro.v new file mode 100644 index 00000000..3e2fd629 --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/playmacro.v @@ -0,0 +1,44 @@ +module farmingsimulator + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.ui.console +// import json + +pub fn playmacro(action Action) !string { + // sheet_name := action.params.get('sheetname') or { + // return error("can't find sheetname from sheet.chart macro.") + // } + // mut sh := sheet_get(sheet_name)! + + // console.print_debug(sh) + + supported_actions := ['node_wiki', 'regionalinternet_wiki'] + + if !supported_actions.contains(action.name) { + return '' + } + + mut p := action.params + + simulator_name := action.params.get_default('simulator', 'default')! + + mut sim := simulator_get(simulator_name) or { + return error("can't find simulator with name ${simulator_name}") + } + + if action.name == 'node_wiki' { + console.print_green('playmacro node_wiki') + name := p.get('name') or { return error('name needs to be specified for wiki_node macro') } + return sim.node_template_wiki(name)! + } + + if action.name == 'regionalinternet_wiki' { + console.print_green('playmacro regionalinternet_wiki') + name := p.get('name') or { + return error('name needs to be specified for regionalinternet_wiki macro') + } + return sim.regional_internet_wiki(name)! 
+ } + + return '' +} diff --git a/lib/threefold/grid4/farmingsimulator/templates/node_template.md b/lib/threefold/grid4/farmingsimulator/templates/node_template.md new file mode 100644 index 00000000..f3238bd5 --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/templates/node_template.md @@ -0,0 +1,33 @@ +# Node Template: @{nodetemplate.name} + +## Capacity + +- CRU: @{nodetemplate.capacity.resourceunits.cru} +- MRU: @{nodetemplate.capacity.resourceunits.mru} GB +- SRU: @{nodetemplate.capacity.resourceunits.sru} GB +- HRU: @{nodetemplate.capacity.resourceunits.hru} GB + +## Components + +| Component | Quantity | +|-----------|----------| +@for group in nodetemplate.components +| @{group.name} | @{group.nr} | +@end + +## Detailed Component Information + +@for group in nodetemplate.components + +### @{group.name} (x@{group.nr}) + +- Description: @{group.component.description} +- Cost: $@{group.component.cost:.2f} +- Rackspace: @{group.component.rackspace} U +- Power: @{group.component.power} W +- CRU: @{group.component.cru} +- MRU: @{group.component.mru} GB +- HRU: @{group.component.hru} GB +- SRU: @{group.component.sru} GB + +@end \ No newline at end of file diff --git a/lib/threefold/grid4/farmingsimulator/templates/regionalinternet_template.md b/lib/threefold/grid4/farmingsimulator/templates/regionalinternet_template.md new file mode 100644 index 00000000..3be27ee4 --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/templates/regionalinternet_template.md @@ -0,0 +1,10 @@ +# Regional Internet: @{ri.name} + +!!!sheet.graph_line_row sheetname:'${ri.sheet.name}' + rowname:'rackspace_cost_unit' + period_type:quarter + + +```json +${ri.sheet} +``` \ No newline at end of file diff --git a/lib/threefold/grid4/farmingsimulator/token_cultivation.v b/lib/threefold/grid4/farmingsimulator/token_cultivation.v new file mode 100644 index 00000000..b86e72e2 --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/token_cultivation.v @@ -0,0 +1,32 @@ +module 
farmingsimulator + +// //calculates the token retur for farming +// fn (mut ri RegionalInternet) token_cultivation(node_template NodeTemplate, month int)!f64 { + +// utilization_nodes + +// cu := node_template.capacity.cloudunits.cu +// su := node_template.capacity.cloudunits.su +// nu:= node_template.capacity.cloudunits.nu + +// utilization_nodes := ri.sheet.row_get("utilization_nodes")! + +// chi_price_usd:=ri.sheet.row_get("chi_price_usd")! +// mut chi_price_usd_now := chi_price_usd.cells[month].val + +// //https://docs.google.com/spreadsheets/d/1KQGxaQuMOdy16H68SeSaWYqOyblzvSHvAcpcCdyMp6w/edit#gid=111700120 + +// //expressed in USD +// token_farming_usd := cu * 2.4 + su * 1 + nu * 0.03 +// println("++ $month $chi_total_tokens_now_million:: $chi_max_tokens_million") +// token_farming_usd_after_difficulty := token_farming_usd * (1-(chi_total_tokens_now_million/chi_max_tokens_million)) + +// token_farming_chi := token_farming_usd_after_difficulty / chi_price_usd_now + +// // println(node_template) +// // println(chi_price_usd_now) +// // println(token_farming_chi) + +// return token_farming_chi + +// } diff --git a/lib/threefold/grid4/farmingsimulator/token_farming.v b/lib/threefold/grid4/farmingsimulator/token_farming.v new file mode 100644 index 00000000..125d6ddc --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/token_farming.v @@ -0,0 +1,29 @@ +module farmingsimulator + +// calculates the token retur for farming +fn (mut ri RegionalInternet) token_farming(node_template NodeTemplate, month int) !f64 { + cu := node_template.capacity.cloudunits.cu + su := node_template.capacity.cloudunits.su + nu := node_template.capacity.cloudunits.nu + + chi_total_tokens := ri.sheet.row_get('chi_total_tokens')! + chi_max_tokens_million := ri.simulator.params.tokens.chi_total_tokens_million + chi_total_tokens_now_million := chi_total_tokens.cells[month].val / 1000000 + chi_price_usd := ri.sheet.row_get('chi_price_usd')! 
+ mut chi_price_usd_now := chi_price_usd.cells[month].val + + // https://docs.google.com/spreadsheets/d/1KQGxaQuMOdy16H68SeSaWYqOyblzvSHvAcpcCdyMp6w/edit#gid=111700120 + + // expressed in USD + token_farming_usd := cu * 2.4 + su * 1 + nu * 0.03 + println('++ ${month} ${chi_total_tokens_now_million}:: ${chi_max_tokens_million}') + token_farming_usd_after_difficulty := token_farming_usd * (1 - (chi_total_tokens_now_million / chi_max_tokens_million)) + + token_farming_chi := token_farming_usd_after_difficulty / chi_price_usd_now + + // println(node_template) + // println(chi_price_usd_now) + // println(token_farming_chi) + + return token_farming_chi +} diff --git a/lib/threefold/grid4/farmingsimulator/wiki.v b/lib/threefold/grid4/farmingsimulator/wiki.v new file mode 100644 index 00000000..0a2ac483 --- /dev/null +++ b/lib/threefold/grid4/farmingsimulator/wiki.v @@ -0,0 +1,23 @@ +module farmingsimulator + +import freeflowuniverse.herolib.ui.console + +pub fn (mut s Simulator) node_template_wiki(name_ string) !string { + name := name_.to_lower() + mut nodetemplate := s.node_templates[name] or { + return error('Cannot find node template with name: ${name}') + } + + nodewiki := $tmpl('templates/node_template.md') + return nodewiki +} + +pub fn (mut s Simulator) regional_internet_wiki(name_ string) !string { + name := name_.to_lower() + mut ri := s.regional_internets[name] or { + return error('Cannot find note regional internet with name: ${name}') + } + + wiki := $tmpl('templates/regionalinternet_template.md') + return wiki +} diff --git a/lib/threefold/grid4/gridsimulator/factory.v b/lib/threefold/grid4/gridsimulator/factory.v new file mode 100644 index 00000000..94036dd0 --- /dev/null +++ b/lib/threefold/grid4/gridsimulator/factory.v @@ -0,0 +1,96 @@ +module gridsimulator + +import freeflowuniverse.herolib.biz.spreadsheet +// import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.develop.gittools +import freeflowuniverse.herolib.core.texttools 
+import freeflowuniverse.herolib.core.playbook +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.threefold.grid4.cloudslices + +__global ( + grid_simulators shared map[string]&Simulator +) + +pub struct Simulator { +pub mut: + name string + sheet &spreadsheet.Sheet + params SimulatorArgs + nodes map[string]&cloudslices.Node +} + +@[params] +pub struct SimulatorArgs { +pub mut: + name string = 'default' // name of simulation + path string + git_url string + git_reset bool + git_pull bool +} + +pub fn new(args_ SimulatorArgs) !Simulator { + mut args := args_ + + if args.name == '' { + return error('simulation needs to have a name') + } + args.name = texttools.name_fix(args.name) + // if args.mdbook_name == '' { + // args.mdbook_name = args.name + // } + + // mut cs := currency.new() + mut sh := spreadsheet.sheet_new(name: 'tfgridsim_${args.name}')! + mut sim := Simulator{ + name: args.name + sheet: sh + params: args + // currencies: cs + } + + if args.git_url.len > 0 { + mut gs := gittools.new()! + mut repo := gs.get_repo( + url: args.git_url + pull: args.git_pull + reset: args.git_reset + reload: false + )! + + args.path = repo.get_path()! + } + + simulator_set(sim) + sim.load()! + + return sim +} + +// get sheet from global +pub fn simulator_get(name string) !&Simulator { + rlock grid_simulators { + if name in grid_simulators { + return grid_simulators[name] or { return error('Grid simulator ${name} not found') } + } + } + return error("cann't find tfgrid gridsimulator:'${name}' in global grid_simulators") +} + +// remember sheet in global +pub fn simulator_set(sim Simulator) { + lock grid_simulators { + grid_simulators[sim.name] = &sim + } + spreadsheet.sheet_set(sim.sheet) +} + +// load the mdbook content from path or git +pub fn (mut self Simulator) load() ! { + console.print_header('GRID SIMULATOR LOAD ${self.params.name}') + + mut plbook := playbook.new(path: self.params.path)! + + self.play(mut plbook)! 
+} diff --git a/lib/threefold/grid4/gridsimulator/manual/.collection b/lib/threefold/grid4/gridsimulator/manual/.collection new file mode 100644 index 00000000..14cb5a39 --- /dev/null +++ b/lib/threefold/grid4/gridsimulator/manual/.collection @@ -0,0 +1 @@ +name:gridsimulator diff --git a/lib/threefold/grid4/gridsimulator/manual/home.md b/lib/threefold/grid4/gridsimulator/manual/home.md new file mode 100644 index 00000000..2db4d835 --- /dev/null +++ b/lib/threefold/grid4/gridsimulator/manual/home.md @@ -0,0 +1 @@ +# Grid Simulator diff --git a/lib/threefold/grid4/gridsimulator/manual/macros.md b/lib/threefold/grid4/gridsimulator/manual/macros.md new file mode 100644 index 00000000..e69de29b diff --git a/lib/threefold/grid4/gridsimulator/play.v b/lib/threefold/grid4/gridsimulator/play.v new file mode 100644 index 00000000..642d22f5 --- /dev/null +++ b/lib/threefold/grid4/gridsimulator/play.v @@ -0,0 +1,238 @@ +module gridsimulator + +import freeflowuniverse.herolib.core.playbook { PlayBook } +import freeflowuniverse.herolib.threefold.grid4.cloudslices + +pub fn play(mut plbook PlayBook) ! { + // first make sure we find a run action to know the name + mut my_actions := plbook.actions_find(actor: 'tfgrid_simulator')! + + if my_actions.len == 0 { + return + } + + mut name := '' + + for mut action in my_actions { + if action.name == 'run' { + name = action.params.get_default('name', 'default')! // when name not specified is 'default' + + mut sim := new( + name: name + path: action.params.get_default('path', '')! + git_url: action.params.get_default('git_url', '')! + git_reset: action.params.get_default_false('git_reset') + git_pull: action.params.get_default_false('git_pull') + )! + + sim.play(mut plbook)! + simulator_set(sim) + } + } +} + +pub fn (mut self Simulator) play(mut plbook PlayBook) ! { + // make sure we know the inca price + mut actions4 := plbook.actions_find(actor: 'tfgrid_simulator')! 
+ + if actions4.len == 0 { + return + } + self.nodes = cloudslices.play(mut plbook)! + + for mut action in actions4 { + if action.name == 'incaprice_define' { + mut incaprice := self.sheet.row_new( + name: 'incaprice' + growth: action.params.get_default('incaprice_usd', '0.1')! + descr: '"INCA Price in USD' + extrapolate: true + aggregatetype: .avg + )! + for mycel in incaprice.cells { + if f64(mycel.val) == 0.0 { + return error('INCA price cannot be 0.') + } + } + } + } + + if 'incaprice' !in self.sheet.rows { + return error("can't find incaprice_define action for tfgrid_simulator, needs to define INCA price.") + } + + mut actions2 := plbook.actions_find(actor: 'tfgrid_simulator')! + for action in actions2 { + if action.name == 'node_growth_define' { + mut node_name := action.params.get_default('node_name', '')! + + mut node := self.nodes[node_name] or { + return error("can't find node in simulate with name: ${node_name}") + } + + mut new_nodes_per_month := self.sheet.row_new( + name: '${node_name}_new_per_month' + growth: action.params.get('new_month')! + tags: 'nrnodes_new nodetype:${node_name}' + descr: '"new nodes we add per month for node type ${node_name}' + extrapolate: true + aggregatetype: .max + )! + + println('new per month for ${node_name}:') + println(new_nodes_per_month.cells) + + mut investment_nodes := new_nodes_per_month.copy( + name: '${node_name}_investment_usd' + tags: 'node_investment nodetype:${node_name}' + descr: "investment needed for node type ${node_name}'" + )! + for mut cell in investment_nodes.cells { + cell.val = cell.val * node.cost + } + + _ = self.sheet.row_new( + name: '${node_name}_churn' + growth: action.params.get('churn')! + tags: 'churn nodetype:${node_name}' + descr: '"nr of nodes in percentage we loose per year for node type: ${node_name}' + extrapolate: true + aggregatetype: .avg + )! + + mut utilization := self.sheet.row_new( + name: '${node_name}_utilization' + growth: action.params.get('utilization')! 
+ tags: 'utilization nodetype:${node_name}' + descr: '"utilization in 0..100 percent for node type: ${node_name}' + extrapolate: true + aggregatetype: .avg + )! + + mut discount := self.sheet.row_new( + name: '${node_name}_discount' + growth: action.params.get('discount')! + tags: 'discount nodetype:${node_name}' + descr: '"discount in 0..100 percent for node type: ${node_name}' + extrapolate: true + aggregatetype: .avg + )! + + mut row_nr_nodes_total := new_nodes_per_month.recurring( + name: '${node_name}_nr_active' + delaymonths: 2 + tags: 'nrnodes_active nodetype:${node_name}' + descr: '"nr nodes active for for node type: ${node_name}' + aggregatetype: .max + )! + + node_total := node.node_total() + + mut node_rev := self.sheet.row_new( + name: '${node_name}_rev_month' + growth: '${node_total.price_simulation}' + tags: 'nodetype:${node_name}' + descr: '"Sales price in USD per node of type:${node_name} per month (usd)' + extrapolate: true + aggregatetype: .sum + )! + + mut node_rev_total := self.sheet.row_new( + name: '${node_name}_rev_total' + tags: 'noderev nodetype:${node_name}' + descr: '"Sales price in USD total for node type: ${node_name} per month' + aggregatetype: .sum + growth: '1:0' + )! + + // apply the sales price discount & calculate the sales price in total + mut counter := 0 + for mut cell in node_rev.cells { + discount_val := discount.cells[counter].val + cell.val = cell.val * (1 - discount_val / 100) * utilization.cells[counter].val / 100 + node_rev_total.cells[counter].val = cell.val * row_nr_nodes_total.cells[counter].val + counter += 1 + } + + // grant_month_usd:'1:60,24:60,25:0' + // grant_month_inca:'1:0,24:0' + // grant_max_nrnodes:1000 //max nr of nodes which will get this grant + + mut grant_node_month_usd := self.sheet.row_new( + name: '${node_name}_grant_node_month_usd' + descr: '"Grant in USD for node type: ${node_name}' + aggregatetype: .sum + growth: node.grant.grant_month_usd + )! 
+ + mut grant_node_month_inca := self.sheet.row_new( + name: '${node_name}_grant_node_month_inca' + descr: '"Grant in INCA for node type: ${node_name}' + aggregatetype: .sum + growth: node.grant.grant_month_inca + )! + + mut inca_grant_node_month_inca := self.sheet.row_new( + name: '${node_name}_grant_node_total' + tags: 'incagrant' + descr: '"INCA grant for node type: ${node_name}' + aggregatetype: .sum + growth: '0:0' + )! + mut counter2 := 0 + row_incaprice := self.sheet.rows['incaprice'] or { + return error("can't find row incaprice") + } + for mut cell in inca_grant_node_month_inca.cells { + grant_usd := grant_node_month_usd.cells[counter2].val + grant_inca := grant_node_month_inca.cells[counter2].val + mut nr_nodes := row_nr_nodes_total.cells[counter2].val + if nr_nodes > node.grant.grant_max_nrnodes { + nr_nodes = node.grant.grant_max_nrnodes + } + incaprice_now := f64(row_incaprice.cells[counter2].val) + if incaprice_now == 0.0 { + panic('bug incaprice_now cannot be 0') + } + // println(" nrnodes: ${nr_nodes} incaprice:${incaprice_now} grant_usd:${grant_usd} grant_inca:${grant_inca}") + cell.val = nr_nodes * (grant_usd / incaprice_now + grant_inca) + counter2 += 1 + } + // println(inca_grant_node_month_inca.cells) + } + } + + // MAIN SIMULATION LOGIC + + // Removed unused variables + // incaprice := self.sheet.rows['incaprice'] or { return error("can't find row incaprice") } + + // mut rev_usd := self.sheet.group2row( + // name: 'noderev' + // tags: 'nodestats_total' + // include: ['noderev'] + // descr: 'revenue in USD from all nodes per month' + // )! + + // mut investment_usd := self.sheet.group2row( + // name: 'investment' + // tags: 'nodestats_total' + // include: ['node_investment'] + // descr: 'investment in USD from all nodes per month' + // )! + + // mut investment_usd := self.sheet.group2row( + // name: 'investment_usd' + // tags: 'total' + // include: ['node_investment'] + // descr: 'investment in USD from all nodes per month' + // )! 
+ + simulator_set(self) + + // println(self.sheet) + + // if true{ + // panic("arym") + // } +} diff --git a/lib/threefold/grid4/gridsimulator/readme.md b/lib/threefold/grid4/gridsimulator/readme.md new file mode 100644 index 00000000..cd94d283 --- /dev/null +++ b/lib/threefold/grid4/gridsimulator/readme.md @@ -0,0 +1,6 @@ +# Grid Simulator + +see ... for example + +we can use the play command to execute on it. + diff --git a/lib/threefold/griddriver/client.v b/lib/threefold/griddriver/client.v new file mode 100644 index 00000000..de8a2bbd --- /dev/null +++ b/lib/threefold/griddriver/client.v @@ -0,0 +1,12 @@ +module griddriver + +pub struct Client { +pub: + mnemonic string + substrate string + relay string +mut: + node_twin map[u32]u32 +} + +// TODO: add the rest of griddriver functionalities diff --git a/lib/threefold/griddriver/rmb.v b/lib/threefold/griddriver/rmb.v new file mode 100644 index 00000000..a89def55 --- /dev/null +++ b/lib/threefold/griddriver/rmb.v @@ -0,0 +1,33 @@ +module griddriver + +import os +import x.json2 +import json + +pub fn (mut c Client) rmb_call(dst u32, cmd string, payload string) !string { + res := os.execute("griddriver rmb --cmd '${cmd}' --dst '${dst}' --payload '${payload}' --substrate '${c.substrate}' --mnemonics '${c.mnemonic}' --relay '${c.relay}'") + if res.exit_code != 0 { + return error(res.output) + } + return res.output +} + +pub struct Version { + zinit string + zos string +} + +pub fn (mut c Client) get_zos_version(dst u32) !Version { + data := json.encode('') + res := c.rmb_call(dst, 'zos.system.version', data)! + ver := json2.decode[Version](res)! + return ver +} + +pub fn (mut c Client) list_wg_ports(dst u32) ![]u16 { + res := os.execute("griddriver rmb-taken-ports --dst ${dst} --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --relay \"${c.relay}\"") + if res.exit_code != 0 { + return error(res.output) + } + return json.decode([]u16, res.output)! 
+} diff --git a/lib/threefold/griddriver/substrate.v b/lib/threefold/griddriver/substrate.v new file mode 100644 index 00000000..0efd3e7f --- /dev/null +++ b/lib/threefold/griddriver/substrate.v @@ -0,0 +1,111 @@ +module griddriver + +import os +import strconv +import json +import freeflowuniverse.herolib.ui.console + +pub fn (mut c Client) get_node_twin(node_id u64) !u32 { + if u32(node_id) in c.node_twin { + return c.node_twin[u32(node_id)] + } + + res := os.execute("griddriver node-twin --substrate \"${c.substrate}\" --node_id ${node_id}") + if res.exit_code != 0 { + return error(res.output) + } + + twin_id := u32(strconv.parse_uint(res.output, 10, 32)!) + c.node_twin[u32(node_id)] = twin_id + return twin_id +} + +pub fn (mut c Client) get_user_twin() !u32 { + res := os.execute("griddriver user-twin --mnemonics \"${c.mnemonic}\" --substrate \"${c.substrate}\"") + if res.exit_code != 0 { + return error(res.output) + } + + return u32(strconv.parse_uint(res.output, 10, 32)!) +} + +pub fn (mut c Client) create_node_contract(node_id u32, body string, hash string, public_ips u32, solution_provider u64) !u64 { + console.print_debug('url: ${c.substrate}') + res := os.execute("griddriver new-node-cn --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --node_id ${node_id} --hash \"${hash}\" --public_ips ${public_ips} --body \"${body}\" --solution_provider ${solution_provider}") + if res.exit_code != 0 { + return error(res.output) + } + + return strconv.parse_uint(res.output, 10, 64)! +} + +pub fn (mut c Client) create_name_contract(name string) !u64 { + res := os.execute("griddriver new-name-cn --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --name ${name}") + if res.exit_code != 0 { + return error(res.output) + } + + return strconv.parse_uint(res.output, 10, 64)! +} + +pub fn (mut c Client) update_node_contract(contract_id u64, body string, hash string) ! 
{ + res := os.execute("griddriver update-cn --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --contract_id ${contract_id} --body \"${body}\" --hash \"${hash}\"") + if res.exit_code != 0 { + return error(res.output) + } +} + +pub fn (mut c Client) cancel_contract(contract_id u64) ! { + res := os.execute("griddriver cancel-cn --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --contract_id ${contract_id}") + if res.exit_code != 0 { + return error(res.output) + } +} + +pub struct BatchCreateContractData { +pub mut: + node u32 + body string + hash string + public_ips u32 + solution_provider_id ?u64 + // for name contracts. if set the contract is assumed to be a name contract and other fields are ignored + name string +} + +struct Hamada { + key []BatchCreateContractData +} + +pub fn (mut c Client) batch_create_contracts(contracts_data_ []BatchCreateContractData) ![]u64 { + mut contracts_data := contracts_data_.clone() + mut body := '' + + for mut contract in contracts_data { + if contract.body.len > 0 { + body = contract.body + } + + contract.body = '' + } + + data := json.encode(contracts_data) + res := os.execute("griddriver batch-create-contract --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --contracts-data '${data}' --contracts-body \"${body}\"") + + if res.exit_code != 0 { + return error(res.output) + } + + contract_ids := json.decode([]u64, res.output) or { + return error('Cannot decode the result due to ${err}') + } + return contract_ids +} + +pub fn (mut c Client) batch_cancel_contracts(contract_ids []u64) ! 
{ + data := json.encode(contract_ids) + res := os.execute("griddriver batch-cancel-contract --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --contract-ids \"${data}\"") + if res.exit_code != 0 { + return error(res.output) + } +} diff --git a/lib/threefold/griddriver/utils.v b/lib/threefold/griddriver/utils.v new file mode 100644 index 00000000..167261c8 --- /dev/null +++ b/lib/threefold/griddriver/utils.v @@ -0,0 +1,38 @@ +module griddriver + +import os +import freeflowuniverse.herolib.threefold.grid.models + +pub fn (mut c Client) sign_deployment(hash string) !string { + res := os.execute("griddriver sign --substrate \"${c.substrate}\" --mnemonics \"${c.mnemonic}\" --hash \"${hash}\"") + if res.exit_code != 0 { + return error(res.output) + } + return res.output +} + +pub fn (mut c Client) deploy_single_vm(node_id u32, solution_type string, vm models.VM, env string) !string { + data := vm.json_encode() + res := os.execute("griddriver deploy-single --mnemonics \"${c.mnemonic}\" --env ${env} --solution_type \"${solution_type}\" --node ${node_id} --data '${data}'") + return res.output +} + +// returns priv, pub key separated by a space +pub fn (mut c Client) generate_wg_priv_key() ![]string { + res := os.execute('griddriver generate-wg-key') + key := res.output.split(' ') + if key.len != 2 { + return error('could not generate private key: ${res.output}') + } + return key +} + +// returns priv, pub key separated by a space +pub fn (mut c Client) generate_wg_public_key(key string) !string { + res := os.execute('griddriver generate-wg-public-key --key "${key}"') + public_key := res.output.split(' ') + if public_key.len != 1 { + return error('could not generate public key: ${res.output}') + } + return public_key[0] +} diff --git a/lib/threefold/gridproxy/README.md b/lib/threefold/gridproxy/README.md new file mode 100644 index 00000000..8bc46335 --- /dev/null +++ b/lib/threefold/gridproxy/README.md @@ -0,0 +1,93 @@ +# GridProxy API client + +Easily access 
Threefold grid APIs from vlang. gridproxy is a V module that includes the API client along with API-specific information such as the root URL for the different networks available in the Threefold grid. It also includes classes that represent entities in the context of the API in the sub-module `model`, which are useful for converting between JSON objects and V objects, as well as some types with helper methods to convert the machine-friendly units returned by the API to more human-friendly units. + +### import the client: + +```v +import freeflowuniverse.herolib.threefold.gridproxy + +// create a client for the testnet, with API cache disabled +// you can pass true as second arg to enable cache +mut gp_client := gridproxy.get(.test, false)! + +``` + +### use the client to interact with the gridproxy API: + +```v +// get farm list +farms := gp_client.get_farms()! // you should handle any possible errors in your code +// get gateway list +gateways := gp_client.get_gateways()! +// get node list +nodes := gp_client.get_nodes()! +// get contract list +contracts := gp_client.get_contracts()! +// get grid stats +stats := gp_client.get_stats()! +// get node by id +node := gp_client.get_node_by_id(u64(16))! +// get node stats +node_stats := gp_client.get_node_stats_by_id(u64(16))! +// get twins +twins := gp_client.get_twins()! +``` + +for all available methods on the client, see [GridProxy API client modules doc](./docs/) + +### filtering: + +```v +// getting only dedicated farms +farms_dedicated := gp_client.get_farms(dedicated: true)! +// getting only farms with at least one free ip +farms_with_free_ips := gp_client.get_farms(free_ips: u64(1))! +// pagination options: +// get first page of farms +farms_first_page := gp_client.get_farms(page: u64(1))! +// you can mix any filters and pagination options +farms_first_page_dedicated := gp_client.get_farms(page: u64(1), dedicated: true)! 
+// access the field of first farm in the list +// the API could return an empty list if no farm is found +// you should handle this case in your code +if farms_first_page.len > 0 { + println(farms_first_page[0].name) +} +``` + +for all available filters, see [GridProxy API client modules doc](./docs/) + +### helper methods: + +```v +node := nodes[0] +node.updated_at // 1655940222 +node.created // 1634637306 +// you can convert the timestamp to V Time object easily with the helper method +node.created.to_time() // 2021-10-19 09:55:06 +node.created.to_time().local() // 2021-10-19 11:55:06 +node.created.to_time().relative() // last Oct 19 +node.created.to_time().relative_short() // 246d ago +// lets check another field with different type +node.uptime // 18958736 +// you can convert the seconds to a human-readable duration with the helper method +node.uptime.to_days() // 219.42981481481482 +node.uptime.to_hours() // 5266.315555555556 +node.uptime.to_minutes() // 315978.93333333335 +// now to the capacity helper methods +node.total_resources.mru // 202803036160 +// you can `to_megabytes`, `to_gigabytes` and `to_terabytes` methods on any resources field. +node.total_resources.mru.to_gigabytes() // 202.80303616 +// the helper methods available for the billing to help you convert the TFT units as well +``` + +for all available helper methods, see [GridProxy API client modules doc](./docs/) + +TODO: + +- Documented the client iterators and higher-level methods + +## Client Examples + +there are scripts available to serve as examples in the [examples](../examples/) directory. [Docs](../examples/README.md) diff --git a/lib/threefold/gridproxy/gridproxy_core.v b/lib/threefold/gridproxy/gridproxy_core.v new file mode 100644 index 00000000..2ff4df4c --- /dev/null +++ b/lib/threefold/gridproxy/gridproxy_core.v @@ -0,0 +1,489 @@ +module gridproxy + +// client library for threefold gridproxy API. 
+import json +import math +import freeflowuniverse.herolib.threefold.gridproxy.model { Bill, Contract, ContractFilter, ContractIterator, Farm, FarmFilter, FarmIterator, GridStat, Node, NodeFilter, NodeIterator, NodeStats, Node_, StatFilter, Twin, TwinFilter, TwinIterator } +import freeflowuniverse.herolib.ui.console + +/* +all errors returned by the gridproxy API or the client are wrapped in a standard `Error` object with two fields. +{ + msg string + code int // could be API call error code or client error code +} + +`code` is an error code that can be used to identify the error. +in API call errors, `code` represents the HTTP status code. (100..599) + +Client error codes are represented by numbers in the range of 1..99 +currently, the following client error codes are used: +id not found error code: 4 +json parsing error code: 10 +http client error code: 11 +invalid response from server (e.g. empty response) error code: 24 +*/ +// client error codes +const err_not_found = 4 +const err_json_parse = 10 +const err_http_client = 11 +const err_invalid_resp = 24 +const err_grid_client = 30 + +// get_node_by_id fetches specific node information by node id. +// +// * `node_id` (u64): node id. +// +// returns: `Node` or `Error`. +pub fn (mut c GridProxyClient) get_node_by_id(node_id u64) !Node { + // needed to allow to use threads + mut http_client := c.http_client + + res := http_client.send(prefix: 'nodes/', id: '${node_id}') or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + + if !res.is_ok() { + return error_with_code(res.data, res.code) + } + + if res.data == '' { + return error_with_code('empty response', err_invalid_resp) + } + + node := json.decode(Node, res.data) or { + return error_with_code('error to get jsonstr for node data, json decode: node id: ${node_id}, data: ${res.data}', + err_json_parse) + } + return node +} + +// get_node_stats_by_id fetches specific node statistics by node id. +// +// * `node_id` (u64): node id. 
+// +// returns: `Node_stats` or `Error`. +pub fn (mut c GridProxyClient) get_node_stats_by_id(node_id u64) !NodeStats { + // needed to allow to use threads + mut http_client := c.http_client + + res := http_client.send(prefix: 'nodes/', id: '${node_id}/statistics') or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + + if !res.is_ok() { + return error_with_code(res.data, res.code) + } + + if res.data == '' { + return error_with_code('empty response', err_invalid_resp) + } + + node_stats := json.decode(NodeStats, res.data) or { + return error_with_code('error to get jsonstr for node data, json decode: node id: ${node_id}, data: ${res.data}', + err_json_parse) + } + return node_stats +} + +// get_gateway_by_id fetchs specific gateway information by node id. +// +// * `node_id` (u64): node id. +// +// returns: `Node` or `Error`. +pub fn (mut c GridProxyClient) get_gateway_by_id(node_id u64) !Node { + // needed to allow to use threads + mut http_client := c.http_client + + res := http_client.send(prefix: 'gateways/', id: '${node_id}') or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + + if !res.is_ok() { + return error_with_code(res.data, res.code) + } + + if res.data == '' { + return error_with_code('empty response', err_invalid_resp) + } + + node := json.decode(Node, res.data) or { + return error_with_code('error to get jsonstr for gateway data, json decode: gateway id: ${node_id}, data: ${res.data}', + err_json_parse) + } + return node +} + +// get_nodes fetchs nodes information and public configurations with pagination. +// +// * `available_for` (u64): Available for twin id. [optional]. +// * `certification_type` (string): Certificate type NotCertified, Silver or Gold. [optional]. +// * `city_contains` (string): Node partial city filter. [optional]. +// * `city` (string): Node city filter. [optional]. +// * `country_contains` (string): Node partial country filter. [optional]. 
+// * `country` (string): Node country filter. [optional]. +// * `dedicated` (bool): Set to true to get the dedicated nodes only. [optional]. +// * `domain` (string): Set to true to filter nodes with domain. [optional]. +// * `farm_ids` ([]u64): List of farm ids. [optional]. +// * `farm_name_contains` (string): Get nodes for specific farm. [optional]. +// * `farm_name` (string): Get nodes for specific farm. [optional]. +// * `free_hru` (u64): Min free reservable hru in bytes. [optional]. +// * `free_ips` (u64): Min number of free ips in the farm of the node. [optional]. +// * `free_mru` (u64): Min free reservable mru in bytes. [optional]. +// * `free_sru` (u64): Min free reservable sru in bytes. [optional]. +// * `gpu_available` (bool): Filter nodes that have available GPU. [optional]. +// * `gpu_device_id` (string): Filter nodes based on GPU device ID. [optional]. +// * `gpu_device_name` (string): Filter nodes based on GPU device partial name. [optional]. +// * `gpu_vendor_id` (string): Filter nodes based on GPU vendor ID. [optional]. +// * `gpu_vendor_name` (string): Filter nodes based on GPU vendor partial name. [optional]. +// * `has_gpu`: Filter nodes on whether they have GPU support or not. [optional]. +// * `ipv4` (string): Set to true to filter nodes with ipv4. [optional]. +// * `ipv6` (string): Set to true to filter nodes with ipv6. [optional]. +// * `node_id` (u64): Node id. [optional]. +// * `page` (u64): Page number. [optional]. +// * `rentable` (bool): Set to true to filter the available nodes for renting. [optional]. +// * `rented_by` (u64): Rented by twin id. [optional]. +// * `ret_count` (bool): Set nodes' count on headers based on filter. [optional]. +// * `size` (u64): Max result per page. [optional]. +// * `status` (string): Node status filter, set to 'up' to get online nodes only. [optional]. +// * `total_cru` (u64): Min total cru in bytes. [optional]. +// * `total_hru` (u64): Min total hru in bytes. [optional]. 
+// * `total_mru` (u64): Min total mru in bytes. [optional]. +// * `total_sru` (u64): Min total sru in bytes. [optional]. +// * `twin_id` (u64): Twin id. [optional]. +// +// returns: `[]Node` or `Error`. +pub fn (mut c GridProxyClient) get_nodes(params NodeFilter) ![]Node { + // needed to allow to use threads + mut http_client := c.http_client + params_map := params.to_map() + res := http_client.send(prefix: 'nodes/', params: params_map) or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + + if !res.is_ok() { + return error_with_code(res.data, res.code) + } + + if res.data == '' { + return error_with_code('empty response', err_invalid_resp) + } + + nodes_ := json.decode([]Node_, res.data) or { + return error_with_code('error to get jsonstr for node list data, json decode: node filter: ${params_map}, data: ${res.data}', + err_json_parse) + } + nodes := nodes_.map(it.with_nested_capacity()) + return nodes +} + +// get_gateways fetchs gateways information and public configurations and domains with pagination. +// +// * `available_for` (u64): Available for twin id. [optional]. +// * `certification_type` (string): Certificate type NotCertified, Silver or Gold. [optional]. +// * `city_contains` (string): Node partial city filter. [optional]. +// * `city` (string): Node city filter. [optional]. +// * `country_contains` (string): Node partial country filter. [optional]. +// * `country` (string): Node country filter. [optional]. +// * `dedicated` (bool): Set to true to get the dedicated nodes only. [optional]. +// * `domain` (bool): Set to true to filter nodes with domain. [optional]. +// * `farm_ids` ([]u64): List of farm ids. [optional]. +// * `farm_name_contains` (string): Get nodes for specific farm. [optional]. +// * `farm_name` (string): Get nodes for specific farm. [optional]. +// * `free_hru` (u64): Min free reservable hru in bytes. [optional]. +// * `free_ips` (u64): Min number of free ips in the farm of the node. [optional]. 
+// * `free_mru` (u64): Min free reservable mru in bytes. [optional]. +// * `free_sru` (u64): Min free reservable sru in bytes. [optional]. +// * `gpu_available` (bool): Filter nodes that have available GPU. [optional]. +// * `gpu_device_id` (string): Filter nodes based on GPU device ID. [optional]. +// * `gpu_device_name` (string): Filter nodes based on GPU device partial name. [optional]. +// * `gpu_vendor_id` (string): Filter nodes based on GPU vendor ID. [optional]. +// * `gpu_vendor_name` (string): Filter nodes based on GPU vendor partial name. [optional]. +// * `has_gpu`: Filter nodes on whether they have GPU support or not. [optional]. +// * `ipv4` (string): Set to true to filter nodes with ipv4. [optional]. +// * `ipv6` (string): Set to true to filter nodes with ipv6. [optional]. +// * `node_id` (u64): Node id. [optional]. +// * `page` (u64): Page number. [optional]. +// * `rentable` (bool): Set to true to filter the available nodes for renting. [optional]. +// * `rented_by` (u64): Rented by twin id. [optional]. +// * `ret_count` (bool): Set nodes' count on headers based on filter. [optional]. +// * `size` (u64): Max result per page. [optional]. +// * `status` (string): Node status filter, set to 'up' to get online nodes only. [optional]. +// * `total_cru` (u64): Min total cru in bytes. [optional]. +// * `total_hru` (u64): Min total hru in bytes. [optional]. +// * `total_mru` (u64): Min total mru in bytes. [optional]. +// * `total_sru` (u64): Min total sru in bytes. [optional]. +// * `twin_id` (u64): Twin id. [optional]. +// +// returns: `[]Node` or `Error`. 
+pub fn (mut c GridProxyClient) get_gateways(params NodeFilter) ![]Node { + // needed to allow to use threads + mut http_client := c.http_client + params_map := params.to_map() + res := http_client.send(prefix: 'gateways/', params: params_map) or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + + if !res.is_ok() { + return error_with_code(res.data, res.code) + } + + if res.data == '' { + return error_with_code('empty response', err_invalid_resp) + } + + nodes_ := json.decode([]Node_, res.data) or { + return error_with_code('error to get jsonstr for gateways list data, json decode: gateway filter: ${params_map}, data: ${res.data}', + err_json_parse) + } + nodes := nodes_.map(it.with_nested_capacity()) + return nodes +} + +// get_stats fetches stats about the grid. +// +// * `status` (string): Node status filter, set to 'up' to get online nodes only. [optional]. +// +// returns: `GridStat` or `Error`. +pub fn (mut c GridProxyClient) get_stats(filter StatFilter) !GridStat { + // needed to allow to use threads + mut http_client := c.http_client + mut params_map := map[string]string{} + params_map['status'] = match filter.status { + .all { '' } + .online { 'up' } + } + + res := http_client.send(prefix: 'stats/', params: params_map) or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + + if !res.is_ok() { + return error_with_code(res.data, res.code) + } + + if res.data == '' { + return error_with_code('empty response', err_invalid_resp) + } + + stats := json.decode(GridStat, res.data) or { + return error_with_code('error to get jsonstr for grid stats data, json decode: stats filter: ${params_map}, data: ${res.data}', + err_json_parse) + } + return stats +} + +// get_twins fetches twins information with pagination. +// +// * `account_id` (string): Account address. [optional]. +// * `page` (u64): Page number. [optional]. +// * `public_key` (string): twin public key used for e2e encryption. [optional]. 
+// * `relay` (string): relay domain name. [optional]. +// * `ret_count` (bool): Set farms' count on headers based on filter. [optional]. +// * `size` (u64): Max result per page. [optional]. +// * `twin_id` (u64): Twin id. [optional]. +// +// returns: `[]Twin` or `Error`. +pub fn (mut c GridProxyClient) get_twins(params TwinFilter) ![]Twin { + // needed to allow to use threads + mut http_client := c.http_client + params_map := params.to_map() + res := http_client.send(prefix: 'twins/', params: params_map) or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + + if !res.is_ok() { + return error_with_code(res.data, res.code) + } + + if res.data == '' { + return error_with_code('empty response', err_invalid_resp) + } + + twins := json.decode([]Twin, res.data) or { + return error_with_code('error to get jsonstr for twin list data, json decode: twin filter: ${params_map}, data: ${res.data}', + err_json_parse) + } + return twins +} + +// get_contracts fetchs contracts information with pagination. +// +// * `contract_id` (u64): Contract id. [optional]. +// * `contract_type` (string): [optional]. +// * `deployment_data` (string): Contract deployment data in case of 'node' contracts. [optional]. +// * `deployment_hash` (string): Contract deployment hash in case of 'node' contracts. [optional]. +// * `name` (string): Contract name in case of 'name' contracts. [optional]. +// * `node_id` (u64): Node id which contract is deployed on in case of ('rent' or 'node' contracts). [optional]. +// * `number_of_public_ips` (u64): Min number of public ips in the 'node' contract. [optional]. +// * `page` (u64): Page number. [optional]. +// * `randomize` (bool): [optional]. +// * `ret_count` (bool): Set farms' count on headers based on filter. [optional]. +// * `size` (u64): Max result per page. [optional]. +// * `state` (string): Contract state 'Created', or 'Deleted'. [optional]. +// * `twin_id` (u64): Twin id. [optional]. 
+// * `type` (string): Contract type 'node', 'name', or 'rent'. [optional]. +// +// * returns: `[]Contract` or `Error`. +pub fn (mut c GridProxyClient) get_contracts(params ContractFilter) ![]Contract { + // needed to allow to use threads + mut http_client := c.http_client + params_map := params.to_map() + res := http_client.send(prefix: 'contracts/', params: params_map) or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + + if !res.is_ok() { + return error_with_code(res.data, res.code) + } + + if res.data == '' { + return error_with_code('empty response', err_invalid_resp) + } + + contracts := json.decode([]Contract, res.data) or { + return error_with_code('error to get jsonstr for contract list data, json decode: contract filter: ${params_map}, data: ${res.data}', + err_json_parse) + } + return contracts +} + +pub fn (mut c GridProxyClient) get_contract_bill(contract_id u64) ![]Bill { + // needed to allow to use threads + mut http_client := c.http_client + + res := http_client.send(prefix: 'contracts/', id: '${contract_id}/bills') or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + + if !res.is_ok() { + return error_with_code(res.data, res.code) + } + + if res.data == '' { + return error_with_code('empty response', err_invalid_resp) + } + console.print_debug(res.data) + bills := json.decode([]Bill, res.data) or { + return error_with_code('error to get jsonstr for billing data, json decode: contract_id id: ${contract_id}, data: ${res.data}', + err_json_parse) + } + return bills +} + +pub fn (mut c GridProxyClient) get_contract_hourly_bill(contract_id u64) !f64 { + bills := c.get_contract_bill(contract_id)! + if bills.len == 0 { + return f64(0) + } + mut duration := u64(0) + if bills.len >= 2 { + duration = (bills[0].timestamp - bills[1].timestamp) / 3600 // one hour + } else if bills.len == 1 { + contracts := c.get_contracts(contract_id: contract_id)! 
+ if contracts.len > 0 { + duration = (bills[0].timestamp - contracts[0].created_at) / 3600 + } + } + if duration > 0 { + return bills[0].amount_billed / duration / math.pow(10, 7) + } + return f64(0) +} + +// get_farms fetchs farms information and public ips. +// +// * `certification_type` (string): Certificate type DIY or Certified. [optional]. +// * `country` (string): Farm country. [optional]. +// * `dedicated` (bool): Farm is dedicated. [optional]. +// * `farm_id` (u64): Farm id. [optional]. +// * `free_ips` (u64): Min number of free ips in the farm. [optional]. +// * `name_contains` (string): Farm name contains. [optional]. +// * `name` (string): Farm name. [optional]. +// * `node_available_for` (u64): Twin ID of user for whom there is at least one node that is available to be deployed to in the farm. [optional]. +// * `node_certified` (bool): True for farms who have at least one certified node. [optional]. +// * `node_free_hru` (u64): Min free reservable hru for at least a single node that belongs to the farm, in bytes. [optional]. +// * `node_free_mru` (u64): Min free reservable mru for at least a single node that belongs to the farm, in bytes. [optional]. +// * `node_free_sru` (u64): Min free reservable sru for at least a single node that belongs to the farm, in bytes. [optional]. +// * `node_has_gpu` (bool): True for farms who have at least one node with a GPU +// * `node_rented_by` (u64): Twin ID of user who has at least one rented node in the farm +// * `node_status` (string): Node status for at least a single node that belongs to the farm +// * `page` (u64): Page number. [optional]. +// * `pricing_policy_id` (u64): Pricing policy id. [optional]. +// * `randomize` (bool): [optional]. +// * `ret_count` (bool): Set farms' count on headers based on filter. [optional]. +// * `size` (u64): Max result per page. [optional]. +// * `stellar_address` (string): Farm stellar_address. [optional]. +// * `total_ips` (u64): Min number of total ips in the farm. 
[optional]. +// * `twin_id` (u64): Twin id associated with the farm. [optional]. +// * `version` (u64): Farm version. [optional]. +// +// returns: `[]Farm` or `Error`. +pub fn (mut c GridProxyClient) get_farms(params FarmFilter) ![]Farm { + // needed to allow to use threads + mut http_client := c.http_client + params_map := params.to_map() + res := http_client.send(prefix: 'farms/', params: params_map) or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + + if !res.is_ok() { + return error_with_code(res.data, res.code) + } + + if res.data == '' { + return error_with_code('empty response', err_invalid_resp) + } + + farms := json.decode([]Farm, res.data) or { + return error_with_code('error to get jsonstr for farm list data, json decode: farm filter: ${params_map}, data: ${res.data}', + err_json_parse) + } + return farms +} + +// is_pingable checks if API server is reachable and responding. +// +// returns: bool, `true` if API server is reachable and responding, `false` otherwise +pub fn (mut c GridProxyClient) is_pingable() !bool { + mut http_client := c.http_client + res := http_client.send(prefix: 'ping/') or { return false } + if !res.is_ok() { + return false + } + health_map := json.decode(map[string]string, res.data) or { return false } + + if health_map['ping'] != 'pong' { + return false + } + + return true +} + +// Iterators have the next() method, which returns the next page of the objects. +// to be used in a loop to get all available results, or to lazely traverse pages till a specific condition is met. 
+ +// get_nodes_iterator creates an iterator through node pages with custom filter +fn (mut c GridProxyClient) get_nodes_iterator(filter NodeFilter) NodeIterator { + return NodeIterator{filter, c.get_nodes} +} + +// get_gateways_iterator creates an iterator through gateway pages with custom filter +fn (mut c GridProxyClient) get_gateways_iterator(filter NodeFilter) NodeIterator { + return NodeIterator{filter, c.get_gateways} +} + +// get_farms_iterator creates an iterator through farms pages with custom filter +fn (mut c GridProxyClient) get_farms_iterator(filter FarmFilter) FarmIterator { + return FarmIterator{filter, c.get_farms} +} + +// get_twins_iterator creates an iterator through twin pages with custom filter +fn (mut c GridProxyClient) get_twins_iterator(filter TwinFilter) TwinIterator { + return TwinIterator{filter, c.get_twins} +} + +// get_contracts_iterator creates an iterator through contracts pages with custom filter +fn (mut c GridProxyClient) get_contracts_iterator(filter ContractFilter) ContractIterator { + return ContractIterator{filter, c.get_contracts} +} diff --git a/lib/threefold/gridproxy/gridproxy_factory.v b/lib/threefold/gridproxy/gridproxy_factory.v new file mode 100644 index 00000000..f63f4b1b --- /dev/null +++ b/lib/threefold/gridproxy/gridproxy_factory.v @@ -0,0 +1,111 @@ +module gridproxy + +import freeflowuniverse.herolib.clients.httpconnection +import freeflowuniverse.herolib.threefold.gridproxy.model +// import freeflowuniverse.herolib.installers.threefold.griddriver + +@[heap] +pub struct GridProxyClient { +pub mut: + http_client httpconnection.HTTPConnection +} + +pub enum TFGridNet { + main + test + dev + qa +} + +@[heap] +struct GridproxyFactory { +mut: + instances map[string]&GridProxyClient +} + +fn init_factory() GridproxyFactory { + mut ef := GridproxyFactory{} + return ef +} + +// Singleton creation +const factory = init_factory() + +fn factory_get() &GridproxyFactory { + return &factory +} + +fn gridproxy_url_get(net 
TFGridNet) string { + return match net { + .main { 'https://gridproxy.grid.tf' } + .test { 'https://gridproxy.test.grid.tf' } + .dev { 'https://gridproxy.dev.grid.tf' } + .qa { 'https://gridproxy.qa.grid.tf/' } + } +} + +// return which net in string form +fn tfgrid_net_string(net TFGridNet) string { + return match net { + .main { 'main' } + .test { 'test' } + .dev { 'dev' } + .qa { 'qa' } + } +} + +@[params] +pub struct GridProxyClientArgs { +pub mut: + net TFGridNet = .main + cache bool +} + +// get returns a gridproxy client for the given net. +// +//``` +// net TFGridNet = .main +// cache bool +//``` +pub fn new(args GridProxyClientArgs) !&GridProxyClient { + mut f := factory_get() + netstr := tfgrid_net_string(args.net) + if netstr !in factory.instances { + url := gridproxy_url_get(args.net) + mut httpconn := httpconnection.new( + name: 'gridproxy_${netstr}' + url: url + cache: args.cache + )! + // do the settings on the connection + httpconn.cache.expire_after = 7200 // make the cache timeout 2h + mut connection := GridProxyClient{ + http_client: httpconn + } + f.instances[netstr] = &connection + } + return f.instances[netstr] or { + return error_with_code('http client error: unknow error happened while trying to access the GridProxyClient instance', + err_grid_client) + } +} + +pub fn nodefilter() !model.NodeFilter { + return model.NodeFilter{} +} + +pub fn contractfilter() !model.ContractFilter { + return model.ContractFilter{} +} + +pub fn farmfilter() !model.FarmFilter { + return model.FarmFilter{} +} + +pub fn twinfilter() !model.TwinFilter { + return model.TwinFilter{} +} + +pub fn statfilter() !model.StatFilter { + return model.StatFilter{} +} diff --git a/lib/threefold/gridproxy/gridproxy_highlevel.v b/lib/threefold/gridproxy/gridproxy_highlevel.v new file mode 100644 index 00000000..8f215987 --- /dev/null +++ b/lib/threefold/gridproxy/gridproxy_highlevel.v @@ -0,0 +1,169 @@ +module gridproxy + +import 
freeflowuniverse.herolib.threefold.gridproxy.model { Contract, ContractFilter, Farm, FarmFilter, Node, NodeFilter, ResourceFilter, Twin } + +// fetch specific twin information by twin id. +// +// * `twin_id`: twin id. +// +// returns: `Twin` or `Error`. +pub fn (mut c GridProxyClient) get_twin_by_id(twin_id u64) !Twin { + twins := c.get_twins(twin_id: twin_id) or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + if twins.len == 0 { + return error_with_code('no twin found for id: ${twin_id}', err_not_found) + } + return twins[0] +} + +// fetch specific twin information by account. +// +// * `account_id`: account id. +// +// returns: `Twin` or `Error`. +pub fn (mut c GridProxyClient) get_twin_by_account(account_id string) !Twin { + twins := c.get_twins(account_id: account_id) or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + if twins.len == 0 { + return error_with_code('no twin found for account_id: ${account_id}', err_not_found) + } + return twins[0] +} + +// fetch specific farm information by id. +// +// * `farm_id`: farm id. +// +// returns: `Farm` or `Error`. +pub fn (mut c GridProxyClient) get_farm_by_id(farm_id u64) !Farm { + farms := c.get_farms(farm_id: farm_id) or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + if farms.len == 0 { + return error_with_code('no farm found for id: ${farm_id}', err_not_found) + } + return farms[0] +} + +// fetch specific farm information by farm name. +// +// * `farm_name`: farm name. +// +// returns: `Farm` or `Error`. 
+pub fn (mut c GridProxyClient) get_farm_by_name(farm_name string) !Farm { + farms := c.get_farms(name: farm_name) or { + return error_with_code('http client error: ${err.msg()}', err_http_client) + } + if farms.len == 0 { + return error_with_code('no farm found with name: ${farm_name}', err_not_found) + } + return farms[0] +} + +// get_farms_by_twin_id returns iterator over all farms information associated with specific twin. +// +// * `twin_id`: twin id. +// +// returns: `FarmIterator`. +pub fn (mut c GridProxyClient) get_farms_by_twin_id(twin_id u64) []Farm { + mut filter := FarmFilter{ + twin_id: twin_id + } + mut iter := c.get_farms_iterator(filter) + mut result := []Farm{} + for f in iter { + result << f + } + return result +} + +// get_contracts_by_twin_id returns iterator over all contracts owned by specific twin. +// +// * `twin_id`: twin id. +// +// returns: `ContractIterator`. +pub fn (mut c GridProxyClient) get_contracts_by_twin_id(twin_id u64) []Contract { + /* + contracts := c.get_contracts(twin_id: twin_id) or { + return error_with_code('http client error: $err.msg()', gridproxy.err_http_client) + }*/ + mut filter := ContractFilter{ + twin_id: twin_id + } + mut iter := c.get_contracts_iterator(filter) + mut result := []Contract{} + for f in iter { + result << f + } + return result +} + +// get_active_contracts returns iterator over `created` contracts owned by specific twin. +// +// * `twin_id`: twin id. +// +// returns: `ContractIterator`. 
+pub fn (mut c GridProxyClient) get_contracts_active(twin_id u64) []Contract { + /* + contracts := c.get_contracts(twin_id: twin_id) or { + return error_with_code('http client error: $err.msg()', gridproxy.err_http_client) + }*/ + mut filter := ContractFilter{ + twin_id: twin_id + state: 'created' + } + + mut iter := c.get_contracts_iterator(filter) + mut result := []Contract{} + for f in iter { + result << f + } + return result +} + +// get_contracts_by_node_id returns iterator over all contracts deployed on specific node. +// +// * `node_id`: node id. +// +// returns: `ContractIterator`. +pub fn (mut c GridProxyClient) get_contracts_by_node_id(node_id u64) []Contract { + /* + contracts := c.get_contracts(node_id: node_id) or { + return error_with_code('http client error: $err.msg()', gridproxy.err_http_client) + }*/ + mut filter := ContractFilter{ + node_id: node_id + } + mut iter := c.get_contracts_iterator(filter) + mut result := []Contract{} + for f in iter { + result << f + } + return result +} + +// get_nodes_has_resources returns iterator over all nodes with specific minimum free reservable resources. +// +// * `free_ips` (u64): minimum free ips. [optional]. +// * `free_mru_gb` (u64): minimum free mru in GB. [optional]. +// * `free_sru_gb` (u64): minimum free sru in GB. [optional]. +// * `free_hru_gb` (u64): minimum free hru in GB. [optional]. +// +// returns: `NodeIterator`. 
+fn (mut c GridProxyClient) get_nodes_has_resources(filter ResourceFilter) []Node { + mut filter_ := NodeFilter{ + free_ips: filter.free_ips + free_mru: filter.free_mru_gb * (1024 * 1024 * 1024) + free_sru: filter.free_sru_gb * (1024 * 1024 * 1024) + free_hru: filter.free_hru_gb * (1024 * 1024 * 1024) + total_cru: filter.free_cpu + } + mut iter := c.get_nodes_iterator(filter_) + mut result := []Node{} + for f in iter { + result << f + } + return result +} diff --git a/lib/threefold/gridproxy/gridproxy_test.v b/lib/threefold/gridproxy/gridproxy_test.v new file mode 100644 index 00000000..5de44ad2 --- /dev/null +++ b/lib/threefold/gridproxy/gridproxy_test.v @@ -0,0 +1,247 @@ +module gridproxy + +import freeflowuniverse.herolib.threefold.gridproxy.model +import time + +const cache = false +const dummy_node = model.Node{ + id: '0000129706-000001-c1e78' + node_id: 1 + farm_id: 2 + twin_id: 8 + grid_version: 3 + uptime: model.SecondUnit(86400) // 86400 seconds = 1440 minutes = 24 hours = 1 day + created: model.UnixTime(1654848126) // GMT: 2022-06-10 08:02:06 + farming_policy_id: 1 + updated_at: model.UnixTime(1654848132) // GMT: 2022-06-10 08:02:12 + capacity: model.NodeCapacity{ + total_resources: model.NodeResources{ + cru: 4 + mru: model.ByteUnit(5178437632) // 5178437632 bytes = 5178.437632 megabytes = 5.2 gigabytes = 0.005178437632 terabytes + sru: model.ByteUnit(1610612736000) // 1610612736000 bytes = 1610612.736000 megabytes = 1610.612736 gigabytes = 16.1 terabytes + hru: model.ByteUnit(1073741824000) // 1073741824000 bytes = 1073741.824 megabytes = 1073.741824 gigabytes = 10.7 terabytes + } + used_resources: model.NodeResources{ + cru: 0 + mru: model.ByteUnit(0) + sru: model.ByteUnit(0) + hru: model.ByteUnit(0) + } + } + location: model.NodeLocation{ + country: 'Belgium' + city: 'Lochristi' + } + public_config: model.PublicConfig{ + domain: '' + gw4: '' + gw6: '' + ipv4: '' + ipv6: '' + } + certification: 'Diy' + status: 'down' + dedicated: false + 
rent_contract_id: 0 + rented_by_twin_id: 0 +} +const dummy_contract_billing = model.ContractBilling{ + amount_billed: model.DropTFTUnit(10000000) // 1 TFT == 1000 mTFT == 1000000 uTFT + discount_received: 'None' + timestamp: model.UnixTime(1655118966) +} + +fn test_create_gridproxy_client_qa() { + mut gp := get(.qa, cache)! + assert gp.is_pingable()! == true +} + +fn test_create_gridproxy_client_dev() { + mut gp := get(.dev, cache)! + assert gp.is_pingable()! == true +} + +fn test_create_gridproxy_client_test() { + mut gp := get(.test, cache)! + assert gp.is_pingable()! == true +} + +fn test_create_gridproxy_client_main() { + mut gp := get(.main, cache)! + assert gp.is_pingable()! == true +} + +fn test_get_nodes_qa() { + mut gp := get(.qa, cache)! + nodes := gp.get_nodes() or { panic('Failed to get nodes') } + assert nodes.len > 0 +} + +fn test_get_nodes_dev() { + mut gp := get(.dev, cache)! + nodes := gp.get_nodes() or { panic('Failed to get nodes') } + assert nodes.len > 0 +} + +fn test_get_nodes_test() { + mut gp := get(.test, cache)! + nodes := gp.get_nodes() or { panic('Failed to get nodes') } + assert nodes.len > 0 +} + +fn test_get_nodes_main() { + mut gp := get(.main, cache)! + nodes := gp.get_nodes() or { panic('Failed to get nodes') } + assert nodes.len > 0 +} + +fn test_get_gateways_qa() { + mut gp := get(.qa, cache)! + nodes := gp.get_gateways() or { panic('Failed to get gateways') } + assert nodes.len > 0 +} + +fn test_get_gateways_dev() { + mut gp := get(.dev, cache)! + nodes := gp.get_gateways() or { panic('Failed to get gateways') } + assert nodes.len > 0 +} + +fn test_get_gateways_test() { + mut gp := get(.test, cache)! + nodes := gp.get_gateways() or { panic('Failed to get gateways') } + assert nodes.len > 0 +} + +fn test_get_gateways_main() { + mut gp := get(.main, cache)! + nodes := gp.get_gateways() or { panic('Failed to get gateways') } + assert nodes.len > 0 +} + +fn test_get_twins_qa() { + mut gp := get(.qa, cache)! 
+ twins := gp.get_twins() or { panic('Failed to get twins') } + assert twins.len > 0 +} + +fn test_get_twins_dev() { + mut gp := get(.dev, cache)! + twins := gp.get_twins() or { panic('Failed to get twins') } + assert twins.len > 0 +} + +fn test_get_twins_test() { + mut gp := get(.test, cache)! + twins := gp.get_twins() or { panic('Failed to get twins') } + assert twins.len > 0 +} + +fn test_get_twins_main() { + mut gp := get(.main, cache)! + twins := gp.get_twins() or { panic('Failed to get twins') } + assert twins.len > 0 +} + +fn test_get_stats_qa() { + mut gp := get(.qa, cache)! + stats := gp.get_stats() or { panic('Failed to get stats') } + assert stats.nodes > 0 +} + +fn test_get_stats_dev() { + mut gp := get(.dev, cache)! + stats := gp.get_stats() or { panic('Failed to get stats') } + assert stats.nodes > 0 +} + +fn test_get_stats_test() { + mut gp := get(.test, cache)! + stats := gp.get_stats() or { panic('Failed to get stats') } + assert stats.nodes > 0 +} + +fn test_get_stats_main() { + mut gp := get(.main, cache)! + stats := gp.get_stats() or { panic('Failed to get stats') } + assert stats.nodes > 0 +} + +fn test_get_contracts_qa() { + mut gp := get(.qa, cache)! + contracts := gp.get_contracts() or { panic('Failed to get contracts') } + assert contracts.len > 0 +} + +fn test_get_contracts_dev() { + mut gp := get(.dev, cache)! + contracts := gp.get_contracts() or { panic('Failed to get contracts') } + assert contracts.len > 0 +} + +fn test_get_contracts_test() { + mut gp := get(.test, cache)! + contracts := gp.get_contracts() or { panic('Failed to get contracts') } + assert contracts.len > 0 +} + +fn test_get_contracts_main() { + mut gp := get(.main, cache)! + contracts := gp.get_contracts() or { panic('Failed to get contracts') } + assert contracts.len > 0 +} + +fn test_get_farms_qa() { + mut gp := get(.qa, cache)! 
+ farms := gp.get_farms() or { panic('Failed to get farms') } + assert farms.len > 0 +} + +fn test_get_farms_dev() { + mut gp := get(.dev, cache)! + farms := gp.get_farms() or { panic('Failed to get farms') } + assert farms.len > 0 +} + +fn test_get_farms_test() { + mut gp := get(.test, cache)! + farms := gp.get_farms() or { panic('Failed to get farms') } + assert farms.len > 0 +} + +fn test_get_farms_main() { + mut gp := get(.main, cache)! + farms := gp.get_farms() or { panic('Failed to get farms') } + assert farms.len > 0 +} + +fn test_elapsed_seconds_conversion() { + assert dummy_node.uptime.to_minutes() == 1440 + assert dummy_node.uptime.to_hours() == 24 + assert dummy_node.uptime.to_days() == 1 +} + +fn test_timestamp_conversion() { + assert dummy_node.created.to_time() == time.unix(1654848126) + assert dummy_node.updated_at.to_time() == time.unix(1654848132) +} + +fn test_storage_unit_conversion() { + assert dummy_node.capacity.total_resources.mru.to_megabytes() == 5178.437632 + assert dummy_node.capacity.total_resources.mru.to_gigabytes() == 5.178437632 + assert dummy_node.capacity.total_resources.mru.to_terabytes() == 0.005178437632 +} + +fn test_tft_conversion() { + assert dummy_contract_billing.amount_billed.to_tft() == 1 + assert dummy_contract_billing.amount_billed.to_mtft() == 1000 + assert dummy_contract_billing.amount_billed.to_utft() == 1000000 +} + +fn test_calc_available_resources_on_node() { + // dummy node was created with 0 used resources + assert dummy_node.calc_available_resources().mru == dummy_node.capacity.total_resources.mru + assert dummy_node.calc_available_resources().hru == dummy_node.capacity.total_resources.hru + assert dummy_node.calc_available_resources().sru == dummy_node.capacity.total_resources.sru + assert dummy_node.calc_available_resources().cru == dummy_node.capacity.total_resources.cru +} diff --git a/lib/threefold/gridproxy/model/contract.v b/lib/threefold/gridproxy/model/contract.v new file mode 100644 index 
00000000..b8e82bf8 --- /dev/null +++ b/lib/threefold/gridproxy/model/contract.v @@ -0,0 +1,52 @@ +module model + +pub struct ContractBilling { +pub: + amount_billed DropTFTUnit @[json: amountBilled] + discount_received string @[json: discountReceived] + timestamp UnixTime @[json: timestamp] +} + +pub struct NodeContractDetails { +pub: + node_id u64 @[json: nodeId] + deployment_data string @[json: deployment_data] + deployment_hash string @[json: deployment_hash] + number_of_public_ips u64 @[json: number_of_public_ips] +} + +pub struct Contract { +pub: + contract_id u64 + twin_id u64 + state string @[json: state] + created_at UnixTime @[json: created_at] + contract_type string @[json: 'type'] + details NodeContractDetails @[json: details] +} + +pub struct Bill { +pub: + amount_billed u64 @[json: amountBilled] + timestamp UnixTime @[json: timestamp] + discount_received string @[json: discountReceived] +} + +// total_billed returns the total amount billed for the contract. +// +// returns: `DropTFTUnit` +// pub fn (c &Contract) total_billed() DropTFTUnit { +// if c.billing.len == 0 { +// return 0 +// } +// mut total := u64(0) +// for b in c.billing { +// total += b.amount_billed +// } +// return DropTFTUnit(total) +// } + +// TODO: Implement Limit struct (size, page, retcount, randomize) +// and embeded it in other structs like Contract to avoid duplicated code +// TODO: check if RetCount is bool or string as swagger doc says +// TODO: check if Randomize can be used in the client and where, it is not documnetd in swagger diff --git a/lib/threefold/gridproxy/model/farm.v b/lib/threefold/gridproxy/model/farm.v new file mode 100644 index 00000000..86bc34a8 --- /dev/null +++ b/lib/threefold/gridproxy/model/farm.v @@ -0,0 +1,22 @@ +module model + +pub struct PublicIP { +pub: + id string + ip string + farm_id string @[json: farmId] + contract_id int @[json: contractId] + gateway string +} + +pub struct Farm { +pub: + name string + farm_id u64 @[json: farmId] + twin_id u64 
@[json: twinId] + pricing_policy_id u64 @[json: pricingPolicyId] + certification_type string @[json: certificationType] + stellar_address string @[json: stellarAddress] + dedicated bool + public_ips []PublicIP @[json: publicIps] +} diff --git a/lib/threefold/gridproxy/model/filter.v b/lib/threefold/gridproxy/model/filter.v new file mode 100644 index 00000000..27b3c3ec --- /dev/null +++ b/lib/threefold/gridproxy/model/filter.v @@ -0,0 +1,575 @@ +module model + +import json + +type OptionU64 = EmptyOption | u64 +type OptionBool = EmptyOption | bool + +@[params] +pub struct FarmFilter { +pub mut: + page OptionU64 = EmptyOption{} + size OptionU64 = EmptyOption{} + ret_count OptionBool = EmptyOption{} + randomize OptionBool = EmptyOption{} + free_ips OptionU64 = EmptyOption{} + total_ips OptionU64 = EmptyOption{} + stellar_address string + pricing_policy_id OptionU64 = EmptyOption{} + farm_id OptionU64 = EmptyOption{} + twin_id OptionU64 = EmptyOption{} + name string + name_contains string + certification_type string + dedicated OptionBool = EmptyOption{} + country string + node_free_mru OptionU64 = EmptyOption{} + node_free_hru OptionU64 = EmptyOption{} + node_free_sru OptionU64 = EmptyOption{} + node_status string + node_rented_by OptionU64 = EmptyOption{} + node_available_for OptionU64 = EmptyOption{} + node_has_gpu OptionBool = EmptyOption{} + node_certified OptionBool = EmptyOption{} +} + +// serialize FarmFilter to map +pub fn (f &FarmFilter) to_map() map[string]string { + mut m := map[string]string{} + + match f.page { + EmptyOption {} + u64 { + m['page'] = f.page.str() + } + } + match f.size { + EmptyOption {} + u64 { + m['size'] = f.size.str() + } + } + match f.ret_count { + EmptyOption {} + bool { + m['ret_count'] = f.ret_count.str() + } + } + match f.randomize { + EmptyOption {} + bool { + m['randomize'] = f.randomize.str() + } + } + match f.free_ips { + EmptyOption {} + u64 { + m['free_ips'] = f.free_ips.str() + } + } + match f.total_ips { + EmptyOption {} + 
u64 { + m['total_ips'] = f.total_ips.str() + } + } + if f.stellar_address != '' { + m['stellar_address'] = f.stellar_address + } + match f.pricing_policy_id { + EmptyOption {} + u64 { + m['pricing_policy_id'] = f.pricing_policy_id.str() + } + } + match f.farm_id { + EmptyOption {} + u64 { + m['farm_id'] = f.farm_id.str() + } + } + match f.twin_id { + EmptyOption {} + u64 { + m['twin_id'] = f.twin_id.str() + } + } + + if f.name != '' { + m['name'] = f.name + } + if f.name_contains != '' { + m['name_contains'] = f.name_contains + } + if f.certification_type != '' { + m['certification_type'] = f.certification_type + } + if f.country != '' { + m['country'] = f.country + } + match f.dedicated { + EmptyOption {} + bool { + m['dedicated'] = f.dedicated.str() + } + } + match f.node_available_for { + EmptyOption {} + u64 { + m['node_available_for'] = f.node_available_for.str() + } + } + match f.node_free_hru { + EmptyOption {} + u64 { + m['node_free_hru'] = f.node_free_hru.str() + } + } + match f.node_free_mru { + EmptyOption {} + u64 { + m['node_free_mru'] = f.node_free_mru.str() + } + } + match f.node_free_sru { + EmptyOption {} + u64 { + m['node_free_sru'] = f.node_free_sru.str() + } + } + match f.node_rented_by { + EmptyOption {} + u64 { + m['node_rented_by'] = f.node_rented_by.str() + } + } + match f.node_has_gpu { + EmptyOption {} + bool { + m['node_has_gpu'] = f.node_has_gpu.str() + } + } + match f.node_certified { + EmptyOption {} + bool { + m['node_certified'] = f.node_certified.str() + } + } + if f.node_status != '' { + m['node_status'] = f.node_status + } + return m +} + +@[params] +pub struct ContractFilter { +pub mut: + page OptionU64 = EmptyOption{} + size OptionU64 = EmptyOption{} + ret_count OptionBool = EmptyOption{} + randomize OptionBool = EmptyOption{} + contract_id OptionU64 = EmptyOption{} + twin_id OptionU64 = EmptyOption{} + node_id OptionU64 = EmptyOption{} + contract_type string + state string + name string + number_of_public_ips OptionU64 = 
EmptyOption{} + deployment_data string + deployment_hash string +} + +// serialize ContractFilter to map +pub fn (f &ContractFilter) to_map() map[string]string { + mut m := map[string]string{} + match f.page { + EmptyOption {} + u64 { + m['page'] = f.page.str() + } + } + match f.size { + EmptyOption {} + u64 { + m['size'] = f.size.str() + } + } + match f.ret_count { + EmptyOption {} + bool { + m['ret_count'] = f.ret_count.str() + } + } + match f.randomize { + EmptyOption {} + bool { + m['randomize'] = f.randomize.str() + } + } + match f.contract_id { + EmptyOption {} + u64 { + m['contract_id'] = f.contract_id.str() + } + } + match f.twin_id { + EmptyOption {} + u64 { + m['twin_id'] = f.twin_id.str() + } + } + match f.node_id { + EmptyOption {} + u64 { + m['node_id'] = f.node_id.str() + } + } + if f.contract_type != '' { + m['type'] = f.contract_type + } + if f.state != '' { + m['state'] = f.state + } + if f.name != '' { + m['name'] = f.name + } + match f.number_of_public_ips { + EmptyOption {} + u64 { + m['number_of_public_ips'] = f.number_of_public_ips.str() + } + } + if f.deployment_data != '' { + m['deployment_data'] = f.deployment_data + } + if f.deployment_hash != '' { + m['deployment_hash'] = f.deployment_hash + } + return m +} + +@[params] +pub struct NodeFilter { +pub mut: + page OptionU64 = EmptyOption{} + size OptionU64 = EmptyOption{} + ret_count OptionBool = EmptyOption{} + randomize OptionBool = EmptyOption{} + free_mru OptionU64 = EmptyOption{} + free_sru OptionU64 = EmptyOption{} + free_hru OptionU64 = EmptyOption{} + free_ips ?u64 + total_mru OptionU64 = EmptyOption{} + total_sru OptionU64 = EmptyOption{} + total_hru OptionU64 = EmptyOption{} + total_cru OptionU64 = EmptyOption{} + city string + city_contains string + country string + country_contains string + farm_name string + farm_name_contains string + ipv4 OptionBool = EmptyOption{} + ipv6 OptionBool = EmptyOption{} + domain OptionBool = EmptyOption{} + status string + dedicated OptionBool = 
EmptyOption{} + healthy OptionBool = EmptyOption{} + rentable OptionBool = EmptyOption{} + rented_by OptionU64 = EmptyOption{} + rented OptionBool = EmptyOption{} + available_for OptionU64 = EmptyOption{} + farm_ids []u64 + node_ids []u64 + node_id ?u32 + twin_id OptionU64 = EmptyOption{} + certification_type string + has_gpu OptionBool = EmptyOption{} + has_ipv6 ?bool + gpu_device_id string + gpu_device_name string + gpu_vendor_id string + gpu_vendor_name string + gpu_available OptionBool = EmptyOption{} + features []string +} + +// serialize NodeFilter to map +pub fn (p &NodeFilter) to_map() map[string]string { + mut m := map[string]string{} + match p.page { + EmptyOption {} + u64 { + m['page'] = p.page.str() + } + } + match p.size { + EmptyOption {} + u64 { + m['size'] = p.size.str() + } + } + match p.ret_count { + EmptyOption {} + bool { + m['ret_count'] = p.ret_count.str() + } + } + match p.randomize { + EmptyOption {} + bool { + m['randomize'] = p.randomize.str() + } + } + match p.free_mru { + EmptyOption {} + u64 { + m['free_mru'] = p.free_mru.str() + } + } + match p.free_sru { + EmptyOption {} + u64 { + m['free_sru'] = p.free_sru.str() + } + } + match p.free_hru { + EmptyOption {} + u64 { + m['free_hru'] = p.free_hru.str() + } + } + + if v := p.free_ips { + m['free_ips'] = v.str() + } + + if v := p.has_ipv6 { + m['has_ipv6'] = v.str() + } + + match p.total_cru { + EmptyOption {} + u64 { + m['total_cru'] = p.total_cru.str() + } + } + match p.total_hru { + EmptyOption {} + u64 { + m['total_hru'] = p.total_hru.str() + } + } + match p.total_mru { + EmptyOption {} + u64 { + m['total_mru'] = p.total_mru.str() + } + } + match p.total_sru { + EmptyOption {} + u64 { + m['total_sru'] = p.total_sru.str() + } + } + if p.status != '' { + m['status'] = p.status + } + if p.city != '' { + m['city'] = p.city + } + if p.city_contains != '' { + m['city_contains'] = p.city_contains + } + if p.country != '' { + m['country'] = p.country + } + if p.country_contains != '' { + 
m['country_contains'] = p.country_contains + } + if p.farm_name != '' { + m['farm_name'] = p.farm_name + } + if p.farm_name_contains != '' { + m['farm_name_contains'] = p.farm_name_contains + } + match p.ipv4 { + EmptyOption {} + bool { + m['ipv4'] = p.ipv4.str() + } + } + match p.ipv6 { + EmptyOption {} + bool { + m['ipv6'] = p.ipv6.str() + } + } + match p.healthy { + EmptyOption {} + bool { + m['healthy'] = p.healthy.str() + } + } + match p.domain { + EmptyOption {} + bool { + m['domain'] = p.domain.str() + } + } + match p.dedicated { + EmptyOption {} + bool { + m['dedicated'] = p.dedicated.str() + } + } + match p.rentable { + EmptyOption {} + bool { + m['rentable'] = p.rentable.str() + } + } + match p.rented_by { + EmptyOption {} + u64 { + m['rented_by'] = p.rented_by.str() + } + } + match p.rented { + EmptyOption {} + bool { + m['rented'] = p.rented.str() + } + } + match p.available_for { + EmptyOption {} + u64 { + m['available_for'] = p.available_for.str() + } + } + if p.features.len > 0 { + m['features'] = json.encode(p.features).all_after('[').all_before(']') + } + if p.farm_ids.len > 0 { + m['farm_ids'] = json.encode(p.farm_ids).all_after('[').all_before(']') + } + if p.node_ids.len > 0 { + m['node_ids'] = json.encode(p.node_ids).all_after('[').all_before(']') + } + if n := p.node_id { + m['node_id'] = n.str() + } + match p.twin_id { + EmptyOption {} + u64 { + m['twin_id'] = p.twin_id.str() + } + } + if p.certification_type != '' { + m['certification_type'] = p.certification_type + } + match p.has_gpu { + EmptyOption {} + bool { + m['has_gpu'] = p.has_gpu.str() + } + } + if p.gpu_device_id != '' { + m['gpu_device_id'] = p.gpu_device_id + } + if p.gpu_device_name != '' { + m['gpu_device_name'] = p.gpu_device_name + } + if p.gpu_vendor_id != '' { + m['gpu_vendor_id'] = p.gpu_vendor_id + } + if p.gpu_vendor_name != '' { + m['gpu_vendor_name'] = p.gpu_vendor_name + } + match p.gpu_available { + EmptyOption {} + bool { + m['gpu_available'] = 
p.gpu_available.str() + } + } + return m +} + +pub enum NodeStatus { + all + online +} + +@[params] +pub struct ResourceFilter { +pub mut: + free_mru_gb u64 + free_sru_gb u64 + free_hru_gb u64 + free_cpu u64 + free_ips u64 +} + +@[params] +pub struct StatFilter { +pub mut: + status NodeStatus +} + +@[params] +pub struct TwinFilter { +pub mut: + page OptionU64 = EmptyOption{} + size OptionU64 = EmptyOption{} + ret_count OptionBool = EmptyOption{} + randomize OptionBool = EmptyOption{} + twin_id OptionU64 = EmptyOption{} + account_id string + relay string + public_key string +} + +// serialize TwinFilter to map +pub fn (p &TwinFilter) to_map() map[string]string { + mut m := map[string]string{} + match p.page { + EmptyOption {} + u64 { + m['page'] = p.page.str() + } + } + match p.size { + EmptyOption {} + u64 { + m['size'] = p.size.str() + } + } + match p.ret_count { + EmptyOption {} + bool { + m['ret_count'] = p.ret_count.str() + } + } + match p.randomize { + EmptyOption {} + bool { + m['randomize'] = p.randomize.str() + } + } + match p.twin_id { + EmptyOption {} + u64 { + m['twin_id'] = p.twin_id.str() + } + } + if p.account_id != '' { + m['account_id'] = p.account_id + } + if p.relay != '' { + m['relay'] = p.relay + } + if p.public_key != '' { + m['public_key'] = p.public_key + } + return m +} diff --git a/lib/threefold/gridproxy/model/iterators.v b/lib/threefold/gridproxy/model/iterators.v new file mode 100644 index 00000000..94ec96f1 --- /dev/null +++ b/lib/threefold/gridproxy/model/iterators.v @@ -0,0 +1,101 @@ +module model + +pub type NodeGetter = fn (NodeFilter) ![]Node + +pub struct NodeIterator { +pub mut: + filter NodeFilter +pub: + get_func NodeGetter @[required] +} + +pub fn (mut i NodeIterator) next() ?[]Node { + match i.filter.page { + EmptyOption { + i.filter.page = u64(1) + } + u64 { + i.filter.page = i.filter.page as u64 + 1 + } + } + nodes := i.get_func(i.filter) or { return none } + if nodes.len == 0 { + return none + } + return nodes +} + +pub 
type FarmGetter = fn (FarmFilter) ![]Farm + +pub struct FarmIterator { +pub mut: + filter FarmFilter +pub: + get_func FarmGetter @[required] +} + +pub fn (mut i FarmIterator) next() ?[]Farm { + match i.filter.page { + EmptyOption { + i.filter.page = u64(1) + } + u64 { + i.filter.page = i.filter.page as u64 + 1 + } + } + farms := i.get_func(i.filter) or { return none } + if farms.len == 0 { + return none + } + return farms +} + +pub type ContractGetter = fn (ContractFilter) ![]Contract + +pub struct ContractIterator { +pub mut: + filter ContractFilter +pub: + get_func ContractGetter @[required] +} + +pub fn (mut i ContractIterator) next() ?[]Contract { + match i.filter.page { + EmptyOption { + i.filter.page = u64(1) + } + u64 { + i.filter.page = i.filter.page as u64 + 1 + } + } + contracts := i.get_func(i.filter) or { return none } + if contracts.len == 0 { + return none + } + return contracts +} + +pub type TwinGetter = fn (TwinFilter) ![]Twin + +pub struct TwinIterator { +pub mut: + filter TwinFilter +pub: + get_func TwinGetter @[required] +} + +pub fn (mut i TwinIterator) next() ?[]Twin { + match i.filter.page { + EmptyOption { + i.filter.page = u64(1) + } + u64 { + i.filter.page = i.filter.page as u64 + 1 + } + } + twins := i.get_func(i.filter) or { return none } + if twins.len == 0 { + return none + } + return twins +} diff --git a/lib/threefold/gridproxy/model/model.v b/lib/threefold/gridproxy/model/model.v new file mode 100644 index 00000000..9198cc86 --- /dev/null +++ b/lib/threefold/gridproxy/model/model.v @@ -0,0 +1,106 @@ +module model + +import time { Time } +import math { floor, pow10 } + +type ByteUnit = u64 + +pub fn (u ByteUnit) to_megabytes() f64 { + return f64(u) / 1e+6 +} + +pub fn (u ByteUnit) to_gigabytes() f64 { + return f64(u) / 1e+9 +} + +pub fn (u ByteUnit) to_terabytes() f64 { + return f64(u) / 1e+12 +} + +pub fn (u ByteUnit) str() string { + if u >= 1e+12 { + return '${u.to_terabytes():.2} TB' + } else if u >= 1e+9 { + return 
'${u.to_gigabytes():.2} GB' + } else if u >= 1e+6 { + return '${u.to_megabytes():.2} MB' + } + return '${u64(u)} Bytes' +} + +// SecondUnit represents a duration in seconds +type SecondUnit = u64 + +pub fn (u SecondUnit) to_minutes() f64 { + return f64(u) / 60 +} + +pub fn (u SecondUnit) to_hours() f64 { + return f64(u) / (60 * 60) +} + +pub fn (u SecondUnit) to_days() f64 { + return f64(u) / (60 * 60 * 24) +} + +pub fn (u SecondUnit) str() string { + sec_num := u64(u) + d := floor(sec_num / 86400) + h := math.fmod(floor(sec_num / 3600), 24) + m := math.fmod(floor(sec_num / 60), 60) + s := sec_num % 60 + mut str := '' + if d > 0 { + str += '${d} days ' + } + if h > 0 { + str += '${h} hours ' + } + if m > 0 { + str += '${m} minutes ' + } + if s > 0 { + str += '${s} seconds' + } + return str +} + +// UnixTime represent time in seconds since epoch (timestamp) +type UnixTime = u64 + +pub fn (t UnixTime) to_time() Time { + return time.unix(t) +} + +pub fn (t UnixTime) str() string { + return '${t.to_time().local()}' +} + +// this is the smallest unit used to calculate the billing and and the one natively fetched from the API +// 1 TFT = 10_000_000 drops = 1_000 mTFT = 1_000_000 uTFT +type DropTFTUnit = u64 + +pub fn (t DropTFTUnit) to_tft() f64 { + return f64(t) / pow10(7) // 1 TFT = 10_000_000 drops +} + +pub fn (t DropTFTUnit) to_mtft() f64 { + return f64(t) / pow10(4) // 1 mTFT (milliTFT) = 10_000 drops +} + +pub fn (t DropTFTUnit) to_utft() f64 { + return f64(t) / 10.0 // 1 uTFT (microTFT) = 10 drops +} + +pub fn (u DropTFTUnit) str() string { + if u >= pow10(7) { + return '${u.to_tft():.3} TFT' + } else if u >= pow10(4) { + return '${u.to_mtft():.3} mTFT' + } else if u >= 10 { + return '${u.to_utft():.3} uTFT' + } + return '${u64(u)} dTFT' // Short for dropTFT (1 TFT = 10_000_000 drops). dylan suggests the name and i'm using this till we have an officail name! 
+} + +struct EmptyOption {} diff --git a/lib/threefold/gridproxy/model/node.v b/lib/threefold/gridproxy/model/node.v new file mode 100644 index 00000000..72f6554e --- /dev/null +++ b/lib/threefold/gridproxy/model/node.v @@ -0,0 +1,128 @@ +module model + +pub struct NodeResources { +pub: + cru u64 + mru ByteUnit + sru ByteUnit + hru ByteUnit +} + +pub struct NodeCapacity { +pub: + total_resources NodeResources + used_resources NodeResources +} + +pub struct NodeLocation { +pub: + country string + city string +} + +pub struct PublicConfig { +pub: + domain string + gw4 string + gw6 string + ipv4 string + ipv6 string +} + +// this is ugly, but it works. we need two models for `Node` and reimplemnt the same fields expcept for capacity srtucture +// it's a hack to make the json parser work as the gridproxy API have some inconsistencies +// see for more context: https://github.com/threefoldtech/tfgridclient_proxy/issues/164 +pub struct Node_ { +pub: + id string + node_id u64 @[json: nodeId] + farm_id u64 @[json: farmId] + twin_id u64 @[json: twinId] + grid_version u64 @[json: gridVersion] + uptime SecondUnit + created UnixTime @[json: created] + farming_policy_id u64 @[json: farmingPolicyId] + updated_at UnixTime @[json: updatedAt] + total_resources NodeResources + used_resources NodeResources + location NodeLocation + public_config PublicConfig @[json: publicConfig] + certification string @[json: certificationType] + status string + dedicated bool + healthy bool + rent_contract_id u64 @[json: rentContractId] + rented_by_twin_id u64 @[json: rentedByTwinId] +} + +pub struct Node { +pub: + id string + node_id u64 @[json: nodeId] + farm_id u64 @[json: farmId] + twin_id u64 @[json: twinId] + grid_version u64 @[json: gridVersion] + uptime SecondUnit + created UnixTime @[json: created] + farming_policy_id u64 @[json: farmingPolicyId] + updated_at UnixTime @[json: updatedAt] + capacity NodeCapacity + location NodeLocation + public_config PublicConfig @[json: publicConfig] + 
certification string @[json: certificationType] + status string + dedicated bool + healthy bool + rent_contract_id u64 @[json: rentContractId] + rented_by_twin_id u64 @[json: rentedByTwinId] +} + +fn calc_available_resources(total_resources NodeResources, used_resources NodeResources) NodeResources { + return NodeResources{ + cru: total_resources.cru - used_resources.cru + mru: total_resources.mru - used_resources.mru + sru: total_resources.sru - used_resources.sru + hru: total_resources.hru - used_resources.hru + } +} + +// calc_available_resources calculate the reservable capacity of the node. +// +// Returns: `NodeResources` +pub fn (n &Node) calc_available_resources() NodeResources { + total_resources := n.capacity.total_resources + used_resources := n.capacity.used_resources + return calc_available_resources(total_resources, used_resources) +} + +// with_nested_capacity enable the client to have one representation of the node model +pub fn (n &Node_) with_nested_capacity() Node { + return Node{ + id: n.id + node_id: n.node_id + farm_id: n.farm_id + twin_id: n.twin_id + grid_version: n.grid_version + uptime: n.uptime + created: n.created + farming_policy_id: n.farming_policy_id + updated_at: n.updated_at + capacity: NodeCapacity{ + total_resources: n.total_resources + used_resources: n.used_resources + } + location: n.location + public_config: n.public_config + certification: n.certification + status: n.status + dedicated: n.dedicated + healthy: n.healthy + rent_contract_id: n.rent_contract_id + rented_by_twin_id: n.rented_by_twin_id + } +} + +// is_online returns true if the node is online, otherwise false. 
+pub fn (n &Node) is_online() bool { + return n.status == 'up' +} diff --git a/lib/threefold/gridproxy/model/stats.v b/lib/threefold/gridproxy/model/stats.v new file mode 100644 index 00000000..3b938479 --- /dev/null +++ b/lib/threefold/gridproxy/model/stats.v @@ -0,0 +1,44 @@ +module model + +pub struct GridStat { +pub: + nodes u64 + farms u64 + countries u64 + total_cru u64 @[json: totalCru] + total_sru ByteUnit @[json: totalSru] + total_mru ByteUnit @[json: totalMru] + total_hru ByteUnit @[json: totalHru] + public_ips u64 @[json: publicIps] + access_nodes u64 @[json: accessNodes] + gateways u64 + twins u64 + contracts u64 + nodes_distribution map[string]u64 @[json: nodesDistribution] +} + +pub struct NodeStatisticsResources { +pub: + cru u64 + hru ByteUnit + ipv4u u64 + mru ByteUnit + sru ByteUnit +} + +pub struct NodeStatisticsUsers { +pub: + deployments u64 + workloads u64 +} + +pub struct NodeStats { +pub: + system NodeStatisticsResources + + total NodeStatisticsResources + + used NodeStatisticsResources + + users NodeStatisticsUsers +} diff --git a/lib/threefold/gridproxy/model/twin.v b/lib/threefold/gridproxy/model/twin.v new file mode 100644 index 00000000..4285b431 --- /dev/null +++ b/lib/threefold/gridproxy/model/twin.v @@ -0,0 +1,8 @@ +module model + +pub struct Twin { +pub: + twin_id u64 @[json: twinId] + account_id string @[json: accountId] + ip string +} diff --git a/lib/threefold/main.v b/lib/threefold/main.v new file mode 100644 index 00000000..5cdf5711 --- /dev/null +++ b/lib/threefold/main.v @@ -0,0 +1,6 @@ +module main + +import freeflowuniverse.herolib.threefold.deploy + +fn main() { +} diff --git a/lib/threefold/nodepilot/nodepilot.v b/lib/threefold/nodepilot/nodepilot.v new file mode 100644 index 00000000..401a603a --- /dev/null +++ b/lib/threefold/nodepilot/nodepilot.v @@ -0,0 +1,108 @@ +module nodepilot + +import freeflowuniverse.herolib.builder + +struct NodePilot { + noderoot string + repository string +mut: + node builder.Node +} + +pub 
fn nodepilot_new(name string, ipaddr string) ?NodePilot { + node := builder.node_new(name: name, ipaddr: ipaddr)? + return NodePilot{ + node: node + noderoot: '/root/node-pilot-light' + repository: 'https://github.com/threefoldtech/node-pilot-light' + } +} + +pub fn (mut n NodePilot) prepare() ? { + // not how its supposed to be used, todo is the right way + prepared := n.node.cache.get('nodepilot-prepare') or { '' } + if prepared != '' { + return + } + + if !n.node.cmd_exists('git') { + n.node.package_install(name: 'git')? + } + + if !n.node.cmd_exists('docker') { + n.node.package_install(name: 'ca-certificates curl gnupg lsb-release')? + n.node.executor.exec('curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --batch --yes --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg')? + + arch := n.node.executor.exec('dpkg --print-architecture')?.trim_space() + release := n.node.executor.exec('lsb_release -cs')?.trim_space() + + n.node.executor.exec('echo "deb [arch=${arch} signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu ${release} stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null')? + + n.node.package_refresh()? + n.node.package_install(name: 'docker-ce docker-ce-cli containerd.io')? + n.node.executor.exec('service docker start')? + } + + n.node.executor.exec('docker ps -a')? + + if !n.node.executor.dir_exists(n.noderoot) { + // FIXME: repository is private + n.node.executor.exec('git clone ${n.repository} ${n.noderoot}')? + } + + n.node.cache.set('nodepilot-prepare', 'ready', 600)? +} + +fn (mut n NodePilot) is_running(s string) bool { + test := n.node.executor.exec('docker ps | grep ${s}') or { return false } + return true +} + +pub fn (mut n NodePilot) fuse_running() bool { + return n.is_running('fuse-000') +} + +pub fn (mut n NodePilot) fuse() ? { + rootdir := '/mnt/bc-fuse' + n.node.executor.exec('root=${rootdir} bash -x ${n.noderoot}/fuse/fuse.sh')? 
+} + +pub fn (mut n NodePilot) harmony_running() bool { + return n.is_running('harmony') +} + +pub fn (mut n NodePilot) harmony() ? { + rootdir := '/mnt/bc-harmony' + n.node.executor.exec('root=${rootdir} bash -x ${n.noderoot}/harmony/harmony.sh')? +} + +pub fn (mut n NodePilot) pokt_running() bool { + return n.is_running('pokt-000') +} + +pub fn (mut n NodePilot) pokt() ? { + test := n.node.executor.exec('docker ps | grep pokt-000') or { '' } + if test != '' { + return error('Pokt instance already running') + } + + rootdir := '/mnt/bc-pokt' + n.node.executor.exec('root=${rootdir} bash -x ${n.noderoot}/pokt/pokt.sh')? +} + +fn (mut n NodePilot) overlayfs(ropath string, rwpath string, tmp string, target string) ? { + n.node.executor.exec('mount -t overlay overlay -o lowerdir=${ropath},upperdir=${rwpath},workdir=${tmp} ${target}')? +} + +// make it easy by using the same password everywhere and the same host +// only namespace names needs to be different +fn (mut n NodePilot) zdbfs(host string, meta string, data string, temp string, password string, mountpoint string) ? { + mut zdbcmd := 'zdbfs ${mountpoint} -o ro ' + zdbcmd += '-o mh=${host} -o mn=${meta} -o ms=${password} ' + zdbcmd += '-o dh=${host} -o dn=${data} -o ds=${password} ' + zdbcmd += '-o th=${host} -o tn=${temp} -o ts=${password}' + + n.node.executor.exec(zdbcmd)? +} + +// TODO: pokt chains diff --git a/lib/threefold/nodepilot/readme.md b/lib/threefold/nodepilot/readme.md new file mode 100644 index 00000000..f301bf8b --- /dev/null +++ b/lib/threefold/nodepilot/readme.md @@ -0,0 +1,6 @@ +# Pokt.Network installer + +A set of tools to install your own pokt.network node in an easy way. 
+ +> TODO: not sure if finished + diff --git a/lib/threefold/rmb/model_rmb.v b/lib/threefold/rmb/model_rmb.v new file mode 100644 index 00000000..a5235295 --- /dev/null +++ b/lib/threefold/rmb/model_rmb.v @@ -0,0 +1,32 @@ +module rmb + +pub struct RmbMessage { +pub mut: + ver int = 1 + cmd string + src string + ref string + exp u64 + dat string + dst []u32 + ret string + now u64 + shm string +} + +pub struct RmbError { +pub mut: + code int + message string +} + +pub struct RmbResponse { +pub mut: + ver int = 1 + ref string // todo: define + dat string + dst string // todo: define what is this + now u64 + shm string // todo: what is this? + err RmbError +} diff --git a/lib/threefold/rmb/readme.md b/lib/threefold/rmb/readme.md new file mode 100644 index 00000000..3590ea78 --- /dev/null +++ b/lib/threefold/rmb/readme.md @@ -0,0 +1,34 @@ +# RMB + +Reliable Message Bus + +Can talk to ZOS'es, ... + +## requirements + +We need client to rmb-rs + +compile rmb-rs (see below) + +```bash +rmb-peer --mnemonics "$(cat mnemonic.txt)" --relay wss://relay.dev.grid.tf:443 --substrate wss://tfchain.dev.grid.tf:443 + +#OR: + +export TFCHAINSECRET='something here' + +rmb-peer --mnemonics "$TFCHAINSECRET" --relay wss://relay.dev.grid.tf:443 --substrate wss://tfchain.dev.grid.tf:443 + +``` + +### for developers + +more info see https://github.com/threefoldtech/rmb-rs +the message format of RMB itself https://github.com/threefoldtech/rmb-rs/blob/main/proto/types.proto + + +> TODO: implement each endpoint on the zerohub here at client + +> TODO: the original code comes from code/github/threefoldtech/farmerbot/farmerbot/system/zos.v + + diff --git a/lib/threefold/rmb/rmb_calls_zos.v b/lib/threefold/rmb/rmb_calls_zos.v new file mode 100644 index 00000000..5a1485b4 --- /dev/null +++ b/lib/threefold/rmb/rmb_calls_zos.v @@ -0,0 +1,30 @@ +module rmb + +import encoding.base64 +import json + +// if true the ZOS has a public ip address +pub fn (mut z RMBClient) zos_has_public_ipaddr(dst u32) !bool 
{
+	response := z.rmb_request('zos.network.public_config_get', dst, '')!
+	if response.err.message != '' {
+		return false
+	}
+	return true
+}
+
+// get_zos_system_version returns the ZOS version string reported by the node at twin `dst`
+pub fn (mut z RMBClient) get_zos_system_version(dst u32) !string {
+	response := z.rmb_request('zos.system.version', dst, '')!
+	if response.err.message != '' {
+		return error('${response.err.message}')
+	}
+	// payload is base64 encoded on the bus, decode before returning
+	return base64.decode_str(response.dat)
+}
+
+// TODO: point to documentation where it explains what this means, what is zos_wg_port and why do we need it
+pub fn (mut z RMBClient) get_zos_wg_ports(dst u32) ![]u16 {
+	response := z.rmb_request('zos.network.list_wg_ports', dst, '')!
+	if response.err.message != '' {
+		return error('${response.err.message}')
+	}
+	// json.decode returns a Result: propagate decode failures to the caller
+	return json.decode([]u16, base64.decode_str(response.dat))!
+}
diff --git a/lib/threefold/rmb/rmb_calls_zos_statistics.v b/lib/threefold/rmb/rmb_calls_zos_statistics.v
new file mode 100644
index 00000000..b7410b95
--- /dev/null
+++ b/lib/threefold/rmb/rmb_calls_zos_statistics.v
@@ -0,0 +1,29 @@
+module rmb
+
+import json
+import encoding.base64
+
+pub struct ZosResources {
+pub mut:
+	cru   u64
+	sru   u64
+	hru   u64
+	mru   u64
+	ipv4u u64
+}
+
+pub struct ZosResourcesStatistics {
+pub mut:
+	total  ZosResources
+	used   ZosResources
+	system ZosResources
+}
+
+// get zos statistic from a node, nodeid is the parameter
+pub fn (mut z RMBClient) get_zos_statistics(dst u32) !ZosResourcesStatistics {
+	// was rmb_client_request: no such method exists, the client method is rmb_request (see rmb_request.v)
+	response := z.rmb_request('zos.statistics.get', dst, '')!
+	if response.err.message != '' {
+		return error('${response.err.message}')
+	}
+	return json.decode(ZosResourcesStatistics, base64.decode_str(response.dat))!
+}
diff --git a/lib/threefold/rmb/rmb_calls_zos_storagepools.v b/lib/threefold/rmb/rmb_calls_zos_storagepools.v
new file mode 100644
index 00000000..06dc0b53
--- /dev/null
+++ b/lib/threefold/rmb/rmb_calls_zos_storagepools.v
@@ -0,0 +1,44 @@
+module rmb
+
+import json
+import encoding.base64
+
+struct ZosPoolJSON {
+mut:
+	name      string
+	pool_type string @[json: 'type'] // TODO: this should be an enum and we need to define what it is
+	size      int // TODO: what does it mean? used how much what type?
+	used      int // TODO: what does it mean? used how much what type?
+}
+
+pub struct ZosPool {
+pub mut:
+	name      string
+	pool_type PoolType
+	size      int
+	used      int
+}
+
+enum PoolType {
+	dontknow // TODO:
+}
+
+// get storage pools from a zos, the argument is u32 address of the zos
+pub fn (mut z RMBClient) get_storage_pools(dst u32) ![]ZosPool {
+	response := z.rmb_request('zos.storage.pools', dst, '')!
+	if response.err.message != '' {
+		return error('${response.err.message}')
+	}
+	// propagate decode failures instead of discarding the Result
+	objs := json.decode([]ZosPoolJSON, base64.decode_str(response.dat))!
+	mut pools := []ZosPool{cap: objs.len}
+	for o in objs {
+		pools << ZosPool{
+			name:      o.name
+			size:      o.size
+			used:      o.used
+			pool_type: .dontknow // TODO
+		}
+	}
+	return pools
+}
diff --git a/lib/threefold/rmb/rmb_client.v b/lib/threefold/rmb/rmb_client.v
new file mode 100644
index 00000000..78b1e6d1
--- /dev/null
+++ b/lib/threefold/rmb/rmb_client.v
@@ -0,0 +1,81 @@
+module rmb
+
+// import freeflowuniverse.herolib.clients.httpconnection
+import freeflowuniverse.herolib.clients.redisclient { RedisURL }
+import os
+
+pub struct RMBClient {
+pub mut:
+	relay_url        string
+	tfchain_url      string
+	tfchain_mnemonic string
+	redis            &redisclient.Redis @[str: skip]
+}
+
+pub enum TFNetType {
+	unspecified
+	main
+	test
+	dev
+	qa
+}
+
+@[params]
+pub struct RMBClientArgs {
+pub:
+	nettype          TFNetType
+	relay_url        string
+	tfchain_url      string
+	tfchain_mnemonic string
+}
+
+// params
+// relay_url string
+// TFNetType, default not specified, can chose unspecified, main, test, dev, qa
+// tfchain_url
string= e.g. "wss://relay.dev.grid.tf:443" OPTIONAL
+// tfchain_mnemonic string= the TFChain mnemonic words OPTIONAL (falls back to TFCHAINSECRET env var)
+pub fn new(args_ RMBClientArgs) !RMBClient {
+	mut args := args_
+	// no mnemonic passed in: fall back to the TFCHAINSECRET environment variable
+	if args.tfchain_mnemonic == '' {
+		secret := os.getenv('TFCHAINSECRET')
+		if secret != '' {
+			args.tfchain_mnemonic = secret
+		} else {
+			return error('need to specify TFCHAINSECRET (menomics for TFChain) as env argument or inside client')
+		}
+	}
+	if args.nettype == .main {
+		args.relay_url = 'wss://relay.grid.tf:443'
+		args.tfchain_url = 'wss://tfchain.grid.tf:443'
+	}
+	if args.nettype == .test {
+		args.relay_url = 'wss://relay.test.grid.tf:443'
+		args.tfchain_url = 'wss://tfchain.test.grid.tf:443'
+	}
+	if args.nettype == .dev {
+		args.relay_url = 'wss://relay.dev.grid.tf:443'
+		args.tfchain_url = 'wss://tfchain.dev.grid.tf:443'
+	}
+	if args.nettype == .qa {
+		args.relay_url = 'wss://relay.qa.grid.tf:443'
+		args.tfchain_url = 'wss://tfchain.qa.grid.tf:443'
+	}
+
+	// validate before opening the redis connection so bad config fails fast
+	if args.relay_url == '' || args.tfchain_url == '' {
+		return error('need to specify relay_url and tfchain_url.')
+	}
+	if args.tfchain_mnemonic.len < 20 {
+		return error('need to specify tfchain mnemonic, now too short.')
+	}
+
+	mut redis := redisclient.core_get(RedisURL{})!
+
+	mut cl := RMBClient{
+		redis:            redis
+		relay_url:        args.relay_url
+		tfchain_url:      args.tfchain_url
+		tfchain_mnemonic: args.tfchain_mnemonic
+	}
+
+	// TODO: there should be a check here that rmb peer is accessible and working
+
+	return cl
+}
diff --git a/lib/threefold/rmb/rmb_request.v b/lib/threefold/rmb/rmb_request.v
new file mode 100644
index 00000000..7936b602
--- /dev/null
+++ b/lib/threefold/rmb/rmb_request.v
@@ -0,0 +1,24 @@
+module rmb
+
+import encoding.base64
+import rand
+import time
+import json
+
+// cmd is e.g.
+pub fn (mut z RMBClient) rmb_request(cmd string, dst u32, payload string) !RmbResponse { + msg := RmbMessage{ + ver: 1 + cmd: cmd + exp: 5 + dat: base64.encode_str(payload) + dst: [dst] + ret: rand.uuid_v4() + now: u64(time.now().unix()) + } + request := json.encode_pretty(msg) + z.redis.lpush('msgbus.system.local', request)! + response_json := z.redis.blpop(msg.ret, 5)! + response := json.decode(RmbResponse, response_json[1])! + return response +} diff --git a/lib/threefold/rmb/rmb_test.v b/lib/threefold/rmb/rmb_test.v new file mode 100644 index 00000000..780064c1 --- /dev/null +++ b/lib/threefold/rmb/rmb_test.v @@ -0,0 +1,13 @@ +module rmb + +import freeflowuniverse.herolib.ui.console + +fn test_main() ? { + mut cl := new(nettype: .dev)! + + mut r := cl.get_zos_statistics(1)! + + console.print_debug(r) + + panic('ddd') +} diff --git a/lib/threefold/tfgrid3deployer/.heroscript b/lib/threefold/tfgrid3deployer/.heroscript new file mode 100644 index 00000000..c413c05f --- /dev/null +++ b/lib/threefold/tfgrid3deployer/.heroscript @@ -0,0 +1,8 @@ + +!!hero_code.generate_client + name:'tfgrid3deployer' + classname:'TFGridDeployer' + singleton:0 + default:1 + hasconfig:1 + reset:0 \ No newline at end of file diff --git a/lib/threefold/tfgrid3deployer/_todo/base.v b/lib/threefold/tfgrid3deployer/_todo/base.v new file mode 100644 index 00000000..de5c3204 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/_todo/base.v @@ -0,0 +1,44 @@ +module models + +import freeflowuniverse.herolib.threefold.grid +import log + +// ContractMetaData struct to represent a deployment metadata. 
+pub struct ContractMetaData { +pub mut: + type_ string @[json: 'type'] + name string + project_name string @[json: 'projectName'] +} + +// // GridMachinesModel struct to represent multiple machines in the grid +// pub struct GridMachinesModel { +// mnemonic string +// ssh_key string +// chain_network grid.ChainNetwork +// pub mut: +// client &GridClient = unsafe { nil } +// node_id int +// network NetworkInfo +// machines []MachineModel +// name string +// metadata string +// } + +// // GridContracts struct to represent contracts in the grid +// pub struct GridContracts { +// pub mut: +// client &GridClient = unsafe { nil } +// network grid.ChainNetwork +// } + +// // GridClient struct to represent the client interacting with the grid +// pub struct GridClient { +// pub mut: +// mnemonic string +// ssh_key string +// chain_network grid.ChainNetwork +// deployer grid.Deployer +// machines GridMachinesModel +// contracts GridContracts +// } diff --git a/lib/threefold/tfgrid3deployer/_todo/client.v b/lib/threefold/tfgrid3deployer/_todo/client.v new file mode 100644 index 00000000..a4b15486 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/_todo/client.v @@ -0,0 +1,16 @@ +module models + +import freeflowuniverse.herolib.threefold.grid +import log + +// GridClient struct to represent the client interacting with the grid +pub struct Deployment { +mut: + deployer grid.Deployer +pub mut: + mnemonic string + ssh_key string + chain_network grid.ChainNetwork + machines GridMachinesModel + contracts GridContracts +} diff --git a/lib/threefold/tfgrid3deployer/_todo/k8s.v b/lib/threefold/tfgrid3deployer/_todo/k8s.v new file mode 100644 index 00000000..61c9c934 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/_todo/k8s.v @@ -0,0 +1,20 @@ +module models + +pub struct KubernetesModel { +} + +pub fn (mut km KubernetesModel) deploy() { + println('Not Implemented') +} + +pub fn (mut km KubernetesModel) delete() { + println('Not Implemented') +} + +pub fn (mut km KubernetesModel) 
get() { + println('Not Implemented') +} + +pub fn (mut km KubernetesModel) update() { + println('Not Implemented') +} diff --git a/lib/threefold/tfgrid3deployer/_todo/machines.v b/lib/threefold/tfgrid3deployer/_todo/machines.v new file mode 100644 index 00000000..4cd682a2 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/_todo/machines.v @@ -0,0 +1,264 @@ +module models + +import freeflowuniverse.herolib.threefold.grid +import freeflowuniverse.herolib.threefold.grid.models as grid_models +import rand +import freeflowuniverse.herolib.ui.console +import json + +// Deploy the workloads +pub fn (mut gm GridMachinesModel) deploy(vms GridMachinesModel) ! { + console.print_header('Starting deployment process.') + + // Prepare Workloads + workloads := create_workloads(mut gm, vms)! + + // Create and deploy deployment + contract_id := create_and_deploy_deployment(mut gm, vms, workloads)! + + // Fetch deployment result + machine_res := fetch_deployment_result(mut gm.client.deployer, contract_id, u32(vms.node_id))! + console.print_header('Zmachine result: ${machine_res}') +} + +// Helper function to create workloads +fn create_workloads(mut gm GridMachinesModel, vms GridMachinesModel) ![]Workload { + console.print_header('Creating workloads.') + + mut workloads := []grid_models.Workload{} + + // Create network workload + wg_port := gm.client.deployer.assign_wg_port(u32(vms.node_id))! 
+ workloads << create_network_workload(vms, wg_port) + + // Create machine workloads + mut public_ip_name := '' + for machine in vms.machines { + if machine.network_access.public_ip4 || machine.network_access.public_ip6 { + public_ip_name = rand.string(5).to_lower() + workloads << create_public_ip_workload(machine.network_access.public_ip4, + machine.network_access.public_ip6, public_ip_name) + } + workloads << create_zmachine_workload(machine, vms.network, gm.ssh_key, public_ip_name).to_workload( + name: machine.name + description: 'VGridClient Zmachine' + ) + } + + return workloads +} + +// Helper function to create and deploy deployment +fn create_and_deploy_deployment(mut gm GridMachinesModel, vms GridMachinesModel, workloads []Workload) !int { + console.print_header('Creating deployment.') + + mut deployment := grid_models.new_deployment( + twin_id: gm.client.deployer.twin_id + description: 'VGridClient Deployment' + workloads: workloads + signature_requirement: create_signature_requirement(gm.client.deployer.twin_id) + ) + + log_and_set_metadata(mut logger, mut deployment, 'vm', vms.name) + + console.print_header('Deploying workloads...') + contract_id := gm.client.deployer.deploy(u32(vms.node_id), mut deployment, deployment.metadata, + 0) or { + logger.error('Deployment failed: ${err}') + return err + } + + console.print_header('Deployment successful. Contract ID: ${contract_id}') + return int(contract_id) +} + +// Helper function to fetch deployment result +fn fetch_deployment_result(mut deployer grid.Deployer, contract_id int, node_id u32) !ZmachineResult { + dl := deployer.get_deployment(u64(contract_id), node_id) or { + logger.error('Failed to get deployment data: ${err}') + exit(1) + } + + return get_machine_result(dl)! 
+} + +// Helper function to create a Zmachine workload +fn create_zmachine_workload(machine MachineModel, network NetworkInfo, ssh_key string, public_ip_name string) Zmachine { + console.print_header('Creating Zmachine workload.') + return grid_models.Zmachine{ + flist: 'https://hub.grid.tf/tf-official-vms/ubuntu-24.04-latest.flist' + network: grid_models.ZmachineNetwork{ + interfaces: [ + grid_models.ZNetworkInterface{ + network: network.name + ip: network.ip_range.split('/')[0] + }, + ] + public_ip: public_ip_name + planetary: machine.network_access.planetary + mycelium: grid_models.MyceliumIP{ + network: network.name + hex_seed: rand.string(6).bytes().hex() + } + } + entrypoint: '/sbin/zinit init' + compute_capacity: grid_models.ComputeCapacity{ + cpu: u8(machine.capacity.cpu) + memory: i64(machine.capacity.memory) * 1024 * 1024 + } + env: { + 'SSH_KEY': ssh_key + } + } +} + +// Helper function to create a network workload +fn create_network_workload(gm GridMachinesModel, wg_port u32) Workload { + console.print_header('Creating network workload.') + return grid_models.Znet{ + ip_range: gm.network.ip_range + subnet: gm.network.subnet + wireguard_private_key: 'GDU+cjKrHNJS9fodzjFDzNFl5su3kJXTZ3ipPgUjOUE=' + wireguard_listen_port: u16(wg_port) + mycelium: grid_models.Mycelium{ + hex_key: rand.string(32).bytes().hex() + } + peers: [ + grid_models.Peer{ + subnet: gm.network.subnet + wireguard_public_key: '4KTvZS2KPWYfMr+GbiUUly0ANVg8jBC7xP9Bl79Z8zM=' + allowed_ips: [gm.network.subnet] + }, + ] + }.to_workload( + name: gm.network.name + description: 'VGridClient Network' + ) +} + +// Helper function to create a public IP workload +fn create_public_ip_workload(is_v4 bool, is_v6 bool, name string) Workload { + console.print_header('Creating Public IP workload.') + return grid_models.PublicIP{ + v4: is_v4 + v6: is_v6 + }.to_workload(name: name) +} + +// Helper function to create signature requirements +fn create_signature_requirement(twin_id int) SignatureRequirement { + 
console.print_header('Setting signature requirement.') + return grid_models.SignatureRequirement{ + weight_required: 1 + requests: [ + grid_models.SignatureRequest{ + twin_id: u32(twin_id) + weight: 1 + }, + ] + } +} + +// Helper function to log and set metadata +fn log_and_set_metadata(mut logger log.Log, mut deployment Deployment, key string, value string) { + console.print_header('Setting ${key} metadata.') + deployment.add_metadata(key, value) +} + +// Helper function to get the deployment result +fn get_machine_result(dl Deployment) !ZmachineResult { + for _, w in dl.workloads { + if w.type_ == grid_models.workload_types.zmachine { + res := json.decode(grid_models.ZmachineResult, w.result.data)! + return res + } + } + return error('Failed to get Zmachine workload') +} + +pub fn (mut gm GridMachinesModel) list() ![]Deployment { + mut deployments := []grid_models.Deployment{} + console.print_header('Listing active contracts.') + contracts := gm.client.contracts.get_my_active_contracts() or { + return error('Cannot list twin contracts due to: ${err}') + } + + console.print_header('Active contracts listed.') + console.print_header('Listing deployments.') + + for contract in contracts { + console.print_header('Listing deployment node ${contract.details.node_id}.') + if contract.contract_type == 'node' { + dl := gm.client.deployer.get_deployment(contract.contract_id, u32(contract.details.node_id)) or { + console.print_stderror('Cannot list twin deployment for contract ${contract.contract_id} due to: ${err}.') + continue + } + deployments << dl + console.print_header('Deployment Result: ${dl}.') + } + } + return deployments +} + +fn (mut gm GridMachinesModel) list_contract_names() ![]string { + contracts := gm.client.contracts.get_my_active_contracts()! 
+ mut names := []string{} + for contract in contracts { + res := json.decode(ContractMetaData, contract.details.deployment_data) or { + return error('Cannot decode the deployment metadata due to: ${err}') + } + names << res.name + } + return names +} + +pub fn (mut gm GridMachinesModel) delete(deployment_name string) ! { + console.print_header('Deleting deployment with name: ${deployment_name}.') + console.print_header('Listing the twin `${gm.client.deployer.twin_id}` active contracts.') + contracts := gm.client.contracts.get_my_active_contracts() or { + return error('Cannot list twin contracts due to: ${err}') + } + + console.print_header('Active contracts listed.') + + for contract in contracts { + res := json.decode(ContractMetaData, contract.details.deployment_data) or { + return error('Cannot decode the contract deployment data due to: ${err}') + } + + if res.name == deployment_name { + console.print_header('Start deleting deployment ${deployment_name}.') + gm.client.deployer.client.cancel_contract(contract.contract_id) or { + return error('Cannot delete deployment due to: ${err}') + } + console.print_header('Deployment ${deployment_name} deleted!.') + } + } +} + +// Placeholder for get operation +pub fn (mut gm GridMachinesModel) get(deployment_name string) ![]Deployment { + mut deployments := []grid_models.Deployment{} + contracts := gm.client.contracts.get_my_active_contracts() or { + return error('Cannot list twin contracts due to: ${err}') + } + + for contract in contracts { + if contract.contract_type == 'node' { + dl := gm.client.deployer.get_deployment(contract.contract_id, u32(contract.details.node_id)) or { + console.print_stderror('Cannot list twin deployment for contract ${contract.contract_id} due to: ${err}.') + continue + } + if dl.metadata.len != 0 { + res := json.decode(ContractMetaData, dl.metadata) or { + return error('Cannot decode the deployment metadata due to: ${err}') + } + if deployment_name == res.name { + deployments << dl + } + } + 
} + } + console.print_header('Deployments: ${deployments}') + return deployments +} diff --git a/lib/threefold/tfgrid3deployer/contracts.v b/lib/threefold/tfgrid3deployer/contracts.v new file mode 100644 index 00000000..4e8c1b7b --- /dev/null +++ b/lib/threefold/tfgrid3deployer/contracts.v @@ -0,0 +1,39 @@ +module tfgrid3deployer + +import freeflowuniverse.herolib.threefold.gridproxy +import freeflowuniverse.herolib.threefold.gridproxy.model as proxy_models + +@[params] +pub struct ContractGetArgs { +pub mut: + active bool = true + twin_id u64 +} + +// Retrieves all contracts (active and inactive) on the selected grid network. +// +// This function interacts with the Grid Proxy to retrieve all contracts associated +// with the twin ID of the current deployer (from GridClient). +// +// Returns: +// - An array of `gridproxy.Contract` containing contract information. +// +// Example: +// contracts := cn.get_my_contracts()! +pub fn (mut self TFDeployment) tfchain_contracts(args ContractGetArgs) ![]proxy_models.Contract { + net := resolve_network()! + args2 := gridproxy.GridProxyClientArgs{ + net: net + cache: true + } + + mut proxy := gridproxy.new(args2)! 
+ if args.active { + return proxy.get_contracts_active(args.twin_id) + } else { + params := proxy_models.ContractFilter{ + twin_id: args.twin_id + } + return proxy.get_contracts(params) + } +} diff --git a/lib/threefold/tfgrid3deployer/deployment.v b/lib/threefold/tfgrid3deployer/deployment.v new file mode 100644 index 00000000..708aa09e --- /dev/null +++ b/lib/threefold/tfgrid3deployer/deployment.v @@ -0,0 +1,555 @@ +module tfgrid3deployer + +import freeflowuniverse.herolib.threefold.grid.models as grid_models +import freeflowuniverse.herolib.threefold.grid +import freeflowuniverse.herolib.ui.console +import compress.zlib +import encoding.hex +import x.crypto.chacha20 +import crypto.sha256 +import json +import rand + +struct GridContracts { +pub mut: + name []u64 + node map[string]u64 + rent map[string]u64 +} + +@[heap] +pub struct TFDeployment { +pub mut: + name string + description string + vms []VMachine + zdbs []ZDB + webnames []WebName + network NetworkSpecs +mut: + // Set the deployed contracts on the deployment and save the full deployment to be able to delete the whole deployment when need. + contracts GridContracts + deployer &grid.Deployer @[skip; str: skip] + kvstore KVStoreFS @[skip; str: skip] +} + +fn get_deployer() !grid.Deployer { + mut grid_client := get()! + + network := match grid_client.network { + .dev { grid.ChainNetwork.dev } + .qa { grid.ChainNetwork.qa } + .test { grid.ChainNetwork.test } + .main { grid.ChainNetwork.main } + } + + return grid.new_deployer(grid_client.mnemonic, network)! +} + +pub fn new_deployment(name string) !TFDeployment { + kvstore := KVStoreFS{} + + if _ := kvstore.get(name) { + return error('Deployment with the same name is already exist.') + } + + deployer := get_deployer()! + return TFDeployment{ + name: name + kvstore: KVStoreFS{} + deployer: &deployer + } +} + +pub fn get_deployment(name string) !TFDeployment { + mut deployer := get_deployer()! 
+ mut dl := TFDeployment{ + name: name + kvstore: KVStoreFS{} + deployer: &deployer + } + + dl.load() or { return error('Faild to load the deployment due to: ${err}') } + + return dl +} + +pub fn (mut self TFDeployment) deploy() ! { + console.print_header('Starting deployment process.') + self.set_nodes()! + old_deployment := self.list_deployments()! + + println('old_deployment ${old_deployment}') + + mut setup := new_deployment_setup(self.network, self.vms, self.zdbs, self.webnames, + old_deployment, mut self.deployer)! + + // Check we are in which state + self.finalize_deployment(setup)! + self.save()! +} + +fn (mut self TFDeployment) set_nodes() ! { + for mut vm in self.vms { + mut node_ids := []u64{} + + for node_id in vm.requirements.nodes { + node_ids << u64(node_id) + } + + nodes := filter_nodes( + node_ids: node_ids + healthy: true + free_mru: convert_to_gigabytes(u64(vm.requirements.memory)) + total_cru: u64(vm.requirements.cpu) + free_ips: if vm.requirements.public_ip4 { u64(1) } else { none } + has_ipv6: if vm.requirements.public_ip6 { vm.requirements.public_ip6 } else { none } + status: 'up' + features: if vm.requirements.public_ip4 { [] } else { ['zmachine'] } + )! + + if nodes.len == 0 { + if node_ids.len != 0 { + return error("The provided vm nodes ${node_ids} don't have enough resources.") + } + return error('Requested the Grid Proxy and no nodes found.') + } + idx := rand.u32() % u32(nodes.len) + // println('chodes node: ${}') + vm.node_id = u32(nodes[idx].node_id) + } + + for mut zdb in self.zdbs { + size := convert_to_gigabytes(u64(zdb.requirements.size)) + nodes := filter_nodes( + free_sru: size + status: 'up' + healthy: true + node_id: zdb.requirements.node_id + )! 
+ + if nodes.len == 0 { + return error('Requested the Grid Proxy and no nodes found.') + } + + zdb.node_id = u32(nodes[0].node_id) + } + + for mut webname in self.webnames { + nodes := filter_nodes( + domain: true + status: 'up' + healthy: true + node_id: webname.requirements.node_id + features: ['zmachine'] + )! + + if nodes.len == 0 { + return error('Requested the Grid Proxy and no nodes found.') + } + + webname.node_id = u32(nodes[0].node_id) + } +} + +fn (mut self TFDeployment) finalize_deployment(setup DeploymentSetup) ! { + mut new_deployments := map[u32]&grid_models.Deployment{} + old_deployments := self.list_deployments()! + mut current_contracts := []u64{} + mut create_deployments := map[u32]&grid_models.Deployment{} + + for node_id, workloads in setup.workloads { + console.print_header('Creating deployment on node ${node_id}.') + mut deployment := grid_models.new_deployment( + twin_id: setup.deployer.twin_id + description: 'VGridClient Deployment' + workloads: workloads + signature_requirement: grid_models.SignatureRequirement{ + weight_required: 1 + requests: [ + grid_models.SignatureRequest{ + twin_id: u32(setup.deployer.twin_id) + weight: 1 + }, + ] + } + ) + + if d := old_deployments[node_id] { + deployment.version = d.version + deployment.contract_id = d.contract_id + current_contracts << d.contract_id + } else { + create_deployments[node_id] = &deployment + } + + deployment.add_metadata('VGridClient/Deployment', self.name) + new_deployments[node_id] = &deployment + } + + mut create_name_contracts := []string{} + mut delete_contracts := []u64{} + + mut returned_deployments := map[u32]&grid_models.Deployment{} + mut name_contracts_map := setup.name_contract_map.clone() + + // Create stage. 
+ for contract_name, contract_id in setup.name_contract_map { + if contract_id == 0 { + create_name_contracts << contract_name + } + } + + if create_name_contracts.len > 0 || create_deployments.len > 0 { + console.print_header('Batch deploying the deployment') + created_name_contracts_map, ret_dls := self.deployer.batch_deploy(create_name_contracts, mut + create_deployments, none)! + + for node_id, deployment in ret_dls { + returned_deployments[node_id] = deployment + } + + for contract_name, contract_id in created_name_contracts_map { + name_contracts_map[contract_name] = contract_id + } + } + + // Cancel stage. + for contract_id in self.contracts.name { + if !setup.name_contract_map.values().contains(contract_id) { + delete_contracts << contract_id + } + } + + for node_id, dl in old_deployments { + if _ := new_deployments[node_id] { + continue + } + delete_contracts << dl.contract_id + } + + if delete_contracts.len > 0 { + self.deployer.client.batch_cancel_contracts(delete_contracts)! + } + + // Update stage. + for node_id, mut dl in new_deployments { + mut deployment := *dl + if _ := old_deployments[node_id] { + self.deployer.update_deployment(node_id, mut deployment, dl.metadata)! + returned_deployments[node_id] = deployment + } + } + + self.update_state(name_contracts_map, returned_deployments)! +} + +fn (mut self TFDeployment) update_state(name_contracts_map map[string]u64, dls map[u32]&grid_models.Deployment) ! 
{ + mut workloads := map[u32]map[string]&grid_models.Workload{} + + for node_id, deployment in dls { + workloads[node_id] = map[string]&grid_models.Workload{} + for id, _ in deployment.workloads { + workloads[node_id][deployment.workloads[id].name] = &deployment.workloads[id] + } + } + + self.contracts = GridContracts{} + for _, contract_id in name_contracts_map { + self.contracts.name << contract_id + } + + for node_id, dl in dls { + self.contracts.node['${node_id}'] = dl.contract_id + } + + for mut vm in self.vms { + vm_workload := workloads[vm.node_id][vm.requirements.name] + res := json.decode(grid_models.ZmachineResult, vm_workload.result.data)! + vm.mycelium_ip = res.mycelium_ip + vm.planetary_ip = res.planetary_ip + vm.wireguard_ip = res.ip + vm.contract_id = dls[vm.node_id].contract_id + + if vm.requirements.public_ip4 || vm.requirements.public_ip6 { + ip_workload := workloads[vm.node_id]['${vm.requirements.name}_pubip'] + ip_res := json.decode(grid_models.PublicIPResult, ip_workload.result.data)! + vm.public_ip4 = ip_res.ip + vm.public_ip6 = ip_res.ip6 + } + } + + for mut zdb in self.zdbs { + zdb_workload := workloads[zdb.node_id][zdb.requirements.name] + res := json.decode(grid_models.ZdbResult, zdb_workload.result.data)! + zdb.ips = res.ips + zdb.namespace = res.namespace + zdb.port = res.port + zdb.contract_id = dls[zdb.node_id].contract_id + } + + for mut wn in self.webnames { + wn_workload := workloads[wn.node_id][wn.requirements.name] + res := json.decode(grid_models.GatewayProxyResult, wn_workload.result.data)! 
+ wn.fqdn = res.fqdn + wn.node_contract_id = dls[wn.node_id].contract_id + wn.name_contract_id = name_contracts_map[wn.requirements.name] + } +} + +pub fn (mut self TFDeployment) vm_get(vm_name string) !VMachine { + console.print_header('Getting ${vm_name} VM.') + for vmachine in self.vms { + if vmachine.requirements.name == vm_name { + return vmachine + } + } + return error('Machine does not exist.') +} + +pub fn (mut self TFDeployment) zdb_get(zdb_name string) !ZDB { + console.print_header('Getting ${zdb_name} Zdb.') + for zdb in self.zdbs { + if zdb.requirements.name == zdb_name { + return zdb + } + } + return error('Zdb does not exist.') +} + +pub fn (mut self TFDeployment) webname_get(wn_name string) !WebName { + console.print_header('Getting ${wn_name} webname.') + for wbn in self.webnames { + if wbn.requirements.name == wn_name { + return wbn + } + } + return error('Webname does not exist.') +} + +pub fn (mut self TFDeployment) load() ! { + value := self.kvstore.get(self.name)! + decrypted := self.decrypt(value)! + decompressed := self.decompress(decrypted)! + self.decode(decompressed)! +} + +fn (mut self TFDeployment) save() ! { + encoded_data := self.encode()! + self.kvstore.set(self.name, encoded_data)! +} + +fn (self TFDeployment) compress(data []u8) ![]u8 { + return zlib.compress(data) or { error('Cannot compress the data due to: ${err}') } +} + +fn (self TFDeployment) decompress(data []u8) ![]u8 { + return zlib.decompress(data) or { error('Cannot decompress the data due to: ${err}') } +} + +fn (self TFDeployment) encrypt(compressed []u8) ![]u8 { + key_hashed := sha256.hexhash(self.deployer.mnemonics) + name_hashed := sha256.hexhash(self.name) + key := hex.decode(key_hashed)! 
+ nonce := hex.decode(name_hashed)![..12] + encrypted := chacha20.encrypt(key, nonce, compressed) or { + return error('Cannot encrypt the data due to: ${err}') + } + return encrypted +} + +fn (self TFDeployment) decrypt(data []u8) ![]u8 { + key_hashed := sha256.hexhash(self.deployer.mnemonics) + name_hashed := sha256.hexhash(self.name) + key := hex.decode(key_hashed)! + nonce := hex.decode(name_hashed)![..12] + + compressed := chacha20.decrypt(key, nonce, data) or { + return error('Cannot decrypt the data due to: ${err}') + } + return compressed +} + +fn (self TFDeployment) encode() ![]u8 { + // TODO: Change to 'encoder' + + data := json.encode(self).bytes() + + compressed := self.compress(data)! + encrypted := self.encrypt(compressed)! + return encrypted +} + +fn (mut self TFDeployment) decode(data []u8) ! { + obj := json.decode(TFDeployment, data.bytestr())! + self.vms = obj.vms + self.zdbs = obj.zdbs + self.webnames = obj.webnames + self.contracts = obj.contracts + self.network = obj.network + self.name = obj.name + self.description = obj.description +} + +// Set a new machine on the deployment. +pub fn (mut self TFDeployment) add_machine(requirements VMRequirements) { + self.vms << VMachine{ + requirements: requirements + } +} + +pub fn (mut self TFDeployment) remove_machine(name string) ! { + l := self.vms.len + for id, vm in self.vms { + if vm.requirements.name == name { + self.vms[id], self.vms[l - 1] = self.vms[l - 1], self.vms[id] + self.vms.delete_last() + return + } + } + + return error('vm with name ${name} is not found') +} + +// Set a new zdb on the deployment. +pub fn (mut self TFDeployment) add_zdb(zdb ZDBRequirements) { + self.zdbs << ZDB{ + requirements: zdb + } +} + +pub fn (mut self TFDeployment) remove_zdb(name string) ! 
{ + l := self.zdbs.len + for id, zdb in self.zdbs { + if zdb.requirements.name == name { + self.zdbs[id], self.zdbs[l - 1] = self.zdbs[l - 1], self.zdbs[id] + self.zdbs.delete_last() + return + } + } + + return error('zdb with name ${name} is not found') +} + +// Set a new webname on the deployment. +pub fn (mut self TFDeployment) add_webname(requirements WebNameRequirements) { + self.webnames << WebName{ + requirements: requirements + } +} + +pub fn (mut self TFDeployment) remove_webname(name string) ! { + l := self.webnames.len + for id, wn in self.webnames { + if wn.requirements.name == name { + self.webnames[id], self.webnames[l - 1] = self.webnames[l - 1], self.webnames[id] + self.webnames.delete_last() + return + } + } + + return error('webname with name ${name} is not found') +} + +// lists deployments used with vms, zdbs, and webnames +pub fn (mut self TFDeployment) list_deployments() !map[u32]grid_models.Deployment { + mut threads := []thread !grid_models.Deployment{} + mut dls := map[u32]grid_models.Deployment{} + mut contract_node := map[u64]u32{} + for node_id, contract_id in self.contracts.node { + contract_node[contract_id] = node_id.u32() + threads << spawn self.deployer.get_deployment(contract_id, node_id.u32()) + } + + for th in threads { + dl := th.wait()! + node_id := contract_node[dl.contract_id] + dls[node_id] = dl + } + + return dls +} + +// fn (mut self TFDeployment) vm_delete(vm_name string) ! { +// // delete myself, check on TFChain that deletion was indeed done +// vm := self.vm_get(vm_name)! + +// // get all deployments +// mut dls := self.list_deployments()! + +// // load network +// mut network_handler := NetworkHandler{ +// deployer: self.deployer +// } + +// // network_handler.load_network_state(dls)! + +// // remove vm workload +// mut vm_dl := dls[vm.node_id] +// mut public_ip_name := '' +// for idx, workload in vm_dl.workloads { +// if workload.name == vm_name { +// zmachine := json.decode(grid_models.Zmachine, workload.data)! 
+// public_ip_name = zmachine.network.public_ip +// vm_dl.workloads[idx], vm_dl.workloads[vm_dl.workloads.len - 1] = vm_dl.workloads[vm_dl.workloads.len - 1], vm_dl.workloads[idx] +// vm_dl.workloads.delete_last() +// break +// } +// } + +// for idx, workload in vm_dl.workloads { +// if workload.name == public_ip_name { +// vm_dl.workloads[idx], vm_dl.workloads[vm_dl.workloads.len - 1] = vm_dl.workloads[vm_dl.workloads.len - 1], vm_dl.workloads[idx] +// vm_dl.workloads.delete_last() +// break +// } +// } + +// // decide if we want to remove the node +// if vm_dl.workloads.len == 1 && vm_dl.workloads[0].type_ == grid_models.workload_types.network { +// mut ipv4_nodes := 0 +// for _, endpoint in network_handler.endpoints { +// if endpoint.split('.').len == 4 { +// ipv4_nodes += 1 +// } +// } + +// if network_handler.public_node == vm.node_id && (ipv4_nodes > 1 +// || network_handler.hidden_nodes.len == 0 +// || (network_handler.nodes.len == 2 && network_handler.hidden_nodes.len == 1) +// || (ipv4_nodes == 1 && network_handler.hidden_nodes.len > 0)) { +// // we can remove the node +// dls.delete(vm.node_id) +// network_handler.remove_node(vm.node_id)! +// } +// } + +// // use network handler to prepare network +// network_workloads := network_handler.generate_workloads(self.dl_versions)! + +// // replace deloyments network workloads with the ones coming from network handler +// for node_id, mut dl in dls { +// network_wl := network_workloads[node_id] or { continue } +// for id, _ in dl.workloads { +// if dl.workloads[id].name == network_wl.name { +// dl.workloads[id] = network_wl +// } +// } +// } + +// // TODO: update deployments +// /* +// what issues we face: +// 1. Delete the network workload if not needed +// 2. Remove the vm node peer from the other deployments if contract is deleted +// 3. 
Deploy an access node if the deleted contract was an access node + +// node1 := dl -> hidden +// node2 := dl -> hidden +// node3 := dl -> public // will delete it, we need to deploy another access node for node1 and node2 + +// node1 := dl -> public // Assign node1 instead of node3 and delete node1 +// node2 := dl -> hidden +// node3 := dl -> public // will delete it, we need to deploy another access node for node1 and node2 +// */ +// } diff --git a/lib/threefold/tfgrid3deployer/deployment_setup.v b/lib/threefold/tfgrid3deployer/deployment_setup.v new file mode 100644 index 00000000..2c8e9ec7 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/deployment_setup.v @@ -0,0 +1,290 @@ +// This file should only contains any functions, helpers that related to the deployment setup. +module tfgrid3deployer + +import freeflowuniverse.herolib.threefold.grid.models as grid_models +import freeflowuniverse.herolib.threefold.grid +import freeflowuniverse.herolib.ui.console +import rand + +// a struct that prepare the setup for the deployment +struct DeploymentSetup { +mut: + workloads map[u32][]grid_models.Workload + network_handler NetworkHandler + + deployer &grid.Deployer @[skip; str: skip] + contracts_map map[u32]u64 + name_contract_map map[string]u64 +} + +// Sets up a new deployment with network, VM, and ZDB workloads. 
+// Parameters: +// - network_specs: NetworkSpecs struct containing network setup specifications +// - vms: Array of VMachine instances representing the virtual machines to set up workloads for +// - zdbs: Array of ZDB objects containing ZDB requirements +// - webnames: Array of WebName instances representing web names +// - deployer: Reference to the grid.Deployer for deployment operations +// Modifies: +// - dls: Modified DeploymentSetup struct with network, VM, and ZDB workloads set up +// Returns: +// - A DeploymentSetup with network, VM, ZDB, and web name workloads prepared +fn new_deployment_setup(network_specs NetworkSpecs, vms []VMachine, zdbs []ZDB, webnames []WebName, old_deployments map[u32]grid_models.Deployment, mut deployer grid.Deployer) !DeploymentSetup { + mut dls := DeploymentSetup{ + deployer: deployer + network_handler: NetworkHandler{ + deployer: deployer + network_name: network_specs.name + mycelium: network_specs.mycelium + ip_range: network_specs.ip_range + } + } + + dls.setup_network_workloads(vms, old_deployments)! + dls.setup_vm_workloads(vms)! + dls.setup_zdb_workloads(zdbs)! + dls.setup_webname_workloads(webnames)! + dls.match_versions(old_deployments) + return dls +} + +fn (mut self DeploymentSetup) match_versions(old_dls map[u32]grid_models.Deployment) { + for node_id, dl in old_dls { + mut wl_versions := map[string]u32{} + for wl in dl.workloads { + wl_versions['${wl.name}:${wl.type_}'] = wl.version + } + + for mut wl in self.workloads[node_id] { + wl.version = wl_versions['${wl.name}:${wl.type_}'] + } + } +} + +// Sets up network workloads for the deployment setup. +// Parameters: +// - vms: Array of VMachine instances representing the virtual machines to set up workloads for +// Modifies: +// - st: Modified DeploymentSetup struct with network workloads set up +// Returns: +// - None +fn (mut st DeploymentSetup) setup_network_workloads(vms []VMachine, old_deployments map[u32]grid_models.Deployment) ! { + st.network_handler.load_network_state(old_deployments)!
+ st.network_handler.create_network(vms)! + println('Network handler: ${st.network_handler}') + data := st.network_handler.generate_workloads()! + + for node_id, workload in data { + st.workloads[node_id] << workload + } +} + +// Sets up VM workloads for the deployment setup. +// +// This method iterates over a list of VMachines, processes each machine's requirements, +// sets up public IP if required, creates a Zmachine workload, and updates the used IP octets map. +// +// Parameters: +// - machines: Array of VMachine instances representing the virtual machines to set up workloads for +// Modifies: +// - self: Modified DeploymentSetup struct with VM workloads set up +// - used_ip_octets: Map of u32 to arrays of u8 representing used IP octets +// Returns: +// - None +fn (mut self DeploymentSetup) setup_vm_workloads(machines []VMachine) ! { + mut used_ip_octets := map[u32][]u8{} + for machine in machines { + mut req := machine.requirements + mut public_ip_name := '' + + if req.public_ip4 || req.public_ip6 { + public_ip_name = '${req.name}_pubip' + self.set_public_ip_workload(machine.node_id, public_ip_name, req)! + } + + console.print_header('Creating Zmachine workload.') + self.set_zmachine_workload(machine, public_ip_name, mut used_ip_octets)! + } +} + +// Sets up Zero-DB (ZDB) workloads for deployment. +// +// This function takes a list of ZDB results, processes each result into a ZDB workload model, +// assigns it to a healthy node, and then adds it to the deployment workloads. +// +// `zdbs`: A list of ZDB objects containing the ZDB requirements. +// +// Each ZDB is processed to convert the requirements into a grid workload and associated with a healthy node. +fn (mut self DeploymentSetup) setup_zdb_workloads(zdbs []ZDB) ! 
{ + for zdb in zdbs { + // Retrieve ZDB requirements from the result + mut req := zdb.requirements + console.print_header('Creating a ZDB workload for `${req.name}` DB.') + + // Create the Zdb model with the size converted to bytes + zdb_model := grid_models.Zdb{ + size: convert_to_gigabytes(u64(req.size)) // Convert size from MB to bytes + mode: req.mode + public: req.public + password: req.password + } + + // Generate a workload based on the Zdb model + zdb_workload := zdb_model.to_workload( + name: req.name + description: req.description + ) + + // Append the workload to the node's workload list in the deployment setup + self.workloads[zdb.node_id] << zdb_workload + } +} + +// Sets up web name workloads for the deployment setup. +// +// This method processes each WebName instance in the provided array, sets up gateway name proxies based on the requirements, +// and adds the gateway name proxy workload to the deployment workloads. It also updates the name contract map accordingly. +// +// Parameters: +// - webnames: Array of WebName instances representing web names to set up workloads for +// Modifies: +// - self: Modified DeploymentSetup struct with web name workloads set up +// Returns: +// - None +fn (mut self DeploymentSetup) setup_webname_workloads(webnames []WebName) ! { + for wn in webnames { + req := wn.requirements + + gw_name := if req.name == '' { + rand.string(5).to_lower() + } else { + req.name + } + + gw := grid_models.GatewayNameProxy{ + tls_passthrough: req.tls_passthrough + backends: [req.backend] + name: gw_name + } + + self.workloads[wn.node_id] << gw.to_workload( + name: gw_name + ) + self.name_contract_map[gw_name] = wn.name_contract_id + } +} + +// Sets up a Zmachine workload for the deployment setup. +// +// This method prepares a Zmachine workload based on the provided VMachine, assigns private and public IPs, +// sets up Mycelium IP if required, and configures compute capacity and environment variables. 
+// +// Parameters: +// - vmachine: VMachine instance representing the virtual machine for which the workload is being set up +// - public_ip_name: Name of the public IP to assign to the Zmachine +// - used_ip_octets: Map of u32 to arrays of u8 representing used IP octets +// Throws: +// - Error if grid client is not available or if there are issues setting up the workload +fn (mut self DeploymentSetup) set_zmachine_workload(vmachine VMachine, public_ip_name string, mut used_ip_octets map[u32][]u8) ! { + mut grid_client := get()! + mut env_map := vmachine.requirements.env.clone() + env_map['SSH_KEY'] = grid_client.ssh_key + + zmachine_workload := grid_models.Zmachine{ + network: grid_models.ZmachineNetwork{ + interfaces: [ + grid_models.ZNetworkInterface{ + network: self.network_handler.network_name + ip: if vmachine.wireguard_ip.len > 0 { + used_ip_octets[vmachine.node_id] << vmachine.wireguard_ip.all_after_last('.').u8() + vmachine.wireguard_ip + } else { + self.assign_private_ip(vmachine.node_id, mut used_ip_octets)! + } + }, + ] + public_ip: public_ip_name + planetary: vmachine.requirements.planetary + mycelium: if mycelium := vmachine.requirements.mycelium { + grid_models.MyceliumIP{ + network: self.network_handler.network_name + hex_seed: mycelium.hex_seed + } + } else { + none + } + } + size: convert_to_gigabytes(u64(vmachine.requirements.size)) + flist: vmachine.requirements.flist + entrypoint: vmachine.requirements.entrypoint + compute_capacity: grid_models.ComputeCapacity{ + cpu: u8(vmachine.requirements.cpu) + memory: i64(convert_to_gigabytes(u64(vmachine.requirements.memory))) + } + env: env_map + }.to_workload( + name: vmachine.requirements.name + description: vmachine.requirements.description + ) + + self.workloads[vmachine.node_id] << zmachine_workload +} + +// Sets up a public IP workload for a specific node. 
+// +// This method creates a PublicIP workload based on the provided VMRequirements, +// assigns IPv4 and IPv6 addresses, and adds the workload to the DeploymentSetup workloads for the specified node. +// +// Parameters: +// - node_id: u32 representing the node ID where the public IP workload will be set up +// - public_ip_name: Name of the public IP to assign to the workload +fn (mut self DeploymentSetup) set_public_ip_workload(node_id u32, public_ip_name string, vm VMRequirements) ! { + // Add the public IP workload + console.print_header('Creating Public IP workload.') + public_ip_workload := grid_models.PublicIP{ + v4: vm.public_ip4 + v6: vm.public_ip6 + }.to_workload(name: public_ip_name) + + self.workloads[node_id] << public_ip_workload +} + +// Assigns a private IP to a specified node based on the provided node ID and used IP octets map. +// +// Parameters: +// - node_id: u32 representing the node ID to assign the private IP to +// - used_ip_octets: Map of u32 to arrays of u8 representing the used IP octets for each node +// Returns: +// - string: The assigned private IP address +// Throws: +// - Error if failed to assign a private IP in the subnet +fn (mut self DeploymentSetup) assign_private_ip(node_id u32, mut used_ip_octets map[u32][]u8) !string { + console.print_header('Assign private IP to node ${node_id}.') + ip := self.network_handler.wg_subnet[node_id].split('/')[0] + mut split_ip := ip.split('.') + last_octet := ip.split('.').last().u8() + for candidate := last_octet + 2; candidate < 255; candidate += 1 { + if candidate in used_ip_octets[node_id] { + continue + } + split_ip[3] = '${candidate}' + used_ip_octets[node_id] << candidate + ip_ := split_ip.join('.') + console.print_header('Private IP Assigned: ${ip_}.') + return ip_ + } + return error('failed to assign private IP in subnet: ${self.network_handler.wg_subnet[node_id]}') +} + +/* + TODO's: + # TODO: + - add action methods e.g. delete, ping... 
+ - cache node and user twin ids + - change the encoding/decoding behavior + + # Done: + - return result after deployment + - use batch calls for substrate + - send deployments to nodes concurrently + - add roll back behavior +*/ diff --git a/lib/threefold/tfgrid3deployer/kvstore.v b/lib/threefold/tfgrid3deployer/kvstore.v new file mode 100644 index 00000000..cbd50622 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/kvstore.v @@ -0,0 +1,29 @@ +module tfgrid3deployer + +import freeflowuniverse.herolib.core.base as context + +// Will be changed when we support the logic of the TFChain one +pub struct KVStoreFS {} + +fn (kvs KVStoreFS) set(key string, data []u8) ! { + // set in context + mut mycontext := context.context_new()! + mut session := mycontext.session_new(name: 'deployer')! + mut db := session.db_get()! + db.set(key: key, valueb: data) or { return error('Cannot set the key due to: ${err}') } +} + +fn (kvs KVStoreFS) get(key string) ![]u8 { + mut mycontext := context.context_new()! + mut session := mycontext.session_new(name: 'deployer')! + mut db := session.db_get()! + value := db.get(key: key) or { return error('Cannot get value of key ${key} due to: ${err}') } + if value.len == 0 { + return error('The value is empty.') + } + + return value.bytes() +} + +fn (kvs KVStoreFS) delete(key string) !
{ +} diff --git a/lib/threefold/tfgrid3deployer/network.v b/lib/threefold/tfgrid3deployer/network.v new file mode 100644 index 00000000..b5863bb2 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/network.v @@ -0,0 +1,324 @@ +module tfgrid3deployer + +import freeflowuniverse.herolib.threefold.grid.models as grid_models +import freeflowuniverse.herolib.threefold.gridproxy +import freeflowuniverse.herolib.threefold.grid +import freeflowuniverse.herolib.ui.console +import json +import rand + +// NetworkInfo struct to represent network details +pub struct NetworkSpecs { +pub mut: + name string = 'net' + rand.string(5) + ip_range string = '10.10.0.0/16' + mycelium string = rand.hex(64) +} + +struct NetworkHandler { +mut: + network_name string + nodes []u32 + ip_range string + wg_ports map[u32]u16 + wg_keys map[u32][]string + wg_subnet map[u32]string + endpoints map[u32]string + public_node u32 + hidden_nodes []u32 + none_accessible_ip_ranges []string + mycelium string + + deployer &grid.Deployer @[skip; str: skip] +} + +// TODO: maybe rename to fill_network or something similar +fn (mut self NetworkHandler) create_network(vmachines []VMachine) ! { + // Set nodes + self.nodes = [] + + for vmachine in vmachines { + if !self.nodes.contains(vmachine.node_id) { + self.nodes << vmachine.node_id + } + } + + console.print_header('Loaded nodes: ${self.nodes}.') + self.setup_wireguard_data()! + self.setup_access_node()! 
+} + +fn (mut self NetworkHandler) generate_workload(node_id u32, peers []grid_models.Peer, mycleium_hex_key string) !grid_models.Workload { + mut network_workload := grid_models.Znet{ + ip_range: self.ip_range + subnet: self.wg_subnet[node_id] + wireguard_private_key: self.wg_keys[node_id][0] + wireguard_listen_port: self.wg_ports[node_id] + peers: peers + mycelium: grid_models.Mycelium{ + hex_key: mycleium_hex_key + peers: [] + } + } + + return network_workload.to_workload( + name: self.network_name + description: 'VGridClient network workload' + ) +} + +fn (mut self NetworkHandler) prepare_hidden_node_peers(node_id u32) ![]grid_models.Peer { + mut peers := []grid_models.Peer{} + if self.public_node != 0 { + peers << grid_models.Peer{ + subnet: self.wg_subnet[self.public_node] + wireguard_public_key: self.wg_keys[self.public_node][1] + allowed_ips: [self.ip_range, '100.64.0.0/16'] + endpoint: '${self.endpoints[self.public_node]}:${self.wg_ports[self.public_node]}' + } + } + return peers +} + +fn (mut self NetworkHandler) setup_access_node() ! { + // Case 1: Deployment on 28 which is hidden node + // - Setup access node + // Case 2: Deployment on 11 which is public node + // - Already have the access node + // Case 3: if the saved state has already public node. + // - Check the new deployment if its node is hidden take the saved one + // - if the access node is already set, that means we have set its values e.g. the wireguard port, keys + + if self.hidden_nodes.len < 1 || self.nodes.len == 1 { + self.public_node = 0 + return + } + + if self.public_node != 0 { + if !self.nodes.contains(self.public_node) { + self.nodes << self.public_node + } + return + } + + /* + - In this case a public node should be assigned. + - We need to store it somewhere to inform the user that the deployment has one more contract on another node, + also delete that contract when delete the full deployment. + - Assign the public node with the new node id. 
+ */ + console.print_header('No public nodes found based on your specs.') + console.print_header('Requesting the Proxy to assign a public node.') + + mut myfilter := gridproxy.nodefilter()! + myfilter.ipv4 = true // Only consider nodes with IPv4 + myfilter.status = 'up' + myfilter.healthy = true + + nodes := filter_nodes(myfilter)! + access_node := nodes[0] + + self.public_node = u32(access_node.node_id) + console.print_header('Public node ${self.public_node}') + + self.nodes << self.public_node + + wg_port := self.deployer.assign_wg_port(self.public_node)! + keys := self.deployer.client.generate_wg_priv_key()! // The first index will be the private. + mut parts := self.ip_range.split('/')[0].split('.') + parts[2] = '${self.nodes.len + 2}' + subnet := parts.join('.') + '/24' + + self.wg_ports[self.public_node] = wg_port + self.wg_keys[self.public_node] = keys + self.wg_subnet[self.public_node] = subnet + self.endpoints[self.public_node] = access_node.public_config.ipv4.split('/')[0] +} + +fn (mut self NetworkHandler) setup_wireguard_data() ! 
{ + // TODO: We need to set the extra node + console.print_header('Setting up network workload.') + self.hidden_nodes, self.none_accessible_ip_ranges = [], [] + + for node_id in self.nodes { + // TODO: Check if there values don't re-generate + mut public_config := self.deployer.get_node_pub_config(node_id) or { + if err.msg().contains('no public configuration') { + grid_models.PublicConfig{} + } else { + return error('Failed to get node public config: ${err}') + } + } + + if _ := self.wg_ports[node_id] { + // The node already exists + if public_config.ipv4.len != 0 { + self.endpoints[node_id] = public_config.ipv4.split('/')[0] + if self.public_node == 0 { + self.public_node = node_id + } + } else if public_config.ipv6.len != 0 { + self.endpoints[node_id] = public_config.ipv6.split('/')[0] + } else { + self.hidden_nodes << node_id + self.none_accessible_ip_ranges << self.wg_subnet[node_id] + self.none_accessible_ip_ranges << wireguard_routing_ip(self.wg_subnet[node_id]) + } + + continue + } + + self.wg_ports[node_id] = self.deployer.assign_wg_port(node_id)! + console.print_header('Assign Wireguard port for node ${node_id}.') + + console.print_header('Generate Wireguard keys for node ${node_id}.') + self.wg_keys[node_id] = self.deployer.client.generate_wg_priv_key()! + console.print_header('Wireguard keys for node ${node_id} are ${self.wg_keys[node_id]}.') + + console.print_header('Calculate subnet for node ${node_id}.') + self.wg_subnet[node_id] = self.calculate_subnet()! 
+ console.print_header('Node ${node_id} subnet is ${self.wg_subnet[node_id]}.') + + console.print_header('Node ${node_id} public config ${public_config}.') + + if public_config.ipv4.len != 0 { + self.endpoints[node_id] = public_config.ipv4.split('/')[0] + self.public_node = node_id + } else if public_config.ipv6.len != 0 { + self.endpoints[node_id] = public_config.ipv6.split('/')[0] + } else { + self.hidden_nodes << node_id + self.none_accessible_ip_ranges << self.wg_subnet[node_id] + self.none_accessible_ip_ranges << wireguard_routing_ip(self.wg_subnet[node_id]) + } + } +} + +fn (mut self NetworkHandler) prepare_public_node_peers(node_id u32) ![]grid_models.Peer { + mut peers := []grid_models.Peer{} + for peer_id in self.nodes { + if peer_id in self.hidden_nodes || peer_id == node_id { + continue + } + + subnet := self.wg_subnet[peer_id] + mut allowed_ips := [subnet, wireguard_routing_ip(subnet)] + + if peer_id == self.public_node { + allowed_ips << self.none_accessible_ip_ranges + } + + peers << grid_models.Peer{ + subnet: subnet + wireguard_public_key: self.wg_keys[peer_id][1] + allowed_ips: allowed_ips + endpoint: '${self.endpoints[peer_id]}:${self.wg_ports[peer_id]}' + } + } + + if node_id == self.public_node { + for hidden_node_id in self.hidden_nodes { + subnet := self.wg_subnet[hidden_node_id] + routing_ip := wireguard_routing_ip(subnet) + + peers << grid_models.Peer{ + subnet: subnet + wireguard_public_key: self.wg_keys[hidden_node_id][1] + allowed_ips: [subnet, routing_ip] + endpoint: '' + } + } + } + + return peers +} + +fn (mut self NetworkHandler) calculate_subnet() !string { + mut parts := self.ip_range.split('/')[0].split('.') + for i := 2; i <= 255; i += 1 { + parts[2] = '${i}' + candidate := parts.join('.') + '/24' + if !self.wg_subnet.values().contains(candidate) { + return candidate + } + } + + return error('failed to calcuate subnet') +} + +fn (mut self NetworkHandler) load_network_state(dls map[u32]grid_models.Deployment) ! 
{ + // load network from deployments + + mut network_name := '' + mut subnet_node := map[string]u32{} + mut subnet_to_endpoint := map[string]string{} + for node_id, dl in dls { + mut znet := grid_models.Znet{} + for wl in dl.workloads { + network_name = wl.name + if wl.type_ == grid_models.workload_types.network { + znet = json.decode(grid_models.Znet, wl.data)! + break + } + } + + if znet.subnet == '' { + // deployment didn't have a network workload. skip.. + continue + } + + self.network_name = network_name + self.nodes << node_id + self.ip_range = znet.ip_range + self.wg_ports[node_id] = znet.wireguard_listen_port + self.wg_keys[node_id] = [znet.wireguard_private_key, + self.deployer.client.generate_wg_public_key(znet.wireguard_private_key)!] + self.wg_subnet[node_id] = znet.subnet + self.mycelium = if myclelium := znet.mycelium { myclelium.hex_key } else { '' } + subnet_node[znet.subnet] = node_id + for peer in znet.peers { + subnet_to_endpoint[peer.subnet] = peer.endpoint + + if peer.endpoint == '' { + // current node is the access node + self.public_node = node_id + } + } + } + + for subnet, endpoint in subnet_to_endpoint { + node_id := subnet_node[subnet] + if endpoint == '' { + self.hidden_nodes << node_id + continue + } + self.endpoints[node_id] = endpoint.all_before_last(':').trim('[]') + } + + for node_id in self.hidden_nodes { + self.none_accessible_ip_ranges << self.wg_subnet[node_id] + self.none_accessible_ip_ranges << wireguard_routing_ip(self.wg_subnet[node_id]) + } +} + +fn (mut self NetworkHandler) generate_workloads() !map[u32]grid_models.Workload { + mut workloads := map[u32]grid_models.Workload{} + for node_id in self.nodes { + if node_id in self.hidden_nodes { + mut peers := self.prepare_hidden_node_peers(node_id)! + workloads[node_id] = self.generate_workload(node_id, peers, self.mycelium)! + continue + } + + mut peers := self.prepare_public_node_peers(node_id)! + workloads[node_id] = self.generate_workload(node_id, peers, self.mycelium)! 
+ } + + return workloads +} + +fn (mut n NetworkHandler) remove_node(node_id u32) ! { +} + +fn (mut n NetworkHandler) add_node() ! { +} diff --git a/lib/threefold/tfgrid3deployer/readme.md b/lib/threefold/tfgrid3deployer/readme.md new file mode 100644 index 00000000..adf4c325 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/readme.md @@ -0,0 +1,27 @@ +# tfgrid3deployer + +To get started + +```vlang + + + +import freeflowuniverse.herolib.clients. tfgrid3deployer + +mut client:= tfgrid3deployer.get()! + +client... + + + + +``` + +## example heroscript + +```hero +!!tfgrid3deployer.configure + secret: '...' + host: 'localhost' + port: 8888 +``` diff --git a/lib/threefold/tfgrid3deployer/tfgrid3deployer_factory_.v b/lib/threefold/tfgrid3deployer/tfgrid3deployer_factory_.v new file mode 100644 index 00000000..74eb0c61 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/tfgrid3deployer_factory_.v @@ -0,0 +1,106 @@ +module tfgrid3deployer + +import freeflowuniverse.herolib.core.base +import freeflowuniverse.herolib.core.playbook + +__global ( + tfgrid3deployer_global map[string]&TFGridDeployer + tfgrid3deployer_default string +) + +/////////FACTORY + +@[params] +pub struct ArgsGet { +pub mut: + name string = 'default' +} + +fn args_get(args_ ArgsGet) ArgsGet { + mut args := args_ + if args.name == '' { + args.name = tfgrid3deployer_default + } + if args.name == '' { + args.name = 'default' + } + return args +} + +pub fn get(args_ ArgsGet) !&TFGridDeployer { + mut args := args_get(args_) + if args.name !in tfgrid3deployer_global { + if !config_exists() { + if default { + config_save()! + } + } + config_load()! + } + return tfgrid3deployer_global[args.name] or { + println(tfgrid3deployer_global) + panic('bug in get from factory: ') + } +} + +fn config_exists(args_ ArgsGet) bool { + mut args := args_get(args_) + mut context := base.context() or { panic('bug') } + return context.hero_config_exists('tfgrid3deployer', args.name) +} + +fn config_load(args_ ArgsGet) ! 
{ + mut args := args_get(args_) + mut context := base.context()! + mut heroscript := context.hero_config_get('tfgrid3deployer', args.name)! + play(heroscript: heroscript)! +} + +fn config_save(args_ ArgsGet) ! { + mut args := args_get(args_) + mut context := base.context()! + context.hero_config_set('tfgrid3deployer', args.name, heroscript_default()!)! +} + +fn set(o TFGridDeployer) ! { + mut o2 := obj_init(o)! + tfgrid3deployer_global['default'] = &o2 +} + +@[params] +pub struct PlayArgs { +pub mut: + name string = 'default' + heroscript string // if filled in then plbook will be made out of it + plbook ?playbook.PlayBook + reset bool + + start bool + stop bool + restart bool + delete bool + configure bool // make sure there is at least one installed +} + +pub fn play(args_ PlayArgs) ! { + mut args := args_ + + if args.heroscript == '' { + args.heroscript = heroscript_default()! + } + mut plbook := args.plbook or { playbook.new(text: args.heroscript)! } + + mut install_actions := plbook.find(filter: 'tfgrid3deployer.configure')! + if install_actions.len > 0 { + for install_action in install_actions { + mut p := install_action.params + mycfg := cfg_play(p)! + set(mycfg)! 
+ } + } +} + +// switch instance to be used for tfgrid3deployer +pub fn switch(name string) { + tfgrid3deployer_default = name +} diff --git a/lib/threefold/tfgrid3deployer/tfgrid3deployer_model.v b/lib/threefold/tfgrid3deployer/tfgrid3deployer_model.v new file mode 100644 index 00000000..7cda7d48 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/tfgrid3deployer_model.v @@ -0,0 +1,63 @@ +module tfgrid3deployer + +import freeflowuniverse.herolib.data.paramsparser +import os + +pub const version = '1.0.0' +const singleton = false +const default = true + +pub fn heroscript_default() !string { + ssh_key := os.getenv_opt('SSH_KEY') or { '' } + mnemonic := os.getenv_opt('TFGRID_MNEMONIC') or { '' } + network := os.getenv_opt('TFGRID_NETWORK') or { 'main' } // main,test,dev,qa + heroscript := " + !!tfgrid3deployer.configure name:'default' + ssh_key: '${ssh_key}' + mnemonic: '${mnemonic}' + network: ${network} + + " + if ssh_key.len == 0 || mnemonic.len == 0 || network.len == 0 { + return error('please configure the tfgrid deployer or set SSH_KEY, TFGRID_MNEMONIC, and TFGRID_NETWORK.') + } + return heroscript +} + +pub enum Network { + dev + main + test + qa +} + +pub struct TFGridDeployer { +pub mut: + name string = 'default' + ssh_key string + mnemonic string + network Network +} + +fn cfg_play(p paramsparser.Params) !TFGridDeployer { + network_str := p.get_default('network', 'main')! + network := match network_str { + 'dev' { Network.dev } + 'test' { Network.test } + 'qa' { Network.qa } + else { Network.main } + } + + mut mycfg := TFGridDeployer{ + ssh_key: p.get_default('ssh_key', '')! + mnemonic: p.get_default('mnemonic', '')! 
+ network: network + } + return mycfg +} + +fn obj_init(obj_ TFGridDeployer) !TFGridDeployer { + // never call get here, only thing we can do here is work on object itself + mut obj := obj_ + return obj +} diff --git a/lib/threefold/tfgrid3deployer/utils.v b/lib/threefold/tfgrid3deployer/utils.v new file mode 100644 index 00000000..5f49ed52 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/utils.v @@ -0,0 +1,55 @@ +module tfgrid3deployer + +import freeflowuniverse.herolib.threefold.gridproxy +import freeflowuniverse.herolib.threefold.grid.models as grid_models +import freeflowuniverse.herolib.threefold.gridproxy.model as gridproxy_models +import rand + +// Resolves the correct grid network based on the `cn.network` value. +// +// This utility function converts the custom network type of GridContracts +// to the appropriate value in `gridproxy.TFGridNet`. +// +// Returns: +// - A `gridproxy.TFGridNet` value corresponding to the grid network. +fn resolve_network() !gridproxy.TFGridNet { + mut cfg := get()! + return match cfg.network { + .dev { gridproxy.TFGridNet.dev } + .test { gridproxy.TFGridNet.test } + .main { gridproxy.TFGridNet.main } + .qa { gridproxy.TFGridNet.qa } + } +} + +/* + * This should be the node's subnet and the wireguard routing ip that should start with 100.64 then the 2nd and 3rd part of the node's subnet +*/ +fn wireguard_routing_ip(ip string) string { + parts := ip.split('.') + return '100.64.${parts[1]}.${parts[2]}/32' +} + +/* + * Just generate a hex key for the mycelium network +*/ +fn get_mycelium() grid_models.Mycelium { + return grid_models.Mycelium{ + hex_key: rand.string(32).bytes().hex() + peers: [] + } +} + +pub fn filter_nodes(filter gridproxy_models.NodeFilter) ![]gridproxy_models.Node { + // Resolve the network configuration + net := resolve_network()! + + // Create grid proxy client and retrieve the matching nodes + mut gp_client := gridproxy.new(net: net, cache: true)! + nodes := gp_client.get_nodes(filter)! 
+ return nodes +} + +fn convert_to_gigabytes(bytes u64) u64 { + return bytes * 1024 * 1024 * 1024 +} diff --git a/lib/threefold/tfgrid3deployer/vmachine.v b/lib/threefold/tfgrid3deployer/vmachine.v new file mode 100644 index 00000000..705c7895 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/vmachine.v @@ -0,0 +1,166 @@ +module tfgrid3deployer + +import freeflowuniverse.herolib.ui.console +import json +import os +import rand + +@[params] +pub struct Mycelium { + hex_seed string = rand.hex(12) +} + +// MachineNetworkReq struct to represent network access configuration +@[params] +pub struct VMRequirements { +pub mut: + name string + description string + cpu int // vcores + size u64 + memory int // gbyte + public_ip4 bool + public_ip6 bool + planetary bool + mycelium ?Mycelium + flist string = 'https://hub.grid.tf/tf-official-vms/ubuntu-24.04-latest.flist' + entrypoint string = '/sbin/zinit init' + env map[string]string + nodes []u32 // if set will chose a node from the list to deploy on +} + +// MachineModel struct to represent a machine and its associat ed details +pub struct VMachine { +pub mut: + tfchain_id string + contract_id u64 + requirements VMRequirements + node_id u32 + planetary_ip string + mycelium_ip string + public_ip4 string + wireguard_ip string + public_ip6 string +} + +// Helper function to encode a VMachine +fn (self VMachine) encode() ![]u8 { + // mut b := encoder.new() + // b.add_string(self.name) + // b.add_string(self.tfchain_id) + // b.add_int(self.contract_id) + // b.add_int(self.cpu) + // b.add_int(self.memory) + // b.add_string(self.description) + // for now we just use json, will do bytes when needed + return json.encode(self).bytes() +} + +// Helper function to decode a VMachine +fn decode_vmachine(data []u8) !VMachine { + // mut d encoder.Decode + // return VMachine{ + // name: d.get_string() + // tfchain_id: d.get_string() + // contract_id: d.get_int() + // cpu: d.get_int() + // memory: d.get_int() + // description: d.get_string() + 
// } + data_string := data.bytestr() + return json.decode(VMachine, data_string) +} + +// Call zos to get the zos version running on the node +fn (self VMachine) check_node_up() !bool { + console.print_header('Pinging node: ${self.node_id}') + mut deployer := get_deployer()! + node_twin_id := deployer.client.get_node_twin(self.node_id) or { + return error('faild to get the node twin ID due to: ${err}') + } + deployer.client.get_zos_version(node_twin_id) or { return false } + console.print_header('Node ${self.node_id} is reachable.') + return true +} + +fn ping(ip string) bool { + res := os.execute('ping -c 1 -W 2 ${ip}') + return res.exit_code == 0 +} + +// Ping the VM supported interfaces +fn (self VMachine) check_vm_up() bool { + if self.public_ip4 != '' { + console.print_header('Pinging public IPv4: ${self.public_ip4}') + pingable := ping(self.public_ip4) + if !pingable { + console.print_stderr("The public IPv4 isn't pingable.") + } + return pingable + } + + if self.public_ip6 != '' { + console.print_header('Pinging public IPv6: ${self.public_ip6}') + pingable := ping(self.public_ip6) + if !pingable { + console.print_stderr("The public IPv6 isn't pingable.") + } + return pingable + } + + if self.planetary_ip != '' { + console.print_header('Pinging planetary IP: ${self.planetary_ip}') + pingable := ping(self.planetary_ip) + if !pingable { + console.print_stderr("The planetary IP isn't pingable.") + } + return pingable + } + + if self.mycelium_ip != '' { + console.print_header('Pinging mycelium IP: ${self.mycelium_ip}') + pingable := ping(self.mycelium_ip) + if !pingable { + console.print_stderr("The mycelium IP isn't pingable.") + } + return pingable + } + return false +} + +pub fn (self VMachine) healthcheck() !bool { + console.print_header('Doing a healthcheck on machine ${self.requirements.name}') + + is_vm_up := self.check_node_up()! 
+ if !is_vm_up { + console.print_stderr("The VM isn't reachable, pinging node ${self.node_id}") + is_node_up := self.check_node_up()! + if !is_node_up { + console.print_stderr("The VM node isn't reachable.") + return false + } + return false + } + + console.print_header('The VM is up and reachable.') + return true +} + +// NetworkInfo struct to represent network details +pub struct RecoverArgs { +pub mut: + reinstall bool // reinstall if needed and run heroscript +} + +fn (self VMachine) recover(args RecoverArgs) ! { +} + +// NetworkInfo struct to represent network details +pub struct DeployArgs { +pub mut: + reset bool // careful will delete existing machine if true +} + +fn (self VMachine) deploy(args DeployArgs) ! { + // check the machine is there, if yes and reset used then delete the machine before deploying a new one +} diff --git a/lib/threefold/tfgrid3deployer/webnames.v b/lib/threefold/tfgrid3deployer/webnames.v new file mode 100644 index 00000000..f372c7f9 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/webnames.v @@ -0,0 +1,27 @@ +module tfgrid3deployer + +import json + +@[params] +pub struct WebNameRequirements { +pub mut: + name string @[required] + node_id ?u32 + // must be in the format ip:port if tls_passthrough is set, otherwise the format should be http://ip[:port] + backend string @[required] + tls_passthrough bool +} + +pub struct WebName { +pub mut: + fqdn string + name_contract_id u64 + node_contract_id u64 + requirements WebNameRequirements + node_id u32 +} + +// Helper function to encode a WebName +fn (self WebName) encode() ![]u8 { + return json.encode(self).bytes() +} diff --git a/lib/threefold/tfgrid3deployer/zdbs.v b/lib/threefold/tfgrid3deployer/zdbs.v new file mode 100644 index 00000000..f6f35f73 --- /dev/null +++ b/lib/threefold/tfgrid3deployer/zdbs.v @@ -0,0 +1,32 @@ +module tfgrid3deployer + +import freeflowuniverse.herolib.threefold.grid.models as grid_models +// import freeflowuniverse.herolib.ui.console +import json + 
+@[params] +pub struct ZDBRequirements { +pub mut: + name string @[required] + password string @[required] + size int @[required] + node_id ?u32 + description string + mode grid_models.ZdbMode = 'user' + public bool +} + +pub struct ZDB { +pub mut: + ips []string + port u32 + namespace string + contract_id u64 + requirements ZDBRequirements + node_id u32 +} + +// Helper function to encode a ZDB +fn (self ZDB) encode() ![]u8 { + return json.encode(self).bytes() +} diff --git a/lib/threefold/tfgrid_actions/README.md b/lib/threefold/tfgrid_actions/README.md new file mode 100644 index 00000000..6993cd49 --- /dev/null +++ b/lib/threefold/tfgrid_actions/README.md @@ -0,0 +1,22 @@ +# heroscript actions handlers + +takes input in heroscript language and can then call v clients to talk to e.g. web3gw, web3gw is proxy in golang to tfgrid functionality. + +## Usage + +- For documentation on how to use heroscript, refer to this document [here](../../manual/src/threelang/parser.md) + +> todo: update doc + +## Development + +- To add new books to the parser, follow these instructions: + + - Create a new module inside the threelang folder + - Inside the new module, create a new handler for this book. + - While creating a new Runner, the new handler should be initialized, then saved to the Runner's state. + - The new handler should have its actions exposed in the Runner.run() method + - The new handler must implement a handle_action method. + - The handle_action method receives an playbook.Action, and executes the action however it sees fit. + - Handlers are responsible for logging their output, if any. + - To add documentation on how to use the new book, create a new folder [here](../../manual/src/threelang/) with the book's name, and add all needed documentation files in this folder. 
diff --git a/lib/threefold/tfgrid_actions/blockchain/blockchain.v b/lib/threefold/tfgrid_actions/blockchain/blockchain.v new file mode 100644 index 00000000..575f8ffa --- /dev/null +++ b/lib/threefold/tfgrid_actions/blockchain/blockchain.v @@ -0,0 +1,15 @@ +module blockchain + +import freeflowuniverse.herolib.core.playbook { Actions } +import freeflowuniverse.herolib.core.texttools +import freeflowuniverse.herolib.data.paramsparser + +// TODO: not implemented, + +fn (mut c Controller) actions(actions_ Actions) ! { + mut actions2 := actions_.filtersort(actor: '???')! + for action in actions2 { + if action.name == '???' { + } + } +} diff --git a/lib/threefold/tfgrid_actions/blockchain/factory.v b/lib/threefold/tfgrid_actions/blockchain/factory.v new file mode 100644 index 00000000..6b78332a --- /dev/null +++ b/lib/threefold/tfgrid_actions/blockchain/factory.v @@ -0,0 +1,11 @@ +module blockchain + +// import freeflowuniverse.herolib.core.playbook + +pub struct Controller { +} + +pub fn new() !Controller { + mut c := Controller{} + return c +} diff --git a/lib/threefold/tfgrid_actions/clients/clients.v b/lib/threefold/tfgrid_actions/clients/clients.v new file mode 100644 index 00000000..1cf279e1 --- /dev/null +++ b/lib/threefold/tfgrid_actions/clients/clients.v @@ -0,0 +1,16 @@ +module clients + +import freeflowuniverse.herolib.threefold.web3gw.tfgrid { TFGridClient } +import freeflowuniverse.herolib.threefold.web3gw.tfchain { TfChainClient } +import freeflowuniverse.herolib.threefold.web3gw.stellar { StellarClient } +import freeflowuniverse.herolib.threefold.web3gw.eth { EthClient } +import freeflowuniverse.herolib.threefold.web3gw.btc { BtcClient } + +pub struct Clients { +pub mut: + tfg_client TFGridClient + tfc_client TfChainClient + str_client StellarClient + eth_client EthClient + btc_client BtcClient +} diff --git a/lib/threefold/tfgrid_actions/factory.v b/lib/threefold/tfgrid_actions/factory.v new file mode 100644 index 00000000..9ae0a09d --- /dev/null +++ 
b/lib/threefold/tfgrid_actions/factory.v @@ -0,0 +1,94 @@ +module tfgrid_actions + +import log +import freeflowuniverse.herolib.core.playbook +import freeflowuniverse.herolib.data.rpcwebsocket { RpcWsClient } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid as tfgrid_client +import freeflowuniverse.herolib.threefold.web3gw.tfchain as tfchain_client +import freeflowuniverse.herolib.threefold.web3gw.stellar as stellar_client +import freeflowuniverse.herolib.threefold.web3gw.eth as eth_client +import freeflowuniverse.herolib.threefold.web3gw.btc as btc_client +import freeflowuniverse.herolib.threefold.tfgrid_actions.tfgrid { TFGridHandler } +import freeflowuniverse.herolib.threefold.tfgrid_actions.web3gw { Web3GWHandler } +import freeflowuniverse.herolib.threefold.tfgrid_actions.clients { Clients } +import freeflowuniverse.herolib.threefold.tfgrid_actions.stellar { StellarHandler } + +const tfgrid_book = 'tfgrid' +const web3gw_book = 'web3gw' +const stellar_book = 'stellar' + +pub struct Runner { +pub mut: + path string + clients Clients + tfgrid_handler TFGridHandler + web3gw_handler Web3GWHandler + stellar_handler StellarHandler +} + +@[params] +pub struct RunnerArgs { +pub mut: + name string + path string + address string +} + +pub fn new(args RunnerArgs, debug_log bool) !Runner { + mut ap := playbook.new(path: args.path)! + + mut logger := log.Logger(&log.Log{ + level: if debug_log { .debug } else { .info } + }) + + mut rpc_client := rpcwebsocket.new_rpcwsclient(args.address, &logger) or { + return error('Failed creating rpc websocket client: ${err}') + } + _ := spawn rpc_client.run() + + mut gw_clients := get_clients(mut rpc_client)! 
+ + tfgrid_handler := tfgrid.new(mut rpc_client, logger, mut gw_clients.tfg_client) + web3gw_handler := web3gw.new(mut rpc_client, &logger, mut gw_clients) + stellar_handler := stellar.new(mut rpc_client, &logger, mut gw_clients.str_client) + + mut runner := Runner{ + path: args.path + tfgrid_handler: tfgrid_handler + web3gw_handler: web3gw_handler + clients: gw_clients + stellar_handler: stellar_handler + } + + runner.run(mut ap)! + return runner +} + +pub fn (mut r Runner) run(mut acs playbook.Actions) ! { + for action in acs.actions { + match action.book { + threelang.tfgrid_book { + r.tfgrid_handler.handle_action(action)! + } + threelang.web3gw_book { + r.web3gw_handler.handle_action(action)! + } + threelang.stellar_book { + r.stellar_handler.handle_action(action)! + } + else { + return error('module ${action.book} is invalid') + } + } + } +} + +pub fn get_clients(mut rpc_client RpcWsClient) !Clients { + return Clients{ + tfg_client: tfgrid_client.new(mut rpc_client) + tfc_client: tfchain_client.new(mut rpc_client) + btc_client: btc_client.new(mut rpc_client) + eth_client: eth_client.new(mut rpc_client) + str_client: stellar_client.new(mut rpc_client) + } +} diff --git a/lib/threefold/tfgrid_actions/nostr/channel.v b/lib/threefold/tfgrid_actions/nostr/channel.v new file mode 100644 index 00000000..35394905 --- /dev/null +++ b/lib/threefold/tfgrid_actions/nostr/channel.v @@ -0,0 +1,59 @@ +module nostr + +import freeflowuniverse.herolib.core.playbook { Action } + +fn (mut n NostrHandler) channel(action Action) ! { + match action.name { + 'create' { + // create a new channel + name := action.params.get('name')! + about := action.params.get_default('description', '')! + pic_url := action.params.get_default('picture', '')! + + channel_id := n.client.create_channel(name: name, about: about, picture: pic_url)! + n.logger.info('Channel ID ${channel_id}') + } + 'send' { + // send message to channel + channel_id := action.params.get('channel')! 
+ content := action.params.get('content')! + message_id := action.params.get_default('reply_to', '')! + public_key := action.params.get_default('public_key_author', '')! + + n.client.create_channel_message( + channel_id: channel_id + content: content + message_id: message_id + public_key: public_key + )! + } + 'read_sub' { + // read subscription messages + channel_id := action.params.get('channel')! + mut id := action.params.get_default('id', '')! + if id == '' { + id = n.client.subscribe_channel_message(id: channel_id)! + n.logger.info('Subscription ID: ${id}') + } + count := action.params.get_u32_default('count', 10)! + + messages := n.client.get_subscription_events(id: id, count: count)! + n.logger.info('Channel Messages: ${messages}') + } + 'read' { + // read all channel messages + channel_id := action.params.get('channel')! + + messages := n.client.get_channel_message(channel_id: channel_id)! + n.logger.info('Channel Messages: ${messages}') + } + 'list' { + // list all channels on relay + channels := n.client.list_channels()! + n.logger.info('Channels: ${channels}') + } + else { + return error('operation ${action.name} is not supported on nostr groups') + } + } +} diff --git a/lib/threefold/tfgrid_actions/nostr/direct.v b/lib/threefold/tfgrid_actions/nostr/direct.v new file mode 100644 index 00000000..796429a7 --- /dev/null +++ b/lib/threefold/tfgrid_actions/nostr/direct.v @@ -0,0 +1,34 @@ +module nostr + +import freeflowuniverse.herolib.core.playbook { Action } + +fn (mut n NostrHandler) direct(action Action) ! { + match action.name { + 'send' { + // send direct message + receiver := action.params.get('receiver')! + content := action.params.get('content')! + + n.client.publish_direct_message( + receiver: receiver + content: content + )! + } + 'read' { + // reads and subscribes to direct messages + mut id := action.params.get_default('subscription_id', '')! + if id == '' { + id = n.client.subscribe_to_direct_messages()! 
+ n.logger.info('subscription id: ${id}') + } + + count := action.params.get_u32_default('count', 10)! + + events := n.client.get_subscription_events(id: id, count: count)! + n.logger.info('Direct Message Events: ${events}') + } + else { + return error('operation ${action.name} is not supported on nostr direct messages') + } + } +} diff --git a/lib/threefold/tfgrid_actions/nostr/handler.v b/lib/threefold/tfgrid_actions/nostr/handler.v new file mode 100644 index 00000000..3304c7aa --- /dev/null +++ b/lib/threefold/tfgrid_actions/nostr/handler.v @@ -0,0 +1,35 @@ +module nostr + +import threefoldtech.threebot.nostr as nostr_client { NostrClient } +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.data.rpcwebsocket { RpcWsClient } +import log { Logger } + +pub struct NostrHandler { +pub mut: + client NostrClient + logger Logger +} + +pub fn new(mut rpc_client RpcWsClient, logger Logger) NostrHandler { + mut cl := nostr_client.new(mut rpc_client) + + return NostrHandler{ + client: cl + logger: logger + } +} + +pub fn (mut n NostrHandler) handle_action(action Action) ! { + match action.actor { + 'channel' { + n.channel(action)! + } + 'direct' { + n.direct(action)! + } + else { + return error('actor ${action.actor} is not supported') + } + } +} diff --git a/lib/threefold/tfgrid_actions/stellar/account.v b/lib/threefold/tfgrid_actions/stellar/account.v new file mode 100644 index 00000000..c0771a44 --- /dev/null +++ b/lib/threefold/tfgrid_actions/stellar/account.v @@ -0,0 +1,47 @@ +module stellar + +import freeflowuniverse.herolib.core.playbook { Action } + +fn (mut h StellarHandler) account(action Action) ! { + match action.name { + 'address' { + res := h.client.address()! + + h.logger.info(res) + } + 'create' { + network := action.params.get_default('network', 'public')! + + res := h.client.create_account(network)! + + h.logger.info(res) + } + 'transactions' { + account := action.params.get_default('account', '')! 
+ limit := action.params.get_u32_default('limit', 10)! + include_failed := action.params.get_default_false('include_failed') + cursor := action.params.get_default('cursor', '')! + ascending := action.params.get_default_false('ascending') + + res := h.client.transactions( + account: account + limit: limit + include_failed: include_failed + cursor: cursor + ascending: ascending + )! + + h.logger.info('Transactions: ${res}') + } + 'data' { + account := action.params.get('account')! + + res := h.client.account_data(account)! + + h.logger.info('${res}') + } + else { + return error('account action ${action.name} is invalid') + } + } +} diff --git a/lib/threefold/tfgrid_actions/stellar/handler.v b/lib/threefold/tfgrid_actions/stellar/handler.v new file mode 100644 index 00000000..99a5315d --- /dev/null +++ b/lib/threefold/tfgrid_actions/stellar/handler.v @@ -0,0 +1,30 @@ +module stellar + +import freeflowuniverse.herolib.threefold.web3gw.stellar as stellar_client { StellarClient } +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.data.rpcwebsocket { RpcWsClient } +import log { Logger } + +pub struct StellarHandler { +pub mut: + client StellarClient + logger Logger +} + +pub fn new(mut rpc_client RpcWsClient, logger Logger, mut client StellarClient) StellarHandler { + return StellarHandler{ + client: client + logger: logger + } +} + +pub fn (mut h StellarHandler) handle_action(action Action) ! { + match action.actor { + 'account' { + h.account(action)! 
+ } + else { + return error('action actor ${action.actor} is invalid') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/contracts.v b/lib/threefold/tfgrid_actions/tfgrid/contracts.v new file mode 100644 index 00000000..bcc86173 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/contracts.v @@ -0,0 +1,64 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid as tfgrid_client { ContractFilter, FindContracts, Limit } + +pub fn (mut h TFGridHandler) contracts(action Action) ! { + match action.name { + 'get' { + mnemonics := action.params.get_default('mnemonics', '')! + network := action.params.get_default('network', 'main')! + h.tfgrid.load( + mnemonic: mnemonics + network: network + )! + mut filter := ContractFilter{} + if action.params.exists('contract_id') { + filter.contract_id = action.params.get_u64('contract_id')! + } + if action.params.exists('twin_id') { + filter.twin_id = action.params.get_u64('twin_id')! + } + if action.params.exists('node_id') { + filter.node_id = action.params.get_u64('node_id')! + } + if action.params.exists('type') { + filter.type_ = action.params.get('type')! + } + if action.params.exists('state') { + filter.state = action.params.get('state')! + } + if action.params.exists('name') { + filter.name = action.params.get('name')! + } + if action.params.exists('number_of_public_ips') { + filter.number_of_public_ips = action.params.get_u64('number_of_public_ips')! + } + if action.params.exists('deployment_data') { + filter.deployment_data = action.params.get('deployment_data')! + } + if action.params.exists('deployment_hash') { + filter.deployment_hash = action.params.get('deployment_hash')! + } + + page := action.params.get_u64_default('page', 1)! + size := action.params.get_u64_default('size', 50)! 
+ randomize := action.params.get_default_false('randomize') + + req := FindContracts{ + filters: filter + pagination: Limit{ + page: page + size: size + randomize: randomize + } + } + + res := h.tfgrid.find_contracts(req)! + h.logger.info('contracts: ${res}') + } + else { + return error('explorer does not support operation: ${action.name}') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/core.v b/lib/threefold/tfgrid_actions/tfgrid/core.v new file mode 100644 index 00000000..e27dcbdb --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/core.v @@ -0,0 +1,17 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } + +fn (mut t TFGridHandler) core(action Action) ! { + match action.name { + 'login' { + mnemonic := action.params.get_default('mnemonic', '')! + netstring := action.params.get_default('network', 'main')! + + t.tfgrid.load(mnemonic: mnemonic, network: netstring)! + } + else { + return error('core action ${action.name} is invalid') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/discourse.v b/lib/threefold/tfgrid_actions/tfgrid/discourse.v new file mode 100644 index 00000000..565aa1e7 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/discourse.v @@ -0,0 +1,54 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import rand + +fn (mut t TFGridHandler) discourse(action Action) ! { + match action.name { + 'create' { + name := action.params.get_default('name', rand.string(10).to_lower())! + farm_id := action.params.get_int_default('farm_id', 0)! + capacity := action.params.get_default('capacity', 'medium')! + ssh_key_name := action.params.get_default('ssh_key', 'default')! + ssh_key := t.get_ssh_key(ssh_key_name)! + developer_email := action.params.get_default('developer_email', '')! + smtp_address := action.params.get_default('smtp_address', 'smtp.gmail.com')! + smtp_port := action.params.get_int_default('smtp_port', 587)! 
+ smtp_username := action.params.get_default('smtp_username', '')! + smtp_password := action.params.get_default('smtp_password', '')! + smtp_tls := action.params.get_default_false('smtp_tls') + + deploy_res := t.tfgrid.deploy_discourse( + name: name + farm_id: u64(farm_id) + capacity: capacity + ssh_key: ssh_key + developer_email: developer_email + smtp_address: smtp_address + smtp_port: u32(smtp_port) + smtp_username: smtp_username + smtp_password: smtp_password + smtp_enable_tls: smtp_tls + )! + + t.logger.info('${deploy_res}') + } + 'get' { + name := action.params.get('name')! + + get_res := t.tfgrid.get_discourse_deployment(name)! + + t.logger.info('${get_res}') + } + 'delete' { + name := action.params.get('name')! + + t.tfgrid.cancel_discourse_deployment(name) or { + return error('failed to delete discourse instance: ${err}') + } + } + else { + return error('operation ${action.name} is not supported on discourse') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/farms.v b/lib/threefold/tfgrid_actions/tfgrid/farms.v new file mode 100644 index 00000000..c153feeb --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/farms.v @@ -0,0 +1,61 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid { FarmFilter, FindFarms, Limit } + +pub fn (mut h TFGridHandler) farms(action Action) ! { + match action.name { + 'get' { + mut filter := FarmFilter{} + if action.params.exists('free_ips') { + filter.free_ips = action.params.get_u64('free_ips')! + } + if action.params.exists('total_ips') { + filter.total_ips = action.params.get_u64('total_ips')! + } + if action.params.exists('stellar_address') { + filter.stellar_address = action.params.get('stellar_address')! + } + if action.params.exists('pricing_policy_id') { + filter.pricing_policy_id = action.params.get_u64('pricing_policy_id')! + } + if action.params.exists('farm_id') { + filter.farm_id = action.params.get_u64('farm_id')! 
+ } + if action.params.exists('twin_id') { + filter.twin_id = action.params.get_u64('twin_id')! + } + if action.params.exists('name') { + filter.name = action.params.get('name')! + } + if action.params.exists('name_contains') { + filter.name_contains = action.params.get('name_contains')! + } + if action.params.exists('certification_type') { + filter.certification_type = action.params.get('certification_type')! + } + if action.params.exists('dedicated') { + filter.dedicated = action.params.get_default_false('dedicated') + } + + page := action.params.get_u64_default('page', 1)! + size := action.params.get_u64_default('size', 50)! + randomize := action.params.get_default_false('randomize') + + req := FindFarms{ + filters: filter + pagination: Limit{ + page: page + size: size + randomize: randomize + } + } + + res := h.tfgrid.find_farms(req)! + h.logger.info('farms: ${res}') + } + else { + return error('explorer does not support operation: ${action.name}') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/funkwhale.v b/lib/threefold/tfgrid_actions/tfgrid/funkwhale.v new file mode 100644 index 00000000..79339e3e --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/funkwhale.v @@ -0,0 +1,48 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import rand + +fn (mut t TFGridHandler) funkwhale(action Action) ! { + match action.name { + 'create' { + name := action.params.get_default('name', rand.string(10).to_lower())! + farm_id := action.params.get_int_default('farm_id', 0)! + capacity := action.params.get_default('capacity', 'meduim')! + ssh_key_name := action.params.get_default('ssh_key', 'default')! + ssh_key := t.get_ssh_key(ssh_key_name)! + admin_email := action.params.get('admin_email')! + admin_username := action.params.get_default('admin_username', '')! + admin_password := action.params.get_default('admin_password', '')! 
+ + deploy_res := t.tfgrid.deploy_funkwhale( + name: name + farm_id: u64(farm_id) + capacity: capacity + ssh_key: ssh_key + admin_email: admin_email + admin_username: admin_username + admin_password: admin_password + )! + + t.logger.info('${deploy_res}') + } + 'get' { + name := action.params.get('name')! + + get_res := t.tfgrid.get_funkwhale_deployment(name)! + + t.logger.info('${get_res}') + } + 'delete' { + name := action.params.get('name')! + + t.tfgrid.cancel_funkwhale_deployment(name) or { + return error('failed to delete funkwhale instance: ${err}') + } + } + else { + return error('operation ${action.name} is not supported on funkwhale') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/gateway_fqdn.v b/lib/threefold/tfgrid_actions/tfgrid/gateway_fqdn.v new file mode 100644 index 00000000..8e638dae --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/gateway_fqdn.v @@ -0,0 +1,40 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid as tfgrid_client { GatewayFQDN } +import rand + +fn (mut t TFGridHandler) gateway_fqdn(action Action) ! { + match action.name { + 'create' { + node_id := action.params.get_int('node_id')! + name := action.params.get_default('name', rand.string(10).to_lower())! + tls_passthrough := action.params.get_default_false('tls_passthrough') + backend := action.params.get('backend')! + fqdn := action.params.get('fqdn')! + + gw_deploy := t.tfgrid.deploy_gateway_fqdn(GatewayFQDN{ + name: name + node_id: u32(node_id) + tls_passthrough: tls_passthrough + backends: [backend] + fqdn: fqdn + })! + + t.logger.info('${gw_deploy}') + } + 'delete' { + name := action.params.get('name')! + t.tfgrid.cancel_gateway_fqdn(name)! + } + 'get' { + name := action.params.get('name')! + gw_get := t.tfgrid.get_gateway_fqdn(name)! 
+ + t.logger.info('${gw_get}') + } + else { + return error('action ${action.name} is not supported on gateways') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/gateway_name.v b/lib/threefold/tfgrid_actions/tfgrid/gateway_name.v new file mode 100644 index 00000000..05df8221 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/gateway_name.v @@ -0,0 +1,38 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid as tfgrid_client { GatewayName } +import rand + +fn (mut t TFGridHandler) gateway_name(action Action) ! { + match action.name { + 'create' { + node_id := action.params.get_int_default('node_id', 0)! + name := action.params.get_default('name', rand.string(10).to_lower())! + tls_passthrough := action.params.get_default_false('tls_passthrough') + backend := action.params.get('backend')! + + gw_deploy := t.tfgrid.deploy_gateway_name(GatewayName{ + name: name + node_id: u32(node_id) + tls_passthrough: tls_passthrough + backends: [backend] + })! + + t.logger.info('${gw_deploy}') + } + 'delete' { + name := action.params.get('name')! + t.tfgrid.cancel_gateway_name(name)! + } + 'get' { + name := action.params.get('name')! + gw_get := t.tfgrid.get_gateway_name(name)! 
+ + t.logger.info('${gw_get}') + } + else { + return error('action ${action.name} is not supported on gateways') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/handler.v b/lib/threefold/tfgrid_actions/tfgrid/handler.v new file mode 100644 index 00000000..1a5169b3 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/handler.v @@ -0,0 +1,49 @@ +module tfgrid + +import freeflowuniverse.herolib.threefold.web3gw.tfgrid as tfgrid_client { TFGridClient } +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.data.rpcwebsocket { RpcWsClient } +import log { Logger } + +@[heap] +pub struct TFGridHandler { +pub mut: + tfgrid TFGridClient + ssh_keys map[string]string + logger Logger + handlers map[string]fn (action Action) ! +} + +pub fn new(mut rpc_client RpcWsClient, logger Logger, mut grid_client TFGridClient) TFGridHandler { + mut t := TFGridHandler{ + tfgrid: grid_client + logger: logger + } + + t.handlers = { + 'core': t.core + 'gateway_fqdn': t.gateway_fqdn + 'gateway_name': t.gateway_name + 'kubernetes': t.k8s + 'machine': t.vm + 'zdbs': t.zdb + 'discourse': t.discourse + 'funkwhale': t.funkwhale + 'peertube': t.peertube + 'taiga': t.taiga + 'presearch': t.presearch + 'nodes': t.nodes + 'farms': t.farms + 'twins': t.twins + 'contracts': t.contracts + 'stats': t.stats + } + + return t +} + +pub fn (mut t TFGridHandler) handle_action(action Action) ! { + handler := t.handlers[action.actor] or { return t.helper(action) } + + return handler(action) +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/helpers.v b/lib/threefold/tfgrid_actions/tfgrid/helpers.v new file mode 100644 index 00000000..7d6220f5 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/helpers.v @@ -0,0 +1,31 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } + +pub fn (mut t TFGridHandler) helper(action Action) ! { + match action.actor { + 'sshkeys' { + t.ssh_key_helper(action)! 
+ } + else { + return error('helper action ${action.actor} is invalid') + } + } +} + +fn (mut t TFGridHandler) ssh_key_helper(action Action) ! { + match action.name { + 'new' { + name := action.params.get('name')! + key := action.params.get('ssh_key')! + t.ssh_keys[name] = key + } + else { + return error('helper action name ${action.name} is invalid') + } + } +} + +fn (mut t TFGridHandler) get_ssh_key(name string) !string { + return t.ssh_keys[name] or { return error('ssh key ${name} does not exist') } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/k8s.v b/lib/threefold/tfgrid_actions/tfgrid/k8s.v new file mode 100644 index 00000000..bc8cc7dc --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/k8s.v @@ -0,0 +1,130 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid as tfgrid_client { AddWorkerToK8sCluster, K8sCluster, K8sNode, RemoveWorkerFromK8sCluster } +import rand + +fn (mut t TFGridHandler) k8s(action Action) ! { + match action.name { + 'create' { + name := action.params.get_default('name', rand.string(8).to_lower())! + farm_id := action.params.get_int_default('farm_id', 0)! + capacity := action.params.get_default('capacity', 'small')! + number_of_workers := action.params.get_int_default('workers', 1)! + ssh_key_name := action.params.get_default('sshkey', 'default')! + ssh_key := t.get_ssh_key(ssh_key_name)! + master_public_ip := action.params.get_default_false('add_public_ip_to_master') + worerks_public_ip := action.params.get_default_false('add_public_ips_to_workers') + add_wg_access := action.params.get_default_false('add_wireguard_access') + + cpu, memory, disk_size := get_k8s_capacity(capacity)! + + mut node := K8sNode{ + name: 'master' + farm_id: u32(farm_id) + cpu: cpu + memory: memory + disk_size: disk_size + public_ip: master_public_ip + } + + mut workers := []K8sNode{} + for _ in 0 .. 
number_of_workers { + mut worker := K8sNode{ + name: 'wr' + rand.string(6) + farm_id: u32(farm_id) + cpu: cpu + memory: memory + disk_size: disk_size + public_ip: worerks_public_ip + } + + workers << worker + } + + cluster := K8sCluster{ + name: name + token: rand.string(6) + ssh_key: ssh_key + master: node + workers: workers + add_wg_access: add_wg_access + } + + deploy_res := t.tfgrid.deploy_k8s_cluster(cluster)! + + t.logger.info('${deploy_res}') + } + 'get' { + name := action.params.get('name')! + + get_res := t.tfgrid.get_k8s_cluster(name)! + + t.logger.info('${get_res}') + } + 'add' { + name := action.params.get('name')! + farm_id := action.params.get_int_default('farm_id', 0)! + capacity := action.params.get_default('capacity', 'medium')! + add_public_ip := action.params.get_default_false('add_public_ip') + + cpu, memory, disk_size := get_k8s_capacity(capacity)! + + mut worker := K8sNode{ + name: 'wr' + rand.string(6) + farm_id: u32(farm_id) + cpu: cpu + memory: memory + disk_size: disk_size + public_ip: add_public_ip + } + + add_res := t.tfgrid.add_worker_to_k8s_cluster(AddWorkerToK8sCluster{ + cluster_name: name + worker: worker + })! + + t.logger.info('${add_res}') + } + 'remove' { + name := action.params.get('name')! + worker_name := action.params.get('worker_name')! + + remove_res := t.tfgrid.remove_worker_from_k8s_cluster(RemoveWorkerFromK8sCluster{ + cluster_name: name + worker_name: worker_name + })! + t.logger.info('${remove_res}') + } + 'delete' { + name := action.params.get('name')! 
+ + t.tfgrid.cancel_k8s_cluster(name) or { + return error('failed to delete k8s cluster: ${err}') + } + } + else { + return error('operation ${action.name} is not supported on k8s') + } + } +} + +fn get_k8s_capacity(capacity string) !(u32, u32, u32) { + match capacity { + 'small' { + return 1, 2048, 10 + } + 'medium' { + return 2, 4096, 20 + } + 'large' { + return 8, 8192, 40 + } + 'extra-large' { + return 8, 16384, 100 + } + else { + return error('invalid capacity ${capacity}') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/network.v b/lib/threefold/tfgrid_actions/tfgrid/network.v new file mode 100644 index 00000000..0464808c --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/network.v @@ -0,0 +1,88 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid { NetworkConfiguration, VMConfiguration } +import rand + +fn (mut t TFGridHandler) network(action Action) ! { + match action.name { + 'create' { + name := action.params.get_default('name', rand.string(6).to_lower())! + description := action.params.get_default('description', '')! + farm_id := action.params.get_int_default('farm_id', 0)! + flist := action.params.get_default('flist', '')! + entrypoint := action.params.get_default('entrypoint', '')! + public_ip := action.params.get_default_false('public_ip') + public_ip6 := action.params.get_default_false('public_ip6') + planetary := action.params.get_default_false('planetary') + cpu := action.params.get_u32_default('cpu', 1)! + memory := action.params.get_u64_default('memory', 1024)! + disk_size := action.params.get_storagecapacity_in_gigabytes('disk_size') or { 0 } + times := action.params.get_int_default('times', 1)! + wg := action.params.get_default_false('add_wireguard_access') + ssh_key_name := action.params.get_default('sshkey', 'default')! + ssh_key := t.get_ssh_key(ssh_key_name)! 
+ + env_vars := { + ssh_key_name: ssh_key + } + // construct vms from the provided data + mut vm_configs := []VMConfiguration{} + for i := 0; i < times; i++ { + vm_config := VMConfiguration{ + name: name + farm_id: u32(farm_id) + flist: flist + entrypoint: entrypoint + public_ip: public_ip + public_ip6: public_ip6 + planetary: planetary + cpu: cpu + memory: memory + rootfs_size: u32(disk_size) + env_vars: env_vars + } + vm_configs << vm_config + } + mut net_config := NetworkConfiguration{ + name: name + add_wireguard_access: wg + } + deploy_res := t.tfgrid.deploy_network( + name: name + description: description + network: net_config + vms: vm_configs + )! + + t.logger.info('${deploy_res}') + } + 'get' { + network := action.params.get('network')! + + get_res := t.tfgrid.get_network_deployment(network)! + + t.logger.info('${get_res}') + } + 'remove' { + network := action.params.get('network')! + machine := action.params.get('machine')! + + remove_res := t.tfgrid.remove_vm_from_network_deployment( + network: network + vm: machine + )! + t.logger.info('${remove_res}') + } + 'delete' { + network := action.params.get('network')! + + t.tfgrid.cancel_network_deployment(network) or { + return error('failed to delete vm network: ${err}') + } + } + else { + return error('operation ${action.name} is not supported on vms') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/nodes.v b/lib/threefold/tfgrid_actions/tfgrid/nodes.v new file mode 100644 index 00000000..51613901 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/nodes.v @@ -0,0 +1,112 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid { FindNodes, Limit, NodeFilter } + +pub fn (mut h TFGridHandler) nodes(action Action) ! { + match action.name { + 'get' { + // network := action.params.get_default('network', 'main')! + // h.tfgrid.load(network)! 
+ + mut filter := NodeFilter{} + if action.params.exists('status') { + filter.status = action.params.get('status')! + } + if action.params.exists('free_mru') { + filter.free_mru = action.params.get_storagecapacity_in_bytes('free_mru')! + } + if action.params.exists('free_hru') { + filter.free_hru = action.params.get_storagecapacity_in_bytes('free_hru')! + } + if action.params.exists('free_sru') { + filter.free_sru = action.params.get_storagecapacity_in_bytes('free_sru')! + } + if action.params.exists('total_mru') { + filter.total_mru = action.params.get_storagecapacity_in_bytes('total_mru')! + } + if action.params.exists('total_hru') { + filter.total_hru = action.params.get_storagecapacity_in_bytes('total_hru')! + } + if action.params.exists('total_sru') { + filter.total_sru = action.params.get_storagecapacity_in_bytes('total_sru')! + } + if action.params.exists('total_cru') { + filter.total_cru = action.params.get_u64('total_cru')! + } + if action.params.exists('country') { + filter.country = action.params.get('country')! + } + if action.params.exists('country_contains') { + filter.country_contains = action.params.get('country_contains')! + } + if action.params.exists('city') { + filter.city = action.params.get('city')! + } + if action.params.exists('city_contains') { + filter.city_contains = action.params.get('city_contains')! + } + if action.params.exists('farm_name') { + filter.farm_name = action.params.get('farm_name')! + } + if action.params.exists('farm_name_contains') { + filter.farm_name_contains = action.params.get('farm_name_contains')! + } + if action.params.exists('farm_id') { + filter.farm_ids = action.params.get_list_u64('farm_id')! + } + if action.params.exists('free_ips') { + filter.free_ips = action.params.get_u64('free_ips')! 
+ } + if action.params.exists('ipv4') { + filter.ipv4 = action.params.get_default_false('ipv4') + } + if action.params.exists('ipv6') { + filter.ipv6 = action.params.get_default_false('ipv6') + } + if action.params.exists('domain') { + filter.domain = action.params.get_default_false('domain') + } + if action.params.exists('dedicated') { + filter.dedicated = action.params.get_default_false('dedicated') + } + if action.params.exists('rentable') { + filter.rentable = action.params.get_default_false('rentable') + } + if action.params.exists('rented') { + filter.rented = action.params.get_default_false('rented') + } + if action.params.exists('rented_by') { + filter.rented_by = action.params.get_u64('rented_by')! + } + if action.params.exists('available_for') { + filter.available_for = action.params.get_u64('available_for')! + } + if action.params.exists('node_id') { + filter.node_id = action.params.get_u64('node_id')! + } + if action.params.exists('twin_id') { + filter.twin_id = action.params.get_u64('twin_id')! + } + + page := action.params.get_u64_default('page', 1)! + size := action.params.get_u64_default('size', 50)! + randomize := action.params.get_default_false('randomize') + + req := FindNodes{ + filters: filter + pagination: Limit{ + page: page + size: size + randomize: randomize + } + } + + res := h.tfgrid.find_nodes(req)! + h.logger.info('nodes: ${res}') + } + else { + return error('explorer does not support operation: ${action.name}') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/peertube.v b/lib/threefold/tfgrid_actions/tfgrid/peertube.v new file mode 100644 index 00000000..697e6745 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/peertube.v @@ -0,0 +1,48 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import rand + +fn (mut t TFGridHandler) peertube(action Action) ! { + match action.name { + 'create' { + name := action.params.get_default('name', rand.string(8).to_lower())! 
+ farm_id := action.params.get_int_default('farm_id', 0)! + capacity := action.params.get_default('capacity', 'meduim')! + ssh_key_name := action.params.get_default('sshkey', 'default')! + ssh_key := t.get_ssh_key(ssh_key_name)! + admin_email := action.params.get('admin_email')! + db_username := action.params.get_default('db_username', rand.string(8).to_lower())! + db_password := action.params.get_default('db_password', rand.string(8).to_lower())! + + deploy_res := t.tfgrid.deploy_peertube( + name: name + farm_id: u64(farm_id) + capacity: capacity + ssh_key: ssh_key + admin_email: admin_email + db_username: db_username + db_password: db_password + )! + + t.logger.info('${deploy_res}') + } + 'get' { + name := action.params.get('name')! + + get_res := t.tfgrid.get_peertube_deployment(name)! + + t.logger.info('${get_res}') + } + 'delete' { + name := action.params.get('name')! + + t.tfgrid.cancel_peertube_deployment(name) or { + return error('failed to delete peertube instance: ${err}') + } + } + else { + return error('operation ${action.name} is not supported on peertube') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/presearch.v b/lib/threefold/tfgrid_actions/tfgrid/presearch.v new file mode 100644 index 00000000..db69a003 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/presearch.v @@ -0,0 +1,50 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import rand + +fn (mut t TFGridHandler) presearch(action Action) ! { + match action.name { + 'create' { + name := action.params.get_default('name', rand.string(10).to_lower())! + farm_id := action.params.get_int_default('farm_id', 0)! + ssh_key_name := action.params.get_default('sshkey', 'default')! + ssh_key := t.get_ssh_key(ssh_key_name)! + disk_size := action.params.get_storagecapacity_in_gigabytes('disk_size') or { 0 } + public_ipv4 := action.params.get_default_false('public_ip') + registration_code := action.params.get('registration_code')! 
+ public_restore_key := action.params.get_default('public_restore_key', '')! + private_restore_key := action.params.get_default('private_restore_key', '')! + + deploy_res := t.tfgrid.deploy_presearch( + name: name + farm_id: u64(farm_id) + ssh_key: ssh_key + disk_size: u32(disk_size) + public_ipv4: public_ipv4 + registration_code: registration_code + public_restore_key: public_restore_key + private_restore_key: private_restore_key + )! + + t.logger.info('${deploy_res}') + } + 'get' { + name := action.params.get('name')! + + get_res := t.tfgrid.get_presearch_deployment(name)! + + t.logger.info('${get_res}') + } + 'delete' { + name := action.params.get('name')! + + t.tfgrid.cancel_presearch_deployment(name) or { + return error('failed to delete presearch instance: ${err}') + } + } + else { + return error('operation ${action.name} is not supported on presearch') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/stats.v b/lib/threefold/tfgrid_actions/tfgrid/stats.v new file mode 100644 index 00000000..e622ceb4 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/stats.v @@ -0,0 +1,24 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid { GetStatistics } + +pub fn (mut h TFGridHandler) stats(action Action) ! { + match action.name { + 'get' { + // network := action.params.get_default('network', 'main')! + // h.explorer.load(network)! + + mut filter := GetStatistics{} + if action.params.exists('status') { + filter.status = action.params.get('status')! + } + + res := h.tfgrid.statistics(filter)! 
+ h.logger.info('stats: ${res}') + } + else { + return error('explorer does not support operation: ${action.name}') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/taiga.v b/lib/threefold/tfgrid_actions/tfgrid/taiga.v new file mode 100644 index 00000000..a6b0b743 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/taiga.v @@ -0,0 +1,50 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import rand + +fn (mut t TFGridHandler) taiga(action Action) ! { + match action.name { + 'create' { + name := action.params.get_default('name', rand.string(8).to_lower())! + farm_id := action.params.get_int_default('farm_id', 0)! + capacity := action.params.get_default('capacity', 'meduim')! + ssh_key_name := action.params.get_default('sshkey', 'default')! + ssh_key := t.get_ssh_key(ssh_key_name)! + admin_username := action.params.get('admin_username')! + admin_password := action.params.get('admin_password')! + admin_email := action.params.get('admin_email')! + disk_size := action.params.get_storagecapacity_in_gigabytes('disk_size') or { 50 } + + deploy_res := t.tfgrid.deploy_taiga( + name: name + farm_id: u64(farm_id) + capacity: capacity + ssh_key: ssh_key + admin_username: admin_username + admin_password: admin_password + admin_email: admin_email + disk_size: u32(disk_size) + )! + + t.logger.info('${deploy_res}') + } + 'get' { + name := action.params.get('name')! + + get_res := t.tfgrid.get_taiga_deployment(name)! + + t.logger.info('${get_res}') + } + 'delete' { + name := action.params.get('name')! 
+ + t.tfgrid.cancel_taiga_deployment(name) or { + return error('failed to delete taiga instance: ${err}') + } + } + else { + return error('operation ${action.name} is not supported on taiga') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/twins.v b/lib/threefold/tfgrid_actions/tfgrid/twins.v new file mode 100644 index 00000000..4f49a022 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/twins.v @@ -0,0 +1,43 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid { FindTwins, Limit, TwinFilter } + +pub fn (mut h TFGridHandler) twins(action Action) ! { + match action.name { + 'get' { + mut filter := TwinFilter{} + if action.params.exists('twin_id') { + filter.twin_id = action.params.get_u64('twin_id')! + } + if action.params.exists('account_id') { + filter.account_id = action.params.get('account_id')! + } + if action.params.exists('relay') { + filter.relay = action.params.get('relay')! + } + if action.params.exists('public_key') { + filter.public_key = action.params.get('public_key')! + } + + page := action.params.get_u64_default('page', 1)! + size := action.params.get_u64_default('size', 50)! + randomize := action.params.get_default_false('randomize') + + req := FindTwins{ + filters: filter + pagination: Limit{ + page: page + size: size + randomize: randomize + } + } + + res := h.tfgrid.find_twins(req)! 
+ h.logger.info('twins: ${res}') + } + else { + return error('explorer does not support operation: ${action.name}') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/vm.v b/lib/threefold/tfgrid_actions/tfgrid/vm.v new file mode 100644 index 00000000..9181f7c8 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/vm.v @@ -0,0 +1,75 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid as tfgrid_client { DeployVM, RemoveVMFromNetworkDeployment } +import rand + +fn (mut t TFGridHandler) vm(action Action) ! { + match action.name { + 'create' { + name := action.params.get_default('name', rand.string(6).to_lower())! + node_id := action.params.get_int_default('node_id', 0)! + farm_id := action.params.get_int_default('farm_id', 0)! + flist := action.params.get_default('flist', 'https://hub.grid.tf/tf-official-apps/base:latest.flist')! + entrypoint := action.params.get_default('entrypoint', '/sbin/zinit init')! + public_ip := action.params.get_default_false('add_public_ipv4') + public_ip6 := action.params.get_default_false('add_public_ipv6') + planetary := action.params.get_default_true('planetary') + cpu := action.params.get_int_default('cpu', 1)! + memory := action.params.get_int_default('memory', 1024)! + rootfs := action.params.get_int_default('rootfs', 2048)! + gateway := action.params.get_default_false('gateway') + add_wireguard_access := action.params.get_default_false('add_wireguard_access') + ssh_key_name := action.params.get_default('sshkey', 'default')! + ssh_key := t.get_ssh_key(ssh_key_name)! 
+ env_vars := { + ssh_key_name: ssh_key + } + deploy_res := t.tfgrid.deploy_vm(DeployVM{ + name: name + node_id: u32(node_id) + farm_id: u32(farm_id) + flist: flist + entrypoint: entrypoint + public_ip: public_ip + public_ip6: public_ip6 + planetary: planetary + cpu: u32(cpu) + memory: u64(memory) + rootfs_size: u64(rootfs) + env_vars: env_vars + add_wireguard_access: add_wireguard_access + gateway: gateway + })! + + t.logger.info('${deploy_res}') + } + 'get' { + network := action.params.get('network')! + + get_res := t.tfgrid.get_vm_deployment(network)! + + t.logger.info('${get_res}') + } + 'remove' { + network := action.params.get('network')! + machine := action.params.get('machine')! + + remove_res := t.tfgrid.remove_vm_from_network_deployment(RemoveVMFromNetworkDeployment{ + network: network + vm: machine + })! + t.logger.info('${remove_res}') + } + 'delete' { + network := action.params.get('network')! + + t.tfgrid.cancel_network_deployment(network) or { + return error('failed to delete vm network: ${err}') + } + } + else { + return error('operation ${action.name} is not supported on vms') + } + } +} diff --git a/lib/threefold/tfgrid_actions/tfgrid/zdb.v b/lib/threefold/tfgrid_actions/tfgrid/zdb.v new file mode 100644 index 00000000..7328f721 --- /dev/null +++ b/lib/threefold/tfgrid_actions/tfgrid/zdb.v @@ -0,0 +1,42 @@ +module tfgrid + +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.threefold.web3gw.tfgrid as tfgrid_client { ZDBDeployment } +import rand + +fn (mut t TFGridHandler) zdb(action Action) ! { + match action.name { + 'create' { + node_id := action.params.get_int_default('node_id', 0)! + name := action.params.get_default('name', rand.string(10).to_lower())! + password := action.params.get_default('password', rand.string(10).to_lower())! 
+ public := action.params.get_default_false('public') + size := action.params.get_storagecapacity_in_gigabytes('size') or { 10 } + mode := action.params.get_default('mode', 'user')! + + zdb_deploy := t.tfgrid.deploy_zdb(ZDBDeployment{ + node_id: u32(node_id) + name: name + password: password + public: public + size: u32(size) + mode: mode + })! + + t.logger.info('${zdb_deploy}') + } + 'delete' { + name := action.params.get('name')! + t.tfgrid.cancel_zdb_deployment(name)! + } + 'get' { + name := action.params.get('name')! + zdb_get := t.tfgrid.get_zdb_deployment(name)! + + t.logger.info('${zdb_get}') + } + else { + return error('action ${action.name} is not supported on zdbs') + } + } +} diff --git a/lib/threefold/tfgrid_actions/web3gw/handler.v b/lib/threefold/tfgrid_actions/web3gw/handler.v new file mode 100644 index 00000000..84106b2b --- /dev/null +++ b/lib/threefold/tfgrid_actions/web3gw/handler.v @@ -0,0 +1,38 @@ +module web3gw + +import log { Logger } +import freeflowuniverse.herolib.core.playbook { Action } +import freeflowuniverse.herolib.data.rpcwebsocket { RpcWsClient } +import freeflowuniverse.herolib.threefold.tfgrid_actions.clients { Clients } + +@[heap] +pub struct Web3GWHandler { +pub mut: + logger Logger + clients Clients + handlers map[string]fn (Action) ! +} + +pub fn new(mut rpc RpcWsClient, logger &Logger, mut wg_clients Clients) Web3GWHandler { + mut h := Web3GWHandler{ + logger: logger + clients: wg_clients + } + h.handlers = { + 'keys.define': h.keys_define + 'money.send': h.money_send + 'money.swap': h.money_swap + 'money.balance': h.money_balance + } + return h +} + +pub fn (mut h Web3GWHandler) handle_action(action Action) ! { + key := '${action.actor}.${action.name}' + if key in h.handlers { + handler := h.handlers[key] + handler(action)! 
+ } else { + h.logger.error('unknown actor: ${action.actor}') + } +} diff --git a/lib/threefold/tfgrid_actions/web3gw/keys.v b/lib/threefold/tfgrid_actions/web3gw/keys.v new file mode 100644 index 00000000..23b41fb0 --- /dev/null +++ b/lib/threefold/tfgrid_actions/web3gw/keys.v @@ -0,0 +1,47 @@ +module web3gw + +import freeflowuniverse.herolib.core.playbook { Action } + +pub fn (mut h Web3GWHandler) keys_define(action Action) ! { + tfc_mnemonic := action.params.get_default('mnemonic', '')! + tfc_network := action.params.get_default('network', 'main')! + if tfc_mnemonic != '' { + h.clients.tfc_client.load( + network: tfc_network + mnemonic: tfc_mnemonic + )! + h.clients.tfg_client.load( + network: tfc_network + mnemonic: tfc_mnemonic + )! + } + + btc_host := action.params.get_default('bitcoin_host', '')! + btc_user := action.params.get_default('bitcoin_user', '')! + btc_pass := action.params.get_default('bitcoin_pass', '')! + if btc_host != '' || btc_user != '' || btc_pass != '' { + h.clients.btc_client.load( + host: btc_host + user: btc_user + pass: btc_pass + )! + } + + eth_url := action.params.get_default('ethereum_url', '')! + eth_secret := action.params.get_default('ethereum_secret', '')! + if eth_url != '' || eth_secret != '' { + h.clients.eth_client.load( + url: eth_url + secret: eth_secret + )! + } + + str_network := action.params.get_default('stellar_network', 'public')! + str_secret := action.params.get_default('stellar_secret', '')! + if str_secret != '' { + h.clients.str_client.load( + network: str_network + secret: str_secret + )! 
+ } +} diff --git a/lib/threefold/tfgrid_actions/web3gw/money.v b/lib/threefold/tfgrid_actions/web3gw/money.v new file mode 100644 index 00000000..ca92b362 --- /dev/null +++ b/lib/threefold/tfgrid_actions/web3gw/money.v @@ -0,0 +1,152 @@ +module web3gw + +import freeflowuniverse.herolib.core.playbook { Action } +import strconv + +const default_currencies = { + 'bitcoin': 'btc' + 'ethereum': 'eth' + 'stellar': 'xlm' + 'tfchain': 'tft' +} + +pub fn (mut h Web3GWHandler) money_send(action Action) ! { + channel := action.params.get('channel')! + bridge_to := action.params.get_default('channel_to', '')! + to := action.params.get('to')! + amount := action.params.get('amount')! + + if bridge_to != '' { + if channel == 'ethereum' && bridge_to == 'stellar' { + hash_bridge_to_stellar := h.clients.eth_client.bridge_to_stellar( + amount: amount + destination: to + )! + h.clients.str_client.await_transaction_on_eth_bridge(hash_bridge_to_stellar)! + h.logger.info('bridge to stellar done') + } else if channel == 'stellar' && bridge_to == 'ethereum' { + res := h.clients.str_client.bridge_to_eth( + amount: amount + destination: to + )! + h.logger.info(res) + } else if channel == 'stellar' && bridge_to == 'tfchain' { + mut twin_id := strconv.atoi(to) or { 0 } + if twin_id == 0 { + // make call for tfchain to get tht twin_id from address + res := h.clients.tfc_client.get_twin_by_pubkey(to)! + twin_id = int(res) + } + + hash_bridge_to_tfchain := h.clients.str_client.bridge_to_tfchain( + amount: amount + twin_id: u32(twin_id) + )! + h.clients.tfc_client.await_transaction_on_tfchain_bridge(hash_bridge_to_tfchain)! + h.logger.info('bridge to tfchain done') + } else if channel == 'tfchain' && bridge_to == 'stellar' { + h.clients.tfc_client.swap_to_stellar( + amount: amount.u64() + target_stellar_address: to + )! 
+ } else { + return error('unsupported bridge') + } + } else { + match channel { + 'bitcoin' { + res := h.clients.btc_client.send_to_address( + address: to + amount: amount.i64() + )! + h.logger.info(res) + } + 'stellar' { + res := h.clients.str_client.transfer( + destination: to + amount: amount + )! + h.logger.info(res) + } + 'ethereum' { + res := h.clients.eth_client.transfer( + destination: to + amount: amount + )! + h.logger.info(res) + } + 'tfchain' { + h.clients.tfc_client.transfer( + destination: to + amount: amount.u64() + )! + h.logger.info('transfered') + } + else { + return error('Unknown channel: ${channel}') + } + } + } +} + +pub fn (mut h Web3GWHandler) money_swap(action Action) ! { + from := action.params.get('from')! + to := action.params.get('to')! + amount := action.params.get('amount')! + + if from == 'eth' && to == 'tft' { + res := h.clients.eth_client.swap_eth_for_tft(amount)! + h.logger.info(res) + } else if from == 'tft' && to == 'eth' { + res := h.clients.eth_client.swap_tft_for_eth(amount)! + h.logger.info(res) + } else if from == 'tft' && to == 'xlm' { + res := h.clients.str_client.swap( + amount: amount + source_asset: from + destination_asset: to + )! + h.logger.info(res) + } else if from == 'xlm' && to == 'tft' { + res := h.clients.str_client.swap( + amount: amount + source_asset: from + destination_asset: to + )! + h.logger.info(res) + } else { + return error('unsupported swap') + } +} + +pub fn (mut h Web3GWHandler) money_balance(action Action) ! { + channel := action.params.get('channel')! + mut currency := action.params.get_default('currency', '')! + + if currency == '' { + currency = default_currencies[channel]! + } + + if channel == 'bitcoin' { + account := action.params.get('account')! + res := h.clients.btc_client.get_balance(account)! + h.logger.info('balance on ${channel} is ${res}') + } else if channel == 'ethereum' && currency == 'eth' { + address := h.clients.eth_client.address()! 
+ res := h.clients.eth_client.balance(address)! + h.logger.info('balance on ${channel} is ${res}') + } else if channel == 'ethereum' && currency == 'tft' { + res := h.clients.eth_client.tft_balance()! + h.logger.info('balance on ${channel} is ${res}') + } else if channel == 'stellar' { + address := h.clients.str_client.address()! + res := h.clients.str_client.balance(address)! + h.logger.info('balance on ${channel} is ${res}') + } else if channel == 'tfchain' { + address := h.clients.tfc_client.address()! + res := h.clients.tfc_client.balance(address)! + h.logger.info('balance on ${channel} is ${res}') + } else { + return error('unsupported channel. should be one of: ${default_currencies.keys()}') + } +} diff --git a/lib/threefold/tfrobot/README.md b/lib/threefold/tfrobot/README.md new file mode 100644 index 00000000..e0774f13 --- /dev/null +++ b/lib/threefold/tfrobot/README.md @@ -0,0 +1,3 @@ +# TFRobot + +Wrapper for TFGrid mass deployer `tfrobot` \ No newline at end of file diff --git a/lib/threefold/tfrobot/cancel.v b/lib/threefold/tfrobot/cancel.v new file mode 100644 index 00000000..8d4463a8 --- /dev/null +++ b/lib/threefold/tfrobot/cancel.v @@ -0,0 +1,46 @@ +module tfrobot + +import json +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.osal + +pub struct CancelConfig { +mut: + name string @[required] + mnemonic string @[required] + network Network @[required] + node_groups []CancelGroup @[required] +} + +pub struct CancelGroup { + name string @[required] +} + +pub fn (mut robot TFRobot[Config]) cancel(mut config CancelConfig) ! { + cfg := robot.config()! + if config.mnemonic == '' { + config.mnemonic = cfg.mnemonics + } + config.network = Network.from(cfg.network)! + check_cancel_config(config)! + + mut cancel_file := pathlib.get_file( + path: '${tfrobot_dir}/deployments/${config.name}_cancel.json' + create: true + )! + + cancel_file.write(json.encode(config))! 
+ osal.exec( + cmd: 'tfrobot cancel -c ${cancel_file.path}' + stdout: true + )! +} + +fn check_cancel_config(config CancelConfig) ! { + if config.node_groups.len == 0 { + return error('No node groups specified to cancel.') + } + if config.node_groups.any(it.name == '') { + return error('Cannot cancel deployment without node_group name.') + } +} diff --git a/lib/threefold/tfrobot/cancel_test.v b/lib/threefold/tfrobot/cancel_test.v new file mode 100644 index 00000000..0d365eab --- /dev/null +++ b/lib/threefold/tfrobot/cancel_test.v @@ -0,0 +1,69 @@ +module tfrobot + +import os +import toml + +__global ( + mnemonics string + ssh_key string +) + +const test_name = 'cancel_test' +const test_flist = 'https://hub.grid.tf/mariobassem1.3bot/threefolddev-holochain-latest.flist' +const test_entrypoint = '/usr/local/bin/entrypoint.sh' + +fn testsuite_begin() ! { + env := toml.parse_file(os.dir(@FILE) + '/.env') or { toml.Doc{} } + mnemonics = os.getenv_opt('TFGRID_MNEMONIC') or { + env.value_opt('TFGRID_MNEMONIC') or { + panic('TFGRID_MNEMONIC variable should either be set as environment variable or set in .env file for this test') + }.string() + } + ssh_key = os.getenv_opt('SSH_KEY') or { + env.value_opt('SSH_KEY') or { + panic('SSH_KEY variable should either be set as environment variable or set in .env file for this test') + }.string() + } +} + +fn test_cancel() ! { + mut robot := new()! + result := robot.deploy( + name: '${test_name}_deployment' + mnemonic: mnemonics + network: .main + node_groups: [ + NodeGroup{ + name: '${test_name}_group' + nodes_count: 1 + free_cpu: 1 + free_mru: 256 + }, + ] + vms: [ + VMConfig{ + name: '${test_name}_vm' + vms_count: 1 + cpu: 1 + mem: 256 + node_group: '${test_name}_group' + ssh_key: '${test_name}_key' + entry_point: test_entrypoint + flist: test_flist + }, + ] + ssh_keys: { + '${test_name}_key': ssh_key + } + )! 
+ + assert result.ok.keys() == ['${test_name}_group'] + robot.cancel( + name: '${test_name}_deployment' + mnemonic: mnemonics + network: .main + node_groups: [CancelGroup{ + name: '${test_name}_group' + }] + )! +} diff --git a/lib/threefold/tfrobot/deploy.v b/lib/threefold/tfrobot/deploy.v new file mode 100644 index 00000000..138aa4af --- /dev/null +++ b/lib/threefold/tfrobot/deploy.v @@ -0,0 +1,184 @@ +module tfrobot + +import freeflowuniverse.herolib.clients.redisclient +import json +import os +import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.core.pathlib +import freeflowuniverse.herolib.osal +import freeflowuniverse.herolib.osal.sshagent + +const tfrobot_dir = '${os.home_dir()}/hero/tfrobot' // path to tfrobot dir in fs + +pub struct DeployConfig { +pub mut: + name string + mnemonic string + network Network = .main + node_groups []NodeGroup @[required] + vms []VMConfig @[required] + ssh_keys map[string]string + debug bool +} + +pub struct NodeGroup { + name string + nodes_count int @[required] + free_cpu int @[required] // number of logical cores + free_mru int @[required] // amount of memory in GB + free_ssd int // amount of ssd storage in GB + free_hdd int // amount of hdd storage in GB + dedicated bool // are nodes dedicated + public_ip4 bool + public_ip6 bool + certified bool // should the nodes be certified(if false the nodes could be certified of diyed) + region string // region could be the name of the continents the nodes are located in (africa, americas, antarctic, antarctic ocean, asia, europe, oceania, polar) +} + +pub struct VMConfig { +pub mut: + name string @[required] + vms_count int = 1 @[required] + node_group string + cpu int = 4 @[required] + mem int = 4 @[required] // in GB + public_ip4 bool + public_ip6 bool + ygg_ip bool = true + mycelium_ip bool = true + flist string @[required] + entry_point string @[required] + root_size int = 20 + ssh_key string + env_vars map[string]string +} + +pub struct DeployResult { 
+pub: + ok map[string][]VMOutput + error map[string]string +} + +pub struct VMOutput { +pub mut: + name string @[json: 'Name'; required] + network_name string @[json: 'NetworkName'; required] + node_group string + deployment_name string + public_ip4 string @[json: 'PublicIP4'; required] + public_ip6 string @[json: 'PublicIP6'; required] + yggdrasil_ip string @[json: 'YggIP'; required] + mycelium_ip string @[json: 'MyceliumIP'; required] + ip string @[json: 'IP'; required] + mounts []Mount @[json: 'Mounts'; required] + node_id u32 @[json: 'NodeID'] + contract_id u64 @[json: 'ContractID'] +} + +pub struct Mount { +pub: + disk_name string + mount_point string +} + +// get all keys from ssh_agent and add to the config +pub fn sshagent_keys_add(mut config DeployConfig) ! { + mut ssha := sshagent.new()! + if ssha.keys.len == 0 { + return error('no ssh-keys found in ssh-agent, cannot add to tfrobot deploy config.') + } + for mut key in ssha.keys_loaded()! { + config.ssh_keys[key.name] = key.keypub()!.trim('\n') + } +} + +pub fn (mut robot TFRobot[Config]) deploy(config_ DeployConfig) !DeployResult { + mut config := config_ + cfg := robot.config()! + if config.mnemonic == '' { + config.mnemonic = cfg.mnemonics + } + config.network = Network.from(cfg.network)! + + if config.ssh_keys.len == 0 { + return error('no ssh-keys found in config') + } + + if config.node_groups.len == 0 { + return error('there are no node requirement groups defined') + } + + node_group := config.node_groups.first().name + + for mut vm in config.vms { + if vm.ssh_key.len == 0 { + vm.ssh_key = config.ssh_keys.keys().first() // first one of the dict + } + if vm.ssh_key !in config.ssh_keys { + return error('Could not find specified sshkey: ${vm.ssh_key} in known sshkeys.\n${config.ssh_keys.values()}') + } + if vm.node_group == '' { + vm.node_group = node_group + } + } + + check_deploy_config(config)! 
+ + mut config_file := pathlib.get_file( + path: '${tfrobot_dir}/deployments/${config.name}_config.json' + create: true + )! + mut output_file := pathlib.get_file( + path: '${tfrobot_dir}/deployments/${config.name}_output.json' + create: false + )! + config_json := json.encode(config) + config_file.write(config_json)! + cmd := 'tfrobot deploy -c ${config_file.path} -o ${output_file.path}' + if config.debug { + console.print_debug(config.str()) + console.print_debug(cmd) + } + _ := osal.exec( + cmd: cmd + stdout: true + ) or { return error('TFRobot command ${cmd} failed:\n${err}') } + output := output_file.read()! + mut res := json.decode(DeployResult, output)! + + if res.ok.len == 0 { + return error('No vm was deployed, empty result') + } + + mut redis := redisclient.core_get()! + + redis.hset('tfrobot:${config.name}', 'config', config_json)! + for groupname, mut vms in res.ok { + for mut vm in vms { + if config.debug { + console.print_header('vm deployed: ${vm.name}') + console.print_debug(vm.str()) + } + vm.node_group = groupname // remember the groupname + vm.deployment_name = config.name + vm_json := json.encode(vm) + redis.hset('tfrobot:${config.name}', vm.name, vm_json)! + } + } + return res +} + +fn check_deploy_config(config DeployConfig) ! { + // Checking if configuration is valid. 
For instance that there is no ssh_key key that isnt defined, + // or that the specified node group of a vm configuration exists + vms := config.vms.filter(it.node_group !in config.node_groups.map(it.name)) + if vms.len > 0 { + error_msgs := vms.map('Node group: `${it.node_group}` for VM: `${it.name}`') + return error('${error_msgs.join(',')} not found.') + } + + unknown_keys := config.vms.filter(it.ssh_key !in config.ssh_keys).map(it.ssh_key) + if unknown_keys.len > 0 { + return error('SSH Keys [${unknown_keys.join(',')}] not found.') + } +} diff --git a/lib/threefold/tfrobot/deploy_test.v b/lib/threefold/tfrobot/deploy_test.v new file mode 100644 index 00000000..53a4d69f --- /dev/null +++ b/lib/threefold/tfrobot/deploy_test.v @@ -0,0 +1,64 @@ +module tfrobot + +import os +import toml + +__global ( + mnemonics string + ssh_key string +) + +fn testsuite_begin() ! { + env := toml.parse_file(os.dir(@FILE) + '/.env') or { toml.Doc{} } + mnemonics = os.getenv_opt('TFGRID_MNEMONIC') or { + env.value_opt('TFGRID_MNEMONIC') or { + panic('TFGRID_MNEMONIC variable should either be set as environment variable or set in .env file for this test') + }.string() + } + ssh_key = os.getenv_opt('SSH_KEY') or { + env.value_opt('SSH_KEY') or { + panic('SSH_KEY variable should either be set as environment variable or set in .env file for this test') + }.string() + } +} + +fn test_deploy() ! { + mut robot := new()! + result := robot.deploy( + name: 'test' + mnemonic: mnemonics + network: .main + node_groups: [ + NodeGroup{ + name: 'test_group' + nodes_count: 1 + free_cpu: 1 + free_mru: 256 + }, + ] + vms: [ + VMConfig{ + name: 'test' + vms_count: 1 + cpu: 1 + mem: 256 + node_group: 'test_group' + ssh_key: 'test_key' + entry_point: '/usr/local/bin/entrypoint.sh' + flist: 'https://hub.grid.tf/mariobassem1.3bot/threefolddev-holochain-latest.flist' + }, + ] + ssh_keys: { + 'test_key': ssh_key + } + )! 
+ + assert result.error.keys().len == 0 + assert result.ok.keys() == ['test_group'] + assert result.ok['test_group'].len == 1 + assert result.ok['test_group'][0].name == 'test0' + assert result.ok['test_group'][0].public_ip4 == '' + assert result.ok['test_group'][0].public_ip6 == '' + assert result.ok['test_group'][0].planetary_ip == '' + assert result.ok['test_group'][0].mounts.len == 0 +} diff --git a/lib/threefold/tfrobot/factory.v b/lib/threefold/tfrobot/factory.v new file mode 100644 index 00000000..543f22c0 --- /dev/null +++ b/lib/threefold/tfrobot/factory.v @@ -0,0 +1,80 @@ +module tfrobot + +import freeflowuniverse.herolib.installers.threefold.tfrobot as tfrobot_installer +import freeflowuniverse.herolib.core.base +import freeflowuniverse.herolib.ui +import freeflowuniverse.herolib.ui.console + +pub struct TFRobot[T] { + base.BaseConfig[T] +pub mut: + jobs map[string]Job +} + +@[params] +pub struct Config { +pub mut: + configtype string = 'tfrobot' // needs to be defined + mnemonics string + network string = 'main' +} + +pub fn get(instance string) !TFRobot[Config] { + tfrobot_installer.install(reset: true)! + mut robot := TFRobot[Config]{} + robot.init('tfrobot', instance, .get)! + return robot +} + +pub fn configure(instance string, config_ Config) !TFRobot[Config] { + // tfrobot_installer.install()! + mut config := config_ + mut robot := get(instance)! + mut cfg := robot.config()! + cfg = &config + robot.init('tfrobot', instance, .set, cfg)! + return robot +} + +// pub fn heroplay(args play.PLayBookAddArgs) ! { +// // make session for configuring from heroscript +// mut session := play.session_new(session_name: 'config')! +// session.playbook_add(path: args.path, text: args.text, git_url: args.git_url)! +// for mut action in session.plbook.find(filter: 'tfrobot.define')! { +// mut p := action.params +// instance := p.get_default('instance', 'default')! +// mut cl := get(instance: instance)! +// mut cfg := cl.config()! 
+// cfg.description = p.get('description')! +// cfg.mnemonics = p.get('mnemonics')! +// cfg.network = p.get('network')! +// cl.config_save()! +// } +// } + +// pub fn (mut self TFRobot[Config]) config_interactive() ! { +// mut myui := ui.new()! +// console.clear() +// console.print_debug('\n## Configure tfrobot') +// console.print_debug('========================\n\n') + +// mut cfg := self.config()! + +// self.instance = myui.ask_question( +// question: 'name for tfrobot' +// default: self.instance +// )! +// cfg.mnemonics = myui.ask_question( +// question: 'please enter your mnemonics here' +// minlen: 24 +// default: cfg.mnemonics +// )! + +// envs := ['main', 'qa', 'test', 'dev'] +// cfg.network = myui.ask_dropdown( +// question: 'choose environment' +// items: envs +// )! + +// self.config_save()! +// } diff --git a/lib/threefold/tfrobot/factory_test.v b/lib/threefold/tfrobot/factory_test.v new file mode 100644 index 00000000..4c21a601 --- /dev/null +++ b/lib/threefold/tfrobot/factory_test.v @@ -0,0 +1,5 @@ +module tfrobot + +fn test_new() { + bot := new()! 
+} diff --git a/lib/threefold/tfrobot/job.v b/lib/threefold/tfrobot/job.v new file mode 100644 index 00000000..1c4eebb1 --- /dev/null +++ b/lib/threefold/tfrobot/job.v @@ -0,0 +1,153 @@ +module tfrobot + +// import os +// import arrays +// import freeflowuniverse.herolib.core.pathlib +// import freeflowuniverse.herolib.osal +// import json +// import freeflowuniverse.herolib.ui.console + +// VirtualMachine represents the VM info outputted by tfrobot +pub struct VirtualMachine { + name string + ip4 string + ip6 string + yggip string + ip string + // mounts []string +} + +pub struct Job { +pub: + name string + network Network + mneumonic string @[required] +pub mut: + ssh_keys map[string]string + deployments []Deployment + vms map[string]VirtualMachine +} + +// Deployment is an instruction to deploy a quantity of VMs with a given configuration +pub struct Deployment { +pub: + config VMConfig + quantity int +} + +// pub struct VMConfig { +// pub: +// name string +// region string +// nrcores int +// flist string +// memory_gb int +// ssh_key string +// pub_ip bool +// env_vars map[string]string +// } + +// pub struct Output { +// ok map[string][]VMOutput +// error map[string]string +// } + +// pub struct VMOutput { +// name string +// public_ip4 string +// public_ip6 string +// ygg_ip string +// ip string +// mounts []Mount +// } + +// pub struct Mount { +// disk_name string +// mount_point string +// } + +pub enum Network { + main + dev + qa + test +} + +pub fn (mut r TFRobot[Config]) job_new(job Job) !Job { + r.jobs[job.name] = job + return job +} + +pub fn (mut j Job) deploy_vms(config VMConfig, quantity int) { + j.deployments << Deployment{ + config: config + quantity: quantity + } +} + +// pub fn (mut j Job) run() ![]VMOutput { +// if j.deployments.len == 0 { +// return error('Nothing to deploy.') +// } +// if j.ssh_keys.keys().len == 0 { +// return error('Job requires at least one ssh key.') +// } + +// jsonfile := pathlib.get_file( +// path: 
'${os.home_dir()}/hero/tfrobot/jobs/${j.name}.json' +// create: true +// )! +// config := $tmpl('./templates/config.json') +// // console.print_debug('config file*******\n${config}\n****') +// pathlib.template_write(config, jsonfile.path, true)! +// j. +// result := osal.exec(cmd: 'tfrobot deploy -c ${jsonfile.path}', stdout: true)! + +// vms := parse_output(result.output)! +// // for vm in vms { +// // j.vms[vm.name] = vm +// // } +// return vms +// } + +pub fn (j Job) vm_get(name string) ?VirtualMachine { + if name !in j.vms { + return none + } + return j.vms[name] +} + +pub fn (mut j Job) add_ssh_key(name string, key string) { + j.ssh_keys[name] = key +} + +// // parse_output parses the output of the tfrobot cli command +// fn parse_output(output string) ![]VMOutput { +// res := json.decode(Output, output) or { return error('invalid json syntax. output:\n${output}') } +// if res.error.len > 0{ +// return error('TFRobot CLI Error, output:\n${output}') +// } + +// mut vms := []VMOutput{} +// for k, v in res.ok{ +// vms << v +// } + +// return vms +// // if !output.trim_space().starts_with('ok:') { +// // return error('TFRobot CLI Error, output:\n${output}') +// // } + +// // to_parse := output.trim_space().trim_string_left('ok:\n') +// // trimmed := to_parse.trim_space().trim_string_left('[').trim_string_right(']').trim_space() +// // vms_lst := arrays.chunk(trimmed.split_into_lines()[1..], 6) +// // vms := vms_lst.map(VirtualMachine{ +// // name: it[0].trim_space().trim_string_left('name: ') +// // ip4: it[1].trim_string_left('publicip4: ') +// // ip6: it[2].trim_string_left('publicip6: ') +// // yggip: it[3].trim_string_left('yggip: ') +// // ip: it[4].trim_string_left('ip: ') +// // mounts: [] +// // }) +// // return vms +// } diff --git a/lib/threefold/tfrobot/job_test.v b/lib/threefold/tfrobot/job_test.v new file mode 100644 index 00000000..ee9be4b1 --- /dev/null +++ b/lib/threefold/tfrobot/job_test.v @@ -0,0 +1,35 @@ +module tfrobot + +const test_ssh_key = 
'' +const test_mneumonic = '' +const test_flist = 'https://hub.grid.tf/mariobassem1.3bot/threefolddev-holochain-latest.flist' + +fn test_job_new() { + mut bot := new()! + bot.job_new( + name: 'test_job' + mneumonic: test_mneumonic + )! +} + +fn test_job_run() { + mut bot := new()! + mut job := bot.job_new( + name: 'test_job' + mneumonic: test_mneumonic + )! + + job.add_ssh_key('my_key', test_ssh_key) + vm_config := VMConfig{ + name: 'holo_vm' + region: 'europe' + nrcores: 4 + memory_mb: 4096 + ssh_key: 'my_key' + flist: test_flist + pub_ip: true + } + + job.deploy_vms(vm_config, 10) + job.run()! +} diff --git a/lib/threefold/tfrobot/templates/config.json b/lib/threefold/tfrobot/templates/config.json new file mode 100644 index 00000000..e9c17a3a --- /dev/null +++ b/lib/threefold/tfrobot/templates/config.json @@ -0,0 +1,57 @@ +{ + "node_groups": + @for deployment in j.deployments + [ + { + "name": "@{deployment.config.name}_group", + "nodes_count": @{deployment.quantity}, + "free_cpu": @{deployment.config.nrcores}, + "free_mru": @{deployment.config.memory_gb}, + "free_ssd": 100, + "free_hdd": 50, + "dedicated": false, + "public_ip4": @{deployment.config.pub_ip}, + "public_ip6": true, + "certified": false, + "region": "@{deployment.config.region}" + } + ], + @end + @for deployment in j.deployments + "vms": [ + { + "name": "@{deployment.config.name}", + "vms_count": @{deployment.quantity}, + "node_group": "@{deployment.config.name}_group", + "cpu": @{deployment.config.nrcores}, + "mem": @{deployment.config.memory_gb}, + "ssd": [ + { + "size": 15, + "mount_point": "/mnt/ssd" + } + ], + "public_ip4": @{deployment.config.pub_ip}, + "public_ip6": true, + "flist": "@{deployment.config.flist}", + "entry_point": "/usr/local/bin/entrypoint.sh", + "root_size": 0, + "ssh_key": "@{deployment.config.ssh_key}", + "env_vars": { + @for key, val in deployment.config.env_vars + "@{key}": "@{val}" + @end + } + } + ], + @end + "ssh_keys": { + @for key, val in j.ssh_keys + "@{key}": 
"${val}" + @end + }, + "mnemonic": "@{j.mneumonic}", + "network": "@{j.network}", + "max_retries": 5 + } + \ No newline at end of file diff --git a/lib/threefold/tfrobot/templates/config.yaml b/lib/threefold/tfrobot/templates/config.yaml new file mode 100644 index 00000000..b01c38b7 --- /dev/null +++ b/lib/threefold/tfrobot/templates/config.yaml @@ -0,0 +1,40 @@ +node_groups: +@for deployment in j.deployments + - name: @{deployment.config.name}_group + nodes_count: @{deployment.quantity} # amount of nodes to be found + free_cpu: @{deployment.config.nrcores} # number of logical cores + free_mru: @{deployment.config.memory_gb} # amount of memory in GB + free_ssd: 100 # amount of ssd storage in GB + free_hdd: 50 # amount of hdd storage in GB + dedicated: false # are nodes dedicated + public_ip4: @{deployment.config.pub_ip} + public_ip6: true + certified: false # should the nodes be certified(if false the nodes could be certified of diyed) + region: @{deployment.config.region} # region could be the name of the continents the nodes are located in (africa, americas, antarctic, antarctic ocean, asia, europe, oceania, polar) +@end +vms: +@for deployment in j.deployments + - name: @{deployment.config.name} + vms_count: @{deployment.quantity} # amount of vms with the same configurations + node_group: @{deployment.config.name}_group # the name of the predefined group of nodes + cpu: @{deployment.config.nrcores} # number of logical cores + mem: @{deployment.config.memory_gb} # amount of memory in GB + public_ip4: @{deployment.config.pub_ip} + public_ip6: true + flist: @{deployment.config.flist} + entry_point: /usr/local/bin/entrypoint.sh + root_size: 0 # root size in GB + ssh_key: @{deployment.config.ssh_key} # the name of the predefined ssh key + env_vars: # env vars are passed to the newly created vms + @for key, val in deployment.config.env_vars + @{key}: "${val}" + @end +@end + +ssh_keys: # map of ssh keys with key=name and value=the actual ssh key +@for key, val in 
j.ssh_keys + @{key}: "${val}" +@end + +mnemonic: "@{j.mneumonic}" # mnemonic of the user +network: @{j.network} # eg: main, test, qa, dev diff --git a/lib/threefold/tfrobot/tfrobot_redis.v b/lib/threefold/tfrobot/tfrobot_redis.v new file mode 100644 index 00000000..e4e3a279 --- /dev/null +++ b/lib/threefold/tfrobot/tfrobot_redis.v @@ -0,0 +1,47 @@ +module tfrobot + +import json +// import freeflowuniverse.herolib.ui.console +import freeflowuniverse.herolib.clients.redisclient + +pub fn config_get(configname string) !DeployConfig { + mut redis := redisclient.core_get()! + data := redis.hget('tfrobot:${configname}', 'config')! + if data.len == 0 { + return error("couldn't find tfrobot config with name:${configname}") + } + return json.decode(DeployConfig, data)! +} + +pub fn vms_get(configname string) ![]VMOutput { + mut vms := []VMOutput{} + mut redis := redisclient.core_get()! + for vmname in redis.hkeys('tfrobot:${configname}')! { + if vmname == 'config' { + continue + } + vms << vm_get(configname, vmname)! + } + return vms +} + +pub fn vm_get(configname string, name string) !VMOutput { + mut redis := redisclient.core_get()! + data := redis.hget('tfrobot:${configname}', name)! + if data.len == 0 { + return error("couldn't find tfrobot config with name:${name}") + } + return json.decode(VMOutput, data)! +} + +pub fn vm_config_get(configname string, name string) !VMConfig { + mut config := config_get(configname)! 
+ // console.print_debug(name) + for vm in config.vms { + // console.print_debug(vm) + if name.starts_with(vm.name) { + return vm + } + } + return error('Could not find vmconfig for ${configname}:${name}') +} diff --git a/lib/threefold/tfrobot/vm.v b/lib/threefold/tfrobot/vm.v new file mode 100644 index 00000000..9679317d --- /dev/null +++ b/lib/threefold/tfrobot/vm.v @@ -0,0 +1,214 @@ +module tfrobot + +// import os +import freeflowuniverse.herolib.builder +import freeflowuniverse.herolib.osal +// import freeflowuniverse.herolib.servers.daguserver as dagu +import freeflowuniverse.herolib.clients.daguclient as dagu_client +import freeflowuniverse.herolib.ui.console +import time + +// pub fn (vm VMOutput) ssh_interactive(key_path string) ! { +// // b := builder.new() +// // node := b.node_new(ipaddr:"root@${vm.ip4}")! +// // node.exec_interactive('${homedir}/hero/bin/install.sh')! +// // time.sleep(15 * time.second) +// if vm.public_ip4 != '' { +// osal.execute_interactive('ssh -i ${key_path} root@${vm.public_ip4.all_before('/')}')! +// } else if vm.yggdrasil_ip != '' { +// osal.execute_interactive('ssh -i ${key_path} root@${vm.yggdrasil_ip}')! +// } else { +// return error('no public nor planetary ip available to use') +// } +// } + +@[params] +pub struct NodeArgs { +pub mut: + ip4 bool = true + ip6 bool = true + planetary bool = true + mycelium bool = true + timeout int = 120 // timeout in sec +} + +// return ssh node (can be used to do actions remotely) +// will check all available channels till it can ssh into the node +pub fn (vm VMOutput) node(args NodeArgs) !&builder.Node { + mut b := builder.new()! 
+ start_time := time.now().unix_milli() + mut run_time := 0.0 + for true { + if args.ip4 && vm.public_ip4.len > 0 { + console.print_debug('test ipv4 to: ${vm.public_ip4} for ${vm.name}') + if osal.tcp_port_test(address: vm.public_ip4, port: 22, timeout: 2000) { + console.print_debug('SSH port test ok') + return b.node_new( + ipaddr: 'root@${vm.public_ip4}' + name: '${vm.deployment_name}_${vm.name}' + ) + } + } + if args.ip6 && vm.public_ip6.len > 0 { + console.print_debug('test ipv6 to: ${vm.public_ip6} for ${vm.name}') + if osal.tcp_port_test(address: vm.public_ip6, port: 22, timeout: 2000) { + console.print_debug('SSH port test ok') + return b.node_new( + ipaddr: 'root@[${vm.public_ip6}]' + name: '${vm.deployment_name}_${vm.name}' + ) + } + } + if args.planetary && vm.yggdrasil_ip.len > 0 { + console.print_debug('test planetary to: ${vm.yggdrasil_ip} for ${vm.name}') + if osal.tcp_port_test(address: vm.yggdrasil_ip, port: 22, timeout: 2000) { + console.print_debug('SSH port test ok') + return b.node_new( + ipaddr: 'root@[${vm.yggdrasil_ip}]' + name: '${vm.deployment_name}_${vm.name}' + ) + } + } + run_time = time.now().unix_milli() + if run_time > start_time + args.timeout * 1000 { + break + } + time.sleep(100 * time.millisecond) + } + return error("couldn't connect to node, probably timeout.") +} + +pub fn (vm VMOutput) tcpport_addr_get(port int) !string { + start_time := time.now().unix_milli() + mut run_time := 0.0 + for true { + if vm.yggdrasil_ip.len > 0 { + console.print_debug('test planetary for port ${port}: ${vm.yggdrasil_ip} for ${vm.name}') + if osal.tcp_port_test(address: vm.yggdrasil_ip, port: port, timeout: 2000) { + console.print_debug('port test ok') + return vm.yggdrasil_ip + } + } + + // if vm.public_ip4.len>0 { + // console.print_debug("test ipv4 to: ${vm.public_ip4} for ${vm.name}") + // if osal.tcp_port_test(address:vm.public_ip4,port:22, timeout:2000) { + // console.print_debug("SSH port test ok") + // return 
b.node_new(ipaddr:"root@${vm.public_ip4}",name:"${vm.deployment_name}_${vm.name}")! + // } + // } + // if args.ip6 && vm.public_ip6.len>0 { + // console.print_debug("test ipv6 to: ${vm.public_ip6} for ${vm.name}") + // if osal.tcp_port_test(address:vm.public_ip6, port:22, timeout:2000) { + // console.print_debug("SSH port test ok") + // return b.node_new(ipaddr:"root@[${vm.public_ip6}]",name:"${vm.deployment_name}_${vm.name}")! + // } + // } + run_time = time.now().unix_milli() + if run_time > start_time + 20000 { + break + } + time.sleep(100 * time.millisecond) + } + return error("couldn't connect to node, probably timeout.") +} + +// // create new DAG +// // ``` +// // name string // The name of the DAG (required) +// // description ?string // A brief description of the DAG. +// // tags ?string // Free tags that can be used to categorize DAGs, separated by commas. +// // env ?map[string]string // Environment variables that can be accessed by the DAG and its steps. +// // restart_wait_sec ?int // The number of seconds to wait after the DAG process stops before restarting it. +// // hist_retention_days ?int // The number of days to retain execution history (not for log files). +// // delay_sec ?int // The interval time in seconds between steps. +// // max_active_runs ?int // The maximum number of parallel running steps. +// // max_cleanup_time_sec ?int // The maximum time to wait after sending a TERM signal to running steps before killing them. 
+// // ``` +// pub fn (mut vm VMOutput) tasks_new(args_ dagu.DAGArgs) &dagu.DAG { +// mut args := args_ +// mut d := dagu.dag_new( +// name: args.name +// description: args.description +// tags: args.tags +// env: args.env +// restart_wait_sec: args.restart_wait_sec +// hist_retention_days: args.hist_retention_days +// delay_sec: args.delay_sec +// max_active_runs: args.max_active_runs +// max_cleanup_time_sec: args.max_cleanup_time_sec +// ) + +// d.env = { +// 'PATH': '/root/.nix-profile/bin:/root/hero/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:\$PATH' +// } + +// return &d +// } + +// // name is the name of the tasker (dag), which has set of staps we will execute +// pub fn (vm VMOutput) tasks_run(dag &dagu.DAG) ! { +// // console.print_debug(dag) +// r := vm.dagu_addr_get()! +// console.print_debug('connect to dagu on ${vm.name} -> ${r.addr}') +// mut client := dagu_client.get(instance: 'robot_dagu')! +// mut cfg := client.config()! +// cfg.url = 'http://${r.addr}:${r.port}' +// cfg.username = r.username +// cfg.password = r.password + +// if dag.name in client.list_dags()!.dags.map(it.dag.name) { +// console.print_debug('delete dag: ${dag.name}') +// client.delete_dag(dag.name)! +// } + +// console.print_header('send dag to node: ${dag.name}') +// console.print_debug(dag.str()) +// client.new_dag(dag)! // will post it +// client.start_dag(dag.name)! +// } + +// pub fn (vm VMOutput) tasks_see(dag &dagu.DAG) ! { +// r := vm.dagu_addr_get()! +// // http://[302:1d81:cef8:3049:fbe1:69ba:bd8c:52ec]:8081/dags/holochain_scaffold +// cmd3 := "open 'http://[${r.addr}]:8081/dags/${dag.name}'" +// // console.print_debug(cmd3) +// osal.exec(cmd: cmd3)! +// } + +pub fn (vm VMOutput) vscode() ! { + r := vm.dagu_addr_get()! + cmd3 := "open 'http://[${r.addr}]:8080'" + osal.exec(cmd: cmd3)! +} + +pub fn (vm VMOutput) vscode_holochain() ! { + r := vm.dagu_addr_get()! 
+ cmd3 := "open 'http://[${r.addr}]:8080/?folder=/root/Holochain/hello-world'" + osal.exec(cmd: cmd3)! +} + +pub fn (vm VMOutput) vscode_holochain_proxy() ! { + r := vm.dagu_addr_get()! + cmd3 := "open 'http://[${r.addr}]:8080/proxy/8282/'" + osal.exec(cmd: cmd3)! +} + +struct DaguInfo { +mut: + addr string + username string + password string + port int +} + +fn (vm VMOutput) dagu_addr_get() !DaguInfo { + mut vm_config := vm_config_get(vm.deployment_name, vm.name)! + mut env := vm_config.env_vars.clone() + mut r := DaguInfo{} + r.username = env['DAGU_BASICAUTH_USERNAME'] or { 'admin' } + r.password = env['DAGU_BASICAUTH_PASSWORD'] or { 'planetfirst' } + r.port = 8081 + r.addr = vm.tcpport_addr_get(r.port)! + return r +} diff --git a/lib/threefold/tfrobot/vm_deploy.v b/lib/threefold/tfrobot/vm_deploy.v new file mode 100644 index 00000000..a3a1df5b --- /dev/null +++ b/lib/threefold/tfrobot/vm_deploy.v @@ -0,0 +1,102 @@ +module tfrobot + +import rand + +struct VMSpecs { + deployment_name string + name string + nodeid u32 + pub_sshkeys []string + flist string // if any, if used then ostype not used + size u32 // size of the rootfs disk in bytes + cores int // number of virtual cores + memory u32 // ram in mb + ostype OSType +} + +enum OSType { + ubuntu_22_04 + ubuntu_24_04 + arch + alpine +} + +// only connect to yggdrasil and mycelium +pub fn (mut robot TFRobot[Config]) vm_deploy(args_ VMSpecs) !VMOutput { + mut args := args_ + + if args.pub_sshkeys.len == 0 { + return error('at least one ssh key needed to deploy vm') + } + + size := if args.size < 20 { + 20 + } else { + args.size + } + // deploymentstate_db.set(args.deployment_name,"vm_${args.name}",json.encode(VMDeployed))! + + mut ssh_keys := { + 'SSH_KEY': args.pub_sshkeys[0] + } + // QUESTION: how to implement multiple ssh keys + for i, key in args.pub_sshkeys[0..] 
{ + ssh_keys['SSH_KEY${i}'] = key + } + + flist := if args.flist == '' { + 'https://hub.grid.tf/samehabouelsaad.3bot/abouelsaad-grid3_ubuntu20.04-latest.flist' + } else { + args.flist + } + + node_group := 'ng_${args.cores}_${args.memory}_${args.size}_${rand.string(8).to_lower()}' + + config := robot.config()! + mneumonics := config.mnemonics + output := robot.deploy( + name: args.name + mnemonic: mneumonics + network: .main + node_groups: [ + NodeGroup{ + name: node_group + nodes_count: 1 + free_cpu: args.cores + free_mru: int(args.memory) + free_ssd: int(size) + }, + ] + vms: [ + VMConfig{ + name: args.name + vms_count: 1 + cpu: args.cores + mem: int(args.memory) + root_size: int(size) + node_group: node_group + ssh_key: 'SSH_KEY' + flist: flist + entry_point: '/sbin/zinit init' + }, + ] + ssh_keys: ssh_keys + ) or { return error('\nTFRobot deploy error:\n - ${err}') } + + if output.ok.len < 1 { + if output.error.len < 1 { + panic('this should never happen') + } + + err := output.error[output.error.keys()[0]] + return error('failed to deploy vm ${err}') + } + + vm_outputs := output.ok[output.ok.keys()[0]] + if vm_outputs.len != 1 { + panic('this should never happen ${vm_outputs}') + } + + vm_output := vm_outputs[0] + return vm_output +} diff --git a/lib/threefold/tfrobot/vm_deploy_test.v b/lib/threefold/tfrobot/vm_deploy_test.v new file mode 100644 index 00000000..7596f9ee --- /dev/null +++ b/lib/threefold/tfrobot/vm_deploy_test.v @@ -0,0 +1,32 @@ +module tfrobot + +import os +import freeflowuniverse.herolib.osal + +const testdata_dir = '${os.dir(@FILE)}/testdata' + +fn testsuite_begin() ! { + osal.load_env_file('${testdata_dir}/.env')! +} + +fn test_vm_deploy() ! { + mneumonics := os.getenv('MNEUMONICS') + ssh_key := os.getenv('SSH_KEY') + + mut robot := configure('testrobot', + mnemonics: mneumonics + network: 'main' + )! + result := robot.vm_deploy( + deployment_name: 'test_deployment' + name: 'test_vm' + cores: 1 + memory: 256 + pub_sshkeys: [ssh_key] + )! 
+ // panic(result) // debug leftover: would abort before the asserts below run + + assert result.name.starts_with('test_vm') + assert result.yggdrasil_ip.len > 0 + assert result.mycelium_ip.len > 0 +} diff --git a/lib/threefold/tokens/readme.md b/lib/threefold/tokens/readme.md new file mode 100644 index 00000000..4d40155d --- /dev/null +++ b/lib/threefold/tokens/readme.md @@ -0,0 +1,2 @@ + +> TODO:! please make example and see it works \ No newline at end of file diff --git a/lib/threefold/tokens/tokens_fetch.v b/lib/threefold/tokens/tokens_fetch.v new file mode 100644 index 00000000..10d6a5d1 --- /dev/null +++ b/lib/threefold/tokens/tokens_fetch.v @@ -0,0 +1,422 @@ +module tokens + +import json +import freeflowuniverse.herolib.httpcache +import freeflowuniverse.herolib.ui.console + +// +// Raw JSON struct +// +struct Raw_Wallet { + address string + description string + liquid bool + amount string +} + +struct Raw_FoundationAccountInfo { + category string + wallets []Raw_Wallet +} + +struct Raw_StatsTFT { + total_tokens string + total_accounts string + total_locked_tokens string + total_vested_tokens string + total_liquid_foundation_tokens string + total_illiquid_foundation_tokens string + total_liquid_tokens string + foundation_accounts_info []Raw_FoundationAccountInfo + locked_tokens_info []string +} + +struct Raw_Balance { + amount string + asset string +} + +struct Raw_Account { + address string + balances []Raw_Balance + vesting_accounts []Raw_VestingAccount + locked_amounts []Raw_LockedAmount +} + +struct Raw_VestingAccount { + address string + vestingscheme string + balances []Raw_Balance +} + +struct Raw_LockedAmount { + address string + locked_until string + balances []Raw_Balance +} + +struct Raw_StellarBalance { + asset string + balance string +} + +struct Raw_StellarHistory { + ts int + payments int + trades int + balances []Raw_StellarBalance +} + +struct Raw_StellarAccount { + account string + history []Raw_StellarHistory +} + +// +// Improved struct +// +pub struct Wallet { +pub mut: + address string + 
description string + liquid bool + amount f64 +} + +pub struct FoundationAccountInfo { +pub mut: + category string + wallets []Wallet +} + +struct LockedTokensInfo { +pub mut: + amount f64 + until string +} + +struct StatsTFT { +pub mut: + total_tokens f64 + total_accounts f64 + total_locked_tokens f64 + total_vested_tokens f64 + total_liquid_foundation_tokens f64 + total_illiquid_foundation_tokens f64 + total_liquid_tokens f64 + foundation_accounts_info []FoundationAccountInfo + locked_tokens_info []LockedTokensInfo +} + +struct Balance { +pub: + amount f64 + asset string +} + +struct Account { +pub mut: + address string + balances []Balance + vesting_accounts []VestingAccount + locked_amounts []LockedAmount +} + +struct VestingAccount { +pub mut: + address string + vestingscheme string + balances []Balance +} + +struct LockedAmount { +pub mut: + address string + locked_until string + balances []Balance +} + +struct Group { +pub mut: + name string + distribution f32 // in percent from 0..1 + farmed f64 // in tokens + done f64 + amount f64 + remain f64 +} + +// +// Workflow +// +fn account_url(account string) string { + return 'https://statsdata.threefoldtoken.com/stellar_stats/api/account/' + account +} + +fn parsef(f string) f64 { + x := f.replace(',', '') + return x.f64() +} + +fn parse(tft Raw_StatsTFT, tfta Raw_StatsTFT, stellar Raw_StellarAccount) StatsTFT { + mut final := StatsTFT{} + + final.total_tokens = parsef(tft.total_tokens) + parsef(tfta.total_tokens) + final.total_accounts = parsef(tft.total_accounts) + parsef(tfta.total_accounts) + final.total_locked_tokens = parsef(tft.total_locked_tokens) + parsef(tfta.total_locked_tokens) + final.total_vested_tokens = parsef(tft.total_vested_tokens) + parsef(tfta.total_vested_tokens) + final.total_liquid_foundation_tokens = parsef(tft.total_liquid_foundation_tokens) + + parsef(tfta.total_liquid_foundation_tokens) + final.total_illiquid_foundation_tokens = parsef(tft.total_illiquid_foundation_tokens) + + 
parsef(tfta.total_illiquid_foundation_tokens) + final.total_liquid_tokens = parsef(tft.total_liquid_tokens) + parsef(tfta.total_liquid_tokens) + + mut info := map[string]map[string]Wallet{} + src := [tft, tfta] + + // + // FoundationAccountInfo + // + for source in src { + for entry in source.foundation_accounts_info { + for wal in entry.wallets { + mut found := info[entry.category][wal.address] + + found.address = wal.address + found.description = wal.description + found.liquid = wal.liquid + found.amount += parsef(wal.amount) + + info[entry.category][wal.address] = found + } + } + } + + for cat, val in info { + mut accountinfo := FoundationAccountInfo{ + category: cat + } + + for _, wal in val { + accountinfo.wallets << wal + } + + final.foundation_accounts_info << accountinfo + } + + // + // LockedTokensInfo + // + for source in src { + for locked in source.locked_tokens_info { + x := locked.fields() + + final.locked_tokens_info << LockedTokensInfo{ + amount: parsef(x[0]) + until: x[3] + ' ' + x[4] + } + } + } + + return final +} + +pub fn parse_special(s StatsTFT) map[string]Group { + // fixed 4 billion tokens + // master_total_tokens := f64(4000000000) + total_tokens := s.total_tokens + + // mut liquidity := tokens.FoundationAccountInfo{} + mut contribution := FoundationAccountInfo{} + mut council := FoundationAccountInfo{} + + for info in s.foundation_accounts_info { + if info.category == 'threefold contribution wallets' { + contribution = info + } + + /* + if info.category == "liquidity wallets" { + liquidity = info + } + */ + + if info.category == 'wisdom council wallets' { + council = info + } + } + + // console.print_debug(liquidity) + + mut group := map[string]Group{} + + // Farming rewards after April 19 2018 (***) + group['farming-rewards-2018'] = Group{ + name: 'Farming rewards after April 19 2018' + distribution: 0.75 + done: s.total_tokens - 695000000 // Genesis pool + } + + mut grant_amount := f64(0) + + for wallet in contribution.wallets { + if 
wallet.description == 'TF Grants Wallet' { + grant_amount += f64(wallet.amount) + } + } + + for wallet in council.wallets { + if wallet.description == 'TF Grants Wisdom' { + grant_amount += f64(wallet.amount) + } + } + + // Ecosystem Grants (*) + group['ecosystem-grants'] = Group{ + name: 'Ecosystem Grants' + distribution: 0.03 + done: grant_amount + } + + // Promotion & Marketing Effort + group['promotion-marketing'] = Group{ + name: 'Promotion & Marketing Effort ' + distribution: 0.05 + done: 100000000 // estimation + } + + mut liquidity_amount := i64(0) + + for info in s.foundation_accounts_info { + for wallet in info.wallets { + if wallet.liquid == true { + liquidity_amount += i64(wallet.amount) + } + } + } + + // Ecosystem Contribution, Liquidity Exchanges + group['ecosystem-contribution'] = Group{ + name: 'Ecosystem Contribution, Liquidity Exchanges' + distribution: 0.04 + done: liquidity_amount + } + + // Technology Acquisition + Starting Team (40p) + group['technology'] = Group{ + name: 'Technology Acquisition + Starting Team' + distribution: 0.07 + done: 290000000 + } + + // Advisors, Founders & Team + group['advisors-founders'] = Group{ + name: 'Advisors, Founders & Team' + distribution: 0.06 + } + + sum := group['farming-rewards-2018'].done + group['ecosystem-grants'].done + + group['promotion-marketing'].done + group['ecosystem-contribution'].done + + group['technology'].done + + group['advisors-founders'].done = total_tokens - sum + + return group +} + +fn parse_balance(bal Raw_Balance) Balance { + return Balance{ + amount: parsef(bal.amount) + asset: bal.asset + } +} + +fn account_info(account Raw_Account) Account { + mut final := Account{ + address: account.address + } + + for bal in account.balances { + final.balances << parse_balance(bal) + } + + for vest in account.vesting_accounts { + mut vesting := VestingAccount{ + address: vest.address + vestingscheme: vest.vestingscheme + } + + for bal in vest.balances { + vesting.balances << 
parse_balance(bal) + } + + final.vesting_accounts << vesting + } + + for locking in account.locked_amounts { + mut locked := LockedAmount{ + address: locking.address + locked_until: locking.locked_until + } + + for bal in locking.balances { + locked.balances << parse_balance(bal) + } + + final.locked_amounts << locked + } + + return final +} + +pub fn load_tokens() ?StatsTFT { + mut hc := httpcache.newcache() + + urltft := 'https://statsdata.threefoldtoken.com/stellar_stats/api/stats?detailed=true' + urltfta := 'https://statsdata.threefoldtoken.com/stellar_stats/api/stats?detailed=true&tokencode=TFTA' + + // console.print_debug("[+] fetching tokens data from redis") + rtft := hc.getex(urltft, 86400)? + rtfta := hc.getex(urltfta, 86400)? + + // extra stellar account for missing account in tft + addac := 'GB2C5HCZYWNGVM6JGXDWQBJTMUY4S2HPPTCAH63HFAQVL2ALXDW7SSJ7' + addurl := account_url(addac) + rstel := hc.getex(addurl, 86400)? + + tft := json.decode(Raw_StatsTFT, rtft) or { + console.print_debug('Failed to decode json (statsdata: ${urltft})') + return StatsTFT{} + } + + tfta := json.decode(Raw_StatsTFT, rtfta) or { + console.print_debug('Failed to decode json (statsdata: ${urltfta})') + return StatsTFT{} + } + + stellar := json.decode(Raw_StellarAccount, rstel) or { + console.print_debug('Failed to decode json (account: ${addurl})') + return StatsTFT{} + } + + merged := parse(tft, tfta, stellar) + + return merged +} + +pub fn load_account(accid string) ?Account { + mut hc := httpcache.newcache() + + // console.print_debug("[+] fetching account data from redis") + accurl := account_url(accid) + raccount := hc.getex(accurl, 86400)? 
+ + account := json.decode(Raw_Account, raccount) or { + console.print_debug('Failed to decode json (stellar: ${accurl})') + return Account{} + } + + nicer := account_info(account) + + return nicer +} diff --git a/lib/threefold/zerohub/flist.v b/lib/threefold/zerohub/flist.v new file mode 100644 index 00000000..d2485e2e --- /dev/null +++ b/lib/threefold/zerohub/flist.v @@ -0,0 +1,176 @@ +module zerohub + +import net.http +import json +import x.json2 +import os + +pub struct Repository { +pub: + name string + official bool +} + +pub struct FlistInfo { +pub: + name string + size string + updated i64 + type_ string + linktime i64 + target string +} + +pub struct FlistContents { +pub: + regular i32 + failure i32 + directory i32 + symlink string + fullsize i64 + content []File +} + +pub struct File { +pub: + size i64 + path string +} + +pub fn (mut cl ZeroHubClient) get_flists() ![]string { + resp := http.get('https://${cl.url}/api/flist')! + return json.decode([]string, resp.body)! +} + +pub fn (mut cl ZeroHubClient) get_repos() ![]Repository { + resp := http.get('https://${cl.url}/api/repositories')! + return json.decode([]Repository, resp.body)! +} + +pub fn (mut cl ZeroHubClient) get_files() !map[string][]FlistInfo { + resp := http.get('https://${cl.url}/api/fileslist')! + return json.decode(map[string][]FlistInfo, resp.body)! +} + +pub fn (mut cl ZeroHubClient) get_repo_flists(repo_name string) ![]FlistInfo { + resp := http.get('https://${cl.url}/api/flist/${repo_name}')! + return json.decode([]FlistInfo, resp.body)! +} + +pub fn (mut cl ZeroHubClient) get_flist_dump(repo_name string, flist_name string) !FlistContents { + resp := http.get('https://${cl.url}/api/flist/${repo_name}/${flist_name}')! + data := json.decode(FlistContents, resp.body)! + return data +} + +pub fn (mut cl ZeroHubClient) get_me() !json2.Any { + req := http.Request{ + method: http.Method.get + header: cl.header + url: 'https://${cl.url}/api/flist/me' + } + + resp := req.do()! 
+ return json2.raw_decode(resp.body)! +} + +pub fn (mut cl ZeroHubClient) get_my_flist(flist string) !FlistContents { + req := http.Request{ + method: http.Method.get + header: cl.header + url: 'https://${cl.url}/api/flist/me/${flist}' + } + + resp := req.do()! + data := json.decode(FlistContents, resp.body)! + return data +} + +pub fn (mut cl ZeroHubClient) remove_my_flist(flist string) !json2.Any { + req := http.Request{ + method: http.Method.delete + header: cl.header + url: 'https://${cl.url}/api/flist/me/${flist}' + } + + resp := req.do()! + return json2.raw_decode(resp.body)! +} + +pub fn (mut cl ZeroHubClient) symlink(source string, linkname string) !string { + req := http.Request{ + method: http.Method.get + header: cl.header + url: 'https://${cl.url}/api/flist/me/${source}/link/${linkname}' + } + resp := req.do()! + return resp.body +} + +pub fn (mut cl ZeroHubClient) cross_symlink(repo string, source string, linkname string) !string { + req := http.Request{ + method: http.Method.get + header: cl.header + url: 'https://${cl.url}/api/flist/me/${linkname}/crosslink/${repo}/${source}' + } + resp := req.do()! + return resp.body +} + +pub fn (mut cl ZeroHubClient) rename(source string, dest string) !string { + req := http.Request{ + method: http.Method.get + header: cl.header + url: 'https://${cl.url}/api/flist/me/${source}/rename/${dest}' + } + resp := req.do()! + return resp.body +} + +pub fn (mut cl ZeroHubClient) promote(source_repo string, source_name string, localname string) !string { + // Copy cross-repository sourcerepo/sourcefile to your [local-repository]/localname + req := http.Request{ + method: http.Method.get + header: cl.header + url: 'https://${cl.url}/api/flist/me/promote/${source_repo}/${source_name}/${localname}' + } + resp := req.do()! 
+ return resp.body +} + +pub fn (mut cl ZeroHubClient) convert(image string) !string { + form := http.PostMultipartFormConfig{ + form: { + 'image': image + } + header: cl.header + } + + resp := http.post_multipart_form('https://${cl.url}/api/flist/me/docker', form)! + return resp.body +} + +pub fn (mut cl ZeroHubClient) merge_flists(flists []string, target string) !string { + req := http.Request{ + method: http.Method.post + header: cl.header + url: 'https://${cl.url}/api/flist/me/merge/${target}' + data: json.encode(flists) + } + resp := req.do()! + return resp.body +} + +pub fn (mut cl ZeroHubClient) upload_flist(path string) !os.Result { + cmd := "curl -X Post -H 'Authorization: Bearer ${cl.secret}' -F 'file=@${path}' https://${cl.url}/api/flist/me/upload-flist" + + res := os.execute(cmd) + return res +} + +pub fn (mut cl ZeroHubClient) upload_archive(path string) !os.Result { + cmd := "curl -X Post -H 'Authorization: Bearer ${cl.secret}' -F 'file=@${path}' https://${cl.url}/api/flist/me/upload" + + res := os.execute(cmd) + return res +} diff --git a/lib/threefold/zerohub/readme.md b/lib/threefold/zerohub/readme.md new file mode 100644 index 00000000..d1068c48 --- /dev/null +++ b/lib/threefold/zerohub/readme.md @@ -0,0 +1,18 @@ +# ZeroHub + +This is a SAL for the ZeroHub + +The default hub we connect to is https://hub.grid.tf/ + +### for developers + +more info see https://github.com/threefoldtech/0-hub#public-api-endpoints-no-authentication-needed + + +> TODO: implement each endpoint on the zerohub here at client + + +## Hub Authorization +ZeroHub authorized enpoints can be accessed with exporting a jwt in env vars. 
to do so: +- go to https://hub.grid.tf, and on the login section try `Generate API Token` +- copy the token you got and `export HUB_JWT=` \ No newline at end of file diff --git a/lib/threefold/zerohub/zerohub.v b/lib/threefold/zerohub/zerohub.v new file mode 100644 index 00000000..080fc41d --- /dev/null +++ b/lib/threefold/zerohub/zerohub.v @@ -0,0 +1,42 @@ +module zerohub + +import net.http + +// import freeflowuniverse.herolib.clients.httpconnection + +// TODO: curl -H "Authorization: bearer 6Pz6giOpHSaA3KdYI6LLpGSLmDmzmRkVdwvc7S-E5PVB0-iRfgDKW9Rb_ZTlj-xEW4_uSCa5VsyoRsML7DunA1sia3Jpc3RvZi4zYm90IiwgMTY3OTIxNTc3MF0=" https://hub.grid.tf/api/flist/ + +pub struct ZeroHubClient { +pub mut: + url string + secret string // is called bearer in documentation + header http.Header +} + +@[params] +pub struct ZeroHubClientArgs { +pub: + url string = 'hub.grid.tf' + secret string // is called bearer in documentation +} + +// see https://hub.grid.tf/ +// more info see https://github.com/threefoldtech/0-hub#public-api-endpoints-no-authentication-needed +pub fn new(args ZeroHubClientArgs) !ZeroHubClient { + // mut conn := httpconnection.new(name:'zerohub', url:'https://${args.url}') + + // TODO: use our caching rest client (httpclient) + // example which was working: curl -H "Authorization: bearer ""..." https://hub.grid.tf/api/flist/ + // see how to get this Authorization bearer to work with our httpclient, certain header to be set. + // if args.reset { + // //if reset asked for cache will be emptied + // conn.cache.cache_drop()! 
+ // } + + mut cl := ZeroHubClient{ + url: args.url + secret: args.secret + } + // TODO: there should be a check here that its accessible + return cl +} diff --git a/lib/threefold/zerohub/zerohub_test.v b/lib/threefold/zerohub/zerohub_test.v new file mode 100644 index 00000000..ac4a4233 --- /dev/null +++ b/lib/threefold/zerohub/zerohub_test.v @@ -0,0 +1,68 @@ +module zerohub + +import net.http +import os +import freeflowuniverse.herolib.ui.console + +const secret = '6Pz6giOpHSaA3KdYI6LLpGSLmDmzmRkVdwvc7S-E5PVB0-iRfgDKW9Rb_ZTlj-xEW4_uSCa5VsyoRsML7DunA1sia3Jpc3RvZi4zYm90IiwgMTY3OTIxNTc3MF0=' + +fn test_main() ? { + mut cl := new(secret: secret)! + + // flists := cl.get_flists()! + // console.print_debug(flists) + + // repos := cl.get_repos()! + // console.print_debug(repos) + + // files := cl.get_files()! + // console.print_debug(files) + + // flists := cl.get_repo_flists('omarabdulaziz.3bot')! + // console.print_debug(flists) + + // flist_data := cl.get_flist_dump('omarabdulaziz.3bot', 'omarabdul3ziz-obuntu-zinit.flist')! + // console.print_debug(flist_data) + + hub_token := os.getenv('HUB_JWT') + header_config := http.HeaderConfig{ + key: http.CommonHeader.authorization + value: 'bearer ${hub_token}' + } + + cl.header = http.new_header(header_config) + cl.secret = hub_token + + // mine := cl.get_me()! + // console.print_debug(mine.as_map()["status"]) + + // flist := cl.get_my_flist("omarabdul3ziz-forum-docker-v3.1.flist")! + // console.print_debug(flist) + + // resp := cl.remove_my_flist("threefolddev-presearch-v2.3.flist")! + // console.print_debug(resp) + + // res := cl.symlink("mahmoudemmad-mastodon_after_update-test3.flist", "testsymlink")! + // console.print_debug(res) + + // res := cl.cross_symlink("abdelrad", "0-hub.flist", "testcrosssymlink")! + // console.print_debug(res) + + // res := cl.rename("omarabdul3ziz-forum-docker-v3.1.flist", "renamed")! + // console.print_debug(res) + + // res := cl.promote("abdelrad", "0-hub.flist", "promoted")! 
+ // console.print_debug(res) + + // res := cl.convert("alpine")! + // console.print_debug(res) + + // res := cl.merge_flists( ["omarabdulaziz.3bot/omarabdul3ziz-obuntu-zinit.flist", "omarabdulaziz.3bot/omarabdul3ziz-peertube-v3.1.1.flist"], "merged")! + // console.print_debug(res) + + // res := cl.upload_flist("./testup.flist")! + // console.print_debug(res) + + res := cl.upload_archive('./alpine.tar.gz')! + console.print_debug(res) +}