Merge commit '9790ef4dacdf729d8825dbe745379bd6c669b9dd' as 'components/rfs'

2025-08-16 21:12:45 +02:00
96 changed files with 14003 additions and 0 deletions


@@ -0,0 +1 @@
/target


@@ -0,0 +1,67 @@
on:
push:
# Sequence of patterns matched against refs/tags
tags:
- "v*" # Push events to matching v*, i.e. v1.0, v20.15.10
name: Create Release
jobs:
build:
name: Releasing rfs
# we use 18.04 to be compatible with libc version on zos
runs-on: ubuntu-latest
steps:
- name: Prepare
run: |
sudo apt-get update
sudo apt-get install musl-dev musl-tools
sudo apt-get install capnproto
- name: Checkout code
uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
name: Install toolchain
with:
toolchain: stable
target: x86_64-unknown-linux-musl
- uses: actions-rs/cargo@v1
with:
command: build
args: --release --target=x86_64-unknown-linux-musl --features build-binary
- name: Strip
run: |
strip target/x86_64-unknown-linux-musl/release/rfs
- name: Strip
run: |
strip target/x86_64-unknown-linux-musl/release/docker2fl
- name: Create Release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: Release ${{ github.ref }}
draft: false
prerelease: false
- name: Upload Release Asset for RFS
id: upload-release-asset-rfs
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: target/x86_64-unknown-linux-musl/release/rfs
asset_name: rfs
asset_content_type: application/x-pie-executable
- name: Upload Release Asset for docker2fl
id: upload-release-asset-docker2fl
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: target/x86_64-unknown-linux-musl/release/docker2fl
asset_name: docker2fl
asset_content_type: application/x-pie-executable


@@ -0,0 +1,33 @@
name: Unit and Integration Test
on: push
env:
CARGO_TERM_COLOR: always
jobs:
check_fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
with:
components: rustfmt
- uses: clechasseur/rs-fmt-check@v2
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Get Dependencies
run: |
sudo apt-get update
sudo apt-get install capnproto
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
- uses: Swatinem/rust-cache@v1
- name: Unit Testing
run: cargo test --features build-binary
env:
RUST_MIN_STACK: 8388608

6
components/rfs/.gitignore vendored Normal file

@@ -0,0 +1,6 @@
/target
/tests/*.flist.d
result
.direnv/
fl-server/flists
fl-server/config.toml

4437
components/rfs/Cargo.lock generated Normal file

File diff suppressed because it is too large

12
components/rfs/Cargo.toml Normal file

@@ -0,0 +1,12 @@
[workspace]
resolver = "2"
members = [
"rfs",
"docker2fl",
"fl-server"
]
[profile.release]
lto = true
codegen-units = 1

23
components/rfs/Dockerfile Normal file

@@ -0,0 +1,23 @@
FROM rust:slim as builder
WORKDIR /src
COPY fl-server /src/fl-server
COPY rfs /src/rfs
COPY docker2fl /src/docker2fl
COPY Cargo.toml .
COPY Cargo.lock .
COPY config.toml .
RUN apt-get update && apt-get install curl build-essential libssl-dev musl-tools -y
RUN rustup target add x86_64-unknown-linux-musl
RUN cargo build --release --bin fl-server --target=x86_64-unknown-linux-musl
FROM alpine:3.19
WORKDIR /app
COPY --from=builder /src/target/x86_64-unknown-linux-musl/release/fl-server .
COPY --from=builder /src/config.toml .
ENTRYPOINT [ "./fl-server", "--config-path", "config.toml"]

201
components/rfs/LICENSE Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright TF TECH NV (Belgium)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

43
components/rfs/README.md Normal file

@@ -0,0 +1,43 @@
# rfs
This repo contains the binaries related to rfs.
[![Test](https://github.com/threefoldtech/rfs/actions/workflows/tests.yaml/badge.svg?branch=master)](https://github.com/threefoldtech/rfs/actions/workflows/tests.yaml)
## Introduction
`rfs` is the main tool to create, mount and extract FungiStore lists (FungiList), or `fl` for short. An `fl` is a simple format
for keeping information about an entire filesystem in a compact form. It does not hold the data itself, only enough information to
retrieve that data back from a `store`.
## Build
Make sure you have rust installed then run the following commands:
```bash
# this is needed to be run once to make sure the musl target is installed
rustup target add x86_64-unknown-linux-musl
# build all binaries
cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
```
The rfs binary will be available under `./target/x86_64-unknown-linux-musl/release/rfs`
The docker2fl binary will be available under `./target/x86_64-unknown-linux-musl/release/docker2fl`
You can then copy the binaries to `/usr/bin/` to use them from anywhere on your system.
## Binaries and libraries
- [rfs](./rfs/README.md)
- [docker2fl](./docker2fl/README.md)
- [fl-server](./fl-server/README.md)
- [fl-frontend](./frontend/README.md)
## Flist-Server
- Dockerfile for the backend: https://github.com/threefoldtech/rfs/blob/master/Dockerfile
- backend config: https://github.com/threefoldtech/rfs/blob/master/fl-server/README.md
- Dockerfile for the frontend: https://github.com/threefoldtech/rfs/blob/master/frontend/Dockerfile


@@ -0,0 +1,6 @@
services:
fl-server:
build: .
ports:
# <HOST_PORT>:<CONTAINER_PORT>
- 3000:3000


@@ -0,0 +1,34 @@
[package]
name = "docker2fl"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[build-dependencies]
git-version = "0.3.5"
[lib]
name = "docker2fl"
path = "src/docker2fl.rs"
[[bin]]
name = "docker2fl"
path = "src/main.rs"
[dependencies]
log = "0.4"
anyhow = "1.0.44"
regex = "1.9.6"
rfs = { path = "../rfs"}
tokio = { version = "1", features = [ "rt", "rt-multi-thread", "macros", "signal"] }
bollard = "0.15.0"
futures-util = "0.3"
simple_logger = {version = "1.0.1"}
uuid = { version = "1.3.1", features = ["v4"] }
tempdir = "0.3"
serde_json = "1.0"
toml = "0.4.2"
clap = { version = "4.2", features = ["derive"] }
serde = { version = "1.0.159" , features = ["derive"] }
tokio-async-drop = "0.1.0"
walkdir = "2.5.0"


@@ -0,0 +1,137 @@
# docker2fl
`docker2fl` is a tool to extract docker images and convert them to an flist using the [rfs](../rfs) tool.
## Build
To build docker2fl, make sure you have rust installed, then run the following commands:
```bash
# this is needed to be run once to make sure the musl target is installed
rustup target add x86_64-unknown-linux-musl
# build the binary
cargo build --release --target=x86_64-unknown-linux-musl
```
The binary will be available under `./target/x86_64-unknown-linux-musl/release/docker2fl`. You can then copy that binary to `/usr/bin/`
to use it from anywhere on your system.
```bash
sudo mv ./target/x86_64-unknown-linux-musl/release/docker2fl /usr/bin/
```
## Stores
A store is where the actual data lives. A store can be as simple as a `directory` on your local machine, in which case the files in the `fl` are only 'accessible' on your local machine. A store can also be a `zdb` running remotely or a cluster of `zdb`s. Right now only `dir`, `zdb` and `s3` stores are supported, but this will change in the future to support even more stores.
## Usage
### Creating an `fl`
```bash
docker2fl -i redis -s <store-specs>
```
This tells docker2fl to create an `fl` named `redis-latest.fl` using the store defined by the url `<store-specs>`, and to recursively upload all files under the temporary docker directory that contains the exported docker image.
The simplest form of `<store-specs>` is a `url`. The store `url` defines the store to use. Any `url` has a scheme that defines the store type. Right now we have support only for:
- `dir`: dir is a very simple store that is mostly used for testing. A dir store will store the fs blobs in another location defined by the url path. An example of a valid dir url is `dir:///tmp/store`
- `zdb`: [zdb](https://github.com/threefoldtech/0-db) is an append-only key-value store that provides a redis-like API. An example zdb url can be something like `zdb://<hostname>[:port][/namespace]`
- `s3`: aws-s3 is used for storing and retrieving large amounts of data (blobs) in buckets (directories). An example: `s3://<username>:<password>@<host>:<port>/<bucket-name>`
`region` is an optional parameter for s3 stores; to provide one, add it as a query to the url: `?region=<region-name>`
`<store-specs>` can also be of the form `<start>-<end>=<url>` where `start` and `end` are hex bytes used to partition the blob keys. rfs will then store a blob on the defined store if its blob key falls in the `[start:end]` range (inclusive).
If the `start-end` range is not provided, a `00-FF` range is assumed, basically a catch-all range for the blob keys. In other words, all blobs will be written to that store.
This is only useful because `docker2fl` can accept multiple stores on the command line with different and/or overlapping ranges.
For example `-s 00-80=dir:///tmp/store0 -s 81-ff=dir:///tmp/store1` means all keys that have a prefix byte in range `[00-80]` will be written to /tmp/store0, and all other keys (`[81-ff]`) will be written to /tmp/store1.
The same range can appear multiple times, which means the blob will be replicated to all the stores that match its key prefix.
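To make the `[xx-xx=]<url>` form concrete, here is a minimal, hypothetical sketch of how such a spec could be split into a byte range and a store url. It is not the actual `rfs` parser (the real work happens in `rfs::store::parse_router`); the function name and error handling are illustrative only.

```rust
/// Hypothetical helper: split "[start-end=]<url>" into (start, end, url).
/// When no range prefix is given, the catch-all 00-ff range is assumed.
fn parse_store_spec(spec: &str) -> Result<(u8, u8, String), String> {
    if let Some((range, url)) = spec.split_once('=') {
        // A range prefix looks like "00-80": two hex bytes separated by '-'.
        if range.len() == 5 && range.as_bytes()[2] == b'-' {
            let start = u8::from_str_radix(&range[..2], 16).map_err(|e| e.to_string())?;
            let end = u8::from_str_radix(&range[3..5], 16).map_err(|e| e.to_string())?;
            return Ok((start, end, url.to_string()));
        }
    }
    // No (valid) range prefix: the whole spec is the url and covers all keys.
    Ok((0x00, 0xff, spec.to_string()))
}

fn main() {
    assert_eq!(
        parse_store_spec("00-80=dir:///tmp/store0"),
        Ok((0x00, 0x80, "dir:///tmp/store0".to_string()))
    );
    assert_eq!(
        parse_store_spec("dir:///tmp/store1"),
        Ok((0x00, 0xff, "dir:///tmp/store1".to_string()))
    );
}
```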
To quickly test this operation
```bash
docker2fl -i redis -s "dir:///tmp/store0"
```
This command will use the redis image and effectively create `redis-latest.fl`, storing the blobs under the location /tmp/store0.
```bash
#docker2fl --help
Usage: docker2fl [OPTIONS] --image-name <IMAGE_NAME>
Options:
--debug...
enable debugging logs
-i, --image-name <IMAGE_NAME>
name of the docker image to be converted to flist
-s, --store <STORE>
store url for rfs in the format [xx-xx=]<url>. the range xx-xx is optional and used for sharding. the URL is per store type, please check docs for more information
-h, --help
Print help
-V, --version
Print version
```
## Generate an flist using ZDB
### Deploy a vm
1. Deploy a vm with a public IP
2. Add docker (don't forget to add a disk for it with mountpoint = "/var/lib/docker")
3. Add caddy
### Install zdb and run an instance of it
1. Execute `git clone -b development-v2 https://github.com/threefoldtech/0-db /zdb` then `cd /zdb`
2. Build
```bash
cd libzdb
make
cd ..
cd zdbd
make STATIC=1
cd ..
make
```
3. Install `make install`
4. run `zdb --listen 0.0.0.0`
5. The resulting info you should know:
```console
zdbEndpoint = "<vm public IP>:<port>"
zdbNameSpace = "default"
zdbPassword = "default"
```
### Install docker2fl
1. Execute `git clone -b development-v2 https://github.com/threefoldtech/rfs` then `cd rfs`
2. Execute
```bash
rustup target add x86_64-unknown-linux-musl
cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
mv ./target/x86_64-unknown-linux-musl/release/docker2fl /usr/bin/
```
### Convert docker image to an fl
1. Try an image, for example the `threefolddev/ubuntu:22.04` image
2. Execute `docker2fl -i threefolddev/ubuntu:22.04 -s "zdb://<vm public IP>:<port>/default" -d`
3. You will end up having `threefolddev-ubuntu-22.04.fl` (flist)
### Serve the flist using caddy
1. In the directory that includes the output flist, you can run `caddy file-server --listen 0.0.0.0:2015 --browse`
2. The flist will be available as `http://<vm public IP>:2015/threefolddev-ubuntu-22.04.fl`
3. Use the flist to deploy any virtual machine.


@@ -0,0 +1,9 @@
fn main() {
println!(
"cargo:rustc-env=GIT_VERSION={}",
git_version::git_version!(
args = ["--tags", "--always", "--dirty=-modified"],
fallback = "unknown"
)
);
}


@@ -0,0 +1,335 @@
use bollard::auth::DockerCredentials;
use bollard::container::{
Config, CreateContainerOptions, InspectContainerOptions, RemoveContainerOptions,
};
use bollard::image::{CreateImageOptions, RemoveImageOptions};
use bollard::Docker;
use std::sync::mpsc::Sender;
use tempdir::TempDir;
use walkdir::WalkDir;
use anyhow::{Context, Result};
use futures_util::stream::StreamExt;
use serde_json::json;
use std::collections::HashMap;
use std::default::Default;
use std::fs;
use std::path::Path;
use std::process::Command;
use tokio_async_drop::tokio_async_drop;
use rfs::fungi::Writer;
use rfs::store::Store;
struct DockerInfo {
image_name: String,
container_name: String,
docker: Docker,
}
impl Drop for DockerInfo {
fn drop(&mut self) {
tokio_async_drop!({
let res = clean(&self.docker, &self.image_name, &self.container_name)
.await
.context("failed to clean docker image and container");
if res.is_err() {
log::error!(
"cleaning docker image and container failed with error: {:?}",
res.err()
);
}
});
}
}
pub struct DockerImageToFlist {
meta: Writer,
image_name: String,
credentials: Option<DockerCredentials>,
docker_tmp_dir: TempDir,
}
impl DockerImageToFlist {
pub fn new(
meta: Writer,
image_name: String,
credentials: Option<DockerCredentials>,
docker_tmp_dir: TempDir,
) -> Self {
DockerImageToFlist {
meta,
image_name,
credentials,
docker_tmp_dir,
}
}
pub fn files_count(&self) -> usize {
WalkDir::new(self.docker_tmp_dir.path()).into_iter().count()
}
pub async fn prepare(&mut self) -> Result<()> {
#[cfg(unix)]
let docker = Docker::connect_with_socket_defaults().context("failed to create docker")?;
let container_file =
Path::file_stem(self.docker_tmp_dir.path()).expect("failed to get directory name");
let container_name = container_file
.to_str()
.expect("failed to get container name")
.to_owned();
let docker_info = DockerInfo {
image_name: self.image_name.to_owned(),
container_name,
docker,
};
extract_image(
&docker_info.docker,
&docker_info.image_name,
&docker_info.container_name,
self.docker_tmp_dir.path(),
self.credentials.clone(),
)
.await
.context("failed to extract docker image to a directory")?;
log::info!(
"docker image '{}' is extracted successfully",
docker_info.image_name
);
Ok(())
}
pub async fn pack<S: Store>(&mut self, store: S, sender: Option<Sender<u32>>) -> Result<()> {
rfs::pack(
self.meta.clone(),
store,
&self.docker_tmp_dir.path(),
true,
sender,
)
.await
.context("failed to pack flist")?;
log::info!("flist has been created successfully");
Ok(())
}
pub async fn convert<S: Store>(&mut self, store: S, sender: Option<Sender<u32>>) -> Result<()> {
self.prepare().await?;
self.pack(store, sender).await?;
Ok(())
}
}
async fn extract_image(
docker: &Docker,
image_name: &str,
container_name: &str,
docker_tmp_dir_path: &Path,
credentials: Option<DockerCredentials>,
) -> Result<()> {
pull_image(docker, image_name, credentials).await?;
create_container(docker, image_name, container_name)
.await
.context("failed to create docker container")?;
export_container(container_name, docker_tmp_dir_path)
.context("failed to export docker container")?;
container_boot(docker, container_name, docker_tmp_dir_path)
.await
.context("failed to boot docker container")?;
Ok(())
}
async fn pull_image(
docker: &Docker,
image_name: &str,
credentials: Option<DockerCredentials>,
) -> Result<()> {
log::info!("pulling docker image {}", image_name);
let options = Some(CreateImageOptions {
from_image: image_name,
..Default::default()
});
let mut image_pull_stream = docker.create_image(options, None, credentials);
while let Some(msg) = image_pull_stream.next().await {
msg.context("failed to pull docker image")?;
}
Ok(())
}
async fn create_container(docker: &Docker, image_name: &str, container_name: &str) -> Result<()> {
log::debug!("Inspecting docker image configurations {}", image_name);
let image = docker
.inspect_image(image_name)
.await
.context("failed to inspect docker image")?;
let image_config = image.config.context("failed to get docker image configs")?;
let mut command = "";
if image_config.cmd.is_none() && image_config.entrypoint.is_none() {
command = "/bin/sh";
}
log::debug!("Creating a docker container {}", container_name);
let options = Some(CreateContainerOptions {
name: container_name,
platform: None,
});
let config = Config {
image: Some(image_name),
hostname: Some(container_name),
cmd: Some(vec![command]),
..Default::default()
};
docker
.create_container(options, config)
.await
.context("failed to create docker temporary container")?;
Ok(())
}
fn export_container(container_name: &str, docker_tmp_dir_path: &Path) -> Result<()> {
log::debug!("Exporting docker container {}", container_name);
Command::new("sh")
.arg("-c")
.arg(format!(
"docker export {} | tar -xpf - -C {}",
container_name,
docker_tmp_dir_path.display()
))
.output()
.expect("failed to execute export docker container");
Ok(())
}
async fn container_boot(
docker: &Docker,
container_name: &str,
docker_tmp_dir_path: &Path,
) -> Result<()> {
log::debug!(
"Inspecting docker container configurations {}",
container_name
);
let options = Some(InspectContainerOptions { size: false });
let container = docker
.inspect_container(container_name, options)
.await
.context("failed to inspect docker container")?;
let container_config = container
.config
.context("failed to get docker container configs")?;
let command;
let args;
let mut env: HashMap<String, String> = HashMap::new();
let mut cwd = String::from("/");
let cmd = container_config.cmd.expect("failed to get cmd configs");
if let Some(entrypoint) = container_config.entrypoint {
command = (entrypoint.first().expect("failed to get first entrypoint")).to_string();
if entrypoint.len() > 1 {
let (_, entries) = entrypoint
.split_first()
.expect("failed to split entrypoint");
args = entries.to_vec();
} else {
args = cmd;
}
} else {
command = (cmd.first().expect("failed to get first cmd")).to_string();
let (_, entries) = cmd.split_first().expect("failed to split cmd");
args = entries.to_vec();
}
if let Some(envs) = container_config.env {
for entry in envs.iter() {
if let Some((key, value)) = entry.split_once('=') {
env.insert(key.to_string(), value.to_string());
}
}
}
if let Some(ref working_dir) = container_config.working_dir {
if !working_dir.is_empty() {
cwd = working_dir.to_string();
}
}
let metadata = json!({
"startup": {
"entry": {
"name": "core.system",
"args": {
"name": command,
"args": args,
"env": env,
"dir": cwd,
}
}
}
});
let toml_metadata: toml::Value = serde_json::from_str(&metadata.to_string())?;
log::info!(
"Creating '.startup.toml' file from container {} contains {}",
container_name,
toml_metadata.to_string()
);
fs::write(
docker_tmp_dir_path.join(".startup.toml"),
toml_metadata.to_string(),
)
.expect("failed to create '.startup.toml' file");
Ok(())
}
async fn clean(docker: &Docker, image_name: &str, container_name: &str) -> Result<()> {
log::info!("cleaning docker image and container");
let options = Some(RemoveContainerOptions {
force: true,
..Default::default()
});
docker
.remove_container(container_name, options)
.await
.context("failed to remove docker container")?;
let remove_options = Some(RemoveImageOptions {
force: true,
..Default::default()
});
docker
.remove_image(image_name, remove_options, None)
.await
.context("failed to remove docker image")?;
Ok(())
}


@@ -0,0 +1,115 @@
use anyhow::Result;
use bollard::auth::DockerCredentials;
use clap::{ArgAction, Parser};
use rfs::fungi;
use rfs::store::parse_router;
use tokio::runtime::Builder;
use uuid::Uuid;
mod docker2fl;
#[derive(Parser, Debug)]
#[clap(name ="docker2fl", author, version = env!("GIT_VERSION"), about, long_about = None)]
struct Options {
/// enable debugging logs
#[clap(short, long, action=ArgAction::Count)]
debug: u8,
/// store url for rfs in the format [xx-xx=]<url>. the range xx-xx is optional and used for
/// sharding. the URL is per store type, please check docs for more information
#[clap(short, long, required = true, action=ArgAction::Append)]
store: Vec<String>,
/// name of the docker image to be converted to flist
#[clap(short, long, required = true)]
image_name: String,
// docker credentials
/// docker hub server username
#[clap(long, required = false)]
username: Option<String>,
/// docker hub server password
#[clap(long, required = false)]
password: Option<String>,
/// docker hub server auth
#[clap(long, required = false)]
auth: Option<String>,
/// docker hub server email
#[clap(long, required = false)]
email: Option<String>,
/// docker hub server address
#[clap(long, required = false)]
server_address: Option<String>,
/// docker hub server identity token
#[clap(long, required = false)]
identity_token: Option<String>,
/// docker hub server registry token
#[clap(long, required = false)]
registry_token: Option<String>,
}
fn main() -> Result<()> {
let rt = Builder::new_multi_thread()
.thread_stack_size(8 * 1024 * 1024)
.enable_all()
.build()
.unwrap();
rt.block_on(run())
}
async fn run() -> Result<()> {
let opts = Options::parse();
simple_logger::SimpleLogger::new()
.with_utc_timestamps()
.with_level({
match opts.debug {
0 => log::LevelFilter::Info,
1 => log::LevelFilter::Debug,
_ => log::LevelFilter::Trace,
}
})
.with_module_level("sqlx", log::Level::Error.to_level_filter())
.init()?;
let mut docker_image = opts.image_name.to_string();
if !docker_image.contains(':') {
docker_image.push_str(":latest");
}
let credentials = Some(DockerCredentials {
username: opts.username,
password: opts.password,
auth: opts.auth,
email: opts.email,
serveraddress: opts.server_address,
identitytoken: opts.identity_token,
registrytoken: opts.registry_token,
});
let fl_name = docker_image.replace([':', '/'], "-") + ".fl";
let meta = fungi::Writer::new(&fl_name, true).await?;
let store = parse_router(&opts.store).await?;
let container_name = Uuid::new_v4().to_string();
let docker_tmp_dir =
tempdir::TempDir::new(&container_name).expect("failed to create tmp directory");
let mut docker_to_fl =
docker2fl::DockerImageToFlist::new(meta, docker_image, credentials, docker_tmp_dir);
let res = docker_to_fl.convert(store, None).await;
// remove the file created with the writer if fl creation failed
if res.is_err() {
tokio::fs::remove_file(fl_name).await?;
return res;
}
Ok(())
}


@@ -0,0 +1,73 @@
# FungiList specifications
## Introduction
The idea behind the FL format is to build a full filesystem description that is compact and also easy to use from almost ANY language. The format needs to be easy to edit by tools like `rfs` or any other tool.
We eventually decided to use `sqlite`! Yes, the `FL` file is just a `sqlite` database with the following [schema](../rfs/schema/schema.sql).
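Because an `fl` is a plain sqlite file, it can be inspected with any sqlite client or library. A minimal sketch using the `rusqlite` crate (the flist file name is just an example) that lists the tables described below:

```rust
use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    // An .fl file is an ordinary sqlite database, so open it like one.
    let db = Connection::open("redis-latest.fl")?;

    // List the tables (inode, extra, tag, block, route, ...).
    let mut stmt = db.prepare("SELECT name FROM sqlite_master WHERE type = 'table'")?;
    let tables = stmt.query_map([], |row| row.get::<_, String>(0))?;
    for table in tables {
        println!("{}", table?);
    }
    Ok(())
}
```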
## Tables
### Inode
The inode table describes each entry on the filesystem. It closely matches the `inode` structure on the Linux operating system. Each inode has a unique id called `ino`, a parent `ino`, a name, and other parameters (user, group, etc.).
The type of the `inode` is defined by its `mode`, which is a `1:1` mapping from the Linux `mode`.
> from the [inode manual](https://man7.org/linux/man-pages/man7/inode.7.html)
```
POSIX refers to the stat.st_mode bits corresponding to the mask
S_IFMT (see below) as the file type, the 12 bits corresponding to
the mask 07777 as the file mode bits and the least significant 9
bits (0777) as the file permission bits.
The following mask values are defined for the file type:
S_IFMT 0170000 bit mask for the file type bit field
S_IFSOCK 0140000 socket
S_IFLNK 0120000 symbolic link
S_IFREG 0100000 regular file
S_IFBLK 0060000 block device
S_IFDIR 0040000 directory
S_IFCHR 0020000 character device
S_IFIFO 0010000 FIFO
```
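As a small illustration (not code from `rfs` itself), the file type can be recovered from a stored `mode` by masking with `S_IFMT`, and the permission bits by masking with `0777`, exactly as on Linux:

```rust
// Illustrative only: decode an inode `mode` using the same octal
// constants listed in the inode(7) excerpt above.
const S_IFMT: u32 = 0o170000; // bit mask for the file type bit field

fn file_type(mode: u32) -> &'static str {
    match mode & S_IFMT {
        0o140000 => "socket",
        0o120000 => "symbolic link",
        0o100000 => "regular file",
        0o060000 => "block device",
        0o040000 => "directory",
        0o020000 => "character device",
        0o010000 => "FIFO",
        _ => "unknown",
    }
}

fn permission_bits(mode: u32) -> u32 {
    mode & 0o777 // the least significant 9 bits
}
```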
## Extra
The `extra` table holds any **optional** data associated with the inode based on its type. For now it holds the `link target` for symlink inodes.
## Tag
The `tag` table holds key-value pairs of user-defined data associated with the FL. The standard keys are:
- `version`
- `description`
- `author`
But an FL author can add other custom keys there
## Block
The `block` table is used to associate data file blocks with files. The `id` field is the blob `id` in the `store`; the `key` is the key used to decrypt the blob. The current implementation of `rfs` does the following:
- For each blob (512k), compute the `sha256` hash. This becomes the encryption key of the block; we call it the `key`
- The block is then `snap` compressed
- Then encrypted with `aes_gcm` using the `key`, and the first 12 bytes of the key as `nonce`
- The final encrypted block is hashed again with `sha256`; this becomes the `id` of the block
- The final encrypted blob is then sent to the store using the `id` as a key (see the sketch right after this list)
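The following is a minimal sketch of that per-block pipeline. It assumes the `sha2`, `snap` and `aes-gcm` crates plus `anyhow` for errors; the real `rfs` implementation may differ in details such as block splitting and error handling.

```rust
use aes_gcm::{aead::{Aead, KeyInit}, Aes256Gcm, Key, Nonce};
use sha2::{Digest, Sha256};

/// Sketch only: turn one plaintext block into (id, key, encrypted blob).
fn process_block(block: &[u8]) -> anyhow::Result<(Vec<u8>, Vec<u8>, Vec<u8>)> {
    // 1. sha256 of the plaintext block becomes the encryption key.
    let key = Sha256::digest(block);

    // 2. snap-compress the block.
    let compressed = snap::raw::Encoder::new().compress_vec(block)?;

    // 3. encrypt with aes-gcm, using the key and its first 12 bytes as nonce.
    let cipher = Aes256Gcm::new(Key::<Aes256Gcm>::from_slice(&key));
    let nonce = Nonce::from_slice(&key[..12]);
    let encrypted = cipher
        .encrypt(nonce, compressed.as_ref())
        .map_err(|e| anyhow::anyhow!("encryption failed: {}", e))?;

    // 4. sha256 of the encrypted block becomes its `id` in the store.
    let id = Sha256::digest(&encrypted);

    Ok((id.to_vec(), key.to_vec(), encrypted))
}
```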
## Route
The `route` table holds routing information for the blobs. It basically describes where to find `blobs` with certain `ids`. The routing is done as follows:
> Note: the routing table is loaded once when `rfs` is started.
- We use the first byte of the blob `id` as the `route key`
- The `route key` is then consulted against the routing table
- While building an `FL`, all matching stores are updated with the new blob. This is how the system does replication
- When `getting` an object, the matching routes are tried in random order; the first one to return a value is used
- Note that identical and overlapping ranges are allowed; this is how sharding and replication are done (a minimal lookup sketch follows this list)
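A minimal, purely illustrative sketch of that lookup (the actual routing logic lives inside `rfs::store`, and these type and field names are assumptions):

```rust
/// Illustrative only: a route is a byte range plus some store identifier.
struct Route {
    start: u8,
    end: u8,
    store: String,
}

/// Return every store whose range covers the blob's route key (the first
/// byte of the blob id). Writes go to all matches; reads try the matches
/// in random order until one returns the blob.
fn matching_stores<'a>(routes: &'a [Route], blob_id: &[u8]) -> Vec<&'a str> {
    let route_key = blob_id[0];
    routes
        .iter()
        .filter(|r| r.start <= route_key && route_key <= r.end)
        .map(|r| r.store.as_str())
        .collect()
}
```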


@@ -0,0 +1,52 @@
[package]
name = "fl-server"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[build-dependencies]
git-version = "0.3.5"
[[bin]]
name = "fl-server"
path = "src/main.rs"
[dependencies]
log = "0.4"
anyhow = "1.0.44"
regex = "1.9.6"
rfs = { path = "../rfs"}
docker2fl = { path = "../docker2fl"}
tokio = { version = "1", features = ["full"] }
bollard = "0.15.0"
futures-util = "0.3"
simple_logger = {version = "1.0.1"}
uuid = { version = "1.3.1", features = ["v4"] }
tempdir = "0.3"
serde_json = "1.0"
toml = "0.4.2"
clap = { version = "4.5.8", features = ["derive"] }
serde = { version = "1.0.159" , features = ["derive"] }
axum = "0.7"
axum-macros = "0.4.1"
tower = { version = "0.4", features = ["util", "timeout", "load-shed", "limit"] }
tower-http = { version = "0.5.2", features = ["fs", "cors", "add-extension", "auth", "compression-full", "trace", "limit"] }
tokio-async-drop = "0.1.0"
mime_guess = "2.0.5"
mime = "0.3.17"
percent-encoding = "2.3.1"
tracing = "0.1.40"
askama = "0.12.1"
hyper = { version = "1.4.0", features = ["full"] }
time = { version = "0.3.36", features = ["formatting"] }
chrono = "0.4.38"
jsonwebtoken = "9.3.0"
utoipa = { version = "4", features = ["axum_extras"] }
utoipa-swagger-ui = { version = "7", features = ["axum"] }
thiserror = "1.0.63"
hostname-validator = "1.1.1"
walkdir = "2.5.0"
sha256 = "1.5.0"
async-trait = "0.1.53"


@@ -0,0 +1,42 @@
# Flist server
The flist server uses the rfs and docker2fl tools to generate different flists from docker images.
## Build
```bash
cargo build
```
## Run
First create `config.toml`; see [configuration](#configuration).
```bash
cargo run --bin fl-server -- --config-path config.toml -d
```
### Configuration
Before building or running the server, create `config.toml` in the current directory.
example `config.toml`:
```toml
host="Your host to run the server on, required, example: 'localhost'"
port="Your port to run the server on, required, example: 3000, validation: between [0, 65535]"
store_url="List of stores to pack flists in which can be 'dir', 'zdb', 's3', required, example: ['dir:///tmp/store0']"
flist_dir="A directory to save each user flists, required, example: 'flists'"
jwt_secret="secret for jwt, required, example: 'secret'"
jwt_expire_hours="Life time for jwt token in hours, required, example: 5, validation: between [1, 24]"
[[users]] # list of authorized users of the server
username = "user1"
password = "password1"
[[users]]
username = "user2"
password = "password2"
...
```


@@ -0,0 +1,9 @@
fn main() {
println!(
"cargo:rustc-env=GIT_VERSION={}",
git_version::git_version!(
args = ["--tags", "--always", "--dirty=-modified"],
fallback = "unknown"
)
);
}


@@ -0,0 +1,154 @@
use std::sync::Arc;
use axum::{
extract::{Json, Request, State},
http::{self, StatusCode},
middleware::Next,
response::IntoResponse,
};
use axum_macros::debug_handler;
use chrono::{Duration, Utc};
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, TokenData, Validation};
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
use crate::{
config,
response::{ResponseError, ResponseResult},
};
#[derive(Serialize, Deserialize)]
pub struct Claims {
pub exp: usize, // Expiry time of the token
pub iat: usize, // Issued at time of the token
pub username: String, // Username associated with the token
}
#[derive(Deserialize, ToSchema)]
pub struct SignInBody {
pub username: String,
pub password: String,
}
#[derive(Serialize, ToSchema)]
pub struct SignInResponse {
pub access_token: String,
}
#[utoipa::path(
post,
path = "/v1/api/signin",
request_body = SignInBody,
responses(
(status = 200, description = "User signed in successfully", body = SignInResponse),
(status = 500, description = "Internal server error"),
(status = 401, description = "Unauthorized user"),
)
)]
#[debug_handler]
pub async fn sign_in_handler(
State(state): State<Arc<config::AppState>>,
Json(user_data): Json<SignInBody>,
) -> impl IntoResponse {
let user = match state.db.get_user_by_username(&user_data.username) {
Some(user) => user,
None => {
return Err(ResponseError::Unauthorized(
"User is not authorized".to_string(),
));
}
};
if user_data.password != user.password {
return Err(ResponseError::Unauthorized(
"Wrong username or password".to_string(),
));
}
let token = encode_jwt(
user.username.clone(),
state.config.jwt_secret.clone(),
state.config.jwt_expire_hours,
)
.map_err(|_| ResponseError::InternalServerError)?;
Ok(ResponseResult::SignedIn(SignInResponse {
access_token: token,
}))
}
pub fn encode_jwt(
username: String,
jwt_secret: String,
jwt_expire: i64,
) -> Result<String, StatusCode> {
let now = Utc::now();
let exp: usize = (now + Duration::hours(jwt_expire)).timestamp() as usize;
let iat: usize = now.timestamp() as usize;
let claim = Claims { iat, exp, username };
encode(
&Header::default(),
&claim,
&EncodingKey::from_secret(jwt_secret.as_ref()),
)
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
}
pub fn decode_jwt(jwt_token: String, jwt_secret: String) -> Result<TokenData<Claims>, StatusCode> {
let result: Result<TokenData<Claims>, StatusCode> = decode(
&jwt_token,
&DecodingKey::from_secret(jwt_secret.as_ref()),
&Validation::default(),
)
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR);
result
}
pub async fn authorize(
State(state): State<Arc<config::AppState>>,
mut req: Request,
next: Next,
) -> impl IntoResponse {
let auth_header = match req.headers_mut().get(http::header::AUTHORIZATION) {
Some(header) => header
.to_str()
.map_err(|_| ResponseError::Forbidden("Empty header is not allowed".to_string()))?,
None => {
return Err(ResponseError::Forbidden(
"No JWT token is added to the header".to_string(),
))
}
};
let mut header = auth_header.split_whitespace();
let (_, token) = (header.next(), header.next());
let token_str = match token {
Some(t) => t.to_string(),
None => {
log::error!("failed to get token string");
return Err(ResponseError::InternalServerError);
}
};
let token_data = match decode_jwt(token_str, state.config.jwt_secret.clone()) {
Ok(data) => data,
Err(_) => {
return Err(ResponseError::Forbidden(
"Unable to decode JWT token".to_string(),
))
}
};
let current_user = match state.db.get_user_by_username(&token_data.claims.username) {
Some(user) => user,
None => {
return Err(ResponseError::Unauthorized(
"You are not an authorized user".to_string(),
));
}
};
req.extensions_mut().insert(current_user.username.clone());
Ok(next.run(req).await)
}


@@ -0,0 +1,63 @@
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
fs,
path::PathBuf,
sync::{Arc, Mutex},
};
use utoipa::ToSchema;
use crate::{
db::{User, DB},
handlers,
};
#[derive(Debug, ToSchema, Serialize, Clone)]
pub struct Job {
pub id: String,
}
#[derive(ToSchema)]
pub struct AppState {
pub jobs_state: Mutex<HashMap<String, handlers::FlistState>>,
pub flists_progress: Mutex<HashMap<PathBuf, f32>>,
pub db: Arc<dyn DB>,
pub config: Config,
}
#[derive(Debug, Default, Clone, Deserialize)]
pub struct Config {
pub host: String,
pub port: u16,
pub store_url: Vec<String>,
pub flist_dir: String,
pub jwt_secret: String,
pub jwt_expire_hours: i64,
pub users: Vec<User>,
}
/// Parse the config file into Config struct.
pub async fn parse_config(filepath: &str) -> Result<Config> {
let content = fs::read_to_string(filepath).context("failed to read config file")?;
let c: Config = toml::from_str(&content).context("failed to convert toml config data")?;
if !hostname_validator::is_valid(&c.host) {
anyhow::bail!("host '{}' is invalid", c.host)
}
rfs::store::parse_router(&c.store_url)
.await
.context("failed to parse store urls")?;
fs::create_dir_all(&c.flist_dir).context("failed to create flists directory")?;
if c.jwt_expire_hours < 1 || c.jwt_expire_hours > 24 {
anyhow::bail!(format!(
"jwt expiry interval in hours '{}' is invalid, must be between [1, 24]",
c.jwt_expire_hours
))
}
Ok(c)
}


@@ -0,0 +1,36 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct User {
pub username: String,
pub password: String,
}
pub trait DB: Send + Sync {
fn get_user_by_username(&self, username: &str) -> Option<User>;
}
#[derive(Debug, ToSchema)]
pub struct MapDB {
users: HashMap<String, User>,
}
impl MapDB {
pub fn new(users: &[User]) -> Self {
Self {
users: users
.iter()
.map(|u| (u.username.clone(), u.to_owned()))
.collect(),
}
}
}
impl DB for MapDB {
fn get_user_by_username(&self, username: &str) -> Option<User> {
self.users.get(username).cloned()
}
}


@@ -0,0 +1,585 @@
use anyhow::Error;
use axum::{
extract::{Path, Query, State},
response::IntoResponse,
Extension, Json,
};
use axum_macros::debug_handler;
use std::{
collections::HashMap,
fs,
path::PathBuf,
sync::{mpsc, Arc},
};
use bollard::auth::DockerCredentials;
use serde::{Deserialize, Serialize};
use crate::{
auth::{SignInBody, SignInResponse, __path_sign_in_handler},
response::{DirListTemplate, DirLister, ErrorTemplate, TemplateErr},
};
use crate::{
config::{self, Job},
response::{FileInfo, ResponseError, ResponseResult},
serve_flists::visit_dir_one_level,
};
use rfs::fungi::{Reader, Writer};
use utoipa::{OpenApi, ToSchema};
use uuid::Uuid;
#[derive(OpenApi)]
#[openapi(
paths(health_check_handler, create_flist_handler, get_flist_state_handler, preview_flist_handler, list_flists_handler, sign_in_handler),
components(schemas(DirListTemplate, DirLister, FlistBody, Job, ResponseError, ErrorTemplate, TemplateErr, ResponseResult, FileInfo, SignInBody, FlistState, SignInResponse, FlistStateInfo, PreviewResponse)),
tags(
(name = "fl-server", description = "Flist conversion API")
)
)]
pub struct FlistApi;
#[derive(Debug, Deserialize, Serialize, Clone, ToSchema)]
pub struct FlistBody {
#[schema(example = "redis")]
pub image_name: String,
pub username: Option<String>,
pub password: Option<String>,
pub auth: Option<String>,
pub email: Option<String>,
pub server_address: Option<String>,
pub identity_token: Option<String>,
pub registry_token: Option<String>,
}
#[derive(Debug, Deserialize, Serialize, Clone, ToSchema)]
pub struct PreviewResponse {
pub content: Vec<PathBuf>,
pub metadata: String,
pub checksum: String,
}
#[derive(Debug, Clone, Serialize, PartialEq, ToSchema)]
pub enum FlistState {
Accepted(String),
Started(String),
InProgress(FlistStateInfo),
Created(String),
Failed,
}
#[derive(Debug, Clone, Serialize, PartialEq, ToSchema)]
pub struct FlistStateInfo {
msg: String,
progress: f32,
}
const DEFAULT_LIMIT: usize = 10;
const DEFAULT_PAGE: usize = 1;
#[derive(Deserialize)]
pub struct Pagination {
page: Option<usize>,
limit: Option<usize>,
}
#[derive(Deserialize, Clone)]
pub struct Filter {
pub max_size: Option<usize>,
pub min_size: Option<usize>,
username: Option<String>,
pub name: Option<String>,
}
#[utoipa::path(
get,
path = "/v1/api",
responses(
(status = 200, description = "flist server is working", body = String)
)
)]
pub async fn health_check_handler() -> ResponseResult {
ResponseResult::Health
}
#[utoipa::path(
post,
path = "/v1/api/fl",
request_body = FlistBody,
responses(
(status = 201, description = "Flist conversion started", body = Job),
(status = 401, description = "Unauthorized user"),
(status = 403, description = "Forbidden"),
(status = 409, description = "Conflict"),
(status = 500, description = "Internal server error"),
)
)]
#[debug_handler]
pub async fn create_flist_handler(
State(state): State<Arc<config::AppState>>,
Extension(username): Extension<String>,
Json(body): Json<FlistBody>,
) -> impl IntoResponse {
let cfg = state.config.clone();
let credentials = Some(DockerCredentials {
username: body.username,
password: body.password,
auth: body.auth,
email: body.email,
serveraddress: body.server_address,
identitytoken: body.identity_token,
registrytoken: body.registry_token,
});
let mut docker_image = body.image_name.to_string();
if !docker_image.contains(':') {
docker_image.push_str(":latest");
}
let fl_name = docker_image.replace([':', '/'], "-") + ".fl";
let username_dir = std::path::Path::new(&cfg.flist_dir).join(&username);
let fl_path = username_dir.join(&fl_name);
if fl_path.exists() {
return Err(ResponseError::Conflict("flist already exists".to_string()));
}
if let Err(err) = fs::create_dir_all(&username_dir) {
log::error!(
"failed to create user flist directory `{:?}` with error {:?}",
&username_dir,
err
);
return Err(ResponseError::InternalServerError);
}
let meta = match Writer::new(&fl_path, true).await {
Ok(writer) => writer,
Err(err) => {
log::error!(
"failed to create a new writer for flist `{:?}` with error {}",
fl_path,
err
);
return Err(ResponseError::InternalServerError);
}
};
let store = match rfs::store::parse_router(&cfg.store_url).await {
Ok(s) => s,
Err(err) => {
log::error!("failed to parse router for store with error {}", err);
return Err(ResponseError::InternalServerError);
}
};
// Create a new job id for the flist request
let job: Job = Job {
id: Uuid::new_v4().to_string(),
};
let current_job = job.clone();
state
.jobs_state
.lock()
.expect("failed to lock state")
.insert(
job.id.clone(),
FlistState::Accepted(format!("flist '{}' is accepted", &fl_name)),
);
let flist_download_url = std::path::Path::new(&format!("{}:{}", cfg.host, cfg.port))
.join(cfg.flist_dir)
.join(username)
.join(&fl_name);
tokio::spawn(async move {
state
.jobs_state
.lock()
.expect("failed to lock state")
.insert(
job.id.clone(),
FlistState::Started(format!("flist '{}' is started", fl_name)),
);
let container_name = Uuid::new_v4().to_string();
let docker_tmp_dir =
tempdir::TempDir::new(&container_name).expect("failed to create tmp dir for docker");
let (tx, rx) = mpsc::channel();
let mut docker_to_fl =
docker2fl::DockerImageToFlist::new(meta, docker_image, credentials, docker_tmp_dir);
let res = docker_to_fl.prepare().await;
if res.is_err() {
let _ = tokio::fs::remove_file(&fl_path).await;
state
.jobs_state
.lock()
.expect("failed to lock state")
.insert(job.id.clone(), FlistState::Failed);
return;
}
let files_count = docker_to_fl.files_count();
let st = state.clone();
let job_id = job.id.clone();
let cloned_fl_path = fl_path.clone();
tokio::spawn(async move {
let mut progress: f32 = 0.0;
for _ in 0..files_count - 1 {
let step = rx.recv().expect("failed to receive progress") as f32;
progress += step;
let progress_percentage = progress / files_count as f32 * 100.0;
st.jobs_state.lock().expect("failed to lock state").insert(
job_id.clone(),
FlistState::InProgress(FlistStateInfo {
msg: "flist is in progress".to_string(),
progress: progress_percentage,
}),
);
st.flists_progress
.lock()
.expect("failed to lock state")
.insert(cloned_fl_path.clone(), progress_percentage);
}
});
let res = docker_to_fl.pack(store, Some(tx)).await;
// remove the file created with the writer if fl creation failed
if res.is_err() {
log::error!("failed creation failed with error {:?}", res.err());
let _ = tokio::fs::remove_file(&fl_path).await;
state
.jobs_state
.lock()
.expect("failed to lock state")
.insert(job.id.clone(), FlistState::Failed);
return;
}
state
.jobs_state
.lock()
.expect("failed to lock state")
.insert(
job.id.clone(),
FlistState::Created(format!(
"flist {:?} is created successfully",
flist_download_url
)),
);
state
.flists_progress
.lock()
.expect("failed to lock state")
.insert(fl_path, 100.0);
});
Ok(ResponseResult::FlistCreated(current_job))
}
#[utoipa::path(
get,
path = "/v1/api/fl/{job_id}",
responses(
(status = 200, description = "Flist state", body = FlistState),
(status = 404, description = "Flist not found"),
(status = 500, description = "Internal server error"),
(status = 401, description = "Unauthorized user"),
(status = 403, description = "Forbidden"),
),
params(
("job_id" = String, Path, description = "flist job id")
)
)]
#[debug_handler]
pub async fn get_flist_state_handler(
Path(flist_job_id): Path<String>,
State(state): State<Arc<config::AppState>>,
) -> impl IntoResponse {
if !&state
.jobs_state
.lock()
.expect("failed to lock state")
.contains_key(&flist_job_id.clone())
{
return Err(ResponseError::NotFound("flist doesn't exist".to_string()));
}
let res_state = state
.jobs_state
.lock()
.expect("failed to lock state")
.get(&flist_job_id.clone())
.expect("failed to get from state")
.to_owned();
match res_state {
FlistState::Accepted(_) => Ok(ResponseResult::FlistState(res_state)),
FlistState::Started(_) => Ok(ResponseResult::FlistState(res_state)),
FlistState::InProgress(_) => Ok(ResponseResult::FlistState(res_state)),
FlistState::Created(_) => {
state
.jobs_state
.lock()
.expect("failed to lock state")
.remove(&flist_job_id.clone());
Ok(ResponseResult::FlistState(res_state))
}
FlistState::Failed => {
state
.jobs_state
.lock()
.expect("failed to lock state")
.remove(&flist_job_id.clone());
Err(ResponseError::InternalServerError)
}
}
}
#[utoipa::path(
get,
path = "/v1/api/fl",
responses(
(status = 200, description = "Listing flists", body = HashMap<String, Vec<FileInfo>>),
(status = 401, description = "Unauthorized user"),
(status = 403, description = "Forbidden"),
(status = 500, description = "Internal server error"),
)
)]
#[debug_handler]
pub async fn list_flists_handler(
State(state): State<Arc<config::AppState>>,
pagination: Query<Pagination>,
filter: Query<Filter>,
) -> impl IntoResponse {
let mut flists: HashMap<String, Vec<FileInfo>> = HashMap::new();
let pagination: Pagination = pagination.0;
let page = pagination.page.unwrap_or(DEFAULT_PAGE);
let limit = pagination.limit.unwrap_or(DEFAULT_LIMIT);
if page == 0 {
return Err(ResponseError::BadRequest(
"requested page should be nonzero positive number".to_string(),
));
}
let filter: Filter = filter.0;
let rs: Result<Vec<FileInfo>, std::io::Error> =
visit_dir_one_level(&state.config.flist_dir, &state, None).await;
let files = match rs {
Ok(files) => files,
Err(e) => {
log::error!("failed to list flists directory with error: {}", e);
return Err(ResponseError::InternalServerError);
}
};
for file in files {
if !file.is_file {
let flists_per_username =
visit_dir_one_level(&file.path_uri, &state, Some(filter.clone())).await;
if let Some(ref filter_username) = filter.username {
if filter_username.clone() != file.name {
continue;
}
}
match flists_per_username {
Ok(files) => {
let username = file.name;
flists.insert(username.clone(), Vec::new());
let start = limit * (page - 1);
let end = limit * page;
if files.len() > start {
if files.len() >= end {
flists.insert(username, files[start..end].to_vec());
} else {
flists.insert(username, files[start..].to_vec());
}
}
}
Err(e) => {
log::error!("failed to list flists per username with error: {}", e);
return Err(ResponseError::InternalServerError);
}
};
};
}
Ok(ResponseResult::Flists(flists))
}
#[utoipa::path(
get,
path = "/v1/api/fl/preview/{flist_path}",
responses(
(status = 200, description = "Flist preview result", body = PreviewResponse),
(status = 400, description = "Bad request"),
(status = 401, description = "Unauthorized user"),
(status = 403, description = "Forbidden"),
(status = 500, description = "Internal server error"),
),
params(
("flist_path" = String, Path, description = "flist file path")
)
)]
#[debug_handler]
pub async fn preview_flist_handler(
State(state): State<Arc<config::AppState>>,
Path(flist_path): Path<String>,
) -> impl IntoResponse {
let fl_path = flist_path;
match validate_flist_path(&state, &fl_path).await {
Ok(_) => (),
Err(err) => return Err(ResponseError::BadRequest(err.to_string())),
};
let content = match get_flist_content(&fl_path).await {
Ok(paths) => paths,
Err(_) => return Err(ResponseError::InternalServerError),
};
let bytes = match std::fs::read(&fl_path) {
Ok(b) => b,
Err(err) => {
log::error!(
"failed to read flist '{}' into bytes with error {}",
fl_path,
err
);
return Err(ResponseError::InternalServerError);
}
};
Ok(ResponseResult::PreviewFlist(PreviewResponse {
content,
metadata: state.config.store_url.join("-"),
checksum: sha256::digest(&bytes),
}))
}
async fn validate_flist_path(state: &Arc<config::AppState>, fl_path: &String) -> Result<(), Error> {
// validate that the path doesn't start with `/`
if fl_path.starts_with("/") {
anyhow::bail!("invalid flist path '{}', shouldn't start with '/'", fl_path);
}
// path should include 3 parts [parent dir, username, flist file]
let parts: Vec<_> = fl_path.split("/").collect();
if parts.len() != 3 {
anyhow::bail!(
format!("invalid flist path '{}', should consist of 3 parts [parent directory, username and flist name", fl_path
));
}
// validate parent dir
if parts[0] != state.config.flist_dir {
anyhow::bail!(
"invalid flist path '{}', parent directory should be '{}'",
fl_path,
state.config.flist_dir
);
}
// validate username
match state.db.get_user_by_username(&parts[1]) {
Some(_) => (),
None => {
anyhow::bail!(
"invalid flist path '{}', username '{}' doesn't exist",
fl_path,
parts[1]
);
}
};
// validate flist extension
let fl_name = parts[2].to_string();
let ext = match std::path::Path::new(&fl_name).extension() {
Some(ex) => ex.to_string_lossy().to_string(),
None => "".to_string(),
};
if ext != "fl" {
anyhow::bail!(
"invalid flist path '{}', invalid flist extension '{}' should be 'fl'",
fl_path,
ext
);
}
// validate flist existence
if !std::path::Path::new(parts[0])
.join(parts[1])
.join(&fl_name)
.exists()
{
anyhow::bail!("flist '{}' doesn't exist", fl_path);
}
Ok(())
}
async fn get_flist_content(fl_path: &String) -> Result<Vec<PathBuf>, Error> {
let mut visitor = ReadVisitor::default();
let meta = match Reader::new(&fl_path).await {
Ok(reader) => reader,
Err(err) => {
log::error!(
"failed to initialize metadata database for flist `{}` with error {}",
fl_path,
err
);
anyhow::bail!("Internal server error");
}
};
match meta.walk(&mut visitor).await {
Ok(()) => return Ok(visitor.into_inner()),
Err(err) => {
log::error!(
"failed to walk through metadata for flist `{}` with error {}",
fl_path,
err
);
anyhow::bail!("Internal server error");
}
};
}
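/// Walk visitor that simply collects the path of every inode in the flist metadata tree.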
#[derive(Default)]
struct ReadVisitor {
inner: Vec<PathBuf>,
}
impl ReadVisitor {
pub fn into_inner(self) -> Vec<PathBuf> {
self.inner
}
}
#[async_trait::async_trait]
impl rfs::fungi::meta::WalkVisitor for ReadVisitor {
async fn visit(
&mut self,
path: &std::path::Path,
_node: &rfs::fungi::meta::Inode,
) -> rfs::fungi::meta::Result<rfs::fungi::meta::Walk> {
self.inner.push(path.to_path_buf());
Ok(rfs::fungi::meta::Walk::Continue)
}
}

View File

@@ -0,0 +1,186 @@
mod auth;
mod config;
mod db;
mod handlers;
mod response;
mod serve_flists;
use anyhow::{Context, Result};
use axum::{
error_handling::HandleErrorLayer,
http::StatusCode,
middleware,
response::IntoResponse,
routing::{get, post},
BoxError, Router,
};
use clap::{ArgAction, Parser};
use hyper::{
header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE},
Method,
};
use std::{
borrow::Cow,
collections::HashMap,
sync::{Arc, Mutex},
time::Duration,
};
use tokio::{runtime::Builder, signal};
use tower::ServiceBuilder;
use tower_http::cors::CorsLayer;
use tower_http::{cors::Any, trace::TraceLayer};
use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;
#[derive(Parser, Debug)]
#[clap(name ="fl-server", author, version = env!("GIT_VERSION"), about, long_about = None)]
struct Options {
/// enable debugging logs
#[clap(short, long, action=ArgAction::Count)]
debug: u8,
/// config file path
#[clap(short, long)]
config_path: String,
}
fn main() -> Result<()> {
let rt = Builder::new_multi_thread()
.thread_stack_size(8 * 1024 * 1024)
.enable_all()
.build()
.unwrap();
rt.block_on(app())
}
async fn app() -> Result<()> {
let opts = Options::parse();
simple_logger::SimpleLogger::new()
.with_utc_timestamps()
.with_level({
match opts.debug {
0 => log::LevelFilter::Info,
1 => log::LevelFilter::Debug,
_ => log::LevelFilter::Trace,
}
})
.with_module_level("sqlx", log::Level::Error.to_level_filter())
.init()?;
let config = config::parse_config(&opts.config_path)
.await
.context("failed to parse config file")?;
let db = Arc::new(db::MapDB::new(&config.users.clone()));
let app_state = Arc::new(config::AppState {
jobs_state: Mutex::new(HashMap::new()),
flists_progress: Mutex::new(HashMap::new()),
db,
config,
});
let cors = CorsLayer::new()
.allow_origin(Any)
.allow_methods([Method::GET, Method::POST])
.allow_headers([AUTHORIZATION, ACCEPT, CONTENT_TYPE]);
let v1_routes = Router::new()
.route("/v1/api", get(handlers::health_check_handler))
.route("/v1/api/signin", post(auth::sign_in_handler))
.route(
"/v1/api/fl",
post(handlers::create_flist_handler).layer(middleware::from_fn_with_state(
app_state.clone(),
auth::authorize,
)),
)
.route(
"/v1/api/fl/:job_id",
get(handlers::get_flist_state_handler).layer(middleware::from_fn_with_state(
app_state.clone(),
auth::authorize,
)),
)
.route(
"/v1/api/fl/preview/:flist_path",
get(handlers::preview_flist_handler),
)
.route("/v1/api/fl", get(handlers::list_flists_handler))
.route("/*path", get(serve_flists::serve_flists));
let app = Router::new()
.merge(
SwaggerUi::new("/swagger-ui")
.url("/api-docs/openapi.json", handlers::FlistApi::openapi()),
)
.merge(v1_routes)
.layer(
ServiceBuilder::new()
.layer(HandleErrorLayer::new(handle_error))
.load_shed()
.concurrency_limit(1024)
.timeout(Duration::from_secs(10))
.layer(TraceLayer::new_for_http()),
)
.with_state(Arc::clone(&app_state))
.layer(cors);
let address = format!("{}:{}", app_state.config.host, app_state.config.port);
let listener = tokio::net::TcpListener::bind(address)
.await
.context("failed to bind address")?;
log::info!(
"🚀 Server started successfully at {}:{}",
app_state.config.host,
app_state.config.port
);
axum::serve(listener, app)
.with_graceful_shutdown(shutdown_signal())
.await
.context("failed to serve listener")?;
Ok(())
}
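/// Resolves when the process receives Ctrl+C or, on unix, SIGTERM, triggering a graceful shutdown.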
async fn shutdown_signal() {
let ctrl_c = async {
signal::ctrl_c()
.await
.expect("failed to install Ctrl+C handler");
};
#[cfg(unix)]
let terminate = async {
signal::unix::signal(signal::unix::SignalKind::terminate())
.expect("failed to install signal handler")
.recv()
.await;
};
tokio::select! {
_ = ctrl_c => {},
_ = terminate => {},
}
}
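/// Translates errors emitted by the tower middleware stack (timeouts, load shedding) into HTTP responses.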
async fn handle_error(error: BoxError) -> impl IntoResponse {
if error.is::<tower::timeout::error::Elapsed>() {
return (StatusCode::REQUEST_TIMEOUT, Cow::from("request timed out"));
}
if error.is::<tower::load_shed::error::Overloaded>() {
return (
StatusCode::SERVICE_UNAVAILABLE,
Cow::from("service is overloaded, try again later"),
);
}
(
StatusCode::INTERNAL_SERVER_ERROR,
Cow::from(format!("Unhandled internal error: {}", error)),
)
}

View File

@@ -0,0 +1,178 @@
use std::collections::HashMap;
use askama::Template;
use axum::{
body::Body,
http::StatusCode,
response::{Html, IntoResponse, Response},
Json,
};
use serde::Serialize;
use utoipa::ToSchema;
use crate::{
auth::SignInResponse,
config::Job,
handlers::{FlistState, PreviewResponse},
};
#[derive(Serialize, ToSchema)]
pub enum ResponseError {
InternalServerError,
Conflict(String),
NotFound(String),
Unauthorized(String),
BadRequest(String),
Forbidden(String),
TemplateError(ErrorTemplate),
}
impl IntoResponse for ResponseError {
fn into_response(self) -> Response<Body> {
match self {
ResponseError::InternalServerError => {
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error").into_response()
}
ResponseError::Conflict(msg) => (StatusCode::CONFLICT, msg).into_response(),
ResponseError::NotFound(msg) => (StatusCode::NOT_FOUND, msg).into_response(),
ResponseError::Unauthorized(msg) => (StatusCode::UNAUTHORIZED, msg).into_response(),
ResponseError::BadRequest(msg) => (StatusCode::BAD_REQUEST, msg).into_response(),
ResponseError::Forbidden(msg) => (StatusCode::FORBIDDEN, msg).into_response(),
ResponseError::TemplateError(t) => match t.render() {
Ok(html) => {
let mut resp = Html(html).into_response();
match t.err {
TemplateErr::NotFound(reason) => {
*resp.status_mut() = StatusCode::NOT_FOUND;
resp.headers_mut()
.insert(FAIL_REASON_HEADER_NAME, reason.parse().unwrap());
}
TemplateErr::BadRequest(reason) => {
*resp.status_mut() = StatusCode::BAD_REQUEST;
resp.headers_mut()
.insert(FAIL_REASON_HEADER_NAME, reason.parse().unwrap());
}
TemplateErr::InternalServerError(reason) => {
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
resp.headers_mut()
.insert(FAIL_REASON_HEADER_NAME, reason.parse().unwrap());
}
}
resp
}
Err(err) => {
tracing::error!("template render failed, err={}", err);
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to render template. Error: {}", err),
)
.into_response()
}
},
}
}
}
#[derive(ToSchema)]
pub enum ResponseResult {
Health,
FlistCreated(Job),
FlistState(FlistState),
Flists(HashMap<String, Vec<FileInfo>>),
PreviewFlist(PreviewResponse),
SignedIn(SignInResponse),
DirTemplate(DirListTemplate),
Res(hyper::Response<tower_http::services::fs::ServeFileSystemResponseBody>),
}
impl IntoResponse for ResponseResult {
fn into_response(self) -> Response<Body> {
match self {
ResponseResult::Health => (
StatusCode::OK,
Json(serde_json::json!({"msg": "flist server is working"})),
)
.into_response(),
ResponseResult::SignedIn(token) => (StatusCode::CREATED, Json(token)).into_response(),
ResponseResult::FlistCreated(job) => (StatusCode::CREATED, Json(job)).into_response(),
ResponseResult::FlistState(flist_state) => (
StatusCode::OK,
Json(serde_json::json!({
"flist_state": flist_state
})),
)
.into_response(),
ResponseResult::Flists(flists) => (StatusCode::OK, Json(flists)).into_response(),
ResponseResult::PreviewFlist(content) => {
(StatusCode::OK, Json(content)).into_response()
}
ResponseResult::DirTemplate(t) => match t.render() {
Ok(html) => Html(html).into_response(),
Err(err) => {
tracing::error!("template render failed, err={}", err);
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to render template. Error: {}", err),
)
.into_response()
}
},
ResponseResult::Res(res) => res.map(axum::body::Body::new),
}
}
}
//////// TEMPLATES ////////
#[derive(Serialize, Clone, Debug, ToSchema)]
pub struct FileInfo {
pub name: String,
pub path_uri: String,
pub is_file: bool,
pub size: u64,
pub last_modified: i64,
pub progress: f32,
}
#[derive(Serialize, ToSchema)]
pub struct DirLister {
pub files: Vec<FileInfo>,
}
#[derive(Template, Serialize, ToSchema)]
#[template(path = "index.html")]
pub struct DirListTemplate {
pub lister: DirLister,
pub cur_path: String,
}
mod filters {
pub(crate) fn datetime(ts: &i64) -> ::askama::Result<String> {
if let Ok(format) =
time::format_description::parse("[year]-[month]-[day] [hour]:[minute]:[second] UTC")
{
return Ok(time::OffsetDateTime::from_unix_timestamp(*ts)
.unwrap()
.format(&format)
.unwrap());
}
Err(askama::Error::Fmt(std::fmt::Error))
}
}
#[derive(Template, Serialize, ToSchema)]
#[template(path = "error.html")]
pub struct ErrorTemplate {
pub err: TemplateErr,
pub cur_path: String,
pub message: String,
}
const FAIL_REASON_HEADER_NAME: &str = "fl-server-fail-reason";
#[derive(Serialize, ToSchema)]
pub enum TemplateErr {
BadRequest(String),
NotFound(String),
InternalServerError(String),
}

View File

@@ -0,0 +1,174 @@
use axum::extract::State;
use std::{io::Error, path::PathBuf, sync::Arc};
use tokio::io;
use tower::util::ServiceExt;
use tower_http::services::ServeDir;
use axum::{
body::Body,
http::{Request, StatusCode},
response::IntoResponse,
};
use axum_macros::debug_handler;
use percent_encoding::percent_decode;
use crate::{
config,
handlers::Filter,
response::{
DirListTemplate, DirLister, ErrorTemplate, FileInfo, ResponseError, ResponseResult,
TemplateErr,
},
};
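/// Catch-all route: serves files from disk relative to the server's working directory.
/// When the requested path resolves to a directory, an HTML listing is rendered instead;
/// unknown paths return a "not found" error page.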
#[debug_handler]
pub async fn serve_flists(
State(state): State<Arc<config::AppState>>,
req: Request<Body>,
) -> impl IntoResponse {
let path = req.uri().path().to_string();
return match ServeDir::new("").oneshot(req).await {
Ok(res) => {
let status = res.status();
match status {
StatusCode::NOT_FOUND => {
let full_path = match validate_path(&path) {
Ok(p) => p,
Err(_) => {
return Err(ResponseError::TemplateError(ErrorTemplate {
err: TemplateErr::BadRequest("invalid path".to_string()),
cur_path: path.to_string(),
message: "invalid path".to_owned(),
}));
}
};
let cur_path = std::path::Path::new(&full_path);
match cur_path.is_dir() {
true => {
let rs = visit_dir_one_level(&full_path, &state, None).await;
match rs {
Ok(files) => Ok(ResponseResult::DirTemplate(DirListTemplate {
lister: DirLister { files },
cur_path: path.to_string(),
})),
Err(e) => Err(ResponseError::TemplateError(ErrorTemplate {
err: TemplateErr::InternalServerError(e.to_string()),
cur_path: path.to_string(),
message: e.to_string(),
})),
}
}
false => Err(ResponseError::TemplateError(ErrorTemplate {
err: TemplateErr::NotFound("file not found".to_string()),
cur_path: path.to_string(),
message: "file not found".to_owned(),
})),
}
}
_ => Ok(ResponseResult::Res(res)),
}
}
Err(err) => Err(ResponseError::TemplateError(ErrorTemplate {
err: TemplateErr::InternalServerError(format!("Unhandled error: {}", err)),
cur_path: path.to_string(),
message: format!("Unhandled error: {}", err),
})),
};
}
fn validate_path(path: &str) -> io::Result<PathBuf> {
let path = path.trim_start_matches('/');
let path = percent_decode(path.as_ref()).decode_utf8_lossy();
let mut full_path = PathBuf::new();
// validate
for seg in path.split('/') {
if seg.starts_with("..") || seg.contains('\\') {
return Err(Error::other("invalid path"));
}
full_path.push(seg);
}
Ok(full_path)
}
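/// Lists a single directory level. Directories are always included; regular files are only
/// included if they carry the `.fl` extension, together with their upload progress (100% when
/// no job is tracked). The optional filter further narrows entries by name and size.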
pub async fn visit_dir_one_level<P: AsRef<std::path::Path>>(
path: P,
state: &Arc<config::AppState>,
filter: Option<Filter>,
) -> io::Result<Vec<FileInfo>> {
let path = path.as_ref();
let mut dir = tokio::fs::read_dir(path).await?;
let mut files: Vec<FileInfo> = Vec::new();
while let Some(child) = dir.next_entry().await? {
let path_uri = child.path().to_string_lossy().to_string();
let is_file = child.file_type().await?.is_file();
let name = child.file_name().to_string_lossy().to_string();
let size = child.metadata().await?.len();
let mut progress = 0.0;
if is_file {
match state
.flists_progress
.lock()
.expect("failed to lock state")
.get(&path.join(&name).to_path_buf())
{
Some(p) => progress = *p,
None => progress = 100.0,
}
            // files without an extension are simply skipped instead of panicking
            let ext = child
                .path()
                .extension()
                .map(|ex| ex.to_string_lossy().to_string())
                .unwrap_or_default();
if ext != "fl" {
continue;
}
}
if let Some(ref filter_files) = filter {
if let Some(ref filter_name) = filter_files.name {
if filter_name.clone() != name {
continue;
}
}
if let Some(ref filter_max_size) = filter_files.max_size {
if filter_max_size.clone() < size as usize {
continue;
}
}
if let Some(ref filter_min_size) = filter_files.min_size {
if filter_min_size.clone() > size as usize {
continue;
}
}
}
files.push(FileInfo {
name,
path_uri,
is_file,
size: size,
last_modified: child
.metadata()
.await?
.modified()?
.duration_since(std::time::SystemTime::UNIX_EPOCH)
.expect("failed to get duration")
.as_secs() as i64,
progress,
});
}
Ok(files)
}

View File

@@ -0,0 +1,57 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{% block title %}{{ title }}{% endblock %}</title>
<style>
a {
text-decoration: none;
color: #4c7883;
cursor: pointer;
}
a:hover, a:hover .directory, a:hover .file {
color: #6ea90c;
}
.directory {
color:#6ea90c;
}
.file {
color: #585a56;
}
#main {
width: 60%;
max-width: 1600px;
margin: 1em auto;
}
footer {
display: block;
padding: 40px;
font-size: 12px;
text-align: center;
}
</style>
{% block head %}{% endblock %}
</head>
<body>
<div id="main">
{% block content %}{% endblock %}
</div>
<footer>
Served with <a href="https://github.com/threefoldtech/rfs">flist-server</a>
</footer>
{% block foot %}{% endblock %}
</body>
</html>

View File

@@ -0,0 +1,16 @@
{% extends "base.html" %}
{% block title %}Error Occurred while listing directory for {{ cur_path }}{% endblock %}
{% block content %}
<h1>Error</h1>
<hr>
<dl>
<dt> Request Path: </dt> <dd> <span class="directory">{{ cur_path }}</span> </dd>
<dt> Error Message: </dt> <dd> <span class="error">{{ message }} </span> </dd>
</dl>
<hr />
{% endblock %}

View File

@@ -0,0 +1,41 @@
{% extends "base.html" %}
{% block title %}Directory listing for /{{ cur_path }}{% endblock %}
{% block head %}
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
{% endblock %}
{% block content %}
<h1>Directory listing for <span class="directory">/{{ cur_path }}</span></h1>
<hr>
<ol>
{% for file in lister.files %}
{% if file.is_file %}
<li class="item">
<a class="file"
href="/{{ file.path_uri|urlencode }}"
data-src="{{ file.path_uri|urlencode }}"
title="{{file.name}} {{ file.last_modified|datetime }}">
<span class="fa fa-file"></span> {{file.name}}
</a>
</li>
{% else %}
<li>
<a href="/{{ file.path_uri|urlencode }}/" title="{{ file.last_modified|datetime }}">
<span class="fa fa-folder"></span> {{file.name}}/
</a>
</li>
{% endif %}
{% endfor %}
</ol>
<hr>
{% endblock %}

View File

@@ -0,0 +1 @@
VITE_API_URL="http://localhost:4000"

24
components/rfs/frontend/.gitignore vendored Normal file
View File

@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?

View File

@@ -0,0 +1,3 @@
{
"recommendations": ["Vue.volar"]
}

View File

@@ -0,0 +1,13 @@
# build stage
FROM node:lts-alpine as build-stage
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY . .
RUN npm run build
# production stage
FROM nginx:stable-alpine as production-stage
COPY --from=build-stage /app/dist /usr/share/nginx/html
EXPOSE 80
CMD ["nginx", "-g", "daemon off;"]

View File

@@ -0,0 +1,82 @@
# Threefold RFS
## Description
`Threefold RFS` is a frontend that helps manage the RFS server for creating, mounting, and extracting FungiStore lists, or fl for short. An fl is a simple format that stores information about a whole filesystem in a compact way. It doesn't hold the actual data but includes enough details to retrieve the data from a store.
## Prerequisites
- build essentials
```bash
sudo apt-get install build-essential
```
- [node js](https://nodejs.org/en/download/package-manager)
- [rust](https://www.rust-lang.org/tools/install)
- Cargo, configured so it can be run from the shell
- musl tool
```bash
sudo apt install musl-tools
```
## Installation
```bash
git clone https://github.com/threefoldtech/rfs.git
```
### backend
In fl-server dir:
- create a `flists` directory containing a subdirectory for each user, e.g.:
  - fl-server
    - flists
      - user1
      - user2
- include a config file, e.g.:
```toml
host='localhost'
port=4000
store_url=['dir:///tmp/store0']
flist_dir='flists'
jwt_secret='secret'
jwt_expire_hours=5
[[users]] # list of authorized user in the server
username = "user1"
password = "password1"
[[users]]
username = "user2"
password = "password2"
```
- Move to `fl-server` directory and execute the following command to run the backend:
```bash
cargo run --bin fl-server -- --config-path config.toml
```
### frontend
- Move to `frontend` directory, open new terminal and execute the following commands to run the frontend:
```bash
npm install
npm run dev
```
## Usage
- Log in with one of the users listed in config.toml, using their username and password
- Create Flist
- Preview Flist
- List all Flists
- Download Flist
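
The same operations are exposed by the backend HTTP API. Below is a minimal sketch using `curl`, assuming the backend from the config above is reachable at `http://localhost:4000`, that `jq` is installed, and that the request fields mirror the payloads sent by this frontend:

```bash
# sign in with a user from config.toml and keep the JWT
TOKEN=$(curl -s -X POST http://localhost:4000/v1/api/signin \
  -H 'Content-Type: application/json' \
  -d '{"username": "user1", "password": "password1"}' | jq -r .access_token)

# create an flist from a public docker image; the response carries a job id
curl -s -X POST http://localhost:4000/v1/api/fl \
  -H "Authorization: Bearer $TOKEN" \
  -H 'Content-Type: application/json' \
  -d '{"image_name": "alpine", "username": "", "password": "", "auth": "", "email": "", "server_address": "", "identity_token": "", "registry_token": ""}'

# poll the conversion state, replacing <job_id> with the id returned above
curl -s -H "Authorization: Bearer $TOKEN" http://localhost:4000/v1/api/fl/<job_id>
```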

View File

@@ -0,0 +1,18 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link rel="icon" type="image" href="./src/assets/logo.png">
<title>Threefold Flist</title>
<link href="./src/style.css" rel="stylesheet">
<link href="https://cdn.materialdesignicons.com/5.4.55/css/materialdesignicons.min.css" rel="stylesheet">
</head>
<body>
<div id="app"></div>
<script type="module" src="/src/main.ts"></script>
</body>
</html>

1476
components/rfs/frontend/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,29 @@
{
"name": "frontend",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vue-tsc -b && vite build",
"preview": "vite preview"
},
"dependencies": {
"@mdi/font": "^7.4.47",
"@vueuse/core": "^10.11.1",
"axios": "^1.7.3",
"filesize": "^10.1.4",
"mdi": "^2.2.43",
"vue": "^3.4.31",
"vue-router": "^4.4.2",
"vue3-toastify": "^0.2.2",
"vuetify": "^3.6.14"
},
"devDependencies": {
"@types/node": "^22.1.0",
"@vitejs/plugin-vue": "^5.0.5",
"typescript": "^5.2.2",
"vite": "^5.3.4",
"vue-tsc": "^2.0.24"
}
}

View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="31.88" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 257"><defs><linearGradient id="IconifyId1813088fe1fbc01fb466" x1="-.828%" x2="57.636%" y1="7.652%" y2="78.411%"><stop offset="0%" stop-color="#41D1FF"></stop><stop offset="100%" stop-color="#BD34FE"></stop></linearGradient><linearGradient id="IconifyId1813088fe1fbc01fb467" x1="43.376%" x2="50.316%" y1="2.242%" y2="89.03%"><stop offset="0%" stop-color="#FFEA83"></stop><stop offset="8.333%" stop-color="#FFDD35"></stop><stop offset="100%" stop-color="#FFA800"></stop></linearGradient></defs><path fill="url(#IconifyId1813088fe1fbc01fb466)" d="M255.153 37.938L134.897 252.976c-2.483 4.44-8.862 4.466-11.382.048L.875 37.958c-2.746-4.814 1.371-10.646 6.827-9.67l120.385 21.517a6.537 6.537 0 0 0 2.322-.004l117.867-21.483c5.438-.991 9.574 4.796 6.877 9.62Z"></path><path fill="url(#IconifyId1813088fe1fbc01fb467)" d="M185.432.063L96.44 17.501a3.268 3.268 0 0 0-2.634 3.014l-5.474 92.456a3.268 3.268 0 0 0 3.997 3.378l24.777-5.718c2.318-.535 4.413 1.507 3.936 3.838l-7.361 36.047c-.495 2.426 1.782 4.5 4.151 3.78l15.304-4.649c2.372-.72 4.652 1.36 4.15 3.788l-11.698 56.621c-.732 3.542 3.979 5.473 5.943 2.437l1.313-2.028l72.516-144.72c1.215-2.423-.88-5.186-3.54-4.672l-25.505 4.922c-2.396.462-4.435-1.77-3.759-4.114l16.646-57.705c.677-2.35-1.37-4.583-3.769-4.113Z"></path></svg>


View File

@@ -0,0 +1,33 @@
<template>
<v-app>
<router-view v-slot="{ Component, route }">
<Navbar v-if="route.path != `/login`"></Navbar>
<v-main class="mn-height" style="--v-layout-left: 0px;" >
<div :key="route.path">
<component :is="Component" />
</div>
</v-main>
<Footer v-if="route.path != `/login`"></Footer>
</router-view>
</v-app>
</template>
<script setup lang="ts">
import Footer from './components/Footer.vue';
import Navbar from './components/Navbar.vue';
</script>
<style scoped>
.logo {
height: 6em;
padding: 1.5em;
will-change: filter;
transition: filter 300ms;
}
.logo:hover {
filter: drop-shadow(0 0 2em #646cffaa);
}
.logo.vue:hover {
filter: drop-shadow(0 0 2em #42b883aa);
}
</style>

Binary file not shown. (image asset, 1.6 MiB)

Binary file not shown. (image asset, 76 KiB)

Binary file not shown. (image asset, 4.5 KiB)

Binary file not shown. (image asset, 4.2 KiB)

Binary file not shown. (image asset, 336 KiB)

View File

@@ -0,0 +1,11 @@
import axios from "axios";
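// Shared axios client. The Authorization header is seeded from sessionStorage at module load;
// after signing in, Login.vue installs a request interceptor that attaches the fresh token.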
export const api = axios.create({
baseURL: import.meta.env.VITE_API_URL,
headers: {
"Content-Type": "application/json",
Authorization: "Bearer " + sessionStorage.getItem("token"),
},
});

View File

@@ -0,0 +1,308 @@
<template>
<div class="d-flex flex-column justify-center mt-10" >
<v-container fluid>
<v-row justify="center">
<v-col cols="8">
<h2 class="mb-2">Create a Flist:</h2>
</v-col>
</v-row>
<v-form>
<v-row justify="center">
<v-col cols="8">
<label
for="image-name"
class="text-subtitle-1 text-medium-emphasis d-flex align-center"
>
Image Name<span style="color: red">*</span>
</label>
<v-text-field
class="pr-5 rounded"
id="image-name"
v-model="flist.image_name"
variant="solo-filled"
density="compact"
required
placeholder="example: redis, keinos/sqlite3, alpine"
>
</v-text-field>
<v-checkbox
value="true"
v-model="privateReg"
hide-details
density="compact"
><template v-slot:label>
            <span class="text-subtitle-2">Private Registry</span>
<v-tooltip activator="parent" location="start">Check this box to pull the Docker image from your private registry instead of the public repository.</v-tooltip>
</template>
</v-checkbox>
<div v-if="privateReg">
<v-alert text="Select a sign-in method" type="info" density="compact" color = "#1aa18f" closable width="60em"></v-alert>
<v-radio-group class="p-0 m-0" v-model="privateType" inline>
<v-radio value="username">
<template v-slot:label>
<span class="text-subtitle-2">Username - Password</span>
</template>
</v-radio>
<v-radio value="email">
<template v-slot:label>
<span class="text-subtitle-2">Email - Password</span>
</template>
</v-radio>
<v-radio value="token">
<template v-slot:label>
<span class="text-subtitle-2">Identity Token</span>
                <v-tooltip activator="parent" location="bottom">A token you can use as an alternative to your email/username and password</v-tooltip>
</template>
</v-radio>
</v-radio-group>
<v-container class="pr-0 pl-0">
<v-row>
<v-col>
<div v-if="privateType === `email`">
<label
for="email"
class="text-subtitle-1 text-medium-emphasis d-flex align-center justify-space-between"
>
Email
</label>
<v-text-field
class="pr-5 rounded"
id="email"
v-model="flist.email"
variant="solo-filled"
density="compact"
placeholder="johndoe@gmail.com"
type="email"
>
</v-text-field>
</div>
<div v-if="privateType !== `email`">
<label
for="username"
class="text-subtitle-1 text-medium-emphasis d-flex align-center justify-space-between"
>
Username
</label>
<v-text-field
class="pr-5 text-medium-emphasis"
id="username"
v-model="flist.username"
variant="solo-filled"
density="compact"
:placeholder="
privateType === `token` ? `token` : `johndoe`
"
:value="privateType === `token`?`token`:``"
:readonly="privateType === `token`"
>
</v-text-field>
</div>
</v-col>
<v-col>
<div
v-if="privateType.length != 0 && privateType !== `token`"
>
<label
for="password"
class="text-subtitle-1 text-medium-emphasis d-flex align-center justify-space-between"
>
Password
</label>
<v-text-field
class="pr-5 rounded"
id="password"
v-model="flist.password"
variant="solo-filled"
:append-inner-icon="visible ? 'mdi-eye-off' : 'mdi-eye'"
:type="visible ? 'text' : 'password'"
@click:append-inner="visible = !visible"
density="compact"
>
</v-text-field>
</div>
<div v-if="privateType === `token`">
<label
for="identity-token"
class="text-subtitle-1 text-medium-emphasis d-flex align-center justify-space-between"
>
Identity Token
</label>
<v-text-field
class="pr-5 rounded"
id="identity-token"
v-model="flist.identity_token"
variant="solo-filled"
density="compact"
>
</v-text-field>
</div>
</v-col>
</v-row>
</v-container>
</div>
<v-checkbox
value="true"
v-model="registeryAddress"
hide-details
density="compact"
><template v-slot:label>
            <span class="text-subtitle-2">Self-Hosted Registry</span>
            <v-tooltip activator="parent" location="start">Check this box to pull the Docker image from your self-hosted registry using its registry address</v-tooltip>
</template>
</v-checkbox>
<div v-if="registeryAddress">
<label
for="server-address"
class="text-subtitle-1 text-medium-emphasis d-flex align-center justify-space-between"
>
              Registry Address
</label>
<v-text-field
class="pr-5 rounded"
id="server-address"
v-model="flist.server_address"
variant="solo-filled"
density="compact"
placeholder="localhost:5000"
>
</v-text-field>
</div>
<v-checkbox
value="true"
v-model="registeryToken"
density="compact"
hide-details
><template v-slot:label>
            <span class="text-subtitle-2">Web Registry Token</span>
            <v-tooltip activator="parent" location="start">Check this box to use a web registry token to pull the image from your registry with secure authentication</v-tooltip>
</template>
</v-checkbox>
<div v-if="registeryToken">
<label
for="registery-token"
class="text-subtitle-1 text-medium-emphasis d-flex align-center justify-space-between"
>
              Registry Token
</label>
<v-text-field
class="pr-5 rounded mb-5"
id="registery-token"
v-model="flist.registry_token"
variant="solo-filled"
density="compact"
>
</v-text-field>
</div>
</v-col>
</v-row>
<v-row>
<v-col offset="8" class="pa-0">
<div class="position-relative" style="left: -5%;" >
<v-btn
class="pr-5 rounded-pill background-green mb-8 mt-5 text-white"
size="large"
width="50%"
@click="create"
:disabled="pending"
v-if = "!pending"
>
Create
</v-btn>
<v-progress-linear
:size="70"
color="#1aa18f"
indeterminate
class="mb-5 mt-5 w-50"
rounded=""
height="20"
v-else
>
<template v-slot:default> {{ progress }} % </template>
</v-progress-linear>
</div>
</v-col>
</v-row>
</v-form>
</v-container>
</div>
</template>
<script setup lang="ts">
import { ref, watch } from "vue";
import { Flist } from "../types/Flist";
import { toast } from "vue3-toastify";
import "vue3-toastify/dist/index.css";
import { api } from "../client";
import router from "../router";
const pending = ref<boolean>(false);
let progress = ref<number>(0);
const stopPolling = ref<boolean>(false);
let polling: NodeJS.Timeout;
let id = ""
const pullLists = async () => {
try {
const response = await api.get("v1/api/fl/" + id);
if (response.data.flist_state.InProgress) {
progress.value = Math.floor(
response.data.flist_state.InProgress.progress
);
} else {
stopPolling.value = true;
pending.value = false;
router.push({name: "myflists"})
}
} catch (error: any) {
pending.value = false;
stopPolling.value = true;
toast.error(error.response?.data)
}
};
watch(stopPolling, () => {
if (stopPolling.value) {
clearInterval(polling);
}
});
const privateReg = ref<boolean>(false);
const registeryAddress = ref<boolean>(false);
const registeryToken = ref<boolean>(false);
const privateType = ref<string>("username");
const flist = ref<Flist>({
auth: "",
email: "",
identity_token: "",
image_name: "",
password: "",
registry_token: "",
server_address: "",
username: "",
});
const visible = ref<boolean>(false);
const create = async () => {
try {
const response = await api.post("/v1/api/fl", flist.value);
id = response.data.id
pending.value = true
polling = setInterval(pullLists, 1 * 10000);
} catch (error: any) {
    toast.error(error.response?.data || "error occurred");
    const errors: number[] = [401, 403];
if (errors.includes(error.response?.status)) {
sessionStorage.removeItem("token");
}
}
};
</script>

View File

@@ -0,0 +1,23 @@
<template>
<v-footer class="bg-grey-darken-3 d-flex justify-center w-100 m-0">
All rights reserved © 2024 -
<a
href="https://threefold.io"
style="color: inherit; text-decoration: none"
>
ThreeFold <v-icon icon="mdi-link" style="font-size: 1em" />
</a>
<a
href="https://github.com/threefoldtech"
style="color: inherit; text-decoration: none"
>
<v-icon icon="mdi-github" style="margin-left: 7px" />
</a>
</v-footer>
</template>
<style>
.v-footer {
height: 7% !important;
}
</style>

View File

@@ -0,0 +1,158 @@
<template >
<div class="w-100 position-relative" style="top: -62.5px" >
<v-img :src="image" cover style="z-index: 2"></v-img>
</div>
<div class="d-flex justify-center mt-0">
<v-navigation-drawer
app
class="position-absolute mx-height"
style="top: 30%; left: 0; height: 62.5%; width: fit-content; min-width: 12.5%;"
>
<v-list>
<v-list-item nav>
<v-list-item-title class=" text-h6 " > Users</v-list-item-title>
</v-list-item>
<v-divider></v-divider>
<v-list-item density="compact"
v-for="userName in userNameList"
:key="userName"
@click="username = userName"
>
<template v-slot:prepend >
<v-icon icon="mdi-account" color="#1aa18f" style="font-size: 15px;"></v-icon>
<v-list-item-title style="padding: 2px 4px;
font-size: 15px;
font-weight: 300;">
{{ userName }}
</v-list-item-title>
</template>
</v-list-item>
</v-list>
</v-navigation-drawer>
<v-container
class="d-flex flex-column w-75 "
fluid
style="height: fit-content; position: relative; left: 6%;"
>
<h2 class="mb-2" v-if="username.length != 0">
<v-icon icon="mdi-account" color="#1aa18f"></v-icon>{{ username }}
</h2>
<!-- table containe flists -->
<v-data-table density="compact"
:items="filteredFlist"
:headers="tableHeader"
dense
class="thick-border "
items-per-page="25"
>
<template #item.name="{ value }">
<v-icon icon="mdi-text-box" class="mr-1" color="grey"/>
<span class="file-name">{{ value }}</span>
</template>
<template v-slot:item.preview = "{index}" >
<a :href="`/` + filteredFlist[index].path_uri">
<v-btn class="elevation-0">
<v-icon icon="mdi-eye-outline" color="grey"></v-icon>
</v-btn>
</a>
</template>
<template #item.size="{value}">
{{filesize(value, {standard: "jedec", precision: 3})}}
</template>
<template #item.last_modified="{ value }">
{{ new Date(value * 1000).toString().split("(")[0] }}
</template>
<template #item.path_uri="{ value }">
<v-btn class="elevation-0">
<a :href="baseURL + `/` + value" download>
<v-icon icon="mdi-download" color="grey"></v-icon
></a>
<v-tooltip activator="parent" location="start"
>Download flist</v-tooltip
>
</v-btn>
<v-btn @click="copyLink(baseURL + `/` + value)" class="elevation-0">
<v-icon icon="mdi-content-copy" color="grey"></v-icon>
<v-tooltip activator="parent">Copy Link</v-tooltip>
</v-btn>
</template>
</v-data-table>
</v-container>
</div>
</template>
<script setup lang="ts">
import { onMounted, ref, watch } from "vue";
import image from "../assets/home.png";
import { FlistsResponseInterface, FlistBody } from "../types/Flist.ts";
import { toast } from "vue3-toastify";
import "vue3-toastify/dist/index.css";
import { api } from "../client.ts";
import { copyLink } from "../helpers.ts";
import {filesize} from "filesize";
const baseURL = import.meta.env.VITE_API_URL;
const tableHeader = [
{ title: "File Name", key: "name" },
{ title: "Preview", key:"preview"},
{ title: "Size", key: "size" },
{ title: "Last Modified", key: "last_modified" },
{ title: "Download", key: "path_uri", sortable: false },
];
var flists = ref<FlistsResponseInterface>({});
const username = ref("");
const userNameList = ref<string[]>([]);
let filteredFlist = ref<FlistBody[]>([]);
const filteredFlistFn = () => {
filteredFlist.value = [];
const map = flists.value;
if (username.value.length === 0) {
for (var flistMap in map) {
for (let flist of map[flistMap]) {
if (flist.progress === 100) {
filteredFlist.value.push(flist);
}
}
}
} else {
for (let flist of map[username.value]) {
if (flist.progress === 100) {
filteredFlist.value.push(flist);
}
}
}
};
const getUserNames = () => {
const list: string[] = [];
const map = flists.value;
for (var flistMap in map) {
list.push(flistMap);
}
userNameList.value = list;
};
onMounted(async () => {
try {
flists.value = (await api.get<FlistsResponseInterface>("/v1/api/fl")).data;
getUserNames();
filteredFlistFn();
} catch (error: any) {
toast.error(error.response?.data);
}
});
watch(username, () => {
filteredFlistFn();
});
</script>
<style lang="css" scoped>
.mx-height {
max-height: 600px;
}
.mn-height {
min-height: calc(100% - 37%);
}
</style>

View File

@@ -0,0 +1,113 @@
<template>
<v-container fluid class="overflow-hidden pa-0" style="height: 100vh;">
<v-row class="h-100 ma-0 pa-0">
<v-col :cols="4" class="position-relative ma-0 pa-0 h-100">
<v-img :src="image" cover height="100%" style="z-index: 900"> </v-img>
<v-container
class="position-absolute top-0 d-flex flex-column justify-center ga-0"
style="z-index: 1000; height: 70%"
>
<v-img
:src="whiteLogo"
height="10%"
width="15%"
class="mb-5 flex-grow-0"
></v-img>
<p class="mt-0 text-white" style="width: 90%">
FungiStore is the main tool to create, mount, and extract FungiStore lists (Fungilist or FL for short). An FL is a simple format used to store information about an entire filesystem in a compact form. It does not contain the data itself but provides enough information to retrieve this data from a store.
</p>
</v-container>
</v-col>
<v-col :cols="8" class="d-flex align-center">
<v-container class="d-flex flex-column align-center justify-center">
<v-col :cols="6">
<v-form>
<v-img :src="logo" class="mb-10" height="10%" width="15%"></v-img>
<h2 class="mb-5">Sign in</h2>
<label
for="username"
class="text-subtitle-1 text-medium-emphasis d-flex align-center justify-space-between"
>
Username
</label>
<v-text-field
class="pr-5 rounded"
v-model="user.username"
variant="outlined"
density="compact"
id="username"
required
>
</v-text-field>
<label
for="password"
class="text-subtitle-1 text-medium-emphasis d-flex align-center justify-space-between"
>
Password
</label>
<v-text-field
class="mb-5 pr-5 rounded"
v-model="user.password"
:append-inner-icon="visible ? 'mdi-eye-off' : 'mdi-eye'"
:type="visible ? 'text' : 'password'"
variant="outlined"
@click:append-inner="visible = !visible"
density="compact"
id="password"
required
>
</v-text-field>
<v-btn
class="pr-5 rounded-pill background-green text-white position-relative"
style="left: 205px;"
size="large"
width="50%"
:disabled="loading"
@click="login"
>Sign In</v-btn>
</v-form>
</v-col>
</v-container>
</v-col>
</v-row>
</v-container>
</template>
<script setup lang="ts">
import { ref } from "vue";
import image from "./../assets/side.png";
import logo from "./../assets/logo.png";
import whiteLogo from "../assets/logo_white.png";
import { User } from "../types/User.ts";
import { api } from "../client.ts";
import { toast } from "vue3-toastify";
import "vue3-toastify/dist/index.css";
import router from "../router/index.ts";
const user = ref<User>({ username: "", password: "" });
const loading = ref<boolean>(false)
const visible = ref<boolean>(false);
const login = async () => {
try {
const response = await api.post("/v1/api/signin", user.value);
const token = response.data.access_token;
sessionStorage.setItem("token", token);
sessionStorage.setItem("username", user.value.username);
api.interceptors.request.use((config) => {
if (token) {
config.headers["Authorization"] = `Bearer ${token}`;
}
return config;
});
router.push("/myflists")
} catch (error: any) {
    toast.error(error.response?.data || "error occurred");
}
};
</script>

View File

@@ -0,0 +1,63 @@
<template>
<v-app-bar color="#1aa18f">
<v-app-bar-nav-icon to="/" class="ml-8">
<v-img :src="whiteLogo" contain height="50px" width="50px"></v-img>
</v-app-bar-nav-icon>
<v-spacer> </v-spacer>
<div class="mr-5" v-if="auth === null || auth?.length === 0">
<v-btn to="login">Login</v-btn>
</div>
<div class="mr-5" v-else>
<v-btn to="create"
><v-icon icon="mdi-plus-circle-outline" class="mr-2"></v-icon>Create
flist</v-btn
>
<v-menu class="white">
<template v-slot:activator="{ props }">
<v-btn
class="align-self-center me-4"
height="100%"
rounded="50%"
variant="plain"
v-bind="props"
style="font-size: 20px"
>
<v-icon icon="mdi-account"></v-icon>
</v-btn>
</template>
<v-list>
<v-list-item>
<v-btn><a href="/myflists" class="text-black" style="text-decoration:none;">My FLists</a></v-btn>
</v-list-item>
<v-list-item>
<v-btn @click="logout"
><v-icon icon="mdi-logout" style="font-size: 20px" />log
out</v-btn
>
</v-list-item>
</v-list>
</v-menu>
</div>
</v-app-bar>
</template>
<script setup lang="ts">
import { ref } from "vue";
import whiteLogo from "../assets/logo_white.png";
import { toast } from "vue3-toastify";
import router from "../router";
const auth= ref<string|null>(sessionStorage.getItem("token"));
const logout = async () => {
try {
sessionStorage.removeItem("token")
sessionStorage.removeItem("username")
auth.value = sessionStorage.getItem("token");
router.push("/")
} catch (error: any) {
    toast.error(error.response?.data || "error occurred");
}
};
</script>

View File

@@ -0,0 +1,140 @@
<template>
<div class="w-100 position-relative" style="top: -62.5px">
<v-img :src="image" cover style="z-index: 2"></v-img>
<div
class="position-absolute w-100 text-white d-flex justify-content align-content "
style="z-index: 4; top: 55%;left:40%;"
>
</div>
</div>
<div class="mn-height mb-10" v-if="!pending">
<v-container class="m-0 pa-0">
<v-row>
<div>
<h2 class="text-h4 mb-3">{{
id
}}</h2>
<p>This Flist was created by <v-chip color="#1aa18f" label>{{ username }} </v-chip> </p>
</div>
</v-row>
<v-row class="d-flex flex-column">
<h3 class="text-subtitle-1 text-grey-darken-2">Source file</h3>
<v-text-field rounded="20" variant="outlined" density="compact" readonly class="text-grey-darken-1 mr-0">
{{ baseURL + url }}
<template #append>
<v-btn
color="#1aa18f"
value="Copy"
class="Btn"
@click="copyLink(baseURL + url)">
Copy
</v-btn>
</template>
</v-text-field>
</v-row>
<v-row class="d-flex flex-column">
<h3 class="text-subtitle-1 text-grey-darken-2">Archive Checksum (MD5)</h3>
<v-text-field rounded="20" variant="outlined" density="compact" readonly class="text-grey-darken-1 mr-0">
{{flistPreview.checksum}}
<template #append>
<v-btn
color="#1aa18f"
value="Copy"
class="Btn"
@click="copyLink(flistPreview.checksum)">
Copy
</v-btn>
</template>
</v-text-field>
</v-row>
<v-row class="d-flex flex-column">
<h3 class="text-subtitle-1 text-grey-darken-2">Metadata</h3>
<v-text-field rounded="20" variant="outlined" density="compact" readonly class="text-grey-darken-1 mr-0" width="98.5%">
{{ flistPreview.metadata}}
<template #prepend-inner>
<v-chip color="#1aa18f" label class ="chip">Backend (default)</v-chip>
</template>
</v-text-field>
</v-row>
<v-row class="d-flex flex-column">
<h3 class="text-subtitle-1 text-grey-darken-2">Content</h3>
<v-textarea :model-value="showContent" variant="outlined" readonly rows="1" :class= "linkDecoration" class="text-grey-darken-1" auto-grow width="98.5%" @click="contentShow()">
</v-textarea>
</v-row>
</v-container>
</div>
<div class="d-flex align-center justify-center mb-12 mt-12" v-else>
<v-progress-circular
:size="70"
:width="7"
color="#1aa18f"
indeterminate
class="mb-5"
>
</v-progress-circular>
</div>
</template>
<script setup lang="ts">
import { onMounted, ref } from "vue";
import image from "../assets/home.png";
import { toast } from "vue3-toastify";
import "vue3-toastify/dist/index.css";
import { api } from "../client.ts";
import { copyLink } from "../helpers.ts";
import { FlistPreview } from "../types/Flist.ts";
const pending = ref<boolean>(true)
const flistPreview = ref<FlistPreview>({checksum:"", content:[], metadata:""});
const urlPartition = window.location.href.split("/")
const id = ref<string>(urlPartition[urlPartition.length - 1])
const username = ref<string>(urlPartition[urlPartition.length - 2])
const baseURL = ref<string>(import.meta.env.VITE_API_URL + "/");
const url ="flists" + "/" + username.value + "/" + id.value
const showContent = ref<string>()
const linkDecoration = ref<string>("text-as-anchor")
const contentShow = () => {
showContent.value = flistPreview.value?.content.join("\n")
linkDecoration.value = ""
}
onMounted(async () => {
try {
const encodedUrl = url.replaceAll("/", "%2F");
flistPreview.value = (await api.get<FlistPreview>("/v1/api/fl/preview/" + encodedUrl)).data;
flistPreview.value.content = flistPreview.value.content.slice(1)
showContent.value = "show content on click"
pending.value = false
} catch (error: any) {
toast.error(error.response?.data);
}
});
</script>
<style scoped>
.Btn{
position: relative;
left: -18px;
height: 40px;
width: 110px;
margin-left:0px;
}
.chip{
height: 40px;
position: relative;
left: -11px;
}
.text-as-anchor {
color: #42A5F5;
cursor: pointer;
}
.text-as-anchor:hover {
text-decoration: underline;
}
</style>

View File

@@ -0,0 +1,129 @@
<template>
<div>
<v-container class="pa-0">
<v-row no-gutters class="pa-0 ma-0">
<div class="user">
<h2 class="mt-5 mb-5 text-h5 text-grey-darken-2">
<v-icon icon="mdi-account" color="#1aa18f"></v-icon
>{{ loggedInUser }}
</h2>
</div>
</v-row>
<v-row no-gutters class="pa-0 ma-0">
<v-data-table
density="compact"
v-if="loggedInUser"
:items="currentUserFlists"
:headers="tableHeader"
dense
items-per-page="25"
class="thick-border"
>
<template #item.name="{ value }">
<v-icon icon="mdi-text-box" class="mr-1" color="grey" />
<span class="file-name">{{ value }}</span>
</template>
<template v-slot:item.preview = "{index}" >
<a :href="`/` + currentUserFlists[index].path_uri">
<v-btn class="elevation-0">
<v-icon icon="mdi-eye-outline" color="grey"></v-icon>
</v-btn>
</a>
</template>
<template #item.size="{ value }">
{{ filesize(value, { standard: "jedec", precision: 3 }) }}
</template>
<template v-slot:item.path_uri="{ index, value }">
<template v-if="currentUserFlists[index].progress === 100">
<v-btn class="elevation-0">
<a :href="baseURL + `/` + value" download>
<v-icon icon="mdi-download" color="grey"></v-icon
></a>
<v-tooltip activator="parent" location="start"
>Download flist</v-tooltip
>
</v-btn>
<v-btn
@click="copyLink(baseURL + `/` + value)"
class="elevation-0"
>
<v-icon icon="mdi-content-copy" color="grey"></v-icon>
<v-tooltip activator="parent">Copy Link</v-tooltip>
</v-btn>
</template>
<template v-else>
<span>loading... </span>
</template>
</template>
<template #item.last_modified="{ value }">
{{ new Date(value * 1000).toString().split("(")[0] }}
</template>
<template v-slot:item.progress="{ value }" class="w-25">
<template v-if="value != 100">
<v-progress-linear
:model-value="value"
color="#1aa18f"
height="20"
rounded="sm"
>
<template v-slot:default="{ value }">
<span class="text-white">{{ Math.floor(value) }}%</span>
</template>
</v-progress-linear>
</template>
<template v-else>
<v-chip color="#1aa18f">finished</v-chip>
</template>
</template>
</v-data-table>
</v-row>
</v-container>
</div>
</template>
<script setup lang="ts">
import { FlistsResponseInterface } from "../types/Flist.ts";
import { computed } from "vue";
import { onMounted, ref } from "vue";
import { toast } from "vue3-toastify";
import { api } from "../client.ts";
import { copyLink } from "../helpers.ts";
import { filesize } from "filesize";
const tableHeader = [
{ title: "File Name", key: "name" },
{ title: "Preview", key:"preview"},
{ title: "Size", key: "size" },
{ title: "Last Modified", key: "last_modified" },
{ title: "Download", key: "path_uri", sortable: false },
{ title: "Progress", key: "progress", width: "20%" },
];
const loggedInUser = sessionStorage.getItem("username");
var flists = ref<FlistsResponseInterface>({});
const baseURL = import.meta.env.VITE_API_URL;
const currentUserFlists = computed(() => {
return loggedInUser?.length ? flists.value[loggedInUser] : [];
});
onMounted(async () => {
try {
flists.value = (await api.get<FlistsResponseInterface>("/v1/api/fl")).data;
    // currentUserFlists is computed from flists, so it updates automatically after the fetch
} catch (error: any) {
toast.error(error.response?.data);
}
});
</script>
<style scoped>
.user .v-icon--size-default {
font-size: 25px;
}
</style>

View File

@@ -0,0 +1,8 @@
import { useClipboard } from "@vueuse/core";
import { toast } from "vue3-toastify";
const { copy } = useClipboard();
export const copyLink = (url: string) => {
copy(url);
toast.success("Link Copied to Clipboard");
};

View File

@@ -0,0 +1,17 @@
import { createApp } from "vue";
import "vuetify/styles";
import { createVuetify } from "vuetify";
import * as components from "vuetify/components";
import * as directives from "vuetify/directives";
import App from "./App.vue";
import router from "./router/index";
import createToast from "vue3-toastify";
const toast = createToast;
const vuetify = createVuetify({
components,
directives,
});
createApp(App).use(router).use(toast).use(vuetify).mount("#app");

View File

@@ -0,0 +1,52 @@
import { createRouter, createWebHistory, RouteRecordRaw } from "vue-router";
const Login = () => import("../components/Login.vue");
const CreateFlist = () => import("../components/CreateFlist.vue");
const Home = () => import("../components/Home.vue");
const UserFlist = () => import("../components/UserFlist.vue");
const PreviewFlist = () => import("../components/PreviewFlist.vue");
const routes: Array<RouteRecordRaw> = [
{
path: "/login",
name: "login",
component: Login,
},
{
path: "/myflists",
name: "myflists",
component: UserFlist,
meta: { requiresAuth: true },
},
{
path: "/create",
name: "create",
component: CreateFlist,
meta: { requiresAuth: true },
},
{
path: "/flists/:username/:id",
name: "previewflist",
component: PreviewFlist,
},
{
path: "/",
name: "home",
component: Home,
},
];
const router = createRouter({
history: createWebHistory(import.meta.env.BASE_URL),
routes,
});
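// Global guard: routes flagged with `requiresAuth` redirect to the login page
// when no token is present in sessionStorage.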
router.beforeEach((to, _, next) => {
const token: string | null = sessionStorage.getItem("token");
if (to.meta.requiresAuth && (token == null || token.length == 0)) {
next({ name: "login" });
} else {
next();
}
});
export default router;

View File

@@ -0,0 +1,21 @@
.background-green {
background-color: #1aa18f !important;
}
.thick-border .v-data-table__wrapper {
border: 3px solid #000;
}
.v-data-table-footer__items-per-page {
display: none !important;
}
.v-data-table td{
padding: 4px 8px;
font-size: 12px;
font-weight: 500;
}
.mn-height {
min-height: calc(100% - 7%);
}
.file-name {
font-weight: 500;
}

View File

@@ -0,0 +1,30 @@
export interface Flist {
auth: string;
email: string;
identity_token: string;
image_name: string;
password: string;
registry_token: string;
server_address: string;
username: string;
}
export interface FlistBody {
  is_file: boolean;
  last_modified: number;
name: string;
path_uri: string;
progress: number;
size: number;
}
export interface FlistsResponseInterface {
[key: string]: FlistBody[];
}
export interface FlistPreview{
checksum: string;
content: string[];
metadata: string;
}

View File

@@ -0,0 +1,4 @@
export interface User {
username: string;
password: string;
}

View File

@@ -0,0 +1 @@
/// <reference types="vite/client" />

View File

@@ -0,0 +1,27 @@
{
"compilerOptions": {
"composite": true,
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
"target": "ES2021",
"useDefineForClassFields": true,
"module": "ESNext",
"lib": ["ES2021", "DOM", "DOM.Iterable"],
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"resolveJsonModule": true,
"isolatedModules": true,
"moduleDetection": "force",
"noEmit": true,
"jsx": "preserve",
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noFallthroughCasesInSwitch": true
},
"include": ["src/**/*.ts", "src/**/*.tsx", "src/**/*.vue"]
}

View File

@@ -0,0 +1,11 @@
{
"files": [],
"references": [
{
"path": "./tsconfig.app.json"
},
{
"path": "./tsconfig.node.json"
}
]
}

View File

@@ -0,0 +1,13 @@
{
"compilerOptions": {
"composite": true,
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
"skipLibCheck": true,
"module": "ESNext",
"moduleResolution": "bundler",
"allowSyntheticDefaultImports": true,
"strict": true,
"noEmit": true
},
"include": ["vite.config.ts"]
}

View File

@@ -0,0 +1,7 @@
import { defineConfig } from 'vite'
import vue from '@vitejs/plugin-vue'
// https://vitejs.dev/config/
export default defineConfig({
plugins: [vue()],
})

View File

@@ -0,0 +1,5 @@
if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
fi
use flake

View File

@@ -0,0 +1,67 @@
[package]
name = "rfs"
version = "0.2.0"
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[build-dependencies]
git-version = "0.3.5"
[[bin]]
name = "rfs"
path = "src/main.rs"
required-features = ["build-binary"]
[features]
build-binary = [
"dep:polyfuse",
"dep:simple_logger",
"dep:tempfile",
"dep:daemonize",
"dep:clap"
]
[lib]
name = "rfs"
path = "src/lib.rs"
[dependencies]
anyhow = "1.0.44"
time = "0.3"
sqlx = { version = "0.7.4", features = [ "runtime-tokio-rustls", "sqlite" ] }
tokio = { version = "1", features = [ "rt", "rt-multi-thread", "macros"] }
libc = "0.2"
futures = "0.3"
thiserror = "1.0"
bytes = "1.1.0"
log = "0.4"
lru = "0.7.0"
nix = "0.23.0"
snap = "1.0.5"
bb8-redis = "0.13"
async-trait = "0.1.53"
url = "2.3.1"
blake2b_simd = "1"
aes-gcm = "0.10"
hex = "0.4"
rand = "0.8"
# next are only needed for the binarys
clap = { version = "4.2", features = ["derive"], optional = true}
simple_logger = {version = "1.0.1", optional = true}
daemonize = { version = "0.5", optional = true }
tempfile = { version = "3.3.0", optional = true }
workers = { git="https://github.com/threefoldtech/tokio-worker-pool.git" }
rust-s3 = "0.34.0-rc3"
openssl = { version = "0.10", features = ["vendored"] }
regex = "1.9.6"
which = "6.0"
reqwest = "0.11"
[dependencies.polyfuse]
branch = "master"
git = "https://github.com/muhamadazmy/polyfuse"
optional = true
[dev-dependencies]
reqwest = { version = "0.11", features = ["blocking"] }
assert_cmd = "2.0"

View File

@@ -0,0 +1,149 @@
# Introduction
`rfs` is the main tool to create, mount and extract FungiStore lists (FungiList), `fl` for short. An `fl` is a simple format
to keep information about an entire filesystem in a compact form. It does not hold the data itself, but enough information to
retrieve this data back from a `store`.
## Building rfs
To build rfs make sure you have rust installed then run the following commands:
```bash
# this is needed to be run once to make sure the musl target is installed
rustup target add x86_64-unknown-linux-musl
# build the binary
cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
```
The binary will be available under `./target/x86_64-unknown-linux-musl/release/rfs`. You can then copy that binary to `/usr/bin/`
to be able to use it from anywhere on your system.
## Stores
A store is where the actual data lives. A store can be as simple as a `directory` on your local machine, in which case the files in the `fl` are only accessible on your local machine. A store can also be a `zdb` running remotely, or a cluster of `zdb`s. Right now only `dir`, `http`, `zdb` and `s3` stores are supported, but more store types may be added in the future.
## Usage
### Creating an `fl`
```bash
rfs pack -m output.fl -s <store-specs> <directory>
```
This tells rfs to create an `fl` named `output.fl` using the store defined by the url `<store-specs>` and upload all the files under `<directory>` recursively.
The simplest form of `<store-specs>` is a `url`. The store `url` defines the store to use. Any `url` has a scheme that defines the store type. Right now we have support only for:
- `dir`: dir is a very simple store that is mostly used for testing. A dir store will store the fs blobs in another location defined by the url path. An example of a valid dir url is `dir:///tmp/store`
- `zdb`: [zdb](https://github.com/threefoldtech/0-db) is an append-only key-value store that provides a Redis-like API. An example zdb url can be something like `zdb://<hostname>[:port][/namespace]`
- `s3`: aws-s3 is used for storing and retrieving large amounts of data (blobs) in buckets (directories). An example: `s3://<username>:<password>@<host>:<port>/<bucket-name>`
    `region` is an optional param for s3 stores; if you want to provide one, you can add it as a query parameter to the url: `?region=<region-name>`
- `http`: http is a store mostly used for wrapping a dir store to fetch data through http requests. It does not support uploading, just fetching the data.
It can be set in the FL file as the store to fetch the data with `rfs config`. Example: `http://localhost:9000/store` (https works too).
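
For instance, packing a directory against a remote zdb namespace or an s3 bucket could look like the following sketch (hostnames, ports, credentials and bucket/namespace names below are placeholders, not real endpoints):

```bash
# zdb store (placeholder host and namespace)
rfs pack -m output.fl -s zdb://zdb.example.com:9900/flists ~/Documents

# s3 store with an explicit region (placeholder credentials and bucket)
rfs pack -m output.fl -s "s3://myuser:mypassword@s3.example.com:9000/flist-bucket?region=us-east-1" ~/Documents
```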
`<store-specs>` can also be of the form `<start>-<end>=<url>` where `start` and `end` are hex bytes used to partition blob keys. rfs will then store a blob on the defined store if the blob key falls in the `[start:end]` range (inclusive).
If the `start-end` range is not provided, a `00-FF` range is assumed, basically a catch-all range for the blob keys. In other words, all blobs will be written to that store.
This is only useful because `rfs` can accept multiple stores on the command line with different and/or overlapping ranges.
For example `-s 00-80=dir:///tmp/store0 -s 81-ff=dir:///tmp/store1` means all keys that have a prefix byte in the range `[00-80]` will be written to /tmp/store0, and all other keys `[81-ff]` will be written to store1.
The same range can appear multiple times, which means the blob will be replicated to all the stores that match its key prefix.
To quickly test this operation
```bash
rfs pack -m output.fl -s 00-80=dir:///tmp/store0 -s 81-ff=dir:///tmp/store1 ~/Documents
```
This command will effectively create the `output.fl` and store (and shard) the blobs across the two locations /tmp/store0 and /tmp/store1.
```bash
#rfs pack --help
create an FL and upload blocks to provided storage
Usage: rfs pack [OPTIONS] --meta <META> <TARGET>
Arguments:
<TARGET> target directory to upload
Options:
-m, --meta <META> path to metadata file (flist)
-s, --store <STORE> store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for sharding. the URL is per store type, please check docs for more information
--no-strip-password disables automatic password stripping from store url, otherwise password will be stored in the fl.
-h, --help Print help
```
#### Password stripping
During creation of an flist you will probably provide a password in the URL of the store. This is normally needed to allow write operations to the store (say an s3 bucket).
Normally this password is removed from the store info so it's safe to ship the fl to users. A user of the flist will then only have read access, if the store is configured correctly.
For example, a `zdb` store has the notion of a public namespace which is password protected for writes but open for reads. An S3 bucket can have a policy that allows public reads but protected writes (minio supports that via bucket settings).
If you want to disable the password stripping from the store url, you can provide the `--no-strip-password` flag during creation, as shown below. This also means someone can extract
this information from the fl and gain write access to your store, so be careful how you use it.
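For example, to keep the full store url (including credentials) inside the resulting flist; the s3 url here is purely illustrative:

```bash
# example only: the credentials and host are placeholders and will be stored verbatim in output.fl
rfs pack -m output.fl --no-strip-password -s "s3://myuser:mypassword@s3.example.com:9000/flist-bucket" ~/Documents
```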
# Mounting an `fl`
Once the `fl` is created it can be distributed to other people. They can then mount the `fl`, which allows them to traverse the packed filesystem and also access (read-only) the files.
To mount an `fl` only the `fl` is needed, since all information regarding the `stores` is already stored in the `fl`. This also means you can only share the `fl` if the other user can actually reach the store used to create the `fl`. So a `dir` store is not sharable, and neither is a `zdb` instance that is running on localhost :no_good:
```bash
sudo rfs mount -m output.fl <target>
```
The `<target>` is the mount location, usually `/mnt` but can be anywhere. In another terminal you can now `cd <target>` and walk the filesystem tree. Opening the files will trigger a file download from the store only on read access.
full command help
```bash
# rfs mount --help
mount an FL
Usage: rfs mount [OPTIONS] --meta <META> <TARGET>
Arguments:
<TARGET> target mountpoint
Options:
-m, --meta <META> path to metadata file (flist)
-c, --cache <CACHE> directory used as cache for downloaded file chunks [default: /tmp/cache]
-d, --daemon run in the background
-l, --log <LOG> log file only used with daemon mode
-h, --help Print help
```
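For long-running setups the mount can also be daemonized, and later unmounted with the usual tools (a sketch; the paths are only illustrative):
```bash
# run in the background, logging to a file
sudo rfs mount -m output.fl -d -l /var/log/rfs.log /mnt

# later, unmount it again
sudo umount /mnt
```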
# Unpack an `fl`
Similar to `mount`, rfs provides an `unpack` subcommand that downloads (extracts) the entire content of an `fl` to a provided directory.
```bash
rfs unpack --help
unpack (downloads) content of an FL to the provided location
Usage: rfs unpack [OPTIONS] --meta <META> <TARGET>
Arguments:
<TARGET> target directory for unpacking
Options:
-m, --meta <META> path to metadata file (flist)
-c, --cache <CACHE> directory used as cache for downloaded file chunks [default: /tmp/cache]
-p, --preserve-ownership preserve file ownership from the FL, otherwise use the current user ownership. Setting this flag to true normally requires sudo
-h, --help Print help
```
By default, when unpacking, the `-p` flag is not set, which means downloaded files will be `owned` by the current user/group. If the `-p` flag is set, the file ownership will be the same as that of the original files used to create the fl (preserving the `uid` and `gid` of the files and directories); this normally requires `sudo` while unpacking.
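For example (a sketch, assuming the `output.fl` created earlier):
```bash
# files end up owned by the current user
rfs unpack -m output.fl ~/extracted

# preserve the original uid/gid recorded in the flist (normally needs sudo)
sudo rfs unpack -m output.fl -p ~/extracted
```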
# Specifications
Please check [docs](../docs)

View File

@@ -0,0 +1,9 @@
fn main() {
println!(
"cargo:rustc-env=GIT_VERSION={}",
git_version::git_version!(
args = ["--tags", "--always", "--dirty=-modified"],
fallback = "unknown"
)
);
}

105
components/rfs/rfs/flake.lock generated Normal file
View File

@@ -0,0 +1,105 @@
{
"nodes": {
"crane": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1709610799,
"narHash": "sha256-5jfLQx0U9hXbi2skYMGodDJkIgffrjIOgMRjZqms2QE=",
"owner": "ipetkov",
"repo": "crane",
"rev": "81c393c776d5379c030607866afef6406ca1be57",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1709126324,
"narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "d465f4819400de7c8d874d50b982301f28a84605",
"type": "github"
},
"original": {
"id": "flake-utils",
"type": "indirect"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1709677081,
"narHash": "sha256-tix36Y7u0rkn6mTm0lA45b45oab2cFLqAzDbJxeXS+c=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "880992dcc006a5e00dd0591446fdf723e6a51a64",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"crane": "crane",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"rust-overlay": "rust-overlay"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1712542394,
"narHash": "sha256-UZebDBECRSrJqw4K+LxZ6qFdYnScu6q1XCwqtsu1cas=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "ece8bdb3c3b58def25f204b9a1261dee55d7c9c0",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

View File

@@ -0,0 +1,71 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11";
crane.url = "github:ipetkov/crane";
crane.inputs.nixpkgs.follows = "nixpkgs";
flake-utils.inputs.nixpkgs.follows = "nixpkgs";
rust-overlay = {
url = "github:oxalica/rust-overlay";
inputs = {
nixpkgs.follows = "nixpkgs";
flake-utils.follows = "flake-utils";
};
};
};
outputs = {
self,
nixpkgs,
crane,
flake-utils,
rust-overlay,
}:
flake-utils.lib.eachSystem
[
flake-utils.lib.system.x86_64-linux
flake-utils.lib.system.aarch64-linux
flake-utils.lib.system.aarch64-darwin
] (system: let
pkgs = import nixpkgs {
inherit system;
overlays = [(import rust-overlay)];
};
customToolchain = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml;
craneLib = (crane.mkLib pkgs).overrideToolchain customToolchain;
in {
devShells.default = craneLib.devShell {
packages = [
pkgs.rust-analyzer
];
RUST_SRC_PATH = "${pkgs.rustPlatform.rustLibSrc}";
};
packages.default = craneLib.buildPackage {
src = self;
# 2024-03-07 failing test:
# > thread 'test::pack_unpack' has overflowed its stack
# > fatal runtime error: stack overflow
# > error: test failed, to rerun pass `--lib`
#
# apparently needs `RUST_MIN_STACK: 8388608` according to https://github.com/threefoldtech/rfs/blob/eae5186cc6b0f8704f3e4715d2e3644f1f3baa2c/.github/workflows/tests.yaml#L25C1-L25C34
doCheck = false;
cargoExtraArgs = "--bin rfs --features=build-binary";
nativeBuildInputs = [
pkgs.perl
pkgs.pkg-config
];
buildInputs = [
pkgs.openssl
pkgs.openssl.dev
];
};
});
}

View File

@@ -0,0 +1,3 @@
[toolchain]
channel = "1.74.0"

View File

@@ -0,0 +1,46 @@
-- inode table and main entrypoint of the schema
CREATE TABLE IF NOT EXISTS inode (
ino INTEGER PRIMARY KEY AUTOINCREMENT,
parent INTEGER,
name VARCHAR(255),
size INTEGER,
uid INTEGER,
gid INTEGER,
mode INTEGER,
rdev INTEGER,
ctime INTEGER,
mtime INTEGER
);
CREATE INDEX IF NOT EXISTS parents ON inode (parent);
CREATE INDEX IF NOT EXISTS names ON inode (name);
-- extra data for each inode for special types (say link targets)
CREATE TABLE IF NOT EXISTS extra (
ino INTEGER PRIMARY KEY,
data VARCHAR(4096)
);
-- blocks per file, order of insertion is important
CREATE TABLE IF NOT EXISTS block (
ino INTEGER,
id VARCHAR(32),
key VARCHAR(32)
);
CREATE INDEX IF NOT EXISTS block_ino ON block (ino);
-- global flist tags, this can include values like `version`, `description`, `block-size`, etc..
-- it can also hold extra user-defined tags for extensions
CREATE TABLE IF NOT EXISTS tag (
key VARCHAR(10) PRIMARY KEY,
value VARCHAR(255)
);
-- routing table defines ranges where blobs can be found. This allows "sharding" by being able to retrieve
-- blobs from different partitions using the prefix range (hashes whose first byte falls in a store's range)
CREATE TABLE IF NOT EXISTS route (
start integer, -- one byte hash prefix
end integer, -- one byte hash prefix
url VARCHAR(2048)
);

151
components/rfs/rfs/src/cache/mod.rs vendored Normal file
View File

@@ -0,0 +1,151 @@
use crate::fungi::meta::Block;
use crate::store::{BlockStore, Store};
use anyhow::{Context, Result};
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use tokio::fs::{self, File, OpenOptions};
use tokio::io::{AsyncSeekExt, AsyncWriteExt};
/// Cache implements a caching layer on top of a block store
//#[derive(Clone)]
pub struct Cache<S: Store> {
store: BlockStore<S>,
root: PathBuf,
}
impl<S> Cache<S>
where
S: Store,
{
pub fn new<P>(root: P, store: S) -> Self
where
P: Into<PathBuf>,
{
Cache {
store: store.into(),
root: root.into(),
}
}
// download: given an open file, writes the content of the chunk to the file
async fn download(&self, file: &mut File, block: &Block) -> Result<u64> {
let data = self.store.get(block).await?;
file.write_all(&data).await?;
Ok(data.len() as u64)
}
async fn prepare(&self, id: &[u8]) -> Result<File> {
let name = id.hex();
if name.len() < 4 {
anyhow::bail!("invalid chunk hash");
}
let path = self.root.join(&name[0..2]).join(&name[2..4]);
fs::create_dir_all(&path).await?;
let path = path.join(name);
let file = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.truncate(false)
.open(path)
.await?;
Ok(file)
}
/// get a file block either from cache or from remote if it's already
/// not cached
pub async fn get(&self, block: &Block) -> Result<(u64, File)> {
let mut file = self
.prepare(&block.id)
.await
.context("failed to prepare cache block")?;
// TODO: locking must happen here so no
// other processes start downloading the same chunk
let locker = Locker::new(&file);
locker.lock().await?;
let meta = file
.metadata()
.await
.context("failed to get block metadata")?;
if meta.len() > 0 {
// chunk is already downloaded
debug!("block cache hit: {}", block.id.as_slice().hex());
locker.unlock().await?;
return Ok((meta.len(), file));
}
debug!("downloading block with key: {}", block.id.as_slice().hex());
let size = self
.download(&mut file, block)
.await
.context("failed to download block")?;
// if file is just downloaded, we need
// to seek to beginning of the file.
file.rewind().await?;
locker.unlock().await?;
Ok((size, file))
}
/// direct downloads all the file blocks from remote and write it to output
#[allow(dead_code)]
pub async fn direct(&self, blocks: &[Block], out: &mut File) -> Result<()> {
use tokio::io::copy;
for (index, block) in blocks.iter().enumerate() {
let (_, mut chunk) = self.get(block).await?;
copy(&mut chunk, out)
.await
.with_context(|| format!("failed to copy block {}", index))?;
}
Ok(())
}
}
pub struct Locker {
fd: std::os::unix::io::RawFd,
}
impl Locker {
pub fn new(f: &File) -> Locker {
Locker { fd: f.as_raw_fd() }
}
pub async fn lock(&self) -> Result<()> {
let fd = self.fd;
tokio::task::spawn_blocking(move || {
nix::fcntl::flock(fd, nix::fcntl::FlockArg::LockExclusive)
})
.await
.context("failed to spawn file locking")?
.context("failed to lock file")?;
Ok(())
}
pub async fn unlock(&self) -> Result<()> {
let fd = self.fd;
tokio::task::spawn_blocking(move || nix::fcntl::flock(fd, nix::fcntl::FlockArg::Unlock))
.await
.context("failed to spawn file lunlocking")?
.context("failed to unlock file")?;
Ok(())
}
}
trait Hex {
fn hex(&self) -> String;
}
impl Hex for &[u8] {
fn hex(&self) -> String {
hex::encode(self)
}
}

View File

@@ -0,0 +1,128 @@
use crate::{
cache::Cache,
fungi::{meta::Block, Reader, Result},
store::{BlockStore, Store},
};
use anyhow::Error;
use futures::lock::Mutex;
use hex::ToHex;
use std::sync::Arc;
use tokio::io::AsyncReadExt;
const WORKERS: usize = 10;
pub async fn clone<S: Store>(reader: Reader, store: S, cache: Cache<S>) -> Result<()> {
let failures = Arc::new(Mutex::new(Vec::new()));
let cloner = BlobCloner::new(cache, store.into(), failures.clone());
let mut workers = workers::WorkerPool::new(cloner, WORKERS);
let mut offset = 0;
loop {
if !failures.lock().await.is_empty() {
break;
}
let blocks = reader.all_blocks(1000, offset).await?;
if blocks.is_empty() {
break;
}
for block in blocks {
offset += 1;
let worker = workers.get().await;
worker.send(block)?;
}
}
workers.close().await;
let failures = failures.lock().await;
if failures.is_empty() {
return Ok(());
}
log::error!("failed to clone one or more blocks");
for (block, error) in failures.iter() {
log::error!(" - failed to clone block {}: {}", block, error);
}
Err(crate::fungi::Error::Anyhow(anyhow::anyhow!(
"failed to clone ({}) blocks",
failures.len()
)))
}
struct BlobCloner<S>
where
S: Store,
{
cache: Arc<Cache<S>>,
store: Arc<BlockStore<S>>,
failures: Arc<Mutex<Vec<(String, Error)>>>,
}
impl<S> Clone for BlobCloner<S>
where
S: Store,
{
fn clone(&self) -> Self {
Self {
cache: self.cache.clone(),
store: self.store.clone(),
failures: self.failures.clone(),
}
}
}
impl<S> BlobCloner<S>
where
S: Store,
{
fn new(
cache: Cache<S>,
store: BlockStore<S>,
failures: Arc<Mutex<Vec<(String, Error)>>>,
) -> Self {
Self {
cache: Arc::new(cache),
store: Arc::new(store),
failures,
}
}
}
#[async_trait::async_trait]
impl<S> workers::Work for BlobCloner<S>
where
S: Store,
{
type Input = Block;
type Output = ();
async fn run(&mut self, block: Self::Input) -> Self::Output {
let mut file = match self.cache.get(&block).await {
Ok((_, f)) => f,
Err(err) => {
self.failures
.lock()
.await
.push((block.id.as_slice().encode_hex(), err));
return;
}
};
let mut data = Vec::new();
if let Err(err) = file.read_to_end(&mut data).await {
self.failures
.lock()
.await
.push((block.id.as_slice().encode_hex(), err.into()));
return;
}
if let Err(err) = self.store.set(&data).await {
self.failures
.lock()
.await
.push((block.id.as_slice().encode_hex(), err.into()));
return;
}
}
}

View File

@@ -0,0 +1,72 @@
use crate::{
fungi::{meta::Tag, Reader, Result, Writer},
store::{self, Store},
};
pub async fn tag_list(reader: Reader) -> Result<()> {
let tags = reader.tags().await?;
if !tags.is_empty() {
println!("tags:");
}
for (key, value) in tags {
println!("\t{}={}", key, value);
}
Ok(())
}
pub async fn tag_add(writer: Writer, tags: Vec<(String, String)>) -> Result<()> {
for (key, value) in tags {
writer.tag(Tag::Custom(key.as_str()), value).await?;
}
Ok(())
}
pub async fn tag_delete(writer: Writer, keys: Vec<String>, all: bool) -> Result<()> {
if all {
writer.delete_tags().await?;
return Ok(());
}
for key in keys {
writer.delete_tag(Tag::Custom(key.as_str())).await?;
}
Ok(())
}
pub async fn store_list(reader: Reader) -> Result<()> {
let routes = reader.routes().await?;
if !routes.is_empty() {
println!("routes:")
}
for route in routes {
println!(
"\trange:[{}-{}] store:{}",
route.start, route.end, route.url
);
}
Ok(())
}
pub async fn store_add(writer: Writer, stores: Vec<String>) -> Result<()> {
let store = store::parse_router(stores.as_slice()).await?;
for route in store.routes() {
writer
.route(
route.start.unwrap_or(u8::MIN),
route.end.unwrap_or(u8::MAX),
route.url,
)
.await?;
}
Ok(())
}
pub async fn store_delete(writer: Writer, stores: Vec<String>, all: bool) -> Result<()> {
if all {
writer.delete_routes().await?;
return Ok(());
}
for store in stores {
writer.delete_route(store).await?;
}
Ok(())
}

View File

@@ -0,0 +1,407 @@
#![allow(clippy::unnecessary_mut_passed)]
#![deny(clippy::unimplemented, clippy::todo)]
use crate::cache;
use crate::fungi::{
meta::{FileType, Inode},
Reader,
};
use crate::store::Store;
use anyhow::{ensure, Context, Result};
use polyfuse::reply::FileAttr;
use polyfuse::{
op,
reply::{AttrOut, EntryOut, ReaddirOut, StatfsOut},
KernelConfig, Operation, Request, Session,
};
use std::io::SeekFrom;
use std::sync::Arc;
use std::{io, path::PathBuf, time::Duration};
use tokio::fs::File;
use tokio::sync::Mutex;
use tokio::{
io::{unix::AsyncFd, AsyncReadExt, AsyncSeekExt, Interest},
task::{self, JoinHandle},
};
const CHUNK_SIZE: usize = 512 * 1024; // 512k and is hardcoded in the hub. the block_size value is not used
const TTL: Duration = Duration::from_secs(60 * 60 * 24 * 365);
const LRU_CAP: usize = 5; // Least Recently Used File Capacity
const FS_BLOCK_SIZE: u32 = 4 * 1024;
type FHash = [u8; 32];
type BlockSize = u64;
pub struct Filesystem<S>
where
S: Store,
{
meta: Reader,
cache: Arc<cache::Cache<S>>,
lru: Arc<Mutex<lru::LruCache<FHash, (File, BlockSize)>>>,
}
impl<S> Clone for Filesystem<S>
where
S: Store,
{
fn clone(&self) -> Self {
Self {
meta: self.meta.clone(),
cache: Arc::clone(&self.cache),
lru: Arc::clone(&self.lru),
}
}
}
impl<S> Filesystem<S>
where
S: Store,
{
pub fn new(meta: Reader, cache: cache::Cache<S>) -> Self {
Filesystem {
meta,
cache: Arc::new(cache),
lru: Arc::new(Mutex::new(lru::LruCache::new(LRU_CAP))),
}
}
pub async fn mount<P>(&self, mnt: P) -> Result<()>
where
P: Into<PathBuf>,
{
let mountpoint: PathBuf = mnt.into();
ensure!(mountpoint.is_dir(), "mountpoint must be a directory");
let mut options = KernelConfig::default();
options.mount_option(&format!(
"ro,allow_other,fsname={},subtype=g8ufs,default_permissions",
std::process::id()
));
// polyfuse assumes an absolute path, see https://github.com/ubnt-intrepid/polyfuse/issues/83
let fusermount_path =
which::which("fusermount").context("looking up 'fusermount' in PATH")?;
options.fusermount_path(fusermount_path);
let session = AsyncSession::mount(mountpoint, options).await?;
// release here
while let Some(req) = session.next_request().await? {
let fs = self.clone();
let handler: JoinHandle<Result<()>> = task::spawn(async move {
let result = match req.operation()? {
Operation::Lookup(op) => fs.lookup(&req, op).await,
Operation::Getattr(op) => fs.getattr(&req, op).await,
Operation::Read(op) => fs.read(&req, op).await,
Operation::Readdir(op) => fs.readdir(&req, op).await,
Operation::Readlink(op) => fs.readlink(&req, op).await,
Operation::Statfs(op) => fs.statfs(&req, op).await,
op => {
debug!("function is not implemented: {:?}", op);
Ok(req.reply_error(libc::ENOSYS)?)
}
};
if result.is_err() {
req.reply_error(libc::ENOENT)?;
}
Ok(())
});
drop(handler);
}
Ok(())
}
async fn statfs(&self, req: &Request, _op: op::Statfs<'_>) -> Result<()> {
let mut out = StatfsOut::default();
let stats = out.statfs();
stats.bsize(FS_BLOCK_SIZE);
req.reply(out)?;
Ok(())
}
async fn readlink(&self, req: &Request, op: op::Readlink<'_>) -> Result<()> {
let link = self.meta.inode(op.ino()).await?;
if !link.mode.is(FileType::Link) {
return Ok(req.reply_error(libc::ENOLINK)?);
}
if let Some(target) = link.data {
req.reply(target)?;
return Ok(());
}
Ok(req.reply_error(libc::ENOLINK)?)
}
async fn read(&self, req: &Request, op: op::Read<'_>) -> Result<()> {
let entry = self.meta.inode(op.ino()).await?;
if !entry.mode.is(FileType::Regular) {
return Ok(req.reply_error(libc::EISDIR)?);
};
let offset = op.offset() as usize;
let size = op.size() as usize;
let chunk_size = CHUNK_SIZE; // file.block_size as usize;
let chunk_index = offset / chunk_size;
let blocks = self.meta.blocks(op.ino()).await?;
if chunk_index >= blocks.len() || op.size() == 0 {
// reading after the end of the file
let data: &[u8] = &[];
return Ok(req.reply(data)?);
}
// offset inside the file
let mut offset = offset - (chunk_index * chunk_size);
let mut buf: Vec<u8> = vec![0; size];
let mut total = 0;
'blocks: for block in blocks.iter().skip(chunk_index) {
// hash works as a key inside the LRU
let hash = block.id;
// getting the file descriptor from the LRU or from the cache if not found in the LRU
let lru = self.lru.lock().await.pop(&hash);
let (mut fd, block_size) = match lru {
Some((descriptor, bsize)) => {
debug!("lru hit");
(descriptor, bsize)
}
None => {
let (bsize, descriptor) = match self.cache.get(block).await {
Ok(out) => out,
Err(err) => {
error!("io cache error: {:#}", err);
return Ok(req.reply_error(libc::EIO)?);
}
};
(descriptor, bsize)
}
};
// seek to the position <offset>
fd.seek(SeekFrom::Start(offset as u64)).await?;
let mut chunk_offset = offset as u64;
loop {
// read the file bytes into buf
let read = match fd.read(&mut buf[total..]).await {
Ok(n) => n,
Err(err) => {
error!("read error: {:#}", err);
return Ok(req.reply_error(libc::EIO)?);
}
};
chunk_offset += read as u64;
// calculate the total size and break if the required bytes (=size) downloaded
total += read;
if total >= size {
// if only part of the block read -> store it in the lruf
if chunk_offset < block_size {
let mut lruf = self.lru.lock().await;
lruf.put(hash, (fd, block_size));
}
break 'blocks;
}
// read = 0 means the EOF (end of the block)
if read == 0 {
break;
}
}
offset = 0;
}
Ok(req.reply(&buf[..size])?)
}
async fn getattr(&self, req: &Request, op: op::Getattr<'_>) -> Result<()> {
log::debug!("getattr({})", op.ino());
let entry = self.meta.inode(op.ino()).await?;
let mut attr = AttrOut::default();
let fill = attr.attr();
entry.fill(fill);
req.reply(attr)?;
Ok(())
}
async fn readdir(&self, req: &Request, op: op::Readdir<'_>) -> Result<()> {
log::debug!("readdir({})", op.ino());
let root = self.meta.inode(op.ino()).await?;
if !root.mode.is(FileType::Dir) {
req.reply_error(libc::ENOTDIR)?;
return Ok(());
}
let mut out = ReaddirOut::new(op.size() as usize);
let mut offset = op.offset();
let mut query_offset = offset;
if offset == 0 {
out.entry(".".as_ref(), op.ino(), libc::DT_DIR as u32, 1);
out.entry(
"..".as_ref(),
match op.ino() {
1 => 1,
_ => root.parent,
},
libc::DT_DIR as u32,
2,
);
offset = 2;
} else {
// we don't add the . and .. entries here, but
// we still need to adjust the query offset to account for them
query_offset -= 2;
}
let children = self.meta.children(root.ino, 10, query_offset).await?;
for entry in children.iter() {
offset += 1;
let full = match entry.mode.file_type() {
FileType::Dir => {
//let inode = self.meta.dir_inode(&sub.key).await?;
out.entry(entry.name.as_ref(), entry.ino, libc::DT_DIR as u32, offset)
}
FileType::Regular => {
out.entry(entry.name.as_ref(), entry.ino, libc::DT_REG as u32, offset)
}
FileType::Link => {
out.entry(entry.name.as_ref(), entry.ino, libc::DT_LNK as u32, offset)
}
_ => {
warn!("unkonwn entry");
false
}
};
if full {
break;
}
}
Ok(req.reply(out)?)
}
async fn lookup(&self, req: &Request, op: op::Lookup<'_>) -> Result<()> {
log::debug!("lookup(parent: {}, name: {:?})", op.parent(), op.name());
let name = match op.name().to_str() {
Some(name) => name,
None => {
req.reply_error(libc::ENOENT)?;
return Ok(());
}
};
let node = self.meta.lookup(op.parent(), name).await?;
let node = match node {
Some(node) => node,
None => {
req.reply_error(libc::ENOENT)?;
return Ok(());
}
};
let mut out = EntryOut::default();
node.fill(out.attr());
out.ino(node.ino);
out.ttl_attr(TTL);
out.ttl_entry(TTL);
Ok(req.reply(out)?)
}
}
// ==== AsyncSession ====
struct AsyncSession {
inner: AsyncFd<Session>,
}
impl AsyncSession {
async fn mount(mountpoint: PathBuf, config: KernelConfig) -> io::Result<Self> {
tokio::task::spawn_blocking(move || {
let session = Session::mount(mountpoint, config)?;
Ok(Self {
inner: AsyncFd::with_interest(session, Interest::READABLE)?,
})
})
.await
.expect("join error")
}
async fn next_request(&self) -> io::Result<Option<Request>> {
use futures::{future::poll_fn, ready, task::Poll};
poll_fn(|cx| {
let mut guard = ready!(self.inner.poll_read_ready(cx))?;
match self.inner.get_ref().next_request() {
Err(err) if err.kind() == io::ErrorKind::WouldBlock => {
guard.clear_ready();
Poll::Pending
}
res => {
guard.retain_ready();
Poll::Ready(res)
}
}
})
.await
}
}
trait AttributeFiller {
fn fill(&self, attr: &mut FileAttr);
}
impl AttributeFiller for Inode {
fn fill(&self, attr: &mut FileAttr) {
attr.mode(self.mode.mode());
attr.ino(self.ino);
attr.ctime(Duration::from_secs(self.ctime as u64));
attr.mtime(Duration::from_secs(self.mtime as u64));
attr.uid(self.uid);
attr.gid(self.gid);
attr.size(self.size);
attr.rdev(self.rdev as u32);
attr.blksize(FS_BLOCK_SIZE);
let mut blocks = self.size / 512;
blocks += match self.size % 512 {
0 => 0,
_ => 1,
};
attr.blocks(blocks);
match self.mode.file_type() {
FileType::Dir => attr.nlink(2),
FileType::Regular => attr.blksize(4 * 1024),
_ => (),
};
}
}

View File

@@ -0,0 +1,644 @@
use std::{
collections::LinkedList,
path::{Path, PathBuf},
};
use sqlx::{
sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePoolOptions, SqliteRow},
FromRow, Row, SqlitePool,
};
use crate::store;
const ID_LEN: usize = 32;
const KEY_LEN: usize = 32;
const TYPE_MASK: u32 = nix::libc::S_IFMT;
#[repr(u32)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FileType {
Regular = nix::libc::S_IFREG,
Dir = nix::libc::S_IFDIR,
Link = nix::libc::S_IFLNK,
Block = nix::libc::S_IFBLK,
Char = nix::libc::S_IFCHR,
Socket = nix::libc::S_IFSOCK,
FIFO = nix::libc::S_IFIFO,
Unknown = 0,
}
impl From<u32> for FileType {
fn from(value: u32) -> Self {
match value {
nix::libc::S_IFREG => Self::Regular,
nix::libc::S_IFDIR => Self::Dir,
nix::libc::S_IFLNK => Self::Link,
nix::libc::S_IFBLK => Self::Block,
nix::libc::S_IFCHR => Self::Char,
nix::libc::S_IFSOCK => Self::Socket,
nix::libc::S_IFIFO => Self::FIFO,
_ => Self::Unknown,
}
}
}
static SCHEMA: &str = include_str!("../../schema/schema.sql");
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("failed to execute query: {0}")]
SqlError(#[from] sqlx::Error),
#[error("invalid hash length")]
InvalidHash,
#[error("invalid key length")]
InvalidKey,
#[error("io error: {0:#}")]
IO(#[from] std::io::Error),
#[error("store error: {0}")]
Store(#[from] store::Error),
#[error("unknown meta error: {0}")]
Anyhow(#[from] anyhow::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
pub type Ino = u64;
#[derive(Debug, Clone, Default)]
pub struct Mode(u32);
impl From<u32> for Mode {
fn from(value: u32) -> Self {
Self(value)
}
}
impl Mode {
pub fn new(t: FileType, perm: u32) -> Self {
Self(t as u32 | (perm & !TYPE_MASK))
}
pub fn file_type(&self) -> FileType {
(self.0 & TYPE_MASK).into()
}
pub fn permissions(&self) -> u32 {
self.0 & !TYPE_MASK
}
pub fn mode(&self) -> u32 {
self.0
}
pub fn is(&self, typ: FileType) -> bool {
self.file_type() == typ
}
}
#[derive(Debug, Clone, Default)]
pub struct Inode {
pub ino: Ino,
pub parent: Ino,
pub name: String,
pub size: u64,
pub uid: u32,
pub gid: u32,
pub mode: Mode,
pub rdev: u64,
pub ctime: i64,
pub mtime: i64,
pub data: Option<Vec<u8>>,
}
impl FromRow<'_, SqliteRow> for Inode {
fn from_row(row: &'_ SqliteRow) -> std::result::Result<Self, sqlx::Error> {
Ok(Self {
ino: row.get::<i64, &str>("ino") as Ino,
parent: row.get::<i64, &str>("parent") as Ino,
name: row.get("name"),
size: row.get::<i64, &str>("size") as u64,
uid: row.get("uid"),
gid: row.get("uid"),
mode: row.get::<u32, &str>("mode").into(),
rdev: row.get::<i64, &str>("rdev") as u64,
ctime: row.get("ctime"),
mtime: row.get("mtime"),
data: row.get("data"),
})
}
}
#[derive(Debug, Clone, Default)]
pub struct Block {
/// id of the block
pub id: [u8; ID_LEN],
/// encryption key of the block
pub key: [u8; KEY_LEN],
}
impl FromRow<'_, SqliteRow> for Block {
fn from_row(row: &'_ SqliteRow) -> std::result::Result<Self, sqlx::Error> {
let hash: &[u8] = row.get("id");
if hash.len() != ID_LEN {
return Err(sqlx::Error::Decode(Box::new(Error::InvalidHash)));
}
let key: &[u8] = row.get("key");
if key.len() != KEY_LEN {
return Err(sqlx::Error::Decode(Box::new(Error::InvalidKey)));
}
let mut block = Self::default();
block.id.copy_from_slice(hash);
block.key.copy_from_slice(key);
Ok(block)
}
}
#[derive(Debug, Clone, Default)]
pub struct Route {
pub start: u8,
pub end: u8,
pub url: String,
}
impl FromRow<'_, SqliteRow> for Route {
fn from_row(row: &'_ SqliteRow) -> std::result::Result<Self, sqlx::Error> {
Ok(Self {
start: row.get("start"),
end: row.get("end"),
url: row.get("url"),
})
}
}
#[derive(Debug, Clone)]
pub enum Tag<'a> {
Version,
Description,
Author,
Custom(&'a str),
}
impl<'a> Tag<'a> {
fn key(&self) -> &str {
match self {
Self::Version => "version",
Self::Description => "description",
Self::Author => "author",
Self::Custom(a) => a,
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Walk {
Continue,
Break,
}
#[async_trait::async_trait]
pub trait WalkVisitor {
async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk>;
}
struct WalkItem(PathBuf, Inode);
#[derive(Clone)]
pub struct Reader {
pool: SqlitePool,
}
impl Reader {
pub async fn new<P: AsRef<Path>>(path: P) -> Result<Self> {
let opts = SqliteConnectOptions::new()
.journal_mode(SqliteJournalMode::Delete)
.filename(path);
let pool = SqlitePool::connect_with(opts).await?;
Ok(Self { pool })
}
pub async fn inode(&self, ino: Ino) -> Result<Inode> {
let inode: Inode = sqlx::query_as(r#"select inode.*, extra.data
from inode left join extra on inode.ino = extra.ino
where inode.ino = ?;"#)
.bind(ino as i64).fetch_one(&self.pool).await?;
Ok(inode)
}
pub async fn children(&self, parent: Ino, limit: u32, offset: u64) -> Result<Vec<Inode>> {
let results: Vec<Inode> = sqlx::query_as(
r#"select inode.*, extra.data
from inode left join extra on inode.ino = extra.ino
where inode.parent = ? limit ? offset ?;"#,
)
.bind(parent as i64)
.bind(limit)
.bind(offset as i64)
.fetch_all(&self.pool)
.await?;
Ok(results)
}
pub async fn lookup<S: AsRef<str>>(&self, parent: Ino, name: S) -> Result<Option<Inode>> {
let inode: Option<Inode> = sqlx::query_as(r#"select inode.*, extra.data
from inode left join extra on inode.ino = extra.ino
where inode.parent = ? and inode.name = ?;"#)
.bind(parent as i64)
.bind(name.as_ref())
.fetch_optional(&self.pool).await?;
Ok(inode)
}
pub async fn blocks(&self, ino: Ino) -> Result<Vec<Block>> {
let results: Vec<Block> = sqlx::query_as("select id, key from block where ino = ?;")
.bind(ino as i64)
.fetch_all(&self.pool)
.await?;
Ok(results)
}
pub async fn all_blocks(&self, limit: u32, offset: u64) -> Result<Vec<Block>> {
let results: Vec<Block> = sqlx::query_as("select id, key from block limit ? offset ?;")
.bind(limit)
.bind(offset as i64)
.fetch_all(&self.pool)
.await?;
Ok(results)
}
pub async fn tag(&self, tag: Tag<'_>) -> Result<Option<String>> {
let value: Option<(String,)> = sqlx::query_as("select value from tag where key = ?;")
.bind(tag.key())
.fetch_optional(&self.pool)
.await?;
Ok(value.map(|v| v.0))
}
pub async fn tags(&self) -> Result<Vec<(String, String)>> {
let tags: Vec<(String, String)> = sqlx::query_as("select key, value from tag;")
.fetch_all(&self.pool)
.await?;
Ok(tags)
}
pub async fn routes(&self) -> Result<Vec<Route>> {
let results: Vec<Route> = sqlx::query_as("select start, end, url from route;")
.fetch_all(&self.pool)
.await?;
Ok(results)
}
pub async fn walk<W: WalkVisitor + Send>(&self, visitor: &mut W) -> Result<()> {
let node = self.inode(1).await?;
let mut list = LinkedList::default();
let path: PathBuf = "/".into();
list.push_back(WalkItem(path, node));
while !list.is_empty() {
let item = list.pop_back().unwrap();
self.walk_node(&mut list, &item, visitor).await?;
}
Ok(())
}
async fn walk_node<W: WalkVisitor + Send>(
&self,
list: &mut LinkedList<WalkItem>,
WalkItem(path, node): &WalkItem,
visitor: &mut W,
) -> Result<()> {
if visitor.visit(path, node).await? == Walk::Break {
return Ok(());
}
let mut offset = 0;
loop {
let children = self.children(node.ino, 1000, offset).await?;
if children.is_empty() {
break;
}
for child in children {
offset += 1;
let child_path = path.join(&child.name);
if child.mode.is(FileType::Dir) {
list.push_back(WalkItem(child_path, child));
continue;
}
if visitor.visit(&child_path, &child).await? == Walk::Break {
return Ok(());
}
}
}
Ok(())
}
}
#[derive(Clone)]
pub struct Writer {
pool: SqlitePool,
}
impl Writer {
/// create a new flist metadata writer
pub async fn new<P: AsRef<Path>>(path: P, remove: bool) -> Result<Self> {
if remove {
let _ = tokio::fs::remove_file(&path).await;
}
let opts = SqliteConnectOptions::new()
.create_if_missing(true)
.journal_mode(SqliteJournalMode::Delete)
.filename(path);
let pool = SqlitePoolOptions::new()
.max_connections(1)
.connect_with(opts)
.await?;
sqlx::query(SCHEMA).execute(&pool).await?;
Ok(Self { pool })
}
/// inode add an inode to the flist
pub async fn inode(&self, inode: Inode) -> Result<Ino> {
let result = sqlx::query(
r#"insert into inode (parent, name, size, uid, gid, mode, rdev, ctime, mtime)
values (?, ?, ?, ?, ?, ?, ?, ?, ?);"#,
)
.bind(inode.parent as i64)
.bind(inode.name)
.bind(inode.size as i64)
.bind(inode.uid)
.bind(inode.gid)
.bind(inode.mode.0)
.bind(inode.rdev as i64)
.bind(inode.ctime)
.bind(inode.mtime)
.execute(&self.pool)
.await?;
let ino = result.last_insert_rowid() as Ino;
if let Some(data) = &inode.data {
sqlx::query("insert into extra(ino, data) values (?, ?)")
.bind(ino as i64)
.bind(data)
.execute(&self.pool)
.await?;
}
Ok(ino)
}
pub async fn block(&self, ino: Ino, id: &[u8; ID_LEN], key: &[u8; KEY_LEN]) -> Result<()> {
sqlx::query("insert into block (ino, id, key) values (?, ?, ?)")
.bind(ino as i64)
.bind(&id[..])
.bind(&key[..])
.execute(&self.pool)
.await?;
Ok(())
}
pub async fn route<U: AsRef<str>>(&self, start: u8, end: u8, url: U) -> Result<()> {
sqlx::query("insert into route (start, end, url) values (?, ?, ?)")
.bind(start)
.bind(end)
.bind(url.as_ref())
.execute(&self.pool)
.await?;
Ok(())
}
pub async fn tag<V: AsRef<str>>(&self, tag: Tag<'_>, value: V) -> Result<()> {
sqlx::query("insert or replace into tag (key, value) values (?, ?);")
.bind(tag.key())
.bind(value.as_ref())
.execute(&self.pool)
.await?;
Ok(())
}
pub async fn delete_tag(&self, tag: Tag<'_>) -> Result<()> {
sqlx::query("delete from tag where key = ?;")
.bind(tag.key())
.execute(&self.pool)
.await?;
Ok(())
}
pub async fn delete_route<U: AsRef<str>>(&self, url: U) -> Result<()> {
sqlx::query("delete from route where url = ?;")
.bind(url.as_ref())
.execute(&self.pool)
.await?;
Ok(())
}
pub async fn delete_tags(&self) -> Result<()> {
sqlx::query("delete from tag;").execute(&self.pool).await?;
Ok(())
}
pub async fn delete_routes(&self) -> Result<()> {
sqlx::query("delete from route;")
.execute(&self.pool)
.await?;
Ok(())
}
}
#[cfg(test)]
mod test {
use super::*;
#[tokio::test]
async fn test_inode() {
const PATH: &str = "/tmp/inode.fl";
let meta = Writer::new(PATH, true).await.unwrap();
let ino = meta
.inode(Inode {
name: "/".into(),
data: Some("target".into()),
..Inode::default()
})
.await
.unwrap();
assert_eq!(ino, 1);
let meta = Reader::new(PATH).await.unwrap();
let inode = meta.inode(ino).await.unwrap();
assert_eq!(inode.name, "/");
assert!(inode.data.is_some());
assert_eq!(inode.data.unwrap().as_slice(), "target".as_bytes());
}
#[tokio::test]
async fn test_get_children() {
const PATH: &str = "/tmp/children.fl";
let meta = Writer::new(PATH, true).await.unwrap();
let ino = meta
.inode(Inode {
name: "/".into(),
data: Some("target".into()),
..Inode::default()
})
.await
.unwrap();
for name in ["bin", "etc", "usr"] {
meta.inode(Inode {
parent: ino,
name: name.into(),
..Inode::default()
})
.await
.unwrap();
}
let meta = Reader::new(PATH).await.unwrap();
let children = meta.children(ino, 10, 0).await.unwrap();
assert_eq!(children.len(), 3);
assert_eq!(children[0].name, "bin");
let child = meta.lookup(ino, "bin").await.unwrap();
assert!(child.is_some());
assert_eq!(child.unwrap().name, "bin");
let child = meta.lookup(ino, "wrong").await.unwrap();
assert!(child.is_none());
}
#[tokio::test]
async fn test_get_block() {
const PATH: &str = "/tmp/block.fl";
let meta = Writer::new(PATH, true).await.unwrap();
let hash: [u8; ID_LEN] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32,
];
let key1: [u8; KEY_LEN] = [1; KEY_LEN];
let key2: [u8; KEY_LEN] = [2; KEY_LEN];
meta.block(1, &hash, &key1).await.unwrap();
meta.block(1, &hash, &key2).await.unwrap();
let meta = Reader::new(PATH).await.unwrap();
let blocks = meta.blocks(1).await.unwrap();
assert_eq!(blocks.len(), 2);
assert_eq!(blocks[0].id, hash);
assert_eq!(blocks[0].key, key1);
assert_eq!(blocks[1].key, key2);
}
#[tokio::test]
async fn test_get_tag() {
const PATH: &str = "/tmp/tag.fl";
let meta = Writer::new(PATH, true).await.unwrap();
meta.tag(Tag::Version, "0.1").await.unwrap();
meta.tag(Tag::Author, "azmy").await.unwrap();
meta.tag(Tag::Custom("custom"), "value").await.unwrap();
let meta = Reader::new(PATH).await.unwrap();
assert!(matches!(
meta.tag(Tag::Version).await.unwrap().as_deref(),
Some("0.1")
));
assert!(matches!(
meta.tag(Tag::Custom("custom")).await.unwrap().as_deref(),
Some("value")
));
assert!(matches!(
meta.tag(Tag::Custom("unknown")).await.unwrap(),
None
));
}
#[tokio::test]
async fn test_get_routes() {
const PATH: &str = "/tmp/route.fl";
let meta = Writer::new(PATH, true).await.unwrap();
meta.route(0, 128, "zdb://hub1.grid.tf").await.unwrap();
meta.route(129, 255, "zdb://hub2.grid.tf").await.unwrap();
let meta = Reader::new(PATH).await.unwrap();
let routes = meta.routes().await.unwrap();
assert_eq!(routes.len(), 2);
assert_eq!(routes[0].start, 0);
assert_eq!(routes[0].end, 128);
assert_eq!(routes[0].url, "zdb://hub1.grid.tf");
}
#[test]
fn test_mode() {
let m = Mode::new(FileType::Regular, 0o754);
assert_eq!(m.permissions(), 0o754);
assert_eq!(m.file_type(), FileType::Regular);
}
#[tokio::test]
async fn test_walk() {
const PATH: &str = "/tmp/walk.fl";
let meta = Writer::new(PATH, true).await.unwrap();
let parent = meta
.inode(Inode {
name: "/".into(),
data: Some("target".into()),
..Inode::default()
})
.await
.unwrap();
for name in ["bin", "etc", "usr"] {
meta.inode(Inode {
parent: parent,
name: name.into(),
..Inode::default()
})
.await
.unwrap();
}
let meta = Reader::new(PATH).await.unwrap();
//TODO: validate the walk
meta.walk(&mut WalkTest).await.unwrap();
}
struct WalkTest;
#[async_trait::async_trait]
impl WalkVisitor for WalkTest {
async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
println!("{} = {:?}", node.ino, path);
Ok(Walk::Continue)
}
}
}

View File

@@ -0,0 +1,3 @@
pub mod meta;
pub use meta::{Error, Reader, Result, Writer};

View File

@@ -0,0 +1,107 @@
#[macro_use]
extern crate log;
pub mod cache;
pub mod fungi;
pub mod store;
mod pack;
pub use pack::pack;
mod unpack;
pub use unpack::unpack;
mod clone;
pub use clone::clone;
pub mod config;
const PARALLEL_UPLOAD: usize = 10; // number of files we can upload in parallel
#[cfg(test)]
mod test {
use super::*;
use crate::{
cache::Cache,
fungi::meta,
store::{dir::DirStore, Router},
};
use std::path::PathBuf;
use tokio::{fs, io::AsyncReadExt};
#[tokio::test]
async fn pack_unpack() {
const ROOT: &str = "/tmp/pack-unpack-test";
let _ = fs::remove_dir_all(ROOT).await;
let root: PathBuf = ROOT.into();
let source = root.join("source");
fs::create_dir_all(&source).await.unwrap();
for size in [0, 100 * 1024, 1024 * 1024, 10 * 1024 * 1024] {
let mut urandom = fs::OpenOptions::default()
.read(true)
.open("/dev/urandom")
.await
.unwrap()
.take(size);
let name = format!("file-{}.rnd", size);
let p = source.join(&name);
let mut file = fs::OpenOptions::default()
.create(true)
.write(true)
.open(p)
.await
.unwrap();
tokio::io::copy(&mut urandom, &mut file).await.unwrap();
}
println!("file generation complete");
let writer = meta::Writer::new(root.join("meta.fl"), true).await.unwrap();
// while we're at it we can already create 2 stores and create a router store on top
// of that.
let store0 = DirStore::new(root.join("store0")).await.unwrap();
let store1 = DirStore::new(root.join("store1")).await.unwrap();
let mut store = Router::new();
store.add(0x00, 0x7f, store0);
store.add(0x80, 0xff, store1);
pack(writer, store, &source, false, None).await.unwrap();
println!("packing complete");
// recreate the stores for reading.
let store0 = DirStore::new(root.join("store0")).await.unwrap();
let store1 = DirStore::new(root.join("store1")).await.unwrap();
let mut store = Router::new();
store.add(0x00, 0x7f, store0);
store.add(0x80, 0xff, store1);
let cache = Cache::new(root.join("cache"), store);
let reader = meta::Reader::new(root.join("meta.fl")).await.unwrap();
// validate reader store routing
let routers = reader.routes().await.unwrap();
assert_eq!(2, routers.len());
assert_eq!(routers[0].url, "dir:///tmp/pack-unpack-test/store0");
assert_eq!(routers[1].url, "dir:///tmp/pack-unpack-test/store1");
assert_eq!((routers[0].start, routers[0].end), (0x00, 0x7f));
assert_eq!((routers[1].start, routers[1].end), (0x80, 0xff));
unpack(&reader, &cache, root.join("destination"), false)
.await
.unwrap();
println!("unpacking complete");
// compare that source directory is exactly the same as target directory
let status = std::process::Command::new("diff")
.arg(root.join("source"))
.arg(root.join("destination"))
.status()
.unwrap();
assert!(status.success());
}
}

View File

@@ -0,0 +1,383 @@
#[macro_use]
extern crate log;
use nix::sys::signal::{self, Signal};
use nix::unistd::Pid;
use std::error::Error;
use std::io::Read;
use anyhow::{Context, Result};
use clap::{ArgAction, Args, Parser, Subcommand};
use rfs::fungi;
use rfs::store::{self};
use rfs::{cache, config};
mod fs;
/// mount flists
#[derive(Parser, Debug)]
#[clap(name ="rfs", author, version = env!("GIT_VERSION"), about, long_about = None)]
struct Options {
/// enable debugging logs
#[clap(long, action=ArgAction::Count)]
debug: u8,
#[command(subcommand)]
command: Commands,
}
#[derive(Subcommand, Debug)]
enum Commands {
/// mount an FL
Mount(MountOptions),
/// create an FL and upload blocks to provided storage
Pack(PackOptions),
/// unpack (downloads) content of an FL to the provided location
Unpack(UnpackOptions),
/// clone copies the data from the stores of an FL to another stores
Clone(CloneOptions),
/// list or modify FL metadata and stores
Config(ConfigOptions),
}
#[derive(Args, Debug)]
struct MountOptions {
/// path to metadata file (flist)
#[clap(short, long)]
meta: String,
/// directory used as cache for downloaded file chunks
#[clap(short, long, default_value_t = String::from("/tmp/cache"))]
cache: String,
/// run in the background.
#[clap(short, long)]
daemon: bool,
/// log file only used with daemon mode
#[clap(short, long)]
log: Option<String>,
/// target mountpoint
target: String,
}
#[derive(Args, Debug)]
struct PackOptions {
/// path to metadata file (flist)
#[clap(short, long)]
meta: String,
/// store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for
/// sharding. the URL is per store type, please check docs for more information
#[clap(short, long, action=ArgAction::Append)]
store: Vec<String>,
/// disable automatic password stripping from store url, otherwise the password will be stored in the fl.
#[clap(long, default_value_t = false)]
no_strip_password: bool,
/// target directory to upload
target: String,
}
#[derive(Args, Debug)]
struct UnpackOptions {
/// path to metadata file (flist)
#[clap(short, long)]
meta: String,
/// directory used as cache for downloaded file chunks
#[clap(short, long, default_value_t = String::from("/tmp/cache"))]
cache: String,
/// preserve file ownership from the FL, otherwise use the current user ownership.
/// setting this flag to true normally requires sudo
#[clap(short, long, default_value_t = false)]
preserve_ownership: bool,
/// target directory for unpacking
target: String,
}
#[derive(Args, Debug)]
struct CloneOptions {
/// path to metadata file (flist)
#[clap(short, long)]
meta: String,
/// store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for
/// sharding. the URL is per store type, please check docs for more information
#[clap(short, long, action=ArgAction::Append)]
store: Vec<String>,
/// directory used as cache for downloaded file chunks
#[clap(short, long, default_value_t = String::from("/tmp/cache"))]
cache: String,
}
#[derive(Args, Debug)]
struct ConfigOptions {
/// path to metadata file (flist)
#[clap(short, long)]
meta: String,
#[command(subcommand)]
command: ConfigCommands,
}
#[derive(Subcommand, Debug)]
enum ConfigCommands {
#[command(subcommand)]
Tag(TagOperation),
#[command(subcommand)]
Store(StoreOperation),
}
#[derive(Subcommand, Debug)]
enum TagOperation {
List,
Add(TagAddOptions),
Delete(TagDeleteOptions),
}
#[derive(Args, Debug)]
struct TagAddOptions {
/// pair of key-values separated with '='
#[clap(short, long, value_parser = parse_key_val::<String, String>, number_of_values = 1)]
tag: Vec<(String, String)>,
}
#[derive(Args, Debug)]
struct TagDeleteOptions {
/// key to remove
#[clap(short, long, action=ArgAction::Append)]
key: Vec<String>,
/// remove all tags
#[clap(short, long, default_value_t = false)]
all: bool,
}
#[derive(Subcommand, Debug)]
enum StoreOperation {
List,
Add(StoreAddOptions),
Delete(StoreDeleteOptions),
}
#[derive(Args, Debug)]
struct StoreAddOptions {
/// store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for
/// sharding. the URL is per store type, please check docs for more information
#[clap(short, long, action=ArgAction::Append)]
store: Vec<String>,
}
#[derive(Args, Debug)]
struct StoreDeleteOptions {
/// store to remove
#[clap(short, long, action=ArgAction::Append)]
store: Vec<String>,
/// remove all stores
#[clap(short, long, default_value_t = false)]
all: bool,
}
/// Parse a single key-value pair
fn parse_key_val<T, U>(s: &str) -> Result<(T, U), Box<dyn Error + Send + Sync + 'static>>
where
T: std::str::FromStr,
T::Err: Error + Send + Sync + 'static,
U: std::str::FromStr,
U::Err: Error + Send + Sync + 'static,
{
let pos = s
.find('=')
.ok_or_else(|| format!("invalid KEY=value: no `=` found in `{s}`"))?;
Ok((s[..pos].parse()?, s[pos + 1..].parse()?))
}
fn main() -> Result<()> {
let opts = Options::parse();
simple_logger::SimpleLogger::new()
.with_utc_timestamps()
.with_level({
match opts.debug {
0 => log::LevelFilter::Info,
1 => log::LevelFilter::Debug,
_ => log::LevelFilter::Trace,
}
})
.with_module_level("sqlx", log::Level::Error.to_level_filter())
.init()?;
log::debug!("options: {:#?}", opts);
match opts.command {
Commands::Mount(opts) => mount(opts),
Commands::Pack(opts) => pack(opts),
Commands::Unpack(opts) => unpack(opts),
Commands::Clone(opts) => clone(opts),
Commands::Config(opts) => config(opts),
}
}
fn pack(opts: PackOptions) -> Result<()> {
let rt = tokio::runtime::Runtime::new()?;
rt.block_on(async move {
let store = store::parse_router(opts.store.as_slice()).await?;
let meta = fungi::Writer::new(opts.meta, true).await?;
rfs::pack(meta, store, opts.target, !opts.no_strip_password, None).await?;
Ok(())
})
}
fn unpack(opts: UnpackOptions) -> Result<()> {
let rt = tokio::runtime::Runtime::new()?;
rt.block_on(async move {
let meta = fungi::Reader::new(opts.meta)
.await
.context("failed to initialize metadata database")?;
let router = store::get_router(&meta).await?;
let cache = cache::Cache::new(opts.cache, router);
rfs::unpack(&meta, &cache, opts.target, opts.preserve_ownership).await?;
Ok(())
})
}
fn mount(opts: MountOptions) -> Result<()> {
if is_mountpoint(&opts.target)? {
eprintln!("target {} is already a mount point", opts.target);
std::process::exit(1);
}
if opts.daemon {
let pid_file = tempfile::NamedTempFile::new()?;
let target = opts.target.clone();
let mut daemon = daemonize::Daemonize::new()
.working_directory(std::env::current_dir()?)
.pid_file(pid_file.path());
if let Some(ref log) = opts.log {
let out = std::fs::File::create(log)?;
let err = out.try_clone()?;
daemon = daemon.stdout(out).stderr(err);
}
match daemon.execute() {
daemonize::Outcome::Parent(result) => {
result.context("daemonize")?;
wait_child(target, pid_file);
return Ok(());
}
_ => {}
}
}
let rt = tokio::runtime::Runtime::new()?;
rt.block_on(fuse(opts))
}
fn is_mountpoint<S: AsRef<str>>(target: S) -> Result<bool> {
use std::process::Command;
let output = Command::new("mountpoint")
.arg("-q")
.arg(target.as_ref())
.output()
.context("failed to check mountpoint")?;
Ok(output.status.success())
}
fn wait_child(target: String, mut pid_file: tempfile::NamedTempFile) {
for _ in 0..5 {
if is_mountpoint(&target).unwrap() {
return;
}
std::thread::sleep(std::time::Duration::from_secs(1));
}
let mut buf = String::new();
if let Err(e) = pid_file.read_to_string(&mut buf) {
error!("failed to read pid_file: {:#}", e);
}
let pid = buf.parse::<i32>();
match pid {
Err(e) => error!("failed to parse pid_file contents {}: {:#}", buf, e),
Ok(v) => {
let _ = signal::kill(Pid::from_raw(v), Signal::SIGTERM);
} // probably the child exited on its own
}
// cleanup is not performed if the process is terminated with exit(2)
drop(pid_file);
eprintln!("failed to mount in under 5 seconds, please check logs for more information");
std::process::exit(1);
}
async fn fuse(opts: MountOptions) -> Result<()> {
let meta = fungi::Reader::new(opts.meta)
.await
.context("failed to initialize metadata database")?;
let router = store::get_router(&meta).await?;
let cache = cache::Cache::new(opts.cache, router);
let filesystem = fs::Filesystem::new(meta, cache);
filesystem.mount(opts.target).await
}
fn clone(opts: CloneOptions) -> Result<()> {
let rt = tokio::runtime::Runtime::new()?;
rt.block_on(async move {
let store = store::parse_router(opts.store.as_slice()).await?;
let meta = fungi::Reader::new(opts.meta)
.await
.context("failed to initialize metadata database")?;
let router = store::get_router(&meta).await?;
let cache = cache::Cache::new(opts.cache, router);
rfs::clone(meta, store, cache).await?;
Ok(())
})
}
fn config(opts: ConfigOptions) -> Result<()> {
let rt = tokio::runtime::Runtime::new()?;
rt.block_on(async move {
let writer = fungi::Writer::new(opts.meta.clone(), false)
.await
.context("failed to initialize metadata database")?;
let reader = fungi::Reader::new(opts.meta)
.await
.context("failed to initialize metadata database")?;
match opts.command {
ConfigCommands::Tag(opts) => match opts {
TagOperation::List => config::tag_list(reader).await?,
TagOperation::Add(opts) => config::tag_add(writer, opts.tag).await?,
TagOperation::Delete(opts) => {
config::tag_delete(writer, opts.key, opts.all).await?
}
},
ConfigCommands::Store(opts) => match opts {
StoreOperation::List => config::store_list(reader).await?,
StoreOperation::Add(opts) => config::store_add(writer, opts.store).await?,
StoreOperation::Delete(opts) => {
config::store_delete(writer, opts.store, opts.all).await?
}
},
}
Ok(())
})
}

View File

@@ -0,0 +1,267 @@
use crate::fungi::meta::{Ino, Inode};
use crate::fungi::{Error, Result, Writer};
use crate::store::{BlockStore, Store};
use anyhow::Context;
use futures::lock::Mutex;
use std::collections::LinkedList;
use std::ffi::OsString;
use std::fs::Metadata;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::sync::mpsc::Sender;
use std::sync::Arc;
use workers::WorkerPool;
const BLOB_SIZE: usize = 512 * 1024; // 512K
type FailuresList = Arc<Mutex<Vec<(PathBuf, Error)>>>;
#[derive(Debug)]
struct Item(Ino, PathBuf, OsString, Metadata);
/// creates an FL from the given root location. It takes ownership of the writer because
/// it's logically incorrect to store multiple filesystems in the same FL.
/// All file chunks will then be uploaded to the provided store
///
pub async fn pack<P: Into<PathBuf>, S: Store>(
writer: Writer,
store: S,
root: P,
strip_password: bool,
sender: Option<Sender<u32>>,
) -> Result<()> {
use tokio::fs;
// building routing table from store information
for route in store.routes() {
let mut store_url = route.url;
if strip_password {
let mut url = url::Url::parse(&store_url).context("failed to parse store url")?;
if url.password().is_some() {
url.set_password(None)
.map_err(|_| anyhow::anyhow!("failed to strip password"))?;
store_url = url.to_string();
}
}
writer
.route(
route.start.unwrap_or(u8::MIN),
route.end.unwrap_or(u8::MAX),
store_url,
)
.await?;
}
let store: BlockStore<S> = store.into();
let root = root.into();
let meta = fs::metadata(&root)
.await
.context("failed to get root stats")?;
let mut list = LinkedList::default();
let failures = FailuresList::default();
let uploader = Uploader::new(store, writer.clone(), Arc::clone(&failures));
let mut pool = workers::WorkerPool::new(uploader.clone(), super::PARALLEL_UPLOAD);
pack_one(
&mut list,
&writer,
&mut pool,
Item(0, root, OsString::from("/"), meta),
sender.as_ref(),
)
.await?;
while !list.is_empty() {
let dir = list.pop_back().unwrap();
pack_one(&mut list, &writer, &mut pool, dir, sender.as_ref()).await?;
}
pool.close().await;
let failures = failures.lock().await;
if failures.is_empty() {
return Ok(());
}
log::error!("failed to upload one or more files");
for (file, error) in failures.iter() {
log::error!(" - failed to upload file {}: {}", file.display(), error);
}
Err(Error::Anyhow(anyhow::anyhow!(
"failed to upload ({}) files",
failures.len()
)))
}
/// pack_one is called for each dir
async fn pack_one<S: Store>(
list: &mut LinkedList<Item>,
writer: &Writer,
pool: &mut WorkerPool<Uploader<S>>,
Item(parent, path, name, meta): Item,
sender: Option<&Sender<u32>>,
) -> Result<()> {
use std::os::unix::fs::MetadataExt;
use tokio::fs;
let current = writer
.inode(Inode {
ino: 0,
name: String::from_utf8_lossy(name.as_bytes()).into_owned(),
parent,
size: meta.size(),
uid: meta.uid(),
gid: meta.gid(),
mode: meta.mode().into(),
rdev: meta.rdev(),
ctime: meta.ctime(),
mtime: meta.mtime(),
data: None,
})
.await?;
let mut children = fs::read_dir(&path)
.await
.context("failed to list dir children")?;
while let Some(child) = children
.next_entry()
.await
.context("failed to read next entry from directory")?
{
let name = child.file_name();
let meta = child.metadata().await?;
let child_path = path.join(&name);
if let Some(ref sender) = sender {
sender.send(1).context("failed to send progress")?;
}
// if this child is a directory we add it to the tail of the list
if meta.is_dir() {
list.push_back(Item(current, child_path.clone(), name, meta));
continue;
}
// create entry
// otherwise create the file meta
let data = if meta.is_symlink() {
let target = fs::read_link(&child_path).await?;
Some(target.as_os_str().as_bytes().into())
} else {
None
};
let child_ino = writer
.inode(Inode {
ino: 0,
name: String::from_utf8_lossy(name.as_bytes()).into_owned(),
parent: current,
size: meta.size(),
uid: meta.uid(),
gid: meta.gid(),
mode: meta.mode().into(),
rdev: meta.rdev(),
ctime: meta.ctime(),
mtime: meta.mtime(),
data,
})
.await?;
if !meta.is_file() {
continue;
}
let worker = pool.get().await;
worker
.send((child_ino, child_path))
.context("failed to schedule file upload")?;
}
Ok(())
}
struct Uploader<S>
where
S: Store,
{
store: Arc<BlockStore<S>>,
failures: FailuresList,
writer: Writer,
buffer: [u8; BLOB_SIZE],
}
impl<S> Clone for Uploader<S>
where
S: Store,
{
fn clone(&self) -> Self {
Self {
store: Arc::clone(&self.store),
failures: Arc::clone(&self.failures),
writer: self.writer.clone(),
buffer: [0; BLOB_SIZE],
}
}
}
impl<S> Uploader<S>
where
S: Store,
{
fn new(store: BlockStore<S>, writer: Writer, failures: FailuresList) -> Self {
Self {
store: Arc::new(store),
failures,
writer,
buffer: [0; BLOB_SIZE],
}
}
async fn upload(&mut self, ino: Ino, path: &Path) -> Result<()> {
use tokio::fs;
use tokio::io::AsyncReadExt;
use tokio::io::BufReader;
// create file blocks
let fd = fs::OpenOptions::default().read(true).open(path).await?;
let mut reader = BufReader::new(fd);
loop {
let size = reader.read(&mut self.buffer).await?;
if size == 0 {
break;
}
// write block to remote store
let block = self.store.set(&self.buffer[..size]).await?;
// write block info to meta
self.writer.block(ino, &block.id, &block.key).await?;
}
Ok(())
}
}
#[async_trait::async_trait]
impl<S> workers::Work for Uploader<S>
where
S: Store,
{
type Input = (Ino, PathBuf);
type Output = ();
async fn run(&mut self, (ino, path): Self::Input) -> Self::Output {
log::info!("uploading {:?}", path);
if let Err(err) = self.upload(ino, &path).await {
log::error!("failed to upload file {}: {:#}", path.display(), err);
self.failures.lock().await.push((path, err));
}
}
}

View File

@@ -0,0 +1,133 @@
use super::{Error, Result, Store};
use crate::fungi::meta::Block;
use aes_gcm::{
aead::{
generic_array::{self, GenericArray},
Aead, KeyInit,
},
Aes256Gcm, Nonce,
};
fn hash(input: &[u8]) -> GenericArray<u8, generic_array::typenum::U32> {
let hash = blake2b_simd::Params::new().hash_length(32).hash(input);
GenericArray::from_slice(hash.as_bytes()).to_owned()
}
/// The block store builds on top of a store and adds encryption and compression
#[derive(Clone, Debug)]
pub struct BlockStore<S: Store> {
store: S,
}
impl<S> From<S> for BlockStore<S>
where
S: Store,
{
fn from(store: S) -> Self {
Self { store }
}
}
impl<S> BlockStore<S>
where
S: Store,
{
pub fn inner(self) -> S {
self.store
}
pub async fn get(&self, block: &Block) -> Result<Vec<u8>> {
let encrypted = self.store.get(&block.id).await?;
let cipher = Aes256Gcm::new_from_slice(&block.key).map_err(|_| Error::InvalidKey)?;
let nonce = Nonce::from_slice(&block.key[..12]);
let compressed = cipher
.decrypt(nonce, encrypted.as_slice())
.map_err(|_| Error::EncryptionError)?;
let mut decoder = snap::raw::Decoder::new();
let plain = decoder.decompress_vec(&compressed)?;
Ok(plain)
}
pub async fn set(&self, blob: &[u8]) -> Result<Block> {
// we first calculate the hash of the plain-text data
let key = hash(blob);
let mut encoder = snap::raw::Encoder::new();
// data is then compressed
let compressed = encoder.compress_vec(blob)?;
// we then encrypt it using the hash of the plain-text as a key
let cipher = Aes256Gcm::new(&key);
// the nonce is still derived from the key; a nonce is 12 bytes for aes
// it's done like this so a store can still dedup the data
let nonce = Nonce::from_slice(&key[..12]);
// we encrypt the data
let encrypted = cipher
.encrypt(nonce, compressed.as_slice())
.map_err(|_| Error::EncryptionError)?;
// we hash it again, and use that as the store key
let id = hash(&encrypted);
let block = Block {
id: id.into(),
key: key.into(),
};
self.store.set(&block.id, &encrypted).await?;
Ok(block)
}
}
#[cfg(test)]
mod test {
use super::super::Route;
use super::*;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;
#[derive(Default)]
struct InMemoryStore {
map: Arc<Mutex<HashMap<Vec<u8>, Vec<u8>>>>,
}
#[async_trait::async_trait]
impl Store for InMemoryStore {
async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
let map = self.map.lock().await;
let v = map.get(key).ok_or(Error::KeyNotFound)?;
Ok(v.clone())
}
async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
let mut map = self.map.lock().await;
map.insert(key.into(), blob.into());
Ok(())
}
fn routes(&self) -> Vec<Route> {
vec![Route::url("mem://")]
}
}
#[tokio::test]
async fn test_block_store() {
let store = InMemoryStore::default();
let block_store = BlockStore::from(store);
let blob = "some random data to store";
let block = block_store.set(blob.as_bytes()).await.unwrap();
let received = block_store.get(&block).await.unwrap();
assert_eq!(blob.as_bytes(), received.as_slice());
}
}

View File

@@ -0,0 +1,83 @@
use super::{Error, Result, Route, Store};
use std::io::ErrorKind;
use std::os::unix::prelude::OsStrExt;
use std::path::PathBuf;
use tokio::fs;
use url;
pub const SCHEME: &str = "dir";
/// DirStore is a simple store that stores blobs on the filesystem
/// and is mainly used for testing
#[derive(Clone)]
pub struct DirStore {
root: PathBuf,
}
impl DirStore {
pub async fn make<U: AsRef<str>>(url: &U) -> Result<DirStore> {
let u = url::Url::parse(url.as_ref())?;
if u.scheme() != SCHEME {
return Err(Error::InvalidScheme(u.scheme().into(), SCHEME.into()));
}
Ok(DirStore::new(u.path()).await?)
}
pub async fn new<P: Into<PathBuf>>(root: P) -> Result<Self> {
let root = root.into();
fs::create_dir_all(&root).await?;
Ok(Self { root })
}
}
#[async_trait::async_trait]
impl Store for DirStore {
async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
let file_name = hex::encode(key);
let dir_path = self.root.join(&file_name[0..2]);
let mut path = dir_path.join(&file_name);
let data = match fs::read(&path).await {
Ok(data) => data,
Err(err) if err.kind() == ErrorKind::NotFound => {
path = self.root.join(file_name);
let data = match fs::read(&path).await {
Ok(data) => data,
Err(err) if err.kind() == ErrorKind::NotFound => {
return Err(Error::KeyNotFound);
}
Err(err) => {
return Err(Error::IO(err));
}
};
data
}
Err(err) => {
return Err(Error::IO(err));
}
};
Ok(data)
}
async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
let file_name = hex::encode(key);
let dir_path = self.root.join(&file_name[0..2]);
fs::create_dir_all(&dir_path).await?;
let file_path = dir_path.join(file_name);
fs::write(file_path, blob).await?;
Ok(())
}
fn routes(&self) -> Vec<Route> {
let r = Route::url(format!(
"dir://{}",
String::from_utf8_lossy(self.root.as_os_str().as_bytes())
));
vec![r]
}
}

View File

@@ -0,0 +1,73 @@
use super::{Error, Result, Route, Store};
use reqwest::{self, StatusCode};
use url::Url;
#[derive(Clone)]
pub struct HTTPStore {
url: Url,
}
impl HTTPStore {
pub async fn make<U: AsRef<str>>(url: &U) -> Result<HTTPStore> {
let u = Url::parse(url.as_ref())?;
if u.scheme() != "http" && u.scheme() != "https" {
return Err(Error::Other(anyhow::Error::msg("invalid scheme")));
}
Ok(HTTPStore::new(u).await?)
}
pub async fn new<U: Into<Url>>(url: U) -> Result<Self> {
let url = url.into();
Ok(Self { url })
}
}
#[async_trait::async_trait]
impl Store for HTTPStore {
async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
let file = hex::encode(key);
let mut file_path = self.url.clone();
file_path
.path_segments_mut()
.map_err(|_| Error::Other(anyhow::Error::msg("cannot be base")))?
.push(&file[0..2])
.push(&file);
let mut legacy_path = self.url.clone();
legacy_path
.path_segments_mut()
.map_err(|_| Error::Other(anyhow::Error::msg("cannot be base")))?
.push(&file);
let data = match reqwest::get(file_path).await {
Ok(mut response) => {
if response.status() == StatusCode::NOT_FOUND {
response = reqwest::get(legacy_path)
.await
.map_err(|_| Error::KeyNotFound)?;
if response.status() != StatusCode::OK {
return Err(Error::KeyNotFound);
}
}
if response.status() != StatusCode::OK {
return Err(Error::Unavailable);
}
response.bytes().await.map_err(|e| Error::Other(e.into()))?
}
Err(err) => return Err(Error::Other(err.into())),
};
Ok(data.into())
}
async fn set(&self, _key: &[u8], _blob: &[u8]) -> Result<()> {
Err(Error::Other(anyhow::Error::msg(
"http store doesn't support uploading",
)))
}
fn routes(&self) -> Vec<Route> {
let r = Route::url(self.url.clone());
vec![r]
}
}

View File

@@ -0,0 +1,240 @@
mod bs;
pub mod dir;
pub mod http;
mod router;
pub mod s3store;
pub mod zdb;
use anyhow::Context;
use rand::seq::SliceRandom;
pub use bs::BlockStore;
use regex::Regex;
use crate::fungi;
pub use self::router::Router;
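/// make builds a concrete [`Stores`] instance from a store URL based on its scheme,
/// e.g. `dir:///tmp/store`, `zdb://host:port/namespace`,
/// `s3://key:secret@host:port/bucket` or `http(s)://host/path`.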
pub async fn make<U: AsRef<str>>(u: U) -> Result<Stores> {
let parsed = url::Url::parse(u.as_ref())?;
match parsed.scheme() {
dir::SCHEME => return Ok(Stores::Dir(dir::DirStore::make(&u).await?)),
"s3" | "s3s" | "s3s+tls" => return Ok(Stores::S3(s3store::S3Store::make(&u).await?)),
"zdb" => return Ok(Stores::ZDB(zdb::ZdbStore::make(&u).await?)),
"http" | "https" => return Ok(Stores::HTTP(http::HTTPStore::make(&u).await?)),
_ => return Err(Error::UnknownStore(parsed.scheme().into())),
}
}
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("key not found")]
KeyNotFound,
#[error("invalid key")]
InvalidKey,
#[error("invalid blob")]
InvalidBlob,
#[error("key is not routable")]
KeyNotRoutable,
#[error("store is not available")]
Unavailable,
#[error("compression error: {0}")]
Compression(#[from] snap::Error),
#[error("encryption error")]
EncryptionError,
// TODO: better display for the Box<Vec<Self>>
#[error("multiple error: {0:?}")]
Multiple(Box<Vec<Self>>),
#[error("io error: {0}")]
IO(#[from] std::io::Error),
#[error("url parse error: {0}")]
Url(#[from] url::ParseError),
#[error("unknown store type '{0}'")]
UnknownStore(String),
#[error("invalid schema '{0}' expected '{1}'")]
InvalidScheme(String, String),
#[error("unknown store error {0:#}")]
Other(#[from] anyhow::Error),
}
pub type Result<T> = std::result::Result<T, Error>;
pub struct Route {
pub start: Option<u8>,
pub end: Option<u8>,
pub url: String,
}
impl Route {
pub fn url<S: Into<String>>(s: S) -> Self {
Self {
start: None,
end: None,
url: s.into(),
}
}
}
/// The store trait defines a simple (low level) key/value store interface to set/get blobs.
/// The only concern of a store is to persist the given data under the given key and to
/// provide the means to retrieve it again once a get is called.
#[async_trait::async_trait]
pub trait Store: Send + Sync + 'static {
async fn get(&self, key: &[u8]) -> Result<Vec<u8>>;
async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()>;
fn routes(&self) -> Vec<Route>;
}
#[async_trait::async_trait]
impl<S> Store for Router<S>
where
S: Store,
{
async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
if key.is_empty() {
return Err(Error::InvalidKey);
}
let mut errors = Vec::default();
// to make it fair we shuffle the list of matching routers randomly every time
// before we do a get
let mut routers: Vec<&S> = self.route(key[0]).collect();
routers.shuffle(&mut rand::thread_rng());
for store in routers {
match store.get(key).await {
Ok(object) => return Ok(object),
Err(err) => errors.push(err),
};
}
if errors.is_empty() {
return Err(Error::KeyNotRoutable);
}
// return aggregated errors
return Err(Error::Multiple(Box::new(errors)));
}
async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
if key.is_empty() {
return Err(Error::InvalidKey);
}
let mut b = false;
for store in self.route(key[0]) {
b = true;
store.set(key, blob).await?;
}
if !b {
return Err(Error::KeyNotRoutable);
}
Ok(())
}
fn routes(&self) -> Vec<Route> {
let mut routes = Vec::default();
for (key, value) in self.routes.iter() {
for sub in value.routes() {
let r = Route {
start: Some(sub.start.unwrap_or(*key.start())),
end: Some(sub.end.unwrap_or(*key.end())),
url: sub.url,
};
routes.push(r);
}
}
routes
}
}
pub async fn get_router(meta: &fungi::Reader) -> Result<Router<Stores>> {
let mut router = Router::new();
for route in meta.routes().await.context("failed to get store routes")? {
let store = make(&route.url)
.await
.with_context(|| format!("failed to initialize store '{}'", route.url))?;
router.add(route.start, route.end, store);
}
Ok(router)
}
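/// parse_router builds a [`Router`] from a list of store URLs. A URL may be prefixed
/// with a hex byte range to shard keys across stores, e.g. `00-7f=dir:///tmp/store0`
/// and `80-ff=dir:///tmp/store1`; a bare URL covers the full `00-ff` range.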
pub async fn parse_router(urls: &[String]) -> anyhow::Result<Router<Stores>> {
let mut router = Router::new();
let pattern = r"^(?P<range>[0-9a-f]{2}-[0-9a-f]{2})=(?P<url>.+)$";
let re = Regex::new(pattern)?;
for u in urls {
let ((start, end), store) = match re.captures(u) {
None => ((0x00, 0xff), make(u).await?),
Some(captures) => {
let url = captures.name("url").context("missing url group")?.as_str();
let rng = captures
.name("range")
.context("missing range group")?
.as_str();
let store = make(url).await?;
let range = match rng.split_once('-') {
None => anyhow::bail!("invalid range format"),
Some((low, high)) => (
u8::from_str_radix(low, 16)
.with_context(|| format!("failed to parse low range '{}'", low))?,
u8::from_str_radix(high, 16)
.with_context(|| format!("failed to parse high range '{}'", high))?,
),
};
(range, store)
}
};
router.add(start, end, store);
}
Ok(router)
}
pub enum Stores {
S3(s3store::S3Store),
Dir(dir::DirStore),
ZDB(zdb::ZdbStore),
HTTP(http::HTTPStore),
}
#[async_trait::async_trait]
impl Store for Stores {
async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
match self {
self::Stores::S3(s3_store) => s3_store.get(key).await,
self::Stores::Dir(dir_store) => dir_store.get(key).await,
self::Stores::ZDB(zdb_store) => zdb_store.get(key).await,
self::Stores::HTTP(http_store) => http_store.get(key).await,
}
}
async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
match self {
self::Stores::S3(s3_store) => s3_store.set(key, blob).await,
self::Stores::Dir(dir_store) => dir_store.set(key, blob).await,
self::Stores::ZDB(zdb_store) => zdb_store.set(key, blob).await,
self::Stores::HTTP(http_store) => http_store.set(key, blob).await,
}
}
fn routes(&self) -> Vec<Route> {
match self {
self::Stores::S3(s3_store) => s3_store.routes(),
self::Stores::Dir(dir_store) => dir_store.routes(),
self::Stores::ZDB(zdb_store) => zdb_store.routes(),
self::Stores::HTTP(http_store) => http_store.routes(),
}
}
}

View File

@@ -0,0 +1,56 @@
use std::ops::RangeInclusive;
/// route implements a naive prefix router by going through the complete set of
/// available routes and finding the ones that match a given prefix
#[derive(Default, Clone)]
pub struct Router<T> {
pub(crate) routes: Vec<(RangeInclusive<u8>, T)>,
}
impl<T> Router<T> {
pub fn new() -> Self {
Self {
routes: Vec::default(),
}
}
/// add a route covering the inclusive byte range `start..=end`
pub fn add(&mut self, start: u8, end: u8, route: T) {
self.routes.push((start..=end, route));
}
/// return all stores that match a certain key
///
/// TODO: maybe they need to be randomized
pub fn route(&self, i: u8) -> impl Iterator<Item = &T> {
self.routes
.iter()
.filter(move |f| f.0.contains(&i))
.map(|v| &v.1)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test() {
let mut router = Router::default();
router.add(0, 255, "a");
router.add(0, 255, "b");
router.add(0, 128, "c");
let paths: Vec<&str> = router.route(200).map(|v| *v).collect();
assert_eq!(paths.len(), 2);
assert_eq!(paths[0], "a");
assert_eq!(paths[1], "b");
let paths: Vec<&str> = router.route(0).map(|v| *v).collect();
assert_eq!(paths.len(), 3);
assert_eq!(paths[0], "a");
assert_eq!(paths[1], "b");
assert_eq!(paths[2], "c");
}
}

View File

@@ -0,0 +1,191 @@
use super::{Error, Result, Route, Store};
use anyhow::Context;
use s3::{creds::Credentials, error::S3Error, Bucket, Region};
use url::Url;
fn get_config<U: AsRef<str>>(u: U) -> Result<(Credentials, Region, String)> {
let url = Url::parse(u.as_ref())?;
let access_key = url.username().to_string();
let access_secret = url.password().map(|s| s.to_owned());
let host = url.host_str().context("host not found")?;
let port = url.port().context("port not found")?;
let scheme = match url.scheme() {
"s3" => "http://",
"s3+tls" | "s3s" => "https://",
_ => return Err(Error::Other(anyhow::Error::msg("invalid scheme"))),
};
let endpoint = format!("{}{}:{}", scheme, host, port);
let bucket_name = url.path().trim_start_matches('/').to_string();
let region_name = url
.query_pairs()
.find(|(key, _)| key == "region")
.map(|(_, value)| value.to_string())
.unwrap_or_default();
Ok((
Credentials {
access_key: Some(access_key),
secret_key: access_secret,
security_token: None,
session_token: None,
expiration: None,
},
Region::Custom {
region: region_name,
endpoint,
},
bucket_name,
))
}
#[derive(Clone)]
pub struct S3Store {
bucket: Bucket,
url: String,
// this is only here as a workaround for this bug https://github.com/durch/rust-s3/issues/337
// because rfs uses the store in an async (and parallel) manner to upload/download blobs
// we need to synchronize this locally in that store which will hurt performance
// the two options now are to either wait until this bug is fixed, or switch to another client
// but for now we keep this workaround
}
impl S3Store {
pub async fn make<U: AsRef<str>>(url: &U) -> Result<S3Store> {
let (cred, region, bucket_name) = get_config(url.as_ref())?;
Ok(S3Store::new(url.as_ref(), &bucket_name, region, cred)?)
}
pub fn new(url: &str, bucket_name: &str, region: Region, cred: Credentials) -> Result<Self> {
let bucket = Bucket::new(bucket_name, region, cred)
.context("failed instantiate bucket")?
.with_path_style();
Ok(Self {
bucket,
url: url.to_owned(),
})
}
}
#[async_trait::async_trait]
impl Store for S3Store {
async fn get(&self, key: &[u8]) -> super::Result<Vec<u8>> {
match self.bucket.get_object(hex::encode(key)).await {
Ok(res) => Ok(res.to_vec()),
Err(S3Error::HttpFailWithBody(404, _)) => Err(Error::KeyNotFound),
Err(S3Error::Io(err)) => Err(Error::IO(err)),
Err(err) => Err(anyhow::Error::from(err).into()),
}
}
async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
self.bucket
.put_object(hex::encode(key), blob)
.await
.context("put object over s3 storage")?;
Ok(())
}
fn routes(&self) -> Vec<Route> {
vec![Route::url(self.url.clone())]
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_get_config() {
let (cred, region, bucket_name) =
get_config("s3s://minioadmin:minioadmin@127.0.0.1:9000/mybucket?region=minio").unwrap();
assert_eq!(
cred,
Credentials {
access_key: Some("minioadmin".to_string()),
secret_key: Some("minioadmin".to_string()),
security_token: None,
session_token: None,
expiration: None,
}
);
assert_eq!(
region,
Region::Custom {
region: "minio".to_string(),
endpoint: "https://127.0.0.1:9000".to_string()
}
);
assert_eq!(bucket_name, "mybucket".to_string())
}
#[test]
fn test_get_config_without_tls() {
let (cred, region, bucket_name) =
get_config("s3://minioadmin:minioadmin@127.0.0.1:9000/mybucket?region=minio").unwrap();
assert_eq!(
cred,
Credentials {
access_key: Some("minioadmin".to_string()),
secret_key: Some("minioadmin".to_string()),
security_token: None,
session_token: None,
expiration: None,
}
);
assert_eq!(
region,
Region::Custom {
region: "minio".to_string(),
endpoint: "http://127.0.0.1:9000".to_string()
}
);
assert_eq!(bucket_name, "mybucket".to_string())
}
#[ignore]
#[tokio::test]
async fn test_set_get() {
let url = "s3://minioadmin:minioadmin@127.0.0.1:9000/mybucket?region=minio";
let (cred, region, bucket_name) = get_config(url).unwrap();
let store = S3Store::new(url, &bucket_name, region, cred);
let store = store.unwrap();
let key = b"test.txt";
let blob = b"# Hello, World!";
_ = store.set(key, blob).await;
let get_res = store.get(key).await;
let get_res = get_res.unwrap();
assert_eq!(get_res, blob)
}
#[ignore]
#[tokio::test]
async fn test_set_get_without_region() {
let url = "s3://minioadmin:minioadmin@127.0.0.1:9000/mybucket";
let (cred, region, bucket_name) = get_config(url).unwrap();
let store = S3Store::new(url, &bucket_name, region, cred);
let store = store.unwrap();
let key = b"test2.txt";
let blob = b"# Hello, World!";
_ = store.set(key, blob).await;
let get_res = store.get(key).await;
let get_res = get_res.unwrap();
assert_eq!(get_res, blob)
}
}

View File

@@ -0,0 +1,176 @@
use super::{Error, Result, Route, Store};
use anyhow::Context;
use bb8_redis::{
bb8::{CustomizeConnection, Pool},
redis::{
aio::Connection, cmd, AsyncCommands, ConnectionAddr, ConnectionInfo, RedisConnectionInfo,
RedisError,
},
RedisConnectionManager,
};
#[derive(Debug)]
struct WithNamespace {
namespace: Option<String>,
password: Option<String>,
}
#[async_trait::async_trait]
impl CustomizeConnection<Connection, RedisError> for WithNamespace {
async fn on_acquire(&self, connection: &mut Connection) -> anyhow::Result<(), RedisError> {
match self.namespace {
Some(ref ns) if ns != "default" => {
let mut c = cmd("SELECT");
let c = c.arg(ns);
if let Some(ref password) = self.password {
c.arg(password);
}
let result = c.query_async(connection).await;
if let Err(ref err) = result {
error!("failed to switch namespace to {}: {}", ns, err);
}
result
}
_ => Ok(()),
}
}
}
pub struct ZdbStoreFactory;
fn get_connection_info<U: AsRef<str>>(u: U) -> Result<(ConnectionInfo, Option<String>)> {
let u = url::Url::parse(u.as_ref())?;
let (address, namespace) = match u.host() {
Some(host) => {
let addr = match host {
url::Host::Domain(domain) => domain.to_owned(),
url::Host::Ipv4(ipv4) => ipv4.to_string(),
url::Host::Ipv6(ipv6) => ipv6.to_string(),
};
let addr = ConnectionAddr::Tcp(addr, u.port().unwrap_or(9900));
let ns: Option<String> = u
.path_segments()
.and_then(|s| s.last().map(|s| s.to_owned()));
(addr, ns)
}
None => (ConnectionAddr::Unix(u.path().into()), None),
};
Ok((
ConnectionInfo {
addr: address,
redis: RedisConnectionInfo {
db: 0,
username: if u.username().is_empty() {
None
} else {
Some(u.username().into())
},
password: u.password().map(|s| s.into()),
},
},
namespace,
))
}
#[derive(Clone)]
pub struct ZdbStore {
url: String,
pool: Pool<RedisConnectionManager>,
}
impl ZdbStore {
pub async fn make<U: AsRef<str>>(url: &U) -> Result<ZdbStore> {
let (mut info, namespace) = get_connection_info(url.as_ref())?;
let namespace = WithNamespace {
namespace,
password: info.redis.password.take(),
};
log::debug!("connection {:#?}", info);
log::debug!("switching namespace to: {:?}", namespace.namespace);
let mgr = RedisConnectionManager::new(info)
.context("failed to create redis connection manager")?;
let pool = Pool::builder()
.max_size(20)
.connection_customizer(Box::new(namespace))
.build(mgr)
.await
.context("failed to create connection pool")?;
Ok(ZdbStore {
url: url.as_ref().to_string(),
pool,
})
}
}
#[async_trait::async_trait]
impl Store for ZdbStore {
async fn get(&self, key: &[u8]) -> super::Result<Vec<u8>> {
let mut con = self.pool.get().await.context("failed to get connection")?;
let result: Option<Vec<u8>> = con.get(key).await.context("failed to get blob")?;
let result = result.ok_or(Error::KeyNotFound)?;
if result.is_empty() {
return Err(Error::InvalidBlob);
}
Ok(result)
}
async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
let mut con = self.pool.get().await.context("failed to get connection")?;
if con
.exists(key)
.await
.context("failed to check if blob exists")?
{
return Ok(());
};
con.set(key, blob).await.context("failed to set blob")?;
Ok(())
}
fn routes(&self) -> Vec<Route> {
vec![Route::url(self.url.clone())]
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_connection_info_simple() {
let (info, ns) = get_connection_info("zdb://hub.grid.tf:9900").unwrap();
assert_eq!(ns, None);
assert_eq!(info.addr, ConnectionAddr::Tcp("hub.grid.tf".into(), 9900));
}
#[test]
fn test_connection_info_ns() {
let (info, ns) = get_connection_info("zdb://username@hub.grid.tf/custom").unwrap();
assert_eq!(ns, Some("custom".into()));
assert_eq!(info.addr, ConnectionAddr::Tcp("hub.grid.tf".into(), 9900));
assert_eq!(info.redis.username, Some("username".into()));
}
#[test]
fn test_connection_info_unix() {
let (info, ns) = get_connection_info("zdb:///path/to/socket").unwrap();
assert_eq!(ns, None);
assert_eq!(info.addr, ConnectionAddr::Unix("/path/to/socket".into()));
}
}

View File

@@ -0,0 +1,184 @@
use crate::cache::Cache;
use crate::fungi::{
meta::{FileType, Inode, Result, Walk, WalkVisitor},
Reader,
};
use crate::store::Store;
use anyhow::Context;
use nix::unistd::{fchownat, FchownatFlags, Gid, Uid};
use std::fs::Permissions;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::{ffi::OsStr, fs};
use tokio::fs::OpenOptions;
/// unpack an FL to the given root location. It will download the files and reconstruct
/// the filesystem.
pub async fn unpack<P: AsRef<Path>, S: Store>(
meta: &Reader,
cache: &Cache<S>,
root: P,
preserve: bool,
) -> Result<()> {
let mut visitor = CopyVisitor::new(meta, cache, root.as_ref(), preserve);
meta.walk(&mut visitor).await
}
struct CopyVisitor<'a, S>
where
S: Store,
{
preserve: bool,
meta: &'a Reader,
cache: &'a Cache<S>,
root: &'a Path,
}
impl<'a, S> CopyVisitor<'a, S>
where
S: Store,
{
pub fn new(meta: &'a Reader, cache: &'a Cache<S>, root: &'a Path, preserve: bool) -> Self {
Self {
meta,
cache,
root,
preserve,
}
}
}
#[async_trait::async_trait]
impl<'a, S> WalkVisitor for CopyVisitor<'a, S>
where
S: Store,
{
async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
let rooted = self.root.join(path.strip_prefix("/").unwrap());
match node.mode.file_type() {
FileType::Dir => {
fs::create_dir_all(&rooted)
.with_context(|| format!("failed to create directory '{:?}'", rooted))?;
}
FileType::Regular => {
let mut fd = OpenOptions::new()
.create_new(true)
.write(true)
.truncate(true)
.open(&rooted)
.await
.with_context(|| format!("failed to create file '{:?}'", rooted))?;
let blocks = self.meta.blocks(node.ino).await?;
self.cache
.direct(&blocks, &mut fd)
.await
.with_context(|| format!("failed to download file '{:?}'", rooted))?;
fd.set_permissions(Permissions::from_mode(node.mode.mode()))
.await?;
}
FileType::Link => {
let target = node
.data
.as_deref()
.ok_or_else(|| anyhow::anyhow!("link has no target path"))?;
let target = Path::new(OsStr::from_bytes(target));
let target = if target.is_relative() {
target.to_owned()
} else {
self.root.join(target)
};
std::os::unix::fs::symlink(target, &rooted)
.with_context(|| format!("failed to create symlink '{:?}'", rooted))?;
}
_ => {
warn!("unknown file kind: {:?}", node.mode.file_type());
return Ok(Walk::Continue);
}
};
if self.preserve {
fchownat(
None,
&rooted,
Some(Uid::from_raw(node.uid)),
Some(Gid::from_raw(node.gid)),
FchownatFlags::NoFollowSymlink,
)
.with_context(|| format!("failed to change ownership of '{:?}'", &rooted))?;
}
Ok(Walk::Continue)
}
}
/*
TODO: parallel download ?
this is a download worker that can be used in a worker pool to download files
in parallel
struct Downloader<S>
where
S: Store,
{
cache: Arc<Cache<S>>,
}
impl<S> Downloader<S>
where
S: Store,
{
async fn download(&self, path: &Path, blocks: &[Block], mode: u32) -> Result<()> {
let mut fd = OpenOptions::new()
.create_new(true)
.write(true)
.truncate(true)
.open(&path)
.await
.with_context(|| format!("failed to create file '{:?}'", path))?;
self.cache
.direct(&blocks, &mut fd)
.await
.with_context(|| format!("failed to download file '{:?}'", path))?;
fd.set_permissions(Permissions::from_mode(mode)).await?;
Ok(())
}
}
impl<S> Clone for Downloader<S>
where
S: Store,
{
fn clone(&self) -> Self {
Self {
cache: Arc::clone(&self.cache),
}
}
}
#[async_trait::async_trait]
impl<S> workers::Work for Downloader<S>
where
S: Store,
{
type Input = (PathBuf, Vec<Block>, Mode);
type Output = ();
async fn run(&mut self, (path, blocks, mode): Self::Input) -> Self::Output {
if let Err(err) = self.download(&path, &blocks, mode.mode()).await {
log::error!("failed to download file {:?}: {}", path, err);
}
}
}
*/

View File

@@ -0,0 +1,197 @@
# Garage s3 server with flist
## Requirements
- tfcmd
- docker2fl
- rust
- docker
- git
- sqlite
- minio (or any third-party tool you want to use)
- caddy
### Install tfcmd
```bash
wget https://github.com/threefoldtech/tfgrid-sdk-go/releases/download/v0.15.11/tfgrid-sdk-go_Linux_x86_64.tar.gz
mkdir tfgrid-sdk-go
tar -xzf tfgrid-sdk-go_Linux_x86_64.tar.gz -C tfgrid-sdk-go
sudo mv tfgrid-sdk-go/tfcmd /usr/bin/
sudo rm -rf tfgrid-sdk-go_Linux_x86_64.tar.gz tfgrid-sdk-go
```
- Login to tfcmd
```bash
tfcmd login
```
### Install rust
```bash
apt-get update
apt-get install -y curl
curl https://sh.rustup.rs -sSf | sh
export PATH="$HOME/.cargo/bin:$PATH"
apt-get install -y build-essential
apt-get install -y musl-dev musl-tools
apt-get update
```
### Install docker
```bash
apt-get update
apt-get install -y ca-certificates curl
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
chmod a+r /etc/apt/keyrings/docker.asc
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
apt-get update
dockerd > docker.log 2>&1 &
```
### Install docker2fl
```bash
git clone https://github.com/threefoldtech/rfs.git
cd rfs
rustup target add x86_64-unknown-linux-musl
cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
mv ./target/x86_64-unknown-linux-musl/release/docker2fl /usr/local/bin
```
### Install sqlite
```bash
apt update
apt install sqlite3
```
### Install minio
```bash
curl https://dl.min.io/client/mc/release/linux-amd64/mc \
--create-dirs \
-o $HOME/minio-binaries/mc
chmod +x $HOME/minio-binaries/mc
export PATH=$PATH:$HOME/minio-binaries/
```
### Install Caddy
```bash
sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https curl
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list
sudo apt update
sudo apt install caddy
```
## Usage
### Deploy garage server
Run the garage server using the deploy garage [script](./deploy_garage.sh)
```bash
chmod +x deploy_garage.sh
./deploy_garage.sh
```
This script:
1. Deploys a VM with a mycelium IP to run the garage s3 server on.
2. Installs garage on the VM.
3. Runs the garage server with the given configuration.
### Manage buckets in garage server
Manage your buckets using the manage buckets [script](./manage_buckets.sh)
```bash
export MYCELIUM_IP=<"your machine mycelium IP which has your garage server">
chmod +x manage_buckets.sh
./manage_buckets.sh
```
This script:
1. Creates two buckets in the garage server: one for `flist` and the other for `blobs`.
2. Enables web access for both buckets so they can be served.
3. Creates two keys: a read/write key and a read-only key. The `write-key` is used to upload the flist and the blobs through rfs. The flist should later be updated to reference the `read-key`, so the flist and blobs can be downloaded but not modified.
4. Attaches the keys with their permissions to the buckets.
> *NOTE:* Don't forget to save your read and write keys (ID and secret).
### Convert docker images to flist and upload it
- Convert your image to an flist. The content will be uploaded to the blobs bucket
```bash
export IMAGE=<"Your image for example `threefolddev/ubuntu:22.04`">
export WRITE_KEY_ID=<"your key ID">
export WRITE_KEY_SECRET=<"your key secret">
export MYCELIUM_IP=<"your machine mycelium IP which has your garage server">
docker2fl -i $IMAGE -s "s3://$WRITE_KEY_ID:$WRITE_KEY_SECRET@[$MYCELIUM_IP]:3900/blobs?region=garage"
```
- Update the flist to use the read-only key
```bash
sqlite3
.open "<your flist file name>"
update route set url='s3://<your read key ID>:<your read key secret>@[<your vm mycelium IP>]:3900/blobs?region=garage';
```
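You can double-check that the flist now points at the read-only key by reading the `route` table back (the file name is the same placeholder as above; the query simply echoes the URL you just set):
```bash
sqlite3 "<your flist file name>" "select url from route;"
```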
- Upload your flist to the flist bucket using the minio client (or any other S3 client).
```bash
export PATH=$PATH:$HOME/minio-binaries/
mc alias set \
garage \
"http://[$MYCELIUM_IP]:3900" \
"$WRITE_KEY_ID" \
"$WRITE_KEY_SECRET" \
--api S3v4
export FLIST_NAME=<"your flist name">
mc cp $FLIST_NAME "garage/flist/$FLIST_NAME"
```
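As a quick sanity check, you can list the `flist` bucket through the same alias to confirm the upload landed where expected:
```bash
mc ls garage/flist
```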
### Serve the flist
- Deploy a name gateway for any domain you want and get the fqdn
```bash
tfcmd deploy gateway name -n "<domain name>" --backends http://[$MYCELIUM_IP]:80
```
- Create Caddyfile
```Caddyfile
http://<fqdn> {
route /flists/* {
uri strip_prefix /flists
reverse_proxy http://127.0.0.1:3902 {
header_up Host "flist"
}
}
route /blobs/* {
uri strip_prefix /blobs
reverse_proxy http://127.0.0.1:3902 {
header_up Host "blobs"
}
}
}
```
- Run `caddy run`
Finally, you can fetch your flist from `https://<fqdn>/flists/<your flist file name>`
and your blobs from `https://<fqdn>/blobs/<your blob file name>`.
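For example, to fetch an flist and mount it with rfs (the file name below is a placeholder, and the `rfs mount` flags are assumed rather than taken from this guide — check `rfs mount --help` on your build):
```bash
# download the flist served by caddy
wget "https://<fqdn>/flists/<your flist file name>" -O myimage.fl

# mount it; blobs are fetched lazily from the read-only s3 route stored in the flist
# (the -m flag is an assumption here, consult rfs mount --help)
sudo rfs mount -m myimage.fl /mnt/myimage
```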

View File

@@ -0,0 +1,50 @@
#!/bin/bash
set -ex
# Deploy a vm for garage server with mycelium for s3 server
tfcmd deploy vm --name s3_server --ssh ~/.ssh/id_rsa.pub --cpu 8 --memory 16 --disk 50 --rootfs 10
sleep 6 # wait deployment
OUTPUT=$(tfcmd get vm s3_server 2>&1 | tail -n +3 | tr { '\n' | tr , '\n' | tr } '\n')
MYCELIUM_IP=$(echo "$OUTPUT" | grep -Eo '"mycelium_ip"[^,]*' | awk -F'"' '{print $4}')
# Expose S3 server over mycelium IP
ssh root@$MYCELIUM_IP "
wget https://garagehq.deuxfleurs.fr/_releases/v1.0.0/x86_64-unknown-linux-musl/garage
chmod +x garage
mv garage /usr/local/bin
cat > /etc/garage.toml <<EOF
metadata_dir = '/home/meta'
data_dir = '/home/data'
db_engine = 'sqlite'
replication_factor = 1
rpc_bind_addr = '[::]:3901'
rpc_public_addr = '0.0.0.0:3901'
rpc_secret = '$(openssl rand -hex 32)'
[s3_api]
s3_region = 'garage'
api_bind_addr = '[::]:3900'
root_domain = '.s3.garage.localhost'
[s3_web]
bind_addr = '[::]:3902'
root_domain = '.web.garage.localhost'
index = 'index.html'
[k2v_api]
api_bind_addr = '[::]:3904'
[admin]
api_bind_addr = '[::]:3903'
admin_token = '$(openssl rand -base64 32)'
metrics_token = '$(openssl rand -base64 32)'
EOF
garage server > output.log 2>&1 &
"

View File

@@ -0,0 +1,62 @@
#!/bin/bash
set -ex
if [ -z ${MYCELIUM_IP+x} ]
then
echo 'Error! $MYCELIUM_IP is required.'
exit 64
fi
# Create flist bucket and blobs bucket for rfs store and allow web for both
NODE_ID=$(ssh root@$MYCELIUM_IP "garage status | awk 'NR==3{print \$1}'")
ssh root@$MYCELIUM_IP "
garage layout assign -z dc1 -c 1G $NODE_ID
garage layout apply --version 1
garage bucket create blobs
garage bucket create flist
garage bucket website --allow flist
garage bucket website --allow blobs
garage bucket list
"
# We need to generate a key allowing read and write permissions
# This allows us to upload files to the s3 server buckets
WRITE_KEY_INFO=$(ssh root@$MYCELIUM_IP "garage key create write-rfs-key | awk 'NR==2{print \$3}NR==3{print \$3}'")
WRITE_KEY_ID=$(echo $WRITE_KEY_INFO | awk '{print $1}')
WRITE_KEY_SECRET=$(echo $WRITE_KEY_INFO | awk '{print $2}')
# We need to generate a key allowing read-only permission
# This allows us to only download files from the s3 server buckets
# After generating the flist it should be updated to reference the read key only
READ_KEY_INFO=$(ssh root@$MYCELIUM_IP "garage key create read-rfs-key | awk 'NR==2{print \$3}NR==3{print \$3}'")
READ_KEY_ID=$(echo $READ_KEY_INFO | awk '{print $1}')
READ_KEY_SECRET=$(echo $READ_KEY_INFO | awk '{print $2}')
ssh root@$MYCELIUM_IP "
garage bucket allow \
--read \
--write \
--owner \
flist \
--key write-rfs-key
garage bucket allow \
--read \
--write \
--owner \
blobs \
--key write-rfs-key
garage bucket allow \
--read \
flist \
--key read-rfs-key
garage bucket allow \
--read \
blobs \
--key read-rfs-key
# "