Squashed 'components/rfs/' content from commit 9808a5e
git-subtree-dir: components/rfs git-subtree-split: 9808a5e9fc768edc7d8b1dfa5b91b3f018dff0cb
This commit is contained in:
5
rfs/.envrc
Normal file
5
rfs/.envrc
Normal file
@@ -0,0 +1,5 @@
|
||||
# Bootstrap nix-direnv (pinned to 3.0.4) when it is missing or outdated,
# then load the project's nix flake development environment.
if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
fi

use flake
|
||||
67
rfs/Cargo.toml
Normal file
67
rfs/Cargo.toml
Normal file
@@ -0,0 +1,67 @@
|
||||
[package]
|
||||
name = "rfs"
|
||||
version = "0.2.0"
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[build-dependencies]
|
||||
git-version = "0.3.5"
|
||||
|
||||
[[bin]]
|
||||
name = "rfs"
|
||||
path = "src/main.rs"
|
||||
required-features = ["build-binary"]
|
||||
|
||||
[features]
|
||||
build-binary = [
|
||||
"dep:polyfuse",
|
||||
"dep:simple_logger",
|
||||
"dep:tempfile",
|
||||
"dep:daemonize",
|
||||
"dep:clap"
|
||||
]
|
||||
|
||||
[lib]
|
||||
name = "rfs"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.44"
|
||||
time = "0.3"
|
||||
sqlx = { version = "0.7.4", features = [ "runtime-tokio-rustls", "sqlite" ] }
|
||||
tokio = { version = "1", features = [ "rt", "rt-multi-thread", "macros"] }
|
||||
libc = "0.2"
|
||||
futures = "0.3"
|
||||
thiserror = "1.0"
|
||||
bytes = "1.1.0"
|
||||
log = "0.4"
|
||||
lru = "0.7.0"
|
||||
nix = "0.23.0"
|
||||
snap = "1.0.5"
|
||||
bb8-redis = "0.13"
|
||||
async-trait = "0.1.53"
|
||||
url = "2.3.1"
|
||||
blake2b_simd = "1"
|
||||
aes-gcm = "0.10"
|
||||
hex = "0.4"
|
||||
rand = "0.8"
|
||||
# next are only needed for the binaries
|
||||
clap = { version = "4.2", features = ["derive"], optional = true}
|
||||
simple_logger = {version = "1.0.1", optional = true}
|
||||
daemonize = { version = "0.5", optional = true }
|
||||
tempfile = { version = "3.3.0", optional = true }
|
||||
workers = { git="https://github.com/threefoldtech/tokio-worker-pool.git" }
|
||||
rust-s3 = "0.34.0-rc3"
|
||||
openssl = { version = "0.10", features = ["vendored"] }
|
||||
regex = "1.9.6"
|
||||
which = "6.0"
|
||||
reqwest = "0.11"
|
||||
|
||||
[dependencies.polyfuse]
|
||||
branch = "master"
|
||||
git = "https://github.com/muhamadazmy/polyfuse"
|
||||
optional = true
|
||||
|
||||
[dev-dependencies]
|
||||
reqwest = { version = "0.11", features = ["blocking"] }
|
||||
assert_cmd = "2.0"
|
||||
149
rfs/README.md
Normal file
149
rfs/README.md
Normal file
@@ -0,0 +1,149 @@
|
||||
|
||||
# Introduction
|
||||
|
||||
`rfs` is the main tool to create, mount and extract FungiStore lists (FungiList)`fl` for short. An `fl` is a simple format
|
||||
to keep information about an entire filesystem in a compact form. It does not hold the data itself but enough information to
|
||||
retrieve this data back from a `store`.
|
||||
|
||||
## Building rfs
|
||||
|
||||
To build rfs make sure you have rust installed then run the following commands:
|
||||
|
||||
```bash
|
||||
# this is needed to be run once to make sure the musl target is installed
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
|
||||
# build the binary
|
||||
cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
|
||||
```
|
||||
|
||||
the binary will be available under `./target/x86_64-unknown-linux-musl/release/rfs` you can copy that binary then to `/usr/bin/`
|
||||
to be able to use from anywhere on your system.
|
||||
|
||||
## Stores
|
||||
|
||||
A store is where the actual data lives. A store can be as simple as a `directory` on your local machine; in that case the files in the `fl` are only 'accessible' on your local machine. A store can also be a `zdb` running remotely or a cluster of `zdb`. Right now only `dir`, `http`, `zdb` and `s3` stores are supported but this will change in the future to support even more stores.
|
||||
|
||||
## Usage
|
||||
|
||||
### Creating an `fl`
|
||||
|
||||
```bash
|
||||
rfs pack -m output.fl -s <store-specs> <directory>
|
||||
```
|
||||
|
||||
This tells rfs to create an `fl` named `output.fl` using the store defined by the url `<store-specs>` and upload all the files under directory recursively.
|
||||
|
||||
The simplest form of `<store-specs>` is a `url`. The store `url` defines the store to use. Any `url` has a schema that defines the store type. Right now we have support only for:
|
||||
|
||||
- `dir`: dir is a very simple store that is mostly used for testing. A dir store will store the fs blobs in another location defined by the url path. An example of a valid dir url is `dir:///tmp/store`
|
||||
- `zdb`: [zdb](https://github.com/threefoldtech/0-db) is an append-only key value store and provides a redis like API. An example zdb url can be something like `zdb://<hostname>[:port][/namespace]`
|
||||
- `s3`: aws-s3 is used for storing and retrieving large amounts of data (blobs) in buckets (directories). An example `s3://<username>:<password>@<host>:<port>/<bucket-name>`
|
||||
|
||||
`region` is an optional param for s3 stores, if you want to provide one you can add it as a query to the url `?region=<region-name>`
|
||||
- `http`: http is a store mostly used for wrapping a dir store to fetch data through http requests. It does not support uploading, just fetching the data.
|
||||
It can be set in the FL file as the store to fetch the data with `rfs config`. Example: `http://localhost:9000/store` (https works too).
|
||||
|
||||
`<store-specs>` can also be of the form `<start>-<end>=<url>` where `start` and `end` are hex bytes used for partitioning of blob keys. rfs will then store a set of blobs on the defined store if the blob key falls in the `[start:end]` range (inclusive).
|
||||
|
||||
If the `start-end` range is not provided, a `00-FF` range is assumed — basically a catch-all range for the blob keys. In other words, all blobs will be written to that store.
|
||||
|
||||
This is only useful because `rfs` can accept multiple stores on the command line with different and/or overlapping ranges.
|
||||
|
||||
For example `-s 00-80=dir:///tmp/store0 -s 81-ff=dir:///tmp/store1` means all keys that has prefix byte in range `[00-80]` will be written to /tmp/store0 all other keys `[81-ff]` will be written to store1.
|
||||
|
||||
The same range can appear multiple times, which means the blob will be replicated to all the stores that matches its key prefix.
|
||||
|
||||
To quickly test this operation
|
||||
|
||||
```bash
|
||||
rfs pack -m output.fl -s 00-80=dir:///tmp/store0 -s 81-ff=dir:///tmp/store1 ~/Documents
|
||||
```
|
||||
|
||||
this command will effectively create the `output.fl` and store (and shard) the blobs across the 2 locations /tmp/store0 and /tmp/store1.
|
||||
|
||||
```bash
|
||||
#rfs pack --help
|
||||
|
||||
create an FL and upload blocks to provided storage
|
||||
|
||||
Usage: rfs pack [OPTIONS] --meta <META> <TARGET>
|
||||
|
||||
Arguments:
|
||||
<TARGET> target directory to upload
|
||||
|
||||
Options:
|
||||
-m, --meta <META> path to metadata file (flist)
|
||||
-s, --store <STORE> store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for sharding. the URL is per store type, please check docs for more information
|
||||
--no-strip-password disables automatic password stripping from store url, otherwise password will be stored in the fl.
|
||||
-h, --help Print help
|
||||
```
|
||||
|
||||
#### Password stripping
|
||||
|
||||
During creation of an flist you will probably provide a password in the URL of the store. This is normally needed to allow write operation to the store (say s3 bucket)
|
||||
Normally this password is removed from the store info so it's safe to ship the fl to users. A user of the flist then will only have read access, if configured correctly
|
||||
in the store
|
||||
|
||||
For example a `zdb` store has the notion of a public namespace which is password protected for writes, but open for reads. An S3 bucket can have the policy to allow public reads, but protected writes (minio supports that via bucket settings)
|
||||
|
||||
If you wanna disable the password stripping from the store url, you can provide the `--no-strip-password` flag during creation. This also means someone can extract
|
||||
this information from the fl and gain write access to your store, so be careful how you use it.
|
||||
|
||||
# Mounting an `fl`
|
||||
|
||||
Once the `fl` is created it can be distributed to other people. Then they can mount the `fl`, which will allow them to traverse the packed filesystem and also access (read-only) the files.
|
||||
|
||||
To mount an `fl` only the `fl` is needed since all information regarding the `stores` is already stored in the `fl`. This also means you can only share the `fl` if the other user can actually reach the store used to create the `fl`. So a `dir` store is not sharable, also a `zdb` instance that is running on localhost :no_good:
|
||||
|
||||
```bash
|
||||
sudo rfs mount -m output.fl <target>
|
||||
```
|
||||
|
||||
The `<target>` is the mount location, usually `/mnt` but can be anywhere. In another terminal you can now `cd <target>` and walk the filesystem tree. Opening the files will trigger a file download from the store only on read access.
|
||||
|
||||
full command help
|
||||
|
||||
```bash
|
||||
# rfs mount --help
|
||||
|
||||
mount an FL
|
||||
|
||||
Usage: rfs mount [OPTIONS] --meta <META> <TARGET>
|
||||
|
||||
Arguments:
|
||||
<TARGET> target mountpoint
|
||||
|
||||
Options:
|
||||
-m, --meta <META> path to metadata file (flist)
|
||||
-c, --cache <CACHE> directory used as cache for downloaded file chuncks [default: /tmp/cache]
|
||||
-d, --daemon run in the background
|
||||
-l, --log <LOG> log file only used with daemon mode
|
||||
-h, --help Print help
|
||||
```
|
||||
|
||||
# Unpack an `fl`
|
||||
|
||||
Similar to `mount` rfs provides an `unpack` subcommand that downloads the entire content (extract) of an `fl` to a provided directory.
|
||||
|
||||
```bash
|
||||
rfs unpack --help
|
||||
unpack (downloads) content of an FL the provided location
|
||||
|
||||
Usage: rfs unpack [OPTIONS] --meta <META> <TARGET>
|
||||
|
||||
Arguments:
|
||||
<TARGET> target directory to upload
|
||||
|
||||
Options:
|
||||
-m, --meta <META> path to metadata file (flist)
|
||||
-c, --cache <CACHE> directory used as cache for downloaded file chuncks [default: /tmp/cache]
|
||||
-p, --preserve-ownership preserve files ownership from the FL, otherwise use the current user ownership setting this flag to true normally requires sudo
|
||||
-h, --help Print help
|
||||
```
|
||||
|
||||
By default when unpacking the `-p` flag is not set. which means downloaded files will be `owned` by the current user/group. If `-p` flag is set, the files ownership will be same as the original files used to create the fl (preserve `uid` and `gid` of the files and directories) this normally requires `sudo` while unpacking.
|
||||
|
||||
# Specifications
|
||||
|
||||
Please check [docs](../docs)
|
||||
9
rfs/build.rs
Normal file
9
rfs/build.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
fn main() {
|
||||
println!(
|
||||
"cargo:rustc-env=GIT_VERSION={}",
|
||||
git_version::git_version!(
|
||||
args = ["--tags", "--always", "--dirty=-modified"],
|
||||
fallback = "unknown"
|
||||
)
|
||||
);
|
||||
}
|
||||
105
rfs/flake.lock
generated
Normal file
105
rfs/flake.lock
generated
Normal file
@@ -0,0 +1,105 @@
|
||||
{
|
||||
"nodes": {
|
||||
"crane": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1709610799,
|
||||
"narHash": "sha256-5jfLQx0U9hXbi2skYMGodDJkIgffrjIOgMRjZqms2QE=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "81c393c776d5379c030607866afef6406ca1be57",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1709126324,
|
||||
"narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "d465f4819400de7c8d874d50b982301f28a84605",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "flake-utils",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1709677081,
|
||||
"narHash": "sha256-tix36Y7u0rkn6mTm0lA45b45oab2cFLqAzDbJxeXS+c=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "880992dcc006a5e00dd0591446fdf723e6a51a64",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-23.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"crane": "crane",
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"rust-overlay": "rust-overlay"
|
||||
}
|
||||
},
|
||||
"rust-overlay": {
|
||||
"inputs": {
|
||||
"flake-utils": [
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1712542394,
|
||||
"narHash": "sha256-UZebDBECRSrJqw4K+LxZ6qFdYnScu6q1XCwqtsu1cas=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "ece8bdb3c3b58def25f204b9a1261dee55d7c9c0",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
71
rfs/flake.nix
Normal file
71
rfs/flake.nix
Normal file
@@ -0,0 +1,71 @@
|
||||
{
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11";
|
||||
|
||||
crane.url = "github:ipetkov/crane";
|
||||
crane.inputs.nixpkgs.follows = "nixpkgs";
|
||||
|
||||
flake-utils.inputs.nixpkgs.follows = "nixpkgs";
|
||||
|
||||
rust-overlay = {
|
||||
url = "github:oxalica/rust-overlay";
|
||||
inputs = {
|
||||
nixpkgs.follows = "nixpkgs";
|
||||
flake-utils.follows = "flake-utils";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
outputs = {
|
||||
self,
|
||||
nixpkgs,
|
||||
crane,
|
||||
flake-utils,
|
||||
rust-overlay,
|
||||
}:
|
||||
flake-utils.lib.eachSystem
|
||||
[
|
||||
flake-utils.lib.system.x86_64-linux
|
||||
flake-utils.lib.system.aarch64-linux
|
||||
flake-utils.lib.system.aarch64-darwin
|
||||
] (system: let
|
||||
pkgs = import nixpkgs {
|
||||
inherit system;
|
||||
overlays = [(import rust-overlay)];
|
||||
};
|
||||
|
||||
customToolchain = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml;
|
||||
craneLib = (crane.mkLib pkgs).overrideToolchain customToolchain;
|
||||
in {
|
||||
devShells.default = craneLib.devShell {
|
||||
packages = [
|
||||
pkgs.rust-analyzer
|
||||
];
|
||||
|
||||
RUST_SRC_PATH = "${pkgs.rustPlatform.rustLibSrc}";
|
||||
};
|
||||
packages.default = craneLib.buildPackage {
|
||||
src = self;
|
||||
|
||||
# 2024-03-07 failing test:
|
||||
# > thread 'test::pack_unpack' has overflowed its stack
|
||||
# > fatal runtime error: stack overflow
|
||||
# > error: test failed, to rerun pass `--lib`
|
||||
#
|
||||
# apparently needs `RUST_MIN_STACK: 8388608` according to https://github.com/threefoldtech/rfs/blob/eae5186cc6b0f8704f3e4715d2e3644f1f3baa2c/.github/workflows/tests.yaml#L25C1-L25C34
|
||||
doCheck = false;
|
||||
|
||||
cargoExtraArgs = "--bin rfs --features=build-binary";
|
||||
|
||||
nativeBuildInputs = [
|
||||
pkgs.perl
|
||||
pkgs.pkg-config
|
||||
];
|
||||
|
||||
buildInputs = [
|
||||
pkgs.openssl
|
||||
pkgs.openssl.dev
|
||||
];
|
||||
};
|
||||
});
|
||||
}
|
||||
3
rfs/rust-toolchain.toml
Normal file
3
rfs/rust-toolchain.toml
Normal file
@@ -0,0 +1,3 @@
|
||||
[toolchain]
|
||||
channel = "1.74.0"
|
||||
|
||||
46
rfs/schema/schema.sql
Normal file
46
rfs/schema/schema.sql
Normal file
@@ -0,0 +1,46 @@
|
||||
-- inode table and main entrypoint of the schema
CREATE TABLE IF NOT EXISTS inode (
    ino INTEGER PRIMARY KEY AUTOINCREMENT,
    parent INTEGER,
    name VARCHAR(255),
    size INTEGER,
    uid INTEGER,
    gid INTEGER,
    mode INTEGER,
    rdev INTEGER,
    ctime INTEGER,
    mtime INTEGER
);

CREATE INDEX IF NOT EXISTS parents ON inode (parent);
CREATE INDEX IF NOT EXISTS names ON inode (name);

-- extra data for each inode for special types (say link targets)
CREATE TABLE IF NOT EXISTS extra (
    ino INTEGER PRIMARY KEY,
    data VARCHAR(4096)
);

-- blocks per file, order of insertion is important
CREATE TABLE IF NOT EXISTS block (
    ino INTEGER,
    id VARCHAR(32),
    key VARCHAR(32)
);

CREATE INDEX IF NOT EXISTS block_ino ON block (ino);

-- global flist tags, this can include values like `version`, `description`, `block-size`, etc..
-- it can also hold extra user-defined tags for extensions
CREATE TABLE IF NOT EXISTS tag (
    key VARCHAR(10) PRIMARY KEY,
    value VARCHAR(255)
);

-- routing table defines ranges where blobs can be found. This allows "sharding" by being
-- able to retrieve blobs from different partitions using the one-byte key prefix range
CREATE TABLE IF NOT EXISTS route (
    start integer, -- one byte hash prefix
    end integer, -- one byte hash prefix
    url VARCHAR(2048)
);
|
||||
151
rfs/src/cache/mod.rs
vendored
Normal file
151
rfs/src/cache/mod.rs
vendored
Normal file
@@ -0,0 +1,151 @@
|
||||
use crate::fungi::meta::Block;
|
||||
use crate::store::{BlockStore, Store};
|
||||
use anyhow::{Context, Result};
|
||||
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::PathBuf;
|
||||
use tokio::fs::{self, File, OpenOptions};
|
||||
use tokio::io::{AsyncSeekExt, AsyncWriteExt};
|
||||
|
||||
/// Cache implements a caching layer on top of a block store
|
||||
//#[derive(Clone)]
|
||||
pub struct Cache<S: Store> {
|
||||
store: BlockStore<S>,
|
||||
root: PathBuf,
|
||||
}
|
||||
|
||||
impl<S> Cache<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
pub fn new<P>(root: P, store: S) -> Self
|
||||
where
|
||||
P: Into<PathBuf>,
|
||||
{
|
||||
Cache {
|
||||
store: store.into(),
|
||||
root: root.into(),
|
||||
}
|
||||
}
|
||||
|
||||
// download given an open file, writes the content of the chunk to the file
|
||||
async fn download(&self, file: &mut File, block: &Block) -> Result<u64> {
|
||||
let data = self.store.get(block).await?;
|
||||
file.write_all(&data).await?;
|
||||
|
||||
Ok(data.len() as u64)
|
||||
}
|
||||
|
||||
async fn prepare(&self, id: &[u8]) -> Result<File> {
|
||||
let name = id.hex();
|
||||
if name.len() < 4 {
|
||||
anyhow::bail!("invalid chunk hash");
|
||||
}
|
||||
let path = self.root.join(&name[0..2]).join(&name[2..4]);
|
||||
fs::create_dir_all(&path).await?;
|
||||
let path = path.join(name);
|
||||
|
||||
let file = OpenOptions::new()
|
||||
.create(true)
|
||||
.read(true)
|
||||
.write(true)
|
||||
.truncate(false)
|
||||
.open(path)
|
||||
.await?;
|
||||
|
||||
Ok(file)
|
||||
}
|
||||
|
||||
/// get a file block either from cache or from remote if it's already
|
||||
/// not cached
|
||||
pub async fn get(&self, block: &Block) -> Result<(u64, File)> {
|
||||
let mut file = self
|
||||
.prepare(&block.id)
|
||||
.await
|
||||
.context("failed to prepare cache block")?;
|
||||
// TODO: locking must happen here so no
|
||||
// other processes start downloading the same chunk
|
||||
let locker = Locker::new(&file);
|
||||
locker.lock().await?;
|
||||
|
||||
let meta = file
|
||||
.metadata()
|
||||
.await
|
||||
.context("failed to get block metadata")?;
|
||||
if meta.len() > 0 {
|
||||
// chunk is already downloaded
|
||||
debug!("block cache hit: {}", block.id.as_slice().hex());
|
||||
locker.unlock().await?;
|
||||
return Ok((meta.len(), file));
|
||||
}
|
||||
|
||||
debug!("downloading block with key: {}", block.id.as_slice().hex());
|
||||
let size = self
|
||||
.download(&mut file, block)
|
||||
.await
|
||||
.context("failed to download block")?;
|
||||
|
||||
// if file is just downloaded, we need
|
||||
// to seek to beginning of the file.
|
||||
file.rewind().await?;
|
||||
|
||||
locker.unlock().await?;
|
||||
Ok((size, file))
|
||||
}
|
||||
|
||||
/// direct downloads all the file blocks from remote and write it to output
|
||||
#[allow(dead_code)]
|
||||
pub async fn direct(&self, blocks: &[Block], out: &mut File) -> Result<()> {
|
||||
use tokio::io::copy;
|
||||
for (index, block) in blocks.iter().enumerate() {
|
||||
let (_, mut chunk) = self.get(block).await?;
|
||||
copy(&mut chunk, out)
|
||||
.await
|
||||
.with_context(|| format!("failed to copy block {}", index))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Locker {
|
||||
fd: std::os::unix::io::RawFd,
|
||||
}
|
||||
|
||||
impl Locker {
|
||||
pub fn new(f: &File) -> Locker {
|
||||
Locker { fd: f.as_raw_fd() }
|
||||
}
|
||||
|
||||
pub async fn lock(&self) -> Result<()> {
|
||||
let fd = self.fd;
|
||||
tokio::task::spawn_blocking(move || {
|
||||
nix::fcntl::flock(fd, nix::fcntl::FlockArg::LockExclusive)
|
||||
})
|
||||
.await
|
||||
.context("failed to spawn file locking")?
|
||||
.context("failed to lock file")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn unlock(&self) -> Result<()> {
|
||||
let fd = self.fd;
|
||||
tokio::task::spawn_blocking(move || nix::fcntl::flock(fd, nix::fcntl::FlockArg::Unlock))
|
||||
.await
|
||||
.context("failed to spawn file lunlocking")?
|
||||
.context("failed to unlock file")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Small helper to render binary ids as lowercase hex strings for
/// cache paths and log messages.
trait Hex {
    fn hex(&self) -> String;
}

impl Hex for &[u8] {
    fn hex(&self) -> String {
        use std::fmt::Write;
        let mut out = String::with_capacity(self.len() * 2);
        for byte in self.iter() {
            // writing into a String cannot fail
            let _ = write!(out, "{:02x}", byte);
        }
        out
    }
}
|
||||
128
rfs/src/clone.rs
Normal file
128
rfs/src/clone.rs
Normal file
@@ -0,0 +1,128 @@
|
||||
use crate::{
|
||||
cache::Cache,
|
||||
fungi::{meta::Block, Reader, Result},
|
||||
store::{BlockStore, Store},
|
||||
};
|
||||
use anyhow::Error;
|
||||
use futures::lock::Mutex;
|
||||
use hex::ToHex;
|
||||
use std::sync::Arc;
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
const WORKERS: usize = 10;
|
||||
|
||||
pub async fn clone<S: Store>(reader: Reader, store: S, cache: Cache<S>) -> Result<()> {
|
||||
let failures = Arc::new(Mutex::new(Vec::new()));
|
||||
let cloner = BlobCloner::new(cache, store.into(), failures.clone());
|
||||
let mut workers = workers::WorkerPool::new(cloner, WORKERS);
|
||||
|
||||
let mut offset = 0;
|
||||
loop {
|
||||
if !failures.lock().await.is_empty() {
|
||||
break;
|
||||
}
|
||||
let blocks = reader.all_blocks(1000, offset).await?;
|
||||
if blocks.is_empty() {
|
||||
break;
|
||||
}
|
||||
for block in blocks {
|
||||
offset += 1;
|
||||
let worker = workers.get().await;
|
||||
worker.send(block)?;
|
||||
}
|
||||
}
|
||||
|
||||
workers.close().await;
|
||||
let failures = failures.lock().await;
|
||||
|
||||
if failures.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
log::error!("failed to clone one or more blocks");
|
||||
for (block, error) in failures.iter() {
|
||||
log::error!(" - failed to clone block {}: {}", block, error);
|
||||
}
|
||||
|
||||
Err(crate::fungi::Error::Anyhow(anyhow::anyhow!(
|
||||
"failed to clone ({}) blocks",
|
||||
failures.len()
|
||||
)))
|
||||
}
|
||||
|
||||
struct BlobCloner<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
cache: Arc<Cache<S>>,
|
||||
store: Arc<BlockStore<S>>,
|
||||
failures: Arc<Mutex<Vec<(String, Error)>>>,
|
||||
}
|
||||
|
||||
impl<S> Clone for BlobCloner<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
cache: self.cache.clone(),
|
||||
store: self.store.clone(),
|
||||
failures: self.failures.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> BlobCloner<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
fn new(
|
||||
cache: Cache<S>,
|
||||
store: BlockStore<S>,
|
||||
failures: Arc<Mutex<Vec<(String, Error)>>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
cache: Arc::new(cache),
|
||||
store: Arc::new(store),
|
||||
failures,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<S> workers::Work for BlobCloner<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
type Input = Block;
|
||||
type Output = ();
|
||||
|
||||
async fn run(&mut self, block: Self::Input) -> Self::Output {
|
||||
let mut file = match self.cache.get(&block).await {
|
||||
Ok((_, f)) => f,
|
||||
Err(err) => {
|
||||
self.failures
|
||||
.lock()
|
||||
.await
|
||||
.push((block.id.as_slice().encode_hex(), err));
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let mut data = Vec::new();
|
||||
if let Err(err) = file.read_to_end(&mut data).await {
|
||||
self.failures
|
||||
.lock()
|
||||
.await
|
||||
.push((block.id.as_slice().encode_hex(), err.into()));
|
||||
return;
|
||||
}
|
||||
if let Err(err) = self.store.set(&data).await {
|
||||
self.failures
|
||||
.lock()
|
||||
.await
|
||||
.push((block.id.as_slice().encode_hex(), err.into()));
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
72
rfs/src/config.rs
Normal file
72
rfs/src/config.rs
Normal file
@@ -0,0 +1,72 @@
|
||||
use crate::{
|
||||
fungi::{meta::Tag, Reader, Result, Writer},
|
||||
store::{self, Store},
|
||||
};
|
||||
|
||||
/// Print all tags stored in the flist, one `key=value` per line.
pub async fn tag_list(reader: Reader) -> Result<()> {
    let tags = reader.tags().await?;
    if !tags.is_empty() {
        println!("tags:");
    }
    tags.into_iter()
        .for_each(|(key, value)| println!("\t{}={}", key, value));
    Ok(())
}
|
||||
|
||||
pub async fn tag_add(writer: Writer, tags: Vec<(String, String)>) -> Result<()> {
|
||||
for (key, value) in tags {
|
||||
writer.tag(Tag::Custom(key.as_str()), value).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn tag_delete(writer: Writer, keys: Vec<String>, all: bool) -> Result<()> {
|
||||
if all {
|
||||
writer.delete_tags().await?;
|
||||
return Ok(());
|
||||
}
|
||||
for key in keys {
|
||||
writer.delete_tag(Tag::Custom(key.as_str())).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Print every store route configured in the flist.
pub async fn store_list(reader: Reader) -> Result<()> {
    let routes = reader.routes().await?;
    if !routes.is_empty() {
        println!("routes:")
    }
    for route in routes {
        let (start, end, url) = (route.start, route.end, route.url);
        println!("\trange:[{}-{}] store:{}", start, end, url);
    }
    Ok(())
}
|
||||
|
||||
pub async fn store_add(writer: Writer, stores: Vec<String>) -> Result<()> {
|
||||
let store = store::parse_router(stores.as_slice()).await?;
|
||||
for route in store.routes() {
|
||||
writer
|
||||
.route(
|
||||
route.start.unwrap_or(u8::MIN),
|
||||
route.end.unwrap_or(u8::MAX),
|
||||
route.url,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete the given store routes from the flist; with `all` set, wipe
/// every route instead (`stores` is then ignored).
pub async fn store_delete(writer: Writer, stores: Vec<String>, all: bool) -> Result<()> {
    if all {
        writer.delete_routes().await?;
        return Ok(());
    }
    for store in stores.into_iter() {
        writer.delete_route(store).await?;
    }
    Ok(())
}
|
||||
407
rfs/src/fs/mod.rs
Normal file
407
rfs/src/fs/mod.rs
Normal file
@@ -0,0 +1,407 @@
|
||||
#![allow(clippy::unnecessary_mut_passed)]
|
||||
#![deny(clippy::unimplemented, clippy::todo)]
|
||||
|
||||
use crate::cache;
|
||||
use crate::fungi::{
|
||||
meta::{FileType, Inode},
|
||||
Reader,
|
||||
};
|
||||
use crate::store::Store;
|
||||
|
||||
use anyhow::{ensure, Context, Result};
|
||||
use polyfuse::reply::FileAttr;
|
||||
use polyfuse::{
|
||||
op,
|
||||
reply::{AttrOut, EntryOut, ReaddirOut, StatfsOut},
|
||||
KernelConfig, Operation, Request, Session,
|
||||
};
|
||||
use std::io::SeekFrom;
|
||||
use std::sync::Arc;
|
||||
use std::{io, path::PathBuf, time::Duration};
|
||||
use tokio::fs::File;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::{
|
||||
io::{unix::AsyncFd, AsyncReadExt, AsyncSeekExt, Interest},
|
||||
task::{self, JoinHandle},
|
||||
};
|
||||
|
||||
const CHUNK_SIZE: usize = 512 * 1024; // 512k and is hardcoded in the hub. the block_size value is not used
|
||||
const TTL: Duration = Duration::from_secs(60 * 60 * 24 * 365);
|
||||
const LRU_CAP: usize = 5; // Least Recently Used File Capacity
|
||||
const FS_BLOCK_SIZE: u32 = 4 * 1024;
|
||||
|
||||
type FHash = [u8; 32];
|
||||
type BlockSize = u64;
|
||||
|
||||
pub struct Filesystem<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
meta: Reader,
|
||||
cache: Arc<cache::Cache<S>>,
|
||||
lru: Arc<Mutex<lru::LruCache<FHash, (File, BlockSize)>>>,
|
||||
}
|
||||
|
||||
impl<S> Clone for Filesystem<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
meta: self.meta.clone(),
|
||||
cache: Arc::clone(&self.cache),
|
||||
lru: Arc::clone(&self.lru),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Filesystem<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
pub fn new(meta: Reader, cache: cache::Cache<S>) -> Self {
|
||||
Filesystem {
|
||||
meta,
|
||||
cache: Arc::new(cache),
|
||||
lru: Arc::new(Mutex::new(lru::LruCache::new(LRU_CAP))),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn mount<P>(&self, mnt: P) -> Result<()>
|
||||
where
|
||||
P: Into<PathBuf>,
|
||||
{
|
||||
let mountpoint: PathBuf = mnt.into();
|
||||
ensure!(mountpoint.is_dir(), "mountpoint must be a directory");
|
||||
let mut options = KernelConfig::default();
|
||||
options.mount_option(&format!(
|
||||
"ro,allow_other,fsname={},subtype=g8ufs,default_permissions",
|
||||
std::process::id()
|
||||
));
|
||||
|
||||
// polyfuse assumes an absolute path, see https://github.com/ubnt-intrepid/polyfuse/issues/83
|
||||
let fusermount_path =
|
||||
which::which("fusermount").context("looking up 'fusermount' in PATH")?;
|
||||
options.fusermount_path(fusermount_path);
|
||||
|
||||
let session = AsyncSession::mount(mountpoint, options).await?;
|
||||
|
||||
// release here
|
||||
while let Some(req) = session.next_request().await? {
|
||||
let fs = self.clone();
|
||||
|
||||
let handler: JoinHandle<Result<()>> = task::spawn(async move {
|
||||
let result = match req.operation()? {
|
||||
Operation::Lookup(op) => fs.lookup(&req, op).await,
|
||||
Operation::Getattr(op) => fs.getattr(&req, op).await,
|
||||
Operation::Read(op) => fs.read(&req, op).await,
|
||||
Operation::Readdir(op) => fs.readdir(&req, op).await,
|
||||
Operation::Readlink(op) => fs.readlink(&req, op).await,
|
||||
Operation::Statfs(op) => fs.statfs(&req, op).await,
|
||||
op => {
|
||||
debug!("function is not implemented: {:?}", op);
|
||||
Ok(req.reply_error(libc::ENOSYS)?)
|
||||
}
|
||||
};
|
||||
|
||||
if result.is_err() {
|
||||
req.reply_error(libc::ENOENT)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
});
|
||||
|
||||
drop(handler);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn statfs(&self, req: &Request, _op: op::Statfs<'_>) -> Result<()> {
|
||||
let mut out = StatfsOut::default();
|
||||
let stats = out.statfs();
|
||||
stats.bsize(FS_BLOCK_SIZE);
|
||||
req.reply(out)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn readlink(&self, req: &Request, op: op::Readlink<'_>) -> Result<()> {
|
||||
let link = self.meta.inode(op.ino()).await?;
|
||||
if !link.mode.is(FileType::Link) {
|
||||
return Ok(req.reply_error(libc::ENOLINK)?);
|
||||
}
|
||||
|
||||
if let Some(target) = link.data {
|
||||
req.reply(target)?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Ok(req.reply_error(libc::ENOLINK)?)
|
||||
}
|
||||
|
||||
/// Handle a FUSE read for a regular file.
///
/// File content is split into fixed-size chunks (CHUNK_SIZE) addressed by a
/// block hash. The chunk covering `op.offset()` is located, then chunks are
/// consumed in order until `op.size()` bytes are collected. Open chunk
/// descriptors are kept in an LRU keyed by block hash; on a miss the chunk is
/// fetched through the cache.
async fn read(&self, req: &Request, op: op::Read<'_>) -> Result<()> {
    let entry = self.meta.inode(op.ino()).await?;

    if !entry.mode.is(FileType::Regular) {
        return Ok(req.reply_error(libc::EISDIR)?);
    };

    let offset = op.offset() as usize;
    let size = op.size() as usize;
    let chunk_size = CHUNK_SIZE; // file.block_size as usize;
    // index of the first chunk touched by this read
    let chunk_index = offset / chunk_size;

    let blocks = self.meta.blocks(op.ino()).await?;

    if chunk_index >= blocks.len() || op.size() == 0 {
        // reading after the end of the file
        let data: &[u8] = &[];
        return Ok(req.reply(data)?);
    }

    // offset inside the file
    let mut offset = offset - (chunk_index * chunk_size);
    let mut buf: Vec<u8> = vec![0; size];
    let mut total = 0;

    'blocks: for block in blocks.iter().skip(chunk_index) {
        // hash works as a key inside the LRU
        let hash = block.id;

        // getting the file descriptor from the LRU or from the cache if not found in the LRU
        let lru = self.lru.lock().await.pop(&hash);

        let (mut fd, block_size) = match lru {
            Some((descriptor, bsize)) => {
                debug!("lru hit");
                (descriptor, bsize)
            }
            None => {
                let (bsize, descriptor) = match self.cache.get(block).await {
                    Ok(out) => out,
                    Err(err) => {
                        error!("io cache error: {:#}", err);
                        return Ok(req.reply_error(libc::EIO)?);
                    }
                };
                (descriptor, bsize)
            }
        };

        // seek to the position <offset>
        fd.seek(SeekFrom::Start(offset as u64)).await?;

        // position inside the current chunk; used to decide whether the
        // descriptor is worth putting back into the LRU
        let mut chunk_offset = offset as u64;

        loop {
            // read the file bytes into buf
            let read = match fd.read(&mut buf[total..]).await {
                Ok(n) => n,
                Err(err) => {
                    error!("read error: {:#}", err);
                    return Ok(req.reply_error(libc::EIO)?);
                }
            };

            chunk_offset += read as u64;

            // calculate the total size and break if the required bytes (=size) downloaded
            total += read;

            if total >= size {
                // if only part of the block read -> store it in the lruf
                if chunk_offset < block_size {
                    let mut lruf = self.lru.lock().await;
                    lruf.put(hash, (fd, block_size));
                }

                break 'blocks;
            }

            // read = 0 means the EOF (end of the block)
            if read == 0 {
                break;
            }
        }

        // subsequent chunks are read from their beginning
        offset = 0;
    }

    // NOTE(review): the reply is always exactly `size` bytes; if the file
    // ended early the tail stays zero-filled — presumably the kernel clips
    // via the size reported by getattr. Confirm.
    Ok(req.reply(&buf[..size])?)
}
|
||||
|
||||
async fn getattr(&self, req: &Request, op: op::Getattr<'_>) -> Result<()> {
|
||||
log::debug!("getattr({})", op.ino());
|
||||
|
||||
let entry = self.meta.inode(op.ino()).await?;
|
||||
|
||||
let mut attr = AttrOut::default();
|
||||
|
||||
let fill = attr.attr();
|
||||
entry.fill(fill);
|
||||
|
||||
req.reply(attr)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn readdir(&self, req: &Request, op: op::Readdir<'_>) -> Result<()> {
|
||||
log::debug!("readdir({})", op.ino());
|
||||
let root = self.meta.inode(op.ino()).await?;
|
||||
|
||||
if !root.mode.is(FileType::Dir) {
|
||||
req.reply_error(libc::ENOTDIR)?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut out = ReaddirOut::new(op.size() as usize);
|
||||
let mut offset = op.offset();
|
||||
|
||||
let mut query_offset = offset;
|
||||
if offset == 0 {
|
||||
out.entry(".".as_ref(), op.ino(), libc::DT_DIR as u32, 1);
|
||||
out.entry(
|
||||
"..".as_ref(),
|
||||
match op.ino() {
|
||||
1 => 1,
|
||||
_ => root.parent,
|
||||
},
|
||||
libc::DT_DIR as u32,
|
||||
2,
|
||||
);
|
||||
offset = 2;
|
||||
} else {
|
||||
// we don't add the . and .. but
|
||||
// we also need to change the offset to
|
||||
query_offset -= 2;
|
||||
}
|
||||
|
||||
let children = self.meta.children(root.ino, 10, query_offset).await?;
|
||||
for entry in children.iter() {
|
||||
offset += 1;
|
||||
|
||||
let full = match entry.mode.file_type() {
|
||||
FileType::Dir => {
|
||||
//let inode = self.meta.dir_inode(&sub.key).await?;
|
||||
out.entry(entry.name.as_ref(), entry.ino, libc::DT_DIR as u32, offset)
|
||||
}
|
||||
FileType::Regular => {
|
||||
out.entry(entry.name.as_ref(), entry.ino, libc::DT_REG as u32, offset)
|
||||
}
|
||||
FileType::Link => {
|
||||
out.entry(entry.name.as_ref(), entry.ino, libc::DT_LNK as u32, offset)
|
||||
}
|
||||
_ => {
|
||||
warn!("unkonwn entry");
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if full {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(req.reply(out)?)
|
||||
}
|
||||
|
||||
async fn lookup(&self, req: &Request, op: op::Lookup<'_>) -> Result<()> {
|
||||
log::debug!("lookup(parent: {}, name: {:?})", op.parent(), op.name());
|
||||
let name = match op.name().to_str() {
|
||||
Some(name) => name,
|
||||
None => {
|
||||
req.reply_error(libc::ENOENT)?;
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let node = self.meta.lookup(op.parent(), name).await?;
|
||||
|
||||
let node = match node {
|
||||
Some(node) => node,
|
||||
None => {
|
||||
req.reply_error(libc::ENOENT)?;
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
let mut out = EntryOut::default();
|
||||
|
||||
node.fill(out.attr());
|
||||
out.ino(node.ino);
|
||||
out.ttl_attr(TTL);
|
||||
out.ttl_entry(TTL);
|
||||
|
||||
Ok(req.reply(out)?)
|
||||
}
|
||||
}
|
||||
|
||||
// ==== AsyncSession ====
|
||||
|
||||
/// A polyfuse [`Session`] wrapped in a tokio [`AsyncFd`] so requests can be
/// awaited instead of blocking on the fuse device fd.
struct AsyncSession {
    inner: AsyncFd<Session>,
}
|
||||
|
||||
impl AsyncSession {
    /// Perform the (blocking) fuse mount on a blocking thread, then register
    /// the session fd with the tokio reactor for read readiness.
    async fn mount(mountpoint: PathBuf, config: KernelConfig) -> io::Result<Self> {
        tokio::task::spawn_blocking(move || {
            let session = Session::mount(mountpoint, config)?;
            Ok(Self {
                inner: AsyncFd::with_interest(session, Interest::READABLE)?,
            })
        })
        .await
        // only fails if the blocking task panicked or was cancelled
        .expect("join error")
    }

    /// Await the next fuse request; `Ok(None)` means the session has ended.
    async fn next_request(&self) -> io::Result<Option<Request>> {
        use futures::{future::poll_fn, ready, task::Poll};

        poll_fn(|cx| {
            // wait until the fuse fd is readable
            let mut guard = ready!(self.inner.poll_read_ready(cx))?;
            match self.inner.get_ref().next_request() {
                Err(err) if err.kind() == io::ErrorKind::WouldBlock => {
                    // spurious readiness: clear and go back to pending
                    guard.clear_ready();
                    Poll::Pending
                }
                res => {
                    // keep readiness so back-to-back requests don't re-poll
                    guard.retain_ready();
                    Poll::Ready(res)
                }
            }
        })
        .await
    }
}
|
||||
|
||||
/// Types that can populate a fuse [`FileAttr`] from their own metadata.
trait AttributeFiller {
    /// Copy this object's attributes into `attr`.
    fn fill(&self, attr: &mut FileAttr);
}
|
||||
|
||||
impl AttributeFiller for Inode {
    /// Translate the flist inode record into kernel file attributes.
    fn fill(&self, attr: &mut FileAttr) {
        attr.mode(self.mode.mode());

        attr.ino(self.ino);
        attr.ctime(Duration::from_secs(self.ctime as u64));
        attr.mtime(Duration::from_secs(self.mtime as u64));
        attr.uid(self.uid);
        attr.gid(self.gid);
        attr.size(self.size);
        attr.rdev(self.rdev as u32);
        attr.blksize(FS_BLOCK_SIZE);

        // number of 512-byte blocks, rounded up
        let mut blocks = self.size / 512;
        blocks += match self.size % 512 {
            0 => 0,
            _ => 1,
        };

        attr.blocks(blocks);

        match self.mode.file_type() {
            FileType::Dir => attr.nlink(2),
            // NOTE(review): this overrides the FS_BLOCK_SIZE set above for
            // regular files — looks deliberate but worth confirming
            FileType::Regular => attr.blksize(4 * 1024),
            _ => (),
        };
    }
}
|
||||
644
rfs/src/fungi/meta.rs
Normal file
644
rfs/src/fungi/meta.rs
Normal file
@@ -0,0 +1,644 @@
|
||||
use std::{
|
||||
collections::LinkedList,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use sqlx::{
|
||||
sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePoolOptions, SqliteRow},
|
||||
FromRow, Row, SqlitePool,
|
||||
};
|
||||
|
||||
use crate::store;
|
||||
|
||||
const ID_LEN: usize = 32;
|
||||
const KEY_LEN: usize = 32;
|
||||
const TYPE_MASK: u32 = nix::libc::S_IFMT;
|
||||
|
||||
/// File type as encoded in the high bits of a POSIX mode (`S_IFMT` mask).
/// Discriminants are the raw `S_IF*` constants so `Mode` can OR them in
/// directly.
#[repr(u32)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FileType {
    Regular = nix::libc::S_IFREG,
    Dir = nix::libc::S_IFDIR,
    Link = nix::libc::S_IFLNK,
    Block = nix::libc::S_IFBLK,
    Char = nix::libc::S_IFCHR,
    Socket = nix::libc::S_IFSOCK,
    FIFO = nix::libc::S_IFIFO,
    // fallback for mode bits that match none of the S_IF* constants
    Unknown = 0,
}
|
||||
|
||||
impl From<u32> for FileType {
|
||||
fn from(value: u32) -> Self {
|
||||
match value {
|
||||
nix::libc::S_IFREG => Self::Regular,
|
||||
nix::libc::S_IFDIR => Self::Dir,
|
||||
nix::libc::S_IFLNK => Self::Link,
|
||||
nix::libc::S_IFBLK => Self::Block,
|
||||
nix::libc::S_IFCHR => Self::Char,
|
||||
nix::libc::S_IFSOCK => Self::Socket,
|
||||
nix::libc::S_IFIFO => Self::FIFO,
|
||||
_ => Self::Unknown,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static SCHEMA: &str = include_str!("../../schema/schema.sql");
|
||||
|
||||
/// Errors produced by the flist metadata layer.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error("failed to execute query: {0}")]
    SqlError(#[from] sqlx::Error),

    // a block id read from the db was not exactly ID_LEN bytes
    #[error("invalid hash length")]
    InvalidHash,

    // a block key read from the db was not exactly KEY_LEN bytes
    #[error("invalid key length")]
    InvalidKey,

    #[error("io error: {0:#}")]
    IO(#[from] std::io::Error),

    #[error("store error: {0}")]
    Store(#[from] store::Error),

    // catch-all wrapper for errors with no dedicated variant
    #[error("unknown meta error: {0}")]
    Anyhow(#[from] anyhow::Error),
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
pub type Ino = u64;
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct Mode(u32);
|
||||
|
||||
impl From<u32> for Mode {
|
||||
fn from(value: u32) -> Self {
|
||||
Self(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl Mode {
|
||||
pub fn new(t: FileType, perm: u32) -> Self {
|
||||
Self(t as u32 | (perm & !TYPE_MASK))
|
||||
}
|
||||
|
||||
pub fn file_type(&self) -> FileType {
|
||||
(self.0 & TYPE_MASK).into()
|
||||
}
|
||||
|
||||
pub fn permissions(&self) -> u32 {
|
||||
self.0 & !TYPE_MASK
|
||||
}
|
||||
|
||||
pub fn mode(&self) -> u32 {
|
||||
self.0
|
||||
}
|
||||
|
||||
pub fn is(&self, typ: FileType) -> bool {
|
||||
self.file_type() == typ
|
||||
}
|
||||
}
|
||||
|
||||
/// One row of the `inode` table (optionally joined with `extra.data`).
#[derive(Debug, Clone, Default)]
pub struct Inode {
    pub ino: Ino,
    // ino of the containing directory; for the root it refers to itself
    pub parent: Ino,
    pub name: String,
    pub size: u64,
    pub uid: u32,
    pub gid: u32,
    pub mode: Mode,
    // device number, meaningful for block/char special files
    pub rdev: u64,
    pub ctime: i64,
    pub mtime: i64,
    // extra payload (e.g. symlink target), from the `extra` table
    pub data: Option<Vec<u8>>,
}
|
||||
|
||||
impl FromRow<'_, SqliteRow> for Inode {
|
||||
fn from_row(row: &'_ SqliteRow) -> std::result::Result<Self, sqlx::Error> {
|
||||
Ok(Self {
|
||||
ino: row.get::<i64, &str>("ino") as Ino,
|
||||
parent: row.get::<i64, &str>("parent") as Ino,
|
||||
name: row.get("name"),
|
||||
size: row.get::<i64, &str>("size") as u64,
|
||||
uid: row.get("uid"),
|
||||
gid: row.get("uid"),
|
||||
mode: row.get::<u32, &str>("mode").into(),
|
||||
rdev: row.get::<i64, &str>("rdev") as u64,
|
||||
ctime: row.get("ctime"),
|
||||
mtime: row.get("mtime"),
|
||||
data: row.get("data"),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// One content block of a file: addressed by hash, decrypted with its key.
#[derive(Debug, Clone, Default)]
pub struct Block {
    /// id of the block
    pub id: [u8; ID_LEN],
    /// encryption key of the block
    pub key: [u8; KEY_LEN],
}
|
||||
|
||||
impl FromRow<'_, SqliteRow> for Block {
|
||||
fn from_row(row: &'_ SqliteRow) -> std::result::Result<Self, sqlx::Error> {
|
||||
let hash: &[u8] = row.get("id");
|
||||
if hash.len() != ID_LEN {
|
||||
return Err(sqlx::Error::Decode(Box::new(Error::InvalidHash)));
|
||||
}
|
||||
|
||||
let key: &[u8] = row.get("key");
|
||||
|
||||
if hash.len() != KEY_LEN {
|
||||
return Err(sqlx::Error::Decode(Box::new(Error::InvalidKey)));
|
||||
}
|
||||
|
||||
let mut block = Self::default();
|
||||
block.id.copy_from_slice(hash);
|
||||
block.key.copy_from_slice(key);
|
||||
|
||||
Ok(block)
|
||||
}
|
||||
}
|
||||
|
||||
/// A sharding route: blocks whose leading hash byte falls in `start..=end`
/// are stored at `url`.
#[derive(Debug, Clone, Default)]
pub struct Route {
    pub start: u8,
    pub end: u8,
    pub url: String,
}

impl FromRow<'_, SqliteRow> for Route {
    /// Decode a `route` table row.
    fn from_row(row: &'_ SqliteRow) -> std::result::Result<Self, sqlx::Error> {
        Ok(Self {
            start: row.get("start"),
            end: row.get("end"),
            url: row.get("url"),
        })
    }
}
|
||||
|
||||
/// Well-known flist metadata tags, plus free-form custom keys.
#[derive(Debug, Clone)]
pub enum Tag<'a> {
    Version,
    Description,
    Author,
    Custom(&'a str),
}

impl<'a> Tag<'a> {
    /// The database key this tag is stored under in the `tag` table.
    fn key(&self) -> &str {
        match self {
            Tag::Custom(key) => key,
            Tag::Version => "version",
            Tag::Description => "description",
            Tag::Author => "author",
        }
    }
}
|
||||
|
||||
/// Outcome of visiting one node during a walk: keep going or stop early.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Walk {
    Continue,
    Break,
}
/// Visitor invoked once per inode while traversing the flist tree.
#[async_trait::async_trait]
pub trait WalkVisitor {
    /// Called with the full path and inode of each visited node.
    /// Return [`Walk::Break`] to stop the traversal early.
    async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk>;
}

// a (path, inode) pair queued for traversal by Reader::walk
struct WalkItem(PathBuf, Inode);
|
||||
|
||||
/// Read-only handle over an flist (sqlite) metadata database.
#[derive(Clone)]
pub struct Reader {
    pool: SqlitePool,
}
|
||||
|
||||
impl Reader {
    /// Open the flist database at `path` for reading.
    pub async fn new<P: AsRef<Path>>(path: P) -> Result<Self> {
        let opts = SqliteConnectOptions::new()
            .journal_mode(SqliteJournalMode::Delete)
            .filename(path);

        let pool = SqlitePool::connect_with(opts).await?;

        Ok(Self { pool })
    }

    /// Fetch a single inode (with its optional `extra.data` blob) by number.
    pub async fn inode(&self, ino: Ino) -> Result<Inode> {
        let inode: Inode = sqlx::query_as(r#"select inode.*, extra.data
            from inode left join extra on inode.ino = extra.ino
            where inode.ino = ?;"#)
        .bind(ino as i64).fetch_one(&self.pool).await?;

        Ok(inode)
    }

    /// Page through the direct children of `parent`.
    pub async fn children(&self, parent: Ino, limit: u32, offset: u64) -> Result<Vec<Inode>> {
        let results: Vec<Inode> = sqlx::query_as(
            r#"select inode.*, extra.data
            from inode left join extra on inode.ino = extra.ino
            where inode.parent = ? limit ? offset ?;"#,
        )
        .bind(parent as i64)
        .bind(limit)
        .bind(offset as i64)
        .fetch_all(&self.pool)
        .await?;

        Ok(results)
    }

    /// Find the child of `parent` with the given name, if any.
    pub async fn lookup<S: AsRef<str>>(&self, parent: Ino, name: S) -> Result<Option<Inode>> {
        let inode: Option<Inode> = sqlx::query_as(r#"select inode.*, extra.data
            from inode left join extra on inode.ino = extra.ino
            where inode.parent = ? and inode.name = ?;"#)
        .bind(parent as i64)
        .bind(name.as_ref())
        .fetch_optional(&self.pool).await?;
        Ok(inode)
    }

    /// All content blocks of the file `ino`, in insertion (rowid) order.
    pub async fn blocks(&self, ino: Ino) -> Result<Vec<Block>> {
        let results: Vec<Block> = sqlx::query_as("select id, key from block where ino = ?;")
            .bind(ino as i64)
            .fetch_all(&self.pool)
            .await?;

        Ok(results)
    }

    /// Page through every block in the flist regardless of owning file.
    pub async fn all_blocks(&self, limit: u32, offset: u64) -> Result<Vec<Block>> {
        let results: Vec<Block> = sqlx::query_as("select id, key from block limit ? offset ?;")
            .bind(limit)
            .bind(offset as i64)
            .fetch_all(&self.pool)
            .await?;

        Ok(results)
    }

    /// Read a single metadata tag value, if present.
    pub async fn tag(&self, tag: Tag<'_>) -> Result<Option<String>> {
        let value: Option<(String,)> = sqlx::query_as("select value from tag where key = ?;")
            .bind(tag.key())
            .fetch_optional(&self.pool)
            .await?;

        Ok(value.map(|v| v.0))
    }

    /// All metadata tags as (key, value) pairs.
    pub async fn tags(&self) -> Result<Vec<(String, String)>> {
        let tags: Vec<(String, String)> = sqlx::query_as("select key, value from tag;")
            .fetch_all(&self.pool)
            .await?;

        Ok(tags)
    }

    /// All store routes recorded in this flist.
    pub async fn routes(&self) -> Result<Vec<Route>> {
        let results: Vec<Route> = sqlx::query_as("select start, end, url from route;")
            .fetch_all(&self.pool)
            .await?;

        Ok(results)
    }

    /// Walk the whole tree starting at the root inode (ino 1), invoking
    /// `visitor` for every node. Directories are processed iteratively via
    /// a work list to avoid recursion.
    pub async fn walk<W: WalkVisitor + Send>(&self, visitor: &mut W) -> Result<()> {
        let node = self.inode(1).await?;
        let mut list = LinkedList::default();
        let path: PathBuf = "/".into();
        list.push_back(WalkItem(path, node));
        while !list.is_empty() {
            let item = list.pop_back().unwrap();
            self.walk_node(&mut list, &item, visitor).await?;
        }

        Ok(())
    }

    /// Visit one directory node, queueing its sub-directories on `list` and
    /// visiting its non-directory children immediately.
    async fn walk_node<W: WalkVisitor + Send>(
        &self,
        list: &mut LinkedList<WalkItem>,
        WalkItem(path, node): &WalkItem,
        visitor: &mut W,
    ) -> Result<()> {
        if visitor.visit(path, node).await? == Walk::Break {
            return Ok(());
        }

        // page over the children in batches of 1000
        let mut offset = 0;
        loop {
            let children = self.children(node.ino, 1000, offset).await?;
            if children.is_empty() {
                break;
            }

            for child in children {
                offset += 1;
                let child_path = path.join(&child.name);
                if child.mode.is(FileType::Dir) {
                    // directories are visited later, when popped off the list
                    list.push_back(WalkItem(child_path, child));
                    continue;
                }

                if visitor.visit(&child_path, &child).await? == Walk::Break {
                    return Ok(());
                }
            }
        }

        Ok(())
    }
}
|
||||
|
||||
/// Write handle used to build a new flist (sqlite) metadata database.
#[derive(Clone)]
pub struct Writer {
    pool: SqlitePool,
}
|
||||
|
||||
impl Writer {
    /// Create a new flist writer at `path`, applying the bundled schema.
    /// When `remove` is set any pre-existing file is deleted first.
    pub async fn new<P: AsRef<Path>>(path: P, remove: bool) -> Result<Self> {
        if remove {
            // best-effort: ignore "file not found"
            let _ = tokio::fs::remove_file(&path).await;
        }

        let opts = SqliteConnectOptions::new()
            .create_if_missing(true)
            .journal_mode(SqliteJournalMode::Delete)
            .filename(path);

        // single connection: writes are serialized
        let pool = SqlitePoolOptions::new()
            .max_connections(1)
            .connect_with(opts)
            .await?;

        sqlx::query(SCHEMA).execute(&pool).await?;

        Ok(Self { pool })
    }

    /// inode add an inode to the flist; returns the assigned ino (rowid).
    pub async fn inode(&self, inode: Inode) -> Result<Ino> {
        let result = sqlx::query(
            r#"insert into inode (parent, name, size, uid, gid, mode, rdev, ctime, mtime)
            values (?, ?, ?, ?, ?, ?, ?, ?, ?);"#,
        )
        .bind(inode.parent as i64)
        .bind(inode.name)
        .bind(inode.size as i64)
        .bind(inode.uid)
        .bind(inode.gid)
        .bind(inode.mode.0)
        .bind(inode.rdev as i64)
        .bind(inode.ctime)
        .bind(inode.mtime)
        .execute(&self.pool)
        .await?;

        let ino = result.last_insert_rowid() as Ino;
        // extra payload (e.g. symlink target) goes to a side table
        if let Some(data) = &inode.data {
            sqlx::query("insert into extra(ino, data) values (?, ?)")
                .bind(ino as i64)
                .bind(data)
                .execute(&self.pool)
                .await?;
        }

        Ok(ino)
    }

    /// Record one content block (hash + encryption key) for file `ino`.
    pub async fn block(&self, ino: Ino, id: &[u8; ID_LEN], key: &[u8; KEY_LEN]) -> Result<()> {
        sqlx::query("insert into block (ino, id, key) values (?, ?, ?)")
            .bind(ino as i64)
            .bind(&id[..])
            .bind(&key[..])
            .execute(&self.pool)
            .await?;
        Ok(())
    }

    /// Record a sharding route covering hash prefix bytes `start..=end`.
    pub async fn route<U: AsRef<str>>(&self, start: u8, end: u8, url: U) -> Result<()> {
        sqlx::query("insert into route (start, end, url) values (?, ?, ?)")
            .bind(start)
            .bind(end)
            .bind(url.as_ref())
            .execute(&self.pool)
            .await?;
        Ok(())
    }

    /// Insert or replace a metadata tag.
    pub async fn tag<V: AsRef<str>>(&self, tag: Tag<'_>, value: V) -> Result<()> {
        sqlx::query("insert or replace into tag (key, value) values (?, ?);")
            .bind(tag.key())
            .bind(value.as_ref())
            .execute(&self.pool)
            .await?;
        Ok(())
    }

    /// Delete a single metadata tag.
    pub async fn delete_tag(&self, tag: Tag<'_>) -> Result<()> {
        sqlx::query("delete from tag where key = ?;")
            .bind(tag.key())
            .execute(&self.pool)
            .await?;
        Ok(())
    }

    /// Delete every route pointing at the given store url.
    pub async fn delete_route<U: AsRef<str>>(&self, url: U) -> Result<()> {
        sqlx::query("delete from route where url = ?;")
            .bind(url.as_ref())
            .execute(&self.pool)
            .await?;
        Ok(())
    }

    /// Delete all metadata tags.
    pub async fn delete_tags(&self) -> Result<()> {
        sqlx::query("delete from tag;").execute(&self.pool).await?;
        Ok(())
    }

    /// Delete all sharding routes.
    pub async fn delete_routes(&self) -> Result<()> {
        sqlx::query("delete from route;")
            .execute(&self.pool)
            .await?;
        Ok(())
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    /// Round-trip a single root inode with extra data through write + read.
    #[tokio::test]
    async fn test_inode() {
        const PATH: &str = "/tmp/inode.fl";
        let meta = Writer::new(PATH, true).await.unwrap();

        let ino = meta
            .inode(Inode {
                name: "/".into(),
                data: Some("target".into()),
                ..Inode::default()
            })
            .await
            .unwrap();

        assert_eq!(ino, 1);

        let meta = Reader::new(PATH).await.unwrap();
        let inode = meta.inode(ino).await.unwrap();

        assert_eq!(inode.name, "/");
        assert!(inode.data.is_some());
        assert_eq!(inode.data.unwrap().as_slice(), "target".as_bytes());
    }

    /// children() pagination and lookup() by name under a parent.
    #[tokio::test]
    async fn test_get_children() {
        const PATH: &str = "/tmp/children.fl";
        let meta = Writer::new(PATH, true).await.unwrap();

        let ino = meta
            .inode(Inode {
                name: "/".into(),
                data: Some("target".into()),
                ..Inode::default()
            })
            .await
            .unwrap();

        for name in ["bin", "etc", "usr"] {
            meta.inode(Inode {
                parent: ino,
                name: name.into(),
                ..Inode::default()
            })
            .await
            .unwrap();
        }
        let meta = Reader::new(PATH).await.unwrap();
        let children = meta.children(ino, 10, 0).await.unwrap();

        assert_eq!(children.len(), 3);
        assert_eq!(children[0].name, "bin");

        let child = meta.lookup(ino, "bin").await.unwrap();
        assert!(child.is_some());
        assert_eq!(child.unwrap().name, "bin");

        let child = meta.lookup(ino, "wrong").await.unwrap();
        assert!(child.is_none());
    }

    /// Blocks are returned in insertion order with ids and keys intact.
    #[tokio::test]
    async fn test_get_block() {
        const PATH: &str = "/tmp/block.fl";
        let meta = Writer::new(PATH, true).await.unwrap();
        let hash: [u8; ID_LEN] = [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
            25, 26, 27, 28, 29, 30, 31, 32,
        ];
        let key1: [u8; KEY_LEN] = [1; KEY_LEN];
        let key2: [u8; KEY_LEN] = [2; KEY_LEN];

        meta.block(1, &hash, &key1).await.unwrap();
        meta.block(1, &hash, &key2).await.unwrap();

        let meta = Reader::new(PATH).await.unwrap();

        let blocks = meta.blocks(1).await.unwrap();
        assert_eq!(blocks.len(), 2);
        assert_eq!(blocks[0].id, hash);
        assert_eq!(blocks[0].key, key1);
        assert_eq!(blocks[1].key, key2);
    }

    /// Well-known, custom and missing tags.
    #[tokio::test]
    async fn test_get_tag() {
        const PATH: &str = "/tmp/tag.fl";
        let meta = Writer::new(PATH, true).await.unwrap();
        meta.tag(Tag::Version, "0.1").await.unwrap();
        meta.tag(Tag::Author, "azmy").await.unwrap();
        meta.tag(Tag::Custom("custom"), "value").await.unwrap();

        let meta = Reader::new(PATH).await.unwrap();

        assert!(matches!(
            meta.tag(Tag::Version).await.unwrap().as_deref(),
            Some("0.1")
        ));

        assert!(matches!(
            meta.tag(Tag::Custom("custom")).await.unwrap().as_deref(),
            Some("value")
        ));

        assert!(matches!(
            meta.tag(Tag::Custom("unknown")).await.unwrap(),
            None
        ));
    }

    /// Route rows round-trip with their shard ranges.
    #[tokio::test]
    async fn test_get_routes() {
        const PATH: &str = "/tmp/route.fl";
        let meta = Writer::new(PATH, true).await.unwrap();

        meta.route(0, 128, "zdb://hub1.grid.tf").await.unwrap();
        meta.route(129, 255, "zdb://hub2.grid.tf").await.unwrap();

        let meta = Reader::new(PATH).await.unwrap();

        let routes = meta.routes().await.unwrap();
        assert_eq!(routes.len(), 2);
        assert_eq!(routes[0].start, 0);
        assert_eq!(routes[0].end, 128);
        assert_eq!(routes[0].url, "zdb://hub1.grid.tf");
    }

    #[test]
    fn test_mode() {
        // FIX: `0754` in Rust is a *decimal* literal (octal needs the `0o`
        // prefix); the intent here is clearly octal unix permissions.
        let m = Mode::new(FileType::Regular, 0o754);

        assert_eq!(m.permissions(), 0o754);
        assert_eq!(m.file_type(), FileType::Regular);
    }

    /// Smoke-test the tree walk over a small hierarchy.
    #[tokio::test]
    async fn test_walk() {
        const PATH: &str = "/tmp/walk.fl";
        let meta = Writer::new(PATH, true).await.unwrap();

        let parent = meta
            .inode(Inode {
                name: "/".into(),
                data: Some("target".into()),
                ..Inode::default()
            })
            .await
            .unwrap();

        for name in ["bin", "etc", "usr"] {
            meta.inode(Inode {
                parent,
                name: name.into(),
                ..Inode::default()
            })
            .await
            .unwrap();
        }

        let meta = Reader::new(PATH).await.unwrap();
        //TODO: validate the walk
        meta.walk(&mut WalkTest).await.unwrap();
    }

    struct WalkTest;

    #[async_trait::async_trait]
    impl WalkVisitor for WalkTest {
        async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
            println!("{} = {:?}", node.ino, path);
            Ok(Walk::Continue)
        }
    }
}
|
||||
3
rfs/src/fungi/mod.rs
Normal file
3
rfs/src/fungi/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod meta;
|
||||
|
||||
pub use meta::{Error, Reader, Result, Writer};
|
||||
107
rfs/src/lib.rs
Normal file
107
rfs/src/lib.rs
Normal file
@@ -0,0 +1,107 @@
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
pub mod cache;
|
||||
pub mod fungi;
|
||||
pub mod store;
|
||||
|
||||
mod pack;
|
||||
pub use pack::pack;
|
||||
mod unpack;
|
||||
pub use unpack::unpack;
|
||||
mod clone;
|
||||
pub use clone::clone;
|
||||
pub mod config;
|
||||
|
||||
const PARALLEL_UPLOAD: usize = 10; // number of files we can upload in parallel
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use crate::{
        cache::Cache,
        fungi::meta,
        store::{dir::DirStore, Router},
    };
    use std::path::PathBuf;
    use tokio::{fs, io::AsyncReadExt};

    /// End-to-end round trip: generate random files, pack them into an flist
    /// backed by two sharded dir stores, then unpack and diff against source.
    #[tokio::test]
    async fn pack_unpack() {
        const ROOT: &str = "/tmp/pack-unpack-test";
        let _ = fs::remove_dir_all(ROOT).await;

        let root: PathBuf = ROOT.into();
        let source = root.join("source");
        fs::create_dir_all(&source).await.unwrap();

        // create files of assorted sizes (including empty) from /dev/urandom
        for size in [0, 100 * 1024, 1024 * 1024, 10 * 1024 * 1024] {
            let mut urandom = fs::OpenOptions::default()
                .read(true)
                .open("/dev/urandom")
                .await
                .unwrap()
                .take(size);

            let name = format!("file-{}.rnd", size);
            let p = source.join(&name);
            let mut file = fs::OpenOptions::default()
                .create(true)
                .write(true)
                .open(p)
                .await
                .unwrap();

            tokio::io::copy(&mut urandom, &mut file).await.unwrap();
        }

        println!("file generation complete");
        let writer = meta::Writer::new(root.join("meta.fl"), true).await.unwrap();

        // while we at it we can already create 2 stores and create a router store on top
        // of that.
        let store0 = DirStore::new(root.join("store0")).await.unwrap();
        let store1 = DirStore::new(root.join("store1")).await.unwrap();
        let mut store = Router::new();

        store.add(0x00, 0x7f, store0);
        store.add(0x80, 0xff, store1);

        pack(writer, store, &source, false, None).await.unwrap();

        println!("packing complete");
        // recreate the stores for reading.
        let store0 = DirStore::new(root.join("store0")).await.unwrap();
        let store1 = DirStore::new(root.join("store1")).await.unwrap();
        let mut store = Router::new();

        store.add(0x00, 0x7f, store0);
        store.add(0x80, 0xff, store1);

        let cache = Cache::new(root.join("cache"), store);

        let reader = meta::Reader::new(root.join("meta.fl")).await.unwrap();
        // validate reader store routing
        let routers = reader.routes().await.unwrap();
        assert_eq!(2, routers.len());
        assert_eq!(routers[0].url, "dir:///tmp/pack-unpack-test/store0");
        assert_eq!(routers[1].url, "dir:///tmp/pack-unpack-test/store1");

        assert_eq!((routers[0].start, routers[0].end), (0x00, 0x7f));
        assert_eq!((routers[1].start, routers[1].end), (0x80, 0xff));

        unpack(&reader, &cache, root.join("destination"), false)
            .await
            .unwrap();

        println!("unpacking complete");
        // compare that source directory is exactly the same as target directory
        let status = std::process::Command::new("diff")
            .arg(root.join("source"))
            .arg(root.join("destination"))
            .status()
            .unwrap();

        assert!(status.success());
    }
}
|
||||
383
rfs/src/main.rs
Normal file
383
rfs/src/main.rs
Normal file
@@ -0,0 +1,383 @@
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
use nix::sys::signal::{self, Signal};
|
||||
use nix::unistd::Pid;
|
||||
use std::error::Error;
|
||||
use std::io::Read;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use clap::{ArgAction, Args, Parser, Subcommand};
|
||||
|
||||
use rfs::fungi;
|
||||
use rfs::store::{self};
|
||||
use rfs::{cache, config};
|
||||
|
||||
mod fs;
|
||||
/// mount flists
#[derive(Parser, Debug)]
#[clap(name ="rfs", author, version = env!("GIT_VERSION"), about, long_about = None)]
struct Options {
    /// enable debugging logs
    // occurrence count (`--debug --debug` = 2); presumably mapped to the
    // logger verbosity in main() — confirm there
    #[clap(long, action=ArgAction::Count)]
    debug: u8,

    // which subcommand to run (mount/pack/unpack/clone/config)
    #[command(subcommand)]
    command: Commands,
}
|
||||
|
||||
// Top-level subcommands of the rfs binary. The `///` doc comments double as
// the clap help text and are deliberately left untouched.
#[derive(Subcommand, Debug)]
enum Commands {
    /// mount an FL
    Mount(MountOptions),
    /// create an FL and upload blocks to provided storage
    Pack(PackOptions),
    /// unpack (downloads) content of an FL the provided location
    Unpack(UnpackOptions),
    /// clone copies the data from the stores of an FL to another stores
    Clone(CloneOptions),
    /// list or modify FL metadata and stores
    Config(ConfigOptions),
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct MountOptions {
|
||||
/// path to metadata file (flist)
|
||||
#[clap(short, long)]
|
||||
meta: String,
|
||||
|
||||
/// directory used as cache for downloaded file chuncks
|
||||
#[clap(short, long, default_value_t = String::from("/tmp/cache"))]
|
||||
cache: String,
|
||||
|
||||
/// run in the background.
|
||||
#[clap(short, long)]
|
||||
daemon: bool,
|
||||
|
||||
/// log file only used with daemon mode
|
||||
#[clap(short, long)]
|
||||
log: Option<String>,
|
||||
|
||||
/// target mountpoint
|
||||
target: String,
|
||||
}
|
||||
|
||||
// Arguments for the `pack` subcommand; `///` comments are the clap help text.
#[derive(Args, Debug)]
struct PackOptions {
    /// path to metadata file (flist)
    #[clap(short, long)]
    meta: String,

    /// store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for
    /// sharding. the URL is per store type, please check docs for more information
    #[clap(short, long, action=ArgAction::Append)]
    store: Vec<String>,

    /// no_strip_password disable automatic password stripping from store url, otherwise password will be stored in the fl.
    #[clap(long, default_value_t = false)]
    no_strip_password: bool,

    /// target directory to upload
    target: String,
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct UnpackOptions {
|
||||
/// path to metadata file (flist)
|
||||
#[clap(short, long)]
|
||||
meta: String,
|
||||
|
||||
/// directory used as cache for downloaded file chuncks
|
||||
#[clap(short, long, default_value_t = String::from("/tmp/cache"))]
|
||||
cache: String,
|
||||
|
||||
/// preserve files ownership from the FL, otherwise use the current user ownership
|
||||
/// setting this flag to true normally requires sudo
|
||||
#[clap(short, long, default_value_t = false)]
|
||||
preserve_ownership: bool,
|
||||
|
||||
/// target directory for unpacking
|
||||
target: String,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct CloneOptions {
|
||||
/// path to metadata file (flist)
|
||||
#[clap(short, long)]
|
||||
meta: String,
|
||||
|
||||
/// store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for
|
||||
/// sharding. the URL is per store type, please check docs for more information
|
||||
#[clap(short, long, action=ArgAction::Append)]
|
||||
store: Vec<String>,
|
||||
|
||||
/// directory used as cache for downloaded file chunks
|
||||
#[clap(short, long, default_value_t = String::from("/tmp/cache"))]
|
||||
cache: String,
|
||||
}
|
||||
|
||||
// Options for `rfs config`; carries a nested tag/store sub-subcommand.
#[derive(Args, Debug)]
struct ConfigOptions {
    /// path to metadata file (flist)
    #[clap(short, long)]
    meta: String,

    #[command(subcommand)]
    command: ConfigCommands,
}

// `config tag …` or `config store …`
#[derive(Subcommand, Debug)]
enum ConfigCommands {
    #[command(subcommand)]
    Tag(TagOperation),
    #[command(subcommand)]
    Store(StoreOperation),
}

// Operations on FL tags (free-form key/value metadata).
#[derive(Subcommand, Debug)]
enum TagOperation {
    List,
    Add(TagAddOptions),
    Delete(TagDeleteOptions),
}

#[derive(Args, Debug)]
struct TagAddOptions {
    /// pair of key-values separated with '='
    // parsed by `parse_key_val`; may be repeated
    #[clap(short, long, value_parser = parse_key_val::<String, String>, number_of_values = 1)]
    tag: Vec<(String, String)>,
}

#[derive(Args, Debug)]
struct TagDeleteOptions {
    /// key to remove
    #[clap(short, long, action=ArgAction::Append)]
    key: Vec<String>,
    /// remove all tags
    #[clap(short, long, default_value_t = false)]
    all: bool,
}

// Operations on the FL's store routes.
#[derive(Subcommand, Debug)]
enum StoreOperation {
    List,
    Add(StoreAddOptions),
    Delete(StoreDeleteOptions),
}

#[derive(Args, Debug)]
struct StoreAddOptions {
    /// store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for
    /// sharding. the URL is per store type, please check docs for more information
    #[clap(short, long, action=ArgAction::Append)]
    store: Vec<String>,
}

#[derive(Args, Debug)]
struct StoreDeleteOptions {
    /// store to remove
    #[clap(short, long, action=ArgAction::Append)]
    store: Vec<String>,
    /// remove all stores
    #[clap(short, long, default_value_t = false)]
    all: bool,
}
|
||||
|
||||
/// Parse a single key-value pair
///
/// Splits `s` at the first `=`; everything before it parses into `T`,
/// everything after (which may itself contain `=`) parses into `U`.
fn parse_key_val<T, U>(s: &str) -> Result<(T, U), Box<dyn Error + Send + Sync + 'static>>
where
    T: std::str::FromStr,
    T::Err: Error + Send + Sync + 'static,
    U: std::str::FromStr,
    U::Err: Error + Send + Sync + 'static,
{
    let (key, value) = s
        .split_once('=')
        .ok_or_else(|| format!("invalid KEY=value: no `=` found in `{s}`"))?;
    Ok((key.parse()?, value.parse()?))
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let opts = Options::parse();
|
||||
|
||||
simple_logger::SimpleLogger::new()
|
||||
.with_utc_timestamps()
|
||||
.with_level({
|
||||
match opts.debug {
|
||||
0 => log::LevelFilter::Info,
|
||||
1 => log::LevelFilter::Debug,
|
||||
_ => log::LevelFilter::Trace,
|
||||
}
|
||||
})
|
||||
.with_module_level("sqlx", log::Level::Error.to_level_filter())
|
||||
.init()?;
|
||||
|
||||
log::debug!("options: {:#?}", opts);
|
||||
|
||||
match opts.command {
|
||||
Commands::Mount(opts) => mount(opts),
|
||||
Commands::Pack(opts) => pack(opts),
|
||||
Commands::Unpack(opts) => unpack(opts),
|
||||
Commands::Clone(opts) => clone(opts),
|
||||
Commands::Config(opts) => config(opts),
|
||||
}
|
||||
}
|
||||
|
||||
fn pack(opts: PackOptions) -> Result<()> {
|
||||
let rt = tokio::runtime::Runtime::new()?;
|
||||
|
||||
rt.block_on(async move {
|
||||
let store = store::parse_router(opts.store.as_slice()).await?;
|
||||
let meta = fungi::Writer::new(opts.meta, true).await?;
|
||||
rfs::pack(meta, store, opts.target, !opts.no_strip_password, None).await?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn unpack(opts: UnpackOptions) -> Result<()> {
|
||||
let rt = tokio::runtime::Runtime::new()?;
|
||||
|
||||
rt.block_on(async move {
|
||||
let meta = fungi::Reader::new(opts.meta)
|
||||
.await
|
||||
.context("failed to initialize metadata database")?;
|
||||
|
||||
let router = store::get_router(&meta).await?;
|
||||
|
||||
let cache = cache::Cache::new(opts.cache, router);
|
||||
rfs::unpack(&meta, &cache, opts.target, opts.preserve_ownership).await?;
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn mount(opts: MountOptions) -> Result<()> {
|
||||
if is_mountpoint(&opts.target)? {
|
||||
eprintln!("target {} is already a mount point", opts.target);
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
if opts.daemon {
|
||||
let pid_file = tempfile::NamedTempFile::new()?;
|
||||
let target = opts.target.clone();
|
||||
let mut daemon = daemonize::Daemonize::new()
|
||||
.working_directory(std::env::current_dir()?)
|
||||
.pid_file(pid_file.path());
|
||||
if let Some(ref log) = opts.log {
|
||||
let out = std::fs::File::create(log)?;
|
||||
let err = out.try_clone()?;
|
||||
daemon = daemon.stdout(out).stderr(err);
|
||||
}
|
||||
|
||||
match daemon.execute() {
|
||||
daemonize::Outcome::Parent(result) => {
|
||||
result.context("daemonize")?;
|
||||
wait_child(target, pid_file);
|
||||
return Ok(());
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
let rt = tokio::runtime::Runtime::new()?;
|
||||
|
||||
rt.block_on(fuse(opts))
|
||||
}
|
||||
|
||||
fn is_mountpoint<S: AsRef<str>>(target: S) -> Result<bool> {
|
||||
use std::process::Command;
|
||||
|
||||
let output = Command::new("mountpoint")
|
||||
.arg("-q")
|
||||
.arg(target.as_ref())
|
||||
.output()
|
||||
.context("failed to check mountpoint")?;
|
||||
|
||||
Ok(output.status.success())
|
||||
}
|
||||
|
||||
fn wait_child(target: String, mut pid_file: tempfile::NamedTempFile) {
|
||||
for _ in 0..5 {
|
||||
if is_mountpoint(&target).unwrap() {
|
||||
return;
|
||||
}
|
||||
std::thread::sleep(std::time::Duration::from_secs(1));
|
||||
}
|
||||
let mut buf = String::new();
|
||||
if let Err(e) = pid_file.read_to_string(&mut buf) {
|
||||
error!("failed to read pid_file: {:#}", e);
|
||||
}
|
||||
let pid = buf.parse::<i32>();
|
||||
match pid {
|
||||
Err(e) => error!("failed to parse pid_file contents {}: {:#}", buf, e),
|
||||
Ok(v) => {
|
||||
let _ = signal::kill(Pid::from_raw(v), Signal::SIGTERM);
|
||||
} // probably the child exited on its own
|
||||
}
|
||||
// cleanup is not performed if the process is terminated with exit(2)
|
||||
drop(pid_file);
|
||||
eprintln!("failed to mount in under 5 seconds, please check logs for more information");
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
async fn fuse(opts: MountOptions) -> Result<()> {
|
||||
let meta = fungi::Reader::new(opts.meta)
|
||||
.await
|
||||
.context("failed to initialize metadata database")?;
|
||||
|
||||
let router = store::get_router(&meta).await?;
|
||||
|
||||
let cache = cache::Cache::new(opts.cache, router);
|
||||
let filesystem = fs::Filesystem::new(meta, cache);
|
||||
|
||||
filesystem.mount(opts.target).await
|
||||
}
|
||||
|
||||
fn clone(opts: CloneOptions) -> Result<()> {
|
||||
let rt = tokio::runtime::Runtime::new()?;
|
||||
|
||||
rt.block_on(async move {
|
||||
let store = store::parse_router(opts.store.as_slice()).await?;
|
||||
let meta = fungi::Reader::new(opts.meta)
|
||||
.await
|
||||
.context("failed to initialize metadata database")?;
|
||||
|
||||
let router = store::get_router(&meta).await?;
|
||||
|
||||
let cache = cache::Cache::new(opts.cache, router);
|
||||
rfs::clone(meta, store, cache).await?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
/// Handle the `config` subcommand: list or modify tags and stores of an FL.
fn config(opts: ConfigOptions) -> Result<()> {
    let rt = tokio::runtime::Runtime::new()?;

    rt.block_on(async move {
        // the same metadata file is opened twice: a writer for mutations
        // and a reader for listings.
        // NOTE(review): the second Writer::new arg is `true` in `pack` and
        // `false` here — presumably create/overwrite vs open-existing;
        // confirm against fungi::Writer::new.
        let writer = fungi::Writer::new(opts.meta.clone(), false)
            .await
            .context("failed to initialize metadata database")?;

        let reader = fungi::Reader::new(opts.meta)
            .await
            .context("failed to initialize metadata database")?;

        // dispatch on the tag/store sub-subcommand
        match opts.command {
            ConfigCommands::Tag(opts) => match opts {
                TagOperation::List => config::tag_list(reader).await?,
                TagOperation::Add(opts) => config::tag_add(writer, opts.tag).await?,
                TagOperation::Delete(opts) => {
                    config::tag_delete(writer, opts.key, opts.all).await?
                }
            },
            ConfigCommands::Store(opts) => match opts {
                StoreOperation::List => config::store_list(reader).await?,
                StoreOperation::Add(opts) => config::store_add(writer, opts.store).await?,
                StoreOperation::Delete(opts) => {
                    config::store_delete(writer, opts.store, opts.all).await?
                }
            },
        }

        Ok(())
    })
}
|
||||
267
rfs/src/pack.rs
Normal file
267
rfs/src/pack.rs
Normal file
@@ -0,0 +1,267 @@
|
||||
use crate::fungi::meta::{Ino, Inode};
|
||||
use crate::fungi::{Error, Result, Writer};
|
||||
use crate::store::{BlockStore, Store};
|
||||
use anyhow::Context;
|
||||
use futures::lock::Mutex;
|
||||
use std::collections::LinkedList;
|
||||
use std::ffi::OsString;
|
||||
use std::fs::Metadata;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::mpsc::Sender;
|
||||
use std::sync::Arc;
|
||||
use workers::WorkerPool;
|
||||
|
||||
// Size of one content chunk read from a file before compression/encryption.
const BLOB_SIZE: usize = 512 * 1024; // 512K

// Shared list of (path, error) pairs collected by the upload workers so
// the final result can report every failed file, not just the first one.
type FailuresList = Arc<Mutex<Vec<(PathBuf, Error)>>>;

// A directory queued for processing:
// (parent inode, full path, entry name, stat metadata).
#[derive(Debug)]
struct Item(Ino, PathBuf, OsString, Metadata);
|
||||
/// creates an FL from the given root location. It takes ownership of the writer because
|
||||
/// it's logically incorrect to store multiple filessytem in the same FL.
|
||||
/// All file chunks will then be uploaded to the provided store
|
||||
///
|
||||
pub async fn pack<P: Into<PathBuf>, S: Store>(
|
||||
writer: Writer,
|
||||
store: S,
|
||||
root: P,
|
||||
strip_password: bool,
|
||||
sender: Option<Sender<u32>>,
|
||||
) -> Result<()> {
|
||||
use tokio::fs;
|
||||
|
||||
// building routing table from store information
|
||||
for route in store.routes() {
|
||||
let mut store_url = route.url;
|
||||
|
||||
if strip_password {
|
||||
let mut url = url::Url::parse(&store_url).context("failed to parse store url")?;
|
||||
if url.password().is_some() {
|
||||
url.set_password(None)
|
||||
.map_err(|_| anyhow::anyhow!("failed to strip password"))?;
|
||||
|
||||
store_url = url.to_string();
|
||||
}
|
||||
}
|
||||
|
||||
writer
|
||||
.route(
|
||||
route.start.unwrap_or(u8::MIN),
|
||||
route.end.unwrap_or(u8::MAX),
|
||||
store_url,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
let store: BlockStore<S> = store.into();
|
||||
|
||||
let root = root.into();
|
||||
let meta = fs::metadata(&root)
|
||||
.await
|
||||
.context("failed to get root stats")?;
|
||||
|
||||
let mut list = LinkedList::default();
|
||||
|
||||
let failures = FailuresList::default();
|
||||
let uploader = Uploader::new(store, writer.clone(), Arc::clone(&failures));
|
||||
let mut pool = workers::WorkerPool::new(uploader.clone(), super::PARALLEL_UPLOAD);
|
||||
|
||||
pack_one(
|
||||
&mut list,
|
||||
&writer,
|
||||
&mut pool,
|
||||
Item(0, root, OsString::from("/"), meta),
|
||||
sender.as_ref(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
while !list.is_empty() {
|
||||
let dir = list.pop_back().unwrap();
|
||||
pack_one(&mut list, &writer, &mut pool, dir, sender.as_ref()).await?;
|
||||
}
|
||||
|
||||
pool.close().await;
|
||||
|
||||
let failures = failures.lock().await;
|
||||
if failures.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
log::error!("failed to upload one or more files");
|
||||
for (file, error) in failures.iter() {
|
||||
log::error!(" - failed to upload file {}: {}", file.display(), error);
|
||||
}
|
||||
|
||||
Err(Error::Anyhow(anyhow::anyhow!(
|
||||
"failed to upload ({}) files",
|
||||
failures.len()
|
||||
)))
|
||||
}
|
||||
|
||||
/// pack_one is called for each dir
///
/// Inserts the inode row for the directory itself, then walks its direct
/// children: sub-directories are queued on `list` (processed later by the
/// caller's loop), symlink targets are stored inline as inode data, and
/// regular files are handed to the worker pool for chunked upload.
async fn pack_one<S: Store>(
    list: &mut LinkedList<Item>,
    writer: &Writer,
    pool: &mut WorkerPool<Uploader<S>>,
    Item(parent, path, name, meta): Item,
    sender: Option<&Sender<u32>>,
) -> Result<()> {
    use std::os::unix::fs::MetadataExt;
    use tokio::fs;

    // ino: 0 — the writer assigns the real inode number and returns it
    let current = writer
        .inode(Inode {
            ino: 0,
            name: String::from_utf8_lossy(name.as_bytes()).into_owned(),
            parent,
            size: meta.size(),
            uid: meta.uid(),
            gid: meta.gid(),
            mode: meta.mode().into(),
            rdev: meta.rdev(),
            ctime: meta.ctime(),
            mtime: meta.mtime(),
            data: None,
        })
        .await?;

    let mut children = fs::read_dir(&path)
        .await
        .context("failed to list dir children")?;

    while let Some(child) = children
        .next_entry()
        .await
        .context("failed to read next entry from directory")?
    {
        let name = child.file_name();
        let meta = child.metadata().await?;
        let child_path = path.join(&name);

        // progress: one unit per directory entry seen
        if let Some(ref sender) = sender {
            sender.send(1).context("failed to send progress")?;
        }

        // if this child a directory we add to the tail of the list
        if meta.is_dir() {
            list.push_back(Item(current, child_path.clone(), name, meta));
            continue;
        }

        // create entry
        // otherwise create the file meta
        // symlinks carry their target inline as inode data
        let data = if meta.is_symlink() {
            let target = fs::read_link(&child_path).await?;
            Some(target.as_os_str().as_bytes().into())
        } else {
            None
        };

        let child_ino = writer
            .inode(Inode {
                ino: 0,
                name: String::from_utf8_lossy(name.as_bytes()).into_owned(),
                parent: current,
                size: meta.size(),
                uid: meta.uid(),
                gid: meta.gid(),
                mode: meta.mode().into(),
                rdev: meta.rdev(),
                ctime: meta.ctime(),
                mtime: meta.mtime(),
                data,
            })
            .await?;

        // only regular files have content blocks to upload
        if !meta.is_file() {
            continue;
        }

        // hand the file off to a free worker for chunking + upload
        let worker = pool.get().await;
        worker
            .send((child_ino, child_path))
            .context("failed to schedule file upload")?;
    }
    Ok(())
}
|
||||
|
||||
/// Per-worker state for uploading file content to the block store.
struct Uploader<S>
where
    S: Store,
{
    store: Arc<BlockStore<S>>,
    failures: FailuresList,
    writer: Writer,
    // scratch read buffer; holds one chunk (BLOB_SIZE = 512K) at a time
    buffer: [u8; BLOB_SIZE],
}

// Manual Clone: shares store/failures/writer handles, but each clone gets
// its own zeroed scratch buffer — the buffer's contents are transient and
// never need to be copied.
impl<S> Clone for Uploader<S>
where
    S: Store,
{
    fn clone(&self) -> Self {
        Self {
            store: Arc::clone(&self.store),
            failures: Arc::clone(&self.failures),
            writer: self.writer.clone(),
            buffer: [0; BLOB_SIZE],
        }
    }
}
|
||||
|
||||
impl<S> Uploader<S>
where
    S: Store,
{
    fn new(store: BlockStore<S>, writer: Writer, failures: FailuresList) -> Self {
        Self {
            store: Arc::new(store),
            failures,
            writer,
            buffer: [0; BLOB_SIZE],
        }
    }

    /// Read `path` chunk by chunk, store each chunk in the block store and
    /// record the returned (id, key) pair against inode `ino` in the meta.
    async fn upload(&mut self, ino: Ino, path: &Path) -> Result<()> {
        use tokio::fs;
        use tokio::io::AsyncReadExt;
        use tokio::io::BufReader;

        // create file blocks
        let fd = fs::OpenOptions::default().read(true).open(path).await?;

        let mut reader = BufReader::new(fd);
        loop {
            // NOTE(review): a single read() may return fewer than BLOB_SIZE
            // bytes before EOF, producing variable-size blocks — confirm
            // the FL format tolerates that.
            let size = reader.read(&mut self.buffer).await?;
            if size == 0 {
                // EOF
                break;
            }

            // write block to remote store
            let block = self.store.set(&self.buffer[..size]).await?;

            // write block info to meta
            self.writer.block(ino, &block.id, &block.key).await?;
        }

        Ok(())
    }
}
|
||||
|
||||
#[async_trait::async_trait]
impl<S> workers::Work for Uploader<S>
where
    S: Store,
{
    type Input = (Ino, PathBuf);
    type Output = ();

    /// Upload one file. Failures are logged and pushed onto the shared
    /// failures list instead of aborting the whole pack run.
    async fn run(&mut self, (ino, path): Self::Input) -> Self::Output {
        log::info!("uploading {:?}", path);
        if let Err(err) = self.upload(ino, &path).await {
            log::error!("failed to upload file {}: {:#}", path.display(), err);
            self.failures.lock().await.push((path, err));
        }
    }
}
|
||||
133
rfs/src/store/bs.rs
Normal file
133
rfs/src/store/bs.rs
Normal file
@@ -0,0 +1,133 @@
|
||||
use super::{Error, Result, Store};
|
||||
use crate::fungi::meta::Block;
|
||||
use aes_gcm::{
|
||||
aead::{
|
||||
generic_array::{self, GenericArray},
|
||||
Aead, KeyInit,
|
||||
},
|
||||
Aes256Gcm, Nonce,
|
||||
};
|
||||
|
||||
/// blake2b digest of `input`, truncated to 32 bytes and returned as the
/// array type the AES-256 cipher below expects.
fn hash(input: &[u8]) -> GenericArray<u8, generic_array::typenum::U32> {
    let hash = blake2b_simd::Params::new().hash_length(32).hash(input);
    GenericArray::from_slice(hash.as_bytes()).to_owned()
}

/// The block store builds on top of a store and adds encryption and compression
#[derive(Clone, Debug)]
pub struct BlockStore<S: Store> {
    store: S,
}

// Any plain store can be wrapped into a BlockStore.
impl<S> From<S> for BlockStore<S>
where
    S: Store,
{
    fn from(store: S) -> Self {
        Self { store }
    }
}
|
||||
|
||||
impl<S> BlockStore<S>
where
    S: Store,
{
    /// Consume self and return the wrapped store.
    pub fn inner(self) -> S {
        self.store
    }

    /// Fetch `block.id` from the store, decrypt with `block.key`
    /// (AES-256-GCM, nonce = first 12 bytes of the key), then decompress
    /// (snappy) back to the original plain-text blob.
    pub async fn get(&self, block: &Block) -> Result<Vec<u8>> {
        let encrypted = self.store.get(&block.id).await?;

        let cipher = Aes256Gcm::new_from_slice(&block.key).map_err(|_| Error::InvalidKey)?;
        let nonce = Nonce::from_slice(&block.key[..12]);

        let compressed = cipher
            .decrypt(nonce, encrypted.as_slice())
            .map_err(|_| Error::EncryptionError)?;

        let mut decoder = snap::raw::Decoder::new();
        let plain = decoder.decompress_vec(&compressed)?;

        Ok(plain)
    }

    /// Compress, encrypt and store `blob`. Returns a Block carrying the
    /// store id (hash of the ciphertext) and the decryption key (hash of
    /// the plain text) — identical blobs therefore produce identical
    /// ciphertext, letting stores deduplicate.
    pub async fn set(&self, blob: &[u8]) -> Result<Block> {
        // we first calculate the hash of the plain-text data
        let key = hash(blob);
        let mut encoder = snap::raw::Encoder::new();
        // data is then compressed
        let compressed = encoder.compress_vec(blob)?;

        // we then encrypt it using the hash of the plain-text as a key
        let cipher = Aes256Gcm::new(&key);
        // the nonce is still driven from the key, a nonce is 12 bytes for aes
        // it's done like this so a store can still dedup the data
        let nonce = Nonce::from_slice(&key[..12]);

        // we encrypt the data
        let encrypted = cipher
            .encrypt(nonce, compressed.as_slice())
            .map_err(|_| Error::EncryptionError)?;

        // we hash it again, and use that as the store key
        let id = hash(&encrypted);

        let block = Block {
            id: id.into(),
            key: key.into(),
        };

        self.store.set(&block.id, &encrypted).await?;

        Ok(block)
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::super::Route;

    use super::*;
    use std::collections::HashMap;
    use std::sync::Arc;
    use tokio::sync::Mutex;

    // Minimal Store backed by a HashMap, for round-trip testing.
    #[derive(Default)]
    struct InMemoryStore {
        map: Arc<Mutex<HashMap<Vec<u8>, Vec<u8>>>>,
    }

    #[async_trait::async_trait]
    impl Store for InMemoryStore {
        async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
            let map = self.map.lock().await;
            let v = map.get(key).ok_or(Error::KeyNotFound)?;
            Ok(v.clone())
        }
        async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
            let mut map = self.map.lock().await;
            map.insert(key.into(), blob.into());

            Ok(())
        }

        fn routes(&self) -> Vec<Route> {
            vec![Route::url("mem://")]
        }
    }

    // set() then get() must round-trip the blob through
    // compress + encrypt + store and back.
    #[tokio::test]
    async fn test_block_store() {
        let store = InMemoryStore::default();
        let block_store = BlockStore::from(store);

        let blob = "some random data to store";
        let block = block_store.set(blob.as_bytes()).await.unwrap();

        let received = block_store.get(&block).await.unwrap();

        assert_eq!(blob.as_bytes(), received.as_slice());
    }
}
|
||||
83
rfs/src/store/dir.rs
Normal file
83
rfs/src/store/dir.rs
Normal file
@@ -0,0 +1,83 @@
|
||||
use super::{Error, Result, Route, Store};
|
||||
use std::io::ErrorKind;
|
||||
use std::os::unix::prelude::OsStrExt;
|
||||
use std::path::PathBuf;
|
||||
use tokio::fs;
|
||||
use url;
|
||||
|
||||
pub const SCHEME: &str = "dir";
|
||||
|
||||
/// DirStore is a simple store that store blobs on the filesystem
|
||||
/// and is mainly used for testing
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DirStore {
|
||||
root: PathBuf,
|
||||
}
|
||||
|
||||
impl DirStore {
|
||||
pub async fn make<U: AsRef<str>>(url: &U) -> Result<DirStore> {
|
||||
let u = url::Url::parse(url.as_ref())?;
|
||||
if u.scheme() != SCHEME {
|
||||
return Err(Error::InvalidScheme(u.scheme().into(), SCHEME.into()));
|
||||
}
|
||||
|
||||
Ok(DirStore::new(u.path()).await?)
|
||||
}
|
||||
pub async fn new<P: Into<PathBuf>>(root: P) -> Result<Self> {
|
||||
let root = root.into();
|
||||
fs::create_dir_all(&root).await?;
|
||||
Ok(Self { root })
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
impl Store for DirStore {
    /// Look up a blob by key. Blobs live at `root/<hh>/<hex>` where `hh`
    /// is the first two hex chars of the key; if that path is missing we
    /// fall back to the legacy flat layout `root/<hex>`.
    async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
        let file_name = hex::encode(key);
        let dir_path = self.root.join(&file_name[0..2]);

        // sharded path first
        let mut path = dir_path.join(&file_name);
        let data = match fs::read(&path).await {
            Ok(data) => data,
            Err(err) if err.kind() == ErrorKind::NotFound => {
                // legacy flat-path fallback
                path = self.root.join(file_name);
                let data = match fs::read(&path).await {
                    Ok(data) => data,
                    Err(err) if err.kind() == ErrorKind::NotFound => {
                        // missing in both layouts
                        return Err(Error::KeyNotFound);
                    }
                    Err(err) => {
                        return Err(Error::IO(err));
                    }
                };
                data
            }
            Err(err) => {
                return Err(Error::IO(err));
            }
        };

        Ok(data)
    }

    /// Write a blob under the sharded layout `root/<hh>/<hex>`.
    async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
        let file_name = hex::encode(key);
        let dir_path = self.root.join(&file_name[0..2]);

        fs::create_dir_all(&dir_path).await?;

        let file_path = dir_path.join(file_name);
        fs::write(file_path, blob).await?;
        Ok(())
    }

    fn routes(&self) -> Vec<Route> {
        // single route covering the full key space, pointing back at root
        let r = Route::url(format!(
            "dir://{}",
            String::from_utf8_lossy(self.root.as_os_str().as_bytes())
        ));

        vec![r]
    }
}
|
||||
73
rfs/src/store/http.rs
Normal file
73
rfs/src/store/http.rs
Normal file
@@ -0,0 +1,73 @@
|
||||
use super::{Error, Result, Route, Store};
|
||||
use reqwest::{self, StatusCode};
|
||||
use url::Url;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct HTTPStore {
|
||||
url: Url,
|
||||
}
|
||||
|
||||
impl HTTPStore {
|
||||
pub async fn make<U: AsRef<str>>(url: &U) -> Result<HTTPStore> {
|
||||
let u = Url::parse(url.as_ref())?;
|
||||
if u.scheme() != "http" && u.scheme() != "https" {
|
||||
return Err(Error::Other(anyhow::Error::msg("invalid scheme")));
|
||||
}
|
||||
|
||||
Ok(HTTPStore::new(u).await?)
|
||||
}
|
||||
pub async fn new<U: Into<Url>>(url: U) -> Result<Self> {
|
||||
let url = url.into();
|
||||
Ok(Self { url })
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
impl Store for HTTPStore {
    /// Fetch a blob by key. Tries the sharded URL `<base>/<hh>/<hex>`
    /// first (hh = first two hex chars of the key); on 404 it retries the
    /// legacy flat URL `<base>/<hex>`.
    async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
        let file = hex::encode(key);
        let mut file_path = self.url.clone();
        file_path
            .path_segments_mut()
            .map_err(|_| Error::Other(anyhow::Error::msg("cannot be base")))?
            .push(&file[0..2])
            .push(&file);
        let mut legacy_path = self.url.clone();

        legacy_path
            .path_segments_mut()
            .map_err(|_| Error::Other(anyhow::Error::msg("cannot be base")))?
            .push(&file);

        let data = match reqwest::get(file_path).await {
            Ok(mut response) => {
                if response.status() == StatusCode::NOT_FOUND {
                    // sharded path missing — fall back to the legacy layout
                    response = reqwest::get(legacy_path)
                        .await
                        .map_err(|_| Error::KeyNotFound)?;
                    if response.status() != StatusCode::OK {
                        return Err(Error::KeyNotFound);
                    }
                }
                // any other non-200 means the store itself is unhealthy
                if response.status() != StatusCode::OK {
                    return Err(Error::Unavailable);
                }
                response.bytes().await.map_err(|e| Error::Other(e.into()))?
            }
            Err(err) => return Err(Error::Other(err.into())),
        };
        Ok(data.into())
    }

    /// HTTP stores are read-only; writing always fails.
    async fn set(&self, _key: &[u8], _blob: &[u8]) -> Result<()> {
        Err(Error::Other(anyhow::Error::msg(
            "http store doesn't support uploading",
        )))
    }

    fn routes(&self) -> Vec<Route> {
        let r = Route::url(self.url.clone());

        vec![r]
    }
}
|
||||
240
rfs/src/store/mod.rs
Normal file
240
rfs/src/store/mod.rs
Normal file
@@ -0,0 +1,240 @@
|
||||
mod bs;
|
||||
pub mod dir;
|
||||
pub mod http;
|
||||
mod router;
|
||||
pub mod s3store;
|
||||
pub mod zdb;
|
||||
|
||||
use anyhow::Context;
|
||||
use rand::seq::SliceRandom;
|
||||
|
||||
pub use bs::BlockStore;
|
||||
use regex::Regex;
|
||||
|
||||
use crate::fungi;
|
||||
|
||||
pub use self::router::Router;
|
||||
|
||||
pub async fn make<U: AsRef<str>>(u: U) -> Result<Stores> {
|
||||
let parsed = url::Url::parse(u.as_ref())?;
|
||||
|
||||
match parsed.scheme() {
|
||||
dir::SCHEME => return Ok(Stores::Dir(dir::DirStore::make(&u).await?)),
|
||||
"s3" | "s3s" | "s3s+tls" => return Ok(Stores::S3(s3store::S3Store::make(&u).await?)),
|
||||
"zdb" => return Ok(Stores::ZDB(zdb::ZdbStore::make(&u).await?)),
|
||||
"http" | "https" => return Ok(Stores::HTTP(http::HTTPStore::make(&u).await?)),
|
||||
_ => return Err(Error::UnknownStore(parsed.scheme().into())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors produced by store implementations and the router.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error("key not found")]
    KeyNotFound,
    #[error("invalid key")]
    InvalidKey,
    #[error("invalid blob")]
    InvalidBlob,
    // no configured store's range covers the key's first byte
    #[error("key is not routable")]
    KeyNotRoutable,
    #[error("store is not available")]
    Unavailable,

    #[error("compression error: {0}")]
    Compression(#[from] snap::Error),

    #[error("encryption error")]
    EncryptionError,

    // TODO: better display for the Box<Vec<Self>>
    // aggregated per-store errors from a routed get
    #[error("multiple error: {0:?}")]
    Multiple(Box<Vec<Self>>),

    #[error("io error: {0}")]
    IO(#[from] std::io::Error),

    #[error("url parse error: {0}")]
    Url(#[from] url::ParseError),
    #[error("unknown store type '{0}'")]
    UnknownStore(String),
    #[error("invalid schema '{0}' expected '{1}'")]
    InvalidScheme(String, String),

    // catch-all wrapper for store-specific failures
    #[error("unknown store error {0:#}")]
    Other(#[from] anyhow::Error),
}

/// Convenience alias used across the store module.
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
/// Describes which key-prefix byte range a store serves and how to reach
/// it. `None` bounds mean "unbounded" on that side (the consumer defaults
/// them to u8::MIN/u8::MAX — see `pack`).
pub struct Route {
    pub start: Option<u8>,
    pub end: Option<u8>,
    pub url: String,
}

impl Route {
    /// Route with no range restriction (covers the full key space).
    pub fn url<S: Into<String>>(s: S) -> Self {
        Self {
            start: None,
            end: None,
            url: s.into(),
        }
    }
}
|
||||
/// The store trait defines a simple (low level) key/value store interface to set/get blobs
/// the concern of the store is to only store given data with given key and implement
/// the means to retrieve it again once a get is called.
#[async_trait::async_trait]
pub trait Store: Send + Sync + 'static {
    /// fetch the blob stored under `key`
    async fn get(&self, key: &[u8]) -> Result<Vec<u8>>;
    /// store `blob` under `key`
    async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()>;
    /// the key ranges (and urls) this store serves
    fn routes(&self) -> Vec<Route>;
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<S> Store for Router<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
|
||||
if key.is_empty() {
|
||||
return Err(Error::InvalidKey);
|
||||
}
|
||||
let mut errors = Vec::default();
|
||||
|
||||
// to make it fare we shuffle the list of matching routers randomly everytime
|
||||
// before we do a get
|
||||
let mut routers: Vec<&S> = self.route(key[0]).collect();
|
||||
routers.shuffle(&mut rand::thread_rng());
|
||||
for store in routers {
|
||||
match store.get(key).await {
|
||||
Ok(object) => return Ok(object),
|
||||
Err(err) => errors.push(err),
|
||||
};
|
||||
}
|
||||
|
||||
if errors.is_empty() {
|
||||
return Err(Error::KeyNotRoutable);
|
||||
}
|
||||
|
||||
// return aggregated errors
|
||||
return Err(Error::Multiple(Box::new(errors)));
|
||||
}
|
||||
|
||||
async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
|
||||
if key.is_empty() {
|
||||
return Err(Error::InvalidKey);
|
||||
}
|
||||
|
||||
let mut b = false;
|
||||
for store in self.route(key[0]) {
|
||||
b = true;
|
||||
store.set(key, blob).await?;
|
||||
}
|
||||
|
||||
if !b {
|
||||
return Err(Error::KeyNotRoutable);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn routes(&self) -> Vec<Route> {
|
||||
let mut routes = Vec::default();
|
||||
for (key, value) in self.routes.iter() {
|
||||
for sub in value.routes() {
|
||||
let r = Route {
|
||||
start: Some(sub.start.unwrap_or(*key.start())),
|
||||
end: Some(sub.end.unwrap_or(*key.end())),
|
||||
url: sub.url,
|
||||
};
|
||||
routes.push(r);
|
||||
}
|
||||
}
|
||||
|
||||
routes
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a Router from the routes stored inside an FL's metadata,
/// instantiating each store from its recorded URL via `make`.
pub async fn get_router(meta: &fungi::Reader) -> Result<Router<Stores>> {
    let mut router = Router::new();

    for route in meta.routes().await.context("failed to get store routes")? {
        let store = make(&route.url)
            .await
            .with_context(|| format!("failed to initialize store '{}'", route.url))?;
        router.add(route.start, route.end, store);
    }

    Ok(router)
}
|
||||
|
||||
/// Build a Router from CLI store URLs of the form `[xx-xx=]<url>`, where
/// the optional `xx-xx` prefix is a lowercase-hex byte range used for
/// sharding. A bare URL covers the full range 00-ff.
pub async fn parse_router(urls: &[String]) -> anyhow::Result<Router<Stores>> {
    let mut router = Router::new();
    // `aa-bb=<url>`: two 2-digit lowercase hex bytes, then the store url
    let pattern = r"^(?P<range>[0-9a-f]{2}-[0-9a-f]{2})=(?P<url>.+)$";
    let re = Regex::new(pattern)?;

    for u in urls {
        let ((start, end), store) = match re.captures(u) {
            // no range prefix: the store serves the whole key space
            None => ((0x00, 0xff), make(u).await?),
            Some(captures) => {
                let url = captures.name("url").context("missing url group")?.as_str();
                let rng = captures
                    .name("range")
                    .context("missing range group")?
                    .as_str();

                let store = make(url).await?;
                let range = match rng.split_once('-') {
                    None => anyhow::bail!("invalid range format"),
                    Some((low, high)) => (
                        u8::from_str_radix(low, 16)
                            .with_context(|| format!("failed to parse low range '{}'", low))?,
                        u8::from_str_radix(high, 16)
                            .with_context(|| format!("failed to parse high range '{}'", high))?,
                    ),
                };
                (range, store)
            }
        };

        router.add(start, end, store);
    }

    Ok(router)
}
|
||||
|
||||
/// Concrete store back-ends supported by rfs, unified in one enum so a
/// `Router<Stores>` can hold a heterogeneous set of stores behind a single
/// `Store` implementation.
pub enum Stores {
    /// s3-compatible bucket store (see `s3store`).
    S3(s3store::S3Store),
    /// local directory store (see `dir`).
    Dir(dir::DirStore),
    /// zdb (0-db) store over the redis protocol (see `zdb`).
    ZDB(zdb::ZdbStore),
    /// HTTP store (see `http`).
    HTTP(http::HTTPStore),
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Store for Stores {
|
||||
async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
|
||||
match self {
|
||||
self::Stores::S3(s3_store) => s3_store.get(key).await,
|
||||
self::Stores::Dir(dir_store) => dir_store.get(key).await,
|
||||
self::Stores::ZDB(zdb_store) => zdb_store.get(key).await,
|
||||
self::Stores::HTTP(http_store) => http_store.get(key).await,
|
||||
}
|
||||
}
|
||||
async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
|
||||
match self {
|
||||
self::Stores::S3(s3_store) => s3_store.set(key, blob).await,
|
||||
self::Stores::Dir(dir_store) => dir_store.set(key, blob).await,
|
||||
self::Stores::ZDB(zdb_store) => zdb_store.set(key, blob).await,
|
||||
self::Stores::HTTP(http_store) => http_store.set(key, blob).await,
|
||||
}
|
||||
}
|
||||
fn routes(&self) -> Vec<Route> {
|
||||
match self {
|
||||
self::Stores::S3(s3_store) => s3_store.routes(),
|
||||
self::Stores::Dir(dir_store) => dir_store.routes(),
|
||||
self::Stores::ZDB(zdb_store) => zdb_store.routes(),
|
||||
self::Stores::HTTP(http_store) => http_store.routes(),
|
||||
}
|
||||
}
|
||||
}
|
||||
56
rfs/src/store/router.rs
Normal file
56
rfs/src/store/router.rs
Normal file
@@ -0,0 +1,56 @@
|
||||
use std::ops::RangeInclusive;
|
||||
|
||||
/// route implements a naive prefix router by going through the complete set of
/// available routers and find that ones that matches this given prefix
#[derive(Default, Clone)]
pub struct Router<T> {
    pub(crate) routes: Vec<(RangeInclusive<u8>, T)>,
}

impl<T> Router<T> {
    /// Create an empty router with no registered ranges.
    pub fn new() -> Self {
        Self { routes: Vec::new() }
    }

    /// Register `route` for keys whose routing byte lies in `start..=end`
    /// (both bounds inclusive). Overlapping ranges are allowed.
    pub fn add(&mut self, start: u8, end: u8, route: T) {
        self.routes.push((start..=end, route));
    }

    /// return all stores that matches a certain key
    ///
    /// Matches are yielded in the order they were added.
    ///
    /// TODO: may be they need to be randomized
    pub fn route(&self, i: u8) -> impl Iterator<Item = &T> {
        self.routes
            .iter()
            .filter(move |(range, _)| range.contains(&i))
            .map(|(_, route)| route)
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test() {
        let mut router = Router::default();

        // overlapping ranges are allowed; all matching routes are returned
        router.add(0, 255, "a");
        router.add(0, 255, "b");
        router.add(0, 128, "c");

        // 200 lies outside 0..=128, so only "a" and "b" match
        let paths: Vec<&str> = router.route(200).map(|v| *v).collect();
        assert_eq!(paths.len(), 2);
        assert_eq!(paths[0], "a");
        assert_eq!(paths[1], "b");

        // 0 lies inside every range; results keep insertion order
        let paths: Vec<&str> = router.route(0).map(|v| *v).collect();
        assert_eq!(paths.len(), 3);
        assert_eq!(paths[0], "a");
        assert_eq!(paths[1], "b");
        assert_eq!(paths[2], "c");
    }
}
|
||||
191
rfs/src/store/s3store.rs
Normal file
191
rfs/src/store/s3store.rs
Normal file
@@ -0,0 +1,191 @@
|
||||
use super::{Error, Result, Route, Store};
|
||||
|
||||
use anyhow::Context;
|
||||
use s3::{creds::Credentials, error::S3Error, Bucket, Region};
|
||||
use url::Url;
|
||||
|
||||
/// Parse an s3 store URL of the form
/// `s3[s]://<access-key>:<secret>@<host>:<port>/<bucket>?region=<region>`
/// into credentials, a custom region (endpoint), and the bucket name.
///
/// The `s3` scheme maps to a plain-http endpoint; `s3+tls` and `s3s` map to
/// https. NOTE: an explicit port is required — URLs without one are rejected.
fn get_config<U: AsRef<str>>(u: U) -> Result<(Credentials, Region, String)> {
    let url = Url::parse(u.as_ref())?;

    let access_key = url.username().to_string();
    let access_secret = url.password().map(|s| s.to_owned());

    let host = url.host_str().context("host not found")?;
    let port = url.port().context("port not found")?;
    let scheme = match url.scheme() {
        "s3" => "http://",
        "s3+tls" | "s3s" => "https://",
        _ => return Err(Error::Other(anyhow::Error::msg("invalid scheme"))),
    };

    let endpoint = format!("{}{}:{}", scheme, host, port);

    // the URL path (minus leading '/') is the bucket name
    let bucket_name = url.path().trim_start_matches('/').to_string();

    // optional ?region=... query parameter; empty region when absent
    let region_name = url
        .query_pairs()
        .find(|(key, _)| key == "region")
        .map(|(_, value)| value.to_string())
        .unwrap_or_default();

    Ok((
        Credentials {
            access_key: Some(access_key),
            secret_key: access_secret,
            security_token: None,
            session_token: None,
            expiration: None,
        },
        Region::Custom {
            region: region_name,
            endpoint,
        },
        bucket_name,
    ))
}
|
||||
|
||||
/// Store implementation backed by an s3-compatible bucket.
#[derive(Clone)]
pub struct S3Store {
    bucket: Bucket,
    // original store URL, echoed back by `routes()`
    url: String,
    // this is only here as a work around for this bug https://github.com/durch/rust-s3/issues/337
    // because rfs uses the store in async (and parallel) matter to upload/download blobs
    // we need to synchronize this locally in that store which will hurt performance
    // the 2 solutions now is to either wait until this bug is fixed, or switch to another client
    // but for now we keep this work around
    // NOTE(review): the comment above describes a synchronization work-around but no
    // corresponding field exists — confirm whether the comment is stale.
}
|
||||
|
||||
impl S3Store {
    /// Build an [`S3Store`] from a store URL (see `get_config` for the
    /// accepted URL format).
    pub async fn make<U: AsRef<str>>(url: &U) -> Result<S3Store> {
        let (cred, region, bucket_name) = get_config(url.as_ref())?;
        Ok(S3Store::new(url.as_ref(), &bucket_name, region, cred)?)
    }

    /// Create a store from already-parsed bucket configuration.
    ///
    /// Path-style addressing is enabled on the bucket.
    pub fn new(url: &str, bucket_name: &str, region: Region, cred: Credentials) -> Result<Self> {
        let bucket = Bucket::new(bucket_name, region, cred)
            .context("failed instantiate bucket")?
            .with_path_style();

        Ok(Self {
            bucket,
            url: url.to_owned(),
        })
    }
}
|
||||
|
||||
#[async_trait::async_trait]
impl Store for S3Store {
    /// Fetch a blob by key; keys are stored hex-encoded as object names.
    async fn get(&self, key: &[u8]) -> super::Result<Vec<u8>> {
        match self.bucket.get_object(hex::encode(key)).await {
            Ok(res) => Ok(res.to_vec()),
            // a 404 from the bucket maps to the store-level KeyNotFound
            Err(S3Error::HttpFailWithBody(404, _)) => Err(Error::KeyNotFound),
            Err(S3Error::Io(err)) => Err(Error::IO(err)),
            Err(err) => Err(anyhow::Error::from(err).into()),
        }
    }

    /// Upload a blob under its hex-encoded key.
    async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
        self.bucket
            .put_object(hex::encode(key), blob)
            .await
            .context("put object over s3 storage")?;

        Ok(())
    }

    /// A single route covering this store's original URL.
    fn routes(&self) -> Vec<Route> {
        vec![Route::url(self.url.clone())]
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    // s3s scheme must produce an https endpoint
    #[test]
    fn test_get_config() {
        let (cred, region, bucket_name) =
            get_config("s3s://minioadmin:minioadmin@127.0.0.1:9000/mybucket?region=minio").unwrap();
        assert_eq!(
            cred,
            Credentials {
                access_key: Some("minioadmin".to_string()),
                secret_key: Some("minioadmin".to_string()),
                security_token: None,
                session_token: None,
                expiration: None,
            }
        );
        assert_eq!(
            region,
            Region::Custom {
                region: "minio".to_string(),
                endpoint: "https://127.0.0.1:9000".to_string()
            }
        );
        assert_eq!(bucket_name, "mybucket".to_string())
    }

    // plain s3 scheme must produce an http endpoint
    #[test]
    fn test_get_config_without_tls() {
        let (cred, region, bucket_name) =
            get_config("s3://minioadmin:minioadmin@127.0.0.1:9000/mybucket?region=minio").unwrap();
        assert_eq!(
            cred,
            Credentials {
                access_key: Some("minioadmin".to_string()),
                secret_key: Some("minioadmin".to_string()),
                security_token: None,
                session_token: None,
                expiration: None,
            }
        );
        assert_eq!(
            region,
            Region::Custom {
                region: "minio".to_string(),
                endpoint: "http://127.0.0.1:9000".to_string()
            }
        );
        assert_eq!(bucket_name, "mybucket".to_string())
    }

    // round-trip against a live minio instance; ignored because it needs a
    // running server at 127.0.0.1:9000
    #[ignore]
    #[tokio::test]
    async fn test_set_get() {
        let url = "s3://minioadmin:minioadmin@127.0.0.1:9000/mybucket?region=minio";
        let (cred, region, bucket_name) = get_config(url).unwrap();

        let store = S3Store::new(url, &bucket_name, region, cred);
        let store = store.unwrap();

        let key = b"test.txt";
        let blob = b"# Hello, World!";

        _ = store.set(key, blob).await;

        let get_res = store.get(key).await;
        let get_res = get_res.unwrap();

        assert_eq!(get_res, blob)
    }

    // same round-trip but with no ?region= in the URL (empty region name)
    #[ignore]
    #[tokio::test]
    async fn test_set_get_without_region() {
        let url = "s3://minioadmin:minioadmin@127.0.0.1:9000/mybucket";
        let (cred, region, bucket_name) = get_config(url).unwrap();

        let store = S3Store::new(url, &bucket_name, region, cred);
        let store = store.unwrap();

        let key = b"test2.txt";
        let blob = b"# Hello, World!";

        _ = store.set(key, blob).await;

        let get_res = store.get(key).await;
        let get_res = get_res.unwrap();

        assert_eq!(get_res, blob)
    }
}
|
||||
176
rfs/src/store/zdb.rs
Normal file
176
rfs/src/store/zdb.rs
Normal file
@@ -0,0 +1,176 @@
|
||||
use super::{Error, Result, Route, Store};
|
||||
use anyhow::Context;
|
||||
|
||||
use bb8_redis::{
|
||||
bb8::{CustomizeConnection, Pool},
|
||||
redis::{
|
||||
aio::Connection, cmd, AsyncCommands, ConnectionAddr, ConnectionInfo, RedisConnectionInfo,
|
||||
RedisError,
|
||||
},
|
||||
RedisConnectionManager,
|
||||
};
|
||||
|
||||
/// Pool connection customizer that selects a zdb namespace (and optionally
/// authenticates) on every freshly acquired connection.
#[derive(Debug)]
struct WithNamespace {
    // namespace to SELECT; `None` or "default" leaves the connection untouched
    namespace: Option<String>,
    // extra SELECT argument used when the namespace requires a password
    password: Option<String>,
}
|
||||
|
||||
#[async_trait::async_trait]
impl CustomizeConnection<Connection, RedisError> for WithNamespace {
    /// Run `SELECT <ns> [password]` on a new connection so all later commands
    /// operate inside the configured namespace. The "default" namespace (or
    /// no namespace at all) needs no SELECT and is skipped.
    async fn on_acquire(&self, connection: &mut Connection) -> anyhow::Result<(), RedisError> {
        match self.namespace {
            Some(ref ns) if ns != "default" => {
                let mut c = cmd("SELECT");
                let c = c.arg(ns);
                if let Some(ref password) = self.password {
                    c.arg(password);
                }

                let result = c.query_async(connection).await;
                if let Err(ref err) = result {
                    // log here as well so the failure cause isn't lost inside
                    // the pool's error handling
                    error!("failed to switch namespace to {}: {}", ns, err);
                }
                result
            }
            _ => Ok(()),
        }
    }
}
|
||||
|
||||
// NOTE(review): no impl for this type is visible in this file — confirm it is
// actually used, or remove it.
pub struct ZdbStoreFactory;
|
||||
|
||||
/// Parse a zdb URL into redis connection info plus an optional namespace.
///
/// Supported forms:
/// - `zdb://[user[:password]@]host[:port][/namespace]` — TCP; the port
///   defaults to 9900 and the last path segment (if any) is the namespace.
/// - `zdb:///path/to/socket` — no host means the path is a unix socket
///   (no namespace in this form).
fn get_connection_info<U: AsRef<str>>(u: U) -> Result<(ConnectionInfo, Option<String>)> {
    let u = url::Url::parse(u.as_ref())?;

    let (address, namespace) = match u.host() {
        Some(host) => {
            let addr = match host {
                url::Host::Domain(domain) => domain.to_owned(),
                url::Host::Ipv4(ipv4) => ipv4.to_string(),
                url::Host::Ipv6(ipv6) => ipv6.to_string(),
            };

            // zdb's default listening port is 9900
            let addr = ConnectionAddr::Tcp(addr, u.port().unwrap_or(9900));
            // last path segment, when present, names the namespace
            let ns: Option<String> = u
                .path_segments()
                .and_then(|s| s.last().map(|s| s.to_owned()));
            (addr, ns)
        }
        None => (ConnectionAddr::Unix(u.path().into()), None),
    };

    Ok((
        ConnectionInfo {
            addr: address,
            redis: RedisConnectionInfo {
                db: 0,
                username: if u.username().is_empty() {
                    None
                } else {
                    Some(u.username().into())
                },
                password: u.password().map(|s| s.into()),
            },
        },
        namespace,
    ))
}
|
||||
|
||||
/// Store implementation backed by zdb (0-db), spoken to over the redis
/// protocol through a bb8 connection pool.
#[derive(Clone)]
pub struct ZdbStore {
    // original store URL, echoed back by `routes()`
    url: String,
    pool: Pool<RedisConnectionManager>,
}
|
||||
|
||||
impl ZdbStore {
    /// Connect to zdb at `url` and build a pooled store (max 20 connections).
    ///
    /// The namespace and password from the URL are not part of the redis
    /// connection info itself; they are applied per-connection through the
    /// `WithNamespace` customizer — note the password is *moved out* of the
    /// connection info so it is only sent with the SELECT command.
    pub async fn make<U: AsRef<str>>(url: &U) -> Result<ZdbStore> {
        let (mut info, namespace) = get_connection_info(url.as_ref())?;

        let namespace = WithNamespace {
            namespace,
            password: info.redis.password.take(),
        };

        log::debug!("connection {:#?}", info);
        log::debug!("switching namespace to: {:?}", namespace.namespace);

        let mgr = RedisConnectionManager::new(info)
            .context("failed to create redis connection manager")?;

        let pool = Pool::builder()
            .max_size(20)
            .connection_customizer(Box::new(namespace))
            .build(mgr)
            .await
            .context("failed to create connection pool")?;

        Ok(ZdbStore {
            url: url.as_ref().to_string(),
            pool,
        })
    }
}
|
||||
|
||||
#[async_trait::async_trait]
impl Store for ZdbStore {
    /// Fetch a blob by key.
    ///
    /// A missing key maps to `Error::KeyNotFound`; an empty payload is
    /// rejected as `Error::InvalidBlob`.
    async fn get(&self, key: &[u8]) -> super::Result<Vec<u8>> {
        let mut con = self.pool.get().await.context("failed to get connection")?;

        let result: Option<Vec<u8>> = con.get(key).await.context("failed to get blob")?;
        let result = result.ok_or(Error::KeyNotFound)?;

        if result.is_empty() {
            return Err(Error::InvalidBlob);
        }

        Ok(result)
    }

    /// Store a blob under `key`.
    ///
    /// If the key already exists the write is skipped and treated as success
    /// — presumably because blobs are content-addressed so an existing key
    /// holds identical data; TODO confirm with the callers.
    async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
        let mut con = self.pool.get().await.context("failed to get connection")?;

        if con
            .exists(key)
            .await
            .context("failed to check if blob exists")?
        {
            return Ok(());
        };

        con.set(key, blob).await.context("failed to set blob")?;

        Ok(())
    }

    /// A single route covering this store's original URL.
    fn routes(&self) -> Vec<Route> {
        vec![Route::url(self.url.clone())]
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    // no path ⇒ no namespace; explicit port is honored
    #[test]
    fn test_connection_info_simple() {
        let (info, ns) = get_connection_info("zdb://hub.grid.tf:9900").unwrap();
        assert_eq!(ns, None);
        assert_eq!(info.addr, ConnectionAddr::Tcp("hub.grid.tf".into(), 9900));
    }

    // path segment becomes the namespace; port falls back to the 9900 default
    #[test]
    fn test_connection_info_ns() {
        let (info, ns) = get_connection_info("zdb://username@hub.grid.tf/custom").unwrap();
        assert_eq!(ns, Some("custom".into()));
        assert_eq!(info.addr, ConnectionAddr::Tcp("hub.grid.tf".into(), 9900));
        assert_eq!(info.redis.username, Some("username".into()));
    }

    // no host ⇒ the URL path is taken as a unix socket path
    #[test]
    fn test_connection_info_unix() {
        let (info, ns) = get_connection_info("zdb:///path/to/socket").unwrap();
        assert_eq!(ns, None);
        assert_eq!(info.addr, ConnectionAddr::Unix("/path/to/socket".into()));
    }
}
|
||||
184
rfs/src/unpack.rs
Normal file
184
rfs/src/unpack.rs
Normal file
@@ -0,0 +1,184 @@
|
||||
use crate::cache::Cache;
|
||||
use crate::fungi::{
|
||||
meta::{FileType, Inode, Result, Walk, WalkVisitor},
|
||||
Reader,
|
||||
};
|
||||
use crate::store::Store;
|
||||
use anyhow::Context;
|
||||
use nix::unistd::{fchownat, FchownatFlags, Gid, Uid};
|
||||
use std::fs::Permissions;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::Path;
|
||||
use std::{ffi::OsStr, fs};
|
||||
use tokio::fs::OpenOptions;
|
||||
|
||||
/// unpack an FL to the given root location. it will download the files and reconstruct
/// the filesystem.
///
/// * `meta` - flist metadata reader to walk
/// * `cache` - blob cache used to download file contents
/// * `root` - destination directory; all entries are created under it
/// * `preserve` - when true, restore uid/gid ownership on each entry
pub async fn unpack<P: AsRef<Path>, S: Store>(
    meta: &Reader,
    cache: &Cache<S>,
    root: P,
    preserve: bool,
) -> Result<()> {
    let mut visitor = CopyVisitor::new(meta, cache, root.as_ref(), preserve);

    meta.walk(&mut visitor).await
}
|
||||
|
||||
/// Walk visitor that materializes flist entries on disk under a root
/// directory.
struct CopyVisitor<'a, S>
where
    S: Store,
{
    // restore uid/gid ownership when true
    preserve: bool,
    // metadata reader, used to look up file block lists
    meta: &'a Reader,
    // blob cache used to download file contents
    cache: &'a Cache<S>,
    // destination directory all entry paths are rebased under
    root: &'a Path,
}
|
||||
|
||||
impl<'a, S> CopyVisitor<'a, S>
where
    S: Store,
{
    /// Bundle the unpack state; see [`unpack`] for the meaning of each
    /// argument.
    pub fn new(meta: &'a Reader, cache: &'a Cache<S>, root: &'a Path, preserve: bool) -> Self {
        Self {
            meta,
            cache,
            root,
            preserve,
        }
    }
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<'a, S> WalkVisitor for CopyVisitor<'a, S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
|
||||
let rooted = self.root.join(path.strip_prefix("/").unwrap());
|
||||
|
||||
match node.mode.file_type() {
|
||||
FileType::Dir => {
|
||||
fs::create_dir_all(&rooted)
|
||||
.with_context(|| format!("failed to create directory '{:?}'", rooted))?;
|
||||
}
|
||||
FileType::Regular => {
|
||||
let mut fd = OpenOptions::new()
|
||||
.create_new(true)
|
||||
.write(true)
|
||||
.truncate(true)
|
||||
.open(&rooted)
|
||||
.await
|
||||
.with_context(|| format!("failed to create file '{:?}'", rooted))?;
|
||||
|
||||
let blocks = self.meta.blocks(node.ino).await?;
|
||||
self.cache
|
||||
.direct(&blocks, &mut fd)
|
||||
.await
|
||||
.with_context(|| format!("failed to download file '{:?}'", rooted))?;
|
||||
|
||||
fd.set_permissions(Permissions::from_mode(node.mode.mode()))
|
||||
.await?;
|
||||
}
|
||||
FileType::Link => {
|
||||
let target = node
|
||||
.data
|
||||
.as_deref()
|
||||
.ok_or_else(|| anyhow::anyhow!("link has no target path"))?;
|
||||
|
||||
let target = Path::new(OsStr::from_bytes(target));
|
||||
let target = if target.is_relative() {
|
||||
target.to_owned()
|
||||
} else {
|
||||
self.root.join(target)
|
||||
};
|
||||
|
||||
std::os::unix::fs::symlink(target, &rooted)
|
||||
.with_context(|| format!("failed to create symlink '{:?}'", rooted))?;
|
||||
}
|
||||
_ => {
|
||||
warn!("unknown file kind: {:?}", node.mode.file_type());
|
||||
return Ok(Walk::Continue);
|
||||
}
|
||||
};
|
||||
|
||||
if self.preserve {
|
||||
fchownat(
|
||||
None,
|
||||
&rooted,
|
||||
Some(Uid::from_raw(node.uid)),
|
||||
Some(Gid::from_raw(node.gid)),
|
||||
FchownatFlags::NoFollowSymlink,
|
||||
)
|
||||
.with_context(|| format!("failed to change ownership of '{:?}'", &rooted))?;
|
||||
}
|
||||
|
||||
Ok(Walk::Continue)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
TODO: parallel download ?
|
||||
|
||||
this is a download worker that can be used in a worker pool to download files
|
||||
in parallel
|
||||
|
||||
struct Downloader<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
cache: Arc<Cache<S>>,
|
||||
}
|
||||
|
||||
impl<S> Downloader<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
async fn download(&self, path: &Path, blocks: &[Block], mode: u32) -> Result<()> {
|
||||
let mut fd = OpenOptions::new()
|
||||
.create_new(true)
|
||||
.write(true)
|
||||
.truncate(true)
|
||||
.open(&path)
|
||||
.await
|
||||
.with_context(|| format!("failed to create file '{:?}'", path))?;
|
||||
|
||||
self.cache
|
||||
.direct(&blocks, &mut fd)
|
||||
.await
|
||||
.with_context(|| format!("failed to download file '{:?}'", path))?;
|
||||
|
||||
fd.set_permissions(Permissions::from_mode(mode)).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Clone for Downloader<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
cache: Arc::clone(&self.cache),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<S> workers::Work for Downloader<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
type Input = (PathBuf, Vec<Block>, Mode);
|
||||
type Output = ();
|
||||
|
||||
async fn run(&mut self, (path, blocks, mode): Self::Input) -> Self::Output {
|
||||
if let Err(err) = self.download(&path, &blocks, mode.mode()).await {
|
||||
log::error!("failed to download file {:?}: {}", path, err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
*/
|
||||
Reference in New Issue
Block a user