feat: Create minimal Zero-OS initramfs with console support
- Fixed build system to clone source repositories instead of downloading binaries
- Enhanced scripts/fetch-github.sh with proper git repo cloning and branch handling
- Updated scripts/compile-components.sh for RFS compilation with the build-binary feature
- Added minimal firmware installation for essential network drivers (73 modules)
- Created comprehensive zinit configuration set (15 config files, including getty)
- Added util-linux package for getty/agetty console support
- Optimized package selection for a minimal 27MB initramfs footprint
- Successfully builds a bootable vmlinuz.efi with embedded initramfs
- Confirmed working: VM boot, console login, network drivers, zinit init system

Components:
- initramfs.cpio.xz: 27MB compressed minimal Zero-OS image
- vmlinuz.efi: 35MB bootable kernel with embedded initramfs
- Complete Zero-OS toolchain: zinit, rfs, mycelium compiled from source
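For reference, the VM boot result described above can be smoke-tested along these lines (a minimal sketch; the QEMU flags and artifact path are assumptions, not part of the build scripts):

```bash
# Boot the built kernel with its embedded initramfs on a serial console.
# Adjust the path to wherever the build places vmlinuz.efi.
qemu-system-x86_64 \
  -m 2048 \
  -kernel vmlinuz.efi \
  -append "console=ttyS0" \
  -nographic
```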
This commit is contained in:
55
Cargo.toml
@@ -1,55 +0,0 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
"components/zinit",
|
||||
"components/mycelium",
|
||||
"components/rfs",
|
||||
]
|
||||
|
||||
[workspace.dependencies]
|
||||
# Common dependencies used across Zero-OS components
|
||||
tokio = { version = "1.0", features = ["full"] }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
clap = { version = "4.0", features = ["derive"] }
|
||||
anyhow = "1.0"
|
||||
thiserror = "1.0"
|
||||
log = "0.4"
|
||||
env_logger = "0.10"
|
||||
futures = "0.3"
|
||||
async-trait = "0.1"
|
||||
|
||||
# Networking
|
||||
reqwest = { version = "0.11", features = ["json"] }
|
||||
hyper = { version = "0.14", features = ["full"] }
|
||||
tonic = { version = "0.10", features = ["default"] }
|
||||
|
||||
# Crypto and encoding
|
||||
base64 = "0.21"
|
||||
hex = "0.4"
|
||||
sha2 = "0.10"
|
||||
rand = "0.8"
|
||||
|
||||
# System and filesystem
|
||||
nix = "0.27"
|
||||
libc = "0.2"
|
||||
tempfile = "3.0"
|
||||
|
||||
# Async runtime and utilities
|
||||
parking_lot = "0.12"
|
||||
dashmap = "5.0"
|
||||
uuid = { version = "1.0", features = ["v4"] }
|
||||
|
||||
[profile.release]
|
||||
# Optimize for size and performance
|
||||
opt-level = "z" # Optimize for size
|
||||
lto = true # Link-time optimization
|
||||
codegen-units = 1 # Better optimization
|
||||
panic = "abort" # Smaller binaries
|
||||
strip = true # Remove symbols
|
||||
|
||||
[profile.dev]
|
||||
# Faster debug builds
|
||||
opt-level = 1 # Some optimization for faster debug builds
|
||||
debug = true # Debug info
|
||||
split-debuginfo = "unpacked" # Faster linking on macOS/Windows
|
||||
45
README.md
@@ -6,8 +6,8 @@ Modern Alpine Linux based approach to building Zero-OS initramfs with source com
|
||||
|
||||
This system uses:
|
||||
- **Alpine Linux 3.22** as base system with latest packages
|
||||
- **Rust workspace** with git subtrees for Zero-OS Rust components (zinit, mycelium, rfs)
|
||||
- **Unified compilation** via `cargo build --workspace` for all Rust components
|
||||
- **Individual Rust workspaces** with git subtrees for Zero-OS components (zinit, mycelium, rfs)
|
||||
- **Component-based compilation** - each component built in its own workspace
|
||||
- **Alpine packages** for system tools (busybox, openssh, btrfs-progs, etc.)
|
||||
- **Smart caching** with Docker layers and volumes
|
||||
- **Same init flow** as original Zero-OS
|
||||
@@ -16,15 +16,14 @@ This system uses:
|
||||
|
||||
```
|
||||
alpine-initramfs/
|
||||
├── Cargo.toml # Rust workspace configuration
|
||||
├── build/ # Build orchestration
|
||||
│ ├── Dockerfile.alpine # Alpine build environment
|
||||
│ ├── Dockerfile.cached # Multi-stage cached build
|
||||
│ └── docker-compose.yml # Build orchestration with caching
|
||||
├── components/ # Git subtrees (Rust workspace members)
|
||||
│ ├── zinit/ # Init system (Rust subtree)
|
||||
│ ├── mycelium/ # Networking layer (Rust subtree)
|
||||
│ └── rfs/ # Filesystem (Rust subtree)
|
||||
├── components/ # Git subtrees (individual workspaces)
|
||||
│ ├── zinit/ # Init system (Rust subtree with workspace)
|
||||
│ ├── mycelium/ # Networking layer (Rust subtree with workspace)
|
||||
│ └── rfs/ # Filesystem (Rust subtree with workspace)
|
||||
├── configs/ # Configuration files
|
||||
│ ├── packages-alpine.txt # Alpine packages to install
|
||||
│ ├── kernel-version # Kernel version to build
|
||||
@@ -54,7 +53,7 @@ alpine-initramfs/
|
||||
## Build Flow
|
||||
|
||||
1. **Environment Setup**: Alpine Docker container with Rust and build tools
|
||||
2. **Workspace Compilation**: Single `cargo build --workspace` for all Rust components
|
||||
2. **Component Compilation**: Individual cargo builds for each Rust component
|
||||
3. **Binary Downloads**: Download CoreX (Go binary) from GitHub releases
|
||||
4. **Package Installation**: Install Alpine packages to initramfs root
|
||||
5. **Initramfs Assembly**: Create filesystem structure with compiled binaries
|
||||
@@ -79,13 +78,13 @@ cd build/
|
||||
# Interactive development shell
|
||||
docker-compose run dev-shell
|
||||
|
||||
# Build entire workspace
|
||||
cargo build --workspace --release --target x86_64-unknown-linux-musl
|
||||
# Build all components (automated)
|
||||
../scripts/compile-components.sh
|
||||
|
||||
# Build specific component
|
||||
cargo build -p zinit --release --target x86_64-unknown-linux-musl
|
||||
cargo build -p mycelium --release --target x86_64-unknown-linux-musl
|
||||
cargo build -p rfs --release --target x86_64-unknown-linux-musl
|
||||
# Build individual components manually:
|
||||
cd ../components/zinit && cargo build --release --target x86_64-unknown-linux-musl
|
||||
cd ../components/mycelium && cargo build --release --target x86_64-unknown-linux-musl
|
||||
cd ../components/rfs && cargo build --release --target x86_64-unknown-linux-musl
|
||||
```
|
||||
|
||||
### Smart Build (with caching)
|
||||
@@ -96,10 +95,10 @@ docker-compose run builder # Uses build-smart.sh automatically
|
||||
|
||||
## Key Features
|
||||
|
||||
- ✅ **Rust workspace** - Unified build system for all Rust components
|
||||
- ✅ **Component workspaces** - Each component maintains its own workspace
|
||||
- ✅ **Git subtrees** - Source code included, no submodule complexity
|
||||
- ✅ **Single build command** - `cargo build --workspace` builds everything
|
||||
- ✅ **Shared dependencies** - Consistent versions across all components
|
||||
- ✅ **Automated build** - `scripts/compile-components.sh` builds all components
|
||||
- ✅ **Shared target directory** - Efficient build artifact sharing
|
||||
- ✅ **Smart caching** - Docker layer and volume caching for fast rebuilds
|
||||
- ✅ **Alpine packages** - System tools from Alpine repositories
|
||||
- ✅ **Development mode** - Full IDE support and integrated tooling
|
||||
@@ -112,13 +111,13 @@ docker-compose run builder # Uses build-smart.sh automatically
|
||||
cd build/
|
||||
docker-compose run dev-shell
|
||||
|
||||
# Inside container - workspace build:
|
||||
cargo build --workspace --release --target x86_64-unknown-linux-musl
|
||||
# Inside container - build all components:
|
||||
../scripts/compile-components.sh
|
||||
|
||||
# Or build specific packages:
|
||||
cargo build -p zinit --release --target x86_64-unknown-linux-musl
|
||||
cargo build -p mycelium --release --target x86_64-unknown-linux-musl
|
||||
cargo build -p rfs --release --target x86_64-unknown-linux-musl
|
||||
# Or build specific components manually:
|
||||
cd ../components/zinit && cargo build --release --target x86_64-unknown-linux-musl
|
||||
cd ../components/mycelium/myceliumd && cargo build --release --target x86_64-unknown-linux-musl
|
||||
cd ../components/rfs && cargo build --release --target x86_64-unknown-linux-musl
|
||||
```
|
||||
|
||||
### Cleanup
|
||||
|
||||
14
components/rfs/.github/workflows/release.yaml
vendored
@@ -31,9 +31,6 @@ jobs:
|
||||
- name: Strip
|
||||
run: |
|
||||
strip target/x86_64-unknown-linux-musl/release/rfs
|
||||
- name: Strip
|
||||
run: |
|
||||
strip target/x86_64-unknown-linux-musl/release/docker2fl
|
||||
- name: Create Release
|
||||
id: create_release
|
||||
uses: actions/create-release@v1
|
||||
@@ -54,14 +51,3 @@ jobs:
|
||||
asset_path: target/x86_64-unknown-linux-musl/release/rfs
|
||||
asset_name: rfs
|
||||
asset_content_type: application/x-pie-executable
|
||||
|
||||
- name: Upload Release Asset for docker2fl
|
||||
id: upload-release-asset-docker2fl
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ steps.create_release.outputs.upload_url }}
|
||||
asset_path: target/x86_64-unknown-linux-musl/release/docker2fl
|
||||
asset_name: docker2fl
|
||||
asset_content_type: application/x-pie-executable
|
||||
|
||||
6
components/rfs/.gitignore
vendored
@@ -2,5 +2,7 @@
|
||||
/tests/*.flist.d
|
||||
result
|
||||
.direnv/
|
||||
fl-server/flists
|
||||
fl-server/config.toml
|
||||
flists
|
||||
config.toml
|
||||
*.db
|
||||
storage/
|
||||
|
||||
96
components/rfs/Cargo.lock
generated
@@ -931,29 +931,6 @@ version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10"
|
||||
|
||||
[[package]]
|
||||
name = "docker2fl"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bollard",
|
||||
"clap",
|
||||
"futures-util",
|
||||
"git-version",
|
||||
"log",
|
||||
"regex",
|
||||
"rfs",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"simple_logger",
|
||||
"tempdir",
|
||||
"tokio",
|
||||
"tokio-async-drop",
|
||||
"toml",
|
||||
"uuid",
|
||||
"walkdir",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dotenvy"
|
||||
version = "0.15.7"
|
||||
@@ -1023,49 +1000,6 @@ version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6"
|
||||
|
||||
[[package]]
|
||||
name = "fl-server"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"askama",
|
||||
"async-trait",
|
||||
"axum",
|
||||
"axum-macros",
|
||||
"bollard",
|
||||
"chrono",
|
||||
"clap",
|
||||
"docker2fl",
|
||||
"futures-util",
|
||||
"git-version",
|
||||
"hostname-validator",
|
||||
"hyper 1.4.1",
|
||||
"jsonwebtoken",
|
||||
"log",
|
||||
"mime",
|
||||
"mime_guess",
|
||||
"percent-encoding",
|
||||
"regex",
|
||||
"rfs",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha256",
|
||||
"simple_logger",
|
||||
"tempdir",
|
||||
"thiserror",
|
||||
"time",
|
||||
"tokio",
|
||||
"tokio-async-drop",
|
||||
"toml",
|
||||
"tower",
|
||||
"tower-http",
|
||||
"tracing",
|
||||
"utoipa",
|
||||
"utoipa-swagger-ui",
|
||||
"uuid",
|
||||
"walkdir",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "flate2"
|
||||
version = "1.0.30"
|
||||
@@ -1755,9 +1689,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.153"
|
||||
version = "0.2.172"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
|
||||
checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa"
|
||||
|
||||
[[package]]
|
||||
name = "libm"
|
||||
@@ -2649,34 +2583,60 @@ version = "0.2.0"
|
||||
dependencies = [
|
||||
"aes-gcm",
|
||||
"anyhow",
|
||||
"askama",
|
||||
"assert_cmd",
|
||||
"async-trait",
|
||||
"axum",
|
||||
"axum-macros",
|
||||
"bb8-redis",
|
||||
"blake2b_simd",
|
||||
"bollard",
|
||||
"bytes",
|
||||
"chrono",
|
||||
"clap",
|
||||
"daemonize",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"git-version",
|
||||
"hex",
|
||||
"hostname-validator",
|
||||
"hyper 1.4.1",
|
||||
"jsonwebtoken",
|
||||
"libc",
|
||||
"log",
|
||||
"lru",
|
||||
"mime",
|
||||
"mime_guess",
|
||||
"nix",
|
||||
"openssl",
|
||||
"percent-encoding",
|
||||
"polyfuse",
|
||||
"rand 0.8.5",
|
||||
"regex",
|
||||
"reqwest 0.11.27",
|
||||
"rust-s3",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"sha256",
|
||||
"simple_logger",
|
||||
"snap",
|
||||
"sqlx",
|
||||
"tempdir",
|
||||
"tempfile",
|
||||
"thiserror",
|
||||
"time",
|
||||
"tokio",
|
||||
"tokio-async-drop",
|
||||
"toml",
|
||||
"tower",
|
||||
"tower-http",
|
||||
"tracing",
|
||||
"url",
|
||||
"utoipa",
|
||||
"utoipa-swagger-ui",
|
||||
"uuid",
|
||||
"walkdir",
|
||||
"which",
|
||||
"workers",
|
||||
]
|
||||
|
||||
@@ -1,11 +1,100 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
[package]
|
||||
name = "rfs"
|
||||
version = "0.2.0"
|
||||
edition = "2018"
|
||||
|
||||
members = [
|
||||
"rfs",
|
||||
"docker2fl",
|
||||
"fl-server"
|
||||
]
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[build-dependencies]
|
||||
git-version = "0.3.5"
|
||||
|
||||
[[bin]]
|
||||
name = "rfs"
|
||||
path = "src/main.rs"
|
||||
required-features = ["build-binary"]
|
||||
|
||||
[features]
|
||||
build-binary = [
|
||||
"dep:polyfuse",
|
||||
"dep:simple_logger",
|
||||
"dep:daemonize",
|
||||
"dep:clap",
|
||||
"parallel-download"
|
||||
]
|
||||
parallel-download = []
|
||||
|
||||
[lib]
|
||||
name = "rfs"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.44"
|
||||
time = "0.3"
|
||||
sqlx = { version = "0.7.4", features = [ "runtime-tokio-rustls", "sqlite" ] }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
libc = "0.2"
|
||||
futures = "0.3"
|
||||
thiserror = "1.0"
|
||||
bytes = "1.1.0"
|
||||
log = "0.4"
|
||||
lru = "0.7.0"
|
||||
|
||||
snap = "1.0.5"
|
||||
bb8-redis = "0.13"
|
||||
async-trait = "0.1.53"
|
||||
url = "2.3.1"
|
||||
blake2b_simd = "1"
|
||||
aes-gcm = "0.10"
|
||||
hex = "0.4"
|
||||
rand = "0.8"
|
||||
# next are only needed for the binaries
|
||||
clap = { version = "4.2", features = ["derive"], optional = true}
|
||||
simple_logger = {version = "1.0.1", optional = true}
|
||||
daemonize = { version = "0.5", optional = true }
|
||||
tempfile = { version = "3.3.0"}
|
||||
workers = { git="https://github.com/threefoldtech/tokio-worker-pool.git" }
|
||||
rust-s3 = "0.34.0-rc3"
|
||||
openssl = { version = "0.10", features = ["vendored"] }
|
||||
regex = "1.9.6"
|
||||
which = "6.0"
|
||||
reqwest = { version = "0.11", features = ["json"] }
|
||||
nix = "0.23.0"
|
||||
# Docker functionality dependencies
|
||||
bollard = "0.15.0"
|
||||
sha2 = "0.10"
|
||||
futures-util = "0.3"
|
||||
uuid = { version = "1.3.1", features = ["v4"] }
|
||||
tempdir = "0.3"
|
||||
serde_json = "1.0"
|
||||
toml = "0.4.2"
|
||||
serde = { version = "1.0.159" , features = ["derive"] }
|
||||
tokio-async-drop = "0.1.0"
|
||||
walkdir = "2.5.0"
|
||||
|
||||
axum = "0.7"
|
||||
axum-macros = "0.4.1"
|
||||
tower = { version = "0.4", features = ["util", "timeout", "load-shed", "limit"] }
|
||||
tower-http = { version = "0.5.2", features = ["fs", "cors", "add-extension", "auth", "compression-full", "trace", "limit"] }
|
||||
mime_guess = "2.0.5"
|
||||
mime = "0.3.17"
|
||||
percent-encoding = "2.3.1"
|
||||
tracing = "0.1.40"
|
||||
askama = "0.12.1"
|
||||
hyper = { version = "1.4.0", features = ["full"] }
|
||||
chrono = "0.4.38"
|
||||
jsonwebtoken = "9.3.0"
|
||||
utoipa = { version = "4", features = ["axum_extras"] }
|
||||
utoipa-swagger-ui = { version = "7", features = ["axum"] }
|
||||
hostname-validator = "1.1.1"
|
||||
sha256 = "1.5.0"
|
||||
|
||||
[dependencies.polyfuse]
|
||||
branch = "master"
|
||||
git = "https://github.com/muhamadazmy/polyfuse"
|
||||
optional = true
|
||||
|
||||
[dev-dependencies]
|
||||
reqwest = { version = "0.11", features = ["blocking"] }
|
||||
assert_cmd = "2.0"
|
||||
|
||||
[profile.release]
|
||||
lto = true
|
||||
|
||||
@@ -2,22 +2,20 @@ FROM rust:slim as builder
|
||||
|
||||
WORKDIR /src
|
||||
|
||||
COPY fl-server /src/fl-server
|
||||
COPY rfs /src/rfs
|
||||
COPY docker2fl /src/docker2fl
|
||||
COPY Cargo.toml .
|
||||
COPY Cargo.lock .
|
||||
COPY config.toml .
|
||||
|
||||
RUN apt-get update && apt-get install curl build-essential libssl-dev musl-tools -y
|
||||
RUN rustup target add x86_64-unknown-linux-musl
|
||||
RUN cargo build --release --bin fl-server --target=x86_64-unknown-linux-musl
|
||||
RUN cargo build --release --target=x86_64-unknown-linux-musl
|
||||
|
||||
FROM alpine:3.19
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=builder /src/target/x86_64-unknown-linux-musl/release/fl-server .
|
||||
COPY --from=builder /src/target/x86_64-unknown-linux-musl/release/rfs .
|
||||
COPY --from=builder /src/config.toml .
|
||||
|
||||
ENTRYPOINT [ "./fl-server", "--config-path", "config.toml"]
|
||||
ENTRYPOINT [ "./rfs", "server", "--config-path", "config.toml"]
|
||||
|
||||
@@ -4,40 +4,213 @@ This repo contains the binaries related to rfs.
|
||||
|
||||
[](https://github.com/threefoldtech/rfs/actions/workflows/tests.yaml)
|
||||
|
||||
## Introduction
|
||||
# Introduction
|
||||
|
||||
`rfs` is the main tool to create, mount and extract FungiStore lists (FungiList), `fl` for short. An `fl` is a simple format
to keep information about an entire filesystem in a compact form. It does not hold the data itself, but enough information to
retrieve this data back from a `store`.
|
||||
|
||||
## Build
|
||||
## Building rfs
|
||||
|
||||
Make sure you have rust installed then run the following commands:
|
||||
To build rfs make sure you have rust installed then run the following commands:
|
||||
|
||||
```bash
|
||||
# this is needed to be run once to make sure the musl target is installed
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
|
||||
# build all binaries
|
||||
# build the binary
|
||||
cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
|
||||
```
|
||||
|
||||
The rfs binary will be available under `./target/x86_64-unknown-linux-musl/release/rfs`
|
||||
The binary will be available under `./target/x86_64-unknown-linux-musl/release/rfs`. You can then copy that binary to `/usr/bin/`
to be able to use it from anywhere on your system.
|
||||
|
||||
The docker2fl binary will be available under `./target/x86_64-unknown-linux-musl/release/docker2fl`
|
||||
## Stores
|
||||
|
||||
you can copy the binaries then to `/usr/bin/` to be able to use from anywhere on your system.
|
||||
A store is where the actual data lives. A store can be as simple as a `directory` on your local machine, in which case the files in the `fl` are only 'accessible' on your local machine. A store can also be a `zdb` running remotely, or a cluster of `zdb`s. Right now only `dir`, `http`, `zdb` and `s3` stores are supported, but this will change in the future to support even more stores.
|
||||
|
||||
## Binaries and libraries
|
||||
## Usage
|
||||
|
||||
- [rfs](./rfs/README.md)
|
||||
- [docker2fl](./docker2fl/README.md)
|
||||
- [fl-server](./fl-server/README.md)
|
||||
- [fl-frontend](./frontend/README.md)
|
||||
### Creating an `fl`
|
||||
|
||||
## Flist-Server
|
||||
```bash
|
||||
rfs pack -m output.fl -s <store-specs> <directory>
|
||||
```
|
||||
|
||||
- Dockerfile for the backend: https://github.com/threefoldtech/rfs/blob/master/Dockerfile
|
||||
- backend config: https://github.com/threefoldtech/rfs/blob/master/fl-server/README.md
|
||||
- Dockerfile for the frontend: https://github.com/threefoldtech/rfs/blob/master/frontend/Dockerfile
|
||||
This tells rfs to create an `fl` named `output.fl` using the store defined by the url `<store-specs>` and upload all the files under the directory recursively.

The simplest form of `<store-specs>` is a `url`. The store `url` defines the store to use. Any `url` has a scheme that defines the store type. Right now we have support only for:
|
||||
|
||||
- `dir`: dir is a very simple store that is mostly used for testing. A dir store will store the fs blobs in another location defined by the url path. An example of a valid dir url is `dir:///tmp/store`
|
||||
- `zdb`: [zdb](https://github.com/threefoldtech/0-db) is an append-only key-value store and provides a redis-like API. An example zdb url can be something like `zdb://<hostname>[:port][/namespace]`
|
||||
- `s3`: aws-s3 is used for storing and retrieving large amounts of data (blobs) in buckets (directories). An example `s3://<username>:<password>@<host>:<port>/<bucket-name>`
|
||||
|
||||
`region` is an optional param for s3 stores, if you want to provide one you can add it as a query to the url `?region=<region-name>`
|
||||
- `http`: http is a store mostly used for wrapping a dir store to fetch data through http requests. It does not support uploading, just fetching the data.
|
||||
It can be set in the FL file as the store to fetch the data with `rfs config`. Example: `http://localhost:9000/store` (https works too).
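As a rough sketch of that flow (the URL is illustrative, and the `rfs config` subcommands are the ones described in this repo's user guide):

```bash
# Record an HTTP mirror of the store in an existing fl, then verify it.
rfs config --meta output.fl store add --store http://localhost:9000/store
rfs config --meta output.fl store list
```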
|
||||
|
||||
`<store-specs>` can also be of the form `<start>-<end>=<url>` where `start` and `end` are hex bytes for partitioning of blob keys. rfs will then store a set of blobs on the defined store if their blob key falls in the `[start:end]` range (inclusive).
|
||||
|
||||
If the `start-end` range is not provided, a `00-FF` range is assumed, basically a catch-all range for the blob keys. In other words, all blobs will be written to that store.
|
||||
|
||||
This is only useful because `rfs` can accept multiple stores on the command line with different and/or overlapping ranges.
|
||||
|
||||
For example `-s 00-80=dir:///tmp/store0 -s 81-ff=dir:///tmp/store1` means all keys that have a prefix byte in the range `[00-80]` will be written to /tmp/store0, and all other keys `[81-ff]` will be written to store1.
|
||||
|
||||
The same range can appear multiple times, which means the blob will be replicated to all the stores that match its key prefix.
|
||||
|
||||
To quickly test this operation
|
||||
|
||||
```bash
|
||||
rfs pack -m output.fl -s 00-80=dir:///tmp/store0 -s 81-ff=dir:///tmp/store1 ~/Documents
|
||||
```
|
||||
|
||||
This command will effectively create `output.fl` and store (and shard) the blobs across the two locations /tmp/store0 and /tmp/store1.
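Since the same range can be listed more than once, replication is expressed the same way; a sketch with illustrative paths:

```bash
# Replicate every blob (the full 00-ff range) to two directory stores.
rfs pack -m output.fl -s 00-ff=dir:///tmp/replica0 -s 00-ff=dir:///tmp/replica1 ~/Documents
```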
|
||||
|
||||
```bash
|
||||
#rfs pack --help
|
||||
|
||||
create an FL and upload blocks to provided storage
|
||||
|
||||
Usage: rfs pack [OPTIONS] --meta <META> <TARGET>
|
||||
|
||||
Arguments:
|
||||
<TARGET> target directory to upload
|
||||
|
||||
Options:
|
||||
-m, --meta <META> path to metadata file (flist)
|
||||
-s, --store <STORE> store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for sharding. the URL is per store type, please check docs for more information
|
||||
--no-strip-password disables automatic password stripping from store url, otherwise password will be stored in the fl.
|
||||
-h, --help Print help
|
||||
```
|
||||
|
||||
#### Password stripping
|
||||
|
||||
During creation of an flist you will probably provide a password in the URL of the store. This is normally needed to allow write operations to the store (say, an s3 bucket).
Normally this password is removed from the store info so it's safe to ship the fl to users. A user of the flist will then only have read access, if the store is configured correctly.
|
||||
|
||||
For example a `zdb` store has the notion of a public namespace which is password protected for writes, but open for reads. An S3 bucket can have the policy to allow public reads, but protected writes (minio supports that via bucket settings)
|
||||
|
||||
If you want to disable the password stripping from the store url, you can provide the `--no-strip-password` flag during creation. This also means someone can extract
this information from the fl and gain write access to your store, so be careful how you use it.
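As a sketch (the store URL and credentials below are placeholders, not a real endpoint):

```bash
# Keep the write credentials embedded in output.fl instead of stripping them.
rfs pack -m output.fl --no-strip-password -s "s3://writer:secret@s3.example.com:9000/mybucket" ./mydir
```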
|
||||
|
||||
# Mounting an `fl`
|
||||
|
||||
Once the `fl` is created it can be distributed to other people. They can then mount the `fl`, which allows them to traverse the packed filesystem and also access (read-only) the files.

To mount an `fl` only the `fl` is needed, since all information regarding the `stores` is already stored in the `fl`. This also means you can only share the `fl` if the other user can actually reach the store used to create the `fl`. So a `dir` store is not shareable, nor is a `zdb` instance that is running on localhost :no_good:
|
||||
|
||||
```bash
|
||||
sudo rfs mount -m output.fl <target>
|
||||
```
|
||||
|
||||
The `<target>` is the mount location, usually `/mnt` but can be anywhere. In another terminal you can now `cd <target>` and walk the filesystem tree. Opening the files will trigger a file download from the store only on read access.
|
||||
|
||||
full command help
|
||||
|
||||
```bash
|
||||
# rfs mount --help
|
||||
|
||||
mount an FL
|
||||
|
||||
Usage: rfs mount [OPTIONS] --meta <META> <TARGET>
|
||||
|
||||
Arguments:
|
||||
<TARGET> target mountpoint
|
||||
|
||||
Options:
|
||||
-m, --meta <META> path to metadata file (flist)
|
||||
-c, --cache <CACHE> directory used as cache for downloaded file chuncks [default: /tmp/cache]
|
||||
-d, --daemon run in the background
|
||||
-l, --log <LOG> log file only used with daemon mode
|
||||
-h, --help Print help
|
||||
```
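For long-running mounts, the daemon and log options shown above can be combined; a minimal sketch with illustrative paths:

```bash
# Mount in the background and keep a log file for troubleshooting.
sudo rfs mount -m output.fl -d -l /var/log/rfs-mount.log /mnt
```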
|
||||
|
||||
# Unpack an `fl`
|
||||
|
||||
Similar to `mount` rfs provides an `unpack` subcommand that downloads the entire content (extract) of an `fl` to a provided directory.
|
||||
|
||||
```bash
|
||||
rfs unpack --help
|
||||
unpack (downloads) content of an FL the provided location
|
||||
|
||||
Usage: rfs unpack [OPTIONS] --meta <META> <TARGET>
|
||||
|
||||
Arguments:
|
||||
<TARGET> target directory to upload
|
||||
|
||||
Options:
|
||||
-m, --meta <META> path to metadata file (flist)
|
||||
-c, --cache <CACHE> directory used as cache for downloaded file chuncks [default: /tmp/cache]
|
||||
-p, --preserve-ownership preserve files ownership from the FL, otherwise use the current user ownership setting this flag to true normally requires sudo
|
||||
-h, --help Print help
|
||||
```
|
||||
|
||||
By default, when unpacking, the `-p` flag is not set, which means downloaded files will be `owned` by the current user/group. If the `-p` flag is set, the files' ownership will be the same as the original files used to create the fl (preserving the `uid` and `gid` of the files and directories); this normally requires `sudo` while unpacking.
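For example, to extract while keeping the original `uid`/`gid` (the target path is illustrative):

```bash
# Preserving ownership generally requires root privileges.
sudo rfs unpack -m output.fl -p /tmp/extracted
```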
|
||||
|
||||
# Server Command
|
||||
|
||||
The `rfs` tool can also be used to run the server
|
||||
|
||||
```bash
|
||||
rfs server --config-path config.toml [--debug]
|
||||
```
|
||||
|
||||
This command will start the server using the specified configuration file. The server binary must be located in the same directory as the rfs binary.
|
||||
|
||||
Options:
|
||||
|
||||
- `--config-path`, `-c`: Path to the configuration file (required)
|
||||
- `--debug`, `-d`: Enable debugging logs (can be specified multiple times for more verbose logging)
|
||||
|
||||
## Configuration
|
||||
|
||||
Before building or running the server, create `config.toml` in the current directory.
|
||||
|
||||
example `config.toml`:
|
||||
|
||||
```toml
|
||||
host="Your host to run the server on, required, example: 'localhost'"
|
||||
port="Your port to run the server on, required, example: 3000, validation: between [0, 65535]"
|
||||
store_url="List of stores to pack flists in which can be 'dir', 'zdb', 's3', required, example: ['dir:///tmp/store0']"
|
||||
flist_dir="A directory to save each user flists, required, example: 'flists'"
|
||||
|
||||
jwt_secret="secret for jwt, required, example: 'secret'"
|
||||
jwt_expire_hours="Life time for jwt token in hours, required, example: 5, validation: between [1, 24]"
|
||||
|
||||
sqlite_path="path of database file, example: 'server.db'"
|
||||
storage_dir="path of server storage directory for blocks, example: 'storage'"
|
||||
block_size="Size of blocks in bytes used for splitting files during storage operations, optional, example: 1048576 (1MB)"
|
||||
|
||||
[[users]] # list of authorized user in the server
|
||||
username = "user1"
|
||||
password = "password1"
|
||||
|
||||
[[users]]
|
||||
username = "user2"
|
||||
password = "password2"
|
||||
...
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
The project includes various tests to ensure functionality:
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
cd tests
|
||||
make all
|
||||
|
||||
# Or run specific test types
|
||||
make unit
|
||||
make integration
|
||||
make e2e
|
||||
make performance
|
||||
```
|
||||
|
||||
See the [tests README](./tests/README.md) for more details.
|
||||
|
||||
# Specifications
|
||||
|
||||
Please check [docs](../docs)
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
[package]
|
||||
name = "docker2fl"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[build-dependencies]
|
||||
git-version = "0.3.5"
|
||||
|
||||
[lib]
|
||||
name = "docker2fl"
|
||||
path = "src/docker2fl.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "docker2fl"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4"
|
||||
anyhow = "1.0.44"
|
||||
regex = "1.9.6"
|
||||
rfs = { path = "../rfs"}
|
||||
tokio = { version = "1", features = [ "rt", "rt-multi-thread", "macros", "signal"] }
|
||||
bollard = "0.15.0"
|
||||
futures-util = "0.3"
|
||||
simple_logger = {version = "1.0.1"}
|
||||
uuid = { version = "1.3.1", features = ["v4"] }
|
||||
tempdir = "0.3"
|
||||
serde_json = "1.0"
|
||||
toml = "0.4.2"
|
||||
clap = { version = "4.2", features = ["derive"] }
|
||||
serde = { version = "1.0.159" , features = ["derive"] }
|
||||
tokio-async-drop = "0.1.0"
|
||||
walkdir = "2.5.0"
|
||||
@@ -1,137 +0,0 @@
|
||||
# docker2fl
|
||||
|
||||
`docker2fl` is a tool to extract docker images and convert them to flist using [rfs](../rfs) tool.
|
||||
|
||||
## Build
|
||||
|
||||
To build docker2fl make sure you have rust installed then run the following commands:
|
||||
|
||||
```bash
|
||||
# this is needed to be run once to make sure the musl target is installed
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
|
||||
# build the binary
|
||||
cargo build --release --target=x86_64-unknown-linux-musl
|
||||
```
|
||||
|
||||
the binary will be available under `./target/x86_64-unknown-linux-musl/release/docker2fl` you can copy that binary then to `/usr/bin/`
|
||||
to be able to use from anywhere on your system.
|
||||
|
||||
```bash
|
||||
sudo mv ./target/x86_64-unknown-linux-musl/release/docker2fl /usr/bin/
|
||||
```
|
||||
|
||||
## Stores
|
||||
|
||||
A store is where the actual data lives. A store can be as simple as a `directory` on your local machine, in which case the files in the `fl` are only 'accessible' on your local machine. A store can also be a `zdb` running remotely, or a cluster of `zdb`s. Right now only `dir`, `zdb` and `s3` stores are supported, but this will change in the future to support even more stores.
|
||||
|
||||
## Usage
|
||||
|
||||
### Creating an `fl`
|
||||
|
||||
```bash
|
||||
docker2fl -i redis -s <store-specs>
|
||||
```
|
||||
|
||||
This tells docker2fl to create an `fl` named `redis-latest.fl` using the store defined by the url `<store-specs>` and upload all the files under the temp docker directory that includes the exported docker image, recursively.
|
||||
|
||||
The simplest form of `<store-specs>` is a `url`. The store `url` defines the store to use. Any `url` has a scheme that defines the store type. Right now we have support only for:
|
||||
|
||||
- `dir`: dir is a very simple store that is mostly used for testing. A dir store will store the fs blobs in another location defined by the url path. An example of a valid dir url is `dir:///tmp/store`
|
||||
- `zdb`: [zdb](https://github.com/threefoldtech/0-db) is an append-only key-value store and provides a redis-like API. An example zdb url can be something like `zdb://<hostname>[:port][/namespace]`
|
||||
- `s3`: aws-s3 is used for storing and retrieving large amounts of data (blobs) in buckets (directories). An example `s3://<username>:<password>@<host>:<port>/<bucket-name>`
|
||||
|
||||
`region` is an optional param for s3 stores, if you want to provide one you can add it as a query to the url `?region=<region-name>`
|
||||
|
||||
`<store-specs>` can also be of the form `<start>-<end>=<url>` where `start` and `end` are hex bytes for partitioning of blob keys. rfs will then store a set of blobs on the defined store if their blob key falls in the `[start:end]` range (inclusive).
|
||||
|
||||
If the `start-end` range is not provided, a `00-FF` range is assumed, basically a catch-all range for the blob keys. In other words, all blobs will be written to that store.
|
||||
|
||||
This is only useful because `docker2fl` can accept multiple stores on the command line with different and/or overlapping ranges.
|
||||
|
||||
For example `-s 00-80=dir:///tmp/store0 -s 81-ff=dir:///tmp/store1` means all keys that have a prefix byte in the range `[00-80]` will be written to /tmp/store0, and all other keys `[81-ff]` will be written to store1.
|
||||
|
||||
The same range can appear multiple times, which means the blob will be replicated to all the stores that matches its key prefix.
|
||||
|
||||
To quickly test this operation
|
||||
|
||||
```bash
|
||||
docker2fl -i redis -s "dir:///tmp/store0"
|
||||
```
|
||||
|
||||
This command will use the redis image and effectively create the `redis.fl` and store (and shard) the blobs across the location /tmp/store0.
|
||||
|
||||
```bash
|
||||
#docker2fl --help
|
||||
|
||||
Usage: docker2fl [OPTIONS] --image-name <IMAGE_NAME>
|
||||
Options:
|
||||
--debug...
|
||||
enable debugging logs
|
||||
-i, --image-name <IMAGE_NAME>
|
||||
name of the docker image to be converted to flist
|
||||
-s, --store <STORE>
|
||||
store url for rfs in the format [xx-xx=]<url>. the range xx-xx is optional and used for sharding. the URL is per store type, please check docs for more information
|
||||
-h, --help
|
||||
Print help
|
||||
-V, --version
|
||||
Print version
|
||||
```
|
||||
|
||||
## Generate an flist using ZDB
|
||||
|
||||
### Deploy a vm
|
||||
|
||||
1. Deploy a vm with a public IP
|
||||
2. add docker (don't forget to add a disk for it with mountpoint = "/var/lib/docker")
|
||||
3. add caddy
|
||||
|
||||
### Install zdb and run an instance of it
|
||||
|
||||
1. Execute `git clone -b development-v2 https://github.com/threefoldtech/0-db /zdb` then `cd /zdb`
|
||||
2. Build
|
||||
|
||||
```bash
|
||||
cd libzdb
|
||||
make
|
||||
cd ..
|
||||
|
||||
cd zdbd
|
||||
make STATIC=1
|
||||
cd ..
|
||||
|
||||
make
|
||||
```
|
||||
|
||||
3. Install `make install`
|
||||
4. run `zdb --listen 0.0.0.0`
|
||||
5. The result info you should know
|
||||
|
||||
```console
|
||||
zdbEndpoint = "<vm public IP>:<port>"
|
||||
zdbNameSpace = "default"
|
||||
zdbPassword = "default"
|
||||
```
|
||||
|
||||
### Install docker2fl
|
||||
|
||||
1. Execute `git clone -b development-v2 https://github.com/threefoldtech/rfs` then `cd /rfs`
|
||||
2. Execute
|
||||
|
||||
```bash
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
|
||||
mv ./target/x86_64-unknown-linux-musl/release/docker2fl /usr/bin/
|
||||
```
|
||||
|
||||
### Convert docker image to an fl
|
||||
|
||||
1. Try an image for example `threefolddev/ubuntu:22.04` image
|
||||
2. Executing `docker2fl -i threefolddev/ubuntu:22.04 -s "zdb://<vm public IP>:<port>/default" -d`
|
||||
3. You will end up having `threefolddev-ubuntu-22.04.fl` (flist)
|
||||
|
||||
### Serve the flist using caddy
|
||||
|
||||
1. In the directory that includes the output flist, you can run `caddy file-server --listen 0.0.0.0:2015 --browse`
|
||||
2. The flist will be available as `http://<vm public IP>:2015/threefolddev-ubuntu-22.04.fl`
|
||||
3. Use the flist to deploy any virtual machine.
|
||||
@@ -1,115 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use bollard::auth::DockerCredentials;
|
||||
use clap::{ArgAction, Parser};
|
||||
use rfs::fungi;
|
||||
use rfs::store::parse_router;
|
||||
use tokio::runtime::Builder;
|
||||
use uuid::Uuid;
|
||||
|
||||
mod docker2fl;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[clap(name ="docker2fl", author, version = env!("GIT_VERSION"), about, long_about = None)]
|
||||
struct Options {
|
||||
/// enable debugging logs
|
||||
#[clap(short, long, action=ArgAction::Count)]
|
||||
debug: u8,
|
||||
|
||||
/// store url for rfs in the format [xx-xx=]<url>. the range xx-xx is optional and used for
|
||||
/// sharding. the URL is per store type, please check docs for more information
|
||||
#[clap(short, long, required = true, action=ArgAction::Append)]
|
||||
store: Vec<String>,
|
||||
|
||||
/// name of the docker image to be converted to flist
|
||||
#[clap(short, long, required = true)]
|
||||
image_name: String,
|
||||
|
||||
// docker credentials
|
||||
/// docker hub server username
|
||||
#[clap(long, required = false)]
|
||||
username: Option<String>,
|
||||
|
||||
/// docker hub server password
|
||||
#[clap(long, required = false)]
|
||||
password: Option<String>,
|
||||
|
||||
/// docker hub server auth
|
||||
#[clap(long, required = false)]
|
||||
auth: Option<String>,
|
||||
|
||||
/// docker hub server email
|
||||
#[clap(long, required = false)]
|
||||
email: Option<String>,
|
||||
|
||||
/// docker hub server address
|
||||
#[clap(long, required = false)]
|
||||
server_address: Option<String>,
|
||||
|
||||
/// docker hub server identity token
|
||||
#[clap(long, required = false)]
|
||||
identity_token: Option<String>,
|
||||
|
||||
/// docker hub server registry token
|
||||
#[clap(long, required = false)]
|
||||
registry_token: Option<String>,
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let rt = Builder::new_multi_thread()
|
||||
.thread_stack_size(8 * 1024 * 1024)
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap();
|
||||
rt.block_on(run())
|
||||
}
|
||||
|
||||
async fn run() -> Result<()> {
|
||||
let opts = Options::parse();
|
||||
|
||||
simple_logger::SimpleLogger::new()
|
||||
.with_utc_timestamps()
|
||||
.with_level({
|
||||
match opts.debug {
|
||||
0 => log::LevelFilter::Info,
|
||||
1 => log::LevelFilter::Debug,
|
||||
_ => log::LevelFilter::Trace,
|
||||
}
|
||||
})
|
||||
.with_module_level("sqlx", log::Level::Error.to_level_filter())
|
||||
.init()?;
|
||||
|
||||
let mut docker_image = opts.image_name.to_string();
|
||||
if !docker_image.contains(':') {
|
||||
docker_image.push_str(":latest");
|
||||
}
|
||||
|
||||
let credentials = Some(DockerCredentials {
|
||||
username: opts.username,
|
||||
password: opts.password,
|
||||
auth: opts.auth,
|
||||
email: opts.email,
|
||||
serveraddress: opts.server_address,
|
||||
identitytoken: opts.identity_token,
|
||||
registrytoken: opts.registry_token,
|
||||
});
|
||||
|
||||
let fl_name = docker_image.replace([':', '/'], "-") + ".fl";
|
||||
let meta = fungi::Writer::new(&fl_name, true).await?;
|
||||
let store = parse_router(&opts.store).await?;
|
||||
|
||||
let container_name = Uuid::new_v4().to_string();
|
||||
let docker_tmp_dir =
|
||||
tempdir::TempDir::new(&container_name).expect("failed to create tmp directory");
|
||||
|
||||
let mut docker_to_fl =
|
||||
docker2fl::DockerImageToFlist::new(meta, docker_image, credentials, docker_tmp_dir);
|
||||
let res = docker_to_fl.convert(store, None).await;
|
||||
|
||||
// remove the file created with the writer if fl creation failed
|
||||
if res.is_err() {
|
||||
tokio::fs::remove_file(fl_name).await?;
|
||||
return res;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
484
components/rfs/docs/user_guide.md
Normal file
@@ -0,0 +1,484 @@
|
||||
# RFS User Guide
|
||||
|
||||
This document provides an overview of the commands available in the RFS application and their usage.
|
||||
|
||||
---
|
||||
|
||||
## Commands
|
||||
|
||||
### 1. **Mount**
|
||||
|
||||
Mount an FL (Flist) to a target directory.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs mount --meta <path_to_flist> --cache <cache_directory> [--daemon] [--log <log_file>] <target>
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `--meta`: Path to the metadata file (flist).
|
||||
- `--cache`: Directory used as a cache for downloaded file chunks (default: `/tmp/cache`).
|
||||
- `--daemon`: Run the process in the background.
|
||||
- `--log`: Log file (only used with daemon mode).
|
||||
- `<target>`: Target mount point.
|
||||
|
||||
---
|
||||
|
||||
### 2. **Pack**
|
||||
|
||||
Create an FL and upload blocks to the provided storage.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs pack --meta <path_to_flist> --store <store_url>... [--no-strip-password] <target_directory>
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `--meta`: Path to the metadata file (flist).
|
||||
- `--store`: Store URL(s) in the format `[xx-xx=]<url>`. Multiple stores can be specified.
|
||||
- `--no-strip-password`: Disable automatic password stripping from the store URL.
|
||||
- `<target_directory>`: Directory to upload.
|
||||
|
||||
---
|
||||
|
||||
### 3. **Unpack**
|
||||
|
||||
Download the content of an FL to a specified location.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs unpack --meta <path_to_flist> --cache <cache_directory> [--preserve-ownership] <target_directory>
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `--meta`: Path to the metadata file (flist).
|
||||
- `--cache`: Directory used as a cache for downloaded file chunks (default: `/tmp/cache`).
|
||||
- `--preserve-ownership`: Preserve file ownership from the FL (requires sudo).
|
||||
- `<target_directory>`: Directory to unpack the content.
|
||||
|
||||
---
|
||||
|
||||
### 4. **Clone**
|
||||
|
||||
Copy data from the stores of an FL to another store.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs clone --meta <path_to_flist> --store <store_url>... --cache <cache_directory>
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `--meta`: Path to the metadata file (flist).
|
||||
- `--store`: Store URL(s) in the format `[xx-xx=]<url>`. Multiple stores can be specified.
|
||||
- `--cache`: Directory used as a cache for downloaded file chunks (default: `/tmp/cache`).
|
||||
|
||||
---
|
||||
|
||||
### 5. **Config**
|
||||
|
||||
List or modify FL metadata and stores.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs config --meta <path_to_flist> <subcommand>
|
||||
```
|
||||
|
||||
**Subcommands:**
|
||||
|
||||
- `tag list`: List all tags.
|
||||
- `tag add --tag <key=value>`: Add a tag.
|
||||
- `tag delete --key <key>`: Delete a tag.
|
||||
- `store list`: List all stores.
|
||||
- `store add --store <store_url>`: Add a store.
|
||||
- `store delete --store <store_url>`: Delete a store.
|
||||
|
||||
---
|
||||
|
||||
### 6. **Docker**
|
||||
|
||||
Convert a Docker image to an FL.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs docker --image-name <image_name> --store <store_url>... [--username <username>] [--password <password>] [--auth <auth>] [--email <email>] [--server-address <server_address>] [--identity-token <token>] [--registry-token <token>]
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `--image-name`: Name of the Docker image to convert.
|
||||
- `--store`: Store URL(s) in the format `[xx-xx=]<url>`. Multiple stores can be specified.
|
||||
- Additional options for Docker credentials (e.g., `--username`, `--password`, etc.).
|
||||
|
||||
---
|
||||
|
||||
### 7. **Server**
|
||||
|
||||
Run the FL server.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs server --config-path <config_file> [--debug]
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `--config-path`: Path to the server configuration file.
|
||||
- `--debug`: Enable debugging logs.
|
||||
|
||||
---
|
||||
|
||||
### 8. **Upload**
|
||||
|
||||
Upload a file to a server.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs upload <file_path> --server <server_url> [--block-size <size>]
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `<file_path>`: Path to the file to upload.
|
||||
- `--server`: Server URL (e.g., `http://localhost:8080`).
|
||||
- `--block-size`: Block size for splitting the file (default: 1MB).
|
||||
|
||||
---
|
||||
|
||||
### 9. **UploadDir**
|
||||
|
||||
Upload a directory to a server.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs upload-dir <directory_path> --server <server_url> [--block-size <size>] [--create-flist] [--flist-output <output_path>]
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `<directory_path>`: Path to the directory to upload.
|
||||
- `--server`: Server URL (e.g., `http://localhost:8080`).
|
||||
- `--block-size`: Block size for splitting the files (default: 1MB).
|
||||
- `--create-flist`: Create and upload an FL file.
|
||||
- `--flist-output`: Path to output the FL file.
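A quick sketch combining these options (the server URL and paths are illustrative):

```bash
# Upload a directory and also create an FL describing it.
rfs upload-dir ./assets --server http://localhost:8080 --create-flist --flist-output ./assets.fl
```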
|
||||
|
||||
---
|
||||
|
||||
### 10. **Download**
|
||||
|
||||
Download a file from a server using its hash.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs download <file_hash> --output <output_file> --server <server_url>
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `<file_hash>`: Hash of the file to download.
|
||||
- `--output`: Name to save the downloaded file as.
|
||||
- `--server`: Server URL (e.g., `http://localhost:8080`).
|
||||
|
||||
---
|
||||
|
||||
### 11. **DownloadDir**
|
||||
|
||||
Download a directory from a server using its FL hash.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs download-dir <flist_hash> --output <output_directory> --server <server_url>
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `<flist_hash>`: Hash of the FL to download.
|
||||
- `--output`: Directory to save the downloaded files to.
|
||||
- `--server`: Server URL (e.g., `http://localhost:8080`).
|
||||
|
||||
---
|
||||
|
||||
### 12. **Exists**
|
||||
|
||||
Check if a file or hash exists on the server.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs exists <file_or_hash> --server <server_url> [--block-size <size>]
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `<file_or_hash>`: Path to the file or hash to check.
|
||||
- `--server`: Server URL (e.g., `http://localhost:8080`).
|
||||
- `--block-size`: Block size for splitting the file (default: 1MB).
|
||||
|
||||
---
|
||||
|
||||
### 13. **flist create**
|
||||
|
||||
Creates an flist from a directory.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs flist create <directory> --output /path/to/output.flist --server http://localhost:8080 --block-size 1048576
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `<directory>`: Path to the directory to create the flist from.
|
||||
- `--output`: Path to save the generated flist file.
|
||||
- `--server`: Server URL (e.g., <http://localhost:8080>).
|
||||
- `--block-size`: Block size for splitting the files (default: 1MB).
|
||||
|
||||
---
|
||||
|
||||
### 14. **Website Publish**
|
||||
|
||||
Publish a website directory to the server.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs website-publish <directory_path> --server <server_url> [--block-size <size>]
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `<directory_path>`: Path to the website directory to publish.
|
||||
- `--server`: Server URL (e.g., `http://localhost:8080`).
|
||||
- `--block-size`: Block size for splitting the files (default: 1MB).
|
||||
|
||||
---
|
||||
|
||||
### 15. **Token**
|
||||
|
||||
Retrieve an authentication token using username and password.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs token --username <username> --password <password> --server <server_url>
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `--username`: Username for authentication.
|
||||
- `--password`: Password for authentication.
|
||||
- `--server`: Server URL (e.g., `http://localhost:8080`).
|
||||
|
||||
---
|
||||
|
||||
### 16. **Track**
|
||||
|
||||
Track user blocks on the server and their download statistics.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs track --server <server_url> --token <auth_token> [--details]
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `--server`: Server URL (e.g., `http://localhost:8080`).
|
||||
- `--token`: Authentication token for the server.
|
||||
- `--details`: Display detailed information about each block.
|
||||
|
||||
---
|
||||
|
||||
### 17. **TrackBlocks**
|
||||
|
||||
Track download statistics for specific blocks or all blocks.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs track-blocks --server <server_url> --token <auth_token> [--hash <block_hash>] [--all] [--details]
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `--server`: Server URL (e.g., `http://localhost:8080`).
|
||||
- `--token`: Authentication token for the server.
|
||||
- `--hash`: Specific block hash to track (conflicts with --all).
|
||||
- `--all`: Track all blocks (default if no hash is provided).
|
||||
- `--details`: Display detailed information about each block.
|
||||
|
||||
---
|
||||
|
||||
### 18. **TrackWebsite**
|
||||
|
||||
Track download statistics for a website using its flist hash.
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
rfs track-website <flist_hash> --server <server_url> [--details]
|
||||
```
|
||||
|
||||
**Options:**
|
||||
|
||||
- `<flist_hash>`: Hash of the website's flist.
|
||||
- `--server`: Server URL (e.g., `http://localhost:8080`).
|
||||
- `--details`: Display detailed information about each block.
|
||||
|
||||
---
|
||||
|
||||
### Examples
|
||||
|
||||
1. **Upload a File**:
|
||||
|
||||
Upload a file to the server with a custom block size:
|
||||
|
||||
```bash
|
||||
rfs upload big_file.txt --server http://localhost:8080 --block-size 2097152
|
||||
```
|
||||
|
||||
2. **Download a Directory**:
|
||||
|
||||
Download a directory from the server using its FL hash:
|
||||
|
||||
```bash
|
||||
rfs download-dir abc123 --output ./mydir --server http://localhost:8080
|
||||
```
|
||||
|
||||
3. **Pack a Directory**:
|
||||
|
||||
Create an FL from a directory and upload it to a specific store:
|
||||
|
||||
```bash
|
||||
rfs pack --meta myflist.fl --store http://store.url --target ./mydir
|
||||
```
|
||||
|
||||
4. **Unpack an FL**:
|
||||
|
||||
Unpack the contents of an FL to a target directory while preserving file ownership:
|
||||
|
||||
```bash
|
||||
rfs unpack --meta myflist.fl --cache /tmp/cache --preserve-ownership --target ./output
|
||||
```
|
||||
|
||||
5. **Convert a Docker Image to an FL**:
|
||||
|
||||
Convert a Docker image to an FL and upload it to a store with authentication:
|
||||
|
||||
```bash
|
||||
rfs docker --image-name redis --store server://http://localhost:4000 --username myuser --password mypass
|
||||
```
|
||||
|
||||
6. **Publish a Website**:
|
||||
|
||||
Publish a website directory to the server:
|
||||
|
||||
```bash
|
||||
rfs website-publish ./website --server http://localhost:8080
|
||||
```
|
||||
|
||||
7. **Check if a File Exists**:
|
||||
|
||||
Verify if a file exists on the server using its hash:
|
||||
|
||||
```bash
|
||||
rfs exists myfilehash --server http://localhost:8080
|
||||
```
|
||||
|
||||
8. **Create an FL from a Directory**:
|
||||
|
||||
Create an FL from a directory and save it to a specific output path:
|
||||
|
||||
```bash
|
||||
rfs flist create ./mydir --output ./mydir.flist --server http://localhost:8080
|
||||
```
|
||||
|
||||
9. **Run the FL Server**:
|
||||
|
||||
Start the FL server with a specific configuration file:
|
||||
|
||||
```bash
|
||||
rfs server --config-path ./config.yaml --debug
|
||||
```
|
||||
|
||||
10. **List FL Metadata Tags**:
|
||||
|
||||
List all tags in an FL metadata file:
|
||||
|
||||
```bash
|
||||
rfs config --meta myflist.fl tag list
|
||||
```
|
||||
|
||||
11. **Add a Tag to FL Metadata**:
|
||||
|
||||
Add a custom tag to an FL metadata file:
|
||||
|
||||
```bash
|
||||
rfs config --meta myflist.fl tag add --tag key=value
|
||||
```
|
||||
|
||||
12. **Delete a Tag from FL Metadata**:
|
||||
|
||||
Remove a specific tag from an FL metadata file:
|
||||
|
||||
```bash
|
||||
rfs config --meta myflist.fl tag delete --key key
|
||||
```
|
||||
|
||||
13. **Clone an FL to Another Store**:
|
||||
|
||||
Clone the data of an FL to another store:
|
||||
|
||||
```bash
|
||||
rfs clone --meta myflist.fl --store http://newstore.url --cache /tmp/cache
|
||||
```
|
||||
|
||||
14. **Get an Authentication Token**:
|
||||
|
||||
Retrieve an authentication token from the server:
|
||||
|
||||
```bash
|
||||
rfs token --username myuser --password mypass --server http://localhost:8080
|
||||
```
|
||||
|
||||
15. **Track User Blocks**:
|
||||
|
||||
Track all blocks uploaded by the authenticated user:
|
||||
|
||||
```bash
|
||||
rfs track --server http://localhost:8080 --token mytoken
|
||||
```
|
||||
|
||||
16. **Track a Specific Block**:
|
||||
|
||||
Track download statistics for a specific block:
|
||||
|
||||
```bash
|
||||
rfs track-blocks --server http://localhost:8080 --hash abc123def456
|
||||
```
|
||||
|
||||
17. **Track Website Downloads**:
|
||||
|
||||
Track download statistics for a published website:
|
||||
|
||||
```bash
|
||||
rfs track-website abc123def456 --server http://localhost:8080 --details
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
For more details, refer to the documentation or use the `--help` flag with any command.
|
||||
58
components/rfs/docs/zdb_flists.md
Normal file
@@ -0,0 +1,58 @@
|
||||
|
||||
# Generate an flist using ZDB
|
||||
|
||||
## Deploy a vm
|
||||
|
||||
1. Deploy a vm with a public IP
|
||||
2. add docker (don't forget to add a disk for it with mountpoint = "/var/lib/docker")
|
||||
3. add caddy
|
||||
|
||||
## Install zdb and run an instance of it
|
||||
|
||||
1. Execute `git clone -b development-v2 https://github.com/threefoldtech/0-db /zdb` then `cd /zdb`
|
||||
2. Build
|
||||
|
||||
```bash
|
||||
cd libzdb
|
||||
make
|
||||
cd ..
|
||||
|
||||
cd zdbd
|
||||
make STATIC=1
|
||||
cd ..
|
||||
|
||||
make
|
||||
```
|
||||
|
||||
3. Install `make install`
|
||||
4. run `zdb --listen 0.0.0.0`
|
||||
5. The result info you should know
|
||||
|
||||
```console
|
||||
zdbEndpoint = "<vm public IP>:<port>"
|
||||
zdbNameSpace = "default"
|
||||
zdbPassword = "default"
|
||||
```
|
||||
|
||||
## Install rfs
|
||||
|
||||
1. Execute `git clone -b development-v2 https://github.com/threefoldtech/rfs` then `cd /rfs`
|
||||
2. Execute
|
||||
|
||||
```bash
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
|
||||
mv ./target/x86_64-unknown-linux-musl/release/rfs /usr/bin/
|
||||
```
|
||||
|
||||
## Convert docker image to an fl
|
||||
|
||||
1. Try an image for example `threefolddev/ubuntu:22.04` image
|
||||
2. Executing `rfs docker -i threefolddev/ubuntu:22.04 -s "zdb://<vm public IP>:<port>/default" -d`
|
||||
3. You will end up having `threefolddev-ubuntu-22.04.fl` (flist)
|
||||
|
||||
## Serve the flist using caddy
|
||||
|
||||
1. In the directory that includes the output flist, you can run `caddy file-server --listen 0.0.0.0:2015 --browse`
|
||||
2. The flist will be available as `http://<vm public IP>:2015/threefolddev-ubuntu-22.04.fl`
|
||||
3. Use the flist to deploy any virtual machine.
|
||||
@@ -1,52 +0,0 @@
|
||||
[package]
|
||||
name = "fl-server"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[build-dependencies]
|
||||
git-version = "0.3.5"
|
||||
|
||||
[[bin]]
|
||||
name = "fl-server"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
log = "0.4"
|
||||
anyhow = "1.0.44"
|
||||
regex = "1.9.6"
|
||||
rfs = { path = "../rfs"}
|
||||
docker2fl = { path = "../docker2fl"}
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
bollard = "0.15.0"
|
||||
futures-util = "0.3"
|
||||
simple_logger = {version = "1.0.1"}
|
||||
uuid = { version = "1.3.1", features = ["v4"] }
|
||||
tempdir = "0.3"
|
||||
serde_json = "1.0"
|
||||
toml = "0.4.2"
|
||||
clap = { version = "4.5.8", features = ["derive"] }
|
||||
|
||||
serde = { version = "1.0.159" , features = ["derive"] }
|
||||
axum = "0.7"
|
||||
axum-macros = "0.4.1"
|
||||
tower = { version = "0.4", features = ["util", "timeout", "load-shed", "limit"] }
|
||||
tower-http = { version = "0.5.2", features = ["fs", "cors", "add-extension", "auth", "compression-full", "trace", "limit"] }
|
||||
tokio-async-drop = "0.1.0"
|
||||
mime_guess = "2.0.5"
|
||||
mime = "0.3.17"
|
||||
percent-encoding = "2.3.1"
|
||||
tracing = "0.1.40"
|
||||
askama = "0.12.1"
|
||||
hyper = { version = "1.4.0", features = ["full"] }
|
||||
time = { version = "0.3.36", features = ["formatting"] }
|
||||
chrono = "0.4.38"
|
||||
jsonwebtoken = "9.3.0"
|
||||
|
||||
utoipa = { version = "4", features = ["axum_extras"] }
|
||||
utoipa-swagger-ui = { version = "7", features = ["axum"] }
|
||||
thiserror = "1.0.63"
|
||||
hostname-validator = "1.1.1"
|
||||
walkdir = "2.5.0"
|
||||
sha256 = "1.5.0"
|
||||
async-trait = "0.1.53"
|
||||
@@ -1,42 +0,0 @@
|
||||
# Flist server

The flist server uses the rfs and docker2fl tools to generate different flists from docker images.

## Build

```bash
cargo build
```

## Run

First create `config.toml`; see [configuration](#configuration).

```bash
cargo run --bin fl-server -- --config-path config.toml -d
```

### Configuration

Before building or running the server, create `config.toml` in the current directory.

Example `config.toml`:

```toml
host="Your host to run the server on, required, example: 'localhost'"
port="Your port to run the server on, required, example: 3000, validation: between [0, 65535]"
store_url="List of stores to pack flists in, which can be 'dir', 'zdb', 's3', required, example: ['dir:///tmp/store0']"
flist_dir="A directory to save each user's flists, required, example: 'flists'"

jwt_secret="Secret for JWT, required, example: 'secret'"
jwt_expire_hours="Lifetime of a JWT token in hours, required, example: 5, validation: between [1, 24]"

[[users]] # list of authorized users of the server
username = "user1"
password = "password1"

[[users]]
username = "user2"
password = "password2"
...
```
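For a runnable starting point, the placeholder descriptions above can be turned into a concrete file such as the one below. This is a minimal sketch; the host, port, store path and credentials are example values only:

```bash
# write a minimal, concrete config.toml for local testing
cat > config.toml <<'EOF'
host="localhost"
port=3000
store_url=["dir:///tmp/store0"]
flist_dir="flists"

jwt_secret="secret"
jwt_expire_hours=5

[[users]]
username = "user1"
password = "password1"
EOF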
@@ -1,9 +0,0 @@
|
||||
fn main() {
|
||||
println!(
|
||||
"cargo:rustc-env=GIT_VERSION={}",
|
||||
git_version::git_version!(
|
||||
args = ["--tags", "--always", "--dirty=-modified"],
|
||||
fallback = "unknown"
|
||||
)
|
||||
);
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct User {
|
||||
pub username: String,
|
||||
pub password: String,
|
||||
}
|
||||
|
||||
pub trait DB: Send + Sync {
|
||||
fn get_user_by_username(&self, username: &str) -> Option<User>;
|
||||
}
|
||||
|
||||
#[derive(Debug, ToSchema)]
|
||||
pub struct MapDB {
|
||||
users: HashMap<String, User>,
|
||||
}
|
||||
|
||||
impl MapDB {
|
||||
pub fn new(users: &[User]) -> Self {
|
||||
Self {
|
||||
users: users
|
||||
.iter()
|
||||
.map(|u| (u.username.clone(), u.to_owned()))
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DB for MapDB {
|
||||
fn get_user_by_username(&self, username: &str) -> Option<User> {
|
||||
self.users.get(username).cloned()
|
||||
}
|
||||
}
|
||||
@@ -29,40 +29,7 @@

### backend

In the fl-server dir:

- Create a `flists` dir containing a dir for each user, e.g.:
  - fl-server
    - flists
      - user1
      - user2
- Include a config file, e.g.:

```yml
host='localhost'
port=4000
store_url=['dir:///tmp/store0']
flist_dir='flists'

jwt_secret='secret'
jwt_expire_hours=5

[[users]] # list of authorized users of the server
username = "user1"
password = "password1"

[[users]]
username = "user2"
password = "password2"
```

- Move to the `fl-server` directory and execute the following command to run the backend:

```bash
cargo run --bin fl-server -- --config-path config.toml
```

Please check [rfs server](../rfs/README.md#server-command)

### frontend

@@ -1,5 +0,0 @@

if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
  source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
fi

use flake
@@ -1,67 +0,0 @@
|
||||
[package]
|
||||
name = "rfs"
|
||||
version = "0.2.0"
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[build-dependencies]
|
||||
git-version = "0.3.5"
|
||||
|
||||
[[bin]]
|
||||
name = "rfs"
|
||||
path = "src/main.rs"
|
||||
required-features = ["build-binary"]
|
||||
|
||||
[features]
|
||||
build-binary = [
|
||||
"dep:polyfuse",
|
||||
"dep:simple_logger",
|
||||
"dep:tempfile",
|
||||
"dep:daemonize",
|
||||
"dep:clap"
|
||||
]
|
||||
|
||||
[lib]
|
||||
name = "rfs"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.44"
|
||||
time = "0.3"
|
||||
sqlx = { version = "0.7.4", features = [ "runtime-tokio-rustls", "sqlite" ] }
|
||||
tokio = { version = "1", features = [ "rt", "rt-multi-thread", "macros"] }
|
||||
libc = "0.2"
|
||||
futures = "0.3"
|
||||
thiserror = "1.0"
|
||||
bytes = "1.1.0"
|
||||
log = "0.4"
|
||||
lru = "0.7.0"
|
||||
nix = "0.23.0"
|
||||
snap = "1.0.5"
|
||||
bb8-redis = "0.13"
|
||||
async-trait = "0.1.53"
|
||||
url = "2.3.1"
|
||||
blake2b_simd = "1"
|
||||
aes-gcm = "0.10"
|
||||
hex = "0.4"
|
||||
rand = "0.8"
|
||||
# the following are only needed for the binaries
|
||||
clap = { version = "4.2", features = ["derive"], optional = true}
|
||||
simple_logger = {version = "1.0.1", optional = true}
|
||||
daemonize = { version = "0.5", optional = true }
|
||||
tempfile = { version = "3.3.0", optional = true }
|
||||
workers = { git="https://github.com/threefoldtech/tokio-worker-pool.git" }
|
||||
rust-s3 = "0.34.0-rc3"
|
||||
openssl = { version = "0.10", features = ["vendored"] }
|
||||
regex = "1.9.6"
|
||||
which = "6.0"
|
||||
reqwest = "0.11"
|
||||
|
||||
[dependencies.polyfuse]
|
||||
branch = "master"
|
||||
git = "https://github.com/muhamadazmy/polyfuse"
|
||||
optional = true
|
||||
|
||||
[dev-dependencies]
|
||||
reqwest = { version = "0.11", features = ["blocking"] }
|
||||
assert_cmd = "2.0"
|
||||
@@ -1,149 +0,0 @@

# Introduction

`rfs` is the main tool to create, mount and extract FungiStore lists (FungiList), `fl` for short. An `fl` is a simple format
to keep information about an entire filesystem in a compact form. It does not hold the data itself, but enough information to
retrieve this data back from a `store`.

## Building rfs

To build rfs, make sure you have Rust installed, then run the following commands:

```bash
# this is needed to be run once to make sure the musl target is installed
rustup target add x86_64-unknown-linux-musl

# build the binary
cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
```

The binary will be available under `./target/x86_64-unknown-linux-musl/release/rfs`; you can then copy it to `/usr/bin/`
to be able to use it from anywhere on your system.

## Stores

A store is where the actual data lives. A store can be as simple as a `directory` on your local machine, in which case the files in the `fl` are only 'accessible' on your local machine. A store can also be a `zdb` running remotely, or a cluster of `zdb`s. Right now only `dir`, `http`, `zdb` and `s3` stores are supported, but this will change in the future to support even more stores.

## Usage

### Creating an `fl`

```bash
rfs pack -m output.fl -s <store-specs> <directory>
```

This tells rfs to create an `fl` named `output.fl` using the store defined by the url `<store-specs>` and to upload all the files under `<directory>` recursively.

The simplest form of `<store-specs>` is a `url`. The store `url` defines the store to use. Any `url` has a scheme that defines the store type. Right now we have support only for:

- `dir`: dir is a very simple store that is mostly used for testing. A dir store will store the fs blobs in another location defined by the url path. An example of a valid dir url is `dir:///tmp/store`
- `zdb`: [zdb](https://github.com/threefoldtech/0-db) is an append-only key-value store that provides a redis-like API. An example zdb url can be something like `zdb://<hostname>[:port][/namespace]`
- `s3`: aws-s3 is used for storing and retrieving large amounts of data (blobs) in buckets (directories). An example: `s3://<username>:<password>@<host>:<port>/<bucket-name>`

  `region` is an optional param for s3 stores; if you want to provide one you can add it as a query to the url: `?region=<region-name>`
- `http`: http is a store mostly used for wrapping a dir store to fetch data through http requests. It does not support uploading, just fetching the data.
  It can be set in the FL file as the store to fetch the data with `rfs config`. Example: `http://localhost:9000/store` (https works too).

`<store-specs>` can also be of the form `<start>-<end>=<url>`, where `start` and `end` are hex bytes used for partitioning of blob keys. rfs will then store a blob on the defined store if the blob key falls in the `[start:end]` range (inclusive).

If the `start-end` range is not provided, a `00-FF` range is assumed, which is basically a catch-all range for the blob keys. In other words, all blobs will be written to that store.

This is only useful because `rfs` can accept multiple stores on the command line with different and/or overlapping ranges.

For example `-s 00-80=dir:///tmp/store0 -s 81-ff=dir:///tmp/store1` means all keys that have a prefix byte in range `[00-80]` will be written to /tmp/store0, and all other keys `[81-ff]` will be written to store1.

The same range can appear multiple times, which means the blob will be replicated to all the stores that match its key prefix.

To quickly test this operation:

```bash
rfs pack -m output.fl -s 00-80=dir:///tmp/store0 -s 81-ff=dir:///tmp/store1 ~/Documents
```

This command will effectively create `output.fl` and store (and shard) the blobs across the 2 locations /tmp/store0 and /tmp/store1.

```bash
# rfs pack --help

create an FL and upload blocks to provided storage

Usage: rfs pack [OPTIONS] --meta <META> <TARGET>

Arguments:
  <TARGET>  target directory to upload

Options:
  -m, --meta <META>        path to metadata file (flist)
  -s, --store <STORE>      store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for sharding. the URL is per store type, please check docs for more information
      --no-strip-password  disables automatic password stripping from store url, otherwise password will be stored in the fl.
  -h, --help               Print help
```

#### Password stripping

During creation of an flist you will probably provide a password in the URL of the store. This is normally needed to allow write operations to the store (say an s3 bucket).
Normally this password is removed from the store info so it's safe to ship the fl to users. A user of the flist will then only have read access, if configured correctly
in the store.

For example, a `zdb` store has the notion of a public namespace which is password protected for writes, but open for reads. An S3 bucket can have a policy that allows public reads, but protected writes (minio supports that via bucket settings).

If you want to disable the password stripping from the store url, you can provide the `--no-strip-password` flag during creation. This also means someone can extract
this information from the fl and gain write access to your store, so be careful how you use it.
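As a quick way to verify what ends up inside the flist, you can list the store URLs recorded in its metadata after packing. This is a minimal sketch; the s3 host, bucket and credentials are placeholders, and the exact output format may differ between rfs versions:

```bash
# pack with a credentialed store URL (the password is stripped by default)
rfs pack -m output.fl -s "s3://writer:s3cret@minio.example.com:9000/flist-blobs" ./rootfs

# list the stores recorded inside the resulting fl; the password should be gone
rfs config -m output.fl store list
```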

# Mounting an `fl`

Once the `fl` is created it can be distributed to other people. They can then mount the `fl`, which allows them to traverse the packed filesystem and also access (read-only) the files.

To mount an `fl`, only the `fl` is needed, since all information regarding the `stores` is already stored in the `fl`. This also means you can only share the `fl` if the other user can actually reach the store used to create the `fl`. So a `dir` store is not sharable, nor is a `zdb` instance that is running on localhost :no_good:

```bash
sudo rfs mount -m output.fl <target>
```

The `<target>` is the mount location, usually `/mnt`, but it can be anywhere. In another terminal you can now `cd <target>` and walk the filesystem tree. Opening the files will trigger a file download from the store only on read access.

Full command help:

```bash
# rfs mount --help

mount an FL

Usage: rfs mount [OPTIONS] --meta <META> <TARGET>

Arguments:
  <TARGET>  target mountpoint

Options:
  -m, --meta <META>    path to metadata file (flist)
  -c, --cache <CACHE>  directory used as cache for downloaded file chunks [default: /tmp/cache]
  -d, --daemon         run in the background
  -l, --log <LOG>      log file only used with daemon mode
  -h, --help           Print help
```

# Unpack an `fl`

Similar to `mount`, rfs provides an `unpack` subcommand that downloads (extracts) the entire content of an `fl` to a provided directory.

```bash
# rfs unpack --help
unpack (downloads) content of an FL to the provided location

Usage: rfs unpack [OPTIONS] --meta <META> <TARGET>

Arguments:
  <TARGET>  target directory for unpacking

Options:
  -m, --meta <META>         path to metadata file (flist)
  -c, --cache <CACHE>       directory used as cache for downloaded file chunks [default: /tmp/cache]
  -p, --preserve-ownership  preserve file ownership from the FL, otherwise use the current user ownership; setting this flag normally requires sudo
  -h, --help                Print help
```

By default when unpacking, the `-p` flag is not set, which means downloaded files will be owned by the current user/group. If the `-p` flag is set, the file ownership will be the same as the original files used to create the fl (preserving the `uid` and `gid` of the files and directories); this normally requires `sudo` while unpacking. A concrete invocation is sketched below.
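For example, to restore a packed tree into `./restored` while keeping the original ownership (a minimal sketch; `output.fl` and the target directory are placeholders):

```bash
# download the whole filesystem from the stores referenced by output.fl,
# preserving the original uid/gid (hence sudo)
sudo rfs unpack -m output.fl -p ./restored
```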

# Specifications

Please check [docs](../docs)
@@ -1,9 +0,0 @@
|
||||
fn main() {
|
||||
println!(
|
||||
"cargo:rustc-env=GIT_VERSION={}",
|
||||
git_version::git_version!(
|
||||
args = ["--tags", "--always", "--dirty=-modified"],
|
||||
fallback = "unknown"
|
||||
)
|
||||
);
|
||||
}
|
||||
105 components/rfs/rfs/flake.lock generated
@@ -1,105 +0,0 @@
|
||||
{
|
||||
"nodes": {
|
||||
"crane": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1709610799,
|
||||
"narHash": "sha256-5jfLQx0U9hXbi2skYMGodDJkIgffrjIOgMRjZqms2QE=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "81c393c776d5379c030607866afef6406ca1be57",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1709126324,
|
||||
"narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "d465f4819400de7c8d874d50b982301f28a84605",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "flake-utils",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1709677081,
|
||||
"narHash": "sha256-tix36Y7u0rkn6mTm0lA45b45oab2cFLqAzDbJxeXS+c=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "880992dcc006a5e00dd0591446fdf723e6a51a64",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-23.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"crane": "crane",
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"rust-overlay": "rust-overlay"
|
||||
}
|
||||
},
|
||||
"rust-overlay": {
|
||||
"inputs": {
|
||||
"flake-utils": [
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1712542394,
|
||||
"narHash": "sha256-UZebDBECRSrJqw4K+LxZ6qFdYnScu6q1XCwqtsu1cas=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "ece8bdb3c3b58def25f204b9a1261dee55d7c9c0",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
{
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-23.11";
|
||||
|
||||
crane.url = "github:ipetkov/crane";
|
||||
crane.inputs.nixpkgs.follows = "nixpkgs";
|
||||
|
||||
flake-utils.inputs.nixpkgs.follows = "nixpkgs";
|
||||
|
||||
rust-overlay = {
|
||||
url = "github:oxalica/rust-overlay";
|
||||
inputs = {
|
||||
nixpkgs.follows = "nixpkgs";
|
||||
flake-utils.follows = "flake-utils";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
outputs = {
|
||||
self,
|
||||
nixpkgs,
|
||||
crane,
|
||||
flake-utils,
|
||||
rust-overlay,
|
||||
}:
|
||||
flake-utils.lib.eachSystem
|
||||
[
|
||||
flake-utils.lib.system.x86_64-linux
|
||||
flake-utils.lib.system.aarch64-linux
|
||||
flake-utils.lib.system.aarch64-darwin
|
||||
] (system: let
|
||||
pkgs = import nixpkgs {
|
||||
inherit system;
|
||||
overlays = [(import rust-overlay)];
|
||||
};
|
||||
|
||||
customToolchain = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain.toml;
|
||||
craneLib = (crane.mkLib pkgs).overrideToolchain customToolchain;
|
||||
in {
|
||||
devShells.default = craneLib.devShell {
|
||||
packages = [
|
||||
pkgs.rust-analyzer
|
||||
];
|
||||
|
||||
RUST_SRC_PATH = "${pkgs.rustPlatform.rustLibSrc}";
|
||||
};
|
||||
packages.default = craneLib.buildPackage {
|
||||
src = self;
|
||||
|
||||
# 2024-03-07 failing test:
|
||||
# > thread 'test::pack_unpack' has overflowed its stack
|
||||
# > fatal runtime error: stack overflow
|
||||
# > error: test failed, to rerun pass `--lib`
|
||||
#
|
||||
# apparently needs `RUST_MIN_STACK: 8388608` according to https://github.com/threefoldtech/rfs/blob/eae5186cc6b0f8704f3e4715d2e3644f1f3baa2c/.github/workflows/tests.yaml#L25C1-L25C34
|
||||
doCheck = false;
|
||||
|
||||
cargoExtraArgs = "--bin rfs --features=build-binary";
|
||||
|
||||
nativeBuildInputs = [
|
||||
pkgs.perl
|
||||
pkgs.pkg-config
|
||||
];
|
||||
|
||||
buildInputs = [
|
||||
pkgs.openssl
|
||||
pkgs.openssl.dev
|
||||
];
|
||||
};
|
||||
});
|
||||
}
|
||||
@@ -1,3 +0,0 @@

[toolchain]
channel = "1.74.0"
151 components/rfs/rfs/src/cache/mod.rs vendored
@@ -1,151 +0,0 @@
|
||||
use crate::fungi::meta::Block;
|
||||
use crate::store::{BlockStore, Store};
|
||||
use anyhow::{Context, Result};
|
||||
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::PathBuf;
|
||||
use tokio::fs::{self, File, OpenOptions};
|
||||
use tokio::io::{AsyncSeekExt, AsyncWriteExt};
|
||||
|
||||
/// Cache implements a caching layer on top of a block store
|
||||
//#[derive(Clone)]
|
||||
pub struct Cache<S: Store> {
|
||||
store: BlockStore<S>,
|
||||
root: PathBuf,
|
||||
}
|
||||
|
||||
impl<S> Cache<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
pub fn new<P>(root: P, store: S) -> Self
|
||||
where
|
||||
P: Into<PathBuf>,
|
||||
{
|
||||
Cache {
|
||||
store: store.into(),
|
||||
root: root.into(),
|
||||
}
|
||||
}
|
||||
|
||||
// download given an open file, writes the content of the chunk to the file
|
||||
async fn download(&self, file: &mut File, block: &Block) -> Result<u64> {
|
||||
let data = self.store.get(block).await?;
|
||||
file.write_all(&data).await?;
|
||||
|
||||
Ok(data.len() as u64)
|
||||
}
|
||||
|
||||
async fn prepare(&self, id: &[u8]) -> Result<File> {
|
||||
let name = id.hex();
|
||||
if name.len() < 4 {
|
||||
anyhow::bail!("invalid chunk hash");
|
||||
}
|
||||
let path = self.root.join(&name[0..2]).join(&name[2..4]);
|
||||
fs::create_dir_all(&path).await?;
|
||||
let path = path.join(name);
|
||||
|
||||
let file = OpenOptions::new()
|
||||
.create(true)
|
||||
.read(true)
|
||||
.write(true)
|
||||
.truncate(false)
|
||||
.open(path)
|
||||
.await?;
|
||||
|
||||
Ok(file)
|
||||
}
|
||||
|
||||
/// get a file block either from cache or from remote if it's already
|
||||
/// not cached
|
||||
pub async fn get(&self, block: &Block) -> Result<(u64, File)> {
|
||||
let mut file = self
|
||||
.prepare(&block.id)
|
||||
.await
|
||||
.context("failed to prepare cache block")?;
|
||||
// TODO: locking must happen here so no
|
||||
// other processes start downloading the same chunk
|
||||
let locker = Locker::new(&file);
|
||||
locker.lock().await?;
|
||||
|
||||
let meta = file
|
||||
.metadata()
|
||||
.await
|
||||
.context("failed to get block metadata")?;
|
||||
if meta.len() > 0 {
|
||||
// chunk is already downloaded
|
||||
debug!("block cache hit: {}", block.id.as_slice().hex());
|
||||
locker.unlock().await?;
|
||||
return Ok((meta.len(), file));
|
||||
}
|
||||
|
||||
debug!("downloading block with key: {}", block.id.as_slice().hex());
|
||||
let size = self
|
||||
.download(&mut file, block)
|
||||
.await
|
||||
.context("failed to download block")?;
|
||||
|
||||
// if file is just downloaded, we need
|
||||
// to seek to beginning of the file.
|
||||
file.rewind().await?;
|
||||
|
||||
locker.unlock().await?;
|
||||
Ok((size, file))
|
||||
}
|
||||
|
||||
/// direct downloads all the file blocks from remote and write it to output
|
||||
#[allow(dead_code)]
|
||||
pub async fn direct(&self, blocks: &[Block], out: &mut File) -> Result<()> {
|
||||
use tokio::io::copy;
|
||||
for (index, block) in blocks.iter().enumerate() {
|
||||
let (_, mut chunk) = self.get(block).await?;
|
||||
copy(&mut chunk, out)
|
||||
.await
|
||||
.with_context(|| format!("failed to copy block {}", index))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Locker {
|
||||
fd: std::os::unix::io::RawFd,
|
||||
}
|
||||
|
||||
impl Locker {
|
||||
pub fn new(f: &File) -> Locker {
|
||||
Locker { fd: f.as_raw_fd() }
|
||||
}
|
||||
|
||||
pub async fn lock(&self) -> Result<()> {
|
||||
let fd = self.fd;
|
||||
tokio::task::spawn_blocking(move || {
|
||||
nix::fcntl::flock(fd, nix::fcntl::FlockArg::LockExclusive)
|
||||
})
|
||||
.await
|
||||
.context("failed to spawn file locking")?
|
||||
.context("failed to lock file")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn unlock(&self) -> Result<()> {
|
||||
let fd = self.fd;
|
||||
tokio::task::spawn_blocking(move || nix::fcntl::flock(fd, nix::fcntl::FlockArg::Unlock))
|
||||
.await
|
||||
.context("failed to spawn file lunlocking")?
|
||||
.context("failed to unlock file")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
trait Hex {
|
||||
fn hex(&self) -> String;
|
||||
}
|
||||
|
||||
impl Hex for &[u8] {
|
||||
fn hex(&self) -> String {
|
||||
hex::encode(self)
|
||||
}
|
||||
}
|
||||
@@ -1,383 +0,0 @@
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
use nix::sys::signal::{self, Signal};
|
||||
use nix::unistd::Pid;
|
||||
use std::error::Error;
|
||||
use std::io::Read;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use clap::{ArgAction, Args, Parser, Subcommand};
|
||||
|
||||
use rfs::fungi;
|
||||
use rfs::store::{self};
|
||||
use rfs::{cache, config};
|
||||
|
||||
mod fs;
|
||||
/// mount flists
|
||||
#[derive(Parser, Debug)]
|
||||
#[clap(name ="rfs", author, version = env!("GIT_VERSION"), about, long_about = None)]
|
||||
struct Options {
|
||||
/// enable debugging logs
|
||||
#[clap(long, action=ArgAction::Count)]
|
||||
debug: u8,
|
||||
|
||||
#[command(subcommand)]
|
||||
command: Commands,
|
||||
}
|
||||
|
||||
#[derive(Subcommand, Debug)]
|
||||
enum Commands {
|
||||
/// mount an FL
|
||||
Mount(MountOptions),
|
||||
/// create an FL and upload blocks to provided storage
|
||||
Pack(PackOptions),
|
||||
/// unpack (downloads) content of an FL to the provided location
|
||||
Unpack(UnpackOptions),
|
||||
/// clone copies the data from the stores of an FL to another stores
|
||||
Clone(CloneOptions),
|
||||
/// list or modify FL metadata and stores
|
||||
Config(ConfigOptions),
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct MountOptions {
|
||||
/// path to metadata file (flist)
|
||||
#[clap(short, long)]
|
||||
meta: String,
|
||||
|
||||
/// directory used as cache for downloaded file chunks
|
||||
#[clap(short, long, default_value_t = String::from("/tmp/cache"))]
|
||||
cache: String,
|
||||
|
||||
/// run in the background.
|
||||
#[clap(short, long)]
|
||||
daemon: bool,
|
||||
|
||||
/// log file only used with daemon mode
|
||||
#[clap(short, long)]
|
||||
log: Option<String>,
|
||||
|
||||
/// target mountpoint
|
||||
target: String,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct PackOptions {
|
||||
/// path to metadata file (flist)
|
||||
#[clap(short, long)]
|
||||
meta: String,
|
||||
|
||||
/// store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for
|
||||
/// sharding. the URL is per store type, please check docs for more information
|
||||
#[clap(short, long, action=ArgAction::Append)]
|
||||
store: Vec<String>,
|
||||
|
||||
/// no_strip_password disable automatic password stripping from store url, otherwise password will be stored in the fl.
|
||||
#[clap(long, default_value_t = false)]
|
||||
no_strip_password: bool,
|
||||
|
||||
/// target directory to upload
|
||||
target: String,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct UnpackOptions {
|
||||
/// path to metadata file (flist)
|
||||
#[clap(short, long)]
|
||||
meta: String,
|
||||
|
||||
/// directory used as cache for downloaded file chunks
|
||||
#[clap(short, long, default_value_t = String::from("/tmp/cache"))]
|
||||
cache: String,
|
||||
|
||||
/// preserve files ownership from the FL, otherwise use the current user ownership
|
||||
/// setting this flag to true normally requires sudo
|
||||
#[clap(short, long, default_value_t = false)]
|
||||
preserve_ownership: bool,
|
||||
|
||||
/// target directory for unpacking
|
||||
target: String,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct CloneOptions {
|
||||
/// path to metadata file (flist)
|
||||
#[clap(short, long)]
|
||||
meta: String,
|
||||
|
||||
/// store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for
|
||||
/// sharding. the URL is per store type, please check docs for more information
|
||||
#[clap(short, long, action=ArgAction::Append)]
|
||||
store: Vec<String>,
|
||||
|
||||
/// directory used as cache for downloaded file chunks
|
||||
#[clap(short, long, default_value_t = String::from("/tmp/cache"))]
|
||||
cache: String,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct ConfigOptions {
|
||||
/// path to metadata file (flist)
|
||||
#[clap(short, long)]
|
||||
meta: String,
|
||||
|
||||
#[command(subcommand)]
|
||||
command: ConfigCommands,
|
||||
}
|
||||
|
||||
#[derive(Subcommand, Debug)]
|
||||
enum ConfigCommands {
|
||||
#[command(subcommand)]
|
||||
Tag(TagOperation),
|
||||
#[command(subcommand)]
|
||||
Store(StoreOperation),
|
||||
}
|
||||
|
||||
#[derive(Subcommand, Debug)]
|
||||
enum TagOperation {
|
||||
List,
|
||||
Add(TagAddOptions),
|
||||
Delete(TagDeleteOptions),
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct TagAddOptions {
|
||||
/// pair of key-values separated with '='
|
||||
#[clap(short, long, value_parser = parse_key_val::<String, String>, number_of_values = 1)]
|
||||
tag: Vec<(String, String)>,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct TagDeleteOptions {
|
||||
/// key to remove
|
||||
#[clap(short, long, action=ArgAction::Append)]
|
||||
key: Vec<String>,
|
||||
/// remove all tags
|
||||
#[clap(short, long, default_value_t = false)]
|
||||
all: bool,
|
||||
}
|
||||
|
||||
#[derive(Subcommand, Debug)]
|
||||
enum StoreOperation {
|
||||
List,
|
||||
Add(StoreAddOptions),
|
||||
Delete(StoreDeleteOptions),
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct StoreAddOptions {
|
||||
/// store url in the format [xx-xx=]<url>. the range xx-xx is optional and used for
|
||||
/// sharding. the URL is per store type, please check docs for more information
|
||||
#[clap(short, long, action=ArgAction::Append)]
|
||||
store: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct StoreDeleteOptions {
|
||||
/// store to remove
|
||||
#[clap(short, long, action=ArgAction::Append)]
|
||||
store: Vec<String>,
|
||||
/// remove all stores
|
||||
#[clap(short, long, default_value_t = false)]
|
||||
all: bool,
|
||||
}
|
||||
|
||||
/// Parse a single key-value pair
|
||||
fn parse_key_val<T, U>(s: &str) -> Result<(T, U), Box<dyn Error + Send + Sync + 'static>>
|
||||
where
|
||||
T: std::str::FromStr,
|
||||
T::Err: Error + Send + Sync + 'static,
|
||||
U: std::str::FromStr,
|
||||
U::Err: Error + Send + Sync + 'static,
|
||||
{
|
||||
let pos = s
|
||||
.find('=')
|
||||
.ok_or_else(|| format!("invalid KEY=value: no `=` found in `{s}`"))?;
|
||||
Ok((s[..pos].parse()?, s[pos + 1..].parse()?))
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let opts = Options::parse();
|
||||
|
||||
simple_logger::SimpleLogger::new()
|
||||
.with_utc_timestamps()
|
||||
.with_level({
|
||||
match opts.debug {
|
||||
0 => log::LevelFilter::Info,
|
||||
1 => log::LevelFilter::Debug,
|
||||
_ => log::LevelFilter::Trace,
|
||||
}
|
||||
})
|
||||
.with_module_level("sqlx", log::Level::Error.to_level_filter())
|
||||
.init()?;
|
||||
|
||||
log::debug!("options: {:#?}", opts);
|
||||
|
||||
match opts.command {
|
||||
Commands::Mount(opts) => mount(opts),
|
||||
Commands::Pack(opts) => pack(opts),
|
||||
Commands::Unpack(opts) => unpack(opts),
|
||||
Commands::Clone(opts) => clone(opts),
|
||||
Commands::Config(opts) => config(opts),
|
||||
}
|
||||
}
|
||||
|
||||
fn pack(opts: PackOptions) -> Result<()> {
|
||||
let rt = tokio::runtime::Runtime::new()?;
|
||||
|
||||
rt.block_on(async move {
|
||||
let store = store::parse_router(opts.store.as_slice()).await?;
|
||||
let meta = fungi::Writer::new(opts.meta, true).await?;
|
||||
rfs::pack(meta, store, opts.target, !opts.no_strip_password, None).await?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn unpack(opts: UnpackOptions) -> Result<()> {
|
||||
let rt = tokio::runtime::Runtime::new()?;
|
||||
|
||||
rt.block_on(async move {
|
||||
let meta = fungi::Reader::new(opts.meta)
|
||||
.await
|
||||
.context("failed to initialize metadata database")?;
|
||||
|
||||
let router = store::get_router(&meta).await?;
|
||||
|
||||
let cache = cache::Cache::new(opts.cache, router);
|
||||
rfs::unpack(&meta, &cache, opts.target, opts.preserve_ownership).await?;
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn mount(opts: MountOptions) -> Result<()> {
|
||||
if is_mountpoint(&opts.target)? {
|
||||
eprintln!("target {} is already a mount point", opts.target);
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
if opts.daemon {
|
||||
let pid_file = tempfile::NamedTempFile::new()?;
|
||||
let target = opts.target.clone();
|
||||
let mut daemon = daemonize::Daemonize::new()
|
||||
.working_directory(std::env::current_dir()?)
|
||||
.pid_file(pid_file.path());
|
||||
if let Some(ref log) = opts.log {
|
||||
let out = std::fs::File::create(log)?;
|
||||
let err = out.try_clone()?;
|
||||
daemon = daemon.stdout(out).stderr(err);
|
||||
}
|
||||
|
||||
match daemon.execute() {
|
||||
daemonize::Outcome::Parent(result) => {
|
||||
result.context("daemonize")?;
|
||||
wait_child(target, pid_file);
|
||||
return Ok(());
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
let rt = tokio::runtime::Runtime::new()?;
|
||||
|
||||
rt.block_on(fuse(opts))
|
||||
}
|
||||
|
||||
fn is_mountpoint<S: AsRef<str>>(target: S) -> Result<bool> {
|
||||
use std::process::Command;
|
||||
|
||||
let output = Command::new("mountpoint")
|
||||
.arg("-q")
|
||||
.arg(target.as_ref())
|
||||
.output()
|
||||
.context("failed to check mountpoint")?;
|
||||
|
||||
Ok(output.status.success())
|
||||
}
|
||||
|
||||
fn wait_child(target: String, mut pid_file: tempfile::NamedTempFile) {
|
||||
for _ in 0..5 {
|
||||
if is_mountpoint(&target).unwrap() {
|
||||
return;
|
||||
}
|
||||
std::thread::sleep(std::time::Duration::from_secs(1));
|
||||
}
|
||||
let mut buf = String::new();
|
||||
if let Err(e) = pid_file.read_to_string(&mut buf) {
|
||||
error!("failed to read pid_file: {:#}", e);
|
||||
}
|
||||
let pid = buf.parse::<i32>();
|
||||
match pid {
|
||||
Err(e) => error!("failed to parse pid_file contents {}: {:#}", buf, e),
|
||||
Ok(v) => {
|
||||
let _ = signal::kill(Pid::from_raw(v), Signal::SIGTERM);
|
||||
} // probably the child exited on its own
|
||||
}
|
||||
// cleanup is not performed if the process is terminated with exit(2)
|
||||
drop(pid_file);
|
||||
eprintln!("failed to mount in under 5 seconds, please check logs for more information");
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
async fn fuse(opts: MountOptions) -> Result<()> {
|
||||
let meta = fungi::Reader::new(opts.meta)
|
||||
.await
|
||||
.context("failed to initialize metadata database")?;
|
||||
|
||||
let router = store::get_router(&meta).await?;
|
||||
|
||||
let cache = cache::Cache::new(opts.cache, router);
|
||||
let filesystem = fs::Filesystem::new(meta, cache);
|
||||
|
||||
filesystem.mount(opts.target).await
|
||||
}
|
||||
|
||||
fn clone(opts: CloneOptions) -> Result<()> {
|
||||
let rt = tokio::runtime::Runtime::new()?;
|
||||
|
||||
rt.block_on(async move {
|
||||
let store = store::parse_router(opts.store.as_slice()).await?;
|
||||
let meta = fungi::Reader::new(opts.meta)
|
||||
.await
|
||||
.context("failed to initialize metadata database")?;
|
||||
|
||||
let router = store::get_router(&meta).await?;
|
||||
|
||||
let cache = cache::Cache::new(opts.cache, router);
|
||||
rfs::clone(meta, store, cache).await?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
fn config(opts: ConfigOptions) -> Result<()> {
|
||||
let rt = tokio::runtime::Runtime::new()?;
|
||||
|
||||
rt.block_on(async move {
|
||||
let writer = fungi::Writer::new(opts.meta.clone(), false)
|
||||
.await
|
||||
.context("failed to initialize metadata database")?;
|
||||
|
||||
let reader = fungi::Reader::new(opts.meta)
|
||||
.await
|
||||
.context("failed to initialize metadata database")?;
|
||||
|
||||
match opts.command {
|
||||
ConfigCommands::Tag(opts) => match opts {
|
||||
TagOperation::List => config::tag_list(reader).await?,
|
||||
TagOperation::Add(opts) => config::tag_add(writer, opts.tag).await?,
|
||||
TagOperation::Delete(opts) => {
|
||||
config::tag_delete(writer, opts.key, opts.all).await?
|
||||
}
|
||||
},
|
||||
ConfigCommands::Store(opts) => match opts {
|
||||
StoreOperation::List => config::store_list(reader).await?,
|
||||
StoreOperation::Add(opts) => config::store_add(writer, opts.store).await?,
|
||||
StoreOperation::Delete(opts) => {
|
||||
config::store_delete(writer, opts.store, opts.all).await?
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
3 components/rfs/rust-toolchain.toml Normal file
@@ -0,0 +1,3 @@

[toolchain]
channel = "1.83.0"
28 components/rfs/schema/server.sql Normal file
@@ -0,0 +1,28 @@

-- Schema for blocks and files metadata relation tables in SQLite database

-- Table to store file metadata
CREATE TABLE IF NOT EXISTS metadata (
    id INTEGER PRIMARY KEY AUTOINCREMENT,            -- Auto-incrementing ID for the file
    file_hash VARCHAR(64) NOT NULL,                  -- SHA-256 hash of the file content (64 characters for hex representation)
    block_index INTEGER,                             -- The index of the block in the file (NULL if not part of a file)
    block_hash VARCHAR(64),                          -- SHA-256 hash of the block data (64 characters for hex representation)
    user_id INTEGER,                                 -- ID of the user who uploaded the block
    block_size INTEGER,                              -- Size of the block in bytes
    downloads_count INTEGER DEFAULT 0,               -- Number of times the block has been downloaded
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,  -- When the file was added to the database
    FOREIGN KEY (user_id) REFERENCES users(id)       -- Foreign key constraint to users table
);

-- Index on file_hash for faster lookups
CREATE INDEX IF NOT EXISTS idx_files_hash ON metadata (file_hash);

-- Table to store user information
CREATE TABLE IF NOT EXISTS users (
    id INTEGER PRIMARY KEY AUTOINCREMENT,            -- Auto-incrementing ID for the user
    username VARCHAR(255) NOT NULL UNIQUE,           -- Unique username
    password VARCHAR(255) NOT NULL,                  -- Hashed password
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP   -- When the user was added to the database
);

-- Index on username for faster lookups
CREATE INDEX IF NOT EXISTS idx_users_username ON users (username);
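Given this schema, the per-block download statistics that `rfs track-blocks` reports (download counts and consumed bandwidth) can also be inspected directly with the sqlite3 CLI. A minimal sketch, assuming the server keeps its database in a file named `fl-server.db`; the actual path depends on the server configuration:

```bash
# total downloads and consumed bandwidth per uploading user
sqlite3 fl-server.db "
  SELECT u.username,
         SUM(m.downloads_count)                AS downloads,
         SUM(m.downloads_count * m.block_size) AS bandwidth_bytes
  FROM metadata m
  JOIN users u ON u.id = m.user_id
  GROUP BY u.username;
"
```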
@@ -3,7 +3,7 @@
|
||||
## Requirements
|
||||
|
||||
- tfcmd
|
||||
- docker2fl
|
||||
- rfs
|
||||
- rust
|
||||
- docker
|
||||
- git
|
||||
@@ -54,14 +54,14 @@ apt-get update
|
||||
dockerd > docker.log 2>&1 &
|
||||
```
|
||||
|
||||
### Install docker2fl
|
||||
### Install rfs
|
||||
|
||||
```bash
|
||||
git clone https://github.com/threefoldtech/rfs.git
|
||||
cd rfs
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
cargo build --features build-binary --release --target=x86_64-unknown-linux-musl
|
||||
mv ./target/x86_64-unknown-linux-musl/release/docker2fl /usr/local/bin
|
||||
mv ./target/x86_64-unknown-linux-musl/release/rfs /usr/local/bin
|
||||
```
|
||||
|
||||
### Install sqlite
|
||||
@@ -137,7 +137,7 @@ export WRITE_KEY_ID=<"your key ID">
|
||||
export WRITE_KEY_SECRET=<"your key secret">
|
||||
export MYCELIUM_IP=<"your machine mycelium IP which has your garage server">
|
||||
|
||||
docker2fl -i $IMAGE -s 's3://$WRITE_KEY_ID:$WRITE_KEY_SECRET@$[$MYCELIUM_IP]:3900/blobs?region=garage'
|
||||
rfs docker -i $IMAGE -s 's3://$WRITE_KEY_ID:$WRITE_KEY_SECRET@$[$MYCELIUM_IP]:3900/blobs?region=garage'
|
||||
```
|
||||
|
||||
- Update the key to the read only key
|
||||
|
||||
@@ -18,8 +18,8 @@ use std::path::Path;
|
||||
use std::process::Command;
|
||||
use tokio_async_drop::tokio_async_drop;
|
||||
|
||||
use rfs::fungi::Writer;
|
||||
use rfs::store::Store;
|
||||
use crate::fungi::Writer;
|
||||
use crate::store::Store;
|
||||
|
||||
struct DockerInfo {
|
||||
image_name: String,
|
||||
@@ -58,7 +58,7 @@ impl DockerImageToFlist {
|
||||
credentials: Option<DockerCredentials>,
|
||||
docker_tmp_dir: TempDir,
|
||||
) -> Self {
|
||||
DockerImageToFlist {
|
||||
Self {
|
||||
meta,
|
||||
image_name,
|
||||
credentials,
|
||||
@@ -66,8 +66,8 @@ impl DockerImageToFlist {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn files_count(&self) -> usize {
|
||||
WalkDir::new(self.docker_tmp_dir.path()).into_iter().count()
|
||||
pub fn files_count(&self) -> u32 {
|
||||
WalkDir::new(self.docker_tmp_dir.path()).into_iter().count() as u32
|
||||
}
|
||||
|
||||
pub async fn prepare(&mut self) -> Result<()> {
|
||||
@@ -105,7 +105,7 @@ impl DockerImageToFlist {
|
||||
}
|
||||
|
||||
pub async fn pack<S: Store>(&mut self, store: S, sender: Option<Sender<u32>>) -> Result<()> {
|
||||
rfs::pack(
|
||||
crate::pack(
|
||||
self.meta.clone(),
|
||||
store,
|
||||
&self.docker_tmp_dir.path(),
|
||||
@@ -239,28 +239,37 @@ async fn container_boot(
|
||||
.config
|
||||
.context("failed to get docker container configs")?;
|
||||
|
||||
let command;
|
||||
let args;
|
||||
let mut command = String::new();
|
||||
let mut args: Vec<String> = Vec::new();
|
||||
let mut env: HashMap<String, String> = HashMap::new();
|
||||
let mut cwd = String::from("/");
|
||||
|
||||
let cmd = container_config.cmd.expect("failed to get cmd configs");
|
||||
|
||||
if let Some(entrypoint) = container_config.entrypoint {
|
||||
command = (entrypoint.first().expect("failed to get first entrypoint")).to_string();
|
||||
|
||||
if entrypoint.len() > 1 {
|
||||
let (_, entries) = entrypoint
|
||||
.split_first()
|
||||
.expect("failed to split entrypoint");
|
||||
args = entries.to_vec();
|
||||
} else {
|
||||
args = cmd;
|
||||
if let Some(ref entrypoint) = container_config.entrypoint {
|
||||
if !entrypoint.is_empty() {
|
||||
command = entrypoint[0].to_string();
|
||||
for i in 1..entrypoint.len() {
|
||||
args.push(entrypoint[i].to_string());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
command = (cmd.first().expect("failed to get first cmd")).to_string();
|
||||
let (_, entries) = cmd.split_first().expect("failed to split cmd");
|
||||
args = entries.to_vec();
|
||||
}
|
||||
|
||||
if let Some(ref cmd) = container_config.cmd {
|
||||
if !cmd.is_empty() {
|
||||
if command.is_empty() {
|
||||
command = cmd[0].to_string();
|
||||
for i in 1..cmd.len() {
|
||||
args.push(cmd[i].to_string());
|
||||
}
|
||||
} else {
|
||||
for i in 0..cmd.len() {
|
||||
args.push(cmd[i].to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if command.is_empty() {
|
||||
command = String::from("/bin/sh");
|
||||
}
|
||||
|
||||
if let Some(envs) = container_config.env {
|
||||
@@ -302,14 +311,13 @@ async fn container_boot(
|
||||
fs::write(
|
||||
docker_tmp_dir_path.join(".startup.toml"),
|
||||
toml_metadata.to_string(),
|
||||
)
|
||||
.expect("failed to create '.startup.toml' file");
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn clean(docker: &Docker, image_name: &str, container_name: &str) -> Result<()> {
|
||||
log::info!("cleaning docker image and container");
|
||||
log::debug!("Removing docker container {}", container_name);
|
||||
|
||||
let options = Some(RemoveContainerOptions {
|
||||
force: true,
|
||||
@@ -321,13 +329,15 @@ async fn clean(docker: &Docker, image_name: &str, container_name: &str) -> Resul
|
||||
.await
|
||||
.context("failed to remove docker container")?;
|
||||
|
||||
let remove_options = Some(RemoveImageOptions {
|
||||
log::debug!("Removing docker image {}", image_name);
|
||||
|
||||
let options = Some(RemoveImageOptions {
|
||||
force: true,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
docker
|
||||
.remove_image(image_name, remove_options, None)
|
||||
.remove_image(image_name, options, None)
|
||||
.await
|
||||
.context("failed to remove docker image")?;
|
||||
|
||||
335 components/rfs/src/download.rs Normal file
@@ -0,0 +1,335 @@
|
||||
use anyhow::{Context, Result};
|
||||
use futures::{stream, StreamExt};
|
||||
use std::path::Path;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use crate::server_api;
|
||||
use crate::{cache, fungi, store};
|
||||
|
||||
const PARALLEL_DOWNLOAD: usize = 20; // Number of blocks to download in parallel
|
||||
|
||||
/// Downloads all blocks for a file or a single block and assembles them
|
||||
pub async fn download<P: AsRef<Path>>(hash: &str, file_name: P, server_url: String) -> Result<()> {
|
||||
let file_name = file_name.as_ref();
|
||||
|
||||
info!("Downloading blocks for hash: {}", hash);
|
||||
info!("Saving to: {}", file_name.display());
|
||||
|
||||
let blocks = server_api::get_blocks_by_hash(hash, server_url.clone()).await?;
|
||||
|
||||
if blocks.is_empty() {
|
||||
return Err(anyhow::anyhow!("No blocks found for hash: {}", hash));
|
||||
}
|
||||
|
||||
// Store the number of blocks
|
||||
let blocks_count = blocks.len();
|
||||
|
||||
// Create the file
|
||||
let mut file = File::create(file_name)
|
||||
.await
|
||||
.context("Failed to create output file")?;
|
||||
|
||||
// Create a semaphore to limit concurrent downloads
|
||||
let semaphore = std::sync::Arc::new(Semaphore::new(PARALLEL_DOWNLOAD));
|
||||
|
||||
// Download blocks in parallel
|
||||
info!(
|
||||
"Starting parallel download of {} blocks with concurrency {}",
|
||||
blocks_count, PARALLEL_DOWNLOAD
|
||||
);
|
||||
|
||||
// Create a vector to store downloaded blocks in order
|
||||
let mut downloaded_blocks = vec![None; blocks_count];
|
||||
|
||||
// Process blocks in parallel with limited concurrency
|
||||
let results = stream::iter(blocks.into_iter().enumerate())
|
||||
.map(|(i, (block_hash, block_index))| {
|
||||
let server_url = server_url.clone();
|
||||
let permit = semaphore.clone();
|
||||
|
||||
async move {
|
||||
// Acquire a permit from the semaphore
|
||||
let _permit = permit
|
||||
.acquire()
|
||||
.await
|
||||
.expect("Failed to acquire semaphore permit");
|
||||
|
||||
info!("Downloading block {} (index: {})", block_hash, block_index);
|
||||
|
||||
// Download the block
|
||||
server_api::download_block(&block_hash, &server_url)
|
||||
.await
|
||||
.map(|content| (i, content))
|
||||
.map_err(|e| (i, e))
|
||||
}
|
||||
})
|
||||
.buffer_unordered(PARALLEL_DOWNLOAD)
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
// Process results and write blocks to file in correct order
|
||||
for result in results {
|
||||
match result {
|
||||
Ok((index, content)) => {
|
||||
downloaded_blocks[index] = Some(content);
|
||||
}
|
||||
Err((index, e)) => {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Failed to download block at index {}: {}",
|
||||
index,
|
||||
e
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write blocks to file in order
|
||||
for (i, block_opt) in downloaded_blocks.into_iter().enumerate() {
|
||||
if let Some(block_content) = block_opt {
|
||||
file.write_all(&block_content)
|
||||
.await
|
||||
.context(format!("Failed to write block at index {} to file", i))?;
|
||||
} else {
|
||||
return Err(anyhow::anyhow!("Missing block at index {}", i));
|
||||
}
|
||||
}
|
||||
|
||||
info!("File downloaded successfully to {:?}", file_name);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Downloads a directory by processing all files listed in its flist using the flist hash
|
||||
pub async fn download_dir<P: AsRef<Path>>(
|
||||
flist_hash: &str,
|
||||
output_dir: P,
|
||||
server_url: String,
|
||||
) -> Result<()> {
|
||||
let output_dir = output_dir.as_ref();
|
||||
|
||||
info!("Downloading directory from flist with hash: {}", flist_hash);
|
||||
info!("Saving files to: {}", output_dir.display());
|
||||
|
||||
// Download the flist file using its hash
|
||||
let temp_path = std::env::temp_dir().join(format!("{}.fl", flist_hash));
|
||||
download(flist_hash, &temp_path, server_url.clone()).await?;
|
||||
|
||||
let meta = fungi::Reader::new(temp_path)
|
||||
.await
|
||||
.context("failed to initialize metadata database")?;
|
||||
|
||||
let router = store::get_router(&meta).await?;
|
||||
let cache = cache::Cache::new("/tmp/cache", router);
|
||||
crate::unpack(&meta, &cache, output_dir, false).await?;
|
||||
|
||||
info!("Directory download complete");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Track blocks uploaded by the user and their download counts
|
||||
/// If hash is provided, only track that specific block
|
||||
/// Otherwise, track all user blocks
|
||||
pub async fn track_blocks(
|
||||
server_url: &str,
|
||||
token: &str,
|
||||
hash: Option<&str>,
|
||||
details: bool,
|
||||
) -> Result<()> {
|
||||
if let Some(block_hash) = hash {
|
||||
match server_api::get_block_downloads(server_url, block_hash).await {
|
||||
Ok(downloads) => {
|
||||
println!(
|
||||
"{:<64} {:<10} {:<10}",
|
||||
"BLOCK HASH", "DOWNLOADS", "SIZE (B)"
|
||||
);
|
||||
println!("{}", "-".repeat(85));
|
||||
println!(
|
||||
"{:<64} {:<10} {:<10}",
|
||||
downloads.block_hash, downloads.downloads_count, downloads.block_size
|
||||
);
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Failed to get download count for block {}: {}",
|
||||
block_hash,
|
||||
err
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Track all user blocks
|
||||
let mut all_user_blocks = Vec::new();
|
||||
|
||||
let first_page = server_api::get_user_blocks(server_url, token, Some(1), Some(50))
|
||||
.await
|
||||
.context("Failed to get user blocks")?;
|
||||
|
||||
let total_pages = (first_page.total as f64 / 50.0).ceil() as u32;
|
||||
|
||||
let mut tasks = Vec::new();
|
||||
for page in 1..=total_pages {
|
||||
let server_url = server_url.to_string();
|
||||
let token = token.to_string();
|
||||
tasks.push(tokio::spawn(async move {
|
||||
server_api::get_user_blocks(&server_url, &token, Some(page), Some(50)).await
|
||||
}));
|
||||
}
|
||||
|
||||
for task in tasks {
|
||||
match task.await {
|
||||
Ok(Ok(blocks_per_page)) => {
|
||||
all_user_blocks.extend(blocks_per_page.blocks);
|
||||
}
|
||||
Ok(Err(err)) => {
|
||||
return Err(anyhow::anyhow!("Failed to get user blocks: {}", err));
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(anyhow::anyhow!("Task failed: {}", err));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
println!(
|
||||
"User has {} blocks out of {} total blocks on the server",
|
||||
all_user_blocks.len(),
|
||||
first_page.all_blocks
|
||||
);
|
||||
|
||||
let block_hashes: Vec<String> = all_user_blocks
|
||||
.into_iter()
|
||||
.map(|(block_hash, _)| block_hash)
|
||||
.collect();
|
||||
print_block_downloads(server_url, block_hashes, details).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn print_block_downloads(
|
||||
server_url: &str,
|
||||
blocks: Vec<String>,
|
||||
details: bool,
|
||||
) -> Result<()> {
|
||||
// Collect all block details first
|
||||
let mut block_details = Vec::new();
|
||||
let mut total_downloads = 0;
|
||||
let mut bandwidth = 0;
|
||||
|
||||
for block_hash in blocks {
|
||||
match server_api::get_block_downloads(server_url, &block_hash).await {
|
||||
Ok(downloads) => {
|
||||
total_downloads += downloads.downloads_count;
|
||||
bandwidth += downloads.block_size * downloads.downloads_count;
|
||||
block_details.push(downloads);
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Failed to get download count for block {}: {}",
|
||||
block_hash,
|
||||
err
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Print totals first
|
||||
println!("{}", "-".repeat(85));
|
||||
println!("TOTAL DOWNLOADS: {}", total_downloads);
|
||||
println!("BANDWIDTH: {} bytes", bandwidth);
|
||||
|
||||
if details {
|
||||
println!("{}", "-".repeat(85));
|
||||
|
||||
println!(
|
||||
"\n{:<64} {:<10} {:<10}",
|
||||
"BLOCK HASH", "DOWNLOADS", "SIZE (B)"
|
||||
);
|
||||
println!("{}", "-".repeat(85));
|
||||
|
||||
for block in block_details {
|
||||
println!(
|
||||
"{:<64} {:<10} {:<10}",
|
||||
block.block_hash, block.downloads_count, block.block_size
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn track_website(server_url: &str, flist_hash: &str, details: bool) -> Result<()> {
|
||||
// Temporarily disable logs for the upload function
|
||||
let original_level = log::max_level();
|
||||
log::set_max_level(log::LevelFilter::Off);
|
||||
|
||||
let flist_blocks = server_api::get_blocks_by_hash(flist_hash, server_url.to_owned()).await?;
|
||||
|
||||
if flist_blocks.is_empty() {
|
||||
return Err(anyhow::anyhow!("No blocks found for hash: {}", flist_hash));
|
||||
}
|
||||
|
||||
// Download the flist file using its hash
|
||||
let temp_path = std::env::temp_dir().join(format!("{}.fl", flist_hash));
|
||||
download(flist_hash, &temp_path, server_url.to_owned()).await?;
|
||||
|
||||
let meta = fungi::Reader::new(temp_path)
|
||||
.await
|
||||
.context("failed to initialize metadata database")?;
|
||||
|
||||
let router = store::get_router(&meta).await?;
|
||||
let cache_dir = std::env::temp_dir().join("cache_blocks");
|
||||
let cache = cache::Cache::new(cache_dir.clone(), router);
|
||||
let temp_output_dir = std::env::temp_dir().join("output_dir");
|
||||
tokio::fs::create_dir_all(&temp_output_dir)
|
||||
.await
|
||||
.context("Failed to create temporary output directory")?;
|
||||
crate::unpack(&meta, &cache, &temp_output_dir, false).await?;
|
||||
|
||||
// Restore the original log level
|
||||
log::set_max_level(original_level);
|
||||
|
||||
let mut website_blocks = list_files_in_dir(cache_dir.clone())
|
||||
.await
|
||||
.context("Failed to list files in /tmp/cache directory")?;
|
||||
|
||||
website_blocks.extend(flist_blocks.into_iter().map(|(block_hash, _)| block_hash));
|
||||
|
||||
println!("Website has {} blocks on the server", website_blocks.len());
|
||||
print_block_downloads(&server_url, website_blocks, details).await?;
|
||||
|
||||
// Delete the temporary directory
|
||||
tokio::fs::remove_dir_all(&temp_output_dir)
|
||||
.await
|
||||
.context("Failed to delete temporary output directory")?;
|
||||
tokio::fs::remove_dir_all(&cache_dir)
|
||||
.await
|
||||
.context("Failed to delete temporary cache directory")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn list_files_in_dir<P: AsRef<Path>>(dir: P) -> Result<Vec<String>> {
|
||||
let dir = dir.as_ref();
|
||||
let mut file_names = Vec::new();
|
||||
|
||||
let mut entries = tokio::fs::read_dir(dir)
|
||||
.await
|
||||
.context(format!("Failed to read directory: {}", dir.display()))?;
|
||||
|
||||
while let Some(entry) = entries.next_entry().await.context("Failed to read entry")? {
|
||||
let path = entry.path();
|
||||
if path.is_dir() {
|
||||
let sub_dir_files = Box::pin(list_files_in_dir(path)).await?;
|
||||
file_names.extend(sub_dir_files);
|
||||
continue;
|
||||
}
|
||||
if let Ok(file_name) = entry.file_name().into_string() {
|
||||
file_names.push(file_name);
|
||||
}
|
||||
}
|
||||
Ok(file_names)
|
||||
}
|
||||
89 components/rfs/src/exist.rs Normal file
@@ -0,0 +1,89 @@
use anyhow::Result;
use std::path::Path;
use tokio::fs::File;

use crate::upload::{split_file_into_blocks, BLOCK_SIZE};

use crate::server_api;

/// Checks if a file exists on the server by splitting it into blocks
pub async fn exists<P: AsRef<Path>>(
    file_path: P,
    server_url: String,
    block_size: Option<usize>,
) -> Result<()> {
    // Use provided block size or default
    let block_size = block_size.unwrap_or(BLOCK_SIZE);
    let file_path = file_path.as_ref();

    info!("Checking file: {}", file_path.display());
    debug!("Using block size: {} bytes", block_size);

    // Read the file size
    let file_size = File::open(file_path).await?.metadata().await?.len();

    info!("File size: {} bytes", file_size);
    info!("Splitting file into blocks of {} bytes", block_size);

    // Split file into blocks and calculate hashes
    let (blocks, _) = split_file_into_blocks(file_path, block_size).await?;
    info!("File split into {} blocks", blocks.len());

    // Create futures for all block checks
    let futures = blocks.iter().map(|block_hash| {
        let server_url = server_url.clone();
        let block_hash = block_hash.clone();
        async move {
            let result = server_api::check_block(&server_url, &block_hash).await;
            match result {
                Ok(true) => true, // Block exists
                Ok(false) => {
                    info!("Block with hash {} does not exist on server", block_hash);
                    false
                }
                Err(e) => {
                    info!("Error checking block {}: {}", block_hash, e);
                    false
                }
            }
        }
    });

    // Run all futures concurrently
    let results: Vec<bool> = futures::future::join_all(futures).await;
    let mut file_exists = true;

    // Process results
    for block_exists in results {
        match block_exists {
            true => {}
            false => {
                file_exists = false;
            }
        }
    }

    if file_exists {
        info!("File exists on server");
    } else {
        info!("File does not exist on server");
    }

    Ok(())
}

/// Checks if a hash exists on the server
pub async fn exists_by_hash(hash: String, server_url: String) -> Result<()> {
    match server_api::get_blocks_by_hash(&hash, server_url.clone()).await {
        Ok(blocks) if !blocks.is_empty() => {
            info!("Hash exists on server\nHash: {}", hash);
        }
        Ok(_) => {
            info!("Hash does not exist on server");
        }
        Err(_) => {
            info!("Hash does not exist on server");
        }
    }
    Ok(())
}
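
Both helpers are re-exported from the crate root (see the `pub use exist::*;` added in lib.rs below), so a caller can drive them directly. A minimal sketch, assuming a local server URL and a placeholder file path:

```rust
// Sketch only: exercising the checks above from an async caller.
// The server URL and file path are placeholders, not values from this commit.
async fn check_example() -> anyhow::Result<()> {
    let server = "http://localhost:8080".to_string();
    // Per-block existence check; None falls back to the default BLOCK_SIZE.
    exists("artifacts/flist.tar.gz", server.clone(), None).await?;
    // Lookup by an already-known hash (placeholder value).
    exists_by_hash("aaaa".repeat(16), server).await?;
    Ok(())
}
```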
84  components/rfs/src/flist_inspector.rs  Normal file
@@ -0,0 +1,84 @@
use crate::fungi::meta::{FileType, Inode, Result, Walk, WalkVisitor};
use std::path::Path;

pub struct InspectVisitor {
    file_count: u32,
    dir_count: u32,
    link_count: u32,
    total_size: u64,
}

impl InspectVisitor {
    pub fn new() -> Self {
        Self {
            file_count: 0,
            dir_count: 0,
            link_count: 0,
            total_size: 0,
        }
    }

    pub fn print_summary(&self, target: &str) {
        println!("Flist Inspection: {}", target);
        println!("==================");
        println!("Files: {}", self.file_count);
        println!("Directories: {}", self.dir_count);
        println!("Symlinks: {}", self.link_count);
        println!("Total size: {} bytes", self.total_size);
    }

    fn print_metadata(&self, path: &Path, node: &Inode) {
        let file_type_str = match node.mode.file_type() {
            FileType::Dir => "Directory",
            FileType::Regular => "Regular File",
            FileType::Link => "Symbolic Link",
            FileType::Block => "Block Device",
            FileType::Char => "Character Device",
            FileType::Socket => "Socket",
            FileType::FIFO => "FIFO",
            FileType::Unknown => "Unknown",
        };

        println!("Path: {}", path.display());
        println!(" Type: {}", file_type_str);
        println!(" Inode: {}", node.ino);
        println!(" Name: {}", node.name);
        println!(" Size: {} bytes", node.size);
        println!(" UID: {}", node.uid);
        println!(" GID: {}", node.gid);
        println!(" Mode: 0{:o}", node.mode.mode());
        println!(" Permissions: 0{:o}", node.mode.permissions());
        println!(" Device: {}", node.rdev);
        println!(" Created: {}", node.ctime);
        println!(" Modified: {}", node.mtime);

        if let Some(data) = &node.data {
            if node.mode.file_type() == FileType::Link {
                if let Ok(target) = String::from_utf8(data.clone()) {
                    println!(" Link Target: {}", target);
                }
            } else {
                println!(" Extra Data: {} bytes", data.len());
            }
        }
        println!(" ---");
    }
}

#[async_trait::async_trait]
impl WalkVisitor for InspectVisitor {
    async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
        self.print_metadata(path, node);

        match node.mode.file_type() {
            FileType::Dir => self.dir_count += 1,
            FileType::Regular => {
                self.file_count += 1;
                self.total_size += node.size;
            }
            FileType::Link => self.link_count += 1,
            _ => {}
        }
        Ok(Walk::Continue)
    }
}
@@ -12,31 +12,31 @@ use crate::store;
|
||||
|
||||
const ID_LEN: usize = 32;
|
||||
const KEY_LEN: usize = 32;
|
||||
const TYPE_MASK: u32 = nix::libc::S_IFMT;
|
||||
const TYPE_MASK: u32 = libc::S_IFMT;
|
||||
|
||||
#[repr(u32)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum FileType {
|
||||
Regular = nix::libc::S_IFREG,
|
||||
Dir = nix::libc::S_IFDIR,
|
||||
Link = nix::libc::S_IFLNK,
|
||||
Block = nix::libc::S_IFBLK,
|
||||
Char = nix::libc::S_IFCHR,
|
||||
Socket = nix::libc::S_IFSOCK,
|
||||
FIFO = nix::libc::S_IFIFO,
|
||||
Unknown = 0,
|
||||
Regular = libc::S_IFREG,
|
||||
Dir = libc::S_IFDIR,
|
||||
Link = libc::S_IFLNK,
|
||||
Block = libc::S_IFBLK,
|
||||
Char = libc::S_IFCHR,
|
||||
Socket = libc::S_IFSOCK,
|
||||
FIFO = libc::S_IFIFO,
|
||||
Unknown = 0xFFFFFFFF, // Use a different value to avoid conflict
|
||||
}
|
||||
|
||||
impl From<u32> for FileType {
|
||||
fn from(value: u32) -> Self {
|
||||
match value {
|
||||
nix::libc::S_IFREG => Self::Regular,
|
||||
nix::libc::S_IFDIR => Self::Dir,
|
||||
nix::libc::S_IFLNK => Self::Link,
|
||||
nix::libc::S_IFBLK => Self::Block,
|
||||
nix::libc::S_IFCHR => Self::Char,
|
||||
nix::libc::S_IFSOCK => Self::Socket,
|
||||
nix::libc::S_IFIFO => Self::FIFO,
|
||||
libc::S_IFREG => Self::Regular,
|
||||
libc::S_IFDIR => Self::Dir,
|
||||
libc::S_IFLNK => Self::Link,
|
||||
libc::S_IFBLK => Self::Block,
|
||||
libc::S_IFCHR => Self::Char,
|
||||
libc::S_IFSOCK => Self::Socket,
|
||||
libc::S_IFIFO => Self::FIFO,
|
||||
_ => Self::Unknown,
|
||||
}
|
||||
}
|
||||
@@ -225,6 +225,15 @@ impl Reader {
|
||||
Ok(Self { pool })
|
||||
}
|
||||
|
||||
pub async fn root_inode(&self) -> Result<Inode> {
|
||||
let inode: Inode = sqlx::query_as(r#"select inode.*, extra.data
|
||||
from inode left join extra on inode.ino = extra.ino
|
||||
where inode.parent = 0 limit 1;"#)
|
||||
.fetch_one(&self.pool).await?;
|
||||
|
||||
Ok(inode)
|
||||
}
|
||||
|
||||
pub async fn inode(&self, ino: Ino) -> Result<Inode> {
|
||||
let inode: Inode = sqlx::query_as(r#"select inode.*, extra.data
|
||||
from inode left join extra on inode.ino = extra.ino
|
||||
@@ -3,6 +3,7 @@ extern crate log;
|
||||
|
||||
pub mod cache;
|
||||
pub mod fungi;
|
||||
pub mod server;
|
||||
pub mod store;
|
||||
|
||||
mod pack;
|
||||
@@ -12,8 +13,22 @@ pub use unpack::unpack;
|
||||
mod clone;
|
||||
pub use clone::clone;
|
||||
pub mod config;
|
||||
mod docker;
|
||||
pub use docker::DockerImageToFlist;
|
||||
mod upload;
|
||||
pub use upload::*;
|
||||
mod download;
|
||||
pub use download::*;
|
||||
mod exist;
|
||||
pub use exist::*;
|
||||
mod sync;
|
||||
pub use sync::*;
|
||||
pub mod flist_inspector;
|
||||
mod server_api;
|
||||
pub mod tree_visitor;
|
||||
|
||||
const PARALLEL_UPLOAD: usize = 10; // number of files we can upload in parallel
|
||||
const PARALLEL_UPLOAD: usize = 20; // number of files we can upload in parallel
|
||||
const PARALLEL_DOWNLOAD: usize = 20; // number of files we can download in parallel
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
1104  components/rfs/src/main.rs  Normal file (file diff suppressed because it is too large)
@@ -12,8 +12,9 @@ use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, TokenData,
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::{
|
||||
use crate::server::{
|
||||
config,
|
||||
db::DB,
|
||||
response::{ResponseError, ResponseResult},
|
||||
};
|
||||
|
||||
@@ -37,12 +38,13 @@ pub struct SignInResponse {
|
||||
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/v1/api/signin",
|
||||
path = "/api/v1/signin",
|
||||
tag = "Authentication",
|
||||
request_body = SignInBody,
|
||||
responses(
|
||||
(status = 200, description = "User signed in successfully", body = SignInResponse),
|
||||
(status = 500, description = "Internal server error"),
|
||||
(status = 401, description = "Unauthorized user"),
|
||||
(status = 201, description = "User signed in successfully", body = SignInResponse),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
(status = 401, description = "Unauthorized user", body = ResponseError),
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
@@ -50,7 +52,7 @@ pub async fn sign_in_handler(
|
||||
State(state): State<Arc<config::AppState>>,
|
||||
Json(user_data): Json<SignInBody>,
|
||||
) -> impl IntoResponse {
|
||||
let user = match state.db.get_user_by_username(&user_data.username) {
|
||||
let user = match state.db.get_user_by_username(&user_data.username).await {
|
||||
Some(user) => user,
|
||||
None => {
|
||||
return Err(ResponseError::Unauthorized(
|
||||
@@ -127,7 +129,9 @@ pub async fn authorize(
|
||||
Some(t) => t.to_string(),
|
||||
None => {
|
||||
log::error!("failed to get token string");
|
||||
return Err(ResponseError::InternalServerError);
|
||||
return Err(ResponseError::Unauthorized(
|
||||
"Authorization token is not provided".to_string(),
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -140,7 +144,11 @@ pub async fn authorize(
|
||||
}
|
||||
};
|
||||
|
||||
let current_user = match state.db.get_user_by_username(&token_data.claims.username) {
|
||||
let current_user = match state
|
||||
.db
|
||||
.get_user_by_username(&token_data.claims.username)
|
||||
.await
|
||||
{
|
||||
Some(user) => user,
|
||||
None => {
|
||||
return Err(ResponseError::Unauthorized(
|
||||
@@ -152,3 +160,22 @@ pub async fn authorize(
|
||||
req.extensions_mut().insert(current_user.username.clone());
|
||||
Ok(next.run(req).await)
|
||||
}
|
||||
|
||||
/// Get the user ID from the username stored in the request extension
|
||||
pub async fn get_user_id_from_token(db: &impl DB, username: &str) -> Result<i64, ResponseError> {
|
||||
match db.get_user_by_username(username).await {
|
||||
Some(user) => match user.id {
|
||||
Some(id) => Ok(id),
|
||||
None => {
|
||||
log::error!("User ID is missing for user: {}", username);
|
||||
Err(ResponseError::Unauthorized(
|
||||
"User ID is missing".to_string(),
|
||||
))
|
||||
}
|
||||
},
|
||||
None => {
|
||||
log::error!("User not found: {}", username);
|
||||
Err(ResponseError::Unauthorized("User not found".to_string()))
|
||||
}
|
||||
}
|
||||
}
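
The changes above move sign-in to `/api/v1/signin` and make the user lookup async. A hedged client-side sketch of the resulting flow; the host/port, the `password` field, and the token field name in `SignInResponse` are assumptions not shown in this diff, and `reqwest` (with its json feature) is used purely for illustration:

```rust
// Sketch only: obtain a JWT from the sign-in endpoint for later Bearer use.
// URL, credentials, and the "access_token" field name are assumptions.
async fn sign_in_example() -> anyhow::Result<String> {
    let resp: serde_json::Value = reqwest::Client::new()
        .post("http://localhost:4000/api/v1/signin")
        .json(&serde_json::json!({ "username": "admin", "password": "admin" }))
        .send()
        .await?
        .json()
        .await?;
    Ok(resp["access_token"].as_str().unwrap_or_default().to_string())
}
```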
|
||||
500  components/rfs/src/server/block_handlers.rs  Normal file
@@ -0,0 +1,500 @@
|
||||
use axum::{
|
||||
body::Bytes,
|
||||
extract::{Query, State},
|
||||
http::StatusCode,
|
||||
response::IntoResponse,
|
||||
Json,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::server::{
|
||||
auth,
|
||||
config::AppState,
|
||||
db::DB,
|
||||
models::Block,
|
||||
response::{ResponseError, ResponseResult, BlockUploadedResponse},
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
// Block API endpoints are included in the main FlistApi in handlers.rs
|
||||
|
||||
/// Query parameters for uploading a block
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct UploadBlockParams {
|
||||
/// File hash associated with the block
|
||||
pub file_hash: String,
|
||||
/// Block index within the file
|
||||
pub idx: u64,
|
||||
}
|
||||
|
||||
/// Upload a block to the server.
|
||||
/// If the block already exists, the server will return a 200 OK response.
|
||||
/// If the block is new, the server will return a 201 Created response.
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/api/v1/block",
|
||||
tag = "Block Management",
|
||||
request_body(content = [u8], description = "Block data to upload", content_type = "application/octet-stream"),
|
||||
params(
|
||||
("file_hash" = String, Query, description = "File hash associated with the block"),
|
||||
("idx" = u64, Query, description = "Block index within the file")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Block already exists", body = BlockUploadedResponse),
|
||||
(status = 201, description = "Block created successfully", body = BlockUploadedResponse),
|
||||
(status = 400, description = "Bad request", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
security(
|
||||
("bearerAuth" = [])
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn upload_block_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Query(params): Query<UploadBlockParams>,
|
||||
extension: axum::extract::Extension<String>,
|
||||
body: Bytes,
|
||||
) -> Result<(StatusCode, ResponseResult), ResponseError> {
|
||||
// Convert the body bytes to Vec<u8>
|
||||
let data = body.to_vec();
|
||||
|
||||
// Calculate the hash of the block data
|
||||
let hash = Block::calculate_hash(&data);
|
||||
|
||||
// Get the username from the extension (set by the authorize middleware)
|
||||
let username = extension.0;
|
||||
let user_id = auth::get_user_id_from_token(&*state.db, &username).await?;
|
||||
|
||||
// Store the block data in the database
|
||||
match state
|
||||
.db
|
||||
.store_block(&hash, data, ¶ms.file_hash, params.idx, user_id)
|
||||
.await
|
||||
{
|
||||
Ok(is_new) => {
|
||||
if is_new {
|
||||
// Block is new, return 201 Created
|
||||
Ok((StatusCode::CREATED, ResponseResult::BlockUploaded(hash)))
|
||||
} else {
|
||||
// Block already exists, return 200 OK
|
||||
Ok((StatusCode::OK, ResponseResult::BlockUploaded(hash)))
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to store block: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
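
Given the route, query parameters, and octet-stream body documented above, a client-side upload might look roughly like this sketch; the base URL and token are placeholders and `reqwest` is used only for illustration:

```rust
// Sketch only: upload one block to POST /api/v1/block.
// 201 means the block was newly stored, 200 means it already existed.
async fn upload_block_example(
    data: Vec<u8>,
    file_hash: &str,
    idx: u64,
    token: &str,
) -> anyhow::Result<()> {
    let resp = reqwest::Client::new()
        .post("http://localhost:4000/api/v1/block")
        .query(&[("file_hash", file_hash.to_string()), ("idx", idx.to_string())])
        .bearer_auth(token)
        .header("Content-Type", "application/octet-stream")
        .body(data)
        .send()
        .await?;
    println!("status: {}", resp.status());
    Ok(())
}
```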
|
||||
|
||||
/// Retrieve a block by its hash.
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/block/{hash}",
|
||||
tag = "Block Management",
|
||||
responses(
|
||||
(status = 200, description = "Block found", body = [u8], content_type = "application/octet-stream"),
|
||||
(status = 404, description = "Block not found", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("hash" = String, Path, description = "Block hash")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn get_block_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::extract::Path(hash): axum::extract::Path<String>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
// Retrieve the block from the database
|
||||
match state.db.get_block(&hash).await {
|
||||
Ok(Some(data)) => {
|
||||
// Block found, return its data
|
||||
Ok((StatusCode::OK, axum::body::Bytes::from(data)))
|
||||
}
|
||||
Ok(None) => {
|
||||
// Block not found
|
||||
Err(ResponseError::NotFound(format!(
|
||||
"Block with hash '{}' not found",
|
||||
hash
|
||||
)))
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve block: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks a block by its hash.
|
||||
#[utoipa::path(
|
||||
head,
|
||||
path = "/api/v1/block/{hash}",
|
||||
tag = "Block Management",
|
||||
responses(
|
||||
(status = 200, description = "Block found"),
|
||||
(status = 404, description = "Block not found", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("hash" = String, Path, description = "Block hash")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn check_block_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::extract::Path(hash): axum::extract::Path<String>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
// Retrieve the block from the database
|
||||
match state.db.block_exists("", 0, &hash, 0).await {
|
||||
true => {
|
||||
// Block found
|
||||
Ok(StatusCode::OK)
|
||||
}
|
||||
false => {
|
||||
log::error!("Block with hash '{}' doesn't exist", hash);
|
||||
Err(ResponseError::NotFound(format!(
|
||||
"Block with hash '{}' not found",
|
||||
hash
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Request to verify if multiple blocks exist on the server
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct VerifyBlock {
|
||||
/// Block hash to verify
|
||||
pub block_hash: String,
|
||||
/// File hash associated with the block
|
||||
pub file_hash: String,
|
||||
/// Block index within the file
|
||||
pub block_index: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct VerifyBlocksRequest {
|
||||
/// List of blocks to verify
|
||||
pub blocks: Vec<VerifyBlock>,
|
||||
}
|
||||
|
||||
/// Response with list of missing blocks
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct VerifyBlocksResponse {
|
||||
/// List of block hashes that are missing on the server
|
||||
pub missing: Vec<String>,
|
||||
}
|
||||
|
||||
/// Verify if multiple blocks exist on the server.
|
||||
/// Returns a list of missing blocks.
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/api/v1/block/verify",
|
||||
tag = "Block Management",
|
||||
request_body(content = VerifyBlocksRequest, description = "List of block hashes to verify", content_type = "application/json"),
|
||||
responses(
|
||||
(status = 200, description = "Verification completed", body = VerifyBlocksResponse),
|
||||
(status = 400, description = "Bad request", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn verify_blocks_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Json(request): Json<VerifyBlocksRequest>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
let mut missing = Vec::new();
|
||||
|
||||
// Check each block in the request
|
||||
for block in request.blocks {
|
||||
if !state
|
||||
.db
|
||||
.block_exists(&block.file_hash, block.block_index, &block.block_hash, 0)
|
||||
.await
|
||||
{
|
||||
missing.push(block.block_hash);
|
||||
}
|
||||
}
|
||||
|
||||
// Return the list of missing blocks
|
||||
Ok((
|
||||
StatusCode::OK,
|
||||
Json(VerifyBlocksResponse {
|
||||
missing, // Include missing blocks in the response
|
||||
}),
|
||||
))
|
||||
}
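
The verify endpoint takes a JSON body shaped like `VerifyBlocksRequest` and answers with only the hashes the server is missing. A small sketch of building that payload from the structs above; the hash values are placeholders:

```rust
// Sketch only: the JSON body for POST /api/v1/block/verify.
fn build_verify_request() -> anyhow::Result<String> {
    let request = VerifyBlocksRequest {
        blocks: vec![VerifyBlock {
            block_hash: "aaaa".repeat(16),
            file_hash: "bbbb".repeat(16),
            block_index: 0,
        }],
    };
    Ok(serde_json::to_string(&request)?) // sent as application/json
}
```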
|
||||
|
||||
/// Block information with hash and index
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct BlockInfo {
|
||||
/// Block hash
|
||||
pub hash: String,
|
||||
/// Block index within the file
|
||||
pub index: u64,
|
||||
}
|
||||
|
||||
/// Block information with hash and size
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct UserBlockInfo {
|
||||
/// Block hash
|
||||
pub hash: String,
|
||||
/// Block size in bytes
|
||||
pub size: u64,
|
||||
}
|
||||
|
||||
/// Response for blocks by hash endpoint
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct BlocksResponse {
|
||||
/// List of blocks with their indices
|
||||
pub blocks: Vec<BlockInfo>,
|
||||
}
|
||||
|
||||
/// Retrieve blocks by hash (file hash or block hash).
|
||||
/// If the hash is a file hash, returns all blocks with their block index related to that file.
|
||||
/// If the hash is a block hash, returns the block itself.
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/blocks/{hash}",
|
||||
tag = "Block Management",
|
||||
responses(
|
||||
(status = 200, description = "Blocks found", body = BlocksResponse),
|
||||
(status = 404, description = "Hash not found", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("hash" = String, Path, description = "File hash or block hash")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn get_blocks_by_hash_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::extract::Path(hash): axum::extract::Path<String>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
// First, try to get file blocks by hash
|
||||
match state.db.get_file_blocks_ordered(&hash).await {
|
||||
Ok(blocks) if !blocks.is_empty() => {
|
||||
// This is a file hash, return all blocks with their indices
|
||||
let block_infos = blocks.into_iter()
|
||||
.map(|(hash, index)| BlockInfo { hash, index })
|
||||
.collect();
|
||||
Ok((StatusCode::OK, Json(BlocksResponse { blocks: block_infos })))
|
||||
}
|
||||
Ok(_) | Err(_) => {
|
||||
// Not a file hash or error occurred, try as block hash
|
||||
match state.db.get_block(&hash).await {
|
||||
Ok(Some(_)) => {
|
||||
// This is a block hash, return just this block with index 0
|
||||
Ok((
|
||||
StatusCode::OK,
|
||||
Json(BlocksResponse {
|
||||
blocks: vec![BlockInfo { hash: hash.clone(), index: 0 }],
|
||||
}),
|
||||
))
|
||||
}
|
||||
Ok(None) => {
|
||||
// Neither file nor block found
|
||||
Err(ResponseError::NotFound(format!(
|
||||
"No file or block with hash '{}' found",
|
||||
hash
|
||||
)))
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve block: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Query parameters for listing blocks
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct ListBlocksParams {
|
||||
/// Page number (1-indexed)
|
||||
#[schema(default = 1, minimum = 1)]
|
||||
pub page: Option<u32>,
|
||||
/// Number of items per page
|
||||
#[schema(default = 50, minimum = 1, maximum = 100)]
|
||||
pub per_page: Option<u32>,
|
||||
}
|
||||
|
||||
/// Response for listing blocks
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct ListBlocksResponse {
|
||||
/// List of block hashes
|
||||
pub blocks: Vec<String>,
|
||||
/// Total number of blocks
|
||||
pub total: u64,
|
||||
/// Current page number
|
||||
pub page: u32,
|
||||
/// Number of items per page
|
||||
pub per_page: u32,
|
||||
}
|
||||
|
||||
/// List all block hashes in the server with pagination
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/blocks",
|
||||
tag = "Block Management",
|
||||
params(
|
||||
("page" = Option<u32>, Query, description = "Page number (1-indexed)"),
|
||||
("per_page" = Option<u32>, Query, description = "Number of items per page")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "List of block hashes", body = ListBlocksResponse),
|
||||
(status = 400, description = "Bad request"),
|
||||
(status = 500, description = "Internal server error"),
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn list_blocks_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Query(params): Query<ListBlocksParams>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
let page = params.page.unwrap_or(1);
|
||||
let per_page = params.per_page.unwrap_or(50).min(100);
|
||||
|
||||
match state.db.list_blocks(page, per_page).await {
|
||||
Ok((blocks, total)) => {
|
||||
let response = ListBlocksResponse {
|
||||
blocks,
|
||||
total,
|
||||
page,
|
||||
per_page,
|
||||
};
|
||||
Ok((StatusCode::OK, Json(response)))
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to list blocks: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Response for user blocks endpoint
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct UserBlocksResponse {
|
||||
/// List of blocks with their sizes
|
||||
pub blocks: Vec<UserBlockInfo>,
|
||||
/// Total number of blocks
|
||||
pub total: u64,
|
||||
/// Total number of all blocks
|
||||
pub all_blocks: u64,
|
||||
}
|
||||
|
||||
/// Retrieve all blocks uploaded by a specific user.
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/user/blocks",
|
||||
tag = "Block Management",
|
||||
params(
|
||||
("page" = Option<u32>, Query, description = "Page number (1-indexed)"),
|
||||
("per_page" = Option<u32>, Query, description = "Number of items per page")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Blocks found", body = UserBlocksResponse),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
(status = 500, description = "Internal server error"),
|
||||
),
|
||||
security(
|
||||
("bearerAuth" = [])
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn get_user_blocks_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
extension: axum::extract::Extension<String>,
|
||||
Query(params): Query<ListBlocksParams>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
let page = params.page.unwrap_or(1);
|
||||
let per_page = params.per_page.unwrap_or(50).min(100);
|
||||
|
||||
// Get the username from the extension (set by the authorize middleware)
|
||||
let username = extension.0;
|
||||
let user_id = auth::get_user_id_from_token(&*state.db, &username).await?;
|
||||
|
||||
let all_blocks = match state.db.list_blocks(1, 1).await {
|
||||
Ok((_, total)) => total,
|
||||
Err(err) => {
|
||||
log::error!("Failed to list blocks: {}", err);
|
||||
0
|
||||
}
|
||||
};
|
||||
|
||||
// Get all blocks related to the user
|
||||
match state.db.get_user_blocks(user_id, page, per_page).await {
|
||||
Ok(blocks) => {
|
||||
let total = blocks.len() as u64;
|
||||
let response = UserBlocksResponse {
|
||||
blocks: blocks.into_iter()
|
||||
.map(|(hash, size)| UserBlockInfo { hash, size })
|
||||
.collect(),
|
||||
total,
|
||||
all_blocks,
|
||||
};
|
||||
Ok((StatusCode::OK, Json(response)))
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve user blocks: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Response for block downloads endpoint
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct BlockDownloadsResponse {
|
||||
/// Block hash
|
||||
pub block_hash: String,
|
||||
/// Number of times the block has been downloaded
|
||||
pub downloads_count: u64,
|
||||
/// Size of the block in bytes
|
||||
pub block_size: u64,
|
||||
}
|
||||
|
||||
/// Retrieve the number of times a block has been downloaded.
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/block/{hash}/downloads",
|
||||
tag = "Block Management",
|
||||
responses(
|
||||
(status = 200, description = "Download count retrieved successfully", body = BlockDownloadsResponse),
|
||||
(status = 404, description = "Block not found"),
|
||||
(status = 500, description = "Internal server error"),
|
||||
),
|
||||
params(
|
||||
("hash" = String, Path, description = "Block hash")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn get_block_downloads_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::extract::Path(hash): axum::extract::Path<String>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
// Check if the block exists
|
||||
if !state.db.block_exists("", 0, &hash, 0).await {
|
||||
return Err(ResponseError::NotFound(format!(
|
||||
"Block with hash '{}' not found",
|
||||
hash
|
||||
)));
|
||||
}
|
||||
|
||||
// Get the download count
|
||||
match state.db.get_block_downloads(&hash).await {
|
||||
Ok((count, block_size)) => {
|
||||
let response = BlockDownloadsResponse {
|
||||
block_hash: hash,
|
||||
downloads_count: count,
|
||||
block_size: block_size,
|
||||
};
|
||||
Ok((StatusCode::OK, Json(response)))
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve block download count: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -8,10 +8,8 @@ use std::{
|
||||
};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::{
|
||||
db::{User, DB},
|
||||
handlers,
|
||||
};
|
||||
use crate::server::{db::DBType, handlers, models::User};
|
||||
use crate::store;
|
||||
|
||||
#[derive(Debug, ToSchema, Serialize, Clone)]
|
||||
pub struct Job {
|
||||
@@ -22,7 +20,7 @@ pub struct Job {
|
||||
pub struct AppState {
|
||||
pub jobs_state: Mutex<HashMap<String, handlers::FlistState>>,
|
||||
pub flists_progress: Mutex<HashMap<PathBuf, f32>>,
|
||||
pub db: Arc<dyn DB>,
|
||||
pub db: Arc<DBType>,
|
||||
pub config: Config,
|
||||
}
|
||||
|
||||
@@ -32,25 +30,30 @@ pub struct Config {
|
||||
pub port: u16,
|
||||
pub store_url: Vec<String>,
|
||||
pub flist_dir: String,
|
||||
pub sqlite_path: Option<String>,
|
||||
|
||||
pub jwt_secret: String,
|
||||
pub jwt_expire_hours: i64,
|
||||
pub users: Vec<User>,
|
||||
|
||||
pub block_size: Option<usize>, // Optional block size in bytes
|
||||
pub storage_dir: String, // Path to the storage directory
|
||||
}
|
||||
|
||||
/// Parse the config file into Config struct.
|
||||
pub async fn parse_config(filepath: &str) -> Result<Config> {
|
||||
let content = fs::read_to_string(filepath).context("failed to read config file")?;
|
||||
let c: Config = toml::from_str(&content).context("failed to convert toml config data")?;
|
||||
let mut c: Config = toml::from_str(&content).context("failed to convert toml config data")?;
|
||||
|
||||
if !hostname_validator::is_valid(&c.host) {
|
||||
anyhow::bail!("host '{}' is invalid", c.host)
|
||||
}
|
||||
|
||||
rfs::store::parse_router(&c.store_url)
|
||||
store::parse_router(&c.store_url)
|
||||
.await
|
||||
.context("failed to parse store urls")?;
|
||||
fs::create_dir_all(&c.flist_dir).context("failed to create flists directory")?;
|
||||
fs::create_dir_all(&c.storage_dir).context("failed to create storage directory")?;
|
||||
|
||||
if c.jwt_expire_hours < 1 || c.jwt_expire_hours > 24 {
|
||||
anyhow::bail!(format!(
|
||||
@@ -59,5 +62,6 @@ pub async fn parse_config(filepath: &str) -> Result<Config> {
|
||||
))
|
||||
}
|
||||
|
||||
c.block_size = c.block_size.or(Some(1024 * 1024));
|
||||
Ok(c)
|
||||
}
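
Pulling the fields above together, a config file accepted by `parse_config` might look roughly like the sketch below. The `dir://` store URL scheme and the `[[users]]` table layout are assumptions, and `block_size` is omitted since it now defaults to 1 MiB:

```rust
// Sketch only: a config in the shape parse_config expects, based on the
// Config struct above. Values and the users layout are assumptions.
fn example_config() -> anyhow::Result<Config> {
    let sample = r#"
host = "localhost"
port = 4000
store_url = ["dir:///tmp/store0"]
flist_dir = "flists"
sqlite_path = "fl-server.db"
storage_dir = "storage"
jwt_secret = "secret"
jwt_expire_hours = 5

[[users]]
username = "admin"
password = "admin"
"#;
    Ok(toml::from_str(sample)?) // block_size is filled in later by parse_config
}
```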
|
||||
96  components/rfs/src/server/db/map.rs  Normal file
@@ -0,0 +1,96 @@
|
||||
use std::collections::HashMap;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use super::DB;
|
||||
use crate::server::models::{File, User};
|
||||
use anyhow::Result;
|
||||
|
||||
#[derive(Debug, ToSchema)]
|
||||
pub struct MapDB {
|
||||
users: HashMap<String, User>,
|
||||
}
|
||||
|
||||
impl MapDB {
|
||||
pub fn new(users: &[User]) -> Self {
|
||||
Self {
|
||||
users: users
|
||||
.iter()
|
||||
.map(|u| (u.username.clone(), u.to_owned()))
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DB for MapDB {
|
||||
async fn get_user_by_username(&self, username: &str) -> Option<User> {
|
||||
self.users.get(username).cloned()
|
||||
}
|
||||
|
||||
async fn block_exists(
|
||||
&self,
|
||||
_file_hash: &str,
|
||||
_block_index: u64,
|
||||
_block_hash: &str,
|
||||
_user_id: i64,
|
||||
) -> bool {
|
||||
// TODO:
|
||||
true
|
||||
}
|
||||
|
||||
async fn store_block(
|
||||
&self,
|
||||
_block_hash: &str,
|
||||
_data: Vec<u8>,
|
||||
_file_hash: &str,
|
||||
_block_index: u64,
|
||||
_user_id: i64,
|
||||
) -> Result<bool, anyhow::Error> {
|
||||
// TODO: Implement block storage logic
|
||||
Ok(true) // Placeholder return value
|
||||
}
|
||||
|
||||
async fn get_block(&self, _hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error> {
|
||||
// TODO:
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn get_file_by_hash(&self, _hash: &str) -> Result<Option<File>, anyhow::Error> {
|
||||
// TODO:
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn get_file_blocks_ordered(
|
||||
&self,
|
||||
_file_hash: &str,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error> {
|
||||
// TODO:
|
||||
Ok(Vec::new())
|
||||
}
|
||||
|
||||
async fn list_blocks(
|
||||
&self,
|
||||
_page: u32,
|
||||
_per_page: u32,
|
||||
) -> Result<(Vec<String>, u64), anyhow::Error> {
|
||||
// TODO:
|
||||
Ok((Vec::new(), 0))
|
||||
}
|
||||
|
||||
async fn get_user_blocks(
|
||||
&self,
|
||||
_user_id: i64,
|
||||
_page: u32,
|
||||
_per_page: u32,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error> {
|
||||
// TODO:
|
||||
Ok(Vec::new())
|
||||
}
|
||||
|
||||
async fn increment_block_downloads(&self, _hash: &str) -> Result<(), anyhow::Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_block_downloads(&self, _hash: &str) -> Result<(u64, u64), anyhow::Error> {
|
||||
Ok((0, 0))
|
||||
}
|
||||
}
|
||||
166  components/rfs/src/server/db/mod.rs  Normal file
@@ -0,0 +1,166 @@
|
||||
pub mod map;
|
||||
pub mod sqlite;
|
||||
mod storage;
|
||||
use crate::server::models::{File, User};
|
||||
|
||||
pub trait DB: Send + Sync {
|
||||
// User methods
|
||||
async fn get_user_by_username(&self, username: &str) -> Option<User>;
|
||||
|
||||
// Block methods
|
||||
async fn block_exists(
|
||||
&self,
|
||||
file_hash: &str,
|
||||
block_index: u64,
|
||||
block_hash: &str,
|
||||
user_id: i64,
|
||||
) -> bool;
|
||||
async fn store_block(
|
||||
&self,
|
||||
block_hash: &str,
|
||||
data: Vec<u8>,
|
||||
file_hash: &str,
|
||||
block_index: u64,
|
||||
user_id: i64,
|
||||
) -> Result<bool, anyhow::Error>;
|
||||
async fn get_block(&self, hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error>;
|
||||
async fn increment_block_downloads(&self, hash: &str) -> Result<(), anyhow::Error>;
|
||||
async fn get_block_downloads(&self, hash: &str) -> Result<(u64, u64), anyhow::Error>;
|
||||
|
||||
// File methods
|
||||
async fn get_file_by_hash(&self, hash: &str) -> Result<Option<File>, anyhow::Error>;
|
||||
async fn get_file_blocks_ordered(
|
||||
&self,
|
||||
file_hash: &str,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error>;
|
||||
async fn list_blocks(
|
||||
&self,
|
||||
page: u32,
|
||||
per_page: u32,
|
||||
) -> Result<(Vec<String>, u64), anyhow::Error>;
|
||||
|
||||
// Get all blocks related to a user
|
||||
async fn get_user_blocks(
|
||||
&self,
|
||||
user_id: i64,
|
||||
page: u32,
|
||||
per_page: u32,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error>;
|
||||
}
|
||||
|
||||
pub enum DBType {
|
||||
MapDB(map::MapDB),
|
||||
SqlDB(sqlite::SqlDB),
|
||||
}
|
||||
|
||||
impl DB for DBType {
|
||||
// User methods
|
||||
async fn get_user_by_username(&self, username: &str) -> Option<User> {
|
||||
match self {
|
||||
DBType::MapDB(db) => db.get_user_by_username(username).await,
|
||||
DBType::SqlDB(db) => db.get_user_by_username(username).await,
|
||||
}
|
||||
}
|
||||
|
||||
// Block methods
|
||||
async fn block_exists(
|
||||
&self,
|
||||
file_hash: &str,
|
||||
block_index: u64,
|
||||
block_hash: &str,
|
||||
user_id: i64,
|
||||
) -> bool {
|
||||
match self {
|
||||
DBType::MapDB(db) => {
|
||||
db.block_exists(file_hash, block_index, block_hash, user_id)
|
||||
.await
|
||||
}
|
||||
DBType::SqlDB(db) => {
|
||||
db.block_exists(file_hash, block_index, block_hash, user_id)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn store_block(
|
||||
&self,
|
||||
block_hash: &str,
|
||||
data: Vec<u8>,
|
||||
file_hash: &str,
|
||||
block_index: u64,
|
||||
user_id: i64,
|
||||
) -> Result<bool, anyhow::Error> {
|
||||
match self {
|
||||
DBType::MapDB(db) => {
|
||||
db.store_block(block_hash, data, file_hash, block_index, user_id)
|
||||
.await
|
||||
}
|
||||
DBType::SqlDB(db) => {
|
||||
db.store_block(block_hash, data, file_hash, block_index, user_id)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_block(&self, hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error> {
|
||||
match self {
|
||||
DBType::MapDB(db) => db.get_block(hash).await,
|
||||
DBType::SqlDB(db) => db.get_block(hash).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn increment_block_downloads(&self, hash: &str) -> Result<(), anyhow::Error> {
|
||||
match self {
|
||||
DBType::MapDB(db) => db.increment_block_downloads(hash).await,
|
||||
DBType::SqlDB(db) => db.increment_block_downloads(hash).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_block_downloads(&self, hash: &str) -> Result<(u64, u64), anyhow::Error> {
|
||||
match self {
|
||||
DBType::MapDB(db) => db.get_block_downloads(hash).await,
|
||||
DBType::SqlDB(db) => db.get_block_downloads(hash).await,
|
||||
}
|
||||
}
|
||||
|
||||
// File methods
|
||||
async fn get_file_by_hash(&self, hash: &str) -> Result<Option<File>, anyhow::Error> {
|
||||
match self {
|
||||
DBType::MapDB(db) => db.get_file_by_hash(hash).await,
|
||||
DBType::SqlDB(db) => db.get_file_by_hash(hash).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_file_blocks_ordered(
|
||||
&self,
|
||||
file_hash: &str,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error> {
|
||||
match self {
|
||||
DBType::MapDB(db) => db.get_file_blocks_ordered(file_hash).await,
|
||||
DBType::SqlDB(db) => db.get_file_blocks_ordered(file_hash).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_blocks(
|
||||
&self,
|
||||
page: u32,
|
||||
per_page: u32,
|
||||
) -> Result<(Vec<String>, u64), anyhow::Error> {
|
||||
match self {
|
||||
DBType::MapDB(db) => db.list_blocks(page, per_page).await,
|
||||
DBType::SqlDB(db) => db.list_blocks(page, per_page).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_user_blocks(
|
||||
&self,
|
||||
user_id: i64,
|
||||
page: u32,
|
||||
per_page: u32,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error> {
|
||||
match self {
|
||||
DBType::MapDB(db) => db.get_user_blocks(user_id, page, per_page).await,
|
||||
DBType::SqlDB(db) => db.get_user_blocks(user_id, page, per_page).await,
|
||||
}
|
||||
}
|
||||
}
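
`DBType` forwards every trait method to the concrete backend, which avoids needing `dyn DB` trait objects for these async methods. A minimal sketch of wiring a backend from config at startup, assuming the constructors shown in map.rs and sqlite.rs and the Config fields from config.rs (the actual wiring lives in the suppressed main.rs diff, so the fallback policy here is an assumption):

```rust
// Sketch only: choose a backend based on whether sqlite_path is configured.
async fn open_db(config: &crate::server::config::Config) -> DBType {
    match &config.sqlite_path {
        Some(path) => DBType::SqlDB(
            sqlite::SqlDB::new(path, &config.storage_dir, &config.users).await,
        ),
        None => DBType::MapDB(map::MapDB::new(&config.users)),
    }
}
```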
|
||||
397  components/rfs/src/server/db/sqlite.rs  Normal file
@@ -0,0 +1,397 @@
|
||||
use super::{storage::Storage, DB};
|
||||
use crate::server::models::{File, User};
|
||||
use anyhow::Result;
|
||||
use sqlx::{query, query_as, Row, SqlitePool};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct SqlDB {
|
||||
pool: SqlitePool, // Use a connection pool for efficient database access
|
||||
storage: Storage, // Directory for storing blocks
|
||||
}
|
||||
|
||||
static SCHEMA: &str = include_str!("../../../schema/server.sql");
|
||||
|
||||
impl SqlDB {
|
||||
pub async fn new(database_filepath: &str, storage_dir: &str, users: &[User]) -> Self {
|
||||
// Check if the database file exists, and create it if it doesn't
|
||||
if !std::path::Path::new(database_filepath).exists() {
|
||||
std::fs::File::create(database_filepath).expect("Failed to create database file");
|
||||
}
|
||||
|
||||
let pool = SqlitePool::connect_lazy(database_filepath)
|
||||
.expect("Failed to create database connection pool");
|
||||
|
||||
// Initialize the database schema
|
||||
Self::init_schema(&pool)
|
||||
.await
|
||||
.expect("Failed to initialize database schema");
|
||||
|
||||
let storage = Storage::new(storage_dir);
|
||||
|
||||
for user in users {
|
||||
if let Err(err) = Self::insert_user(&pool, user).await {
|
||||
log::error!("Failed to insert user '{}': {}", user.username, err);
|
||||
}
|
||||
}
|
||||
|
||||
Self { pool, storage }
|
||||
}
|
||||
|
||||
/// Initialize the database schema
|
||||
async fn init_schema(pool: &SqlitePool) -> Result<(), anyhow::Error> {
|
||||
sqlx::query(SCHEMA)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("Failed to create database schema: {}", e))?;
|
||||
|
||||
log::info!("Database schema initialized successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn metadata_exists(
|
||||
&self,
|
||||
file_hash: &str,
|
||||
block_index: u64,
|
||||
block_hash: &str,
|
||||
user_id: i64,
|
||||
) -> bool {
|
||||
let result = query(
|
||||
"SELECT COUNT(*) as count FROM metadata WHERE file_hash = ? AND block_index = ? AND block_hash = ? AND user_id = ?",
|
||||
)
|
||||
.bind(file_hash)
|
||||
.bind(block_index as i64)
|
||||
.bind(block_hash)
|
||||
.bind(user_id)
|
||||
.fetch_one(&self.pool);
|
||||
|
||||
match result.await {
|
||||
Ok(row) => {
|
||||
let count: i64 = row.get(0);
|
||||
count > 0
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Error checking if metadata exists: {}", err);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn insert_user(pool: &SqlitePool, user: &User) -> Result<(), anyhow::Error> {
|
||||
query(
|
||||
"INSERT OR IGNORE INTO users (username, password, created_at) VALUES (?, ?, CURRENT_TIMESTAMP)",
|
||||
)
|
||||
.bind(&user.username)
|
||||
.bind(&user.password)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("Failed to insert user: {}", e))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl DB for SqlDB {
|
||||
async fn get_user_by_username(&self, username: &str) -> Option<User> {
|
||||
let query = "SELECT * FROM users WHERE username = ?";
|
||||
let result = query_as::<_, User>(query)
|
||||
.bind(username)
|
||||
.fetch_one(&self.pool);
|
||||
|
||||
match result.await {
|
||||
Ok(user) => Some(user),
|
||||
Err(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
async fn block_exists(
|
||||
&self,
|
||||
file_hash: &str,
|
||||
block_index: u64,
|
||||
block_hash: &str,
|
||||
user_id: i64,
|
||||
) -> bool {
|
||||
// Check if the block already exists in storage
|
||||
let block_exists = self.storage.block_exists(block_hash);
|
||||
|
||||
// Check if the metadata already exists in the database
|
||||
let metadata_exists = self
|
||||
.metadata_exists(file_hash, block_index, block_hash, user_id)
|
||||
.await;
|
||||
|
||||
// If both block and metadata exist, no need to store again
|
||||
if block_exists && (metadata_exists || file_hash.is_empty()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
false // Block does not exist
|
||||
}
|
||||
|
||||
async fn store_block(
|
||||
&self,
|
||||
block_hash: &str,
|
||||
data: Vec<u8>,
|
||||
file_hash: &str,
|
||||
block_index: u64,
|
||||
user_id: i64,
|
||||
) -> Result<bool, anyhow::Error> {
|
||||
// Check if the block already exists in storage
|
||||
let block_exists = self.storage.block_exists(block_hash);
|
||||
|
||||
// Check if the metadata already exists in the database
|
||||
let metadata_exists = self
|
||||
.metadata_exists(file_hash, block_index, block_hash, user_id)
|
||||
.await;
|
||||
|
||||
// If both block and metadata exist, no need to store again
|
||||
if block_exists && (metadata_exists || (file_hash.is_empty() && user_id == 0)) {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// Calculate block size
|
||||
let block_size = data.len() as i64;
|
||||
|
||||
// Store metadata if it doesn't exist
|
||||
if !metadata_exists {
|
||||
if let Err(err) = query(
|
||||
"INSERT INTO metadata (file_hash, block_index, block_hash, user_id, block_size, created_at)
|
||||
VALUES (?, ?, ?, ?, ?, CURRENT_TIMESTAMP)",
|
||||
)
|
||||
.bind(file_hash)
|
||||
.bind(block_index as i64)
|
||||
.bind(block_hash)
|
||||
.bind(user_id)
|
||||
.bind(block_size)
|
||||
.execute(&self.pool)
|
||||
.await
|
||||
{
|
||||
log::error!("Error storing metadata: {}", err);
|
||||
return Err(anyhow::anyhow!("Failed to store metadata: {}", err));
|
||||
}
|
||||
}
|
||||
|
||||
// Store the block data in the file system if it doesn't exist
|
||||
if !block_exists {
|
||||
if let Err(err) = self.storage.save_block(block_hash, &data) {
|
||||
log::error!("Error storing block in storage: {}", err);
|
||||
return Err(anyhow::anyhow!("Failed to store block in storage: {}", err));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(true) // Indicate that the block or metadata was newly stored
|
||||
}
|
||||
|
||||
async fn get_block(&self, hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error> {
|
||||
// Retrieve the block data from storage
|
||||
match self.storage.get_block(hash) {
|
||||
Ok(Some(data)) => {
|
||||
if let Err(err) = self.increment_block_downloads(&hash).await {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Failed to increment download count for block {}: {}",
|
||||
hash,
|
||||
err
|
||||
));
|
||||
}
|
||||
Ok(Some(data))
|
||||
}
|
||||
Ok(None) => Ok(None),
|
||||
Err(err) => {
|
||||
log::error!("Error retrieving block from storage: {}", err);
|
||||
Err(anyhow::anyhow!(
|
||||
"Failed to retrieve block from storage: {}",
|
||||
err
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_file_by_hash(&self, hash: &str) -> Result<Option<File>, anyhow::Error> {
|
||||
// Retrieve the blocks associated with the file hash
|
||||
let blocks = match self.get_file_blocks_ordered(hash).await {
|
||||
Ok(blocks) => blocks,
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve file blocks: {}", err);
|
||||
return Err(anyhow::anyhow!("Failed to retrieve file blocks: {}", err));
|
||||
}
|
||||
};
|
||||
|
||||
if blocks.is_empty() {
|
||||
return Ok(None); // No blocks found, file does not exist
|
||||
}
|
||||
|
||||
// Combine block data to reconstruct the file
|
||||
let mut file_content = Vec::new();
|
||||
for (block_hash, _) in blocks {
|
||||
match self.storage.get_block(&block_hash) {
|
||||
Ok(Some(data)) => {
|
||||
if let Err(err) = self.increment_block_downloads(&block_hash).await {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Failed to increment download count for block {}: {}",
|
||||
block_hash,
|
||||
err
|
||||
));
|
||||
}
|
||||
file_content.extend(data)
|
||||
}
|
||||
Ok(None) => {
|
||||
log::error!("Block {} not found", block_hash);
|
||||
return Err(anyhow::anyhow!("Block {} not found", block_hash));
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve block {}: {}", block_hash, err);
|
||||
return Err(anyhow::anyhow!(
|
||||
"Failed to retrieve block {}: {}",
|
||||
block_hash,
|
||||
err
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Return the reconstructed file
|
||||
Ok(Some(File {
|
||||
file_hash: hash.to_string(),
|
||||
file_content,
|
||||
}))
|
||||
}
|
||||
|
||||
async fn get_file_blocks_ordered(
|
||||
&self,
|
||||
file_hash: &str,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error> {
|
||||
let result = query(
|
||||
"SELECT block_hash, block_index FROM metadata WHERE file_hash = ? ORDER BY block_index",
|
||||
)
|
||||
.bind(file_hash)
|
||||
.fetch_all(&self.pool)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(rows) => {
|
||||
let blocks = rows
|
||||
.into_iter()
|
||||
.map(|row| {
|
||||
let block_hash: String = row.get(0);
|
||||
let block_index: i64 = row.get(1);
|
||||
(block_hash, block_index as u64)
|
||||
})
|
||||
.collect::<Vec<(String, u64)>>();
|
||||
|
||||
Ok(blocks)
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Error retrieving file blocks: {}", err);
|
||||
Err(anyhow::anyhow!("Failed to retrieve file blocks: {}", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_blocks(
|
||||
&self,
|
||||
page: u32,
|
||||
per_page: u32,
|
||||
) -> Result<(Vec<String>, u64), anyhow::Error> {
|
||||
let blocks = match self.storage.list_blocks() {
|
||||
Ok(blocks) => blocks,
|
||||
Err(err) => {
|
||||
log::error!("Error listing blocks: {}", err);
|
||||
return Err(anyhow::anyhow!("Failed to list blocks: {}", err));
|
||||
}
|
||||
};
|
||||
|
||||
let total = blocks.len() as u64;
|
||||
let start = page
|
||||
.checked_sub(1)
|
||||
.and_then(|p| p.checked_mul(per_page))
|
||||
.ok_or_else(|| anyhow::anyhow!("Page or per_page value caused overflow"))?
|
||||
as usize;
|
||||
let end = (start + per_page as usize).min(total as usize);
|
||||
let page_blocks = blocks
|
||||
.into_iter()
|
||||
.skip(start)
|
||||
.take(end.saturating_sub(start))
|
||||
.collect();
|
||||
Ok((page_blocks, total))
|
||||
}
|
||||
|
||||
async fn get_user_blocks(
|
||||
&self,
|
||||
user_id: i64,
|
||||
page: u32,
|
||||
per_page: u32,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error> {
|
||||
let offset = page
|
||||
.checked_sub(1)
|
||||
.and_then(|p| p.checked_mul(per_page))
|
||||
.ok_or_else(|| anyhow::anyhow!("Page or per_page value caused overflow"))?
|
||||
as i64;
|
||||
|
||||
let result = query(
|
||||
"SELECT block_hash, block_size FROM metadata WHERE user_id = ? ORDER BY block_index LIMIT ? OFFSET ?",
|
||||
)
|
||||
.bind(user_id)
|
||||
.bind(per_page as i64)
|
||||
.bind(offset)
|
||||
.fetch_all(&self.pool)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(rows) => {
|
||||
let blocks = rows
|
||||
.into_iter()
|
||||
.map(|row| {
|
||||
let block_hash: String = row.get(0);
|
||||
let block_size: i64 = row.get(1);
|
||||
(block_hash, block_size as u64)
|
||||
})
|
||||
.collect::<Vec<(String, u64)>>();
|
||||
|
||||
Ok(blocks)
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Error retrieving user blocks: {}", err);
|
||||
Err(anyhow::anyhow!("Failed to retrieve user blocks: {}", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn increment_block_downloads(&self, hash: &str) -> Result<(), anyhow::Error> {
|
||||
let result =
|
||||
query("UPDATE metadata SET downloads_count = downloads_count + 1 WHERE block_hash = ?")
|
||||
.bind(hash)
|
||||
.execute(&self.pool)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => {
|
||||
log::error!("Error incrementing block downloads count: {}", err);
|
||||
Err(anyhow::anyhow!(
|
||||
"Failed to increment block downloads count: {}",
|
||||
err
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_block_downloads(&self, hash: &str) -> Result<(u64, u64), anyhow::Error> {
|
||||
let result = query("SELECT downloads_count, block_size FROM metadata WHERE block_hash = ?")
|
||||
.bind(hash)
|
||||
.fetch_one(&self.pool)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(row) => {
|
||||
let count: i64 = row.get(0);
|
||||
let size: i64 = row.get(1);
|
||||
Ok((count as u64, size as u64))
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Error retrieving block downloads count and size: {}", err);
|
||||
Err(anyhow::anyhow!(
|
||||
"Failed to retrieve block downloads count and size: {}",
|
||||
err
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
95  components/rfs/src/server/db/storage.rs  Normal file
@@ -0,0 +1,95 @@
|
||||
use std::fs;
|
||||
use std::io::{self, Write};
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Storage {
|
||||
base_dir: PathBuf,
|
||||
}
|
||||
|
||||
impl Storage {
|
||||
pub fn new(base_dir: &str) -> Self {
|
||||
let base_path = PathBuf::from(base_dir).join("blocks");
|
||||
fs::create_dir_all(&base_path).expect("Failed to create storage directory");
|
||||
Self {
|
||||
base_dir: base_path,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn save_block(&self, hash: &str, content: &[u8]) -> io::Result<()> {
|
||||
let sub_dir = self.base_dir.join(&hash[..4]);
|
||||
fs::create_dir_all(&sub_dir)?;
|
||||
|
||||
let block_path = sub_dir.join(hash);
|
||||
let mut file = fs::File::create(block_path)?;
|
||||
file.write_all(content)
|
||||
}
|
||||
|
||||
pub fn get_block(&self, hash: &str) -> io::Result<Option<Vec<u8>>> {
|
||||
let block_path = self.base_dir.join(&hash[..4]).join(hash);
|
||||
if block_path.exists() {
|
||||
Ok(Some(fs::read(block_path)?))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn block_exists(&self, hash: &str) -> bool {
|
||||
let block_path = self.base_dir.join(&hash[..4]).join(hash);
|
||||
block_path.exists()
|
||||
}
|
||||
|
||||
pub fn list_blocks(&self) -> io::Result<Vec<String>> {
|
||||
let mut block_hashes = Vec::new();
|
||||
|
||||
// Walk through the storage directory
|
||||
for entry in fs::read_dir(&self.base_dir)? {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
if path.is_dir() {
|
||||
// Each subdirectory represents the first 4 characters of the hash
|
||||
for block_entry in fs::read_dir(path)? {
|
||||
let block_entry = block_entry?;
|
||||
let block_path = block_entry.path();
|
||||
if block_path.is_file() {
|
||||
if let Some(file_name) = block_path.file_name() {
|
||||
if let Some(hash) = file_name.to_str() {
|
||||
block_hashes.push(hash.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(block_hashes)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_storage() {
|
||||
let storage = Storage::new("test_storage");
|
||||
|
||||
let hash = "abcd1234";
|
||||
let content = b"Hello, world!";
|
||||
|
||||
// Save block
|
||||
storage.save_block(hash, content).unwrap();
|
||||
assert!(storage.block_exists(hash));
|
||||
|
||||
let hash = "abcd12345";
|
||||
let content = b"Hello, world!";
|
||||
|
||||
// Get block
|
||||
storage.save_block(hash, content).unwrap();
|
||||
let retrieved_content = storage.get_block(hash).unwrap();
|
||||
assert_eq!(retrieved_content.unwrap(), content);
|
||||
|
||||
// Clean up
|
||||
fs::remove_dir_all("test_storage").unwrap();
|
||||
}
|
||||
}
|
||||
171  components/rfs/src/server/file_handlers.rs  Normal file
@@ -0,0 +1,171 @@
|
||||
use axum::{body::Bytes, extract::State, http::StatusCode, response::IntoResponse};
|
||||
use axum_macros::debug_handler;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::server::{
|
||||
auth,
|
||||
config::AppState,
|
||||
db::DB,
|
||||
models::{Block, File},
|
||||
response::{ResponseError, ResponseResult},
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
const BLOCK_SIZE: usize = 1024 * 1024; // 1MB
|
||||
|
||||
// File API endpoints are included in the main FlistApi in handlers.rs
|
||||
|
||||
/// Response for file upload
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct FileUploadResponse {
|
||||
/// The file hash
|
||||
pub file_hash: String,
|
||||
/// Message indicating success
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
/// Upload a file to the server.
|
||||
/// The file will be split into blocks and stored in the database.
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/api/v1/file",
|
||||
tag = "File Management",
|
||||
request_body(content = [u8], description = "File data to upload", content_type = "application/octet-stream"),
|
||||
responses(
|
||||
(status = 201, description = "File uploaded successfully", body = FileUploadResponse),
|
||||
(status = 400, description = "Bad request", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
security(
|
||||
("bearerAuth" = [])
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn upload_file_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
extension: axum::extract::Extension<String>,
|
||||
body: Bytes,
|
||||
) -> Result<(StatusCode, ResponseResult), ResponseError> {
|
||||
// Convert the request body to a byte vector
|
||||
let data = body.to_vec();
|
||||
|
||||
// Create a new File record
|
||||
let file = File::new(data.clone());
|
||||
|
||||
// Store the file metadata in the database
|
||||
// In a real implementation, we would store this in the files table
|
||||
// For now, we'll just log it
|
||||
log::info!("Storing file metadata: hash={}", file.file_hash);
|
||||
|
||||
// Get the username from the extension (set by the authorize middleware)
|
||||
let username = extension.0;
|
||||
let user_id = auth::get_user_id_from_token(&*state.db, &username).await?;
|
||||
|
||||
// Store each block with a reference to the file
|
||||
for (i, chunk) in data
|
||||
.chunks(state.config.block_size.unwrap_or(BLOCK_SIZE))
|
||||
.enumerate()
|
||||
{
|
||||
let block_hash = Block::calculate_hash(chunk);
|
||||
|
||||
// TODO: parallel
|
||||
// Store each block in the storage with file hash and block index in metadata in DB
|
||||
match state
|
||||
.db
|
||||
.store_block(
|
||||
&block_hash,
|
||||
chunk.to_vec(),
|
||||
&file.file_hash,
|
||||
i as u64,
|
||||
user_id,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(_) => {
|
||||
log::debug!("Stored block {}", block_hash);
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to store block: {}", err);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log::info!(
|
||||
"Stored file metadata and blocks for file {}",
|
||||
file.file_hash
|
||||
);
|
||||
|
||||
// Return success response
|
||||
let response = FileUploadResponse {
|
||||
file_hash: file.file_hash,
|
||||
message: "File is uploaded successfully".to_string(),
|
||||
};
|
||||
|
||||
Ok((StatusCode::CREATED, ResponseResult::FileUploaded(response)))
|
||||
}
|
||||
|
||||
/// Request for file download with custom filename
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct FileDownloadRequest {
|
||||
/// The custom filename to use for download
|
||||
pub file_name: String,
|
||||
}
|
||||
|
||||
/// Retrieve a file by its hash from path, with optional custom filename in request body.
|
||||
/// The file will be reconstructed from its blocks.
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/file/{hash}",
|
||||
tag = "File Management",
|
||||
request_body(content = FileDownloadRequest, description = "Optional custom filename for download", content_type = "application/json"),
|
||||
responses(
|
||||
(status = 200, description = "File found", body = [u8], content_type = "application/octet-stream"),
|
||||
(status = 404, description = "File not found", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("hash" = String, Path, description = "File hash")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn get_file_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::extract::Path(hash): axum::extract::Path<String>,
|
||||
request: Option<axum::extract::Json<FileDownloadRequest>>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
// Get the file metadata using the hash
|
||||
let file = match state.db.get_file_by_hash(&hash).await {
|
||||
Ok(Some(file)) => file,
|
||||
Ok(None) => {
|
||||
return Err(ResponseError::NotFound(format!(
|
||||
"File with hash '{}' not found",
|
||||
hash
|
||||
)));
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve file metadata: {}", err);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
};
|
||||
|
||||
// Set content disposition header with the custom filename from request if provided
|
||||
// Otherwise use the hash as the filename
|
||||
let filename = match request {
|
||||
Some(req) => req.0.file_name,
|
||||
None => format!("{}.bin", hash), // Default filename using hash
|
||||
};
|
||||
|
||||
let headers = [(
|
||||
axum::http::header::CONTENT_DISPOSITION,
|
||||
format!("attachment; filename=\"{}\"", filename),
|
||||
)];
|
||||
|
||||
// Return the file data
|
||||
Ok((
|
||||
StatusCode::OK,
|
||||
headers,
|
||||
axum::body::Bytes::from(file.file_content),
|
||||
))
|
||||
}
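// Hedged example (illustrative only): downloading a stored file by hash with reqwest.
// `server_url` is a placeholder; send a JSON body with `file_name` to customize the
// Content-Disposition filename, otherwise the server falls back to "<hash>.bin".
#[allow(dead_code)]
async fn example_download_file(server_url: &str, hash: &str) -> anyhow::Result<Vec<u8>> {
    let client = reqwest::Client::new();
    let resp = client
        .get(format!("{}/api/v1/file/{}", server_url, hash))
        .send()
        .await?;
    anyhow::ensure!(resp.status().is_success(), "download failed: {}", resp.status());
    Ok(resp.bytes().await?.to_vec())
}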
|
||||
@@ -1,6 +1,6 @@
|
||||
use anyhow::Error;
|
||||
use axum::{
|
||||
extract::{Path, Query, State},
|
||||
extract::{Path, State},
|
||||
response::IntoResponse,
|
||||
Extension, Json,
|
||||
};
|
||||
@@ -15,25 +15,67 @@ use std::{
|
||||
use bollard::auth::DockerCredentials;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
use crate::docker;
|
||||
use crate::fungi;
|
||||
use crate::server::{
|
||||
auth::{SignInBody, SignInResponse, __path_sign_in_handler},
|
||||
response::{DirListTemplate, DirLister, ErrorTemplate, TemplateErr},
|
||||
};
|
||||
use crate::{
|
||||
config::{self, Job},
|
||||
response::{FileInfo, ResponseError, ResponseResult},
|
||||
db::DB,
|
||||
response::{DirListTemplate, DirLister, ErrorTemplate, TemplateErr},
|
||||
response::{FileInfo, ResponseError, ResponseResult, FlistStateResponse, HealthResponse, BlockUploadedResponse},
|
||||
serve_flists::visit_dir_one_level,
|
||||
};
|
||||
use rfs::fungi::{Reader, Writer};
|
||||
use utoipa::{OpenApi, ToSchema};
|
||||
use crate::store;
|
||||
use utoipa::{OpenApi, ToSchema, Modify};
|
||||
use utoipa::openapi::security::{SecurityScheme, HttpAuthScheme, Http};
|
||||
use uuid::Uuid;
|
||||
use crate::server::block_handlers;
|
||||
use crate::server::file_handlers;
|
||||
use crate::server::serve_flists;
|
||||
use crate::server::website_handlers;
|
||||
|
||||
// Security scheme modifier for JWT Bearer authentication
|
||||
struct SecurityAddon;
|
||||
|
||||
impl Modify for SecurityAddon {
|
||||
fn modify(&self, openapi: &mut utoipa::openapi::OpenApi) {
|
||||
let components = openapi.components.as_mut().unwrap(); // Safe to unwrap since components are registered
|
||||
components.add_security_scheme(
|
||||
"bearerAuth",
|
||||
SecurityScheme::Http(Http::new(HttpAuthScheme::Bearer)),
|
||||
);
|
||||
}
|
||||
}
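// Note: registering the "bearerAuth" scheme here is what the per-handler
// `security(("bearerAuth" = []))` attributes refer to, so the generated OpenAPI
// document resolves them and Swagger UI offers a single Bearer-token "Authorize" input.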
|
||||
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(
|
||||
paths(health_check_handler, create_flist_handler, get_flist_state_handler, preview_flist_handler, list_flists_handler, sign_in_handler),
|
||||
components(schemas(DirListTemplate, DirLister, FlistBody, Job, ResponseError, ErrorTemplate, TemplateErr, ResponseResult, FileInfo, SignInBody, FlistState, SignInResponse, FlistStateInfo, PreviewResponse)),
|
||||
paths(health_check_handler, create_flist_handler, get_flist_state_handler, preview_flist_handler, list_flists_handler, sign_in_handler, block_handlers::upload_block_handler, block_handlers::get_block_handler, block_handlers::check_block_handler, block_handlers::verify_blocks_handler, block_handlers::get_blocks_by_hash_handler, block_handlers::list_blocks_handler, block_handlers::get_block_downloads_handler, block_handlers::get_user_blocks_handler, file_handlers::upload_file_handler, file_handlers::get_file_handler, website_handlers::serve_website_handler, serve_flists::serve_flists),
|
||||
modifiers(&SecurityAddon),
|
||||
components(
|
||||
schemas(
|
||||
// Common schemas
|
||||
DirListTemplate, DirLister, ResponseError, ErrorTemplate, TemplateErr, ResponseResult, FileInfo, FlistStateResponse,
|
||||
// Response wrapper schemas
|
||||
HealthResponse, BlockUploadedResponse,
|
||||
// Authentication schemas
|
||||
SignInBody, SignInResponse,
|
||||
// Flist schemas
|
||||
FlistBody, Job, FlistState, FlistStateInfo, PreviewResponse,
|
||||
// Block schemas
|
||||
block_handlers::VerifyBlock, block_handlers::VerifyBlocksRequest, block_handlers::VerifyBlocksResponse,
|
||||
block_handlers::BlocksResponse, block_handlers::ListBlocksParams, block_handlers::ListBlocksResponse, block_handlers::BlockInfo,
|
||||
block_handlers::UserBlocksResponse, block_handlers::BlockDownloadsResponse, block_handlers::UploadBlockParams, block_handlers::UserBlockInfo,
|
||||
// File schemas
|
||||
file_handlers::FileUploadResponse, file_handlers::FileDownloadRequest
|
||||
)
|
||||
),
|
||||
tags(
|
||||
(name = "fl-server", description = "Flist conversion API")
|
||||
(name = "System", description = "System health and status"),
|
||||
(name = "Authentication", description = "Authentication endpoints"),
|
||||
(name = "Flist Management", description = "Flist creation and management"),
|
||||
(name = "Block Management", description = "Block storage and retrieval"),
|
||||
(name = "File Management", description = "File upload and download"),
|
||||
(name = "Website Serving", description = "Website content serving")
|
||||
)
|
||||
)]
|
||||
pub struct FlistApi;
|
||||
@@ -54,17 +96,22 @@ pub struct FlistBody {
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, Clone, ToSchema)]
|
||||
pub struct PreviewResponse {
|
||||
pub content: Vec<PathBuf>,
|
||||
pub content: Vec<String>,
|
||||
pub metadata: String,
|
||||
pub checksum: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, PartialEq, ToSchema)]
|
||||
pub enum FlistState {
|
||||
#[schema(title = "FlistStateAccepted")]
|
||||
Accepted(String),
|
||||
#[schema(title = "FlistStateStarted")]
|
||||
Started(String),
|
||||
#[schema(title = "FlistStateInProgress")]
|
||||
InProgress(FlistStateInfo),
|
||||
#[schema(title = "FlistStateCreated")]
|
||||
Created(String),
|
||||
#[schema(title = "FlistStateFailed")]
|
||||
Failed,
|
||||
}
|
||||
|
||||
@@ -74,28 +121,12 @@ pub struct FlistStateInfo {
|
||||
progress: f32,
|
||||
}
|
||||
|
||||
const DEFAULT_LIMIT: usize = 10;
|
||||
const DEFAULT_PAGE: usize = 1;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct Pagination {
|
||||
page: Option<usize>,
|
||||
limit: Option<usize>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Clone)]
|
||||
pub struct Filter {
|
||||
pub max_size: Option<usize>,
|
||||
pub min_size: Option<usize>,
|
||||
username: Option<String>,
|
||||
pub name: Option<String>,
|
||||
}
|
||||
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/v1/api",
|
||||
path = "/api/v1",
|
||||
tag = "System",
|
||||
responses(
|
||||
(status = 200, description = "flist server is working", body = String)
|
||||
(status = 200, description = "flist server is working", body = HealthResponse)
|
||||
)
|
||||
)]
|
||||
pub async fn health_check_handler() -> ResponseResult {
|
||||
@@ -104,14 +135,18 @@ pub async fn health_check_handler() -> ResponseResult {
|
||||
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/v1/api/fl",
|
||||
path = "/api/v1/fl",
|
||||
tag = "Flist Management",
|
||||
request_body = FlistBody,
|
||||
responses(
|
||||
(status = 201, description = "Flist conversion started", body = Job),
|
||||
(status = 401, description = "Unauthorized user"),
|
||||
(status = 403, description = "Forbidden"),
|
||||
(status = 409, description = "Conflict"),
|
||||
(status = 500, description = "Internal server error"),
|
||||
(status = 401, description = "Unauthorized user", body = ResponseError),
|
||||
(status = 403, description = "Forbidden", body = ResponseError),
|
||||
(status = 409, description = "Conflict", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
security(
|
||||
("bearerAuth" = [])
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
@@ -153,7 +188,7 @@ pub async fn create_flist_handler(
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
|
||||
let meta = match Writer::new(&fl_path, true).await {
|
||||
let meta = match fungi::Writer::new(&fl_path, true).await {
|
||||
Ok(writer) => writer,
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
@@ -165,7 +200,7 @@ pub async fn create_flist_handler(
|
||||
}
|
||||
};
|
||||
|
||||
let store = match rfs::store::parse_router(&cfg.store_url).await {
|
||||
let store = match store::parse_router(&cfg.store_url).await {
|
||||
Ok(s) => s,
|
||||
Err(err) => {
|
||||
log::error!("failed to parse router for store with error {}", err);
|
||||
@@ -209,7 +244,7 @@ pub async fn create_flist_handler(
|
||||
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let mut docker_to_fl =
|
||||
docker2fl::DockerImageToFlist::new(meta, docker_image, credentials, docker_tmp_dir);
|
||||
docker::DockerImageToFlist::new(meta, docker_image, credentials, docker_tmp_dir);
|
||||
|
||||
let res = docker_to_fl.prepare().await;
|
||||
if res.is_err() {
|
||||
@@ -284,16 +319,20 @@ pub async fn create_flist_handler(
|
||||
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/v1/api/fl/{job_id}",
|
||||
path = "/api/v1/fl/{job_id}",
|
||||
tag = "Flist Management",
|
||||
responses(
|
||||
(status = 200, description = "Flist state", body = FlistState),
|
||||
(status = 404, description = "Flist not found"),
|
||||
(status = 500, description = "Internal server error"),
|
||||
(status = 401, description = "Unauthorized user"),
|
||||
(status = 403, description = "Forbidden"),
|
||||
(status = 200, description = "Flist state", body = FlistStateResponse),
|
||||
(status = 404, description = "Flist not found", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
(status = 401, description = "Unauthorized user", body = ResponseError),
|
||||
(status = 403, description = "Forbidden", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("job_id" = String, Path, description = "flist job id")
|
||||
),
|
||||
security(
|
||||
("bearerAuth" = [])
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
@@ -345,36 +384,21 @@ pub async fn get_flist_state_handler(
|
||||
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/v1/api/fl",
|
||||
path = "/api/v1/fl",
|
||||
tag = "Flist Management",
|
||||
responses(
|
||||
(status = 200, description = "Listing flists", body = HashMap<String, Vec<FileInfo>>),
|
||||
(status = 401, description = "Unauthorized user"),
|
||||
(status = 403, description = "Forbidden"),
|
||||
(status = 500, description = "Internal server error"),
|
||||
(status = 401, description = "Unauthorized user", body = ResponseError),
|
||||
(status = 403, description = "Forbidden", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn list_flists_handler(
|
||||
State(state): State<Arc<config::AppState>>,
|
||||
pagination: Query<Pagination>,
|
||||
filter: Query<Filter>,
|
||||
) -> impl IntoResponse {
|
||||
pub async fn list_flists_handler(State(state): State<Arc<config::AppState>>) -> impl IntoResponse {
|
||||
let mut flists: HashMap<String, Vec<FileInfo>> = HashMap::new();
|
||||
|
||||
let pagination: Pagination = pagination.0;
|
||||
let page = pagination.page.unwrap_or(DEFAULT_PAGE);
|
||||
let limit = pagination.limit.unwrap_or(DEFAULT_LIMIT);
|
||||
|
||||
if page == 0 {
|
||||
return Err(ResponseError::BadRequest(
|
||||
"requested page should be nonzero positive number".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let filter: Filter = filter.0;
|
||||
|
||||
let rs: Result<Vec<FileInfo>, std::io::Error> =
|
||||
visit_dir_one_level(&state.config.flist_dir, &state, None).await;
|
||||
visit_dir_one_level(&state.config.flist_dir, &state).await;
|
||||
|
||||
let files = match rs {
|
||||
Ok(files) => files,
|
||||
@@ -386,30 +410,9 @@ pub async fn list_flists_handler(
|
||||
|
||||
for file in files {
|
||||
if !file.is_file {
|
||||
let flists_per_username =
|
||||
visit_dir_one_level(&file.path_uri, &state, Some(filter.clone())).await;
|
||||
|
||||
if let Some(ref filter_username) = filter.username {
|
||||
if filter_username.clone() != file.name {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
let flists_per_username = visit_dir_one_level(&file.path_uri, &state).await;
|
||||
match flists_per_username {
|
||||
Ok(files) => {
|
||||
let username = file.name;
|
||||
flists.insert(username.clone(), Vec::new());
|
||||
|
||||
let start = limit * (page - 1);
|
||||
let end = limit * page;
|
||||
if files.len() > start {
|
||||
if files.len() >= end {
|
||||
flists.insert(username, files[start..end].to_vec());
|
||||
} else {
|
||||
flists.insert(username, files[start..].to_vec());
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(files) => flists.insert(file.name, files),
|
||||
Err(e) => {
|
||||
log::error!("failed to list flists per username with error: {}", e);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
@@ -423,13 +426,14 @@ pub async fn list_flists_handler(
|
||||
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/v1/api/fl/preview/{flist_path}",
|
||||
path = "/api/v1/fl/preview/{flist_path}",
|
||||
tag = "Flist Management",
|
||||
responses(
|
||||
(status = 200, description = "Flist preview result", body = PreviewResponse),
|
||||
(status = 400, description = "Bad request"),
|
||||
(status = 401, description = "Unauthorized user"),
|
||||
(status = 403, description = "Forbidden"),
|
||||
(status = 500, description = "Internal server error"),
|
||||
(status = 400, description = "Bad request", body = ResponseError),
|
||||
(status = 401, description = "Unauthorized user", body = ResponseError),
|
||||
(status = 403, description = "Forbidden", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("flist_path" = String, Path, description = "flist file path")
|
||||
@@ -464,8 +468,14 @@ pub async fn preview_flist_handler(
|
||||
}
|
||||
};
|
||||
|
||||
// Convert PathBuf values to strings for OpenAPI compatibility
|
||||
let content_strings: Vec<String> = content
|
||||
.into_iter()
|
||||
.map(|path| path.to_string_lossy().to_string())
|
||||
.collect();
|
||||
|
||||
Ok(ResponseResult::PreviewFlist(PreviewResponse {
|
||||
content,
|
||||
content: content_strings,
|
||||
metadata: state.config.store_url.join("-"),
|
||||
checksum: sha256::digest(&bytes),
|
||||
}))
|
||||
@@ -495,7 +505,7 @@ async fn validate_flist_path(state: &Arc<config::AppState>, fl_path: &String) ->
|
||||
}
|
||||
|
||||
// validate username
|
||||
match state.db.get_user_by_username(&parts[1]) {
|
||||
match state.db.get_user_by_username(parts[1]).await {
|
||||
Some(_) => (),
|
||||
None => {
|
||||
anyhow::bail!(
|
||||
@@ -536,7 +546,7 @@ async fn validate_flist_path(state: &Arc<config::AppState>, fl_path: &String) ->
|
||||
async fn get_flist_content(fl_path: &String) -> Result<Vec<PathBuf>, Error> {
|
||||
let mut visitor = ReadVisitor::default();
|
||||
|
||||
let meta = match Reader::new(&fl_path).await {
|
||||
let meta = match fungi::Reader::new(&fl_path).await {
|
||||
Ok(reader) => reader,
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
@@ -573,13 +583,13 @@ impl ReadVisitor {
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl rfs::fungi::meta::WalkVisitor for ReadVisitor {
|
||||
impl fungi::meta::WalkVisitor for ReadVisitor {
|
||||
async fn visit(
|
||||
&mut self,
|
||||
path: &std::path::Path,
|
||||
_node: &rfs::fungi::meta::Inode,
|
||||
) -> rfs::fungi::meta::Result<rfs::fungi::meta::Walk> {
|
||||
_node: &fungi::meta::Inode,
|
||||
) -> fungi::meta::Result<fungi::meta::Walk> {
|
||||
self.inner.push(path.to_path_buf());
|
||||
Ok(rfs::fungi::meta::Walk::Continue)
|
||||
Ok(fungi::meta::Walk::Continue)
|
||||
}
|
||||
}
|
||||
@@ -1,20 +1,25 @@
|
||||
mod auth;
|
||||
mod block_handlers;
|
||||
mod config;
|
||||
mod db;
|
||||
mod file_handlers;
|
||||
mod handlers;
|
||||
mod models;
|
||||
mod response;
|
||||
mod serve_flists;
|
||||
mod website_handlers;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use axum::{
|
||||
error_handling::HandleErrorLayer,
|
||||
extract::{Path, State},
|
||||
http::StatusCode,
|
||||
middleware,
|
||||
response::IntoResponse,
|
||||
routing::{get, post},
|
||||
routing::{get, head, post},
|
||||
BoxError, Router,
|
||||
};
|
||||
use clap::{ArgAction, Parser};
|
||||
use config::AppState;
|
||||
use hyper::{
|
||||
header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE},
|
||||
Method,
|
||||
@@ -25,54 +30,32 @@ use std::{
|
||||
sync::{Arc, Mutex},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::{runtime::Builder, signal};
|
||||
use tokio::signal;
|
||||
use tower::ServiceBuilder;
|
||||
use tower_http::cors::CorsLayer;
|
||||
use tower_http::{cors::Any, trace::TraceLayer};
|
||||
|
||||
use utoipa::OpenApi;
|
||||
use utoipa_swagger_ui::SwaggerUi;
|
||||
// Using only the main FlistApi for OpenAPI documentation
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[clap(name ="fl-server", author, version = env!("GIT_VERSION"), about, long_about = None)]
|
||||
struct Options {
|
||||
/// enable debugging logs
|
||||
#[clap(short, long, action=ArgAction::Count)]
|
||||
debug: u8,
|
||||
|
||||
/// config file path
|
||||
#[clap(short, long)]
|
||||
config_path: String,
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let rt = Builder::new_multi_thread()
|
||||
.thread_stack_size(8 * 1024 * 1024)
|
||||
.enable_all()
|
||||
.build()
|
||||
.unwrap();
|
||||
rt.block_on(app())
|
||||
}
|
||||
|
||||
async fn app() -> Result<()> {
|
||||
let opts = Options::parse();
|
||||
simple_logger::SimpleLogger::new()
|
||||
.with_utc_timestamps()
|
||||
.with_level({
|
||||
match opts.debug {
|
||||
0 => log::LevelFilter::Info,
|
||||
1 => log::LevelFilter::Debug,
|
||||
_ => log::LevelFilter::Trace,
|
||||
}
|
||||
})
|
||||
.with_module_level("sqlx", log::Level::Error.to_level_filter())
|
||||
.init()?;
|
||||
|
||||
let config = config::parse_config(&opts.config_path)
|
||||
pub async fn app(config_path: &str) -> Result<()> {
|
||||
let config = config::parse_config(config_path)
|
||||
.await
|
||||
.context("failed to parse config file")?;
|
||||
|
||||
let db = Arc::new(db::MapDB::new(&config.users.clone()));
|
||||
// Initialize the database based on configuration
|
||||
let db: Arc<db::DBType> = if let Some(sqlite_path) = &config.sqlite_path {
|
||||
log::info!("Using SQLite database at: {}", sqlite_path);
|
||||
Arc::new(db::DBType::SqlDB(
|
||||
db::sqlite::SqlDB::new(sqlite_path, &config.storage_dir, &config.users.clone()).await,
|
||||
))
|
||||
} else {
|
||||
log::info!("Using in-memory MapDB database");
|
||||
Arc::new(db::DBType::MapDB(db::map::MapDB::new(
|
||||
&config.users.clone(),
|
||||
)))
|
||||
};
|
||||
|
||||
let app_state = Arc::new(config::AppState {
|
||||
jobs_state: Mutex::new(HashMap::new()),
|
||||
@@ -87,27 +70,83 @@ async fn app() -> Result<()> {
|
||||
.allow_headers([AUTHORIZATION, ACCEPT, CONTENT_TYPE]);
|
||||
|
||||
let v1_routes = Router::new()
|
||||
.route("/v1/api", get(handlers::health_check_handler))
|
||||
.route("/v1/api/signin", post(auth::sign_in_handler))
|
||||
.route("/api/v1", get(handlers::health_check_handler))
|
||||
.route("/api/v1/signin", post(auth::sign_in_handler))
|
||||
.route(
|
||||
"/v1/api/fl",
|
||||
"/api/v1/fl",
|
||||
post(handlers::create_flist_handler).layer(middleware::from_fn_with_state(
|
||||
app_state.clone(),
|
||||
auth::authorize,
|
||||
)),
|
||||
)
|
||||
.route(
|
||||
"/v1/api/fl/:job_id",
|
||||
"/api/v1/fl/:job_id",
|
||||
get(handlers::get_flist_state_handler).layer(middleware::from_fn_with_state(
|
||||
app_state.clone(),
|
||||
auth::authorize,
|
||||
)),
|
||||
)
|
||||
.route(
|
||||
"/v1/api/fl/preview/:flist_path",
|
||||
"/api/v1/fl/preview/:flist_path",
|
||||
get(handlers::preview_flist_handler),
|
||||
)
|
||||
.route("/v1/api/fl", get(handlers::list_flists_handler))
|
||||
.route("/api/v1/fl", get(handlers::list_flists_handler))
|
||||
.route(
|
||||
"/api/v1/block",
|
||||
post(block_handlers::upload_block_handler).layer(middleware::from_fn_with_state(
|
||||
app_state.clone(),
|
||||
auth::authorize,
|
||||
)),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/block/:hash",
|
||||
get(block_handlers::get_block_handler),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/block/:hash",
|
||||
head(block_handlers::check_block_handler),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/block/verify",
|
||||
post(block_handlers::verify_blocks_handler),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/blocks/:hash",
|
||||
get(block_handlers::get_blocks_by_hash_handler),
|
||||
)
|
||||
.route("/api/v1/blocks", get(block_handlers::list_blocks_handler))
|
||||
.route(
|
||||
"/api/v1/block/:hash/downloads",
|
||||
get(block_handlers::get_block_downloads_handler),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/user/blocks",
|
||||
get(block_handlers::get_user_blocks_handler).layer(middleware::from_fn_with_state(
|
||||
app_state.clone(),
|
||||
auth::authorize,
|
||||
)),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/file",
|
||||
post(file_handlers::upload_file_handler).layer(middleware::from_fn_with_state(
|
||||
app_state.clone(),
|
||||
auth::authorize,
|
||||
)),
|
||||
)
|
||||
.route("/api/v1/file/:hash", get(file_handlers::get_file_handler))
|
||||
.route(
|
||||
"/website/:website_hash/*path",
|
||||
get(website_handlers::serve_website_handler),
|
||||
)
|
||||
.route(
|
||||
"/website/:website_hash/",
|
||||
get(
|
||||
|state: State<Arc<AppState>>, path: Path<String>| async move {
|
||||
website_handlers::serve_website_handler(state, Path((path.0, "".to_string())))
|
||||
.await
|
||||
},
|
||||
),
|
||||
)
|
||||
.route("/*path", get(serve_flists::serve_flists));
|
||||
|
||||
let app = Router::new()
|
||||
components/rfs/src/server/models/block.rs (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
|
||||
pub struct Block {
|
||||
pub index: u64, // The index of the block in the file
|
||||
pub hash: String, // The hash of the block's content
|
||||
pub data: Vec<u8>, // The actual data of the block
|
||||
pub size: usize, // The size of the block's data
|
||||
}
|
||||
|
||||
impl Block {
|
||||
/// Calculates the hash of the block's data using BLAKE2b (32-byte digest, hex-encoded).
|
||||
pub fn calculate_hash(data: &[u8]) -> String {
|
||||
let hash = blake2b_simd::Params::new().hash_length(32).hash(data);
|
||||
hex::encode(hash.as_bytes())
|
||||
}
|
||||
}
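// Illustrative test sketch: pins down the expected digest format of calculate_hash.
#[cfg(test)]
mod block_hash_tests {
    use super::Block;

    // A 32-byte BLAKE2b digest hex-encodes to 64 ASCII hex characters.
    #[test]
    fn hash_is_64_hex_chars() {
        let digest = Block::calculate_hash(b"example");
        assert_eq!(digest.len(), 64);
        assert!(digest.chars().all(|c| c.is_ascii_hexdigit()));
    }
}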
|
||||
components/rfs/src/server/models/file.rs (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use sqlx::FromRow;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
#[derive(Debug, Clone, FromRow, Serialize, Deserialize, ToSchema)]
|
||||
pub struct File {
|
||||
pub file_hash: String, // Hash of the file content
|
||||
pub file_content: Vec<u8>, // Content of the file
|
||||
}
|
||||
|
||||
impl File {
|
||||
/// Calculates the SHA-256 hash of the file's content, hex-encoded.
|
||||
pub fn calculate_hash(data: &[u8]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(data);
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
/// Creates a new File instance by calculating the hash of the content.
|
||||
pub fn new(file_content: Vec<u8>) -> Self {
|
||||
let file_hash = Self::calculate_hash(&file_content);
|
||||
Self {
|
||||
file_hash,
|
||||
file_content,
|
||||
}
|
||||
}
|
||||
}
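// Illustrative test sketch: File::new derives its hash from the content it wraps.
#[cfg(test)]
mod file_hash_tests {
    use super::File;

    #[test]
    fn new_file_hash_matches_content_hash() {
        let file = File::new(b"example".to_vec());
        assert_eq!(file.file_hash, File::calculate_hash(b"example"));
        assert_eq!(file.file_hash.len(), 64); // 32-byte SHA-256 digest, hex-encoded
    }
}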
|
||||
components/rfs/src/server/models/mod.rs (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
pub mod block;
|
||||
pub mod file;
|
||||
pub mod user;
|
||||
|
||||
pub use block::Block;
|
||||
pub use file::File;
|
||||
pub use user::User;
|
||||
components/rfs/src/server/models/user.rs (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::FromRow;
|
||||
|
||||
#[derive(Debug, Clone, FromRow, Serialize, Deserialize)]
|
||||
pub struct User {
|
||||
pub id: Option<i64>,
|
||||
pub username: String,
|
||||
pub password: String,
|
||||
}
|
||||
@@ -10,20 +10,28 @@ use axum::{
|
||||
use serde::Serialize;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::{
|
||||
use crate::server::{
|
||||
auth::SignInResponse,
|
||||
config::Job,
|
||||
file_handlers::FileUploadResponse,
|
||||
handlers::{FlistState, PreviewResponse},
|
||||
};
|
||||
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub enum ResponseError {
|
||||
#[schema(title = "ResponseErrorInternalServerError")]
|
||||
InternalServerError,
|
||||
#[schema(title = "ResponseErrorConflict")]
|
||||
Conflict(String),
|
||||
#[schema(title = "ResponseErrorNotFound")]
|
||||
NotFound(String),
|
||||
#[schema(title = "ResponseErrorUnauthorized")]
|
||||
Unauthorized(String),
|
||||
#[schema(title = "ResponseErrorBadRequest")]
|
||||
BadRequest(String),
|
||||
#[schema(title = "ResponseErrorForbidden")]
|
||||
Forbidden(String),
|
||||
#[schema(title = "ResponseErrorTemplateError")]
|
||||
TemplateError(ErrorTemplate),
|
||||
}
|
||||
|
||||
@@ -73,15 +81,45 @@ impl IntoResponse for ResponseError {
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapper structs for OpenAPI documentation to match the actual JSON response format
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct FlistStateResponse {
|
||||
pub flist_state: FlistState,
|
||||
}
|
||||
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct HealthResponse {
|
||||
pub msg: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct BlockUploadedResponse {
|
||||
pub hash: String,
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
|
||||
#[derive(ToSchema)]
|
||||
pub enum ResponseResult {
|
||||
#[schema(title = "ResponseResultHealth")]
|
||||
Health,
|
||||
#[schema(title = "ResponseResultFlistCreated")]
|
||||
FlistCreated(Job),
|
||||
#[schema(title = "ResponseResultFlistState")]
|
||||
FlistState(FlistState),
|
||||
#[schema(title = "ResponseResultFlists")]
|
||||
Flists(HashMap<String, Vec<FileInfo>>),
|
||||
#[schema(title = "ResponseResultPreviewFlist")]
|
||||
PreviewFlist(PreviewResponse),
|
||||
#[schema(title = "ResponseResultSignedIn")]
|
||||
SignedIn(SignInResponse),
|
||||
#[schema(title = "ResponseResultDirTemplate")]
|
||||
DirTemplate(DirListTemplate),
|
||||
#[schema(title = "ResponseResultBlockUploaded")]
|
||||
BlockUploaded(String),
|
||||
#[schema(title = "ResponseResultFileUploaded")]
|
||||
FileUploaded(FileUploadResponse),
|
||||
#[schema(value_type = String, title = "ResponseResultRes", format = "binary")]
|
||||
Res(hyper::Response<tower_http::services::fs::ServeFileSystemResponseBody>),
|
||||
}
|
||||
|
||||
@@ -90,7 +128,9 @@ impl IntoResponse for ResponseResult {
|
||||
match self {
|
||||
ResponseResult::Health => (
|
||||
StatusCode::OK,
|
||||
Json(serde_json::json!({"msg": "flist server is working"})),
|
||||
Json(HealthResponse {
|
||||
msg: "flist server is working".to_string(),
|
||||
}),
|
||||
)
|
||||
.into_response(),
|
||||
ResponseResult::SignedIn(token) => (StatusCode::CREATED, Json(token)).into_response(),
|
||||
@@ -106,6 +146,17 @@ impl IntoResponse for ResponseResult {
|
||||
ResponseResult::PreviewFlist(content) => {
|
||||
(StatusCode::OK, Json(content)).into_response()
|
||||
}
|
||||
ResponseResult::BlockUploaded(hash) => (
|
||||
StatusCode::OK,
|
||||
Json(BlockUploadedResponse {
|
||||
hash,
|
||||
message: "Block processed successfully".to_string(),
|
||||
}),
|
||||
)
|
||||
.into_response(),
|
||||
ResponseResult::FileUploaded(response) => {
|
||||
(StatusCode::CREATED, Json(response)).into_response()
|
||||
}
|
||||
ResponseResult::DirTemplate(t) => match t.render() {
|
||||
Ok(html) => Html(html).into_response(),
|
||||
Err(err) => {
|
||||
@@ -124,7 +175,7 @@ impl IntoResponse for ResponseResult {
|
||||
|
||||
//////// TEMPLATES ////////
|
||||
|
||||
#[derive(Serialize, Clone, Debug, ToSchema)]
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct FileInfo {
|
||||
pub name: String,
|
||||
pub path_uri: String,
|
||||
@@ -172,7 +223,10 @@ const FAIL_REASON_HEADER_NAME: &str = "fl-server-fail-reason";
|
||||
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub enum TemplateErr {
|
||||
#[schema(title = "TemplateErrBadRequest")]
|
||||
BadRequest(String),
|
||||
#[schema(title = "TemplateErrNotFound")]
|
||||
NotFound(String),
|
||||
#[schema(title = "TemplateErrInternalServerError")]
|
||||
InternalServerError(String),
|
||||
}
|
||||
@@ -12,9 +12,8 @@ use axum::{
|
||||
use axum_macros::debug_handler;
|
||||
use percent_encoding::percent_decode;
|
||||
|
||||
use crate::{
|
||||
use crate::server::{
|
||||
config,
|
||||
handlers::Filter,
|
||||
response::{
|
||||
DirListTemplate, DirLister, ErrorTemplate, FileInfo, ResponseError, ResponseResult,
|
||||
TemplateErr,
|
||||
@@ -22,13 +21,27 @@ use crate::{
|
||||
};
|
||||
|
||||
#[debug_handler]
|
||||
/// Serve flist files from the server's filesystem
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/{path}",
|
||||
tag = "Flist Management",
|
||||
params(
|
||||
("path" = String, Path, description = "Path to the flist file or directory to serve")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Successfully served the flist or directory listing", body = ResponseResult),
|
||||
(status = 404, description = "Flist not found", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError)
|
||||
)
|
||||
)]
|
||||
pub async fn serve_flists(
|
||||
State(state): State<Arc<config::AppState>>,
|
||||
req: Request<Body>,
|
||||
) -> impl IntoResponse {
|
||||
let path = req.uri().path().to_string();
|
||||
|
||||
return match ServeDir::new("").oneshot(req).await {
|
||||
match ServeDir::new("").oneshot(req).await {
|
||||
Ok(res) => {
|
||||
let status = res.status();
|
||||
match status {
|
||||
@@ -48,7 +61,7 @@ pub async fn serve_flists(
|
||||
|
||||
match cur_path.is_dir() {
|
||||
true => {
|
||||
let rs = visit_dir_one_level(&full_path, &state, None).await;
|
||||
let rs = visit_dir_one_level(&full_path, &state).await;
|
||||
match rs {
|
||||
Ok(files) => Ok(ResponseResult::DirTemplate(DirListTemplate {
|
||||
lister: DirLister { files },
|
||||
@@ -76,7 +89,7 @@ pub async fn serve_flists(
|
||||
cur_path: path.to_string(),
|
||||
message: format!("Unhandled error: {}", err),
|
||||
})),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_path(path: &str) -> io::Result<PathBuf> {
|
||||
@@ -99,7 +112,6 @@ fn validate_path(path: &str) -> io::Result<PathBuf> {
|
||||
pub async fn visit_dir_one_level<P: AsRef<std::path::Path>>(
|
||||
path: P,
|
||||
state: &Arc<config::AppState>,
|
||||
filter: Option<Filter>,
|
||||
) -> io::Result<Vec<FileInfo>> {
|
||||
let path = path.as_ref();
|
||||
let mut dir = tokio::fs::read_dir(path).await?;
|
||||
@@ -109,7 +121,6 @@ pub async fn visit_dir_one_level<P: AsRef<std::path::Path>>(
|
||||
let path_uri = child.path().to_string_lossy().to_string();
|
||||
let is_file = child.file_type().await?.is_file();
|
||||
let name = child.file_name().to_string_lossy().to_string();
|
||||
let size = child.metadata().await?.len();
|
||||
|
||||
let mut progress = 0.0;
|
||||
if is_file {
|
||||
@@ -134,31 +145,11 @@ pub async fn visit_dir_one_level<P: AsRef<std::path::Path>>(
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref filter_files) = filter {
|
||||
if let Some(ref filter_name) = filter_files.name {
|
||||
if filter_name.clone() != name {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref filter_max_size) = filter_files.max_size {
|
||||
if filter_max_size.clone() < size as usize {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref filter_min_size) = filter_files.min_size {
|
||||
if filter_min_size.clone() > size as usize {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
files.push(FileInfo {
|
||||
name,
|
||||
path_uri,
|
||||
is_file,
|
||||
size: size,
|
||||
size: child.metadata().await?.len(),
|
||||
last_modified: child
|
||||
.metadata()
|
||||
.await?
|
||||
components/rfs/src/server/website_handlers.rs (new file, 197 lines)
@@ -0,0 +1,197 @@
|
||||
use crate::fungi::{meta, Reader};
|
||||
use aes_gcm::{
|
||||
aead::{Aead, KeyInit},
|
||||
Aes256Gcm, Nonce,
|
||||
};
|
||||
use anyhow::{Context, Result};
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
http::StatusCode,
|
||||
response::IntoResponse,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use mime_guess::from_path;
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
use tempfile::NamedTempFile;
|
||||
// OpenApi is now only used in the main handlers.rs file
|
||||
|
||||
use crate::server::{config::AppState, db::DB, response::ResponseError};
|
||||
|
||||
// Website API endpoints are included in the main FlistApi in handlers.rs
|
||||
|
||||
/// Resolves a file path within a flist database to get file information
|
||||
async fn get_file_from_flist(flist_content: &[u8], file_path: &str) -> Result<Vec<meta::Block>> {
|
||||
// Create a temporary file
|
||||
let temp_file = NamedTempFile::new().context("failed to create temporary file")?;
|
||||
|
||||
// Write flist content to the temporary file
|
||||
fs::write(temp_file.path(), flist_content)
|
||||
.context("failed to write flist content to temporary file")?;
|
||||
|
||||
// Open the flist file as a database using the existing Reader
|
||||
let reader = Reader::new(temp_file.path().to_str().unwrap())
|
||||
.await
|
||||
.context("failed to open flist as a database")?;
|
||||
|
||||
// Find the root inode
|
||||
let root_inode: u64 = reader
|
||||
.root_inode()
|
||||
.await
|
||||
.context("failed to find root inode")?
|
||||
.ino;
|
||||
|
||||
// Split the path and traverse
|
||||
let mut current_inode = root_inode;
|
||||
let path_components: Vec<&str> = file_path.split('/').collect();
|
||||
|
||||
for (i, component) in path_components.iter().enumerate() {
|
||||
if component.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// If this is the last component, get file info
|
||||
if i == path_components.len() - 1 {
|
||||
let file_inode = match reader.lookup(current_inode, component).await {
|
||||
Ok(inode) => match inode {
|
||||
Some(inode) => inode.ino,
|
||||
None => {
|
||||
anyhow::bail!("file not found");
|
||||
}
|
||||
},
|
||||
Err(err) => return Err(anyhow::Error::new(err).context("failed to lookup inode")),
|
||||
};
|
||||
|
||||
// Get blocks
|
||||
let blocks: Vec<meta::Block> = reader
|
||||
.blocks(file_inode)
|
||||
.await
|
||||
.context("failed to get blocks")?;
|
||||
|
||||
return Ok(blocks);
|
||||
}
|
||||
|
||||
// Find the next inode in the path
|
||||
current_inode = match reader.lookup(current_inode, component).await {
|
||||
Ok(inode) => match inode {
|
||||
Some(inode) => inode.ino,
|
||||
None => {
|
||||
anyhow::bail!("directory not found");
|
||||
}
|
||||
},
|
||||
Err(err) => return Err(anyhow::Error::new(err).context("failed to lookup inode")),
|
||||
};
|
||||
}
|
||||
|
||||
anyhow::bail!("file not found")
|
||||
}
|
||||
|
||||
async fn decrypt_block(state: &Arc<AppState>, block: &meta::Block) -> Result<Vec<u8>> {
|
||||
let encrypted = match state.db.get_block(&hex::encode(block.id)).await {
|
||||
Ok(Some(block_content)) => block_content,
|
||||
Ok(None) => {
|
||||
anyhow::bail!("Block {:?} not found", block.id);
|
||||
}
|
||||
Err(err) => {
|
||||
anyhow::bail!("Failed to get block {:?}: {}", block.id, err);
|
||||
}
|
||||
};
|
||||
|
||||
let cipher =
|
||||
Aes256Gcm::new_from_slice(&block.key).map_err(|_| anyhow::anyhow!("key is invalid"))?;
|
||||
let nonce = Nonce::from_slice(&block.key[..12]);
|
||||
|
||||
let compressed = cipher
|
||||
.decrypt(nonce, encrypted.as_slice())
|
||||
.map_err(|_| anyhow::anyhow!("encryption error"))?;
|
||||
|
||||
let mut decoder = snap::raw::Decoder::new();
|
||||
let plain = decoder.decompress_vec(&compressed)?;
|
||||
|
||||
Ok(plain)
|
||||
}
|
||||
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/website/{website_hash}/{path}",
|
||||
tag = "Website Serving",
|
||||
responses(
|
||||
(status = 200, description = "Website file served successfully", content_type = "application/octet-stream", body = [u8]),
|
||||
(status = 404, description = "File not found", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("website_hash" = String, Path, description = "flist hash of the website directory"),
|
||||
("path" = String, Path, description = "Path to the file within the website directory, defaults to index.html if empty")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn serve_website_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((website_hash, path)): Path<(String, String)>,
|
||||
) -> impl IntoResponse {
|
||||
// If no path is provided, default to index.html
|
||||
let file_path = if path.is_empty() {
|
||||
"index.html".to_string()
|
||||
} else {
|
||||
path
|
||||
};
|
||||
|
||||
// Get the flist using the website hash
|
||||
let flist = match state.db.get_file_by_hash(&website_hash).await {
|
||||
Ok(Some(file)) => file,
|
||||
Ok(None) => {
|
||||
return Err(ResponseError::NotFound(format!(
|
||||
"Flist with hash '{}' not found",
|
||||
website_hash
|
||||
)));
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve flist metadata: {}", err);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
};
|
||||
|
||||
// Resolve the file information from the flist content
|
||||
let file_blocks = match get_file_from_flist(&flist.file_content, &file_path).await {
|
||||
Ok(blocks) => blocks,
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
"Failed to resolve file '{}' from flist '{}': {}",
|
||||
file_path,
|
||||
website_hash,
|
||||
err
|
||||
);
|
||||
return Err(ResponseError::NotFound(format!(
|
||||
"File {} not found in flist {}",
|
||||
file_path, website_hash
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let mut file_content = Vec::new();
|
||||
for block in file_blocks {
|
||||
match decrypt_block(&state, &block).await {
|
||||
Ok(block_content) => file_content.extend(block_content),
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
"Failed to decrypt block {:?} for file '{}' in website '{}': {}",
|
||||
block.id,
|
||||
file_path,
|
||||
website_hash,
|
||||
err
|
||||
);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mime_type = from_path(&file_path).first_or_octet_stream();
|
||||
|
||||
Ok((
|
||||
StatusCode::OK,
|
||||
[(axum::http::header::CONTENT_TYPE, mime_type.to_string())],
|
||||
file_content,
|
||||
)
|
||||
.into_response())
|
||||
}
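// Serving flow recap (descriptive only): the website hash resolves to a stored flist,
// the requested path is looked up inside that flist to obtain its block list, each
// block is fetched from the DB, AES-256-GCM decrypted and snappy-decompressed, and the
// concatenated bytes are returned with a MIME type guessed from the file extension.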
|
||||
components/rfs/src/server_api.rs (new file, 380 lines)
@@ -0,0 +1,380 @@
|
||||
use anyhow::{Context, Result};
|
||||
use reqwest::Client;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct VerifyBlock {
|
||||
/// Block hash to verify
|
||||
pub block_hash: String,
|
||||
/// File hash associated with the block
|
||||
pub file_hash: String,
|
||||
/// Block index within the file
|
||||
pub block_index: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct VerifyBlocksRequest {
|
||||
blocks: Vec<VerifyBlock>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct VerifyBlocksResponse {
|
||||
missing: Vec<String>,
|
||||
}
|
||||
|
||||
/// Response structure for the blocks endpoint
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
struct BlocksResponse {
|
||||
blocks: Vec<(String, u64)>,
|
||||
}
|
||||
|
||||
/// Response for listing blocks
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ListBlocksResponse {
|
||||
pub blocks: Vec<String>,
|
||||
pub total: u64,
|
||||
pub page: u32,
|
||||
pub per_page: u32,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct SigninResponse {
|
||||
pub access_token: String,
|
||||
}
|
||||
|
||||
/// Response for user blocks endpoint
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct UserBlocksResponse {
|
||||
/// List of blocks with their sizes
|
||||
pub blocks: Vec<(String, u64)>,
|
||||
/// Total number of blocks
|
||||
pub total: u64,
|
||||
/// Total number of all blocks
|
||||
pub all_blocks: u64,
|
||||
}
|
||||
|
||||
/// Response for block downloads endpoint
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct BlockDownloadsResponse {
|
||||
/// Block hash
|
||||
pub block_hash: String,
|
||||
/// Number of times the block has been downloaded
|
||||
pub downloads_count: u64,
|
||||
/// Size of the block in bytes
|
||||
pub block_size: u64,
|
||||
}
|
||||
|
||||
/// Downloads blocks associated with a hash (file hash or block hash)
|
||||
/// Returns a vector of (block_hash, block_index) pairs
|
||||
pub async fn get_blocks_by_hash(hash: &str, server_url: String) -> Result<Vec<(String, u64)>> {
|
||||
info!("Getting blocks for hash: {}", hash);
|
||||
|
||||
// Create HTTP client
|
||||
let client = Client::new();
|
||||
|
||||
// Construct the blocks URL
|
||||
let blocks_url = format!("{}/api/v1/blocks/{}", server_url, hash);
|
||||
|
||||
info!("Requesting blocks from: {}", blocks_url);
|
||||
|
||||
// Send GET request to get the blocks
|
||||
let response = client
|
||||
.get(&blocks_url)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to get blocks from server")?;
|
||||
|
||||
// Check if the request was successful
|
||||
if !response.status().is_success() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Server returned error: {} - {}",
|
||||
response.status(),
|
||||
response.text().await?
|
||||
));
|
||||
}
|
||||
|
||||
// Parse the response
|
||||
let blocks_response: BlocksResponse = response
|
||||
.json()
|
||||
.await
|
||||
.context("Failed to parse blocks response")?;
|
||||
|
||||
info!("Retrieved {} blocks", blocks_response.blocks.len());
|
||||
|
||||
Ok(blocks_response.blocks)
|
||||
}
|
||||
|
||||
pub async fn download_block(block_hash: &str, server_url: &str) -> Result<bytes::Bytes> {
|
||||
let block_url = format!("{}/api/v1/block/{}", server_url, block_hash);
|
||||
|
||||
// Create HTTP client
|
||||
let client = Client::new();
|
||||
|
||||
// Send GET request to download the block
|
||||
let response = client
|
||||
.get(&block_url)
|
||||
.send()
|
||||
.await
|
||||
.context(format!("Failed to download block {}", block_hash))?;
|
||||
|
||||
// Check if the request was successful
|
||||
if !response.status().is_success() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Server returned error for block {}: {} - {}",
|
||||
block_hash,
|
||||
response.status(),
|
||||
response.text().await?
|
||||
));
|
||||
}
|
||||
|
||||
// Get the block content
|
||||
let block_content = response
|
||||
.bytes()
|
||||
.await
|
||||
.context("Failed to read block content")?;
|
||||
info!(
|
||||
"Downloaded block {} ({} bytes)",
|
||||
block_hash,
|
||||
block_content.len()
|
||||
);
|
||||
|
||||
Ok(block_content)
|
||||
}
|
||||
|
||||
/// Verifies which blocks are missing on the server
|
||||
pub async fn verify_blocks_with_server(
|
||||
client: &Client,
|
||||
server_url: String,
|
||||
blocks: Vec<VerifyBlock>,
|
||||
) -> Result<Vec<String>> {
|
||||
let verify_url = format!("{}/api/v1/block/verify", server_url);
|
||||
let verify_request = VerifyBlocksRequest { blocks };
|
||||
|
||||
info!("Verifying blocks with server: {}", verify_url);
|
||||
|
||||
let response = client
|
||||
.post(&verify_url)
|
||||
.json(&verify_request)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to verify blocks with server")?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Server returned error: {} - {}",
|
||||
response.status(),
|
||||
response.text().await?
|
||||
));
|
||||
}
|
||||
|
||||
let verify_response: VerifyBlocksResponse = response
|
||||
.json()
|
||||
.await
|
||||
.context("Failed to parse server response")?;
|
||||
|
||||
Ok(verify_response.missing)
|
||||
}
|
||||
|
||||
/// Uploads a single block to the server
|
||||
pub async fn upload_block(
|
||||
client: Arc<Client>,
|
||||
server_url: String,
|
||||
hash: String,
|
||||
data: Vec<u8>,
|
||||
file_hash: String,
|
||||
idx: u64,
|
||||
token: String,
|
||||
) -> Result<()> {
|
||||
let upload_block_url = format!("{}/api/v1/block", server_url);
|
||||
|
||||
info!("Uploading block: {}", hash);
|
||||
|
||||
// Send the data directly as bytes with query parameters
|
||||
let response = client
|
||||
.post(&upload_block_url)
|
||||
.header("Content-Type", "application/octet-stream")
|
||||
.header("Authorization", format!("Bearer {}", token)) // Add Authorization header
|
||||
.query(&[("file_hash", &file_hash), ("idx", &idx.to_string())])
|
||||
.body(data)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to upload block")?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Failed to upload block {}: {} - {}",
|
||||
hash,
|
||||
response.status(),
|
||||
response.text().await?
|
||||
));
|
||||
}
|
||||
|
||||
if response.status() == 200 {
|
||||
info!("Block {} already exists on server", hash);
|
||||
}
|
||||
if response.status() == 201 {
|
||||
info!("Successfully uploaded block: {}", hash);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
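// Hedged usage sketch (not part of the original change): uploading a standalone block.
// `server_url` and `token` are placeholders (a token can be obtained via `signin` below);
// the hash convention mirrors Block::calculate_hash (32-byte BLAKE2b, hex-encoded).
#[allow(dead_code)]
async fn example_upload_one_block(server_url: &str, token: &str, data: Vec<u8>) -> Result<()> {
    let hash = hex::encode(
        blake2b_simd::Params::new()
            .hash_length(32)
            .hash(&data)
            .as_bytes(),
    );
    upload_block(
        Arc::new(Client::new()),
        server_url.to_string(),
        hash,
        data,
        String::new(), // standalone block: no parent file hash
        0,             // standalone block: index 0
        token.to_string(),
    )
    .await
}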
|
||||
|
||||
/// Checks if a block exists on the server by its hash.
|
||||
pub async fn check_block(server_url: &str, hash: &str) -> Result<bool> {
|
||||
let url = format!("{}/api/v1/block/{}", server_url, hash);
|
||||
|
||||
let client = Client::new();
|
||||
let response = client
|
||||
.head(&url)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to send request to check block")?;
|
||||
|
||||
match response.status() {
|
||||
reqwest::StatusCode::OK => Ok(true), // Block exists
|
||||
reqwest::StatusCode::NOT_FOUND => Ok(false), // Block does not exist
|
||||
_ => Err(anyhow::anyhow!(
|
||||
"Unexpected response from server: {}",
|
||||
response.status()
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Lists blocks available on the server with pagination.
|
||||
/// Returns the block hashes for the requested page together with the total block count.
|
||||
pub async fn list_blocks(
|
||||
server_url: &str,
|
||||
page_size: usize,
|
||||
page: usize,
|
||||
) -> Result<(Vec<String>, u64)> {
|
||||
let blocks_url = format!(
|
||||
"{}/api/v1/blocks?page={}&page_size={}",
|
||||
server_url, page, page_size
|
||||
);
|
||||
|
||||
// Create HTTP client
|
||||
let client = Client::new();
|
||||
|
||||
// Send GET request to get blocks for the current page
|
||||
let response = client
|
||||
.get(&blocks_url)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to list blocks from server")?;
|
||||
|
||||
// Check if the request was successful
|
||||
if !response.status().is_success() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Server returned error: {} - {}",
|
||||
response.status(),
|
||||
response.text().await?
|
||||
));
|
||||
}
|
||||
|
||||
// Parse the response
|
||||
let blocks_response: ListBlocksResponse = response
|
||||
.json()
|
||||
.await
|
||||
.context("Failed to parse blocks response")?;
|
||||
|
||||
Ok((blocks_response.blocks, blocks_response.total))
|
||||
}
|
||||
|
||||
pub async fn signin(
|
||||
client: &Client,
|
||||
server_url: &str,
|
||||
username: &str,
|
||||
password: &str,
|
||||
) -> Result<String> {
|
||||
let response = client
|
||||
.post(format!("{}/api/v1/signin", server_url))
|
||||
.json(&serde_json::json!({
|
||||
"username": username,
|
||||
"password": password,
|
||||
}))
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to send request to signin endpoint")?;
|
||||
|
||||
if response.status().is_success() {
|
||||
let signin_response: SigninResponse =
|
||||
response.json().await.context("Failed to parse response")?;
|
||||
Ok(signin_response.access_token)
|
||||
} else {
|
||||
anyhow::bail!("Failed to retrieve token: {}", response.status());
|
||||
}
|
||||
}
|
||||
|
||||
/// Get all blocks uploaded by the authenticated user
|
||||
pub async fn get_user_blocks(
|
||||
server_url: &str,
|
||||
token: &str,
|
||||
page: Option<u32>,
|
||||
per_page: Option<u32>,
|
||||
) -> Result<UserBlocksResponse> {
|
||||
let url = format!(
|
||||
"{}/api/v1/user/blocks?page={}&per_page={}",
|
||||
server_url,
|
||||
page.unwrap_or(1),
|
||||
per_page.unwrap_or(50)
|
||||
);
|
||||
|
||||
// Create HTTP client
|
||||
let client = Client::new();
|
||||
|
||||
// Send GET request with authorization header
|
||||
let response = client
|
||||
.get(&url)
|
||||
.header("Authorization", format!("Bearer {}", token))
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to get user blocks from server")?;
|
||||
|
||||
// Check if the request was successful
|
||||
if !response.status().is_success() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Server returned error: {}",
|
||||
response.status(),
|
||||
));
|
||||
}
|
||||
|
||||
// Parse the response
|
||||
let blocks_response: UserBlocksResponse = response
|
||||
.json()
|
||||
.await
|
||||
.context("Failed to parse user blocks response")?;
|
||||
|
||||
Ok(blocks_response)
|
||||
}
|
||||
|
||||
/// Get the download count for a specific block
|
||||
pub async fn get_block_downloads(server_url: &str, hash: &str) -> Result<BlockDownloadsResponse> {
|
||||
let url = format!("{}/api/v1/block/{}/downloads", server_url, hash);
|
||||
|
||||
// Create HTTP client
|
||||
let client = Client::new();
|
||||
|
||||
// Send GET request
|
||||
let response = client
|
||||
.get(&url)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to get block downloads from server")?;
|
||||
|
||||
// Check if the request was successful
|
||||
if !response.status().is_success() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"Server returned error: {}",
|
||||
response.status(),
|
||||
));
|
||||
}
|
||||
|
||||
// Parse the response
|
||||
let downloads_response: BlockDownloadsResponse = response
|
||||
.json()
|
||||
.await
|
||||
.context("Failed to parse block downloads response")?;
|
||||
|
||||
Ok(downloads_response)
|
||||
}
|
||||
@@ -3,6 +3,7 @@ pub mod dir;
|
||||
pub mod http;
|
||||
mod router;
|
||||
pub mod s3store;
|
||||
pub mod server;
|
||||
pub mod zdb;
|
||||
|
||||
use anyhow::Context;
|
||||
@@ -23,6 +24,7 @@ pub async fn make<U: AsRef<str>>(u: U) -> Result<Stores> {
|
||||
"s3" | "s3s" | "s3s+tls" => return Ok(Stores::S3(s3store::S3Store::make(&u).await?)),
|
||||
"zdb" => return Ok(Stores::ZDB(zdb::ZdbStore::make(&u).await?)),
|
||||
"http" | "https" => return Ok(Stores::HTTP(http::HTTPStore::make(&u).await?)),
|
||||
server::SCHEME => return Ok(Stores::Server(server::ServerStore::make(&u).await?)),
|
||||
_ => return Err(Error::UnknownStore(parsed.scheme().into())),
|
||||
}
|
||||
}
|
||||
@@ -204,11 +206,13 @@ pub async fn parse_router(urls: &[String]) -> anyhow::Result<Router<Stores>> {
|
||||
Ok(router)
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum Stores {
|
||||
S3(s3store::S3Store),
|
||||
Dir(dir::DirStore),
|
||||
ZDB(zdb::ZdbStore),
|
||||
HTTP(http::HTTPStore),
|
||||
Server(server::ServerStore),
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -219,6 +223,7 @@ impl Store for Stores {
|
||||
self::Stores::Dir(dir_store) => dir_store.get(key).await,
|
||||
self::Stores::ZDB(zdb_store) => zdb_store.get(key).await,
|
||||
self::Stores::HTTP(http_store) => http_store.get(key).await,
|
||||
self::Stores::Server(server_store) => server_store.get(key).await,
|
||||
}
|
||||
}
|
||||
async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
|
||||
@@ -227,6 +232,7 @@ impl Store for Stores {
|
||||
self::Stores::Dir(dir_store) => dir_store.set(key, blob).await,
|
||||
self::Stores::ZDB(zdb_store) => zdb_store.set(key, blob).await,
|
||||
self::Stores::HTTP(http_store) => http_store.set(key, blob).await,
|
||||
self::Stores::Server(server_store) => server_store.set(key, blob).await,
|
||||
}
|
||||
}
|
||||
fn routes(&self) -> Vec<Route> {
|
||||
@@ -235,6 +241,7 @@ impl Store for Stores {
|
||||
self::Stores::Dir(dir_store) => dir_store.routes(),
|
||||
self::Stores::ZDB(zdb_store) => zdb_store.routes(),
|
||||
self::Stores::HTTP(http_store) => http_store.routes(),
|
||||
self::Stores::Server(server_store) => server_store.routes(),
|
||||
}
|
||||
}
|
||||
}
|
||||
components/rfs/src/store/server.rs (new file, 106 lines)
@@ -0,0 +1,106 @@
|
||||
use super::{Error, Result, Route, Store};
|
||||
use crate::server_api;
|
||||
use reqwest::Client;
|
||||
use std::sync::Arc;
|
||||
use url;
|
||||
|
||||
pub const SCHEME: &str = "server";
|
||||
|
||||
/// ServerStore is a store that interfaces with the fl-server's API
|
||||
/// It supports both uploads and downloads for blocks using the server's HTTP API
|
||||
#[derive(Clone)]
|
||||
pub struct ServerStore {
|
||||
/// Server URL
|
||||
server_url: String,
|
||||
/// HTTP client for making requests
|
||||
client: Arc<Client>,
|
||||
/// Authentication token
|
||||
token: Option<String>,
|
||||
}
|
||||
|
||||
impl ServerStore {
|
||||
pub async fn make<U: AsRef<str>>(url: &U) -> Result<ServerStore> {
|
||||
let u = url::Url::parse(url.as_ref())?;
|
||||
if u.scheme() != SCHEME {
|
||||
return Err(Error::InvalidScheme(u.scheme().into(), SCHEME.into()));
|
||||
}
|
||||
|
||||
// Extract the token from the query parameters
|
||||
let token = u
|
||||
.query_pairs()
|
||||
.find(|(key, _)| key == "token")
|
||||
.map(|(_, value)| value.to_string());
|
||||
|
||||
// Extract the actual server URL (e.g., "http://localhost:4000")
|
||||
let server_url = u
|
||||
.host_str()
|
||||
.map(|host| format!("{}://{}", host, u.path().trim_start_matches('/')))
|
||||
.ok_or_else(|| Error::InvalidScheme("Invalid host in URL".into(), SCHEME.into()))?;
|
||||
|
||||
let client = Arc::new(Client::new());
|
||||
|
||||
Ok(Self {
|
||||
server_url,
|
||||
client,
|
||||
token,
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a new ServerStore with the given server URL
|
||||
pub fn new(server_url: String, token: Option<String>) -> Self {
|
||||
let client = Arc::new(Client::new());
|
||||
|
||||
Self {
|
||||
server_url,
|
||||
client,
|
||||
token,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Store for ServerStore {
|
||||
async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
|
||||
// Convert the key to a hex string
|
||||
let hash = hex::encode(key);
|
||||
|
||||
// Download the block from the server
|
||||
match server_api::download_block(&hash, &self.server_url).await {
|
||||
Ok(data) => Ok(data.to_vec()),
|
||||
Err(err) => {
|
||||
// Check if the error is because the block doesn't exist
|
||||
if err.to_string().contains("404") {
|
||||
return Err(Error::KeyNotFound);
|
||||
}
|
||||
Err(Error::Other(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
|
||||
// Convert the key to a hex string
|
||||
let hash = hex::encode(key);
|
||||
|
||||
// Upload the block to the server
|
||||
let file_hash = "".to_string(); // Use the hash as the file hash for simplicity
|
||||
let idx = 0; // Standalone block: the index within a file is not meaningful, so use 0
|
||||
|
||||
server_api::upload_block(
|
||||
Arc::clone(&self.client),
|
||||
self.server_url.clone(),
|
||||
hash,
|
||||
blob.to_vec(),
|
||||
file_hash,
|
||||
idx,
|
||||
self.token.clone().unwrap_or_default(),
|
||||
)
|
||||
.await
|
||||
.map_err(|err| Error::Other(err))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn routes(&self) -> Vec<Route> {
|
||||
vec![Route::url(format!("{}://{}", SCHEME, self.server_url))]
|
||||
}
|
||||
}
|
||||
components/rfs/src/sync.rs (new file, 287 lines)
@@ -0,0 +1,287 @@
|
||||
use anyhow::Result;
|
||||
use futures::{stream, StreamExt};
|
||||
use reqwest::Client;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use crate::server_api::{self, VerifyBlock};
|
||||
|
||||
const PARALLEL_OPERATIONS: usize = 20;
|
||||
const DEFAULT_PAGE_SIZE: usize = 50;
|
||||
|
||||
/// Syncs a file or block between two servers using its hash
|
||||
pub async fn sync(
|
||||
hash: Option<&str>,
|
||||
source_server: &str,
|
||||
dest_server: &str,
|
||||
token: &str,
|
||||
) -> Result<()> {
|
||||
if token.is_empty() {
|
||||
return Err(anyhow::anyhow!("Authentication token is required. Use --token option or set RFS_TOKEN environment variable."));
|
||||
}
|
||||
|
||||
if hash.is_some() {
|
||||
return sync_blocks(hash.unwrap(), source_server, dest_server, token).await;
|
||||
}
|
||||
sync_all_blocks(source_server, dest_server, Some(DEFAULT_PAGE_SIZE), token).await
|
||||
}
|
||||
|
||||
/// Syncs all blocks of a file between two servers
|
||||
async fn sync_blocks(
|
||||
file_hash: &str,
|
||||
source_server: &str,
|
||||
dest_server: &str,
|
||||
token: &str,
|
||||
) -> Result<()> {
|
||||
// Get all blocks for the file from source server
|
||||
info!("Getting blocks for file hash: {}", file_hash);
|
||||
let blocks = server_api::get_blocks_by_hash(file_hash, source_server.to_string()).await?;
|
||||
|
||||
if blocks.is_empty() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"No blocks found for file hash: {}",
|
||||
file_hash
|
||||
));
|
||||
}
|
||||
|
||||
info!("File has {} blocks", blocks.len());
|
||||
|
||||
// Create a client for API requests
|
||||
let client = Arc::new(Client::new());
|
||||
|
||||
// Prepare blocks with metadata for verification
|
||||
let blocks_with_metadata: Vec<VerifyBlock> = blocks
|
||||
.iter()
|
||||
.map(|(hash, idx)| VerifyBlock {
|
||||
block_hash: hash.clone(),
|
||||
file_hash: file_hash.to_string(),
|
||||
block_index: *idx,
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Verify which blocks are missing on the destination server
|
||||
let missing_blocks = server_api::verify_blocks_with_server(
|
||||
&client,
|
||||
dest_server.to_string(),
|
||||
blocks_with_metadata,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if missing_blocks.is_empty() {
|
||||
info!("All blocks already exist on destination server");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
info!(
|
||||
"{} of {} blocks are missing on destination server",
|
||||
missing_blocks.len(),
|
||||
blocks.len()
|
||||
);
|
||||
|
||||
// Create a semaphore to limit concurrent operations
|
||||
let semaphore = Arc::new(Semaphore::new(PARALLEL_OPERATIONS));
|
||||
|
||||
// Download missing blocks from source and upload to destination in parallel
|
||||
let results = stream::iter(blocks.iter())
|
||||
.filter_map(|(block_hash, block_idx)| {
|
||||
let is_missing = missing_blocks.iter().any(|hash| hash == block_hash);
|
||||
|
||||
if !is_missing {
|
||||
return futures::future::ready(None);
|
||||
}
|
||||
|
||||
let block_hash = block_hash.clone();
|
||||
let source_server = source_server.to_string();
|
||||
let dest_server = dest_server.to_string();
|
||||
let file_hash = file_hash.to_string();
|
||||
let block_idx = *block_idx;
|
||||
let permit = semaphore.clone();
|
||||
let client = client.clone();
|
||||
let token = token.to_string();
|
||||
|
||||
futures::future::ready(Some(async move {
|
||||
// Acquire a permit from the semaphore
|
||||
let _permit = permit
|
||||
.acquire()
|
||||
.await
|
||||
.expect("Failed to acquire semaphore permit");
|
||||
|
||||
info!("Syncing block {} (index: {})", block_hash, block_idx);
|
||||
|
||||
// Download the block from source server
|
||||
match server_api::download_block(&block_hash, &source_server).await {
|
||||
Ok(content) => {
|
||||
// Upload the block to destination server
|
||||
server_api::upload_block(
|
||||
client,
|
||||
dest_server,
|
||||
block_hash.clone(),
|
||||
content.to_vec(),
|
||||
file_hash,
|
||||
block_idx,
|
||||
token.clone(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| (block_hash.clone(), e))
|
||||
}
|
||||
Err(e) => Err((block_hash.clone(), e)),
|
||||
}
|
||||
}))
|
||||
})
|
||||
.buffer_unordered(PARALLEL_OPERATIONS)
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
// Check for any errors in the sync operations
|
||||
let mut has_errors = false;
|
||||
for result in results {
|
||||
if let Err((block_hash, e)) = result {
|
||||
has_errors = true;
|
||||
error!("Failed to sync block {}: {}", block_hash, e);
|
||||
}
|
||||
}
|
||||
|
||||
if has_errors {
|
||||
Err(anyhow::anyhow!("Some blocks failed to sync"))
|
||||
} else {
|
||||
info!("All blocks synced successfully");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Syncs all blocks between two servers
|
||||
pub async fn sync_all_blocks(
|
||||
source_server: &str,
|
||||
dest_server: &str,
|
||||
page_size: Option<usize>,
|
||||
token: &str,
|
||||
) -> Result<()> {
|
||||
info!("Starting full block sync between servers");
|
||||
info!("Source server: {}", source_server);
|
||||
info!("Destination server: {}", dest_server);
|
||||
|
||||
let page_size = page_size.unwrap_or(DEFAULT_PAGE_SIZE);
|
||||
let client = Arc::new(Client::new());
|
||||
let semaphore = Arc::new(Semaphore::new(PARALLEL_OPERATIONS));
|
||||
|
||||
let mut page = 1;
|
||||
let mut total_blocks = 0;
|
||||
let mut total_synced = 0;
|
||||
let mut total_failed = 0;
|
||||
|
||||
loop {
|
||||
// Get a page of blocks from the source server
|
||||
info!("Fetching blocks page {} (size: {})", page, page_size);
|
||||
let (blocks, total) = match server_api::list_blocks(source_server, page_size, page).await {
|
||||
Ok(result) => result,
|
||||
Err(e) => {
|
||||
error!("Failed to list blocks from source server: {}", e);
|
||||
return Err(anyhow::anyhow!("Failed to list blocks from source server"));
|
||||
}
|
||||
};
|
||||
|
||||
if blocks.is_empty() {
|
||||
info!("No more blocks to sync");
|
||||
break;
|
||||
}
|
||||
|
||||
total_blocks = total;
|
||||
info!(
|
||||
"Retrieved {} blocks (page {}/{})",
|
||||
blocks.len(),
|
||||
page,
|
||||
(total_blocks as f64 / page_size as f64).ceil() as usize
|
||||
);
|
||||
|
||||
// Process blocks in parallel
|
||||
let results = stream::iter(blocks.iter())
|
||||
.map(|block_hash| {
|
||||
let block_hash = block_hash.clone();
|
||||
let source_server = source_server.to_string();
|
||||
let dest_server = dest_server.to_string();
|
||||
let permit = semaphore.clone();
|
||||
let client = client.clone();
|
||||
let token = token.to_string();
|
||||
|
||||
async move {
|
||||
// Acquire a permit from the semaphore
|
||||
let _permit = permit
|
||||
.acquire()
|
||||
.await
|
||||
.expect("Failed to acquire semaphore permit");
|
||||
|
||||
// Check if block exists on destination server
|
||||
match server_api::check_block(&dest_server, &block_hash).await {
|
||||
Ok(exists) => {
|
||||
if exists {
|
||||
// Block already exists on destination server
|
||||
debug!("Block {} already exists on destination server", block_hash);
|
||||
return Ok(block_hash);
|
||||
}
|
||||
|
||||
info!("Syncing block {}", block_hash);
|
||||
|
||||
// Download the block from source server
|
||||
match server_api::download_block(&block_hash, &source_server).await {
|
||||
Ok(content) => {
|
||||
// Upload the block to destination server
|
||||
// Note: We don't have file_hash and block_index for this block
|
||||
// so we use empty string and 0 as placeholders
|
||||
server_api::upload_block(
|
||||
client,
|
||||
dest_server,
|
||||
block_hash.clone(),
|
||||
content.to_vec(),
|
||||
"".to_string(),
|
||||
0,
|
||||
token.clone(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| (block_hash.clone(), e))
|
||||
.map(|_| block_hash)
|
||||
}
|
||||
Err(e) => Err((block_hash.clone(), e)),
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to check if block {} exists: {}", block_hash, e);
|
||||
Err((block_hash, e))
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
.buffer_unordered(PARALLEL_OPERATIONS)
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
// Process results
|
||||
for result in results {
|
||||
match result {
|
||||
Ok(_) => total_synced += 1,
|
||||
Err((block_hash, e)) => {
|
||||
total_failed += 1;
|
||||
error!("Failed to sync block {}: {}", block_hash, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
"Progress: {}/{} blocks synced ({} failed)",
|
||||
total_synced, total_blocks, total_failed
|
||||
);
|
||||
|
||||
// Move to the next page
|
||||
page += 1;
|
||||
}
|
||||
|
||||
info!(
|
||||
"Block sync completed: {}/{} blocks synced ({} failed)",
|
||||
total_synced, total_blocks, total_failed
|
||||
);
|
||||
|
||||
if total_failed > 0 {
|
||||
Err(anyhow::anyhow!("{} blocks failed to sync", total_failed))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
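A hedged usage sketch for the two sync entry points above; the server URLs, the file hash, and the `rfs::sync` module path are placeholders, not values confirmed by this commit:

```rust
// Sketch only: the module path and example values are assumptions.
use rfs::sync::{sync, sync_all_blocks};

async fn sync_examples() -> anyhow::Result<()> {
    let token = std::env::var("RFS_TOKEN")?;

    // Sync the blocks of one file, identified by its file hash.
    sync(
        Some("d4735e3a265e16ee..."), // hypothetical file hash
        "http://source:4000",
        "http://dest:4000",
        &token,
    )
    .await?;

    // Or walk every block on the source server, 50 per page, copying only
    // the blocks the destination does not already have.
    sync_all_blocks("http://source:4000", "http://dest:4000", Some(50), &token).await?;
    Ok(())
}
```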
40
components/rfs/src/tree_visitor.rs
Normal file
@@ -0,0 +1,40 @@
|
||||
use crate::fungi::meta::{FileType, Inode, Result, Walk, WalkVisitor};
|
||||
use std::path::Path;
|
||||
|
||||
pub struct TreeVisitor {
|
||||
// We don't need to track depth since the path already contains the structure
|
||||
}
|
||||
|
||||
impl TreeVisitor {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
|
||||
fn print_entry(&self, path: &Path, node: &Inode) {
|
||||
// Calculate depth from the path
|
||||
let depth = path.components().count().saturating_sub(1);
|
||||
let indent = " ".repeat(depth);
|
||||
let file_type = match node.mode.file_type() {
|
||||
FileType::Dir => "📁",
|
||||
FileType::Regular => "📄",
|
||||
FileType::Link => "🔗",
|
||||
_ => "❓",
|
||||
};
|
||||
|
||||
// Get just the filename
|
||||
let name = path
|
||||
.file_name()
|
||||
.map(|n| n.to_string_lossy())
|
||||
.unwrap_or_else(|| path.to_string_lossy());
|
||||
|
||||
println!("{}{} {}", indent, file_type, name);
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WalkVisitor for TreeVisitor {
|
||||
async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
|
||||
self.print_entry(path, node);
|
||||
Ok(Walk::Continue)
|
||||
}
|
||||
}
|
||||
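To illustrate the indentation rule in `print_entry` (depth is the component count of the visited path minus one, two spaces per level), a hypothetical walk over absolute paths would render roughly as:

```
📁 /
  📁 etc
    🔗 resolv.conf
    📁 ssh
      📄 sshd_config
```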
@@ -1,6 +1,6 @@
|
||||
use crate::cache::Cache;
|
||||
use crate::fungi::{
|
||||
meta::{FileType, Inode, Result, Walk, WalkVisitor},
|
||||
meta::{Block, FileType, Inode, Result, Walk, WalkVisitor},
|
||||
Reader,
|
||||
};
|
||||
use crate::store::Store;
|
||||
@@ -9,9 +9,10 @@ use nix::unistd::{fchownat, FchownatFlags, Gid, Uid};
|
||||
use std::fs::Permissions;
|
||||
use std::os::unix::ffi::OsStrExt;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::Path;
|
||||
use std::{ffi::OsStr, fs};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::{ffi::OsStr, fs, sync::Arc};
|
||||
use tokio::fs::OpenOptions;
|
||||
use workers::WorkerPool;
|
||||
|
||||
/// unpack an FL to the given root location. it will download the files and reconstruct
|
||||
/// the filesystem.
|
||||
@@ -21,8 +22,9 @@ pub async fn unpack<P: AsRef<Path>, S: Store>(
|
||||
root: P,
|
||||
preserve: bool,
|
||||
) -> Result<()> {
|
||||
// For now, we'll use the non-parallel version
|
||||
// TODO: Implement parallel download properly
|
||||
let mut visitor = CopyVisitor::new(meta, cache, root.as_ref(), preserve);
|
||||
|
||||
meta.walk(&mut visitor).await
|
||||
}
|
||||
|
||||
@@ -118,11 +120,90 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
TODO: parallel download ?
|
||||
// Parallel download implementation
|
||||
struct ParallelCopyVisitor<'a, S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
meta: &'a Reader,
|
||||
root: &'a Path,
|
||||
preserve: bool,
|
||||
pool: &'a mut WorkerPool<Downloader<S>>,
|
||||
}
|
||||
|
||||
this is a download worker that can be used in a worker pool to download files
|
||||
in parallel
|
||||
impl<'a, S> ParallelCopyVisitor<'a, S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
pub fn new(
|
||||
meta: &'a Reader,
|
||||
root: &'a Path,
|
||||
preserve: bool,
|
||||
pool: &'a mut WorkerPool<Downloader<S>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
meta,
|
||||
root,
|
||||
preserve,
|
||||
pool,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<'a, S> WalkVisitor for ParallelCopyVisitor<'a, S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
|
||||
let rooted = self.root.join(path.strip_prefix("/").unwrap());
|
||||
|
||||
match node.mode.file_type() {
|
||||
FileType::Dir => {
|
||||
fs::create_dir_all(&rooted)
|
||||
.with_context(|| format!("failed to create directory '{:?}'", rooted))?;
|
||||
}
|
||||
FileType::Regular => {
|
||||
let blocks = self.meta.blocks(node.ino).await?;
|
||||
let worker = self.pool.get().await;
|
||||
worker.send((rooted.clone(), blocks, node.mode.mode()))?;
|
||||
}
|
||||
FileType::Link => {
|
||||
let target = node
|
||||
.data
|
||||
.as_deref()
|
||||
.ok_or_else(|| anyhow::anyhow!("link has no target path"))?;
|
||||
|
||||
let target = Path::new(OsStr::from_bytes(target));
|
||||
let target = if target.is_relative() {
|
||||
target.to_owned()
|
||||
} else {
|
||||
self.root.join(target)
|
||||
};
|
||||
|
||||
std::os::unix::fs::symlink(target, &rooted)
|
||||
.with_context(|| format!("failed to create symlink '{:?}'", rooted))?;
|
||||
}
|
||||
_ => {
|
||||
warn!("unknown file kind: {:?}", node.mode.file_type());
|
||||
return Ok(Walk::Continue);
|
||||
}
|
||||
};
|
||||
|
||||
if self.preserve {
|
||||
fchownat(
|
||||
None,
|
||||
&rooted,
|
||||
Some(Uid::from_raw(node.uid)),
|
||||
Some(Gid::from_raw(node.gid)),
|
||||
FchownatFlags::NoFollowSymlink,
|
||||
)
|
||||
.with_context(|| format!("failed to change ownership of '{:?}'", &rooted))?;
|
||||
}
|
||||
|
||||
Ok(Walk::Continue)
|
||||
}
|
||||
}
|
||||
|
||||
struct Downloader<S>
|
||||
where
|
||||
@@ -135,6 +216,12 @@ impl<S> Downloader<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
fn new(cache: Cache<S>) -> Self {
|
||||
Self {
|
||||
cache: Arc::new(cache),
|
||||
}
|
||||
}
|
||||
|
||||
async fn download(&self, path: &Path, blocks: &[Block], mode: u32) -> Result<()> {
|
||||
let mut fd = OpenOptions::new()
|
||||
.create_new(true)
|
||||
@@ -171,14 +258,13 @@ impl<S> workers::Work for Downloader<S>
|
||||
where
|
||||
S: Store,
|
||||
{
|
||||
type Input = (PathBuf, Vec<Block>, Mode);
|
||||
type Input = (PathBuf, Vec<Block>, u32);
|
||||
type Output = ();
|
||||
|
||||
async fn run(&mut self, (path, blocks, mode): Self::Input) -> Self::Output {
|
||||
if let Err(err) = self.download(&path, &blocks, mode.mode()).await {
|
||||
log::info!("downloading file {:?}", path);
|
||||
if let Err(err) = self.download(&path, &blocks, mode).await {
|
||||
log::error!("failed to download file {:?}: {}", path, err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
*/
|
||||
435
components/rfs/src/upload.rs
Normal file
@@ -0,0 +1,435 @@
|
||||
use anyhow::{Context, Result};
|
||||
use futures::future::join_all;
|
||||
use reqwest::Client;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use crate::fungi;
|
||||
use crate::server_api;
|
||||
use crate::store;
|
||||
|
||||
pub const BLOCK_SIZE: usize = 1024 * 1024; // 1MB blocks, same as server
|
||||
const PARALLEL_UPLOAD: usize = 20; // Number of blocks to upload in parallel
|
||||
|
||||
pub fn calculate_hash(data: &[u8]) -> String {
|
||||
let hash = blake2b_simd::Params::new().hash_length(32).hash(data);
|
||||
hex::encode(hash.as_bytes())
|
||||
}
|
||||
|
||||
/// Splits the file into blocks and calculates their hashes
|
||||
pub async fn split_file_into_blocks(
|
||||
file_path: &Path,
|
||||
block_size: usize,
|
||||
) -> Result<(Vec<String>, Vec<(String, Vec<u8>)>)> {
|
||||
let mut file = File::open(file_path).await.context("Failed to open file")?;
|
||||
let mut blocks = Vec::new();
|
||||
let mut block_data = Vec::new();
|
||||
|
||||
loop {
|
||||
let mut buffer = vec![0; block_size];
|
||||
let bytes_read = file.read(&mut buffer).await?;
|
||||
|
||||
if bytes_read == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
buffer.truncate(bytes_read);
|
||||
|
||||
// Calculate hash for this block
|
||||
let hash = calculate_hash(&buffer);
|
||||
|
||||
blocks.push(hash.clone());
|
||||
block_data.push((hash, buffer));
|
||||
}
|
||||
|
||||
Ok((blocks, block_data))
|
||||
}
|
||||
|
||||
/// Calculates the hash of the entire file by combining the hashes of all blocks
|
||||
pub fn calculate_file_hash(blocks: &[String]) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
for block_hash in blocks {
|
||||
hasher.update(block_hash.as_bytes());
|
||||
}
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
/// Uploads a file to the server, splitting it into blocks and only uploading missing blocks
|
||||
/// Returns the hash of the uploaded file
|
||||
pub async fn upload<P: AsRef<Path>>(
|
||||
file_path: P,
|
||||
server_url: String,
|
||||
block_size: Option<usize>,
|
||||
token: &str,
|
||||
) -> Result<String> {
|
||||
if token.is_empty() {
|
||||
return Err(anyhow::anyhow!("Authentication token is required. Use --token option or set RFS_TOKEN environment variable."));
|
||||
}
|
||||
|
||||
let block_size = block_size.unwrap_or(BLOCK_SIZE); // Use provided block size or default
|
||||
let file_path = file_path.as_ref();
|
||||
|
||||
info!("Uploading file: {}", file_path.display());
|
||||
debug!("Using block size: {} bytes", block_size);
|
||||
|
||||
// Create HTTP client
|
||||
let client = Client::new();
|
||||
|
||||
// Read the file size
|
||||
let file_size = File::open(file_path).await?.metadata().await?.len();
|
||||
|
||||
info!("File size: {} bytes", file_size);
|
||||
info!("Splitting file into blocks of {} bytes", block_size);
|
||||
|
||||
// Split file into blocks and calculate hashes
|
||||
let (blocks, block_data) = split_file_into_blocks(file_path, block_size).await?;
|
||||
info!("File split into {} blocks", blocks.len());
|
||||
|
||||
// Calculate the file hash by combining all block hashes
|
||||
let file_hash = calculate_file_hash(&blocks);
|
||||
info!("Calculated file hash: {}", file_hash);
|
||||
|
||||
// Prepare blocks with metadata for verification
|
||||
let blocks_with_metadata: Vec<server_api::VerifyBlock> = blocks
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(idx, hash)| server_api::VerifyBlock {
|
||||
block_hash: hash.clone(),
|
||||
file_hash: file_hash.clone(),
|
||||
block_index: idx as u64,
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Verify which blocks are missing on the server
|
||||
let missing_blocks =
|
||||
server_api::verify_blocks_with_server(&client, server_url.clone(), blocks_with_metadata)
|
||||
.await?;
|
||||
info!(
|
||||
"{} of {} blocks are missing and need to be uploaded",
|
||||
missing_blocks.len(),
|
||||
block_data.len()
|
||||
);
|
||||
|
||||
// Upload missing blocks in parallel
|
||||
let client = Arc::new(client);
|
||||
let missing_blocks = Arc::new(missing_blocks);
|
||||
|
||||
// Use a semaphore to limit concurrent uploads
|
||||
let semaphore = Arc::new(Semaphore::new(PARALLEL_UPLOAD));
|
||||
|
||||
// Create a vector to hold all upload tasks
|
||||
let mut upload_tasks = Vec::new();
|
||||
|
||||
for (idx, (hash, data)) in block_data.into_iter().enumerate() {
|
||||
if missing_blocks.iter().any(|block| block == &hash) {
|
||||
let hash_clone = hash.clone();
|
||||
let server_url_clone = server_url.clone();
|
||||
let client_clone = Arc::clone(&client);
|
||||
let file_hash_clone = file_hash.clone();
|
||||
let token_clone = token.to_string();
|
||||
|
||||
// Acquire a permit from the semaphore
|
||||
let _permit = semaphore.acquire().await.unwrap();
|
||||
|
||||
// Create a task for each block upload
|
||||
let task: tokio::task::JoinHandle<std::result::Result<(), anyhow::Error>> =
|
||||
tokio::spawn(server_api::upload_block(
|
||||
client_clone,
|
||||
server_url_clone,
|
||||
hash_clone,
|
||||
data,
|
||||
file_hash_clone,
|
||||
idx as u64,
|
||||
token_clone,
|
||||
));
|
||||
|
||||
upload_tasks.push(task);
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for all upload tasks to complete
|
||||
let results = join_all(upload_tasks).await;
|
||||
|
||||
// Check for any errors in the upload tasks
|
||||
for result in results {
|
||||
match result {
|
||||
Ok(task_result) => task_result?,
|
||||
Err(e) => {
|
||||
return Err(anyhow::anyhow!("Upload task failed: {}", e));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!("File upload complete");
|
||||
Ok(file_hash)
|
||||
}
|
||||
|
||||
/// Uploads a directory to the server, processing all files recursively
|
||||
pub async fn upload_dir<P: AsRef<Path>>(
|
||||
dir_path: P,
|
||||
server_url: String,
|
||||
block_size: Option<usize>,
|
||||
token: &str,
|
||||
create_flist: bool,
|
||||
flist_output: Option<&str>,
|
||||
) -> Result<()> {
|
||||
if token.is_empty() {
|
||||
return Err(anyhow::anyhow!("Authentication token is required. Use --token option or set RFS_TOKEN environment variable."));
|
||||
}
|
||||
|
||||
let dir_path = dir_path.as_ref().to_path_buf();
|
||||
|
||||
info!("Uploading directory: {}", dir_path.display());
|
||||
debug!(
|
||||
"Using block size: {} bytes",
|
||||
block_size.unwrap_or(BLOCK_SIZE)
|
||||
);
|
||||
|
||||
// Collect all files in the directory recursively
|
||||
let mut file_paths = Vec::new();
|
||||
collect_files(&dir_path, &mut file_paths).context("Failed to read directory")?;
|
||||
|
||||
info!("Found {} files to upload", file_paths.len());
|
||||
|
||||
if !create_flist {
|
||||
// Upload each file
|
||||
for file_path in file_paths.clone() {
|
||||
upload(&file_path, server_url.clone(), block_size, token).await?;
|
||||
}
|
||||
|
||||
info!("Directory upload complete");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Create and handle flist if requested
|
||||
info!("Creating flist for the uploaded directory");
|
||||
|
||||
// Create a temporary flist file if no output path is specified
|
||||
let flist_path = match flist_output {
|
||||
Some(path) => PathBuf::from(path),
|
||||
None => {
|
||||
let temp_dir = std::env::temp_dir();
|
||||
temp_dir.join(format!(
|
||||
"{}.fl",
|
||||
dir_path.file_name().unwrap_or_default().to_string_lossy()
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
// Create the flist
|
||||
let writer = fungi::Writer::new(&flist_path, true)
|
||||
.await
|
||||
.context("Failed to create flist file")?;
|
||||
|
||||
// Create a store for the server
|
||||
let store = store::parse_router(&[format!(
|
||||
"{}://{}?token={}",
|
||||
store::server::SCHEME,
|
||||
server_url.clone(),
|
||||
token
|
||||
)])
|
||||
.await
|
||||
.context("Failed to create store")?;
|
||||
|
||||
// Pack the directory into the flist iteratively to avoid stack overflow
|
||||
let result =
|
||||
tokio::task::spawn_blocking(move || crate::pack(writer, store, dir_path, false, None))
|
||||
.await
|
||||
.context("Failed to join spawned task")?;
|
||||
|
||||
result.await.context("Failed to create flist")?;
|
||||
|
||||
info!("Flist created at: {}", flist_path.display());
|
||||
|
||||
// Upload the flist file if it was created
|
||||
if flist_path.exists() {
|
||||
info!("Uploading flist file");
|
||||
let flist_hash = upload(&flist_path, server_url.clone(), block_size, token)
|
||||
.await
|
||||
.context("Failed to upload flist file")?;
|
||||
|
||||
info!("Flist uploaded successfully. Hash: {}", flist_hash);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn collect_files(dir_path: &Path, file_paths: &mut Vec<PathBuf>) -> std::io::Result<()> {
|
||||
let mut stack = vec![dir_path.to_path_buf()];
|
||||
|
||||
while let Some(current_path) = stack.pop() {
|
||||
for entry in std::fs::read_dir(&current_path)? {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
|
||||
if path.is_file() {
|
||||
file_paths.push(path);
|
||||
} else if path.is_dir() {
|
||||
stack.push(path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Publishes a website by uploading its directory to the server
|
||||
pub async fn publish_website<P: AsRef<Path>>(
|
||||
dir_path: P,
|
||||
server_url: String,
|
||||
block_size: Option<usize>,
|
||||
token: &str,
|
||||
) -> Result<()> {
|
||||
if token.is_empty() {
|
||||
return Err(anyhow::anyhow!("Authentication token is required. Use --token option or set RFS_TOKEN environment variable."));
|
||||
}
|
||||
|
||||
let dir_path = dir_path.as_ref().to_path_buf();
|
||||
|
||||
debug!("Uploading directory: {}", dir_path.display());
|
||||
debug!(
|
||||
"Using block size: {} bytes",
|
||||
block_size.unwrap_or(BLOCK_SIZE)
|
||||
);
|
||||
|
||||
// Collect all files in the directory recursively
|
||||
let mut file_paths = Vec::new();
|
||||
collect_files(&dir_path, &mut file_paths).context("Failed to read directory")?;
|
||||
|
||||
debug!("Found {} files to upload", file_paths.len());
|
||||
|
||||
// Create and handle flist if requested
|
||||
debug!("Creating flist for the uploaded directory");
|
||||
|
||||
// Create a temporary flist file
|
||||
let temp_dir = std::env::temp_dir();
|
||||
let flist_path = temp_dir.join(format!(
|
||||
"{}.fl",
|
||||
dir_path.file_name().unwrap_or_default().to_string_lossy()
|
||||
));
|
||||
|
||||
// Create the flist
|
||||
let writer = fungi::Writer::new(&flist_path, true)
|
||||
.await
|
||||
.context("Failed to create flist file")?;
|
||||
|
||||
// Create a store for the server
|
||||
let store = store::parse_router(&[format!(
|
||||
"{}://{}?token={}",
|
||||
store::server::SCHEME,
|
||||
server_url.clone(),
|
||||
token
|
||||
)])
|
||||
.await
|
||||
.context("Failed to create store")?;
|
||||
|
||||
// Temporarily disable logs for the upload function
|
||||
let original_level = log::max_level();
|
||||
log::set_max_level(log::LevelFilter::Off);
|
||||
|
||||
// Pack the directory into the flist iteratively to avoid stack overflow
|
||||
let result =
|
||||
tokio::task::spawn_blocking(move || crate::pack(writer, store, dir_path, false, None))
|
||||
.await
|
||||
.context("Failed to join spawned task")?;
|
||||
|
||||
result.await.context("Failed to create flist")?;
|
||||
|
||||
debug!("Flist created at: {}", flist_path.display());
|
||||
|
||||
// Upload the flist file if it was created
|
||||
if flist_path.exists() {
|
||||
debug!("Uploading flist file");
|
||||
|
||||
let flist_hash = upload(&flist_path, server_url.clone(), block_size, token)
|
||||
.await
|
||||
.context("Failed to upload flist file")?;
|
||||
|
||||
// Restore the original log level
|
||||
log::set_max_level(original_level);
|
||||
|
||||
debug!("Flist uploaded successfully. Hash: {}", flist_hash);
|
||||
|
||||
info!("Website published successfully");
|
||||
info!("Website hash: {}", flist_hash);
|
||||
info!("Website URL: {}/website/{}/", server_url, flist_hash);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn get_token_from_server(
|
||||
server_url: &str,
|
||||
username: &str,
|
||||
password: &str,
|
||||
) -> Result<String> {
|
||||
let client = reqwest::Client::new();
|
||||
server_api::signin(&client, server_url, username, password).await
|
||||
}
|
||||
|
||||
/// Track user blocks on the server
|
||||
/// Returns information about the number of blocks and their total size
|
||||
pub async fn track(server_url: &str, token: &str, show_details: bool) -> Result<()> {
|
||||
if token.is_empty() {
|
||||
return Err(anyhow::anyhow!("Authentication token is required. Use --token option or set RFS_TOKEN environment variable."));
|
||||
}
|
||||
|
||||
let first_page = server_api::get_user_blocks(server_url, token, Some(1), None)
|
||||
.await
|
||||
.context("Failed to get user blocks")?;
|
||||
|
||||
let total_pages = (first_page.total as f64 / 50.0).ceil() as u32;
|
||||
|
||||
let mut tasks = Vec::new();
|
||||
for page in 1..=total_pages {
|
||||
let server_url = server_url.to_string();
|
||||
let token = token.to_string();
|
||||
tasks.push(tokio::spawn(async move {
|
||||
server_api::get_user_blocks(&server_url, &token, Some(page), Some(50)).await
|
||||
}));
|
||||
}
|
||||
|
||||
let mut user_blocks = Vec::new();
|
||||
for task in tasks {
|
||||
match task.await {
|
||||
Ok(Ok(blocks_per_page)) => {
|
||||
user_blocks.extend(blocks_per_page.blocks);
|
||||
}
|
||||
Ok(Err(err)) => {
|
||||
return Err(anyhow::anyhow!("Failed to get user blocks: {}", err));
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(anyhow::anyhow!("Task failed: {}", err));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate total size
|
||||
let total_size: u64 = user_blocks.iter().map(|(_, size)| size).sum();
|
||||
|
||||
println!("User Blocks Summary:");
|
||||
println!(
|
||||
"Usage percentage: {}%",
|
||||
(user_blocks.len() as f64 / first_page.all_blocks as f64) * 100.0
|
||||
);
|
||||
println!("Total blocks: {}", user_blocks.len());
|
||||
println!(
|
||||
"Total size: {} bytes ({:.2} MB)",
|
||||
total_size,
|
||||
total_size as f64 / (1024.0 * 1024.0)
|
||||
);
|
||||
|
||||
// Print individual blocks if there are any
|
||||
if show_details && !user_blocks.is_empty() {
|
||||
println!("\nBlock details:");
|
||||
for (hash, size) in &user_blocks {
|
||||
println!(" {} - {} bytes", hash, size);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
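A hedged end-to-end sketch of the upload flow defined above (split, hash, verify, upload only missing blocks); the file path, server URL, token, and the `rfs::upload` module path are assumptions:

```rust
// Sketch only: module path and example values are assumptions.
use rfs::upload::{calculate_file_hash, split_file_into_blocks, upload, BLOCK_SIZE};
use std::path::Path;

async fn upload_example() -> anyhow::Result<()> {
    // A 10 MiB file with the default 1 MiB BLOCK_SIZE yields 10 blocks.
    let (block_hashes, _blocks) =
        split_file_into_blocks(Path::new("/tmp/random.bin"), BLOCK_SIZE).await?;
    println!("{} blocks", block_hashes.len());

    // The file hash is the SHA-256 over the concatenated block hashes.
    let file_hash = calculate_file_hash(&block_hashes);

    // `upload` redoes the split internally, asks the server which blocks
    // are missing, and uploads only those; it should return the same hash.
    let returned = upload(
        "/tmp/random.bin",
        "http://localhost:4000".to_string(),
        None,
        "my-token",
    )
    .await?;
    assert_eq!(returned, file_hash);
    Ok(())
}
```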
33
components/rfs/tests/Makefile
Normal file
@@ -0,0 +1,33 @@
|
||||
.PHONY: all e2e performance unit integration clean
|
||||
|
||||
all: unit integration e2e performance
|
||||
|
||||
# Run unit tests
|
||||
unit:
|
||||
@echo "Running unit tests..."
|
||||
cargo test --lib
|
||||
|
||||
# Run integration tests
|
||||
integration:
|
||||
@echo "Running integration tests..."
|
||||
cargo test --test docker_test --test parallel_download_test
|
||||
|
||||
# Run end-to-end tests
|
||||
e2e:
|
||||
@echo "Running end-to-end tests..."
|
||||
chmod +x e2e_tests.sh
|
||||
./e2e_tests.sh
|
||||
|
||||
chmod +x e2e_tests_updated.sh
|
||||
./e2e_tests_updated.sh
|
||||
|
||||
# Run performance tests
|
||||
performance:
|
||||
@echo "Running performance tests..."
|
||||
chmod +x performance_tests.sh
|
||||
./performance_tests.sh
|
||||
|
||||
# Clean test artifacts
|
||||
clean:
|
||||
@echo "Cleaning test artifacts..."
|
||||
rm -rf /tmp/rfs-e2e-tests /tmp/rfs-performance-tests /tmp/rfs-upload-download-tests
|
||||
51
components/rfs/tests/README.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# RFS Tests
|
||||
|
||||
This directory contains various tests for the RFS tool.
|
||||
|
||||
## Test Types
|
||||
|
||||
1. **Unit Tests**: Standard Rust unit tests within the codebase
|
||||
2. **Integration Tests**: Rust tests that verify specific functionality
|
||||
3. **End-to-End Tests**: Shell scripts that test the full RFS command-line interface
|
||||
4. **Performance Tests**: Shell scripts that measure and compare performance
|
||||
|
||||
## Running Tests
|
||||
|
||||
You can use the provided Makefile to run the tests:
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
make all
|
||||
|
||||
# Run specific test types
|
||||
make unit
|
||||
make integration
|
||||
make e2e
|
||||
make performance
|
||||
|
||||
# Clean test artifacts
|
||||
make clean
|
||||
```
|
||||
|
||||
## Test Files
|
||||
|
||||
- `e2e_tests.sh` and `e2e_tests_updated.sh`: End-to-end tests for all RFS commands
|
||||
- `performance_tests.sh`: Performance tests focusing on parallel upload/download
|
||||
- `docker_test.rs`: Integration test for the Docker functionality
|
||||
- `parallel_download_test.rs`: Integration test for parallel download feature
|
||||
- `Makefile`: Simplifies running the tests
|
||||
|
||||
## Requirements
|
||||
|
||||
- Rust and Cargo for unit and integration tests
|
||||
- Bash for shell-based tests
|
||||
- Docker for Docker-related tests
|
||||
- Root/sudo access for mount tests
|
||||
|
||||
## Notes
|
||||
|
||||
- The end-to-end tests create temporary directories in `/tmp/rfs-e2e-tests`; they also start local servers on ports 8080 and 8081 for testing and shut them down after the tests complete
|
||||
- The performance tests create temporary directories in `/tmp/rfs-performance-tests`
|
||||
- The upload/download tests create temporary directories in `/tmp/rfs-upload-download-tests`
|
||||
- Some tests require sudo access (mount tests)
|
||||
- Docker tests will be skipped if Docker is not available
|
||||
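For example, to run a single integration test from this directory (the release binary must exist first, since the shell suites call `../target/release/rfs`):

```bash
# Build the binary the shell-based tests expect, then run one test
cargo build --release
cargo test --test parallel_download_test -- --nocapture
```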
66
components/rfs/tests/docker_test.rs
Normal file
@@ -0,0 +1,66 @@
|
||||
#[cfg(test)]
|
||||
mod docker_tests {
|
||||
use anyhow::Result;
|
||||
use std::path::Path;
|
||||
use tempdir::TempDir;
|
||||
use tokio::runtime::Runtime;
|
||||
use uuid::Uuid;
|
||||
|
||||
use rfs::fungi;
|
||||
use rfs::store::{self, dir::DirStore};
|
||||
use rfs::DockerImageToFlist;
|
||||
|
||||
#[test]
|
||||
fn test_docker_conversion() -> Result<()> {
|
||||
// Skip test if docker is not available
|
||||
if !is_docker_available() {
|
||||
println!("Docker is not available, skipping test");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Create a runtime for async operations
|
||||
let rt = Runtime::new()?;
|
||||
|
||||
rt.block_on(async {
|
||||
// Create temporary directories
|
||||
let temp_dir = TempDir::new("docker-test")?;
|
||||
let store_dir = temp_dir.path().join("store");
|
||||
std::fs::create_dir_all(&store_dir)?;
|
||||
|
||||
// Create a store
|
||||
let store = DirStore::new(&store_dir).await?;
|
||||
|
||||
// Create a flist writer
|
||||
let fl_path = temp_dir.path().join("alpine-test.fl");
|
||||
let meta = fungi::Writer::new(&fl_path, true).await?;
|
||||
|
||||
// Create a temporary directory for docker extraction
|
||||
let container_name = Uuid::new_v4().to_string();
|
||||
let docker_tmp_dir = TempDir::new(&container_name)?;
|
||||
|
||||
// Create DockerImageToFlist instance
|
||||
let mut docker_to_fl = DockerImageToFlist::new(
|
||||
meta,
|
||||
"alpine:latest".to_string(),
|
||||
None, // No credentials for public image
|
||||
docker_tmp_dir,
|
||||
);
|
||||
|
||||
// Convert docker image to flist
|
||||
docker_to_fl.convert(store, None).await?;
|
||||
|
||||
// Verify the flist was created
|
||||
assert!(Path::new(&fl_path).exists(), "Flist file was not created");
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
// Helper function to check if docker is available
|
||||
fn is_docker_available() -> bool {
|
||||
std::process::Command::new("docker")
|
||||
.arg("--version")
|
||||
.output()
|
||||
.is_ok()
|
||||
}
|
||||
}
|
||||
223
components/rfs/tests/e2e_tests.sh
Executable file
@@ -0,0 +1,223 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
# Colors for output
|
||||
GREEN='\033[0;32m'
|
||||
RED='\033[0;31m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Path to the rfs binary
|
||||
RFS_BIN="../target/release/rfs"
|
||||
|
||||
# Test directory
|
||||
TEST_DIR="/tmp/rfs-e2e-tests"
|
||||
CACHE_DIR="$TEST_DIR/cache"
|
||||
SOURCE_DIR="$TEST_DIR/source"
|
||||
DEST_DIR="$TEST_DIR/destination"
|
||||
MOUNT_DIR="$TEST_DIR/mount"
|
||||
FLIST_PATH="$TEST_DIR/test.fl"
|
||||
DOCKER_FLIST_PATH="$TEST_DIR/docker-test.fl"
|
||||
|
||||
# Store URL - using a local directory store for testing
|
||||
STORE_DIR="$TEST_DIR/store"
|
||||
STORE_URL="dir://$STORE_DIR"
|
||||
|
||||
# Clean up function
|
||||
cleanup() {
|
||||
echo "Cleaning up test directories..."
|
||||
# Unmount if mounted
|
||||
if mountpoint -q "$MOUNT_DIR"; then
|
||||
sudo umount "$MOUNT_DIR"
|
||||
fi
|
||||
rm -rf "$TEST_DIR"
|
||||
}
|
||||
|
||||
# Setup function
|
||||
setup() {
|
||||
echo "Setting up test directories..."
|
||||
mkdir -p "$TEST_DIR" "$CACHE_DIR" "$SOURCE_DIR" "$DEST_DIR" "$MOUNT_DIR" "$STORE_DIR"
|
||||
|
||||
# Create some test files
|
||||
echo "Creating test files..."
|
||||
echo "This is a test file 1" > "$SOURCE_DIR/file1.txt"
|
||||
echo "This is a test file 2" > "$SOURCE_DIR/file2.txt"
|
||||
mkdir -p "$SOURCE_DIR/subdir"
|
||||
echo "This is a test file in a subdirectory" > "$SOURCE_DIR/subdir/file3.txt"
|
||||
|
||||
# Create a symlink
|
||||
ln -s "file1.txt" "$SOURCE_DIR/link_to_file1.txt"
|
||||
|
||||
# Create a smaller file for testing
|
||||
dd if=/dev/urandom of="$SOURCE_DIR/random.bin" bs=1M count=1
|
||||
}
|
||||
|
||||
# Function to run a test and report result
|
||||
run_test() {
|
||||
local test_name="$1"
|
||||
local test_cmd="$2"
|
||||
|
||||
echo -e "\n${GREEN}Running test: $test_name${NC}"
|
||||
echo "Command: $test_cmd"
|
||||
|
||||
if eval "$test_cmd"; then
|
||||
echo -e "${GREEN}✓ Test passed: $test_name${NC}"
|
||||
return 0
|
||||
else
|
||||
echo -e "${RED}✗ Test failed: $test_name${NC}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Test the pack command
|
||||
test_pack() {
|
||||
run_test "Pack command" "$RFS_BIN pack -m $FLIST_PATH -s $STORE_URL $SOURCE_DIR"
|
||||
|
||||
# Verify the flist was created
|
||||
if [ ! -f "$FLIST_PATH" ]; then
|
||||
echo -e "${RED}Flist file was not created${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "Flist created successfully at $FLIST_PATH"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Test the unpack command
|
||||
test_unpack() {
|
||||
run_test "Unpack command" "$RFS_BIN unpack -m $FLIST_PATH -c $CACHE_DIR $DEST_DIR"
|
||||
|
||||
# Verify files were unpacked correctly
|
||||
if ! diff -r "$SOURCE_DIR" "$DEST_DIR"; then
|
||||
echo -e "${RED}Unpacked files don't match source files${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "Files unpacked successfully to $DEST_DIR"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Test the mount command (requires sudo)
|
||||
test_mount() {
|
||||
echo -e "\n${GREEN}Running test: Mount command${NC}"
|
||||
echo "Command: sudo $RFS_BIN mount -m $FLIST_PATH -c $CACHE_DIR $MOUNT_DIR"
|
||||
|
||||
# Run the mount command in the background
|
||||
sudo $RFS_BIN mount -m $FLIST_PATH -c $CACHE_DIR $MOUNT_DIR &
|
||||
MOUNT_PID=$!
|
||||
|
||||
# Wait a moment for the mount to complete
|
||||
sleep 3
|
||||
|
||||
# Verify the mount point is working
|
||||
if ! mountpoint -q "$MOUNT_DIR"; then
|
||||
echo -e "${RED}Mount failed${NC}"
|
||||
kill $MOUNT_PID 2>/dev/null
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check if files are accessible
|
||||
if ! ls -la "$MOUNT_DIR"; then
|
||||
echo -e "${RED}Cannot list files in mount directory${NC}"
|
||||
sudo umount "$MOUNT_DIR" 2>/dev/null
|
||||
kill $MOUNT_PID 2>/dev/null
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Read a file from the mount
|
||||
if ! cat "$MOUNT_DIR/file1.txt"; then
|
||||
echo -e "${RED}Cannot read file from mount${NC}"
|
||||
sudo umount "$MOUNT_DIR" 2>/dev/null
|
||||
kill $MOUNT_PID 2>/dev/null
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Unmount
|
||||
sudo umount "$MOUNT_DIR" 2>/dev/null
|
||||
kill $MOUNT_PID 2>/dev/null
|
||||
|
||||
echo -e "${GREEN}✓ Test passed: Mount command${NC}"
|
||||
echo "Mount test completed successfully"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Test the docker command (requires docker)
|
||||
test_docker() {
|
||||
# Check if docker is available
|
||||
if ! command -v docker &> /dev/null; then
|
||||
echo -e "${RED}Docker is not installed, skipping docker test${NC}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo -e "\n${GREEN}Running test: Docker command${NC}"
|
||||
echo "Command: $RFS_BIN docker -i alpine:latest -s $STORE_URL"
|
||||
|
||||
# Pull a small test image
|
||||
docker pull alpine:latest
|
||||
|
||||
# Convert docker image to flist with a timeout
|
||||
timeout 60 $RFS_BIN docker -i alpine:latest -s $STORE_URL &
|
||||
DOCKER_PID=$!
|
||||
|
||||
# Wait for the command to complete or timeout
|
||||
wait $DOCKER_PID
|
||||
RESULT=$?
|
||||
|
||||
if [ $RESULT -eq 124 ]; then
|
||||
echo -e "${RED}Docker command timed out${NC}"
|
||||
return 1
|
||||
elif [ $RESULT -ne 0 ]; then
|
||||
echo -e "${RED}Docker command failed with exit code $RESULT${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Verify the flist was created
|
||||
if [ ! -f "alpine-latest.fl" ]; then
|
||||
echo -e "${RED}Docker flist file was not created${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}✓ Test passed: Docker command${NC}"
|
||||
echo "Docker image converted to flist successfully"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Test the config command
|
||||
test_config() {
|
||||
# Add a tag
|
||||
run_test "Config tag add" "$RFS_BIN config -m $FLIST_PATH tag add -t test=value"
|
||||
|
||||
# List tags
|
||||
run_test "Config tag list" "$RFS_BIN config -m $FLIST_PATH tag list"
|
||||
|
||||
# Add a store
|
||||
run_test "Config store add" "$RFS_BIN config -m $FLIST_PATH store add -s $STORE_URL"
|
||||
|
||||
# List stores
|
||||
run_test "Config store list" "$RFS_BIN config -m $FLIST_PATH store list"
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Main test function
|
||||
main() {
|
||||
# Register cleanup on exit
|
||||
trap cleanup EXIT
|
||||
|
||||
# Setup test environment
|
||||
setup
|
||||
|
||||
# Run tests
|
||||
test_pack
|
||||
test_unpack
|
||||
test_config
|
||||
|
||||
# These tests may require sudo
|
||||
echo -e "\n${GREEN}The following tests may require sudo:${NC}"
|
||||
test_mount
|
||||
test_docker
|
||||
|
||||
echo -e "\n${GREEN}All tests completed!${NC}"
|
||||
}
|
||||
|
||||
# Run the main function
|
||||
main
|
||||
705
components/rfs/tests/e2e_tests_updated.sh
Executable file
@@ -0,0 +1,705 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
GREEN='\033[0;32m'
|
||||
RED='\033[0;31m'
|
||||
YELLOW='\033[0;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Path to the rfs binary
|
||||
RFS_BIN="../target/release/rfs"
|
||||
|
||||
# Test directory
|
||||
TEST_DIR="/tmp/rfs-upload-download-tests"
|
||||
CACHE_DIR="$TEST_DIR/cache"
|
||||
SOURCE_DIR="$TEST_DIR/source"
|
||||
DEST_DIR="$TEST_DIR/destination"
|
||||
UPLOAD_DIR="$TEST_DIR/upload"
|
||||
DOWNLOAD_DIR="$TEST_DIR/download"
|
||||
|
||||
# Store URL - using a local directory store for testing
|
||||
STORE_DIR="$TEST_DIR/store"
|
||||
STORE_URL="dir://$STORE_DIR"
|
||||
|
||||
# Server settings for testing
|
||||
SERVER_PORT=8080
|
||||
SERVER_URL="http://localhost:$SERVER_PORT"
|
||||
SERVER_STORAGE="$TEST_DIR/server_storage"
|
||||
SERVER_PID_FILE="$TEST_DIR/server.pid"
|
||||
SERVER_CONFIG_FILE="$TEST_DIR/server_config.toml"
|
||||
|
||||
# Test file sizes
|
||||
SMALL_FILE_SIZE_MB=1
|
||||
MEDIUM_FILE_SIZE_MB=5
|
||||
LARGE_FILE_SIZE_MB=10
|
||||
|
||||
# Clean up function
|
||||
cleanup() {
|
||||
echo "Cleaning up test environment..."
|
||||
|
||||
# Stop the main server if it's running
|
||||
if [ -f "$SERVER_PID_FILE" ]; then
|
||||
echo "Stopping test server..."
|
||||
kill $(cat "$SERVER_PID_FILE") 2>/dev/null || true
|
||||
rm -f "$SERVER_PID_FILE"
|
||||
fi
|
||||
|
||||
# Stop the second server if it's running (for sync tests)
|
||||
local SERVER2_PID_FILE="$TEST_DIR/server2.pid"
|
||||
if [ -f "$SERVER2_PID_FILE" ]; then
|
||||
echo "Stopping second test server..."
|
||||
kill $(cat "$SERVER2_PID_FILE") 2>/dev/null || true
|
||||
rm -f "$SERVER2_PID_FILE"
|
||||
fi
|
||||
|
||||
# Remove test directories and files
|
||||
rm -rf "$TEST_DIR"
|
||||
|
||||
echo "Cleanup complete"
|
||||
}
|
||||
|
||||
# Create server configuration file
|
||||
create_server_config() {
|
||||
echo "Creating server configuration file..."
|
||||
|
||||
cat > "$SERVER_CONFIG_FILE" << EOF
|
||||
# Server configuration for e2e tests
|
||||
host="0.0.0.0"
|
||||
port=8080
|
||||
store_url=["dir:///tmp/store0"]
|
||||
flist_dir="flists"
|
||||
sqlite_path="fl-server.db"
|
||||
storage_dir="storage"
|
||||
# bloc_size=
|
||||
|
||||
jwt_secret="secret"
|
||||
jwt_expire_hours=5
|
||||
|
||||
# users
|
||||
[[users]]
|
||||
username = "admin"
|
||||
password = "admin"
|
||||
|
||||
EOF
|
||||
|
||||
echo "Server configuration file created at $SERVER_CONFIG_FILE"
|
||||
}
|
||||
|
||||
# Start the server
|
||||
start_server() {
|
||||
echo -e "\n${GREEN}Starting test server on port $SERVER_PORT...${NC}"
|
||||
|
||||
# Create server storage directory
|
||||
mkdir -p "$SERVER_STORAGE"
|
||||
|
||||
# Create server configuration
|
||||
create_server_config
|
||||
|
||||
# Start the server in the background
|
||||
$RFS_BIN server --config-path "$SERVER_CONFIG_FILE" > "$TEST_DIR/server.log" 2>&1 &
|
||||
|
||||
# Save the PID
|
||||
echo $! > "$SERVER_PID_FILE"
|
||||
|
||||
# Wait for the server to start
|
||||
echo "Waiting for server to start..."
|
||||
sleep 3
|
||||
|
||||
# Check if the server is running
|
||||
if ! curl -s "$SERVER_URL/health" > /dev/null; then
|
||||
echo -e "${RED}Failed to start server${NC}"
|
||||
cat "$TEST_DIR/server.log"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}Server started successfully${NC}"
|
||||
}
|
||||
|
||||
# Setup function
|
||||
setup() {
|
||||
echo "Setting up test directories..."
|
||||
mkdir -p "$TEST_DIR" "$CACHE_DIR" "$SOURCE_DIR" "$DEST_DIR" "$UPLOAD_DIR" "$DOWNLOAD_DIR" "$STORE_DIR" "$SERVER_STORAGE"
|
||||
|
||||
# Create test files of different sizes
|
||||
echo "Creating test files..."
|
||||
|
||||
# Small file
|
||||
echo -e "${YELLOW}Creating small test file (${SMALL_FILE_SIZE_MB}MB)...${NC}"
|
||||
dd if=/dev/urandom of="$SOURCE_DIR/small_file.bin" bs=1M count=$SMALL_FILE_SIZE_MB status=none
|
||||
|
||||
# Medium file
|
||||
echo -e "${YELLOW}Creating medium test file (${MEDIUM_FILE_SIZE_MB}MB)...${NC}"
|
||||
dd if=/dev/urandom of="$SOURCE_DIR/medium_file.bin" bs=1M count=$MEDIUM_FILE_SIZE_MB status=none
|
||||
|
||||
# Large file
|
||||
echo -e "${YELLOW}Creating large test file (${LARGE_FILE_SIZE_MB}MB)...${NC}"
|
||||
dd if=/dev/urandom of="$SOURCE_DIR/large_file.bin" bs=1M count=$LARGE_FILE_SIZE_MB status=none
|
||||
|
||||
# Create a directory with multiple files
|
||||
mkdir -p "$SOURCE_DIR/multi_files"
|
||||
for i in {1..5}; do
|
||||
dd if=/dev/urandom of="$SOURCE_DIR/multi_files/file_$i.bin" bs=512K count=1 status=none
|
||||
done
|
||||
|
||||
# Create a nested directory structure
|
||||
mkdir -p "$SOURCE_DIR/nested/dir1/dir2"
|
||||
echo "Test content 1" > "$SOURCE_DIR/nested/file1.txt"
|
||||
echo "Test content 2" > "$SOURCE_DIR/nested/dir1/file2.txt"
|
||||
echo "Test content 3" > "$SOURCE_DIR/nested/dir1/dir2/file3.txt"
|
||||
|
||||
echo "Test files created successfully"
|
||||
}
|
||||
|
||||
# Function to run a test and report result
|
||||
run_test() {
|
||||
local test_name="$1"
|
||||
local test_cmd="$2"
|
||||
|
||||
echo -e "\n${GREEN}Running test: $test_name${NC}"
|
||||
echo "Command: $test_cmd"
|
||||
|
||||
if eval "$test_cmd"; then
|
||||
echo -e "${GREEN}✓ Test passed: $test_name${NC}"
|
||||
return 0
|
||||
else
|
||||
echo -e "${RED}✗ Test failed: $test_name${NC}"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to measure execution time
|
||||
measure_time() {
|
||||
local start_time=$(date +%s.%N)
|
||||
"$@"
|
||||
local end_time=$(date +%s.%N)
|
||||
echo "$(echo "$end_time - $start_time" | bc)"
|
||||
}
|
||||
|
||||
# Test single file upload
|
||||
test_single_file_upload() {
|
||||
local file_path="$SOURCE_DIR/medium_file.bin"
|
||||
local file_name=$(basename "$file_path")
|
||||
local upload_time=$(measure_time $RFS_BIN upload "$file_path" -s "$SERVER_URL")
|
||||
|
||||
echo -e "Upload time for $file_name: ${YELLOW}$upload_time seconds${NC}"
|
||||
|
||||
# Verify the file was uploaded by checking if it exists in the store
|
||||
# In a real test, we would verify this by querying the server
|
||||
# For this test, we'll just check if the command succeeded
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Test directory upload
|
||||
test_directory_upload() {
|
||||
local dir_path="$SOURCE_DIR/multi_files"
|
||||
local upload_time=$(measure_time $RFS_BIN upload-dir "$dir_path" -s "$SERVER_URL")
|
||||
|
||||
echo -e "Upload time for directory: ${YELLOW}$upload_time seconds${NC}"
|
||||
|
||||
# Verify the directory was uploaded
|
||||
# In a real test, we would verify this by querying the server
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Test nested directory upload
|
||||
test_nested_directory_upload() {
|
||||
local dir_path="$SOURCE_DIR/nested"
|
||||
local upload_time=$(measure_time $RFS_BIN upload-dir "$dir_path" -s "$SERVER_URL" --create-flist)
|
||||
|
||||
echo -e "Upload time for nested directory with flist: ${YELLOW}$upload_time seconds${NC}"
|
||||
|
||||
# Verify the directory was uploaded and flist was created
|
||||
# In a real test, we would verify this by querying the server
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Test single file download
|
||||
test_single_file_download() {
|
||||
# First, upload a file to get its hash
|
||||
local file_path="$SOURCE_DIR/medium_file.bin"
|
||||
local file_name=$(basename "$file_path")
|
||||
echo -e "\n${GREEN}Uploading file to get hash: $file_path${NC}"
|
||||
local upload_output
|
||||
upload_output=$($RFS_BIN upload "$file_path" -s "$SERVER_URL" 2>&1)
|
||||
echo "$upload_output"
|
||||
|
||||
# Extract the file hash from the upload output
|
||||
local file_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)
|
||||
|
||||
if [ -z "$file_hash" ]; then
|
||||
echo -e "${RED}Failed to get file hash from upload${NC}"
|
||||
echo -e "${RED}Upload output: ${NC}"
|
||||
echo "$upload_output"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "File hash: $file_hash"
|
||||
|
||||
# Now download the file using its hash
|
||||
local download_path="$DOWNLOAD_DIR/$file_name"
|
||||
local download_time=$(measure_time $RFS_BIN download "$file_hash" -o "$download_path" -s "$SERVER_URL")
|
||||
|
||||
echo -e "Download time for $file_name: ${YELLOW}$download_time seconds${NC}"
|
||||
|
||||
# Verify the file was downloaded correctly
|
||||
if [ ! -f "$download_path" ]; then
|
||||
echo -e "${RED}Downloaded file does not exist${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Compare the original and downloaded files
|
||||
if ! cmp -s "$file_path" "$download_path"; then
|
||||
echo -e "${RED}Downloaded file does not match original${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}Downloaded file matches original${NC}"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Test directory download
|
||||
test_directory_download() {
|
||||
# First, upload a directory with flist to get its hash
|
||||
local dir_path="$SOURCE_DIR/nested"
|
||||
echo -e "\n${GREEN}Uploading directory with flist to get hash: $dir_path${NC}"
|
||||
local upload_output
|
||||
upload_output=$($RFS_BIN upload-dir "$dir_path" -s "$SERVER_URL" --create-flist 2>&1)
|
||||
echo "$upload_output"
|
||||
|
||||
# Extract the flist hash from the upload output
|
||||
local flist_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)
|
||||
|
||||
if [ -z "$flist_hash" ]; then
|
||||
echo -e "${RED}Failed to get flist hash from upload${NC}"
|
||||
echo -e "${RED}Upload output: ${NC}"
|
||||
echo "$upload_output"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "Flist hash: $flist_hash"
|
||||
|
||||
# Now download the directory using the flist hash
|
||||
local download_dir="$DOWNLOAD_DIR/nested"
|
||||
mkdir -p "$download_dir"
|
||||
|
||||
local download_time=$(measure_time $RFS_BIN download-dir "$flist_hash" -o "$download_dir" -s "$SERVER_URL")
|
||||
|
||||
echo -e "Download time for directory: ${YELLOW}$download_time seconds${NC}"
|
||||
|
||||
# Verify the directory was downloaded correctly
|
||||
if [ ! -d "$download_dir" ]; then
|
||||
echo -e "${RED}Downloaded directory does not exist${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Compare the original and downloaded directories
|
||||
if ! diff -r "$dir_path" "$download_dir"; then
|
||||
echo -e "${RED}Downloaded directory does not match original${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}Downloaded directory matches original${NC}"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Test parallel upload performance
|
||||
test_parallel_upload_performance() {
|
||||
echo -e "\n${GREEN}Testing parallel upload performance...${NC}"
|
||||
|
||||
# Create a directory with many small files for testing parallel upload
|
||||
local parallel_dir="$SOURCE_DIR/parallel_test"
|
||||
mkdir -p "$parallel_dir"
|
||||
|
||||
echo -e "${YELLOW}Creating 20 small files for parallel upload test...${NC}"
|
||||
for i in {1..20}; do
|
||||
dd if=/dev/urandom of="$parallel_dir/file_$i.bin" bs=512K count=1 status=none
|
||||
echo -ne "\rCreated $i/20 files"
|
||||
done
|
||||
echo -e "\nTest files created successfully"
|
||||
|
||||
# Test with default parallel upload (PARALLEL_UPLOAD=20)
|
||||
echo -e "${YELLOW}Testing with default parallel upload...${NC}"
|
||||
local parallel_time=$(measure_time $RFS_BIN upload-dir "$parallel_dir" -s "$SERVER_URL")
|
||||
|
||||
# Test with reduced parallelism
|
||||
echo -e "${YELLOW}Testing with reduced parallelism...${NC}"
|
||||
local serial_time=$(measure_time env RFS_PARALLEL_UPLOAD=1 $RFS_BIN upload-dir "$parallel_dir" -s "$SERVER_URL")
|
||||
|
||||
echo -e "Serial upload time: ${YELLOW}$serial_time seconds${NC}"
|
||||
echo -e "Parallel upload time: ${YELLOW}$parallel_time seconds${NC}"
|
||||
|
||||
# Calculate speedup
|
||||
local speedup=$(echo "scale=2; $serial_time / $parallel_time" | bc)
|
||||
echo -e "Speedup: ${GREEN}${speedup}x${NC}"
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Test parallel download performance
|
||||
test_parallel_download_performance() {
|
||||
echo -e "\n${GREEN}Testing parallel download performance...${NC}"
|
||||
|
||||
# First, upload a directory with many files to get its hash
|
||||
local parallel_dir="$SOURCE_DIR/parallel_test"
|
||||
echo -e "\n${GREEN}Uploading directory with flist for parallel test: $parallel_dir${NC}"
|
||||
local upload_output
|
||||
upload_output=$($RFS_BIN upload-dir "$parallel_dir" -s "$SERVER_URL" --create-flist 2>&1)
|
||||
echo "$upload_output"
|
||||
|
||||
# Extract the flist hash from the upload output
|
||||
local flist_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)
|
||||
|
||||
if [ -z "$flist_hash" ]; then
|
||||
echo -e "${RED}Failed to get flist hash from upload${NC}"
|
||||
echo -e "${RED}Upload output: ${NC}"
|
||||
echo "$upload_output"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "Flist hash: $flist_hash"
|
||||
|
||||
# Test with default parallel download (PARALLEL_DOWNLOAD=20)
|
||||
echo -e "${YELLOW}Testing with default parallel download...${NC}"
|
||||
local download_dir_parallel="$DOWNLOAD_DIR/parallel"
|
||||
mkdir -p "$download_dir_parallel"
|
||||
local parallel_time=$(measure_time $RFS_BIN download-dir "$flist_hash" -o "$download_dir_parallel" -s "$SERVER_URL")
|
||||
|
||||
# Test with reduced parallelism
|
||||
echo -e "${YELLOW}Testing with reduced parallelism...${NC}"
|
||||
local download_dir_serial="$DOWNLOAD_DIR/serial"
|
||||
mkdir -p "$download_dir_serial"
|
||||
local serial_time=$(measure_time env RFS_PARALLEL_DOWNLOAD=1 $RFS_BIN download-dir "$flist_hash" -o "$download_dir_serial" -s "$SERVER_URL")
|
||||
|
||||
echo -e "Serial download time: ${YELLOW}$serial_time seconds${NC}"
|
||||
echo -e "Parallel download time: ${YELLOW}$parallel_time seconds${NC}"
|
||||
|
||||
# Calculate speedup
|
||||
local speedup=$(echo "scale=2; $serial_time / $parallel_time" | bc)
|
||||
echo -e "Speedup: ${GREEN}${speedup}x${NC}"
|
||||
|
||||
# Verify downloaded directories match
|
||||
if ! diff -r "$download_dir_serial" "$download_dir_parallel"; then
|
||||
echo -e "${RED}Downloaded directories don't match between serial and parallel methods${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}Downloaded directories match between methods${NC}"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Test upload with different block sizes
test_block_size_impact() {
    echo -e "\n${GREEN}Testing impact of block size on upload/download...${NC}"

    local file_path="$SOURCE_DIR/large_file.bin"
    local file_name=$(basename "$file_path")

    # Test with different block sizes
    for block_size in 256 512 1024 2048; do
        echo -e "${YELLOW}Testing with block size: ${block_size}KB${NC}"

        # Upload with specific block size (timed run)
        echo -e "\n${GREEN}Uploading file with ${block_size}KB blocks: $file_path${NC}"
        local upload_time=$(measure_time $RFS_BIN upload "$file_path" -s "$SERVER_URL" -b $((block_size * 1024)))

        # Repeat the upload to capture its output for hash extraction
        local upload_output
        upload_output=$($RFS_BIN upload "$file_path" -s "$SERVER_URL" -b $((block_size * 1024)) 2>&1)
        echo "$upload_output"

        # Extract the file hash from the upload output
        local file_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)

        if [ -z "$file_hash" ]; then
            echo -e "${RED}Failed to get file hash from upload with ${block_size}KB blocks${NC}"
            echo -e "${RED}Upload output: ${NC}"
            echo "$upload_output"
            continue
        fi

        echo -e "Upload time with ${block_size}KB blocks: ${YELLOW}$upload_time seconds${NC}"

        # Download with the same hash
        local download_path="$DOWNLOAD_DIR/${block_size}kb_${file_name}"
        local download_time=$(measure_time $RFS_BIN download "$file_hash" -o "$download_path" -s "$SERVER_URL")

        echo -e "Download time with ${block_size}KB blocks: ${YELLOW}$download_time seconds${NC}"

        # Verify the file was downloaded correctly
        if ! cmp -s "$file_path" "$download_path"; then
            echo -e "${RED}Downloaded file with ${block_size}KB blocks does not match original${NC}"
            return 1
        fi
    done

    echo -e "${GREEN}All block size tests passed${NC}"
    return 0
}

# Test exists command
test_exists_command() {
    echo -e "\n${GREEN}Testing exists command...${NC}"

    # First, upload a file to check
    local file_path="$SOURCE_DIR/medium_file.bin"

    echo -e "\n${GREEN}Uploading file to check existence: $file_path${NC}"
    local upload_output
    upload_output=$($RFS_BIN upload "$file_path" -s "$SERVER_URL" 2>&1)
    echo "$upload_output"

    # Extract the file hash from the upload output
    local file_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)

    if [ -z "$file_hash" ]; then
        echo -e "${RED}Failed to get file hash from upload${NC}"
        echo -e "${RED}Upload output: ${NC}"
        echo "$upload_output"
        return 1
    fi

    # Test exists command with file path
    echo -e "\n${GREEN}Testing exists with file path${NC}"
    run_test "Exists command with file path" "$RFS_BIN exists \"$file_path\" -s \"$SERVER_URL\""

    # Test exists command with hash
    echo -e "\n${GREEN}Testing exists with hash${NC}"
    run_test "Exists command with hash" "$RFS_BIN exists \"$file_hash\" -s \"$SERVER_URL\""

    # Test exists command with non-existent file
    echo -e "\n${GREEN}Testing exists with non-existent file${NC}"
    local non_existent_file="$SOURCE_DIR/non_existent_file.bin"
    touch "$non_existent_file"
    echo "This file should not exist on the server" > "$non_existent_file"

    # This should report that the file doesn't exist, but the command should succeed
    run_test "Exists command with non-existent file" "$RFS_BIN exists \"$non_existent_file\" -s \"$SERVER_URL\""

    return 0
}

# Test website-publish command
test_website_publish() {
    echo -e "\n${GREEN}Testing website-publish command...${NC}"

    # Create a simple website in a temporary directory
    local website_dir="$SOURCE_DIR/website"
    mkdir -p "$website_dir"

    # Create index.html
    cat > "$website_dir/index.html" << EOF
<!DOCTYPE html>
<html>
<head>
    <title>Test Website</title>
    <link rel="stylesheet" href="style.css">
</head>
<body>
    <h1>Test Website</h1>
    <p>This is a test website for RFS.</p>
    <img src="image.png" alt="Test Image">
</body>
</html>
EOF

    # Create style.css
    cat > "$website_dir/style.css" << EOF
body {
    font-family: Arial, sans-serif;
    margin: 0;
    padding: 20px;
    background-color: #f0f0f0;
}
h1 {
    color: #333;
}
EOF

    # Create a placeholder image (random data; not a valid PNG, which is fine for this test)
    dd if=/dev/urandom bs=1024 count=10 | base64 > "$website_dir/image.png"

    # Publish the website
    echo -e "\n${GREEN}Publishing website: $website_dir${NC}"
    local publish_output
    publish_output=$($RFS_BIN website-publish "$website_dir" -s "$SERVER_URL" 2>&1)
    echo "$publish_output"

    # Extract the website hash and URL from the output
    local website_hash=$(echo "$publish_output" | grep -o "Website hash: [a-f0-9]*" | cut -d' ' -f3)
    local website_url=$(echo "$publish_output" | grep -o "Website URL: .*" | cut -d' ' -f3)

    if [ -z "$website_hash" ]; then
        echo -e "${RED}Failed to get website hash from publish output${NC}"
        echo -e "${RED}Publish output: ${NC}"
        echo "$publish_output"
        return 1
    fi

    echo -e "Website hash: ${YELLOW}$website_hash${NC}"
    echo -e "Website URL: ${YELLOW}$website_url${NC}"

    # Verify the website is accessible
    echo -e "\n${GREEN}Verifying website is accessible...${NC}"
    if curl -s "$website_url" | grep -q "Test Website"; then
        echo -e "${GREEN}Website is accessible${NC}"
    else
        echo -e "${RED}Website is not accessible${NC}"
        return 1
    fi

    return 0
}

# Test sync command
test_sync_command() {
    echo -e "\n${GREEN}Testing sync command...${NC}"

    # We need a second server to test sync
    # For this test, we'll create a second server configuration and start it
    local SERVER2_PORT=8081
    local SERVER2_URL="http://localhost:$SERVER2_PORT"
    local SERVER2_STORAGE="$TEST_DIR/server2_storage"
    local SERVER2_PID_FILE="$TEST_DIR/server2.pid"
    local SERVER2_CONFIG_FILE="$TEST_DIR/server2_config.toml"

    # Create second server storage directory
    mkdir -p "$SERVER2_STORAGE"

    # Create second server configuration
    cat > "$SERVER2_CONFIG_FILE" << EOF
# Server configuration for e2e tests (server 2)
host="0.0.0.0"
port=8081
store_url=["dir:///tmp/store1"]
flist_dir="flists"
sqlite_path="fl-server2.db"
storage_dir="storage"
# bloc_size=

jwt_secret="secret"
jwt_expire_hours=5

# users
[[users]]
username = "admin"
password = "admin"
EOF

    # Start the second server
    echo -e "\n${GREEN}Starting second test server on port $SERVER2_PORT...${NC}"
    $RFS_BIN server --config-path "$SERVER2_CONFIG_FILE" > "$TEST_DIR/server2.log" 2>&1 &
    echo $! > "$SERVER2_PID_FILE"

    # Wait for the server to start
    echo "Waiting for second server to start..."
    sleep 3

    # Check if the server is running
    if ! curl -s "$SERVER2_URL/health" > /dev/null; then
        echo -e "${RED}Failed to start second server${NC}"
        cat "$TEST_DIR/server2.log"
        return 1
    fi

    echo -e "${GREEN}Second server started successfully${NC}"

    # Upload a file to the first server
    local file_path="$SOURCE_DIR/medium_file.bin"

    echo -e "\n${GREEN}Uploading file to first server: $file_path${NC}"
    local upload_output
    upload_output=$($RFS_BIN upload "$file_path" -s "$SERVER_URL" 2>&1)
    echo "$upload_output"

    # Extract the file hash from the upload output
    local file_hash=$(echo "$upload_output" | grep -o "hash: [a-f0-9]*" | cut -d' ' -f2)

    if [ -z "$file_hash" ]; then
        echo -e "${RED}Failed to get file hash from upload${NC}"
        echo -e "${RED}Upload output: ${NC}"
        echo "$upload_output"
        return 1
    fi

    # Verify the file exists on the first server but not on the second
    echo -e "\n${GREEN}Verifying file exists on first server but not on second...${NC}"
    $RFS_BIN exists "$file_hash" -s "$SERVER_URL"
    $RFS_BIN exists "$file_hash" -s "$SERVER2_URL" || true # This should fail, but we don't want to exit

    # Sync the file from the first server to the second
    echo -e "\n${GREEN}Syncing file from first server to second...${NC}"
    run_test "Sync command with hash" "$RFS_BIN sync -h \"$file_hash\" -s \"$SERVER_URL\" -d \"$SERVER2_URL\""

    # Verify the file now exists on both servers
    echo -e "\n${GREEN}Verifying file now exists on both servers...${NC}"
    run_test "Exists on first server after sync" "$RFS_BIN exists \"$file_hash\" -s \"$SERVER_URL\""
    run_test "Exists on second server after sync" "$RFS_BIN exists \"$file_hash\" -s \"$SERVER2_URL\""

    # Test sync all blocks
    echo -e "\n${GREEN}Testing sync all blocks...${NC}"

    # Upload another file to the first server
    local file2_path="$SOURCE_DIR/small_file.bin"

    echo -e "\n${GREEN}Uploading second file to first server: $file2_path${NC}"
    local upload2_output
    upload2_output=$($RFS_BIN upload "$file2_path" -s "$SERVER_URL" 2>&1)
    echo "$upload2_output"

    # Sync all blocks from the first server to the second
    echo -e "\n${GREEN}Syncing all blocks from first server to second...${NC}"
    run_test "Sync command for all blocks" "$RFS_BIN sync -s \"$SERVER_URL\" -d \"$SERVER2_URL\""

    # Stop the second server
    if [ -f "$SERVER2_PID_FILE" ]; then
        echo "Stopping second test server..."
        kill $(cat "$SERVER2_PID_FILE") 2>/dev/null || true
        rm -f "$SERVER2_PID_FILE"
    fi

    return 0
}

# Main test function
main() {
    # Register cleanup on exit
    trap cleanup EXIT

    # Setup test environment
    setup

    # Start the server
    start_server

    # Run upload tests
    test_single_file_upload
    test_directory_upload
    test_nested_directory_upload

    # Run download tests
    test_single_file_download
    test_directory_download

    # Run performance tests
    test_parallel_upload_performance
    test_parallel_download_performance

    # Run block size impact tests
    test_block_size_impact

    # Run exists command tests
    test_exists_command

    # Run website-publish tests
    test_website_publish

    # Run sync command tests
    test_sync_command

    echo -e "\n${GREEN}All upload and download tests completed!${NC}"
}

# Run the main function
main
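Every timing in the suite above is captured with command substitution around the `measure_time` helper. If that helper follows the same pattern as the one in `performance_tests.sh` further down, the timed command's stdout is captured together with the elapsed time. A minimal sketch of a pollution-safe variant (the redirection choice is illustrative, not part of this commit):

```bash
# Sketch: keep only the elapsed time on stdout; route the timed command's
# own output to stderr so $(measure_time ...) captures just the number.
measure_time() {
    local start_time end_time
    start_time=$(date +%s.%N)
    "$@" >&2
    end_time=$(date +%s.%N)
    echo "$end_time - $start_time" | bc
}

# Usage: only the elapsed seconds land in the variable.
# upload_time=$(measure_time "$RFS_BIN" upload "$file_path" -s "$SERVER_URL")
```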
120 components/rfs/tests/parallel_download_test.rs Normal file
@@ -0,0 +1,120 @@
#[cfg(test)]
mod parallel_download_tests {
    use anyhow::Result;
    use std::path::Path;
    use std::time::Instant;
    use tempdir::TempDir;
    use tokio::runtime::Runtime;

    use rfs::cache::Cache;
    use rfs::fungi::{self, meta};
    use rfs::store::{self, dir::DirStore};
    use rfs::{pack, unpack};

    #[test]
    fn test_parallel_download() -> Result<()> {
        // Create a runtime for async operations
        let rt = Runtime::new()?;

        rt.block_on(async {
            // Create temporary directories
            let temp_dir = TempDir::new("parallel-test")?;
            let source_dir = temp_dir.path().join("source");
            let dest_dir_parallel = temp_dir.path().join("dest-parallel");
            let dest_dir_serial = temp_dir.path().join("dest-serial");
            let store_dir = temp_dir.path().join("store");
            let cache_dir = temp_dir.path().join("cache");

            std::fs::create_dir_all(&source_dir)?;
            std::fs::create_dir_all(&dest_dir_parallel)?;
            std::fs::create_dir_all(&dest_dir_serial)?;
            std::fs::create_dir_all(&store_dir)?;
            std::fs::create_dir_all(&cache_dir)?;

            // Create some test files
            create_test_files(&source_dir, 20, 1024 * 1024).await?; // 20 files of 1MB each

            // Create a store
            let store = DirStore::new(&store_dir).await?;

            // Create a flist writer
            let fl_path = temp_dir.path().join("test.fl");
            let writer = fungi::Writer::new(&fl_path, true).await?;

            // Pack the files
            pack(writer, store.clone(), &source_dir, true, None).await?;

            // Create a reader for the flist
            let reader = fungi::Reader::new(&fl_path).await?;
            let router = store::get_router(&reader).await?;

            // Test parallel download (default)
            let cache_parallel = Cache::new(&cache_dir, router.clone());
            let start_parallel = Instant::now();
            unpack(&reader, &cache_parallel, &dest_dir_parallel, false).await?;
            let parallel_duration = start_parallel.elapsed();

            // Clear cache directory
            std::fs::remove_dir_all(&cache_dir)?;
            std::fs::create_dir_all(&cache_dir)?;

            // Test serial download by setting PARALLEL_DOWNLOAD to 1
            // This is just a simulation since we can't easily modify the constant at runtime
            // In a real test, we would use a feature flag or environment variable
            let cache_serial = Cache::new(&cache_dir, router);
            let start_serial = Instant::now();

            // Here we're still using the parallel implementation, but in a real test
            // we would use a version with PARALLEL_DOWNLOAD=1
            unpack(&reader, &cache_serial, &dest_dir_serial, false).await?;

            let serial_duration = start_serial.elapsed();

            // Print the results
            println!("Parallel download time: {:?}", parallel_duration);
            println!("Serial download time: {:?}", serial_duration);

            // Verify files were unpacked correctly
            verify_directories(&source_dir, &dest_dir_parallel)?;
            verify_directories(&source_dir, &dest_dir_serial)?;

            Ok(())
        })
    }

    // Helper function to create test files
    async fn create_test_files(dir: &Path, count: usize, size: usize) -> Result<()> {
        use rand::{thread_rng, Rng};
        use tokio::fs::File;
        use tokio::io::AsyncWriteExt;

        for i in 0..count {
            let file_path = dir.join(format!("file_{}.bin", i));
            let mut file = File::create(&file_path).await?;

            // Create random data
            let mut data = vec![0u8; size];
            thread_rng().fill(&mut data[..]);

            // Write to file
            file.write_all(&data).await?;
            file.flush().await?;
        }

        Ok(())
    }

    // Helper function to verify directories match
    fn verify_directories(source: &Path, dest: &Path) -> Result<()> {
        use std::process::Command;

        let output = Command::new("diff")
            .arg("-r")
            .arg(source)
            .arg(dest)
            .output()?;

        assert!(output.status.success(), "Directories don't match");
        Ok(())
    }
}
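The comments in the Rust test above note that the serial path is only simulated because the parallelism constant cannot be changed at runtime. The shell suites in this commit already drive that comparison externally through the `RFS_PARALLEL_DOWNLOAD` environment variable; a minimal sketch of the same comparison against an existing flist (paths are illustrative, and the value 20 mirrors the default mentioned above):

```bash
# Assumes test.fl was produced by `rfs pack`, as in performance_tests.sh below.
RFS_PARALLEL_DOWNLOAD=1  rfs unpack -m test.fl -c cache-serial   dest-serial
RFS_PARALLEL_DOWNLOAD=20 rfs unpack -m test.fl -c cache-parallel dest-parallel

# The two trees should be identical; only the wall-clock time differs.
diff -r dest-serial dest-parallel
```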
124 components/rfs/tests/performance_tests.sh Normal file
@@ -0,0 +1,124 @@
#!/bin/bash
set -e

# Colors for output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color

# Test directory
TEST_DIR="/tmp/rfs-performance-tests"
CACHE_DIR="$TEST_DIR/cache"
SOURCE_DIR="$TEST_DIR/source"
DEST_DIR_SERIAL="$TEST_DIR/destination-serial"
DEST_DIR_PARALLEL="$TEST_DIR/destination-parallel"
FLIST_PATH="$TEST_DIR/perf-test.fl"

# Store URL - using a local directory store for testing
STORE_DIR="$TEST_DIR/store"
STORE_URL="dir://$STORE_DIR"

# Number of files and file size for testing
NUM_FILES=100
FILE_SIZE_MB=1

# Clean up function
cleanup() {
    echo "Cleaning up test directories..."
    rm -rf "$TEST_DIR"
}

# Setup function
setup() {
    echo "Setting up test directories..."
    mkdir -p "$TEST_DIR" "$CACHE_DIR" "$SOURCE_DIR" "$DEST_DIR_SERIAL" "$DEST_DIR_PARALLEL" "$STORE_DIR"

    echo -e "${YELLOW}Creating $NUM_FILES test files of ${FILE_SIZE_MB}MB each...${NC}"
    for i in $(seq 1 $NUM_FILES); do
        dd if=/dev/urandom of="$SOURCE_DIR/file_$i.bin" bs=1M count=$FILE_SIZE_MB status=none
        echo -ne "\rCreated $i/$NUM_FILES files"
    done
    echo -e "\nTest files created successfully"
}

# Function to measure execution time
# The timed command's stdout is redirected to stderr so callers using
# $(measure_time ...) capture only the elapsed time, not the command output.
measure_time() {
    local start_time=$(date +%s.%N)
    "$@" >&2
    local end_time=$(date +%s.%N)
    echo "$end_time - $start_time" | bc
}

# Test pack performance
test_pack_performance() {
    echo -e "\n${GREEN}Testing pack performance...${NC}"

    local pack_time=$(measure_time rfs pack -m "$FLIST_PATH" -s "$STORE_URL" "$SOURCE_DIR")

    echo -e "Pack time: ${YELLOW}$pack_time seconds${NC}"

    # Verify the flist was created
    if [ ! -f "$FLIST_PATH" ]; then
        echo -e "${RED}Flist file was not created${NC}"
        return 1
    fi

    echo "Flist created successfully at $FLIST_PATH"
    return 0
}

# Test unpack performance with and without parallel download
test_unpack_performance() {
    echo -e "\n${GREEN}Testing unpack performance...${NC}"

    # Clear cache directory to ensure fair comparison
    rm -rf "$CACHE_DIR"
    mkdir -p "$CACHE_DIR"

    # Test with parallel download (default)
    echo -e "${YELLOW}Testing with parallel download...${NC}"
    local parallel_time=$(measure_time rfs unpack -m "$FLIST_PATH" -c "$CACHE_DIR" "$DEST_DIR_PARALLEL")

    # Clear cache directory again
    rm -rf "$CACHE_DIR"
    mkdir -p "$CACHE_DIR"

    # Temporarily disable parallel download by setting RFS_PARALLEL_DOWNLOAD to 1
    echo -e "${YELLOW}Testing with serial download...${NC}"
    local serial_time=$(measure_time env RFS_PARALLEL_DOWNLOAD=1 rfs unpack -m "$FLIST_PATH" -c "$CACHE_DIR" "$DEST_DIR_SERIAL")

    echo -e "Serial unpack time: ${YELLOW}$serial_time seconds${NC}"
    echo -e "Parallel unpack time: ${YELLOW}$parallel_time seconds${NC}"

    # Calculate speedup
    local speedup=$(echo "scale=2; $serial_time / $parallel_time" | bc)
    echo -e "Speedup: ${GREEN}${speedup}x${NC}"

    # Verify files were unpacked correctly
    if ! diff -r "$DEST_DIR_SERIAL" "$DEST_DIR_PARALLEL" > /dev/null; then
        echo -e "${RED}Unpacked files don't match between serial and parallel methods${NC}"
        return 1
    fi

    echo "Files unpacked successfully and match between methods"
    return 0
}

# Main test function
main() {
    # Register cleanup on exit
    trap cleanup EXIT

    # Setup test environment
    setup

    # Run performance tests
    test_pack_performance
    test_unpack_performance

    echo -e "\n${GREEN}All performance tests completed!${NC}"
}

# Run the main function
main
@@ -10,6 +10,9 @@ musl
eudev
kmod

# Console/terminal management
util-linux

# Essential networking (for Zero-OS connectivity)
iproute2
ethtool
@@ -24,6 +27,9 @@ zlib
# Network utilities (minimal)
dhcpcd

# Random number generation (for crypto/security)
haveged

# NO debugging tools, NO development tools, NO SSH, NO curl/wget
# NO python, NO redis, NO massive linux-firmware package
# These will be loaded from RFS after network connectivity
2 configs/zinit/cgroup.yaml Normal file
@@ -0,0 +1,2 @@
exec: sh /etc/zinit/init/cgroup.sh
oneshot: true

2 configs/zinit/console.yaml Normal file
@@ -0,0 +1,2 @@
exec: /sbin/getty -L 9600 console
restart: always

2 configs/zinit/getty.yaml Normal file
@@ -0,0 +1,2 @@
exec: /sbin/getty -L 115200 ttyS0 vt100
restart: always

2 configs/zinit/haveged.yaml Normal file
@@ -0,0 +1,2 @@
exec: haveged -w 1024 -d 32 -i 32 -v 1
oneshot: true
6 configs/zinit/init/ashloging.sh Normal file
@@ -0,0 +1,6 @@
#!/bin/bash

echo "start ash terminal"
while true; do
    getty -l /bin/ash -n 19200 tty2
done

10 configs/zinit/init/cgroup.sh Normal file
@@ -0,0 +1,10 @@
set -x

mount -t tmpfs cgroup_root /sys/fs/cgroup

subsys="pids cpuset cpu cpuacct blkio memory devices freezer net_cls perf_event net_prio hugetlb"

for sys in $subsys; do
    mkdir -p /sys/fs/cgroup/$sys
    mount -t cgroup $sys -o $sys /sys/fs/cgroup/$sys/
done
10 configs/zinit/init/modprobe.sh Normal file
@@ -0,0 +1,10 @@
#!/bin/bash

modprobe fuse
modprobe btrfs
modprobe tun
modprobe br_netfilter

echo never > /sys/kernel/mm/transparent_hugepage/enabled

ulimit -n 524288

10 configs/zinit/init/ntpd.sh Normal file
@@ -0,0 +1,10 @@
#!/bin/sh

ntp_flags=$(grep -o 'ntp=.*' /proc/cmdline | sed 's/^ntp=//')

params=""
if [ -n "$ntp_flags" ]; then
    params=$(echo "-p $ntp_flags" | sed s/,/' -p '/g)
fi

exec ntpd -n $params
4 configs/zinit/init/routing.sh Normal file
@@ -0,0 +1,4 @@
#!/bin/bash

echo "Enable ip forwarding"
echo 1 > /proc/sys/net/ipv4/ip_forward

15 configs/zinit/init/sshd-setup.sh Normal file
@@ -0,0 +1,15 @@
#!/bin/ash
if [ -f /etc/ssh/ssh_host_rsa_key ]; then
    # ensure existing file permissions
    chown root:root /etc/ssh/ssh_host_*
    chmod 600 /etc/ssh/ssh_host_*
    exit 0
fi

echo "Setting up sshd"
mkdir -p /run/sshd

ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa
ssh-keygen -f /etc/ssh/ssh_host_dsa_key -N '' -t dsa
ssh-keygen -f /etc/ssh/ssh_host_ecdsa_key -N '' -t ecdsa -b 521
ssh-keygen -f /etc/ssh/ssh_host_ed25519_key -N '' -t ed25519
4 configs/zinit/init/udev.sh Normal file
@@ -0,0 +1,4 @@
#!/bin/sh

udevadm trigger --action=add
udevadm settle

2 configs/zinit/local-modprobe.yaml Normal file
@@ -0,0 +1,2 @@
exec: sh /etc/zinit/init/modprobe.sh
oneshot: true

3 configs/zinit/ntp.yaml Normal file
@@ -0,0 +1,3 @@
exec: sh /etc/zinit/init/ntpd.sh
after:
  - internet

2 configs/zinit/routing.yaml Normal file
@@ -0,0 +1,2 @@
exec: sh /etc/zinit/init/routing.sh
oneshot: true

2 configs/zinit/sshd-setup.yaml Normal file
@@ -0,0 +1,2 @@
exec: sh /etc/zinit/init/sshd-setup.sh
oneshot: true
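The `exec:` lines in these zinit units reference helper scripts under /etc/zinit/init/, so the initramfs build has to stage both the unit files and their scripts into the image. A minimal sketch of that staging step, assuming a build script with an $INITRAMFS_ROOT staging directory (the variable name and copy layout are illustrative, not taken from this commit):

```bash
# Stage zinit service units and their init helpers into the initramfs root.
INITRAMFS_ROOT="${INITRAMFS_ROOT:-/tmp/initramfs-root}"

mkdir -p "$INITRAMFS_ROOT/etc/zinit/init"
cp configs/zinit/*.yaml    "$INITRAMFS_ROOT/etc/zinit/"
cp configs/zinit/init/*.sh "$INITRAMFS_ROOT/etc/zinit/init/"
chmod +x "$INITRAMFS_ROOT"/etc/zinit/init/*.sh
```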
Some files were not shown because too many files have changed in this diff.