feat: Create minimal Zero-OS initramfs with console support
- Fixed build system to clone source repositories instead of downloading binaries
- Enhanced scripts/fetch-github.sh with proper git repo cloning and branch handling
- Updated scripts/compile-components.sh for RFS compilation with the build-binary feature
- Added minimal firmware installation for essential network drivers (73 modules)
- Created comprehensive zinit configuration set (15 config files including getty)
- Added util-linux package for getty/agetty console support
- Optimized package selection for minimal 27MB initramfs footprint
- Successfully builds bootable vmlinuz.efi with embedded initramfs
- Confirmed working: VM boot, console login, network drivers, zinit init system

Components:
- initramfs.cpio.xz: 27MB compressed minimal Zero-OS image
- vmlinuz.efi: 35MB bootable kernel with embedded initramfs
- Complete Zero-OS toolchain: zinit, rfs, mycelium compiled from source
components/rfs/src/clone.rs (new file, 128 lines)
@@ -0,0 +1,128 @@
use crate::{
    cache::Cache,
    fungi::{meta::Block, Reader, Result},
    store::{BlockStore, Store},
};
use anyhow::Error;
use futures::lock::Mutex;
use hex::ToHex;
use std::sync::Arc;
use tokio::io::AsyncReadExt;

const WORKERS: usize = 10;

pub async fn clone<S: Store>(reader: Reader, store: S, cache: Cache<S>) -> Result<()> {
    let failures = Arc::new(Mutex::new(Vec::new()));
    let cloner = BlobCloner::new(cache, store.into(), failures.clone());
    let mut workers = workers::WorkerPool::new(cloner, WORKERS);

    let mut offset = 0;
    loop {
        if !failures.lock().await.is_empty() {
            break;
        }
        let blocks = reader.all_blocks(1000, offset).await?;
        if blocks.is_empty() {
            break;
        }
        for block in blocks {
            offset += 1;
            let worker = workers.get().await;
            worker.send(block)?;
        }
    }

    workers.close().await;
    let failures = failures.lock().await;

    if failures.is_empty() {
        return Ok(());
    }

    log::error!("failed to clone one or more blocks");
    for (block, error) in failures.iter() {
        log::error!(" - failed to clone block {}: {}", block, error);
    }

    Err(crate::fungi::Error::Anyhow(anyhow::anyhow!(
        "failed to clone ({}) blocks",
        failures.len()
    )))
}

struct BlobCloner<S>
where
    S: Store,
{
    cache: Arc<Cache<S>>,
    store: Arc<BlockStore<S>>,
    failures: Arc<Mutex<Vec<(String, Error)>>>,
}

impl<S> Clone for BlobCloner<S>
where
    S: Store,
{
    fn clone(&self) -> Self {
        Self {
            cache: self.cache.clone(),
            store: self.store.clone(),
            failures: self.failures.clone(),
        }
    }
}

impl<S> BlobCloner<S>
where
    S: Store,
{
    fn new(
        cache: Cache<S>,
        store: BlockStore<S>,
        failures: Arc<Mutex<Vec<(String, Error)>>>,
    ) -> Self {
        Self {
            cache: Arc::new(cache),
            store: Arc::new(store),
            failures,
        }
    }
}

#[async_trait::async_trait]
impl<S> workers::Work for BlobCloner<S>
where
    S: Store,
{
    type Input = Block;
    type Output = ();

    async fn run(&mut self, block: Self::Input) -> Self::Output {
        let mut file = match self.cache.get(&block).await {
            Ok((_, f)) => f,
            Err(err) => {
                self.failures
                    .lock()
                    .await
                    .push((block.id.as_slice().encode_hex(), err));
                return;
            }
        };

        let mut data = Vec::new();
        if let Err(err) = file.read_to_end(&mut data).await {
            self.failures
                .lock()
                .await
                .push((block.id.as_slice().encode_hex(), err.into()));
            return;
        }
        if let Err(err) = self.store.set(&data).await {
            self.failures
                .lock()
                .await
                .push((block.id.as_slice().encode_hex(), err.into()));
            return;
        }
    }
}
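For reference, a minimal driver for the clone API above. This is an editor's sketch, not part of this commit: it assumes the crate is usable as a library named rfs, that Reader, Cache, get_router, parse_router (shown in config.rs and fungi/meta.rs in this diff), and clone are publicly reachable under these paths, and that a dir:// store URL is supported.

// Hypothetical usage sketch: copy every block an flist references
// from its recorded stores into a local directory store.
async fn clone_example() -> rfs::fungi::Result<()> {
    let reader = rfs::fungi::Reader::new("/tmp/example.fl").await?;
    // Source router: the stores recorded in the flist itself.
    let source = rfs::store::get_router(&reader).await?;
    // Destination: a local directory store (URL syntax is an assumption).
    let destination = rfs::store::parse_router(&["dir:///tmp/store".to_string()]).await?;
    // The cache fetches blocks from the source router.
    let cache = rfs::cache::Cache::new("/tmp/cache", source);
    rfs::clone(reader, destination, cache).await
}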
components/rfs/src/config.rs (new file, 72 lines)
@@ -0,0 +1,72 @@
use crate::{
    fungi::{meta::Tag, Reader, Result, Writer},
    store::{self, Store},
};

pub async fn tag_list(reader: Reader) -> Result<()> {
    let tags = reader.tags().await?;
    if !tags.is_empty() {
        println!("tags:");
    }
    for (key, value) in tags {
        println!("\t{}={}", key, value);
    }
    Ok(())
}

pub async fn tag_add(writer: Writer, tags: Vec<(String, String)>) -> Result<()> {
    for (key, value) in tags {
        writer.tag(Tag::Custom(key.as_str()), value).await?;
    }
    Ok(())
}

pub async fn tag_delete(writer: Writer, keys: Vec<String>, all: bool) -> Result<()> {
    if all {
        writer.delete_tags().await?;
        return Ok(());
    }
    for key in keys {
        writer.delete_tag(Tag::Custom(key.as_str())).await?;
    }
    Ok(())
}

pub async fn store_list(reader: Reader) -> Result<()> {
    let routes = reader.routes().await?;
    if !routes.is_empty() {
        println!("routes:")
    }
    for route in routes {
        println!(
            "\trange:[{}-{}] store:{}",
            route.start, route.end, route.url
        );
    }
    Ok(())
}

pub async fn store_add(writer: Writer, stores: Vec<String>) -> Result<()> {
    let store = store::parse_router(stores.as_slice()).await?;
    for route in store.routes() {
        writer
            .route(
                route.start.unwrap_or(u8::MIN),
                route.end.unwrap_or(u8::MAX),
                route.url,
            )
            .await?;
    }
    Ok(())
}

pub async fn store_delete(writer: Writer, stores: Vec<String>, all: bool) -> Result<()> {
    if all {
        writer.delete_routes().await?;
        return Ok(());
    }
    for store in stores {
        writer.delete_route(store).await?;
    }
    Ok(())
}
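A short sketch of how these helpers compose. This is an editor's illustration under assumptions: fungi::Writer::new is the constructor shown in fungi/meta.rs later in this diff, Writer derives Clone, and the helpers are reachable from the crate root.

// Hypothetical usage sketch: edit tags on an existing flist in place.
async fn retag_example() -> rfs::fungi::Result<()> {
    // Open the existing flist without truncating it (remove = false).
    let writer = rfs::fungi::Writer::new("/tmp/example.fl", false).await?;
    rfs::config::tag_add(writer.clone(), vec![("author".into(), "ops".into())]).await?;
    rfs::config::tag_delete(writer, vec!["build-id".into()], false).await?;
    Ok(())
}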
components/rfs/src/docker.rs (new file, 345 lines)
@@ -0,0 +1,345 @@
use bollard::auth::DockerCredentials;
use bollard::container::{
    Config, CreateContainerOptions, InspectContainerOptions, RemoveContainerOptions,
};
use bollard::image::{CreateImageOptions, RemoveImageOptions};
use bollard::Docker;
use std::sync::mpsc::Sender;
use tempdir::TempDir;
use walkdir::WalkDir;

use anyhow::{Context, Result};
use futures_util::stream::StreamExt;
use serde_json::json;
use std::collections::HashMap;
use std::default::Default;
use std::fs;
use std::path::Path;
use std::process::Command;
use tokio_async_drop::tokio_async_drop;

use crate::fungi::Writer;
use crate::store::Store;

struct DockerInfo {
    image_name: String,
    container_name: String,
    docker: Docker,
}

impl Drop for DockerInfo {
    fn drop(&mut self) {
        tokio_async_drop!({
            let res = clean(&self.docker, &self.image_name, &self.container_name)
                .await
                .context("failed to clean docker image and container");

            if res.is_err() {
                log::error!(
                    "cleaning docker image and container failed with error: {:?}",
                    res.err()
                );
            }
        });
    }
}

pub struct DockerImageToFlist {
    meta: Writer,
    image_name: String,
    credentials: Option<DockerCredentials>,
    docker_tmp_dir: TempDir,
}

impl DockerImageToFlist {
    pub fn new(
        meta: Writer,
        image_name: String,
        credentials: Option<DockerCredentials>,
        docker_tmp_dir: TempDir,
    ) -> Self {
        Self {
            meta,
            image_name,
            credentials,
            docker_tmp_dir,
        }
    }

    pub fn files_count(&self) -> u32 {
        WalkDir::new(self.docker_tmp_dir.path()).into_iter().count() as u32
    }

    pub async fn prepare(&mut self) -> Result<()> {
        #[cfg(unix)]
        let docker = Docker::connect_with_socket_defaults().context("failed to create docker")?;

        let container_file =
            Path::file_stem(self.docker_tmp_dir.path()).expect("failed to get directory name");
        let container_name = container_file
            .to_str()
            .expect("failed to get container name")
            .to_owned();

        let docker_info = DockerInfo {
            image_name: self.image_name.to_owned(),
            container_name,
            docker,
        };

        extract_image(
            &docker_info.docker,
            &docker_info.image_name,
            &docker_info.container_name,
            self.docker_tmp_dir.path(),
            self.credentials.clone(),
        )
        .await
        .context("failed to extract docker image to a directory")?;
        log::info!(
            "docker image '{}' is extracted successfully",
            docker_info.image_name
        );

        Ok(())
    }

    pub async fn pack<S: Store>(&mut self, store: S, sender: Option<Sender<u32>>) -> Result<()> {
        crate::pack(
            self.meta.clone(),
            store,
            &self.docker_tmp_dir.path(),
            true,
            sender,
        )
        .await
        .context("failed to pack flist")?;

        log::info!("flist has been created successfully");
        Ok(())
    }

    pub async fn convert<S: Store>(&mut self, store: S, sender: Option<Sender<u32>>) -> Result<()> {
        self.prepare().await?;
        self.pack(store, sender).await?;

        Ok(())
    }
}

async fn extract_image(
    docker: &Docker,
    image_name: &str,
    container_name: &str,
    docker_tmp_dir_path: &Path,
    credentials: Option<DockerCredentials>,
) -> Result<()> {
    pull_image(docker, image_name, credentials).await?;
    create_container(docker, image_name, container_name)
        .await
        .context("failed to create docker container")?;
    export_container(container_name, docker_tmp_dir_path)
        .context("failed to export docker container")?;
    container_boot(docker, container_name, docker_tmp_dir_path)
        .await
        .context("failed to boot docker container")?;

    Ok(())
}

async fn pull_image(
    docker: &Docker,
    image_name: &str,
    credentials: Option<DockerCredentials>,
) -> Result<()> {
    log::info!("pulling docker image {}", image_name);

    let options = Some(CreateImageOptions {
        from_image: image_name,
        ..Default::default()
    });

    let mut image_pull_stream = docker.create_image(options, None, credentials);
    while let Some(msg) = image_pull_stream.next().await {
        msg.context("failed to pull docker image")?;
    }

    Ok(())
}

async fn create_container(docker: &Docker, image_name: &str, container_name: &str) -> Result<()> {
    log::debug!("Inspecting docker image configurations {}", image_name);

    let image = docker
        .inspect_image(image_name)
        .await
        .context("failed to inspect docker image")?;
    let image_config = image.config.context("failed to get docker image configs")?;

    let mut command = "";
    if image_config.cmd.is_none() && image_config.entrypoint.is_none() {
        command = "/bin/sh";
    }

    log::debug!("Creating a docker container {}", container_name);

    let options = Some(CreateContainerOptions {
        name: container_name,
        platform: None,
    });

    let config = Config {
        image: Some(image_name),
        hostname: Some(container_name),
        cmd: Some(vec![command]),
        ..Default::default()
    };

    docker
        .create_container(options, config)
        .await
        .context("failed to create docker temporary container")?;

    Ok(())
}

fn export_container(container_name: &str, docker_tmp_dir_path: &Path) -> Result<()> {
    log::debug!("Exporting docker container {}", container_name);

    Command::new("sh")
        .arg("-c")
        .arg(format!(
            "docker export {} | tar -xpf - -C {}",
            container_name,
            docker_tmp_dir_path.display()
        ))
        .output()
        .expect("failed to execute export docker container");

    Ok(())
}

async fn container_boot(
    docker: &Docker,
    container_name: &str,
    docker_tmp_dir_path: &Path,
) -> Result<()> {
    log::debug!(
        "Inspecting docker container configurations {}",
        container_name
    );

    let options = Some(InspectContainerOptions { size: false });
    let container = docker
        .inspect_container(container_name, options)
        .await
        .context("failed to inspect docker container")?;

    let container_config = container
        .config
        .context("failed to get docker container configs")?;

    let mut command = String::new();
    let mut args: Vec<String> = Vec::new();
    let mut env: HashMap<String, String> = HashMap::new();
    let mut cwd = String::from("/");

    if let Some(ref entrypoint) = container_config.entrypoint {
        if !entrypoint.is_empty() {
            command = entrypoint[0].to_string();
            for i in 1..entrypoint.len() {
                args.push(entrypoint[i].to_string());
            }
        }
    }

    if let Some(ref cmd) = container_config.cmd {
        if !cmd.is_empty() {
            if command.is_empty() {
                command = cmd[0].to_string();
                for i in 1..cmd.len() {
                    args.push(cmd[i].to_string());
                }
            } else {
                for i in 0..cmd.len() {
                    args.push(cmd[i].to_string());
                }
            }
        }
    }

    if command.is_empty() {
        command = String::from("/bin/sh");
    }

    if let Some(envs) = container_config.env {
        for entry in envs.iter() {
            if let Some((key, value)) = entry.split_once('=') {
                env.insert(key.to_string(), value.to_string());
            }
        }
    }

    if let Some(ref working_dir) = container_config.working_dir {
        if !working_dir.is_empty() {
            cwd = working_dir.to_string();
        }
    }

    let metadata = json!({
        "startup": {
            "entry": {
                "name": "core.system",
                "args": {
                    "name": command,
                    "args": args,
                    "env": env,
                    "dir": cwd,
                }
            }
        }
    });

    let toml_metadata: toml::Value = serde_json::from_str(&metadata.to_string())?;

    log::info!(
        "Creating '.startup.toml' file from container {} contains {}",
        container_name,
        toml_metadata.to_string()
    );

    fs::write(
        docker_tmp_dir_path.join(".startup.toml"),
        toml_metadata.to_string(),
    )?;

    Ok(())
}

async fn clean(docker: &Docker, image_name: &str, container_name: &str) -> Result<()> {
    log::debug!("Removing docker container {}", container_name);

    let options = Some(RemoveContainerOptions {
        force: true,
        ..Default::default()
    });

    docker
        .remove_container(container_name, options)
        .await
        .context("failed to remove docker container")?;

    log::debug!("Removing docker image {}", image_name);

    let options = Some(RemoveImageOptions {
        force: true,
        ..Default::default()
    });

    docker
        .remove_image(image_name, options, None)
        .await
        .context("failed to remove docker image")?;

    Ok(())
}
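A minimal usage sketch of the converter above, as an editor's illustration. Assumptions: the crate is reachable as rfs, parse_router accepts a dir:// store URL, and the tempdir crate's TempDir::new(prefix) is the constructor in use (as the import suggests); none of this is confirmed by the commit itself.

// Hypothetical usage sketch: convert a public image into an flist
// backed by a local directory store.
async fn docker_example() -> anyhow::Result<()> {
    // Truncate/create the target flist (remove = true).
    let meta = rfs::fungi::Writer::new("/tmp/alpine.fl", true).await?;
    let store = rfs::store::parse_router(&["dir:///tmp/store".to_string()]).await?;
    let tmp = tempdir::TempDir::new("docker-alpine")?;
    // No registry credentials needed for a public image, hence None.
    let mut conv =
        rfs::docker::DockerImageToFlist::new(meta, "alpine:latest".into(), None, tmp);
    conv.convert(store, None).await // prepare() then pack() in one call
}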
components/rfs/src/download.rs (new file, 335 lines)
@@ -0,0 +1,335 @@
use anyhow::{Context, Result};
use futures::{stream, StreamExt};
use std::path::Path;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio::sync::Semaphore;

use crate::server_api;
use crate::{cache, fungi, store};

const PARALLEL_DOWNLOAD: usize = 20; // Number of blocks to download in parallel

/// Downloads all blocks for a file or a single block and assembles them
pub async fn download<P: AsRef<Path>>(hash: &str, file_name: P, server_url: String) -> Result<()> {
    let file_name = file_name.as_ref();

    info!("Downloading blocks for hash: {}", hash);
    info!("Saving to: {}", file_name.display());

    let blocks = server_api::get_blocks_by_hash(hash, server_url.clone()).await?;

    if blocks.is_empty() {
        return Err(anyhow::anyhow!("No blocks found for hash: {}", hash));
    }

    // Store the number of blocks
    let blocks_count = blocks.len();

    // Create the file
    let mut file = File::create(file_name)
        .await
        .context("Failed to create output file")?;

    // Create a semaphore to limit concurrent downloads
    let semaphore = std::sync::Arc::new(Semaphore::new(PARALLEL_DOWNLOAD));

    // Download blocks in parallel
    info!(
        "Starting parallel download of {} blocks with concurrency {}",
        blocks_count, PARALLEL_DOWNLOAD
    );

    // Create a vector to store downloaded blocks in order
    let mut downloaded_blocks = vec![None; blocks_count];

    // Process blocks in parallel with limited concurrency
    let results = stream::iter(blocks.into_iter().enumerate())
        .map(|(i, (block_hash, block_index))| {
            let server_url = server_url.clone();
            let permit = semaphore.clone();

            async move {
                // Acquire a permit from the semaphore
                let _permit = permit
                    .acquire()
                    .await
                    .expect("Failed to acquire semaphore permit");

                info!("Downloading block {} (index: {})", block_hash, block_index);

                // Download the block
                server_api::download_block(&block_hash, &server_url)
                    .await
                    .map(|content| (i, content))
                    .map_err(|e| (i, e))
            }
        })
        .buffer_unordered(PARALLEL_DOWNLOAD)
        .collect::<Vec<_>>()
        .await;

    // Process results and write blocks to file in correct order
    for result in results {
        match result {
            Ok((index, content)) => {
                downloaded_blocks[index] = Some(content);
            }
            Err((index, e)) => {
                return Err(anyhow::anyhow!(
                    "Failed to download block at index {}: {}",
                    index,
                    e
                ));
            }
        }
    }

    // Write blocks to file in order
    for (i, block_opt) in downloaded_blocks.into_iter().enumerate() {
        if let Some(block_content) = block_opt {
            file.write_all(&block_content)
                .await
                .context(format!("Failed to write block at index {} to file", i))?;
        } else {
            return Err(anyhow::anyhow!("Missing block at index {}", i));
        }
    }

    info!("File downloaded successfully to {:?}", file_name);

    Ok(())
}

/// Downloads a directory by processing all files listed in its flist using the flist hash
pub async fn download_dir<P: AsRef<Path>>(
    flist_hash: &str,
    output_dir: P,
    server_url: String,
) -> Result<()> {
    let output_dir = output_dir.as_ref();

    info!("Downloading directory from flist with hash: {}", flist_hash);
    info!("Saving files to: {}", output_dir.display());

    // Download the flist file using its hash
    let temp_path = std::env::temp_dir().join(format!("{}.fl", flist_hash));
    download(flist_hash, &temp_path, server_url.clone()).await?;

    let meta = fungi::Reader::new(temp_path)
        .await
        .context("failed to initialize metadata database")?;

    let router = store::get_router(&meta).await?;
    let cache = cache::Cache::new("/tmp/cache", router);
    crate::unpack(&meta, &cache, output_dir, false).await?;

    info!("Directory download complete");
    Ok(())
}

/// Track blocks uploaded by the user and their download counts
/// If hash is provided, only track that specific block
/// Otherwise, track all user blocks
pub async fn track_blocks(
    server_url: &str,
    token: &str,
    hash: Option<&str>,
    details: bool,
) -> Result<()> {
    if let Some(block_hash) = hash {
        match server_api::get_block_downloads(server_url, block_hash).await {
            Ok(downloads) => {
                println!(
                    "{:<64} {:<10} {:<10}",
                    "BLOCK HASH", "DOWNLOADS", "SIZE (B)"
                );
                println!("{}", "-".repeat(85));
                println!(
                    "{:<64} {:<10} {:<10}",
                    downloads.block_hash, downloads.downloads_count, downloads.block_size
                );
            }
            Err(err) => {
                return Err(anyhow::anyhow!(
                    "Failed to get download count for block {}: {}",
                    block_hash,
                    err
                ));
            }
        }

        return Ok(());
    }

    // Track all user blocks
    let mut all_user_blocks = Vec::new();

    let first_page = server_api::get_user_blocks(server_url, token, Some(1), Some(50))
        .await
        .context("Failed to get user blocks")?;

    let total_pages = (first_page.total as f64 / 50.0).ceil() as u32;

    let mut tasks = Vec::new();
    for page in 1..=total_pages {
        let server_url = server_url.to_string();
        let token = token.to_string();
        tasks.push(tokio::spawn(async move {
            server_api::get_user_blocks(&server_url, &token, Some(page), Some(50)).await
        }));
    }

    for task in tasks {
        match task.await {
            Ok(Ok(blocks_per_page)) => {
                all_user_blocks.extend(blocks_per_page.blocks);
            }
            Ok(Err(err)) => {
                return Err(anyhow::anyhow!("Failed to get user blocks: {}", err));
            }
            Err(err) => {
                return Err(anyhow::anyhow!("Task failed: {}", err));
            }
        }
    }

    println!(
        "User has {} blocks out of {} total blocks on the server",
        all_user_blocks.len(),
        first_page.all_blocks
    );

    let block_hashes: Vec<String> = all_user_blocks
        .into_iter()
        .map(|(block_hash, _)| block_hash)
        .collect();
    print_block_downloads(server_url, block_hashes, details).await?;

    Ok(())
}

pub async fn print_block_downloads(
    server_url: &str,
    blocks: Vec<String>,
    details: bool,
) -> Result<()> {
    // Collect all block details first
    let mut block_details = Vec::new();
    let mut total_downloads = 0;
    let mut bandwidth = 0;

    for block_hash in blocks {
        match server_api::get_block_downloads(server_url, &block_hash).await {
            Ok(downloads) => {
                total_downloads += downloads.downloads_count;
                bandwidth += downloads.block_size * downloads.downloads_count;
                block_details.push(downloads);
            }
            Err(err) => {
                return Err(anyhow::anyhow!(
                    "Failed to get download count for block {}: {}",
                    block_hash,
                    err
                ));
            }
        }
    }

    // Print totals first
    println!("{}", "-".repeat(85));
    println!("TOTAL DOWNLOADS: {}", total_downloads);
    println!("BANDWIDTH: {} bytes", bandwidth);

    if details {
        println!("{}", "-".repeat(85));

        println!(
            "\n{:<64} {:<10} {:<10}",
            "BLOCK HASH", "DOWNLOADS", "SIZE (B)"
        );
        println!("{}", "-".repeat(85));

        for block in block_details {
            println!(
                "{:<64} {:<10} {:<10}",
                block.block_hash, block.downloads_count, block.block_size
            );
        }
    }

    Ok(())
}

pub async fn track_website(server_url: &str, flist_hash: &str, details: bool) -> Result<()> {
    // Temporarily disable logs for the upload function
    let original_level = log::max_level();
    log::set_max_level(log::LevelFilter::Off);

    let flist_blocks = server_api::get_blocks_by_hash(flist_hash, server_url.to_owned()).await?;

    if flist_blocks.is_empty() {
        return Err(anyhow::anyhow!("No blocks found for hash: {}", flist_hash));
    }

    // Download the flist file using its hash
    let temp_path = std::env::temp_dir().join(format!("{}.fl", flist_hash));
    download(flist_hash, &temp_path, server_url.to_owned()).await?;

    let meta = fungi::Reader::new(temp_path)
        .await
        .context("failed to initialize metadata database")?;

    let router = store::get_router(&meta).await?;
    let cache_dir = std::env::temp_dir().join("cache_blocks");
    let cache = cache::Cache::new(cache_dir.clone(), router);
    let temp_output_dir = std::env::temp_dir().join("output_dir");
    tokio::fs::create_dir_all(&temp_output_dir)
        .await
        .context("Failed to create temporary output directory")?;
    crate::unpack(&meta, &cache, &temp_output_dir, false).await?;

    // Restore the original log level
    log::set_max_level(original_level);

    let mut website_blocks = list_files_in_dir(cache_dir.clone())
        .await
        .context("Failed to list files in /tmp/cache directory")?;

    website_blocks.extend(flist_blocks.into_iter().map(|(block_hash, _)| block_hash));

    println!("Website has {} blocks on the server", website_blocks.len());
    print_block_downloads(&server_url, website_blocks, details).await?;

    // Delete the temporary directory
    tokio::fs::remove_dir_all(&temp_output_dir)
        .await
        .context("Failed to delete temporary output directory")?;
    tokio::fs::remove_dir_all(&cache_dir)
        .await
        .context("Failed to delete temporary cache directory")?;

    Ok(())
}

pub async fn list_files_in_dir<P: AsRef<Path>>(dir: P) -> Result<Vec<String>> {
    let dir = dir.as_ref();
    let mut file_names = Vec::new();

    let mut entries = tokio::fs::read_dir(dir)
        .await
        .context(format!("Failed to read directory: {}", dir.display()))?;

    while let Some(entry) = entries.next_entry().await.context("Failed to read entry")? {
        let path = entry.path();
        if path.is_dir() {
            let sub_dir_files = Box::pin(list_files_in_dir(path)).await?;
            file_names.extend(sub_dir_files);
            continue;
        }
        if let Ok(file_name) = entry.file_name().into_string() {
            file_names.push(file_name);
        }
    }
    Ok(file_names)
}
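For completeness, a two-line sketch of the directory-restore path above. This is an editor's illustration only; the server URL and hash placeholder are hypothetical, and the module path assumes download.rs is public in the rfs crate.

// Hypothetical usage sketch: fetch an flist by hash, then unpack the
// whole tree it describes into a local directory.
async fn restore_example() -> anyhow::Result<()> {
    let server = "http://localhost:8080".to_string(); // assumed server URL
    rfs::download::download_dir("<flist-hash>", "/tmp/restore", server).await
}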
components/rfs/src/exist.rs (new file, 89 lines)
@@ -0,0 +1,89 @@
use anyhow::Result;
use std::path::Path;
use tokio::fs::File;

use crate::upload::{split_file_into_blocks, BLOCK_SIZE};

use crate::server_api;

/// Checks if a file exists on the server by splitting it into blocks
pub async fn exists<P: AsRef<Path>>(
    file_path: P,
    server_url: String,
    block_size: Option<usize>,
) -> Result<()> {
    // Use provided block size or default
    let block_size = block_size.unwrap_or(BLOCK_SIZE);
    let file_path = file_path.as_ref();

    info!("Checking file: {}", file_path.display());
    debug!("Using block size: {} bytes", block_size);

    // Read the file size
    let file_size = File::open(file_path).await?.metadata().await?.len();

    info!("File size: {} bytes", file_size);
    info!("Splitting file into blocks of {} bytes", block_size);

    // Split file into blocks and calculate hashes
    let (blocks, _) = split_file_into_blocks(file_path, block_size).await?;
    info!("File split into {} blocks", blocks.len());

    // Create futures for all block checks
    let futures = blocks.iter().map(|block_hash| {
        let server_url = server_url.clone();
        let block_hash = block_hash.clone();
        async move {
            let result = server_api::check_block(&server_url, &block_hash).await;
            match result {
                Ok(true) => true, // Block exists
                Ok(false) => {
                    info!("Block with hash {} does not exist on server", block_hash);
                    false
                }
                Err(e) => {
                    info!("Error checking block {}: {}", block_hash, e);
                    false
                }
            }
        }
    });

    // Run all futures concurrently
    let results: Vec<bool> = futures::future::join_all(futures).await;

    // The file exists only if every one of its blocks was found
    let file_exists = results.into_iter().all(|block_exists| block_exists);

    if file_exists {
        info!("File exists on server");
    } else {
        info!("File does not exist on server");
    }

    Ok(())
}

/// Checks if a hash exists on the server
pub async fn exists_by_hash(hash: String, server_url: String) -> Result<()> {
    match server_api::get_blocks_by_hash(&hash, server_url.clone()).await {
        Ok(blocks) if !blocks.is_empty() => {
            info!("Hash exists on server\nHash: {}", hash);
        }
        // No blocks returned, or the lookup failed: treat both as absent
        Ok(_) | Err(_) => {
            info!("Hash does not exist on server");
        }
    }
    Ok(())
}
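A one-call sketch of the existence check above, as an editor's illustration; the server URL is a placeholder and the module path assumes exist.rs is public in the rfs crate.

// Hypothetical usage sketch: check whether a local file is already fully
// present on the server, overriding the default with a 1 MiB block size.
async fn exists_example() -> anyhow::Result<()> {
    rfs::exist::exists("/tmp/disk.img", "http://localhost:8080".into(), Some(1 << 20)).await
}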
components/rfs/src/flist_inspector.rs (new file, 84 lines)
@@ -0,0 +1,84 @@
use crate::fungi::meta::{FileType, Inode, Result, Walk, WalkVisitor};
use std::path::Path;

pub struct InspectVisitor {
    file_count: u32,
    dir_count: u32,
    link_count: u32,
    total_size: u64,
}

impl InspectVisitor {
    pub fn new() -> Self {
        Self {
            file_count: 0,
            dir_count: 0,
            link_count: 0,
            total_size: 0,
        }
    }

    pub fn print_summary(&self, target: &str) {
        println!("Flist Inspection: {}", target);
        println!("==================");
        println!("Files: {}", self.file_count);
        println!("Directories: {}", self.dir_count);
        println!("Symlinks: {}", self.link_count);
        println!("Total size: {} bytes", self.total_size);
    }

    fn print_metadata(&self, path: &Path, node: &Inode) {
        let file_type_str = match node.mode.file_type() {
            FileType::Dir => "Directory",
            FileType::Regular => "Regular File",
            FileType::Link => "Symbolic Link",
            FileType::Block => "Block Device",
            FileType::Char => "Character Device",
            FileType::Socket => "Socket",
            FileType::FIFO => "FIFO",
            FileType::Unknown => "Unknown",
        };

        println!("Path: {}", path.display());
        println!("  Type: {}", file_type_str);
        println!("  Inode: {}", node.ino);
        println!("  Name: {}", node.name);
        println!("  Size: {} bytes", node.size);
        println!("  UID: {}", node.uid);
        println!("  GID: {}", node.gid);
        println!("  Mode: 0{:o}", node.mode.mode());
        println!("  Permissions: 0{:o}", node.mode.permissions());
        println!("  Device: {}", node.rdev);
        println!("  Created: {}", node.ctime);
        println!("  Modified: {}", node.mtime);

        if let Some(data) = &node.data {
            if node.mode.file_type() == FileType::Link {
                if let Ok(target) = String::from_utf8(data.clone()) {
                    println!("  Link Target: {}", target);
                }
            } else {
                println!("  Extra Data: {} bytes", data.len());
            }
        }
        println!("  ---");
    }
}

#[async_trait::async_trait]
impl WalkVisitor for InspectVisitor {
    async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
        self.print_metadata(path, node);

        match node.mode.file_type() {
            FileType::Dir => self.dir_count += 1,
            FileType::Regular => {
                self.file_count += 1;
                self.total_size += node.size;
            }
            FileType::Link => self.link_count += 1,
            _ => {}
        }
        Ok(Walk::Continue)
    }
}
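The visitor above plugs into Reader::walk from fungi/meta.rs below. A minimal sketch of that wiring, assuming the rfs crate paths are public; the flist path is a placeholder.

// Hypothetical usage sketch: walk an flist depth-first, printing per-entry
// metadata, then the aggregate counts.
async fn inspect_example() -> rfs::fungi::meta::Result<()> {
    let reader = rfs::fungi::Reader::new("/tmp/example.fl").await?;
    let mut visitor = rfs::flist_inspector::InspectVisitor::new();
    reader.walk(&mut visitor).await?;
    visitor.print_summary("/tmp/example.fl");
    Ok(())
}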
components/rfs/src/fs/mod.rs (new file, 407 lines)
@@ -0,0 +1,407 @@
#![allow(clippy::unnecessary_mut_passed)]
#![deny(clippy::unimplemented, clippy::todo)]

use crate::cache;
use crate::fungi::{
    meta::{FileType, Inode},
    Reader,
};
use crate::store::Store;

use anyhow::{ensure, Context, Result};
use polyfuse::reply::FileAttr;
use polyfuse::{
    op,
    reply::{AttrOut, EntryOut, ReaddirOut, StatfsOut},
    KernelConfig, Operation, Request, Session,
};
use std::io::SeekFrom;
use std::sync::Arc;
use std::{io, path::PathBuf, time::Duration};
use tokio::fs::File;
use tokio::sync::Mutex;
use tokio::{
    io::{unix::AsyncFd, AsyncReadExt, AsyncSeekExt, Interest},
    task::{self, JoinHandle},
};

const CHUNK_SIZE: usize = 512 * 1024; // 512k and is hardcoded in the hub. the block_size value is not used
const TTL: Duration = Duration::from_secs(60 * 60 * 24 * 365);
const LRU_CAP: usize = 5; // Least Recently Used File Capacity
const FS_BLOCK_SIZE: u32 = 4 * 1024;

type FHash = [u8; 32];
type BlockSize = u64;

pub struct Filesystem<S>
where
    S: Store,
{
    meta: Reader,
    cache: Arc<cache::Cache<S>>,
    lru: Arc<Mutex<lru::LruCache<FHash, (File, BlockSize)>>>,
}

impl<S> Clone for Filesystem<S>
where
    S: Store,
{
    fn clone(&self) -> Self {
        Self {
            meta: self.meta.clone(),
            cache: Arc::clone(&self.cache),
            lru: Arc::clone(&self.lru),
        }
    }
}

impl<S> Filesystem<S>
where
    S: Store,
{
    pub fn new(meta: Reader, cache: cache::Cache<S>) -> Self {
        Filesystem {
            meta,
            cache: Arc::new(cache),
            lru: Arc::new(Mutex::new(lru::LruCache::new(LRU_CAP))),
        }
    }

    pub async fn mount<P>(&self, mnt: P) -> Result<()>
    where
        P: Into<PathBuf>,
    {
        let mountpoint: PathBuf = mnt.into();
        ensure!(mountpoint.is_dir(), "mountpoint must be a directory");
        let mut options = KernelConfig::default();
        options.mount_option(&format!(
            "ro,allow_other,fsname={},subtype=g8ufs,default_permissions",
            std::process::id()
        ));

        // polyfuse assumes an absolute path, see https://github.com/ubnt-intrepid/polyfuse/issues/83
        let fusermount_path =
            which::which("fusermount").context("looking up 'fusermount' in PATH")?;
        options.fusermount_path(fusermount_path);

        let session = AsyncSession::mount(mountpoint, options).await?;

        // release here
        while let Some(req) = session.next_request().await? {
            let fs = self.clone();

            let handler: JoinHandle<Result<()>> = task::spawn(async move {
                let result = match req.operation()? {
                    Operation::Lookup(op) => fs.lookup(&req, op).await,
                    Operation::Getattr(op) => fs.getattr(&req, op).await,
                    Operation::Read(op) => fs.read(&req, op).await,
                    Operation::Readdir(op) => fs.readdir(&req, op).await,
                    Operation::Readlink(op) => fs.readlink(&req, op).await,
                    Operation::Statfs(op) => fs.statfs(&req, op).await,
                    op => {
                        debug!("function is not implemented: {:?}", op);
                        Ok(req.reply_error(libc::ENOSYS)?)
                    }
                };

                if result.is_err() {
                    req.reply_error(libc::ENOENT)?;
                }

                Ok(())
            });

            drop(handler);
        }

        Ok(())
    }

    async fn statfs(&self, req: &Request, _op: op::Statfs<'_>) -> Result<()> {
        let mut out = StatfsOut::default();
        let stats = out.statfs();
        stats.bsize(FS_BLOCK_SIZE);
        req.reply(out)?;
        Ok(())
    }

    async fn readlink(&self, req: &Request, op: op::Readlink<'_>) -> Result<()> {
        let link = self.meta.inode(op.ino()).await?;
        if !link.mode.is(FileType::Link) {
            return Ok(req.reply_error(libc::ENOLINK)?);
        }

        if let Some(target) = link.data {
            req.reply(target)?;
            return Ok(());
        }

        Ok(req.reply_error(libc::ENOLINK)?)
    }

    async fn read(&self, req: &Request, op: op::Read<'_>) -> Result<()> {
        let entry = self.meta.inode(op.ino()).await?;

        if !entry.mode.is(FileType::Regular) {
            return Ok(req.reply_error(libc::EISDIR)?);
        };

        let offset = op.offset() as usize;
        let size = op.size() as usize;
        let chunk_size = CHUNK_SIZE; // file.block_size as usize;
        let chunk_index = offset / chunk_size;

        let blocks = self.meta.blocks(op.ino()).await?;

        if chunk_index >= blocks.len() || op.size() == 0 {
            // reading after the end of the file
            let data: &[u8] = &[];
            return Ok(req.reply(data)?);
        }

        // offset inside the file
        let mut offset = offset - (chunk_index * chunk_size);
        let mut buf: Vec<u8> = vec![0; size];
        let mut total = 0;

        'blocks: for block in blocks.iter().skip(chunk_index) {
            // hash works as a key inside the LRU
            let hash = block.id;

            // getting the file descriptor from the LRU or from the cache if not found in the LRU
            let lru = self.lru.lock().await.pop(&hash);

            let (mut fd, block_size) = match lru {
                Some((descriptor, bsize)) => {
                    debug!("lru hit");
                    (descriptor, bsize)
                }
                None => {
                    let (bsize, descriptor) = match self.cache.get(block).await {
                        Ok(out) => out,
                        Err(err) => {
                            error!("io cache error: {:#}", err);
                            return Ok(req.reply_error(libc::EIO)?);
                        }
                    };
                    (descriptor, bsize)
                }
            };

            // seek to the position <offset>
            fd.seek(SeekFrom::Start(offset as u64)).await?;

            let mut chunk_offset = offset as u64;

            loop {
                // read the file bytes into buf
                let read = match fd.read(&mut buf[total..]).await {
                    Ok(n) => n,
                    Err(err) => {
                        error!("read error: {:#}", err);
                        return Ok(req.reply_error(libc::EIO)?);
                    }
                };

                chunk_offset += read as u64;

                // calculate the total size and break if the required bytes (=size) downloaded
                total += read;

                if total >= size {
                    // if only part of the block read -> store it in the lruf
                    if chunk_offset < block_size {
                        let mut lruf = self.lru.lock().await;
                        lruf.put(hash, (fd, block_size));
                    }

                    break 'blocks;
                }

                // read = 0 means the EOF (end of the block)
                if read == 0 {
                    break;
                }
            }

            offset = 0;
        }

        Ok(req.reply(&buf[..size])?)
    }

    async fn getattr(&self, req: &Request, op: op::Getattr<'_>) -> Result<()> {
        log::debug!("getattr({})", op.ino());

        let entry = self.meta.inode(op.ino()).await?;

        let mut attr = AttrOut::default();

        let fill = attr.attr();
        entry.fill(fill);

        req.reply(attr)?;

        Ok(())
    }

    async fn readdir(&self, req: &Request, op: op::Readdir<'_>) -> Result<()> {
        log::debug!("readdir({})", op.ino());
        let root = self.meta.inode(op.ino()).await?;

        if !root.mode.is(FileType::Dir) {
            req.reply_error(libc::ENOTDIR)?;
            return Ok(());
        }

        let mut out = ReaddirOut::new(op.size() as usize);
        let mut offset = op.offset();

        let mut query_offset = offset;
        if offset == 0 {
            out.entry(".".as_ref(), op.ino(), libc::DT_DIR as u32, 1);
            out.entry(
                "..".as_ref(),
                match op.ino() {
                    1 => 1,
                    _ => root.parent,
                },
                libc::DT_DIR as u32,
                2,
            );
            offset = 2;
        } else {
            // we don't add the . and .. but
            // we also need to change the offset to
            query_offset -= 2;
        }

        let children = self.meta.children(root.ino, 10, query_offset).await?;
        for entry in children.iter() {
            offset += 1;

            let full = match entry.mode.file_type() {
                FileType::Dir => {
                    //let inode = self.meta.dir_inode(&sub.key).await?;
                    out.entry(entry.name.as_ref(), entry.ino, libc::DT_DIR as u32, offset)
                }
                FileType::Regular => {
                    out.entry(entry.name.as_ref(), entry.ino, libc::DT_REG as u32, offset)
                }
                FileType::Link => {
                    out.entry(entry.name.as_ref(), entry.ino, libc::DT_LNK as u32, offset)
                }
                _ => {
                    warn!("unknown entry");
                    false
                }
            };

            if full {
                break;
            }
        }

        Ok(req.reply(out)?)
    }

    async fn lookup(&self, req: &Request, op: op::Lookup<'_>) -> Result<()> {
        log::debug!("lookup(parent: {}, name: {:?})", op.parent(), op.name());
        let name = match op.name().to_str() {
            Some(name) => name,
            None => {
                req.reply_error(libc::ENOENT)?;
                return Ok(());
            }
        };

        let node = self.meta.lookup(op.parent(), name).await?;

        let node = match node {
            Some(node) => node,
            None => {
                req.reply_error(libc::ENOENT)?;
                return Ok(());
            }
        };
        let mut out = EntryOut::default();

        node.fill(out.attr());
        out.ino(node.ino);
        out.ttl_attr(TTL);
        out.ttl_entry(TTL);

        Ok(req.reply(out)?)
    }
}

// ==== AsyncSession ====

struct AsyncSession {
    inner: AsyncFd<Session>,
}

impl AsyncSession {
    async fn mount(mountpoint: PathBuf, config: KernelConfig) -> io::Result<Self> {
        tokio::task::spawn_blocking(move || {
            let session = Session::mount(mountpoint, config)?;
            Ok(Self {
                inner: AsyncFd::with_interest(session, Interest::READABLE)?,
            })
        })
        .await
        .expect("join error")
    }

    async fn next_request(&self) -> io::Result<Option<Request>> {
        use futures::{future::poll_fn, ready, task::Poll};

        poll_fn(|cx| {
            let mut guard = ready!(self.inner.poll_read_ready(cx))?;
            match self.inner.get_ref().next_request() {
                Err(err) if err.kind() == io::ErrorKind::WouldBlock => {
                    guard.clear_ready();
                    Poll::Pending
                }
                res => {
                    guard.retain_ready();
                    Poll::Ready(res)
                }
            }
        })
        .await
    }
}

trait AttributeFiller {
    fn fill(&self, attr: &mut FileAttr);
}

impl AttributeFiller for Inode {
    fn fill(&self, attr: &mut FileAttr) {
        attr.mode(self.mode.mode());

        attr.ino(self.ino);
        attr.ctime(Duration::from_secs(self.ctime as u64));
        attr.mtime(Duration::from_secs(self.mtime as u64));
        attr.uid(self.uid);
        attr.gid(self.gid);
        attr.size(self.size);
        attr.rdev(self.rdev as u32);
        attr.blksize(FS_BLOCK_SIZE);

        let mut blocks = self.size / 512;
        blocks += match self.size % 512 {
            0 => 0,
            _ => 1,
        };

        attr.blocks(blocks);

        match self.mode.file_type() {
            FileType::Dir => attr.nlink(2),
            FileType::Regular => attr.blksize(4 * 1024),
            _ => (),
        };
    }
}
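The mount entry point above takes a metadata Reader and a block Cache. A minimal sketch of bringing a filesystem up, as an editor's illustration; it assumes the rfs crate paths are public and that /mnt/example already exists (mount() requires an existing directory).

// Hypothetical usage sketch: serve an flist read-only over FUSE,
// caching fetched blocks under /tmp/cache.
async fn mount_example() -> anyhow::Result<()> {
    let meta = rfs::fungi::Reader::new("/tmp/example.fl").await?;
    let router = rfs::store::get_router(&meta).await?;
    let cache = rfs::cache::Cache::new("/tmp/cache", router);
    let fs = rfs::fs::Filesystem::new(meta, cache);
    fs.mount("/mnt/example").await // serves requests until the session ends
}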
components/rfs/src/fungi/meta.rs (new file, 653 lines)
@@ -0,0 +1,653 @@
use std::{
    collections::LinkedList,
    path::{Path, PathBuf},
};

use sqlx::{
    sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePoolOptions, SqliteRow},
    FromRow, Row, SqlitePool,
};

use crate::store;

const ID_LEN: usize = 32;
const KEY_LEN: usize = 32;
const TYPE_MASK: u32 = libc::S_IFMT;

#[repr(u32)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FileType {
    Regular = libc::S_IFREG,
    Dir = libc::S_IFDIR,
    Link = libc::S_IFLNK,
    Block = libc::S_IFBLK,
    Char = libc::S_IFCHR,
    Socket = libc::S_IFSOCK,
    FIFO = libc::S_IFIFO,
    Unknown = 0xFFFFFFFF, // Use a different value to avoid conflict
}

impl From<u32> for FileType {
    fn from(value: u32) -> Self {
        match value {
            libc::S_IFREG => Self::Regular,
            libc::S_IFDIR => Self::Dir,
            libc::S_IFLNK => Self::Link,
            libc::S_IFBLK => Self::Block,
            libc::S_IFCHR => Self::Char,
            libc::S_IFSOCK => Self::Socket,
            libc::S_IFIFO => Self::FIFO,
            _ => Self::Unknown,
        }
    }
}

static SCHEMA: &str = include_str!("../../schema/schema.sql");

#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error("failed to execute query: {0}")]
    SqlError(#[from] sqlx::Error),

    #[error("invalid hash length")]
    InvalidHash,

    #[error("invalid key length")]
    InvalidKey,

    #[error("io error: {0:#}")]
    IO(#[from] std::io::Error),

    #[error("store error: {0}")]
    Store(#[from] store::Error),

    #[error("unknown meta error: {0}")]
    Anyhow(#[from] anyhow::Error),
}

pub type Result<T> = std::result::Result<T, Error>;
pub type Ino = u64;

#[derive(Debug, Clone, Default)]
pub struct Mode(u32);

impl From<u32> for Mode {
    fn from(value: u32) -> Self {
        Self(value)
    }
}

impl Mode {
    pub fn new(t: FileType, perm: u32) -> Self {
        Self(t as u32 | (perm & !TYPE_MASK))
    }

    pub fn file_type(&self) -> FileType {
        (self.0 & TYPE_MASK).into()
    }

    pub fn permissions(&self) -> u32 {
        self.0 & !TYPE_MASK
    }

    pub fn mode(&self) -> u32 {
        self.0
    }

    pub fn is(&self, typ: FileType) -> bool {
        self.file_type() == typ
    }
}

#[derive(Debug, Clone, Default)]
pub struct Inode {
    pub ino: Ino,
    pub parent: Ino,
    pub name: String,
    pub size: u64,
    pub uid: u32,
    pub gid: u32,
    pub mode: Mode,
    pub rdev: u64,
    pub ctime: i64,
    pub mtime: i64,
    pub data: Option<Vec<u8>>,
}

impl FromRow<'_, SqliteRow> for Inode {
    fn from_row(row: &'_ SqliteRow) -> std::result::Result<Self, sqlx::Error> {
        Ok(Self {
            ino: row.get::<i64, &str>("ino") as Ino,
            parent: row.get::<i64, &str>("parent") as Ino,
            name: row.get("name"),
            size: row.get::<i64, &str>("size") as u64,
            uid: row.get("uid"),
            gid: row.get("gid"),
            mode: row.get::<u32, &str>("mode").into(),
            rdev: row.get::<i64, &str>("rdev") as u64,
            ctime: row.get("ctime"),
            mtime: row.get("mtime"),
            data: row.get("data"),
        })
    }
}

#[derive(Debug, Clone, Default)]
pub struct Block {
    /// id of the block
    pub id: [u8; ID_LEN],
    /// encryption key of the block
    pub key: [u8; KEY_LEN],
}

impl FromRow<'_, SqliteRow> for Block {
    fn from_row(row: &'_ SqliteRow) -> std::result::Result<Self, sqlx::Error> {
        let hash: &[u8] = row.get("id");
        if hash.len() != ID_LEN {
            return Err(sqlx::Error::Decode(Box::new(Error::InvalidHash)));
        }

        let key: &[u8] = row.get("key");

        if key.len() != KEY_LEN {
            return Err(sqlx::Error::Decode(Box::new(Error::InvalidKey)));
        }

        let mut block = Self::default();
        block.id.copy_from_slice(hash);
        block.key.copy_from_slice(key);

        Ok(block)
    }
}

#[derive(Debug, Clone, Default)]
pub struct Route {
    pub start: u8,
    pub end: u8,
    pub url: String,
}

impl FromRow<'_, SqliteRow> for Route {
    fn from_row(row: &'_ SqliteRow) -> std::result::Result<Self, sqlx::Error> {
        Ok(Self {
            start: row.get("start"),
            end: row.get("end"),
            url: row.get("url"),
        })
    }
}

#[derive(Debug, Clone)]
pub enum Tag<'a> {
    Version,
    Description,
    Author,
    Custom(&'a str),
}

impl<'a> Tag<'a> {
    fn key(&self) -> &str {
        match self {
            Self::Version => "version",
            Self::Description => "description",
            Self::Author => "author",
            Self::Custom(a) => a,
        }
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Walk {
    Continue,
    Break,
}

#[async_trait::async_trait]
pub trait WalkVisitor {
    async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk>;
}

struct WalkItem(PathBuf, Inode);

#[derive(Clone)]
pub struct Reader {
    pool: SqlitePool,
}

impl Reader {
    pub async fn new<P: AsRef<Path>>(path: P) -> Result<Self> {
        let opts = SqliteConnectOptions::new()
            .journal_mode(SqliteJournalMode::Delete)
            .filename(path);

        let pool = SqlitePool::connect_with(opts).await?;

        Ok(Self { pool })
    }

    pub async fn root_inode(&self) -> Result<Inode> {
        let inode: Inode = sqlx::query_as(
            r#"select inode.*, extra.data
                from inode left join extra on inode.ino = extra.ino
                where inode.parent = 0 limit 1;"#,
        )
        .fetch_one(&self.pool)
        .await?;

        Ok(inode)
    }

    pub async fn inode(&self, ino: Ino) -> Result<Inode> {
        let inode: Inode = sqlx::query_as(
            r#"select inode.*, extra.data
                from inode left join extra on inode.ino = extra.ino
                where inode.ino = ?;"#,
        )
        .bind(ino as i64)
        .fetch_one(&self.pool)
        .await?;

        Ok(inode)
    }

    pub async fn children(&self, parent: Ino, limit: u32, offset: u64) -> Result<Vec<Inode>> {
        let results: Vec<Inode> = sqlx::query_as(
            r#"select inode.*, extra.data
                from inode left join extra on inode.ino = extra.ino
                where inode.parent = ? limit ? offset ?;"#,
        )
        .bind(parent as i64)
        .bind(limit)
        .bind(offset as i64)
        .fetch_all(&self.pool)
        .await?;

        Ok(results)
    }

    pub async fn lookup<S: AsRef<str>>(&self, parent: Ino, name: S) -> Result<Option<Inode>> {
        let inode: Option<Inode> = sqlx::query_as(
            r#"select inode.*, extra.data
                from inode left join extra on inode.ino = extra.ino
                where inode.parent = ? and inode.name = ?;"#,
        )
        .bind(parent as i64)
        .bind(name.as_ref())
        .fetch_optional(&self.pool)
        .await?;
        Ok(inode)
    }

    pub async fn blocks(&self, ino: Ino) -> Result<Vec<Block>> {
        let results: Vec<Block> = sqlx::query_as("select id, key from block where ino = ?;")
            .bind(ino as i64)
            .fetch_all(&self.pool)
            .await?;

        Ok(results)
    }

    pub async fn all_blocks(&self, limit: u32, offset: u64) -> Result<Vec<Block>> {
        let results: Vec<Block> = sqlx::query_as("select id, key from block limit ? offset ?;")
            .bind(limit)
            .bind(offset as i64)
            .fetch_all(&self.pool)
            .await?;

        Ok(results)
    }

    pub async fn tag(&self, tag: Tag<'_>) -> Result<Option<String>> {
        let value: Option<(String,)> = sqlx::query_as("select value from tag where key = ?;")
            .bind(tag.key())
            .fetch_optional(&self.pool)
            .await?;

        Ok(value.map(|v| v.0))
    }

    pub async fn tags(&self) -> Result<Vec<(String, String)>> {
        let tags: Vec<(String, String)> = sqlx::query_as("select key, value from tag;")
            .fetch_all(&self.pool)
            .await?;

        Ok(tags)
    }

    pub async fn routes(&self) -> Result<Vec<Route>> {
        let results: Vec<Route> = sqlx::query_as("select start, end, url from route;")
            .fetch_all(&self.pool)
            .await?;

        Ok(results)
    }

    pub async fn walk<W: WalkVisitor + Send>(&self, visitor: &mut W) -> Result<()> {
        let node = self.inode(1).await?;
        let mut list = LinkedList::default();
        let path: PathBuf = "/".into();
        list.push_back(WalkItem(path, node));
        while !list.is_empty() {
            let item = list.pop_back().unwrap();
            self.walk_node(&mut list, &item, visitor).await?;
        }

        Ok(())
    }

    async fn walk_node<W: WalkVisitor + Send>(
        &self,
        list: &mut LinkedList<WalkItem>,
        WalkItem(path, node): &WalkItem,
        visitor: &mut W,
    ) -> Result<()> {
        if visitor.visit(path, node).await? == Walk::Break {
            return Ok(());
        }

        let mut offset = 0;
        loop {
            let children = self.children(node.ino, 1000, offset).await?;
            if children.is_empty() {
                break;
            }

            for child in children {
                offset += 1;
                let child_path = path.join(&child.name);
                if child.mode.is(FileType::Dir) {
                    list.push_back(WalkItem(child_path, child));
                    continue;
                }

                if visitor.visit(&child_path, &child).await? == Walk::Break {
                    return Ok(());
                }
            }
        }

        Ok(())
    }
}

#[derive(Clone)]
pub struct Writer {
    pool: SqlitePool,
}

impl Writer {
    /// create a new flist writer
    pub async fn new<P: AsRef<Path>>(path: P, remove: bool) -> Result<Self> {
        if remove {
            let _ = tokio::fs::remove_file(&path).await;
        }

        let opts = SqliteConnectOptions::new()
            .create_if_missing(true)
            .journal_mode(SqliteJournalMode::Delete)
            .filename(path);

        let pool = SqlitePoolOptions::new()
            .max_connections(1)
            .connect_with(opts)
            .await?;

        sqlx::query(SCHEMA).execute(&pool).await?;

        Ok(Self { pool })
    }

    /// inode add an inode to the flist
    pub async fn inode(&self, inode: Inode) -> Result<Ino> {
        let result = sqlx::query(
            r#"insert into inode (parent, name, size, uid, gid, mode, rdev, ctime, mtime)
                values (?, ?, ?, ?, ?, ?, ?, ?, ?);"#,
        )
        .bind(inode.parent as i64)
        .bind(inode.name)
        .bind(inode.size as i64)
        .bind(inode.uid)
        .bind(inode.gid)
        .bind(inode.mode.0)
        .bind(inode.rdev as i64)
        .bind(inode.ctime)
        .bind(inode.mtime)
        .execute(&self.pool)
        .await?;

        let ino = result.last_insert_rowid() as Ino;
        if let Some(data) = &inode.data {
            sqlx::query("insert into extra(ino, data) values (?, ?)")
                .bind(ino as i64)
                .bind(data)
                .execute(&self.pool)
                .await?;
        }

        Ok(ino)
    }

    pub async fn block(&self, ino: Ino, id: &[u8; ID_LEN], key: &[u8; KEY_LEN]) -> Result<()> {
        sqlx::query("insert into block (ino, id, key) values (?, ?, ?)")
            .bind(ino as i64)
            .bind(&id[..])
            .bind(&key[..])
            .execute(&self.pool)
            .await?;
        Ok(())
    }

    pub async fn route<U: AsRef<str>>(&self, start: u8, end: u8, url: U) -> Result<()> {
        sqlx::query("insert into route (start, end, url) values (?, ?, ?)")
            .bind(start)
            .bind(end)
            .bind(url.as_ref())
            .execute(&self.pool)
            .await?;
        Ok(())
    }

    pub async fn tag<V: AsRef<str>>(&self, tag: Tag<'_>, value: V) -> Result<()> {
        sqlx::query("insert or replace into tag (key, value) values (?, ?);")
            .bind(tag.key())
            .bind(value.as_ref())
            .execute(&self.pool)
            .await?;
        Ok(())
    }

    pub async fn delete_tag(&self, tag: Tag<'_>) -> Result<()> {
        sqlx::query("delete from tag where key = ?;")
            .bind(tag.key())
            .execute(&self.pool)
            .await?;
        Ok(())
    }

    pub async fn delete_route<U: AsRef<str>>(&self, url: U) -> Result<()> {
        sqlx::query("delete from route where url = ?;")
            .bind(url.as_ref())
            .execute(&self.pool)
            .await?;
        Ok(())
    }

    pub async fn delete_tags(&self) -> Result<()> {
        sqlx::query("delete from tag;").execute(&self.pool).await?;
        Ok(())
    }

    pub async fn delete_routes(&self) -> Result<()> {
        sqlx::query("delete from route;")
            .execute(&self.pool)
            .await?;
        Ok(())
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[tokio::test]
    async fn test_inode() {
        const PATH: &str = "/tmp/inode.fl";
        let meta = Writer::new(PATH, true).await.unwrap();

        let ino = meta
            .inode(Inode {
                name: "/".into(),
                data: Some("target".into()),
                ..Inode::default()
            })
            .await
            .unwrap();

        assert_eq!(ino, 1);

        let meta = Reader::new(PATH).await.unwrap();
        let inode = meta.inode(ino).await.unwrap();

        assert_eq!(inode.name, "/");
        assert!(inode.data.is_some());
        assert_eq!(inode.data.unwrap().as_slice(), "target".as_bytes());
    }

    #[tokio::test]
    async fn test_get_children() {
        const PATH: &str = "/tmp/children.fl";
        let meta = Writer::new(PATH, true).await.unwrap();

        let ino = meta
            .inode(Inode {
                name: "/".into(),
                data: Some("target".into()),
                ..Inode::default()
            })
            .await
            .unwrap();

        for name in ["bin", "etc", "usr"] {
            meta.inode(Inode {
                parent: ino,
                name: name.into(),
                ..Inode::default()
            })
            .await
            .unwrap();
        }
        let meta = Reader::new(PATH).await.unwrap();
        let children = meta.children(ino, 10, 0).await.unwrap();

        assert_eq!(children.len(), 3);
        assert_eq!(children[0].name, "bin");

        let child = meta.lookup(ino, "bin").await.unwrap();
        assert!(child.is_some());
        assert_eq!(child.unwrap().name, "bin");

        let child = meta.lookup(ino, "wrong").await.unwrap();
        assert!(child.is_none());
    }

    #[tokio::test]
    async fn test_get_block() {
        const PATH: &str = "/tmp/block.fl";
        let meta = Writer::new(PATH, true).await.unwrap();
        let hash: [u8; ID_LEN] = [
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
            25, 26, 27, 28, 29, 30, 31, 32,
        ];
        let key1: [u8; KEY_LEN] = [1; KEY_LEN];
        let key2: [u8; KEY_LEN] = [2; KEY_LEN];

        meta.block(1, &hash, &key1).await.unwrap();
        meta.block(1, &hash, &key2).await.unwrap();

        let meta = Reader::new(PATH).await.unwrap();

        let blocks = meta.blocks(1).await.unwrap();
|
||||
assert_eq!(blocks.len(), 2);
|
||||
assert_eq!(blocks[0].id, hash);
|
||||
assert_eq!(blocks[0].key, key1);
|
||||
assert_eq!(blocks[1].key, key2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_tag() {
|
||||
const PATH: &str = "/tmp/tag.fl";
|
||||
let meta = Writer::new(PATH, true).await.unwrap();
|
||||
meta.tag(Tag::Version, "0.1").await.unwrap();
|
||||
meta.tag(Tag::Author, "azmy").await.unwrap();
|
||||
meta.tag(Tag::Custom("custom"), "value").await.unwrap();
|
||||
|
||||
let meta = Reader::new(PATH).await.unwrap();
|
||||
|
||||
assert!(matches!(
|
||||
meta.tag(Tag::Version).await.unwrap().as_deref(),
|
||||
Some("0.1")
|
||||
));
|
||||
|
||||
assert!(matches!(
|
||||
meta.tag(Tag::Custom("custom")).await.unwrap().as_deref(),
|
||||
Some("value")
|
||||
));
|
||||
|
||||
assert!(matches!(
|
||||
meta.tag(Tag::Custom("unknown")).await.unwrap(),
|
||||
None
|
||||
));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_routes() {
|
||||
const PATH: &str = "/tmp/route.fl";
|
||||
let meta = Writer::new(PATH, true).await.unwrap();
|
||||
|
||||
meta.route(0, 128, "zdb://hub1.grid.tf").await.unwrap();
|
||||
meta.route(129, 255, "zdb://hub2.grid.tf").await.unwrap();
|
||||
|
||||
let meta = Reader::new(PATH).await.unwrap();
|
||||
|
||||
let routes = meta.routes().await.unwrap();
|
||||
assert_eq!(routes.len(), 2);
|
||||
assert_eq!(routes[0].start, 0);
|
||||
assert_eq!(routes[0].end, 128);
|
||||
assert_eq!(routes[0].url, "zdb://hub1.grid.tf");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mode() {
|
||||
let m = Mode::new(FileType::Regular, 0754);
|
||||
|
||||
assert_eq!(m.permissions(), 0754);
|
||||
assert_eq!(m.file_type(), FileType::Regular);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_walk() {
|
||||
const PATH: &str = "/tmp/walk.fl";
|
||||
let meta = Writer::new(PATH, true).await.unwrap();
|
||||
|
||||
let parent = meta
|
||||
.inode(Inode {
|
||||
name: "/".into(),
|
||||
data: Some("target".into()),
|
||||
..Inode::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
for name in ["bin", "etc", "usr"] {
|
||||
meta.inode(Inode {
|
||||
parent: parent,
|
||||
name: name.into(),
|
||||
..Inode::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let meta = Reader::new(PATH).await.unwrap();
|
||||
//TODO: validate the walk
|
||||
meta.walk(&mut WalkTest).await.unwrap();
|
||||
}
|
||||
|
||||
struct WalkTest;
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WalkVisitor for WalkTest {
|
||||
async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
|
||||
println!("{} = {:?}", node.ino, path);
|
||||
Ok(Walk::Continue)
|
||||
}
|
||||
}
|
||||
}
|
||||
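The WalkVisitor trait exercised above is the extension point for consumers of walk(); a visitor that collects every visited path could look like the following sketch (PathCollector is a hypothetical name, not part of this commit):

struct PathCollector(Vec<PathBuf>);

#[async_trait::async_trait]
impl WalkVisitor for PathCollector {
    async fn visit(&mut self, path: &Path, _node: &Inode) -> Result<Walk> {
        // record the full path and keep descending
        self.0.push(path.to_path_buf());
        Ok(Walk::Continue)
    }
}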
3
components/rfs/src/fungi/mod.rs
Normal file
@@ -0,0 +1,3 @@
pub mod meta;

pub use meta::{Error, Reader, Result, Writer};
122
components/rfs/src/lib.rs
Normal file
@@ -0,0 +1,122 @@
#[macro_use]
extern crate log;

pub mod cache;
pub mod fungi;
pub mod server;
pub mod store;

mod pack;
pub use pack::pack;
mod unpack;
pub use unpack::unpack;
mod clone;
pub use clone::clone;
pub mod config;
mod docker;
pub use docker::DockerImageToFlist;
mod upload;
pub use upload::*;
mod download;
pub use download::*;
mod exist;
pub use exist::*;
mod sync;
pub use sync::*;
pub mod flist_inspector;
mod server_api;
pub mod tree_visitor;

const PARALLEL_UPLOAD: usize = 20; // number of files we can upload in parallel
const PARALLEL_DOWNLOAD: usize = 20; // number of files we can download in parallel

#[cfg(test)]
mod test {
    use super::*;
    use crate::{
        cache::Cache,
        fungi::meta,
        store::{dir::DirStore, Router},
    };
    use std::path::PathBuf;
    use tokio::{fs, io::AsyncReadExt};

    #[tokio::test]
    async fn pack_unpack() {
        const ROOT: &str = "/tmp/pack-unpack-test";
        let _ = fs::remove_dir_all(ROOT).await;

        let root: PathBuf = ROOT.into();
        let source = root.join("source");
        fs::create_dir_all(&source).await.unwrap();

        for size in [0, 100 * 1024, 1024 * 1024, 10 * 1024 * 1024] {
            let mut urandom = fs::OpenOptions::default()
                .read(true)
                .open("/dev/urandom")
                .await
                .unwrap()
                .take(size);

            let name = format!("file-{}.rnd", size);
            let p = source.join(&name);
            let mut file = fs::OpenOptions::default()
                .create(true)
                .write(true)
                .open(p)
                .await
                .unwrap();

            tokio::io::copy(&mut urandom, &mut file).await.unwrap();
        }

        println!("file generation complete");
        let writer = meta::Writer::new(root.join("meta.fl"), true).await.unwrap();

        // while we're at it we can already create 2 stores and create a router store
        // on top of that.
        let store0 = DirStore::new(root.join("store0")).await.unwrap();
        let store1 = DirStore::new(root.join("store1")).await.unwrap();
        let mut store = Router::new();

        store.add(0x00, 0x7f, store0);
        store.add(0x80, 0xff, store1);

        pack(writer, store, &source, false, None).await.unwrap();

        println!("packing complete");
        // recreate the stores for reading.
        let store0 = DirStore::new(root.join("store0")).await.unwrap();
        let store1 = DirStore::new(root.join("store1")).await.unwrap();
        let mut store = Router::new();

        store.add(0x00, 0x7f, store0);
        store.add(0x80, 0xff, store1);

        let cache = Cache::new(root.join("cache"), store);

        let reader = meta::Reader::new(root.join("meta.fl")).await.unwrap();
        // validate reader store routing
        let routers = reader.routes().await.unwrap();
        assert_eq!(2, routers.len());
        assert_eq!(routers[0].url, "dir:///tmp/pack-unpack-test/store0");
        assert_eq!(routers[1].url, "dir:///tmp/pack-unpack-test/store1");

        assert_eq!((routers[0].start, routers[0].end), (0x00, 0x7f));
        assert_eq!((routers[1].start, routers[1].end), (0x80, 0xff));

        unpack(&reader, &cache, root.join("destination"), false)
            .await
            .unwrap();

        println!("unpacking complete");
        // compare that source directory is exactly the same as target directory
        let status = std::process::Command::new("diff")
            .arg(root.join("source"))
            .arg(root.join("destination"))
            .status()
            .unwrap();

        assert!(status.success());
    }
}
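For consumers of the crate, the pack_unpack test above doubles as API documentation; a single-store round trip reduces to the sketch below (paths are placeholders and error handling is collapsed into anyhow::Result, so treat it as an outline rather than a verified program):

use rfs::{cache::Cache, fungi::meta, pack, store::dir::DirStore, unpack};
use std::path::PathBuf;

async fn roundtrip() -> anyhow::Result<()> {
    let writer = meta::Writer::new("/tmp/demo.fl", true).await?;
    let store = DirStore::new(PathBuf::from("/tmp/demo-store")).await?;

    // chunk the tree under /tmp/demo-src, upload blobs, record metadata in demo.fl
    pack(writer, store, PathBuf::from("/tmp/demo-src"), false, None).await?;

    let reader = meta::Reader::new("/tmp/demo.fl").await?;
    let store = DirStore::new(PathBuf::from("/tmp/demo-store")).await?;
    let cache = Cache::new(PathBuf::from("/tmp/demo-cache"), store);

    // restore the tree into /tmp/demo-dst from cache + store
    unpack(&reader, &cache, PathBuf::from("/tmp/demo-dst"), false).await?;
    Ok(())
}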
1104
components/rfs/src/main.rs
Normal file
File diff suppressed because it is too large
267
components/rfs/src/pack.rs
Normal file
@@ -0,0 +1,267 @@
use crate::fungi::meta::{Ino, Inode};
use crate::fungi::{Error, Result, Writer};
use crate::store::{BlockStore, Store};
use anyhow::Context;
use futures::lock::Mutex;
use std::collections::LinkedList;
use std::ffi::OsString;
use std::fs::Metadata;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::sync::mpsc::Sender;
use std::sync::Arc;
use workers::WorkerPool;

const BLOB_SIZE: usize = 512 * 1024; // 512K

type FailuresList = Arc<Mutex<Vec<(PathBuf, Error)>>>;

#[derive(Debug)]
struct Item(Ino, PathBuf, OsString, Metadata);
/// creates an FL from the given root location. It takes ownership of the writer because
/// it's logically incorrect to store multiple filesystems in the same FL.
/// All file chunks will then be uploaded to the provided store.
///
pub async fn pack<P: Into<PathBuf>, S: Store>(
    writer: Writer,
    store: S,
    root: P,
    strip_password: bool,
    sender: Option<Sender<u32>>,
) -> Result<()> {
    use tokio::fs;

    // building routing table from store information
    for route in store.routes() {
        let mut store_url = route.url;

        if strip_password {
            let mut url = url::Url::parse(&store_url).context("failed to parse store url")?;
            if url.password().is_some() {
                url.set_password(None)
                    .map_err(|_| anyhow::anyhow!("failed to strip password"))?;

                store_url = url.to_string();
            }
        }

        writer
            .route(
                route.start.unwrap_or(u8::MIN),
                route.end.unwrap_or(u8::MAX),
                store_url,
            )
            .await?;
    }

    let store: BlockStore<S> = store.into();

    let root = root.into();
    let meta = fs::metadata(&root)
        .await
        .context("failed to get root stats")?;

    let mut list = LinkedList::default();

    let failures = FailuresList::default();
    let uploader = Uploader::new(store, writer.clone(), Arc::clone(&failures));
    let mut pool = workers::WorkerPool::new(uploader.clone(), super::PARALLEL_UPLOAD);

    pack_one(
        &mut list,
        &writer,
        &mut pool,
        Item(0, root, OsString::from("/"), meta),
        sender.as_ref(),
    )
    .await?;

    while !list.is_empty() {
        let dir = list.pop_back().unwrap();
        pack_one(&mut list, &writer, &mut pool, dir, sender.as_ref()).await?;
    }

    pool.close().await;

    let failures = failures.lock().await;
    if failures.is_empty() {
        return Ok(());
    }

    log::error!("failed to upload one or more files");
    for (file, error) in failures.iter() {
        log::error!(" - failed to upload file {}: {}", file.display(), error);
    }

    Err(Error::Anyhow(anyhow::anyhow!(
        "failed to upload ({}) files",
        failures.len()
    )))
}

/// pack_one is called for each dir
async fn pack_one<S: Store>(
    list: &mut LinkedList<Item>,
    writer: &Writer,
    pool: &mut WorkerPool<Uploader<S>>,
    Item(parent, path, name, meta): Item,
    sender: Option<&Sender<u32>>,
) -> Result<()> {
    use std::os::unix::fs::MetadataExt;
    use tokio::fs;

    let current = writer
        .inode(Inode {
            ino: 0,
            name: String::from_utf8_lossy(name.as_bytes()).into_owned(),
            parent,
            size: meta.size(),
            uid: meta.uid(),
            gid: meta.gid(),
            mode: meta.mode().into(),
            rdev: meta.rdev(),
            ctime: meta.ctime(),
            mtime: meta.mtime(),
            data: None,
        })
        .await?;

    let mut children = fs::read_dir(&path)
        .await
        .context("failed to list dir children")?;

    while let Some(child) = children
        .next_entry()
        .await
        .context("failed to read next entry from directory")?
    {
        let name = child.file_name();
        let meta = child.metadata().await?;
        let child_path = path.join(&name);

        if let Some(ref sender) = sender {
            sender.send(1).context("failed to send progress")?;
        }

        // if this child is a directory we add it to the tail of the list
        if meta.is_dir() {
            list.push_back(Item(current, child_path.clone(), name, meta));
            continue;
        }

        // otherwise create the file meta entry
        let data = if meta.is_symlink() {
            let target = fs::read_link(&child_path).await?;
            Some(target.as_os_str().as_bytes().into())
        } else {
            None
        };

        let child_ino = writer
            .inode(Inode {
                ino: 0,
                name: String::from_utf8_lossy(name.as_bytes()).into_owned(),
                parent: current,
                size: meta.size(),
                uid: meta.uid(),
                gid: meta.gid(),
                mode: meta.mode().into(),
                rdev: meta.rdev(),
                ctime: meta.ctime(),
                mtime: meta.mtime(),
                data,
            })
            .await?;

        if !meta.is_file() {
            continue;
        }

        let worker = pool.get().await;
        worker
            .send((child_ino, child_path))
            .context("failed to schedule file upload")?;
    }
    Ok(())
}

struct Uploader<S>
where
    S: Store,
{
    store: Arc<BlockStore<S>>,
    failures: FailuresList,
    writer: Writer,
    buffer: [u8; BLOB_SIZE],
}

impl<S> Clone for Uploader<S>
where
    S: Store,
{
    fn clone(&self) -> Self {
        Self {
            store: Arc::clone(&self.store),
            failures: Arc::clone(&self.failures),
            writer: self.writer.clone(),
            buffer: [0; BLOB_SIZE],
        }
    }
}

impl<S> Uploader<S>
where
    S: Store,
{
    fn new(store: BlockStore<S>, writer: Writer, failures: FailuresList) -> Self {
        Self {
            store: Arc::new(store),
            failures,
            writer,
            buffer: [0; BLOB_SIZE],
        }
    }

    async fn upload(&mut self, ino: Ino, path: &Path) -> Result<()> {
        use tokio::fs;
        use tokio::io::AsyncReadExt;
        use tokio::io::BufReader;

        // create file blocks
        let fd = fs::OpenOptions::default().read(true).open(path).await?;

        let mut reader = BufReader::new(fd);
        loop {
            let size = reader.read(&mut self.buffer).await?;
            if size == 0 {
                break;
            }

            // write block to remote store
            let block = self.store.set(&self.buffer[..size]).await?;

            // write block info to meta
            self.writer.block(ino, &block.id, &block.key).await?;
        }

        Ok(())
    }
}

#[async_trait::async_trait]
impl<S> workers::Work for Uploader<S>
where
    S: Store,
{
    type Input = (Ino, PathBuf);
    type Output = ();

    async fn run(&mut self, (ino, path): Self::Input) -> Self::Output {
        log::info!("uploading {:?}", path);
        if let Err(err) = self.upload(ino, &path).await {
            log::error!("failed to upload file {}: {:#}", path.display(), err);
            self.failures.lock().await.push((path, err));
        }
    }
}
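The sender argument of pack gives callers a coarse progress feed: pack_one sends a 1 for every directory entry it visits. A caller could drain it on a plain thread, as in this sketch (the counter loop is illustrative, not part of this commit):

use std::sync::mpsc;

let (tx, rx) = mpsc::channel::<u32>();
std::thread::spawn(move || {
    let mut seen: u64 = 0;
    // each received unit is one visited directory entry
    for n in rx {
        seen += n as u64;
        eprintln!("scanned {} entries", seen);
    }
});
// pack(writer, store, "/some/root", false, Some(tx)).await?;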
181
components/rfs/src/server/auth.rs
Normal file
@@ -0,0 +1,181 @@
use std::sync::Arc;

use axum::{
    extract::{Json, Request, State},
    http::{self, StatusCode},
    middleware::Next,
    response::IntoResponse,
};
use axum_macros::debug_handler;
use chrono::{Duration, Utc};
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, TokenData, Validation};
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;

use crate::server::{
    config,
    db::DB,
    response::{ResponseError, ResponseResult},
};

#[derive(Serialize, Deserialize)]
pub struct Claims {
    pub exp: usize,       // Expiry time of the token
    pub iat: usize,       // Issued at time of the token
    pub username: String, // Username associated with the token
}

#[derive(Deserialize, ToSchema)]
pub struct SignInBody {
    pub username: String,
    pub password: String,
}

#[derive(Serialize, ToSchema)]
pub struct SignInResponse {
    pub access_token: String,
}

#[utoipa::path(
    post,
    path = "/api/v1/signin",
    tag = "Authentication",
    request_body = SignInBody,
    responses(
        (status = 201, description = "User signed in successfully", body = SignInResponse),
        (status = 500, description = "Internal server error", body = ResponseError),
        (status = 401, description = "Unauthorized user", body = ResponseError),
    )
)]
#[debug_handler]
pub async fn sign_in_handler(
    State(state): State<Arc<config::AppState>>,
    Json(user_data): Json<SignInBody>,
) -> impl IntoResponse {
    let user = match state.db.get_user_by_username(&user_data.username).await {
        Some(user) => user,
        None => {
            return Err(ResponseError::Unauthorized(
                "User is not authorized".to_string(),
            ));
        }
    };

    if user_data.password != user.password {
        return Err(ResponseError::Unauthorized(
            "Wrong username or password".to_string(),
        ));
    }

    let token = encode_jwt(
        user.username.clone(),
        state.config.jwt_secret.clone(),
        state.config.jwt_expire_hours,
    )
    .map_err(|_| ResponseError::InternalServerError)?;

    Ok(ResponseResult::SignedIn(SignInResponse {
        access_token: token,
    }))
}

pub fn encode_jwt(
    username: String,
    jwt_secret: String,
    jwt_expire: i64,
) -> Result<String, StatusCode> {
    let now = Utc::now();
    let exp: usize = (now + Duration::hours(jwt_expire)).timestamp() as usize;
    let iat: usize = now.timestamp() as usize;
    let claim = Claims { iat, exp, username };

    encode(
        &Header::default(),
        &claim,
        &EncodingKey::from_secret(jwt_secret.as_ref()),
    )
    .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
}

pub fn decode_jwt(jwt_token: String, jwt_secret: String) -> Result<TokenData<Claims>, StatusCode> {
    let result: Result<TokenData<Claims>, StatusCode> = decode(
        &jwt_token,
        &DecodingKey::from_secret(jwt_secret.as_ref()),
        &Validation::default(),
    )
    .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR);
    result
}

pub async fn authorize(
    State(state): State<Arc<config::AppState>>,
    mut req: Request,
    next: Next,
) -> impl IntoResponse {
    let auth_header = match req.headers_mut().get(http::header::AUTHORIZATION) {
        Some(header) => header
            .to_str()
            .map_err(|_| ResponseError::Forbidden("Empty header is not allowed".to_string()))?,
        None => {
            return Err(ResponseError::Forbidden(
                "No JWT token is added to the header".to_string(),
            ))
        }
    };

    let mut header = auth_header.split_whitespace();
    let (_, token) = (header.next(), header.next());
    let token_str = match token {
        Some(t) => t.to_string(),
        None => {
            log::error!("failed to get token string");
            return Err(ResponseError::Unauthorized(
                "Authorization token is not provided".to_string(),
            ));
        }
    };

    let token_data = match decode_jwt(token_str, state.config.jwt_secret.clone()) {
        Ok(data) => data,
        Err(_) => {
            return Err(ResponseError::Forbidden(
                "Unable to decode JWT token".to_string(),
            ))
        }
    };

    let current_user = match state
        .db
        .get_user_by_username(&token_data.claims.username)
        .await
    {
        Some(user) => user,
        None => {
            return Err(ResponseError::Unauthorized(
                "You are not an authorized user".to_string(),
            ));
        }
    };

    req.extensions_mut().insert(current_user.username.clone());
    Ok(next.run(req).await)
}

/// Get the user ID from the username stored in the request extension
pub async fn get_user_id_from_token(db: &impl DB, username: &str) -> Result<i64, ResponseError> {
    match db.get_user_by_username(username).await {
        Some(user) => match user.id {
            Some(id) => Ok(id),
            None => {
                log::error!("User ID is missing for user: {}", username);
                Err(ResponseError::Unauthorized(
                    "User ID is missing".to_string(),
                ))
            }
        },
        None => {
            log::error!("User not found: {}", username);
            Err(ResponseError::Unauthorized("User not found".to_string()))
        }
    }
}
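encode_jwt and decode_jwt are symmetric over the same secret, which makes the pair easy to sanity-check in isolation (a sketch; the secret and expiry below are placeholders):

let secret = "not-a-real-secret".to_string();
// issue a token that expires in 2 hours
let token = encode_jwt("alice".to_string(), secret.clone(), 2).unwrap();
// decoding verifies the signature and the exp claim
let data = decode_jwt(token, secret).unwrap();
assert_eq!(data.claims.username, "alice");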
500
components/rfs/src/server/block_handlers.rs
Normal file
@@ -0,0 +1,500 @@
use axum::{
    body::Bytes,
    extract::{Query, State},
    http::StatusCode,
    response::IntoResponse,
    Json,
};
use axum_macros::debug_handler;
use std::sync::Arc;

use crate::server::{
    auth,
    config::AppState,
    db::DB,
    models::Block,
    response::{BlockUploadedResponse, ResponseError, ResponseResult},
};
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;

// Block API endpoints are included in the main FlistApi in handlers.rs

/// Query parameters for uploading a block
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct UploadBlockParams {
    /// File hash associated with the block
    pub file_hash: String,
    /// Block index within the file
    pub idx: u64,
}

/// Upload a block to the server.
/// If the block already exists, the server will return a 200 OK response.
/// If the block is new, the server will return a 201 Created response.
#[utoipa::path(
    post,
    path = "/api/v1/block",
    tag = "Block Management",
    request_body(content = [u8], description = "Block data to upload", content_type = "application/octet-stream"),
    params(
        ("file_hash" = String, Query, description = "File hash associated with the block"),
        ("idx" = u64, Query, description = "Block index within the file")
    ),
    responses(
        (status = 200, description = "Block already exists", body = BlockUploadedResponse),
        (status = 201, description = "Block created successfully", body = BlockUploadedResponse),
        (status = 400, description = "Bad request", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    ),
    security(
        ("bearerAuth" = [])
    )
)]
#[debug_handler]
pub async fn upload_block_handler(
    State(state): State<Arc<AppState>>,
    Query(params): Query<UploadBlockParams>,
    extension: axum::extract::Extension<String>,
    body: Bytes,
) -> Result<(StatusCode, ResponseResult), ResponseError> {
    // Convert the body bytes to Vec<u8>
    let data = body.to_vec();

    // Calculate the hash of the block data
    let hash = Block::calculate_hash(&data);

    // Get the username from the extension (set by the authorize middleware)
    let username = extension.0;
    let user_id = auth::get_user_id_from_token(&*state.db, &username).await?;

    // Store the block data in the database
    match state
        .db
        .store_block(&hash, data, &params.file_hash, params.idx, user_id)
        .await
    {
        Ok(is_new) => {
            if is_new {
                // Block is new, return 201 Created
                Ok((StatusCode::CREATED, ResponseResult::BlockUploaded(hash)))
            } else {
                // Block already exists, return 200 OK
                Ok((StatusCode::OK, ResponseResult::BlockUploaded(hash)))
            }
        }
        Err(err) => {
            log::error!("Failed to store block: {}", err);
            Err(ResponseError::InternalServerError)
        }
    }
}

/// Retrieve a block by its hash.
#[utoipa::path(
    get,
    path = "/api/v1/block/{hash}",
    tag = "Block Management",
    responses(
        (status = 200, description = "Block found", body = [u8], content_type = "application/octet-stream"),
        (status = 404, description = "Block not found", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    ),
    params(
        ("hash" = String, Path, description = "Block hash")
    )
)]
#[debug_handler]
pub async fn get_block_handler(
    State(state): State<Arc<AppState>>,
    axum::extract::Path(hash): axum::extract::Path<String>,
) -> Result<impl IntoResponse, ResponseError> {
    // Retrieve the block from the database
    match state.db.get_block(&hash).await {
        Ok(Some(data)) => {
            // Block found, return its data
            Ok((StatusCode::OK, axum::body::Bytes::from(data)))
        }
        Ok(None) => {
            // Block not found
            Err(ResponseError::NotFound(format!(
                "Block with hash '{}' not found",
                hash
            )))
        }
        Err(err) => {
            log::error!("Failed to retrieve block: {}", err);
            Err(ResponseError::InternalServerError)
        }
    }
}

/// Checks a block by its hash.
#[utoipa::path(
    head,
    path = "/api/v1/block/{hash}",
    tag = "Block Management",
    responses(
        (status = 200, description = "Block found"),
        (status = 404, description = "Block not found", body = ResponseError),
    ),
    params(
        ("hash" = String, Path, description = "Block hash")
    )
)]
#[debug_handler]
pub async fn check_block_handler(
    State(state): State<Arc<AppState>>,
    axum::extract::Path(hash): axum::extract::Path<String>,
) -> Result<impl IntoResponse, ResponseError> {
    // Check whether the block exists without returning its data
    match state.db.block_exists("", 0, &hash, 0).await {
        true => {
            // Block found
            Ok(StatusCode::OK)
        }
        false => {
            log::error!("Block with hash '{}' doesn't exist", hash);
            Err(ResponseError::NotFound(format!(
                "Block with hash '{}' not found",
                hash
            )))
        }
    }
}

/// Request to verify if multiple blocks exist on the server
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct VerifyBlock {
    /// Block hash to verify
    pub block_hash: String,
    /// File hash associated with the block
    pub file_hash: String,
    /// Block index within the file
    pub block_index: u64,
}

#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct VerifyBlocksRequest {
    /// List of blocks to verify
    pub blocks: Vec<VerifyBlock>,
}

/// Response with list of missing blocks
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct VerifyBlocksResponse {
    /// List of block hashes that are missing on the server
    pub missing: Vec<String>,
}

/// Verify if multiple blocks exist on the server.
/// Returns a list of missing blocks.
#[utoipa::path(
    post,
    path = "/api/v1/block/verify",
    tag = "Block Management",
    request_body(content = VerifyBlocksRequest, description = "List of block hashes to verify", content_type = "application/json"),
    responses(
        (status = 200, description = "Verification completed", body = VerifyBlocksResponse),
        (status = 400, description = "Bad request", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    )
)]
#[debug_handler]
pub async fn verify_blocks_handler(
    State(state): State<Arc<AppState>>,
    Json(request): Json<VerifyBlocksRequest>,
) -> Result<impl IntoResponse, ResponseError> {
    let mut missing = Vec::new();

    // Check each block in the request
    for block in request.blocks {
        if !state
            .db
            .block_exists(&block.file_hash, block.block_index, &block.block_hash, 0)
            .await
        {
            missing.push(block.block_hash);
        }
    }

    // Return the list of missing blocks
    Ok((
        StatusCode::OK,
        Json(VerifyBlocksResponse {
            missing, // Include missing blocks in the response
        }),
    ))
}

/// Block information with hash and index
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct BlockInfo {
    /// Block hash
    pub hash: String,
    /// Block index within the file
    pub index: u64,
}

/// Block information with hash and size
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct UserBlockInfo {
    /// Block hash
    pub hash: String,
    /// Block size in bytes
    pub size: u64,
}

/// Response for blocks by hash endpoint
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct BlocksResponse {
    /// List of blocks with their indices
    pub blocks: Vec<BlockInfo>,
}

/// Retrieve blocks by hash (file hash or block hash).
/// If the hash is a file hash, returns all blocks with their block index related to that file.
/// If the hash is a block hash, returns the block itself.
#[utoipa::path(
    get,
    path = "/api/v1/blocks/{hash}",
    tag = "Block Management",
    responses(
        (status = 200, description = "Blocks found", body = BlocksResponse),
        (status = 404, description = "Hash not found", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    ),
    params(
        ("hash" = String, Path, description = "File hash or block hash")
    )
)]
#[debug_handler]
pub async fn get_blocks_by_hash_handler(
    State(state): State<Arc<AppState>>,
    axum::extract::Path(hash): axum::extract::Path<String>,
) -> Result<impl IntoResponse, ResponseError> {
    // First, try to get file blocks by hash
    match state.db.get_file_blocks_ordered(&hash).await {
        Ok(blocks) if !blocks.is_empty() => {
            // This is a file hash, return all blocks with their indices
            let block_infos = blocks
                .into_iter()
                .map(|(hash, index)| BlockInfo { hash, index })
                .collect();
            Ok((StatusCode::OK, Json(BlocksResponse { blocks: block_infos })))
        }
        Ok(_) | Err(_) => {
            // Not a file hash or error occurred, try as block hash
            match state.db.get_block(&hash).await {
                Ok(Some(_)) => {
                    // This is a block hash, return just this block with index 0
                    Ok((
                        StatusCode::OK,
                        Json(BlocksResponse {
                            blocks: vec![BlockInfo {
                                hash: hash.clone(),
                                index: 0,
                            }],
                        }),
                    ))
                }
                Ok(None) => {
                    // Neither file nor block found
                    Err(ResponseError::NotFound(format!(
                        "No file or block with hash '{}' found",
                        hash
                    )))
                }
                Err(err) => {
                    log::error!("Failed to retrieve block: {}", err);
                    Err(ResponseError::InternalServerError)
                }
            }
        }
    }
}

/// Query parameters for listing blocks
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct ListBlocksParams {
    /// Page number (1-indexed)
    #[schema(default = 1, minimum = 1)]
    pub page: Option<u32>,
    /// Number of items per page
    #[schema(default = 50, minimum = 1, maximum = 100)]
    pub per_page: Option<u32>,
}

/// Response for listing blocks
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct ListBlocksResponse {
    /// List of block hashes
    pub blocks: Vec<String>,
    /// Total number of blocks
    pub total: u64,
    /// Current page number
    pub page: u32,
    /// Number of items per page
    pub per_page: u32,
}

/// List all block hashes in the server with pagination
#[utoipa::path(
    get,
    path = "/api/v1/blocks",
    tag = "Block Management",
    params(
        ("page" = Option<u32>, Query, description = "Page number (1-indexed)"),
        ("per_page" = Option<u32>, Query, description = "Number of items per page")
    ),
    responses(
        (status = 200, description = "List of block hashes", body = ListBlocksResponse),
        (status = 400, description = "Bad request"),
        (status = 500, description = "Internal server error"),
    )
)]
#[debug_handler]
pub async fn list_blocks_handler(
    State(state): State<Arc<AppState>>,
    Query(params): Query<ListBlocksParams>,
) -> Result<impl IntoResponse, ResponseError> {
    let page = params.page.unwrap_or(1);
    let per_page = params.per_page.unwrap_or(50).min(100);

    match state.db.list_blocks(page, per_page).await {
        Ok((blocks, total)) => {
            let response = ListBlocksResponse {
                blocks,
                total,
                page,
                per_page,
            };
            Ok((StatusCode::OK, Json(response)))
        }
        Err(err) => {
            log::error!("Failed to list blocks: {}", err);
            Err(ResponseError::InternalServerError)
        }
    }
}

/// Response for user blocks endpoint
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct UserBlocksResponse {
    /// List of blocks with their sizes
    pub blocks: Vec<UserBlockInfo>,
    /// Total number of blocks
    pub total: u64,
    /// Total number of all blocks
    pub all_blocks: u64,
}

/// Retrieve all blocks uploaded by a specific user.
#[utoipa::path(
    get,
    path = "/api/v1/user/blocks",
    tag = "Block Management",
    params(
        ("page" = Option<u32>, Query, description = "Page number (1-indexed)"),
        ("per_page" = Option<u32>, Query, description = "Number of items per page")
    ),
    responses(
        (status = 200, description = "Blocks found", body = UserBlocksResponse),
        (status = 401, description = "Unauthorized"),
        (status = 500, description = "Internal server error"),
    ),
    security(
        ("bearerAuth" = [])
    )
)]
#[debug_handler]
pub async fn get_user_blocks_handler(
    State(state): State<Arc<AppState>>,
    extension: axum::extract::Extension<String>,
    Query(params): Query<ListBlocksParams>,
) -> Result<impl IntoResponse, ResponseError> {
    let page = params.page.unwrap_or(1);
    let per_page = params.per_page.unwrap_or(50).min(100);

    // Get the username from the extension (set by the authorize middleware)
    let username = extension.0;
    let user_id = auth::get_user_id_from_token(&*state.db, &username).await?;

    let all_blocks = match state.db.list_blocks(1, 1).await {
        Ok((_, total)) => total,
        Err(err) => {
            log::error!("Failed to list blocks: {}", err);
            0
        }
    };

    // Get all blocks related to the user
    match state.db.get_user_blocks(user_id, page, per_page).await {
        Ok(blocks) => {
            let total = blocks.len() as u64;
            let response = UserBlocksResponse {
                blocks: blocks
                    .into_iter()
                    .map(|(hash, size)| UserBlockInfo { hash, size })
                    .collect(),
                total,
                all_blocks,
            };
            Ok((StatusCode::OK, Json(response)))
        }
        Err(err) => {
            log::error!("Failed to retrieve user blocks: {}", err);
            Err(ResponseError::InternalServerError)
        }
    }
}

/// Response for block downloads endpoint
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct BlockDownloadsResponse {
    /// Block hash
    pub block_hash: String,
    /// Number of times the block has been downloaded
    pub downloads_count: u64,
    /// Size of the block in bytes
    pub block_size: u64,
}

/// Retrieve the number of times a block has been downloaded.
#[utoipa::path(
    get,
    path = "/api/v1/block/{hash}/downloads",
    tag = "Block Management",
    responses(
        (status = 200, description = "Download count retrieved successfully", body = BlockDownloadsResponse),
        (status = 404, description = "Block not found"),
        (status = 500, description = "Internal server error"),
    ),
    params(
        ("hash" = String, Path, description = "Block hash")
    )
)]
#[debug_handler]
pub async fn get_block_downloads_handler(
    State(state): State<Arc<AppState>>,
    axum::extract::Path(hash): axum::extract::Path<String>,
) -> Result<impl IntoResponse, ResponseError> {
    // Check if the block exists
    if !state.db.block_exists("", 0, &hash, 0).await {
        return Err(ResponseError::NotFound(format!(
            "Block with hash '{}' not found",
            hash
        )));
    }

    // Get the download count
    match state.db.get_block_downloads(&hash).await {
        Ok((count, block_size)) => {
            let response = BlockDownloadsResponse {
                block_hash: hash,
                downloads_count: count,
                block_size,
            };
            Ok((StatusCode::OK, Json(response)))
        }
        Err(err) => {
            log::error!("Failed to retrieve block download count: {}", err);
            Err(ResponseError::InternalServerError)
        }
    }
}
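A client can use the verify endpoint to learn which chunks it still has to upload; the request and response bodies are exactly the serde shapes above. A sketch (hashes are placeholders, and the inline JSON string stands in for a real POST to /api/v1/block/verify):

let request = VerifyBlocksRequest {
    blocks: vec![VerifyBlock {
        block_hash: "abc123".to_string(),
        file_hash: "def456".to_string(),
        block_index: 0,
    }],
};
// JSON body sent to the server
let body = serde_json::to_string(&request).unwrap();
// and the server's answer, listing whatever it does not have yet
let resp: VerifyBlocksResponse =
    serde_json::from_str(r#"{"missing":["abc123"]}"#).unwrap();
assert_eq!(resp.missing, vec!["abc123".to_string()]);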
67
components/rfs/src/server/config.rs
Normal file
@@ -0,0 +1,67 @@
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::{
    collections::HashMap,
    fs,
    path::PathBuf,
    sync::{Arc, Mutex},
};
use utoipa::ToSchema;

use crate::server::{db::DBType, handlers, models::User};
use crate::store;

#[derive(Debug, ToSchema, Serialize, Clone)]
pub struct Job {
    pub id: String,
}

#[derive(ToSchema)]
pub struct AppState {
    pub jobs_state: Mutex<HashMap<String, handlers::FlistState>>,
    pub flists_progress: Mutex<HashMap<PathBuf, f32>>,
    pub db: Arc<DBType>,
    pub config: Config,
}

#[derive(Debug, Default, Clone, Deserialize)]
pub struct Config {
    pub host: String,
    pub port: u16,
    pub store_url: Vec<String>,
    pub flist_dir: String,
    pub sqlite_path: Option<String>,

    pub jwt_secret: String,
    pub jwt_expire_hours: i64,
    pub users: Vec<User>,

    pub block_size: Option<usize>, // Optional block size in bytes
    pub storage_dir: String,       // Path to the storage directory
}

/// Parse the config file into Config struct.
pub async fn parse_config(filepath: &str) -> Result<Config> {
    let content = fs::read_to_string(filepath).context("failed to read config file")?;
    let mut c: Config = toml::from_str(&content).context("failed to convert toml config data")?;

    if !hostname_validator::is_valid(&c.host) {
        anyhow::bail!("host '{}' is invalid", c.host)
    }

    store::parse_router(&c.store_url)
        .await
        .context("failed to parse store urls")?;
    fs::create_dir_all(&c.flist_dir).context("failed to create flists directory")?;
    fs::create_dir_all(&c.storage_dir).context("failed to create storage directory")?;

    if c.jwt_expire_hours < 1 || c.jwt_expire_hours > 24 {
        anyhow::bail!(format!(
            "jwt expiry interval in hours '{}' is invalid, must be between [1, 24]",
            c.jwt_expire_hours
        ))
    }

    c.block_size = c.block_size.or(Some(1024 * 1024));
    Ok(c)
}
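parse_config expects a TOML document whose keys mirror the Config struct. A minimal sketch (every value below is a placeholder, not a recommended setting; block_size is omitted and falls back to the 1 MiB default applied at the end of parse_config):

let raw = r#"
host = "localhost"
port = 3000
store_url = ["dir:///tmp/store0"]
flist_dir = "/tmp/flists"
jwt_secret = "change-me"
jwt_expire_hours = 5
users = [{ username = "admin", password = "admin" }]
storage_dir = "/tmp/storage"
"#;
let config: Config = toml::from_str(raw).unwrap();
assert_eq!(config.port, 3000);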
96
components/rfs/src/server/db/map.rs
Normal file
@@ -0,0 +1,96 @@
use std::collections::HashMap;
use utoipa::ToSchema;

use super::DB;
use crate::server::models::{File, User};
use anyhow::Result;

#[derive(Debug, ToSchema)]
pub struct MapDB {
    users: HashMap<String, User>,
}

impl MapDB {
    pub fn new(users: &[User]) -> Self {
        Self {
            users: users
                .iter()
                .map(|u| (u.username.clone(), u.to_owned()))
                .collect(),
        }
    }
}

impl DB for MapDB {
    async fn get_user_by_username(&self, username: &str) -> Option<User> {
        self.users.get(username).cloned()
    }

    async fn block_exists(
        &self,
        _file_hash: &str,
        _block_index: u64,
        _block_hash: &str,
        _user_id: i64,
    ) -> bool {
        // TODO:
        true
    }

    async fn store_block(
        &self,
        _block_hash: &str,
        _data: Vec<u8>,
        _file_hash: &str,
        _block_index: u64,
        _user_id: i64,
    ) -> Result<bool, anyhow::Error> {
        // TODO: Implement block storage logic
        Ok(true) // Placeholder return value
    }

    async fn get_block(&self, _hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error> {
        // TODO:
        Ok(None)
    }

    async fn get_file_by_hash(&self, _hash: &str) -> Result<Option<File>, anyhow::Error> {
        // TODO:
        Ok(None)
    }

    async fn get_file_blocks_ordered(
        &self,
        _file_hash: &str,
    ) -> Result<Vec<(String, u64)>, anyhow::Error> {
        // TODO:
        Ok(Vec::new())
    }

    async fn list_blocks(
        &self,
        _page: u32,
        _per_page: u32,
    ) -> Result<(Vec<String>, u64), anyhow::Error> {
        // TODO:
        Ok((Vec::new(), 0))
    }

    async fn get_user_blocks(
        &self,
        _user_id: i64,
        _page: u32,
        _per_page: u32,
    ) -> Result<Vec<(String, u64)>, anyhow::Error> {
        // TODO:
        Ok(Vec::new())
    }

    async fn increment_block_downloads(&self, _hash: &str) -> Result<(), anyhow::Error> {
        Ok(())
    }

    async fn get_block_downloads(&self, _hash: &str) -> Result<(u64, u64), anyhow::Error> {
        Ok((0, 0))
    }
}
166
components/rfs/src/server/db/mod.rs
Normal file
@@ -0,0 +1,166 @@
pub mod map;
pub mod sqlite;
mod storage;
use crate::server::models::{File, User};

pub trait DB: Send + Sync {
    // User methods
    async fn get_user_by_username(&self, username: &str) -> Option<User>;

    // Block methods
    async fn block_exists(
        &self,
        file_hash: &str,
        block_index: u64,
        block_hash: &str,
        user_id: i64,
    ) -> bool;
    async fn store_block(
        &self,
        block_hash: &str,
        data: Vec<u8>,
        file_hash: &str,
        block_index: u64,
        user_id: i64,
    ) -> Result<bool, anyhow::Error>;
    async fn get_block(&self, hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error>;
    async fn increment_block_downloads(&self, hash: &str) -> Result<(), anyhow::Error>;
    async fn get_block_downloads(&self, hash: &str) -> Result<(u64, u64), anyhow::Error>;

    // File methods
    async fn get_file_by_hash(&self, hash: &str) -> Result<Option<File>, anyhow::Error>;
    async fn get_file_blocks_ordered(
        &self,
        file_hash: &str,
    ) -> Result<Vec<(String, u64)>, anyhow::Error>;
    async fn list_blocks(
        &self,
        page: u32,
        per_page: u32,
    ) -> Result<(Vec<String>, u64), anyhow::Error>;

    // Get all blocks related to a user
    async fn get_user_blocks(
        &self,
        user_id: i64,
        page: u32,
        per_page: u32,
    ) -> Result<Vec<(String, u64)>, anyhow::Error>;
}

pub enum DBType {
    MapDB(map::MapDB),
    SqlDB(sqlite::SqlDB),
}

impl DB for DBType {
    // User methods
    async fn get_user_by_username(&self, username: &str) -> Option<User> {
        match self {
            DBType::MapDB(db) => db.get_user_by_username(username).await,
            DBType::SqlDB(db) => db.get_user_by_username(username).await,
        }
    }

    // Block methods
    async fn block_exists(
        &self,
        file_hash: &str,
        block_index: u64,
        block_hash: &str,
        user_id: i64,
    ) -> bool {
        match self {
            DBType::MapDB(db) => {
                db.block_exists(file_hash, block_index, block_hash, user_id)
                    .await
            }
            DBType::SqlDB(db) => {
                db.block_exists(file_hash, block_index, block_hash, user_id)
                    .await
            }
        }
    }

    async fn store_block(
        &self,
        block_hash: &str,
        data: Vec<u8>,
        file_hash: &str,
        block_index: u64,
        user_id: i64,
    ) -> Result<bool, anyhow::Error> {
        match self {
            DBType::MapDB(db) => {
                db.store_block(block_hash, data, file_hash, block_index, user_id)
                    .await
            }
            DBType::SqlDB(db) => {
                db.store_block(block_hash, data, file_hash, block_index, user_id)
                    .await
            }
        }
    }

    async fn get_block(&self, hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.get_block(hash).await,
            DBType::SqlDB(db) => db.get_block(hash).await,
        }
    }

    async fn increment_block_downloads(&self, hash: &str) -> Result<(), anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.increment_block_downloads(hash).await,
            DBType::SqlDB(db) => db.increment_block_downloads(hash).await,
        }
    }

    async fn get_block_downloads(&self, hash: &str) -> Result<(u64, u64), anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.get_block_downloads(hash).await,
            DBType::SqlDB(db) => db.get_block_downloads(hash).await,
        }
    }

    // File methods
    async fn get_file_by_hash(&self, hash: &str) -> Result<Option<File>, anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.get_file_by_hash(hash).await,
            DBType::SqlDB(db) => db.get_file_by_hash(hash).await,
        }
    }

    async fn get_file_blocks_ordered(
        &self,
        file_hash: &str,
    ) -> Result<Vec<(String, u64)>, anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.get_file_blocks_ordered(file_hash).await,
            DBType::SqlDB(db) => db.get_file_blocks_ordered(file_hash).await,
        }
    }

    async fn list_blocks(
        &self,
        page: u32,
        per_page: u32,
    ) -> Result<(Vec<String>, u64), anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.list_blocks(page, per_page).await,
            DBType::SqlDB(db) => db.list_blocks(page, per_page).await,
        }
    }

    async fn get_user_blocks(
        &self,
        user_id: i64,
        page: u32,
        per_page: u32,
    ) -> Result<Vec<(String, u64)>, anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.get_user_blocks(user_id, page, per_page).await,
            DBType::SqlDB(db) => db.get_user_blocks(user_id, page, per_page).await,
        }
    }
}
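DBType dispatches statically over the two backends rather than through a trait object, since a trait with async methods is not object safe without boxing; selecting a backend is just constructing the matching variant. A sketch (the sqlite paths are placeholders):

// in-memory backend, seeded from the configured users
let db = DBType::MapDB(map::MapDB::new(&[]));
// or the sqlite-backed one:
// let db = DBType::SqlDB(sqlite::SqlDB::new("/tmp/rfs.db", "/tmp/storage", &[]).await);
let user = db.get_user_by_username("admin").await;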
397
components/rfs/src/server/db/sqlite.rs
Normal file
@@ -0,0 +1,397 @@
|
||||
use super::{storage::Storage, DB};
|
||||
use crate::server::models::{File, User};
|
||||
use anyhow::Result;
|
||||
use sqlx::{query, query_as, Row, SqlitePool};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct SqlDB {
|
||||
pool: SqlitePool, // Use a connection pool for efficient database access
|
||||
storage: Storage, // Directory for storing blocks
|
||||
}
|
||||
|
||||
static SCHEMA: &str = include_str!("../../../schema/server.sql");
|
||||
|
||||
impl SqlDB {
|
||||
pub async fn new(database_filepath: &str, storage_dir: &str, users: &[User]) -> Self {
|
||||
// Check if the database file exists, and create it if it doesn't
|
||||
if !std::path::Path::new(database_filepath).exists() {
|
||||
std::fs::File::create(database_filepath).expect("Failed to create database file");
|
||||
}
|
||||
|
||||
let pool = SqlitePool::connect_lazy(database_filepath)
|
||||
.expect("Failed to create database connection pool");
|
||||
|
||||
// Initialize the database schema
|
||||
Self::init_schema(&pool)
|
||||
.await
|
||||
.expect("Failed to initialize database schema");
|
||||
|
||||
let storage = Storage::new(storage_dir);
|
||||
|
||||
for user in users {
|
||||
if let Err(err) = Self::insert_user(&pool, user).await {
|
||||
log::error!("Failed to insert user '{}': {}", user.username, err);
|
||||
}
|
||||
}
|
||||
|
||||
Self { pool, storage }
|
||||
}
|
||||
|
||||
/// Initialize the database schema
|
||||
async fn init_schema(pool: &SqlitePool) -> Result<(), anyhow::Error> {
|
||||
sqlx::query(SCHEMA)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("Failed to create database schema: {}", e))?;
|
||||
|
||||
log::info!("Database schema initialized successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn metadata_exists(
|
||||
&self,
|
||||
file_hash: &str,
|
||||
block_index: u64,
|
||||
block_hash: &str,
|
||||
user_id: i64,
|
||||
) -> bool {
|
||||
let result = query(
|
||||
"SELECT COUNT(*) as count FROM metadata WHERE file_hash = ? AND block_index = ? AND block_hash = ? AND user_id = ?",
|
||||
)
|
||||
.bind(file_hash)
|
||||
.bind(block_index as i64)
|
||||
.bind(block_hash)
|
||||
.bind(user_id)
|
||||
.fetch_one(&self.pool);
|
||||
|
||||
match result.await {
|
||||
Ok(row) => {
|
||||
let count: i64 = row.get(0);
|
||||
count > 0
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Error checking if metadata exists: {}", err);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn insert_user(pool: &SqlitePool, user: &User) -> Result<(), anyhow::Error> {
|
||||
query(
|
||||
"INSERT OR IGNORE INTO users (username, password, created_at) VALUES (?, ?, CURRENT_TIMESTAMP)",
|
||||
)
|
||||
.bind(&user.username)
|
||||
.bind(&user.password)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("Failed to insert user: {}", e))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}

impl DB for SqlDB {
    async fn get_user_by_username(&self, username: &str) -> Option<User> {
        let query = "SELECT * FROM users WHERE username = ?";
        let result = query_as::<_, User>(query)
            .bind(username)
            .fetch_one(&self.pool);

        match result.await {
            Ok(user) => Some(user),
            Err(_) => None,
        }
    }

    async fn block_exists(
        &self,
        file_hash: &str,
        block_index: u64,
        block_hash: &str,
        user_id: i64,
    ) -> bool {
        // Check if the block already exists in storage
        let block_exists = self.storage.block_exists(block_hash);

        // Check if the metadata already exists in the database
        let metadata_exists = self
            .metadata_exists(file_hash, block_index, block_hash, user_id)
            .await;

        // If both block and metadata exist, no need to store again
        if block_exists && (metadata_exists || file_hash.is_empty()) {
            return true;
        }

        false // Block does not exist
    }

    async fn store_block(
        &self,
        block_hash: &str,
        data: Vec<u8>,
        file_hash: &str,
        block_index: u64,
        user_id: i64,
    ) -> Result<bool, anyhow::Error> {
        // Check if the block already exists in storage
        let block_exists = self.storage.block_exists(block_hash);

        // Check if the metadata already exists in the database
        let metadata_exists = self
            .metadata_exists(file_hash, block_index, block_hash, user_id)
            .await;

        // If both block and metadata exist, no need to store again
        if block_exists && (metadata_exists || (file_hash.is_empty() && user_id == 0)) {
            return Ok(false);
        }

        // Calculate block size
        let block_size = data.len() as i64;

        // Store metadata if it doesn't exist
        if !metadata_exists {
            if let Err(err) = query(
                "INSERT INTO metadata (file_hash, block_index, block_hash, user_id, block_size, created_at)
                 VALUES (?, ?, ?, ?, ?, CURRENT_TIMESTAMP)",
            )
            .bind(file_hash)
            .bind(block_index as i64)
            .bind(block_hash)
            .bind(user_id)
            .bind(block_size)
            .execute(&self.pool)
            .await
            {
                log::error!("Error storing metadata: {}", err);
                return Err(anyhow::anyhow!("Failed to store metadata: {}", err));
            }
        }

        // Store the block data in the file system if it doesn't exist
        if !block_exists {
            if let Err(err) = self.storage.save_block(block_hash, &data) {
                log::error!("Error storing block in storage: {}", err);
                return Err(anyhow::anyhow!("Failed to store block in storage: {}", err));
            }
        }

        Ok(true) // Indicate that the block or metadata was newly stored
    }

    async fn get_block(&self, hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error> {
        // Retrieve the block data from storage
        match self.storage.get_block(hash) {
            Ok(Some(data)) => {
                if let Err(err) = self.increment_block_downloads(&hash).await {
                    return Err(anyhow::anyhow!(
                        "Failed to increment download count for block {}: {}",
                        hash,
                        err
                    ));
                }
                Ok(Some(data))
            }
            Ok(None) => Ok(None),
            Err(err) => {
                log::error!("Error retrieving block from storage: {}", err);
                Err(anyhow::anyhow!(
                    "Failed to retrieve block from storage: {}",
                    err
                ))
            }
        }
    }

    async fn get_file_by_hash(&self, hash: &str) -> Result<Option<File>, anyhow::Error> {
        // Retrieve the blocks associated with the file hash
        let blocks = match self.get_file_blocks_ordered(hash).await {
            Ok(blocks) => blocks,
            Err(err) => {
                log::error!("Failed to retrieve file blocks: {}", err);
                return Err(anyhow::anyhow!("Failed to retrieve file blocks: {}", err));
            }
        };

        if blocks.is_empty() {
            return Ok(None); // No blocks found, file does not exist
        }

        // Combine block data to reconstruct the file
        let mut file_content = Vec::new();
        for (block_hash, _) in blocks {
            match self.storage.get_block(&block_hash) {
                Ok(Some(data)) => {
                    if let Err(err) = self.increment_block_downloads(&block_hash).await {
                        return Err(anyhow::anyhow!(
                            "Failed to increment download count for block {}: {}",
                            block_hash,
                            err
                        ));
                    }
                    file_content.extend(data)
                }
                Ok(None) => {
                    log::error!("Block {} not found", block_hash);
                    return Err(anyhow::anyhow!("Block {} not found", block_hash));
                }
                Err(err) => {
                    log::error!("Failed to retrieve block {}: {}", block_hash, err);
                    return Err(anyhow::anyhow!(
                        "Failed to retrieve block {}: {}",
                        block_hash,
                        err
                    ));
                }
            }
        }

        // Return the reconstructed file
        Ok(Some(File {
            file_hash: hash.to_string(),
            file_content,
        }))
    }

    async fn get_file_blocks_ordered(
        &self,
        file_hash: &str,
    ) -> Result<Vec<(String, u64)>, anyhow::Error> {
        let result = query(
            "SELECT block_hash, block_index FROM metadata WHERE file_hash = ? ORDER BY block_index",
        )
        .bind(file_hash)
        .fetch_all(&self.pool)
        .await;

        match result {
            Ok(rows) => {
                let blocks = rows
                    .into_iter()
                    .map(|row| {
                        let block_hash: String = row.get(0);
                        let block_index: i64 = row.get(1);
                        (block_hash, block_index as u64)
                    })
                    .collect::<Vec<(String, u64)>>();

                Ok(blocks)
            }
            Err(err) => {
                log::error!("Error retrieving file blocks: {}", err);
                Err(anyhow::anyhow!("Failed to retrieve file blocks: {}", err))
            }
        }
    }

    async fn list_blocks(
        &self,
        page: u32,
        per_page: u32,
    ) -> Result<(Vec<String>, u64), anyhow::Error> {
        let blocks = match self.storage.list_blocks() {
            Ok(blocks) => blocks,
            Err(err) => {
                log::error!("Error listing blocks: {}", err);
                return Err(anyhow::anyhow!("Failed to list blocks: {}", err));
            }
        };

        let total = blocks.len() as u64;
        let start = page
            .checked_sub(1)
            .and_then(|p| p.checked_mul(per_page))
            .ok_or_else(|| anyhow::anyhow!("Page or per_page value caused overflow"))?
            as usize;
        let end = (start + per_page as usize).min(total as usize);
        let page_blocks = blocks
            .into_iter()
            .skip(start)
            .take(end.saturating_sub(start))
            .collect();
        Ok((page_blocks, total))
    }

    async fn get_user_blocks(
        &self,
        user_id: i64,
        page: u32,
        per_page: u32,
    ) -> Result<Vec<(String, u64)>, anyhow::Error> {
        let offset = page
            .checked_sub(1)
            .and_then(|p| p.checked_mul(per_page))
            .ok_or_else(|| anyhow::anyhow!("Page or per_page value caused overflow"))?
            as i64;

        let result = query(
            "SELECT block_hash, block_size FROM metadata WHERE user_id = ? ORDER BY block_index LIMIT ? OFFSET ?",
        )
        .bind(user_id)
        .bind(per_page as i64)
        .bind(offset)
        .fetch_all(&self.pool)
        .await;

        match result {
            Ok(rows) => {
                let blocks = rows
                    .into_iter()
                    .map(|row| {
                        let block_hash: String = row.get(0);
                        let block_size: i64 = row.get(1);
                        (block_hash, block_size as u64)
                    })
                    .collect::<Vec<(String, u64)>>();

                Ok(blocks)
            }
            Err(err) => {
                log::error!("Error retrieving user blocks: {}", err);
                Err(anyhow::anyhow!("Failed to retrieve user blocks: {}", err))
            }
        }
    }

    async fn increment_block_downloads(&self, hash: &str) -> Result<(), anyhow::Error> {
        let result =
            query("UPDATE metadata SET downloads_count = downloads_count + 1 WHERE block_hash = ?")
                .bind(hash)
                .execute(&self.pool)
                .await;

        match result {
            Ok(_) => Ok(()),
            Err(err) => {
                log::error!("Error incrementing block downloads count: {}", err);
                Err(anyhow::anyhow!(
                    "Failed to increment block downloads count: {}",
                    err
                ))
            }
        }
    }

    async fn get_block_downloads(&self, hash: &str) -> Result<(u64, u64), anyhow::Error> {
        let result = query("SELECT downloads_count, block_size FROM metadata WHERE block_hash = ?")
            .bind(hash)
            .fetch_one(&self.pool)
            .await;

        match result {
            Ok(row) => {
                let count: i64 = row.get(0);
                let size: i64 = row.get(1);
                Ok((count as u64, size as u64))
            }
            Err(err) => {
                log::error!("Error retrieving block downloads count and size: {}", err);
                Err(anyhow::anyhow!(
                    "Failed to retrieve block downloads count and size: {}",
                    err
                ))
            }
        }
    }
}
95
components/rfs/src/server/db/storage.rs
Normal file
@@ -0,0 +1,95 @@
use std::fs;
use std::io::{self, Write};
use std::path::PathBuf;

#[derive(Debug)]
pub struct Storage {
    base_dir: PathBuf,
}

impl Storage {
    pub fn new(base_dir: &str) -> Self {
        let base_path = PathBuf::from(base_dir).join("blocks");
        fs::create_dir_all(&base_path).expect("Failed to create storage directory");
        Self {
            base_dir: base_path,
        }
    }

    pub fn save_block(&self, hash: &str, content: &[u8]) -> io::Result<()> {
        let sub_dir = self.base_dir.join(&hash[..4]);
        fs::create_dir_all(&sub_dir)?;

        let block_path = sub_dir.join(hash);
        let mut file = fs::File::create(block_path)?;
        file.write_all(content)
    }

    pub fn get_block(&self, hash: &str) -> io::Result<Option<Vec<u8>>> {
        let block_path = self.base_dir.join(&hash[..4]).join(hash);
        if block_path.exists() {
            Ok(Some(fs::read(block_path)?))
        } else {
            Ok(None)
        }
    }

    pub fn block_exists(&self, hash: &str) -> bool {
        let block_path = self.base_dir.join(&hash[..4]).join(hash);
        block_path.exists()
    }

    pub fn list_blocks(&self) -> io::Result<Vec<String>> {
        let mut block_hashes = Vec::new();

        // Walk through the storage directory
        for entry in fs::read_dir(&self.base_dir)? {
            let entry = entry?;
            let path = entry.path();
            if path.is_dir() {
                // Each subdirectory represents the first 4 characters of the hash
                for block_entry in fs::read_dir(path)? {
                    let block_entry = block_entry?;
                    let block_path = block_entry.path();
                    if block_path.is_file() {
                        if let Some(file_name) = block_path.file_name() {
                            if let Some(hash) = file_name.to_str() {
                                block_hashes.push(hash.to_string());
                            }
                        }
                    }
                }
            }
        }

        Ok(block_hashes)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_storage() {
        let storage = Storage::new("test_storage");

        let hash = "abcd1234";
        let content = b"Hello, world!";

        // Save block
        storage.save_block(hash, content).unwrap();
        assert!(storage.block_exists(hash));

        let hash = "abcd12345";
        let content = b"Hello, world!";

        // Get block
        storage.save_block(hash, content).unwrap();
        let retrieved_content = storage.get_block(hash).unwrap();
        assert_eq!(retrieved_content.unwrap(), content);

        // Clean up
        fs::remove_dir_all("test_storage").unwrap();
    }
}
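
The layout above shards blocks into subdirectories named after the first four characters of the hash. A minimal sketch of computing the resulting on-disk path outside of Storage, assuming the same blocks/<prefix>/<hash> convention (the base directory and hash value here are illustrative):

use std::path::PathBuf;

// Mirror of Storage's blocks/<first 4 hash chars>/<hash> layout.
fn block_path(base_dir: &str, hash: &str) -> PathBuf {
    PathBuf::from(base_dir)
        .join("blocks")
        .join(&hash[..4]) // shard prefix, as in save_block/get_block
        .join(hash)
}

// block_path("storage", "abcd1234") -> storage/blocks/abcd/abcd1234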
171
components/rfs/src/server/file_handlers.rs
Normal file
@@ -0,0 +1,171 @@
use axum::{body::Bytes, extract::State, http::StatusCode, response::IntoResponse};
use axum_macros::debug_handler;
use std::sync::Arc;

use crate::server::{
    auth,
    config::AppState,
    db::DB,
    models::{Block, File},
    response::{ResponseError, ResponseResult},
};
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;

const BLOCK_SIZE: usize = 1024 * 1024; // 1MB

// File API endpoints are included in the main FlistApi in handlers.rs

/// Response for file upload
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct FileUploadResponse {
    /// The file hash
    pub file_hash: String,
    /// Message indicating success
    pub message: String,
}

/// Upload a file to the server.
/// The file will be split into blocks and stored in the database.
#[utoipa::path(
    post,
    path = "/api/v1/file",
    tag = "File Management",
    request_body(content = [u8], description = "File data to upload", content_type = "application/octet-stream"),
    responses(
        (status = 201, description = "File uploaded successfully", body = FileUploadResponse),
        (status = 400, description = "Bad request", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    ),
    security(
        ("bearerAuth" = [])
    )
)]
#[debug_handler]
pub async fn upload_file_handler(
    State(state): State<Arc<AppState>>,
    extension: axum::extract::Extension<String>,
    body: Bytes,
) -> Result<(StatusCode, ResponseResult), ResponseError> {
    // Convert the request body to a byte vector
    let data = body.to_vec();

    // Create a new File record
    let file = File::new(data.clone());

    // Store the file metadata in the database
    // In a real implementation, we would store this in the files table
    // For now, we'll just log it
    log::info!("Storing file metadata: hash={}", file.file_hash);

    // Get the username from the extension (set by the authorize middleware)
    let username = extension.0;
    let user_id = auth::get_user_id_from_token(&*state.db, &username).await?;

    // Store each block with a reference to the file
    for (i, chunk) in data
        .chunks(state.config.block_size.unwrap_or(BLOCK_SIZE))
        .enumerate()
    {
        let block_hash = Block::calculate_hash(chunk);

        // TODO: parallel
        // Store each block in the storage with file hash and block index in metadata in DB
        match state
            .db
            .store_block(
                &block_hash,
                chunk.to_vec(),
                &file.file_hash,
                i as u64,
                user_id,
            )
            .await
        {
            Ok(_) => {
                log::debug!("Stored block {}", block_hash);
            }
            Err(err) => {
                log::error!("Failed to store block: {}", err);
                return Err(ResponseError::InternalServerError);
            }
        }
    }

    log::info!(
        "Stored file metadata and blocks for file {}",
        file.file_hash
    );

    // Return success response
    let response = FileUploadResponse {
        file_hash: file.file_hash,
        message: "File is uploaded successfully".to_string(),
    };

    Ok((StatusCode::CREATED, ResponseResult::FileUploaded(response)))
}

/// Request for file download with custom filename
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct FileDownloadRequest {
    /// The custom filename to use for download
    pub file_name: String,
}

/// Retrieve a file by its hash from path, with optional custom filename in request body.
/// The file will be reconstructed from its blocks.
#[utoipa::path(
    get,
    path = "/api/v1/file/{hash}",
    tag = "File Management",
    request_body(content = FileDownloadRequest, description = "Optional custom filename for download", content_type = "application/json"),
    responses(
        (status = 200, description = "File found", body = [u8], content_type = "application/octet-stream"),
        (status = 404, description = "File not found", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    ),
    params(
        ("hash" = String, Path, description = "File hash")
    )
)]
#[debug_handler]
pub async fn get_file_handler(
    State(state): State<Arc<AppState>>,
    axum::extract::Path(hash): axum::extract::Path<String>,
    request: Option<axum::extract::Json<FileDownloadRequest>>,
) -> Result<impl IntoResponse, ResponseError> {
    // Get the file metadata using the hash
    let file = match state.db.get_file_by_hash(&hash).await {
        Ok(Some(file)) => file,
        Ok(None) => {
            return Err(ResponseError::NotFound(format!(
                "File with hash '{}' not found",
                hash
            )));
        }
        Err(err) => {
            log::error!("Failed to retrieve file metadata: {}", err);
            return Err(ResponseError::InternalServerError);
        }
    };

    // Set content disposition header with the custom filename from request if provided
    // Otherwise use the hash as the filename
    let filename = match request {
        Some(req) => req.0.file_name,
        None => format!("{}.bin", hash), // Default filename using hash
    };

    let headers = [(
        axum::http::header::CONTENT_DISPOSITION,
        format!("attachment; filename=\"{}\"", filename),
    )];

    // Return the file data
    Ok((
        StatusCode::OK,
        headers,
        axum::body::Bytes::from(file.file_content),
    ))
}
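
The upload handler fixes the chunking contract: the body is split into block_size chunks (1 MiB by default) and each chunk is addressed by Block::calculate_hash, so a client can predict every block hash before uploading. A hedged client-side sketch of the same chunk/hash computation, assuming the blake2b_simd and hex crates used by the Block model:

// Client-side view of the server's chunking scheme (sketch; assumes the
// blake2b_simd and hex crates, as used by Block::calculate_hash).
fn chunk_hashes(data: &[u8], block_size: usize) -> Vec<(u64, String)> {
    data.chunks(block_size)
        .enumerate()
        .map(|(i, chunk)| {
            let hash = blake2b_simd::Params::new().hash_length(32).hash(chunk);
            (i as u64, hex::encode(hash.as_bytes()))
        })
        .collect()
}

// A 3 MiB + 5 byte payload with the 1 MiB default yields 4 blocks.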
595
components/rfs/src/server/handlers.rs
Normal file
@@ -0,0 +1,595 @@
use anyhow::Error;
use axum::{
    extract::{Path, State},
    response::IntoResponse,
    Extension, Json,
};
use axum_macros::debug_handler;
use std::{
    collections::HashMap,
    fs,
    path::PathBuf,
    sync::{mpsc, Arc},
};

use bollard::auth::DockerCredentials;
use serde::{Deserialize, Serialize};

use crate::docker;
use crate::fungi;
use crate::server::{
    auth::{SignInBody, SignInResponse, __path_sign_in_handler},
    config::{self, Job},
    db::DB,
    response::{DirListTemplate, DirLister, ErrorTemplate, TemplateErr},
    response::{FileInfo, ResponseError, ResponseResult, FlistStateResponse, HealthResponse, BlockUploadedResponse},
    serve_flists::visit_dir_one_level,
};
use crate::store;
use utoipa::{OpenApi, ToSchema, Modify};
use utoipa::openapi::security::{SecurityScheme, HttpAuthScheme, Http};
use uuid::Uuid;
use crate::server::block_handlers;
use crate::server::file_handlers;
use crate::server::serve_flists;
use crate::server::website_handlers;

// Security scheme modifier for JWT Bearer authentication
struct SecurityAddon;

impl Modify for SecurityAddon {
    fn modify(&self, openapi: &mut utoipa::openapi::OpenApi) {
        let components = openapi.components.as_mut().unwrap(); // Safe to unwrap since components are registered
        components.add_security_scheme(
            "bearerAuth",
            SecurityScheme::Http(Http::new(HttpAuthScheme::Bearer)),
        );
    }
}

#[derive(OpenApi)]
#[openapi(
    paths(health_check_handler, create_flist_handler, get_flist_state_handler, preview_flist_handler, list_flists_handler, sign_in_handler, block_handlers::upload_block_handler, block_handlers::get_block_handler, block_handlers::check_block_handler, block_handlers::verify_blocks_handler, block_handlers::get_blocks_by_hash_handler, block_handlers::list_blocks_handler, block_handlers::get_block_downloads_handler, block_handlers::get_user_blocks_handler, file_handlers::upload_file_handler, file_handlers::get_file_handler, website_handlers::serve_website_handler, serve_flists::serve_flists),
    modifiers(&SecurityAddon),
    components(
        schemas(
            // Common schemas
            DirListTemplate, DirLister, ResponseError, ErrorTemplate, TemplateErr, ResponseResult, FileInfo, FlistStateResponse,
            // Response wrapper schemas
            HealthResponse, BlockUploadedResponse,
            // Authentication schemas
            SignInBody, SignInResponse,
            // Flist schemas
            FlistBody, Job, FlistState, FlistStateInfo, PreviewResponse,
            // Block schemas
            block_handlers::VerifyBlock, block_handlers::VerifyBlocksRequest, block_handlers::VerifyBlocksResponse,
            block_handlers::BlocksResponse, block_handlers::ListBlocksParams, block_handlers::ListBlocksResponse, block_handlers::BlockInfo,
            block_handlers::UserBlocksResponse, block_handlers::BlockDownloadsResponse, block_handlers::UploadBlockParams, block_handlers::UserBlockInfo,
            // File schemas
            file_handlers::FileUploadResponse, file_handlers::FileDownloadRequest
        )
    ),
    tags(
        (name = "System", description = "System health and status"),
        (name = "Authentication", description = "Authentication endpoints"),
        (name = "Flist Management", description = "Flist creation and management"),
        (name = "Block Management", description = "Block storage and retrieval"),
        (name = "File Management", description = "File upload and download"),
        (name = "Website Serving", description = "Website content serving")
    )
)]
pub struct FlistApi;

#[derive(Debug, Deserialize, Serialize, Clone, ToSchema)]
pub struct FlistBody {
    #[schema(example = "redis")]
    pub image_name: String,

    pub username: Option<String>,
    pub password: Option<String>,
    pub auth: Option<String>,
    pub email: Option<String>,
    pub server_address: Option<String>,
    pub identity_token: Option<String>,
    pub registry_token: Option<String>,
}

#[derive(Debug, Deserialize, Serialize, Clone, ToSchema)]
pub struct PreviewResponse {
    pub content: Vec<String>,
    pub metadata: String,
    pub checksum: String,
}

#[derive(Debug, Clone, Serialize, PartialEq, ToSchema)]
pub enum FlistState {
    #[schema(title = "FlistStateAccepted")]
    Accepted(String),
    #[schema(title = "FlistStateStarted")]
    Started(String),
    #[schema(title = "FlistStateInProgress")]
    InProgress(FlistStateInfo),
    #[schema(title = "FlistStateCreated")]
    Created(String),
    #[schema(title = "FlistStateFailed")]
    Failed,
}

#[derive(Debug, Clone, Serialize, PartialEq, ToSchema)]
pub struct FlistStateInfo {
    msg: String,
    progress: f32,
}

#[utoipa::path(
    get,
    path = "/api/v1",
    tag = "System",
    responses(
        (status = 200, description = "flist server is working", body = HealthResponse)
    )
)]
pub async fn health_check_handler() -> ResponseResult {
    ResponseResult::Health
}

#[utoipa::path(
    post,
    path = "/api/v1/fl",
    tag = "Flist Management",
    request_body = FlistBody,
    responses(
        (status = 201, description = "Flist conversion started", body = Job),
        (status = 401, description = "Unauthorized user", body = ResponseError),
        (status = 403, description = "Forbidden", body = ResponseError),
        (status = 409, description = "Conflict", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    ),
    security(
        ("bearerAuth" = [])
    )
)]
#[debug_handler]
pub async fn create_flist_handler(
    State(state): State<Arc<config::AppState>>,
    Extension(username): Extension<String>,
    Json(body): Json<FlistBody>,
) -> impl IntoResponse {
    let cfg = state.config.clone();
    let credentials = Some(DockerCredentials {
        username: body.username,
        password: body.password,
        auth: body.auth,
        email: body.email,
        serveraddress: body.server_address,
        identitytoken: body.identity_token,
        registrytoken: body.registry_token,
    });

    let mut docker_image = body.image_name.to_string();
    if !docker_image.contains(':') {
        docker_image.push_str(":latest");
    }

    let fl_name = docker_image.replace([':', '/'], "-") + ".fl";
    let username_dir = std::path::Path::new(&cfg.flist_dir).join(&username);
    let fl_path = username_dir.join(&fl_name);

    if fl_path.exists() {
        return Err(ResponseError::Conflict("flist already exists".to_string()));
    }

    if let Err(err) = fs::create_dir_all(&username_dir) {
        log::error!(
            "failed to create user flist directory `{:?}` with error {:?}",
            &username_dir,
            err
        );
        return Err(ResponseError::InternalServerError);
    }

    let meta = match fungi::Writer::new(&fl_path, true).await {
        Ok(writer) => writer,
        Err(err) => {
            log::error!(
                "failed to create a new writer for flist `{:?}` with error {}",
                fl_path,
                err
            );
            return Err(ResponseError::InternalServerError);
        }
    };

    let store = match store::parse_router(&cfg.store_url).await {
        Ok(s) => s,
        Err(err) => {
            log::error!("failed to parse router for store with error {}", err);
            return Err(ResponseError::InternalServerError);
        }
    };

    // Create a new job id for the flist request
    let job: Job = Job {
        id: Uuid::new_v4().to_string(),
    };
    let current_job = job.clone();

    state
        .jobs_state
        .lock()
        .expect("failed to lock state")
        .insert(
            job.id.clone(),
            FlistState::Accepted(format!("flist '{}' is accepted", &fl_name)),
        );

    let flist_download_url = std::path::Path::new(&format!("{}:{}", cfg.host, cfg.port))
        .join(cfg.flist_dir)
        .join(username)
        .join(&fl_name);

    tokio::spawn(async move {
        state
            .jobs_state
            .lock()
            .expect("failed to lock state")
            .insert(
                job.id.clone(),
                FlistState::Started(format!("flist '{}' is started", fl_name)),
            );

        let container_name = Uuid::new_v4().to_string();
        let docker_tmp_dir =
            tempdir::TempDir::new(&container_name).expect("failed to create tmp dir for docker");

        let (tx, rx) = mpsc::channel();
        let mut docker_to_fl =
            docker::DockerImageToFlist::new(meta, docker_image, credentials, docker_tmp_dir);

        let res = docker_to_fl.prepare().await;
        if res.is_err() {
            let _ = tokio::fs::remove_file(&fl_path).await;
            state
                .jobs_state
                .lock()
                .expect("failed to lock state")
                .insert(job.id.clone(), FlistState::Failed);
            return;
        }

        let files_count = docker_to_fl.files_count();
        let st = state.clone();
        let job_id = job.id.clone();
        let cloned_fl_path = fl_path.clone();
        tokio::spawn(async move {
            let mut progress: f32 = 0.0;

            for _ in 0..files_count - 1 {
                let step = rx.recv().expect("failed to receive progress") as f32;
                progress += step;
                let progress_percentage = progress / files_count as f32 * 100.0;
                st.jobs_state.lock().expect("failed to lock state").insert(
                    job_id.clone(),
                    FlistState::InProgress(FlistStateInfo {
                        msg: "flist is in progress".to_string(),
                        progress: progress_percentage,
                    }),
                );
                st.flists_progress
                    .lock()
                    .expect("failed to lock state")
                    .insert(cloned_fl_path.clone(), progress_percentage);
            }
        });

        let res = docker_to_fl.pack(store, Some(tx)).await;

        // remove the file created with the writer if fl creation failed
        if res.is_err() {
            log::error!("flist creation failed with error {:?}", res.err());
            let _ = tokio::fs::remove_file(&fl_path).await;
            state
                .jobs_state
                .lock()
                .expect("failed to lock state")
                .insert(job.id.clone(), FlistState::Failed);
            return;
        }

        state
            .jobs_state
            .lock()
            .expect("failed to lock state")
            .insert(
                job.id.clone(),
                FlistState::Created(format!(
                    "flist {:?} is created successfully",
                    flist_download_url
                )),
            );
        state
            .flists_progress
            .lock()
            .expect("failed to lock state")
            .insert(fl_path, 100.0);
    });

    Ok(ResponseResult::FlistCreated(current_job))
}

#[utoipa::path(
    get,
    path = "/api/v1/fl/{job_id}",
    tag = "Flist Management",
    responses(
        (status = 200, description = "Flist state", body = FlistStateResponse),
        (status = 404, description = "Flist not found", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
        (status = 401, description = "Unauthorized user", body = ResponseError),
        (status = 403, description = "Forbidden", body = ResponseError),
    ),
    params(
        ("job_id" = String, Path, description = "flist job id")
    ),
    security(
        ("bearerAuth" = [])
    )
)]
#[debug_handler]
pub async fn get_flist_state_handler(
    Path(flist_job_id): Path<String>,
    State(state): State<Arc<config::AppState>>,
) -> impl IntoResponse {
    if !&state
        .jobs_state
        .lock()
        .expect("failed to lock state")
        .contains_key(&flist_job_id.clone())
    {
        return Err(ResponseError::NotFound("flist doesn't exist".to_string()));
    }

    let res_state = state
        .jobs_state
        .lock()
        .expect("failed to lock state")
        .get(&flist_job_id.clone())
        .expect("failed to get from state")
        .to_owned();

    match res_state {
        FlistState::Accepted(_) => Ok(ResponseResult::FlistState(res_state)),
        FlistState::Started(_) => Ok(ResponseResult::FlistState(res_state)),
        FlistState::InProgress(_) => Ok(ResponseResult::FlistState(res_state)),
        FlistState::Created(_) => {
            state
                .jobs_state
                .lock()
                .expect("failed to lock state")
                .remove(&flist_job_id.clone());

            Ok(ResponseResult::FlistState(res_state))
        }
        FlistState::Failed => {
            state
                .jobs_state
                .lock()
                .expect("failed to lock state")
                .remove(&flist_job_id.clone());

            Err(ResponseError::InternalServerError)
        }
    }
}

#[utoipa::path(
    get,
    path = "/api/v1/fl",
    tag = "Flist Management",
    responses(
        (status = 200, description = "Listing flists", body = HashMap<String, Vec<FileInfo>>),
        (status = 401, description = "Unauthorized user", body = ResponseError),
        (status = 403, description = "Forbidden", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    )
)]
#[debug_handler]
pub async fn list_flists_handler(State(state): State<Arc<config::AppState>>) -> impl IntoResponse {
    let mut flists: HashMap<String, Vec<FileInfo>> = HashMap::new();

    let rs: Result<Vec<FileInfo>, std::io::Error> =
        visit_dir_one_level(&state.config.flist_dir, &state).await;

    let files = match rs {
        Ok(files) => files,
        Err(e) => {
            log::error!("failed to list flists directory with error: {}", e);
            return Err(ResponseError::InternalServerError);
        }
    };

    for file in files {
        if !file.is_file {
            let flists_per_username = visit_dir_one_level(&file.path_uri, &state).await;
            match flists_per_username {
                Ok(files) => flists.insert(file.name, files),
                Err(e) => {
                    log::error!("failed to list flists per username with error: {}", e);
                    return Err(ResponseError::InternalServerError);
                }
            };
        };
    }

    Ok(ResponseResult::Flists(flists))
}

#[utoipa::path(
    get,
    path = "/api/v1/fl/preview/{flist_path}",
    tag = "Flist Management",
    responses(
        (status = 200, description = "Flist preview result", body = PreviewResponse),
        (status = 400, description = "Bad request", body = ResponseError),
        (status = 401, description = "Unauthorized user", body = ResponseError),
        (status = 403, description = "Forbidden", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    ),
    params(
        ("flist_path" = String, Path, description = "flist file path")
    )
)]
#[debug_handler]
pub async fn preview_flist_handler(
    State(state): State<Arc<config::AppState>>,
    Path(flist_path): Path<String>,
) -> impl IntoResponse {
    let fl_path = flist_path;

    match validate_flist_path(&state, &fl_path).await {
        Ok(_) => (),
        Err(err) => return Err(ResponseError::BadRequest(err.to_string())),
    };

    let content = match get_flist_content(&fl_path).await {
        Ok(paths) => paths,
        Err(_) => return Err(ResponseError::InternalServerError),
    };

    let bytes = match std::fs::read(&fl_path) {
        Ok(b) => b,
        Err(err) => {
            log::error!(
                "failed to read flist '{}' into bytes with error {}",
                fl_path,
                err
            );
            return Err(ResponseError::InternalServerError);
        }
    };

    // Convert PathBuf values to strings for OpenAPI compatibility
    let content_strings: Vec<String> = content
        .into_iter()
        .map(|path| path.to_string_lossy().to_string())
        .collect();

    Ok(ResponseResult::PreviewFlist(PreviewResponse {
        content: content_strings,
        metadata: state.config.store_url.join("-"),
        checksum: sha256::digest(&bytes),
    }))
}

async fn validate_flist_path(state: &Arc<config::AppState>, fl_path: &String) -> Result<(), Error> {
    // validate path starting with `/`
    if fl_path.starts_with("/") {
        anyhow::bail!("invalid flist path '{}', shouldn't start with '/'", fl_path);
    }

    // path should include 3 parts [parent dir, username, flist file]
    let parts: Vec<_> = fl_path.split("/").collect();
    if parts.len() != 3 {
        anyhow::bail!(
            "invalid flist path '{}', should consist of 3 parts [parent directory, username and flist name]",
            fl_path
        );
    }

    // validate parent dir
    if parts[0] != state.config.flist_dir {
        anyhow::bail!(
            "invalid flist path '{}', parent directory should be '{}'",
            fl_path,
            state.config.flist_dir
        );
    }

    // validate username
    match state.db.get_user_by_username(parts[1]).await {
        Some(_) => (),
        None => {
            anyhow::bail!(
                "invalid flist path '{}', username '{}' doesn't exist",
                fl_path,
                parts[1]
            );
        }
    };

    // validate flist extension
    let fl_name = parts[2].to_string();
    let ext = match std::path::Path::new(&fl_name).extension() {
        Some(ex) => ex.to_string_lossy().to_string(),
        None => "".to_string(),
    };

    if ext != "fl" {
        anyhow::bail!(
            "invalid flist path '{}', invalid flist extension '{}' should be 'fl'",
            fl_path,
            ext
        );
    }

    // validate flist existence
    if !std::path::Path::new(parts[0])
        .join(parts[1])
        .join(&fl_name)
        .exists()
    {
        anyhow::bail!("flist '{}' doesn't exist", fl_path);
    }

    Ok(())
}

async fn get_flist_content(fl_path: &String) -> Result<Vec<PathBuf>, Error> {
    let mut visitor = ReadVisitor::default();

    let meta = match fungi::Reader::new(&fl_path).await {
        Ok(reader) => reader,
        Err(err) => {
            log::error!(
                "failed to initialize metadata database for flist `{}` with error {}",
                fl_path,
                err
            );
            anyhow::bail!("Internal server error");
        }
    };

    match meta.walk(&mut visitor).await {
        Ok(()) => return Ok(visitor.into_inner()),
        Err(err) => {
            log::error!(
                "failed to walk through metadata for flist `{}` with error {}",
                fl_path,
                err
            );
            anyhow::bail!("Internal server error");
        }
    };
}

#[derive(Default)]
struct ReadVisitor {
    inner: Vec<PathBuf>,
}

impl ReadVisitor {
    pub fn into_inner(self) -> Vec<PathBuf> {
        self.inner
    }
}

#[async_trait::async_trait]
impl fungi::meta::WalkVisitor for ReadVisitor {
    async fn visit(
        &mut self,
        path: &std::path::Path,
        _node: &fungi::meta::Inode,
    ) -> fungi::meta::Result<fungi::meta::Walk> {
        self.inner.push(path.to_path_buf());
        Ok(fungi::meta::Walk::Continue)
    }
}
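
get_flist_state_handler gives pollers a job state machine: Accepted, Started, InProgress, then Created (200) or Failed (500), with terminal states evicted from jobs_state on read. An illustrative polling sketch; the reqwest, serde_json, and tokio dependencies and the base URL are assumptions, not part of this crate:

// Illustrative poller for GET /api/v1/fl/{job_id} (assumes reqwest,
// serde_json and tokio; `base` is a placeholder server URL).
async fn wait_for_flist(base: &str, job_id: &str) -> anyhow::Result<serde_json::Value> {
    loop {
        let url = format!("{}/api/v1/fl/{}", base, job_id);
        // Externally tagged enum wrapped by the handler, e.g.
        //   {"flist_state": {"InProgress": {"msg": "...", "progress": 42.0}}}
        // A Failed job answers 500 with a plain-text body, so the JSON
        // decode below fails and the error propagates out of the loop.
        let body: serde_json::Value = reqwest::get(&url).await?.json().await?;
        if body["flist_state"].get("Created").is_some() {
            return Ok(body);
        }
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
    }
}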
225
components/rfs/src/server/mod.rs
Normal file
@@ -0,0 +1,225 @@
mod auth;
mod block_handlers;
mod config;
mod db;
mod file_handlers;
mod handlers;
mod models;
mod response;
mod serve_flists;
mod website_handlers;

use anyhow::{Context, Result};
use axum::{
    error_handling::HandleErrorLayer,
    extract::{Path, State},
    http::StatusCode,
    middleware,
    response::IntoResponse,
    routing::{get, head, post},
    BoxError, Router,
};
use config::AppState;
use hyper::{
    header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE},
    Method,
};
use std::{
    borrow::Cow,
    collections::HashMap,
    sync::{Arc, Mutex},
    time::Duration,
};
use tokio::signal;
use tower::ServiceBuilder;
use tower_http::cors::CorsLayer;
use tower_http::{cors::Any, trace::TraceLayer};

use utoipa::OpenApi;
use utoipa_swagger_ui::SwaggerUi;
// Using only the main FlistApi for OpenAPI documentation

pub async fn app(config_path: &str) -> Result<()> {
    let config = config::parse_config(config_path)
        .await
        .context("failed to parse config file")?;

    // Initialize the database based on configuration
    let db: Arc<db::DBType> = if let Some(sqlite_path) = &config.sqlite_path {
        log::info!("Using SQLite database at: {}", sqlite_path);
        Arc::new(db::DBType::SqlDB(
            db::sqlite::SqlDB::new(sqlite_path, &config.storage_dir, &config.users.clone()).await,
        ))
    } else {
        log::info!("Using in-memory MapDB database");
        Arc::new(db::DBType::MapDB(db::map::MapDB::new(
            &config.users.clone(),
        )))
    };

    let app_state = Arc::new(config::AppState {
        jobs_state: Mutex::new(HashMap::new()),
        flists_progress: Mutex::new(HashMap::new()),
        db,
        config,
    });

    let cors = CorsLayer::new()
        .allow_origin(Any)
        .allow_methods([Method::GET, Method::POST])
        .allow_headers([AUTHORIZATION, ACCEPT, CONTENT_TYPE]);

    let v1_routes = Router::new()
        .route("/api/v1", get(handlers::health_check_handler))
        .route("/api/v1/signin", post(auth::sign_in_handler))
        .route(
            "/api/v1/fl",
            post(handlers::create_flist_handler).layer(middleware::from_fn_with_state(
                app_state.clone(),
                auth::authorize,
            )),
        )
        .route(
            "/api/v1/fl/:job_id",
            get(handlers::get_flist_state_handler).layer(middleware::from_fn_with_state(
                app_state.clone(),
                auth::authorize,
            )),
        )
        .route(
            "/api/v1/fl/preview/:flist_path",
            get(handlers::preview_flist_handler),
        )
        .route("/api/v1/fl", get(handlers::list_flists_handler))
        .route(
            "/api/v1/block",
            post(block_handlers::upload_block_handler).layer(middleware::from_fn_with_state(
                app_state.clone(),
                auth::authorize,
            )),
        )
        .route(
            "/api/v1/block/:hash",
            get(block_handlers::get_block_handler),
        )
        .route(
            "/api/v1/block/:hash",
            head(block_handlers::check_block_handler),
        )
        .route(
            "/api/v1/block/verify",
            post(block_handlers::verify_blocks_handler),
        )
        .route(
            "/api/v1/blocks/:hash",
            get(block_handlers::get_blocks_by_hash_handler),
        )
        .route("/api/v1/blocks", get(block_handlers::list_blocks_handler))
        .route(
            "/api/v1/block/:hash/downloads",
            get(block_handlers::get_block_downloads_handler),
        )
        .route(
            "/api/v1/user/blocks",
            get(block_handlers::get_user_blocks_handler).layer(middleware::from_fn_with_state(
                app_state.clone(),
                auth::authorize,
            )),
        )
        .route(
            "/api/v1/file",
            post(file_handlers::upload_file_handler).layer(middleware::from_fn_with_state(
                app_state.clone(),
                auth::authorize,
            )),
        )
        .route("/api/v1/file/:hash", get(file_handlers::get_file_handler))
        .route(
            "/website/:website_hash/*path",
            get(website_handlers::serve_website_handler),
        )
        .route(
            "/website/:website_hash/",
            get(
                |state: State<Arc<AppState>>, path: Path<String>| async move {
                    website_handlers::serve_website_handler(state, Path((path.0, "".to_string())))
                        .await
                },
            ),
        )
        .route("/*path", get(serve_flists::serve_flists));

    let app = Router::new()
        .merge(
            SwaggerUi::new("/swagger-ui")
                .url("/api-docs/openapi.json", handlers::FlistApi::openapi()),
        )
        .merge(v1_routes)
        .layer(
            ServiceBuilder::new()
                .layer(HandleErrorLayer::new(handle_error))
                .load_shed()
                .concurrency_limit(1024)
                .timeout(Duration::from_secs(10))
                .layer(TraceLayer::new_for_http()),
        )
        .with_state(Arc::clone(&app_state))
        .layer(cors);

    let address = format!("{}:{}", app_state.config.host, app_state.config.port);
    let listener = tokio::net::TcpListener::bind(address)
        .await
        .context("failed to bind address")?;

    log::info!(
        "🚀 Server started successfully at {}:{}",
        app_state.config.host,
        app_state.config.port
    );

    axum::serve(listener, app)
        .with_graceful_shutdown(shutdown_signal())
        .await
        .context("failed to serve listener")?;

    Ok(())
}

async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }
}

async fn handle_error(error: BoxError) -> impl IntoResponse {
    if error.is::<tower::timeout::error::Elapsed>() {
        return (StatusCode::REQUEST_TIMEOUT, Cow::from("request timed out"));
    }

    if error.is::<tower::load_shed::error::Overloaded>() {
        return (
            StatusCode::SERVICE_UNAVAILABLE,
            Cow::from("service is overloaded, try again later"),
        );
    }

    (
        StatusCode::INTERNAL_SERVER_ERROR,
        Cow::from(format!("Unhandled internal error: {}", error)),
    )
}
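
The module exposes a single app(config_path) entry point that wires the database, routes, middleware, and graceful shutdown. A hypothetical binary wiring, assuming an env_logger-style logger and a config.toml path (module path, logger, and file name are all illustrative):

// Hypothetical main.rs for the binary that embeds this server module;
// the `server::` path, env_logger, and config file name are assumptions.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    env_logger::init();
    server::app("config.toml").await
}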
18
components/rfs/src/server/models/block.rs
Normal file
@@ -0,0 +1,18 @@
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;

#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
pub struct Block {
    pub index: u64,    // The index of the block in the file
    pub hash: String,  // The hash of the block's content
    pub data: Vec<u8>, // The actual data of the block
    pub size: usize,   // The size of the block's data
}

impl Block {
    /// Calculates the hash of the block's data using BLAKE2b with a
    /// 32-byte digest, returned as a hex string.
    pub fn calculate_hash(data: &[u8]) -> String {
        let hash = blake2b_simd::Params::new().hash_length(32).hash(data);
        hex::encode(hash.as_bytes())
    }
}
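
Because block hashes are content-derived, any consumer can re-verify a block after download. A small sketch reusing Block::calculate_hash; the payload bytes are illustrative:

// Sketch: content-addressed verification of a downloaded block,
// reusing Block::calculate_hash; the payload is illustrative.
fn verify_block(expected_hash: &str, data: &[u8]) -> bool {
    Block::calculate_hash(data) == expected_hash
}

// verify_block(&Block::calculate_hash(b"payload"), b"payload") == true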
28
components/rfs/src/server/models/file.rs
Normal file
@@ -0,0 +1,28 @@
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use sqlx::FromRow;
use utoipa::ToSchema;

#[derive(Debug, Clone, FromRow, Serialize, Deserialize, ToSchema)]
pub struct File {
    pub file_hash: String,     // Hash of the file content
    pub file_content: Vec<u8>, // Content of the file
}

impl File {
    /// Calculates the hash of the file's content using SHA-256.
    pub fn calculate_hash(data: &[u8]) -> String {
        let mut hasher = Sha256::new();
        hasher.update(data);
        format!("{:x}", hasher.finalize())
    }

    /// Creates a new File instance by calculating the hash of the content.
    pub fn new(file_content: Vec<u8>) -> Self {
        let file_hash = Self::calculate_hash(&file_content);
        Self {
            file_hash,
            file_content,
        }
    }
}
7
components/rfs/src/server/models/mod.rs
Normal file
@@ -0,0 +1,7 @@
pub mod block;
pub mod file;
pub mod user;

pub use block::Block;
pub use file::File;
pub use user::User;
9
components/rfs/src/server/models/user.rs
Normal file
@@ -0,0 +1,9 @@
use serde::{Deserialize, Serialize};
use sqlx::FromRow;

#[derive(Debug, Clone, FromRow, Serialize, Deserialize)]
pub struct User {
    pub id: Option<i64>,
    pub username: String,
    pub password: String,
}
232
components/rfs/src/server/response.rs
Normal file
@@ -0,0 +1,232 @@
use std::collections::HashMap;

use askama::Template;
use axum::{
    body::Body,
    http::StatusCode,
    response::{Html, IntoResponse, Response},
    Json,
};
use serde::Serialize;
use utoipa::ToSchema;

use crate::server::{
    auth::SignInResponse,
    config::Job,
    file_handlers::FileUploadResponse,
    handlers::{FlistState, PreviewResponse},
};

#[derive(Serialize, ToSchema)]
pub enum ResponseError {
    #[schema(title = "ResponseErrorInternalServerError")]
    InternalServerError,
    #[schema(title = "ResponseErrorConflict")]
    Conflict(String),
    #[schema(title = "ResponseErrorNotFound")]
    NotFound(String),
    #[schema(title = "ResponseErrorUnauthorized")]
    Unauthorized(String),
    #[schema(title = "ResponseErrorBadRequest")]
    BadRequest(String),
    #[schema(title = "ResponseErrorForbidden")]
    Forbidden(String),
    #[schema(title = "ResponseErrorTemplateError")]
    TemplateError(ErrorTemplate),
}

impl IntoResponse for ResponseError {
    fn into_response(self) -> Response<Body> {
        match self {
            ResponseError::InternalServerError => {
                (StatusCode::INTERNAL_SERVER_ERROR, "Internal server error").into_response()
            }
            ResponseError::Conflict(msg) => (StatusCode::CONFLICT, msg).into_response(),
            ResponseError::NotFound(msg) => (StatusCode::NOT_FOUND, msg).into_response(),
            ResponseError::Unauthorized(msg) => (StatusCode::UNAUTHORIZED, msg).into_response(),
            ResponseError::BadRequest(msg) => (StatusCode::BAD_REQUEST, msg).into_response(),
            ResponseError::Forbidden(msg) => (StatusCode::FORBIDDEN, msg).into_response(),
            ResponseError::TemplateError(t) => match t.render() {
                Ok(html) => {
                    let mut resp = Html(html).into_response();
                    match t.err {
                        TemplateErr::NotFound(reason) => {
                            *resp.status_mut() = StatusCode::NOT_FOUND;
                            resp.headers_mut()
                                .insert(FAIL_REASON_HEADER_NAME, reason.parse().unwrap());
                        }
                        TemplateErr::BadRequest(reason) => {
                            *resp.status_mut() = StatusCode::BAD_REQUEST;
                            resp.headers_mut()
                                .insert(FAIL_REASON_HEADER_NAME, reason.parse().unwrap());
                        }
                        TemplateErr::InternalServerError(reason) => {
                            *resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
                            resp.headers_mut()
                                .insert(FAIL_REASON_HEADER_NAME, reason.parse().unwrap());
                        }
                    }
                    resp
                }
                Err(err) => {
                    tracing::error!("template render failed, err={}", err);
                    (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        format!("Failed to render template. Error: {}", err),
                    )
                        .into_response()
                }
            },
        }
    }
}

// Wrapper structs for OpenAPI documentation to match the actual JSON response format
#[derive(Serialize, ToSchema)]
pub struct FlistStateResponse {
    pub flist_state: FlistState,
}

#[derive(Serialize, ToSchema)]
pub struct HealthResponse {
    pub msg: String,
}

#[derive(Serialize, ToSchema)]
pub struct BlockUploadedResponse {
    pub hash: String,
    pub message: String,
}

#[derive(ToSchema)]
pub enum ResponseResult {
    #[schema(title = "ResponseResultHealth")]
    Health,
    #[schema(title = "ResponseResultFlistCreated")]
    FlistCreated(Job),
    #[schema(title = "ResponseResultFlistState")]
    FlistState(FlistState),
    #[schema(title = "ResponseResultFlists")]
    Flists(HashMap<String, Vec<FileInfo>>),
    #[schema(title = "ResponseResultPreviewFlist")]
    PreviewFlist(PreviewResponse),
    #[schema(title = "ResponseResultSignedIn")]
    SignedIn(SignInResponse),
    #[schema(title = "ResponseResultDirTemplate")]
    DirTemplate(DirListTemplate),
    #[schema(title = "ResponseResultBlockUploaded")]
    BlockUploaded(String),
    #[schema(title = "ResponseResultFileUploaded")]
    FileUploaded(FileUploadResponse),
    #[schema(value_type = String, title = "ResponseResultRes", format = "binary")]
    Res(hyper::Response<tower_http::services::fs::ServeFileSystemResponseBody>),
}

impl IntoResponse for ResponseResult {
    fn into_response(self) -> Response<Body> {
        match self {
            ResponseResult::Health => (
                StatusCode::OK,
                Json(HealthResponse {
                    msg: "flist server is working".to_string(),
                }),
            )
                .into_response(),
            ResponseResult::SignedIn(token) => (StatusCode::CREATED, Json(token)).into_response(),
            ResponseResult::FlistCreated(job) => (StatusCode::CREATED, Json(job)).into_response(),
            ResponseResult::FlistState(flist_state) => (
                StatusCode::OK,
                Json(serde_json::json!({
                    "flist_state": flist_state
                })),
            )
                .into_response(),
            ResponseResult::Flists(flists) => (StatusCode::OK, Json(flists)).into_response(),
            ResponseResult::PreviewFlist(content) => {
                (StatusCode::OK, Json(content)).into_response()
            }
            ResponseResult::BlockUploaded(hash) => (
                StatusCode::OK,
                Json(BlockUploadedResponse {
                    hash,
                    message: "Block processed successfully".to_string(),
                }),
            )
                .into_response(),
            ResponseResult::FileUploaded(response) => {
                (StatusCode::CREATED, Json(response)).into_response()
            }
            ResponseResult::DirTemplate(t) => match t.render() {
                Ok(html) => Html(html).into_response(),
                Err(err) => {
                    tracing::error!("template render failed, err={}", err);
                    (
                        StatusCode::INTERNAL_SERVER_ERROR,
                        format!("Failed to render template. Error: {}", err),
                    )
                        .into_response()
                }
            },
            ResponseResult::Res(res) => res.map(axum::body::Body::new),
        }
    }
}

//////// TEMPLATES ////////

#[derive(Serialize, ToSchema)]
pub struct FileInfo {
    pub name: String,
    pub path_uri: String,
    pub is_file: bool,
    pub size: u64,
    pub last_modified: i64,
    pub progress: f32,
}

#[derive(Serialize, ToSchema)]
pub struct DirLister {
    pub files: Vec<FileInfo>,
}

#[derive(Template, Serialize, ToSchema)]
#[template(path = "index.html")]
pub struct DirListTemplate {
    pub lister: DirLister,
    pub cur_path: String,
}

mod filters {
    pub(crate) fn datetime(ts: &i64) -> ::askama::Result<String> {
        if let Ok(format) =
            time::format_description::parse("[year]-[month]-[day] [hour]:[minute]:[second] UTC")
        {
            return Ok(time::OffsetDateTime::from_unix_timestamp(*ts)
                .unwrap()
                .format(&format)
                .unwrap());
        }
        Err(askama::Error::Fmt(std::fmt::Error))
    }
}

#[derive(Template, Serialize, ToSchema)]
#[template(path = "error.html")]
pub struct ErrorTemplate {
    pub err: TemplateErr,
    pub cur_path: String,
    pub message: String,
}

const FAIL_REASON_HEADER_NAME: &str = "fl-server-fail-reason";

#[derive(Serialize, ToSchema)]
pub enum TemplateErr {
    #[schema(title = "TemplateErrBadRequest")]
    BadRequest(String),
    #[schema(title = "TemplateErrNotFound")]
    NotFound(String),
    #[schema(title = "TemplateErrInternalServerError")]
    InternalServerError(String),
}
165
components/rfs/src/server/serve_flists.rs
Normal file
@@ -0,0 +1,165 @@
use axum::extract::State;
use std::{io::Error, path::PathBuf, sync::Arc};
use tokio::io;
use tower::util::ServiceExt;
use tower_http::services::ServeDir;

use axum::{
    body::Body,
    http::{Request, StatusCode},
    response::IntoResponse,
};
use axum_macros::debug_handler;
use percent_encoding::percent_decode;

use crate::server::{
    config,
    response::{
        DirListTemplate, DirLister, ErrorTemplate, FileInfo, ResponseError, ResponseResult,
        TemplateErr,
    },
};

#[debug_handler]
/// Serve flist files from the server's filesystem
#[utoipa::path(
    get,
    path = "/{path}",
    tag = "Flist Management",
    params(
        ("path" = String, Path, description = "Path to the flist file or directory to serve")
    ),
    responses(
        (status = 200, description = "Successfully served the flist or directory listing", body = ResponseResult),
        (status = 404, description = "Flist not found", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError)
    )
)]
pub async fn serve_flists(
    State(state): State<Arc<config::AppState>>,
    req: Request<Body>,
) -> impl IntoResponse {
    let path = req.uri().path().to_string();

    match ServeDir::new("").oneshot(req).await {
        Ok(res) => {
            let status = res.status();
            match status {
                StatusCode::NOT_FOUND => {
                    let full_path = match validate_path(&path) {
                        Ok(p) => p,
                        Err(_) => {
                            return Err(ResponseError::TemplateError(ErrorTemplate {
                                err: TemplateErr::BadRequest("invalid path".to_string()),
                                cur_path: path.to_string(),
                                message: "invalid path".to_owned(),
                            }));
                        }
                    };

                    let cur_path = std::path::Path::new(&full_path);

                    match cur_path.is_dir() {
                        true => {
                            let rs = visit_dir_one_level(&full_path, &state).await;
                            match rs {
                                Ok(files) => Ok(ResponseResult::DirTemplate(DirListTemplate {
                                    lister: DirLister { files },
                                    cur_path: path.to_string(),
                                })),
                                Err(e) => Err(ResponseError::TemplateError(ErrorTemplate {
                                    err: TemplateErr::InternalServerError(e.to_string()),
                                    cur_path: path.to_string(),
                                    message: e.to_string(),
                                })),
                            }
                        }
                        false => Err(ResponseError::TemplateError(ErrorTemplate {
                            err: TemplateErr::NotFound("file not found".to_string()),
                            cur_path: path.to_string(),
                            message: "file not found".to_owned(),
                        })),
                    }
                }
                _ => Ok(ResponseResult::Res(res)),
            }
        }
        Err(err) => Err(ResponseError::TemplateError(ErrorTemplate {
            err: TemplateErr::InternalServerError(format!("Unhandled error: {}", err)),
            cur_path: path.to_string(),
            message: format!("Unhandled error: {}", err),
        })),
    }
}

fn validate_path(path: &str) -> io::Result<PathBuf> {
    let path = path.trim_start_matches('/');
    let path = percent_decode(path.as_ref()).decode_utf8_lossy();

    let mut full_path = PathBuf::new();

    // validate
    for seg in path.split('/') {
        if seg.starts_with("..") || seg.contains('\\') {
            return Err(Error::other("invalid path"));
        }
        full_path.push(seg);
    }

    Ok(full_path)
}

pub async fn visit_dir_one_level<P: AsRef<std::path::Path>>(
    path: P,
    state: &Arc<config::AppState>,
) -> io::Result<Vec<FileInfo>> {
    let path = path.as_ref();
    let mut dir = tokio::fs::read_dir(path).await?;
    let mut files: Vec<FileInfo> = Vec::new();

    while let Some(child) = dir.next_entry().await? {
        let path_uri = child.path().to_string_lossy().to_string();
        let is_file = child.file_type().await?.is_file();
        let name = child.file_name().to_string_lossy().to_string();

        let mut progress = 0.0;
        if is_file {
            match state
                .flists_progress
                .lock()
                .expect("failed to lock state")
                .get(&path.join(&name).to_path_buf())
            {
                Some(p) => progress = *p,
                None => progress = 100.0,
            }
|
||||
|
||||
let ext = child
|
||||
.path()
|
||||
.extension()
|
||||
.expect("failed to get path extension")
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
if ext != "fl" {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
files.push(FileInfo {
|
||||
name,
|
||||
path_uri,
|
||||
is_file,
|
||||
size: child.metadata().await?.len(),
|
||||
last_modified: child
|
||||
.metadata()
|
||||
.await?
|
||||
.modified()?
|
||||
.duration_since(std::time::SystemTime::UNIX_EPOCH)
|
||||
.expect("failed to get duration")
|
||||
.as_secs() as i64,
|
||||
progress,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(files)
|
||||
}
|
||||
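A note on the traversal guard above: every percent-decoded segment is checked before it is pushed onto the result, so `..` components and backslashes never reach the filesystem. A standalone sketch of the same logic (illustration only, not part of the commit):

use std::io::{Error, Result};
use std::path::PathBuf;

// Reimplementation of the guard above, for illustration.
fn validate_path(path: &str) -> Result<PathBuf> {
    let mut full_path = PathBuf::new();
    for seg in path.trim_start_matches('/').split('/') {
        // Reject traversal components and Windows-style separators.
        if seg.starts_with("..") || seg.contains('\\') {
            return Err(Error::other("invalid path"));
        }
        full_path.push(seg);
    }
    Ok(full_path)
}

fn main() {
    assert!(validate_path("/flists/user/demo.fl").is_ok());
    assert!(validate_path("/../etc/passwd").is_err());
}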
197
components/rfs/src/server/website_handlers.rs
Normal file
@@ -0,0 +1,197 @@
use crate::fungi::{meta, Reader};
use aes_gcm::{
    aead::{Aead, KeyInit},
    Aes256Gcm, Nonce,
};
use anyhow::{Context, Result};
use axum::{
    extract::{Path, State},
    http::StatusCode,
    response::IntoResponse,
};
use axum_macros::debug_handler;
use mime_guess::from_path;
use std::fs;
use std::sync::Arc;
use tempfile::NamedTempFile;
// OpenApi is now only used in the main handlers.rs file

use crate::server::{config::AppState, db::DB, response::ResponseError};

// Website API endpoints are included in the main FlistApi in handlers.rs

/// Resolves a file path within a flist database to get file information
async fn get_file_from_flist(flist_content: &[u8], file_path: &str) -> Result<Vec<meta::Block>> {
    // Create a temporary file
    let temp_file = NamedTempFile::new().context("failed to create temporary file")?;

    // Write flist content to the temporary file
    fs::write(temp_file.path(), flist_content)
        .context("failed to write flist content to temporary file")?;

    // Open the flist file as a database using the existing Reader
    let reader = Reader::new(temp_file.path().to_str().unwrap())
        .await
        .context("failed to open flist as a database")?;

    // Find the root inode
    let root_inode: u64 = reader
        .root_inode()
        .await
        .context("failed to find root inode")?
        .ino;

    // Split the path and traverse
    let mut current_inode = root_inode;
    let path_components: Vec<&str> = file_path.split('/').collect();

    for (i, component) in path_components.iter().enumerate() {
        if component.is_empty() {
            continue;
        }

        // If this is the last component, get file info
        if i == path_components.len() - 1 {
            let file_inode = match reader.lookup(current_inode, component).await {
                Ok(inode) => match inode {
                    Some(inode) => inode.ino,
                    None => {
                        anyhow::bail!("file not found");
                    }
                },
                Err(err) => return Err(anyhow::Error::new(err).context("failed to lookup inode")),
            };

            // Get blocks
            let blocks: Vec<meta::Block> = reader
                .blocks(file_inode)
                .await
                .context("failed to get blocks")?;

            return Ok(blocks);
        }

        // Find the next inode in the path
        current_inode = match reader.lookup(current_inode, component).await {
            Ok(inode) => match inode {
                Some(inode) => inode.ino,
                None => {
                    anyhow::bail!("directory not found");
                }
            },
            Err(err) => return Err(anyhow::Error::new(err).context("failed to lookup inode")),
        };
    }

    anyhow::bail!("file not found")
}

async fn decrypt_block(state: &Arc<AppState>, block: &meta::Block) -> Result<Vec<u8>> {
    let encrypted = match state.db.get_block(&hex::encode(block.id)).await {
        Ok(Some(block_content)) => block_content,
        Ok(None) => {
            anyhow::bail!("Block {:?} not found", block.id);
        }
        Err(err) => {
            anyhow::bail!("Failed to get block {:?}: {}", block.id, err);
        }
    };

    let cipher =
        Aes256Gcm::new_from_slice(&block.key).map_err(|_| anyhow::anyhow!("key is invalid"))?;
    let nonce = Nonce::from_slice(&block.key[..12]);

    let compressed = cipher
        .decrypt(nonce, encrypted.as_slice())
        .map_err(|_| anyhow::anyhow!("decryption error"))?;

    let mut decoder = snap::raw::Decoder::new();
    let plain = decoder.decompress_vec(&compressed)?;

    Ok(plain)
}

#[utoipa::path(
    get,
    path = "/api/v1/website/{website_hash}/{path}",
    tag = "Website Serving",
    responses(
        (status = 200, description = "Website file served successfully", content_type = "application/octet-stream", body = [u8]),
        (status = 404, description = "File not found", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    ),
    params(
        ("website_hash" = String, Path, description = "flist hash of the website directory"),
        ("path" = String, Path, description = "Path to the file within the website directory, defaults to index.html if empty")
    )
)]
#[debug_handler]
pub async fn serve_website_handler(
    State(state): State<Arc<AppState>>,
    Path((website_hash, path)): Path<(String, String)>,
) -> impl IntoResponse {
    // If no path is provided, default to index.html
    let file_path = if path.is_empty() {
        "index.html".to_string()
    } else {
        path
    };

    // Get the flist using the website hash
    let flist = match state.db.get_file_by_hash(&website_hash).await {
        Ok(Some(file)) => file,
        Ok(None) => {
            return Err(ResponseError::NotFound(format!(
                "Flist with hash '{}' not found",
                website_hash
            )));
        }
        Err(err) => {
            log::error!("Failed to retrieve flist metadata: {}", err);
            return Err(ResponseError::InternalServerError);
        }
    };

    // Resolve the file information from the flist content
    let file_blocks = match get_file_from_flist(&flist.file_content, &file_path).await {
        Ok(blocks) => blocks,
        Err(err) => {
            log::error!(
                "Failed to resolve file '{}' from flist '{}': {}",
                file_path,
                website_hash,
                err
            );
            return Err(ResponseError::NotFound(format!(
                "File {} not found in flist {}",
                file_path, website_hash
            )));
        }
    };

    let mut file_content = Vec::new();
    for block in file_blocks {
        match decrypt_block(&state, &block).await {
            Ok(block_content) => file_content.extend(block_content),
            Err(err) => {
                log::error!(
                    "Failed to decrypt block {:?} for file '{}' in website '{}': {}",
                    block.id,
                    file_path,
                    website_hash,
                    err
                );
                return Err(ResponseError::InternalServerError);
            }
        }
    }

    let mime_type = from_path(&file_path).first_or_octet_stream();

    Ok((
        StatusCode::OK,
        [(axum::http::header::CONTENT_TYPE, mime_type.to_string())],
        file_content,
    )
        .into_response())
}
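Given the route annotated above, a client retrieves one file of a published website by flist hash plus relative path. A minimal sketch with reqwest; the host and hash are placeholders, not values from this commit:

// Hypothetical client for GET /api/v1/website/{website_hash}/{path}.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let server = "http://localhost:4000"; // assumed fl-server address
    let website_hash = "abc123";          // placeholder flist hash
    let url = format!("{}/api/v1/website/{}/index.html", server, website_hash);

    let response = reqwest::get(&url).await?;
    println!("status: {}", response.status());
    let body = response.bytes().await?;
    println!("fetched {} bytes", body.len());
    Ok(())
}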
380
components/rfs/src/server_api.rs
Normal file
@@ -0,0 +1,380 @@
use anyhow::{Context, Result};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::sync::Arc;

#[derive(Debug, Serialize, Deserialize)]
pub struct VerifyBlock {
    /// Block hash to verify
    pub block_hash: String,
    /// File hash associated with the block
    pub file_hash: String,
    /// Block index within the file
    pub block_index: u64,
}

#[derive(Debug, Serialize, Deserialize)]
struct VerifyBlocksRequest {
    blocks: Vec<VerifyBlock>,
}

#[derive(Debug, Serialize, Deserialize)]
struct VerifyBlocksResponse {
    missing: Vec<String>,
}

/// Response structure for the blocks endpoint
#[derive(Debug, Serialize, Deserialize)]
struct BlocksResponse {
    blocks: Vec<(String, u64)>,
}

/// Response for listing blocks
#[derive(Debug, Serialize, Deserialize)]
pub struct ListBlocksResponse {
    pub blocks: Vec<String>,
    pub total: u64,
    pub page: u32,
    pub per_page: u32,
}

#[derive(Deserialize)]
pub struct SigninResponse {
    pub access_token: String,
}

/// Response for user blocks endpoint
#[derive(Debug, Serialize, Deserialize)]
pub struct UserBlocksResponse {
    /// List of blocks with their sizes
    pub blocks: Vec<(String, u64)>,
    /// Total number of blocks
    pub total: u64,
    /// Total number of all blocks
    pub all_blocks: u64,
}

/// Response for block downloads endpoint
#[derive(Debug, Serialize, Deserialize)]
pub struct BlockDownloadsResponse {
    /// Block hash
    pub block_hash: String,
    /// Number of times the block has been downloaded
    pub downloads_count: u64,
    /// Size of the block in bytes
    pub block_size: u64,
}

/// Downloads blocks associated with a hash (file hash or block hash)
/// Returns a vector of (block_hash, block_index) pairs
pub async fn get_blocks_by_hash(hash: &str, server_url: String) -> Result<Vec<(String, u64)>> {
    info!("Getting blocks for hash: {}", hash);

    // Create HTTP client
    let client = Client::new();

    // Construct the blocks URL
    let blocks_url = format!("{}/api/v1/blocks/{}", server_url, hash);

    info!("Requesting blocks from: {}", blocks_url);

    // Send GET request to get the blocks
    let response = client
        .get(&blocks_url)
        .send()
        .await
        .context("Failed to get blocks from server")?;

    // Check if the request was successful
    if !response.status().is_success() {
        return Err(anyhow::anyhow!(
            "Server returned error: {} - {}",
            response.status(),
            response.text().await?
        ));
    }

    // Parse the response
    let blocks_response: BlocksResponse = response
        .json()
        .await
        .context("Failed to parse blocks response")?;

    info!("Retrieved {} blocks", blocks_response.blocks.len());

    Ok(blocks_response.blocks)
}

pub async fn download_block(block_hash: &str, server_url: &str) -> Result<bytes::Bytes> {
    let block_url = format!("{}/api/v1/block/{}", server_url, block_hash);

    // Create HTTP client
    let client = Client::new();

    // Send GET request to download the block
    let response = client
        .get(&block_url)
        .send()
        .await
        .context(format!("Failed to download block {}", block_hash))?;

    // Check if the request was successful
    if !response.status().is_success() {
        return Err(anyhow::anyhow!(
            "Server returned error for block {}: {} - {}",
            block_hash,
            response.status(),
            response.text().await?
        ));
    }

    // Get the block content
    let block_content = response
        .bytes()
        .await
        .context("Failed to read block content")?;
    info!(
        "Downloaded block {} ({} bytes)",
        block_hash,
        block_content.len()
    );

    Ok(block_content)
}

/// Verifies which blocks are missing on the server
pub async fn verify_blocks_with_server(
    client: &Client,
    server_url: String,
    blocks: Vec<VerifyBlock>,
) -> Result<Vec<String>> {
    let verify_url = format!("{}/api/v1/block/verify", server_url);
    let verify_request = VerifyBlocksRequest { blocks };

    info!("Verifying blocks with server: {}", verify_url);

    let response = client
        .post(&verify_url)
        .json(&verify_request)
        .send()
        .await
        .context("Failed to verify blocks with server")?;

    if !response.status().is_success() {
        return Err(anyhow::anyhow!(
            "Server returned error: {} - {}",
            response.status(),
            response.text().await?
        ));
    }

    let verify_response: VerifyBlocksResponse = response
        .json()
        .await
        .context("Failed to parse server response")?;

    Ok(verify_response.missing)
}

/// Uploads a single block to the server
pub async fn upload_block(
    client: Arc<Client>,
    server_url: String,
    hash: String,
    data: Vec<u8>,
    file_hash: String,
    idx: u64,
    token: String,
) -> Result<()> {
    let upload_block_url = format!("{}/api/v1/block", server_url);

    info!("Uploading block: {}", hash);

    // Send the data directly as bytes with query parameters
    let response = client
        .post(&upload_block_url)
        .header("Content-Type", "application/octet-stream")
        .header("Authorization", format!("Bearer {}", token)) // Add Authorization header
        .query(&[("file_hash", &file_hash), ("idx", &idx.to_string())])
        .body(data)
        .send()
        .await
        .context("Failed to upload block")?;

    if !response.status().is_success() {
        return Err(anyhow::anyhow!(
            "Failed to upload block {}: {} - {}",
            hash,
            response.status(),
            response.text().await?
        ));
    }

    if response.status() == 200 {
        info!("Block {} already exists on server", hash);
    }
    if response.status() == 201 {
        info!("Successfully uploaded block: {}", hash);
    }

    Ok(())
}

/// Checks if a block exists on the server by its hash.
pub async fn check_block(server_url: &str, hash: &str) -> Result<bool> {
    let url = format!("{}/api/v1/block/{}", server_url, hash);

    let client = Client::new();
    let response = client
        .head(&url)
        .send()
        .await
        .context("Failed to send request to check block")?;

    match response.status() {
        reqwest::StatusCode::OK => Ok(true),         // Block exists
        reqwest::StatusCode::NOT_FOUND => Ok(false), // Block does not exist
        _ => Err(anyhow::anyhow!(
            "Unexpected response from server: {}",
            response.status()
        )),
    }
}

/// Lists blocks available on the server with pagination.
/// Returns the block hashes for the requested page together with the total block count.
pub async fn list_blocks(
    server_url: &str,
    page_size: usize,
    page: usize,
) -> Result<(Vec<String>, u64)> {
    let blocks_url = format!(
        "{}/api/v1/blocks?page={}&page_size={}",
        server_url, page, page_size
    );

    // Create HTTP client
    let client = Client::new();

    // Send GET request to get blocks for the current page
    let response = client
        .get(&blocks_url)
        .send()
        .await
        .context("Failed to list blocks from server")?;

    // Check if the request was successful
    if !response.status().is_success() {
        return Err(anyhow::anyhow!(
            "Server returned error: {} - {}",
            response.status(),
            response.text().await?
        ));
    }

    // Parse the response
    let blocks_response: ListBlocksResponse = response
        .json()
        .await
        .context("Failed to parse blocks response")?;

    Ok((blocks_response.blocks, blocks_response.total))
}

pub async fn signin(
    client: &Client,
    server_url: &str,
    username: &str,
    password: &str,
) -> Result<String> {
    let response = client
        .post(format!("{}/api/v1/signin", server_url))
        .json(&serde_json::json!({
            "username": username,
            "password": password,
        }))
        .send()
        .await
        .context("Failed to send request to signin endpoint")?;

    if response.status().is_success() {
        let signin_response: SigninResponse =
            response.json().await.context("Failed to parse response")?;
        Ok(signin_response.access_token)
    } else {
        anyhow::bail!("Failed to retrieve token: {}", response.status());
    }
}

/// Get all blocks uploaded by the authenticated user
pub async fn get_user_blocks(
    server_url: &str,
    token: &str,
    page: Option<u32>,
    per_page: Option<u32>,
) -> Result<UserBlocksResponse> {
    let url = format!(
        "{}/api/v1/user/blocks?page={}&per_page={}",
        server_url,
        page.unwrap_or(1),
        per_page.unwrap_or(50)
    );

    // Create HTTP client
    let client = Client::new();

    // Send GET request with authorization header
    let response = client
        .get(&url)
        .header("Authorization", format!("Bearer {}", token))
        .send()
        .await
        .context("Failed to get user blocks from server")?;

    // Check if the request was successful
    if !response.status().is_success() {
        return Err(anyhow::anyhow!(
            "Server returned error: {}",
            response.status(),
        ));
    }

    // Parse the response
    let blocks_response: UserBlocksResponse = response
        .json()
        .await
        .context("Failed to parse user blocks response")?;

    Ok(blocks_response)
}

/// Get the download count for a specific block
pub async fn get_block_downloads(server_url: &str, hash: &str) -> Result<BlockDownloadsResponse> {
    let url = format!("{}/api/v1/block/{}/downloads", server_url, hash);

    // Create HTTP client
    let client = Client::new();

    // Send GET request
    let response = client
        .get(&url)
        .send()
        .await
        .context("Failed to get block downloads from server")?;

    // Check if the request was successful
    if !response.status().is_success() {
        return Err(anyhow::anyhow!(
            "Server returned error: {}",
            response.status(),
        ));
    }

    // Parse the response
    let downloads_response: BlockDownloadsResponse = response
        .json()
        .await
        .context("Failed to parse block downloads response")?;

    Ok(downloads_response)
}
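Taken together, these helpers form a small client workflow: sign in once for a bearer token, then push blocks. A hedged sketch, written as if inside this crate; the credentials, server address, and the blake2b block-hash convention are assumptions, not values from this commit:

use anyhow::Result;
use reqwest::Client;
use std::sync::Arc;

// Hypothetical flow built from the helpers above.
async fn push_one_block(server: &str) -> Result<()> {
    let client = Arc::new(Client::new());
    // Credentials are placeholders.
    let token = crate::server_api::signin(&client, server, "admin", "secret").await?;

    let data = b"hello blocks".to_vec();
    // Assumes blocks are addressed by a 32-byte blake2b digest, as in store/bs.rs.
    let hash = blake2b_simd::Params::new().hash_length(32).hash(&data);
    let hash = hex::encode(hash.as_bytes());

    // No parent file here, so the file hash is empty and the index is 0.
    crate::server_api::upload_block(
        client,
        server.to_string(),
        hash,
        data,
        String::new(),
        0,
        token,
    )
    .await
}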
133
components/rfs/src/store/bs.rs
Normal file
@@ -0,0 +1,133 @@
use super::{Error, Result, Store};
use crate::fungi::meta::Block;
use aes_gcm::{
    aead::{
        generic_array::{self, GenericArray},
        Aead, KeyInit,
    },
    Aes256Gcm, Nonce,
};

fn hash(input: &[u8]) -> GenericArray<u8, generic_array::typenum::U32> {
    let hash = blake2b_simd::Params::new().hash_length(32).hash(input);
    GenericArray::from_slice(hash.as_bytes()).to_owned()
}

/// The block store builds on top of a store and adds encryption and compression
#[derive(Clone, Debug)]
pub struct BlockStore<S: Store> {
    store: S,
}

impl<S> From<S> for BlockStore<S>
where
    S: Store,
{
    fn from(store: S) -> Self {
        Self { store }
    }
}

impl<S> BlockStore<S>
where
    S: Store,
{
    pub fn inner(self) -> S {
        self.store
    }

    pub async fn get(&self, block: &Block) -> Result<Vec<u8>> {
        let encrypted = self.store.get(&block.id).await?;

        let cipher = Aes256Gcm::new_from_slice(&block.key).map_err(|_| Error::InvalidKey)?;
        let nonce = Nonce::from_slice(&block.key[..12]);

        let compressed = cipher
            .decrypt(nonce, encrypted.as_slice())
            .map_err(|_| Error::EncryptionError)?;

        let mut decoder = snap::raw::Decoder::new();
        let plain = decoder.decompress_vec(&compressed)?;

        Ok(plain)
    }

    pub async fn set(&self, blob: &[u8]) -> Result<Block> {
        // we first calculate the hash of the plain-text data

        let key = hash(blob);
        let mut encoder = snap::raw::Encoder::new();
        // data is then compressed
        let compressed = encoder.compress_vec(blob)?;

        // we then encrypt it using the hash of the plain-text as a key
        let cipher = Aes256Gcm::new(&key);
        // the nonce is also derived from the key, a nonce is 12 bytes for aes
        // it's done like this so a store can still dedup the data
        let nonce = Nonce::from_slice(&key[..12]);

        // we encrypt the data
        let encrypted = cipher
            .encrypt(nonce, compressed.as_slice())
            .map_err(|_| Error::EncryptionError)?;

        // we hash it again, and use that as the store key
        let id = hash(&encrypted);

        let block = Block {
            id: id.into(),
            key: key.into(),
        };

        self.store.set(&block.id, &encrypted).await?;

        Ok(block)
    }
}

#[cfg(test)]
mod test {
    use super::super::Route;

    use super::*;
    use std::collections::HashMap;
    use std::sync::Arc;
    use tokio::sync::Mutex;

    #[derive(Default)]
    struct InMemoryStore {
        map: Arc<Mutex<HashMap<Vec<u8>, Vec<u8>>>>,
    }

    #[async_trait::async_trait]
    impl Store for InMemoryStore {
        async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
            let map = self.map.lock().await;
            let v = map.get(key).ok_or(Error::KeyNotFound)?;
            Ok(v.clone())
        }
        async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
            let mut map = self.map.lock().await;
            map.insert(key.into(), blob.into());

            Ok(())
        }

        fn routes(&self) -> Vec<Route> {
            vec![Route::url("mem://")]
        }
    }

    #[tokio::test]
    async fn test_block_store() {
        let store = InMemoryStore::default();
        let block_store = BlockStore::from(store);

        let blob = "some random data to store";
        let block = block_store.set(blob.as_bytes()).await.unwrap();

        let received = block_store.get(&block).await.unwrap();

        assert_eq!(blob.as_bytes(), received.as_slice());
    }
}
83
components/rfs/src/store/dir.rs
Normal file
@@ -0,0 +1,83 @@
use super::{Error, Result, Route, Store};
use std::io::ErrorKind;
use std::os::unix::prelude::OsStrExt;
use std::path::PathBuf;
use tokio::fs;
use url;

pub const SCHEME: &str = "dir";

/// DirStore is a simple store that stores blobs on the filesystem
/// and is mainly used for testing

#[derive(Clone)]
pub struct DirStore {
    root: PathBuf,
}

impl DirStore {
    pub async fn make<U: AsRef<str>>(url: &U) -> Result<DirStore> {
        let u = url::Url::parse(url.as_ref())?;
        if u.scheme() != SCHEME {
            return Err(Error::InvalidScheme(u.scheme().into(), SCHEME.into()));
        }

        Ok(DirStore::new(u.path()).await?)
    }
    pub async fn new<P: Into<PathBuf>>(root: P) -> Result<Self> {
        let root = root.into();
        fs::create_dir_all(&root).await?;
        Ok(Self { root })
    }
}

#[async_trait::async_trait]
impl Store for DirStore {
    async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
        let file_name = hex::encode(key);
        let dir_path = self.root.join(&file_name[0..2]);

        let mut path = dir_path.join(&file_name);
        let data = match fs::read(&path).await {
            Ok(data) => data,
            Err(err) if err.kind() == ErrorKind::NotFound => {
                path = self.root.join(file_name);
                let data = match fs::read(&path).await {
                    Ok(data) => data,
                    Err(err) if err.kind() == ErrorKind::NotFound => {
                        return Err(Error::KeyNotFound);
                    }
                    Err(err) => {
                        return Err(Error::IO(err));
                    }
                };
                data
            }
            Err(err) => {
                return Err(Error::IO(err));
            }
        };

        Ok(data)
    }

    async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
        let file_name = hex::encode(key);
        let dir_path = self.root.join(&file_name[0..2]);

        fs::create_dir_all(&dir_path).await?;

        let file_path = dir_path.join(file_name);
        fs::write(file_path, blob).await?;
        Ok(())
    }

    fn routes(&self) -> Vec<Route> {
        let r = Route::url(format!(
            "dir://{}",
            String::from_utf8_lossy(self.root.as_os_str().as_bytes())
        ));

        vec![r]
    }
}
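DirStore shards blobs into subdirectories named after the first two hex characters of the key, with the flat legacy layout as a read fallback. A round-trip sketch, written as if inside this crate; the /tmp path is illustrative:

use crate::store::{dir::DirStore, Store};

// Store and re-read one blob; it lands in /tmp/rfs-store/ab/abcd.
async fn round_trip() -> anyhow::Result<()> {
    let store = DirStore::make(&"dir:///tmp/rfs-store").await?;
    let key = [0xab, 0xcd];
    store.set(&key, b"blob contents").await?;
    let back = store.get(&key).await?;
    assert_eq!(back, b"blob contents");
    Ok(())
}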
73
components/rfs/src/store/http.rs
Normal file
@@ -0,0 +1,73 @@
use super::{Error, Result, Route, Store};
use reqwest::{self, StatusCode};
use url::Url;

#[derive(Clone)]
pub struct HTTPStore {
    url: Url,
}

impl HTTPStore {
    pub async fn make<U: AsRef<str>>(url: &U) -> Result<HTTPStore> {
        let u = Url::parse(url.as_ref())?;
        if u.scheme() != "http" && u.scheme() != "https" {
            return Err(Error::Other(anyhow::Error::msg("invalid scheme")));
        }

        Ok(HTTPStore::new(u).await?)
    }
    pub async fn new<U: Into<Url>>(url: U) -> Result<Self> {
        let url = url.into();
        Ok(Self { url })
    }
}

#[async_trait::async_trait]
impl Store for HTTPStore {
    async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
        let file = hex::encode(key);
        let mut file_path = self.url.clone();
        file_path
            .path_segments_mut()
            .map_err(|_| Error::Other(anyhow::Error::msg("cannot be base")))?
            .push(&file[0..2])
            .push(&file);
        let mut legacy_path = self.url.clone();

        legacy_path
            .path_segments_mut()
            .map_err(|_| Error::Other(anyhow::Error::msg("cannot be base")))?
            .push(&file);

        let data = match reqwest::get(file_path).await {
            Ok(mut response) => {
                if response.status() == StatusCode::NOT_FOUND {
                    response = reqwest::get(legacy_path)
                        .await
                        .map_err(|_| Error::KeyNotFound)?;
                    if response.status() != StatusCode::OK {
                        return Err(Error::KeyNotFound);
                    }
                }
                if response.status() != StatusCode::OK {
                    return Err(Error::Unavailable);
                }
                response.bytes().await.map_err(|e| Error::Other(e.into()))?
            }
            Err(err) => return Err(Error::Other(err.into())),
        };
        Ok(data.into())
    }

    async fn set(&self, _key: &[u8], _blob: &[u8]) -> Result<()> {
        Err(Error::Other(anyhow::Error::msg(
            "http store doesn't support uploading",
        )))
    }

    fn routes(&self) -> Vec<Route> {
        let r = Route::url(self.url.clone());

        vec![r]
    }
}
247
components/rfs/src/store/mod.rs
Normal file
@@ -0,0 +1,247 @@
mod bs;
pub mod dir;
pub mod http;
mod router;
pub mod s3store;
pub mod server;
pub mod zdb;

use anyhow::Context;
use rand::seq::SliceRandom;

pub use bs::BlockStore;
use regex::Regex;

use crate::fungi;

pub use self::router::Router;

pub async fn make<U: AsRef<str>>(u: U) -> Result<Stores> {
    let parsed = url::Url::parse(u.as_ref())?;

    match parsed.scheme() {
        dir::SCHEME => return Ok(Stores::Dir(dir::DirStore::make(&u).await?)),
        "s3" | "s3s" | "s3s+tls" => return Ok(Stores::S3(s3store::S3Store::make(&u).await?)),
        "zdb" => return Ok(Stores::ZDB(zdb::ZdbStore::make(&u).await?)),
        "http" | "https" => return Ok(Stores::HTTP(http::HTTPStore::make(&u).await?)),
        server::SCHEME => return Ok(Stores::Server(server::ServerStore::make(&u).await?)),
        _ => return Err(Error::UnknownStore(parsed.scheme().into())),
    }
}

#[derive(thiserror::Error, Debug)]
pub enum Error {
    #[error("key not found")]
    KeyNotFound,
    #[error("invalid key")]
    InvalidKey,
    #[error("invalid blob")]
    InvalidBlob,
    #[error("key is not routable")]
    KeyNotRoutable,
    #[error("store is not available")]
    Unavailable,

    #[error("compression error: {0}")]
    Compression(#[from] snap::Error),

    #[error("encryption error")]
    EncryptionError,

    // TODO: better display for the Box<Vec<Self>>
    #[error("multiple error: {0:?}")]
    Multiple(Box<Vec<Self>>),

    #[error("io error: {0}")]
    IO(#[from] std::io::Error),

    #[error("url parse error: {0}")]
    Url(#[from] url::ParseError),
    #[error("unknown store type '{0}'")]
    UnknownStore(String),
    #[error("invalid schema '{0}' expected '{1}'")]
    InvalidScheme(String, String),

    #[error("unknown store error {0:#}")]
    Other(#[from] anyhow::Error),
}

pub type Result<T> = std::result::Result<T, Error>;

pub struct Route {
    pub start: Option<u8>,
    pub end: Option<u8>,
    pub url: String,
}

impl Route {
    pub fn url<S: Into<String>>(s: S) -> Self {
        Self {
            start: None,
            end: None,
            url: s.into(),
        }
    }
}
/// The store trait defines a simple (low level) key/value store interface to set/get blobs
/// the concern of the store is to only store given data with given key and implement
/// the means to retrieve it again once a get is called.
#[async_trait::async_trait]
pub trait Store: Send + Sync + 'static {
    async fn get(&self, key: &[u8]) -> Result<Vec<u8>>;
    async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()>;
    fn routes(&self) -> Vec<Route>;
}

#[async_trait::async_trait]
impl<S> Store for Router<S>
where
    S: Store,
{
    async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
        if key.is_empty() {
            return Err(Error::InvalidKey);
        }
        let mut errors = Vec::default();

        // to make it fair we shuffle the list of matching routers randomly every time
        // before we do a get
        let mut routers: Vec<&S> = self.route(key[0]).collect();
        routers.shuffle(&mut rand::thread_rng());
        for store in routers {
            match store.get(key).await {
                Ok(object) => return Ok(object),
                Err(err) => errors.push(err),
            };
        }

        if errors.is_empty() {
            return Err(Error::KeyNotRoutable);
        }

        // return aggregated errors
        return Err(Error::Multiple(Box::new(errors)));
    }

    async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
        if key.is_empty() {
            return Err(Error::InvalidKey);
        }

        let mut b = false;
        for store in self.route(key[0]) {
            b = true;
            store.set(key, blob).await?;
        }

        if !b {
            return Err(Error::KeyNotRoutable);
        }

        Ok(())
    }

    fn routes(&self) -> Vec<Route> {
        let mut routes = Vec::default();
        for (key, value) in self.routes.iter() {
            for sub in value.routes() {
                let r = Route {
                    start: Some(sub.start.unwrap_or(*key.start())),
                    end: Some(sub.end.unwrap_or(*key.end())),
                    url: sub.url,
                };
                routes.push(r);
            }
        }

        routes
    }
}

pub async fn get_router(meta: &fungi::Reader) -> Result<Router<Stores>> {
    let mut router = Router::new();

    for route in meta.routes().await.context("failed to get store routes")? {
        let store = make(&route.url)
            .await
            .with_context(|| format!("failed to initialize store '{}'", route.url))?;
        router.add(route.start, route.end, store);
    }

    Ok(router)
}

pub async fn parse_router(urls: &[String]) -> anyhow::Result<Router<Stores>> {
    let mut router = Router::new();
    let pattern = r"^(?P<range>[0-9a-f]{2}-[0-9a-f]{2})=(?P<url>.+)$";
    let re = Regex::new(pattern)?;

    for u in urls {
        let ((start, end), store) = match re.captures(u) {
            None => ((0x00, 0xff), make(u).await?),
            Some(captures) => {
                let url = captures.name("url").context("missing url group")?.as_str();
                let rng = captures
                    .name("range")
                    .context("missing range group")?
                    .as_str();

                let store = make(url).await?;
                let range = match rng.split_once('-') {
                    None => anyhow::bail!("invalid range format"),
                    Some((low, high)) => (
                        u8::from_str_radix(low, 16)
                            .with_context(|| format!("failed to parse low range '{}'", low))?,
                        u8::from_str_radix(high, 16)
                            .with_context(|| format!("failed to parse high range '{}'", high))?,
                    ),
                };
                (range, store)
            }
        };

        router.add(start, end, store);
    }

    Ok(router)
}

#[derive(Clone)]
pub enum Stores {
    S3(s3store::S3Store),
    Dir(dir::DirStore),
    ZDB(zdb::ZdbStore),
    HTTP(http::HTTPStore),
    Server(server::ServerStore),
}

#[async_trait::async_trait]
impl Store for Stores {
    async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
        match self {
            self::Stores::S3(s3_store) => s3_store.get(key).await,
            self::Stores::Dir(dir_store) => dir_store.get(key).await,
            self::Stores::ZDB(zdb_store) => zdb_store.get(key).await,
            self::Stores::HTTP(http_store) => http_store.get(key).await,
            self::Stores::Server(server_store) => server_store.get(key).await,
        }
    }
    async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
        match self {
            self::Stores::S3(s3_store) => s3_store.set(key, blob).await,
            self::Stores::Dir(dir_store) => dir_store.set(key, blob).await,
            self::Stores::ZDB(zdb_store) => zdb_store.set(key, blob).await,
            self::Stores::HTTP(http_store) => http_store.set(key, blob).await,
            self::Stores::Server(server_store) => server_store.set(key, blob).await,
        }
    }
    fn routes(&self) -> Vec<Route> {
        match self {
            self::Stores::S3(s3_store) => s3_store.routes(),
            self::Stores::Dir(dir_store) => dir_store.routes(),
            self::Stores::ZDB(zdb_store) => zdb_store.routes(),
            self::Stores::HTTP(http_store) => http_store.routes(),
            self::Stores::Server(server_store) => server_store.routes(),
        }
    }
}
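parse_router accepts either a bare store URL, which covers the whole 00-ff key space, or the `xx-yy=url` form that pins a store to a byte-prefix range. A sketch splitting keys across two directory stores, written as if inside this crate; the paths are placeholders:

use crate::store::{parse_router, Store};

// The first byte of the key selects the shard.
async fn sharded_setup() -> anyhow::Result<()> {
    let urls = vec![
        "00-7f=dir:///tmp/shard-low".to_string(),
        "80-ff=dir:///tmp/shard-high".to_string(),
    ];
    let router = parse_router(&urls).await?;

    router.set(&[0x10, 0x01], b"routed to shard-low").await?;
    router.set(&[0x90, 0x01], b"routed to shard-high").await?;
    Ok(())
}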
56
components/rfs/src/store/router.rs
Normal file
@@ -0,0 +1,56 @@
use std::ops::RangeInclusive;

/// route implements a naive prefix router by going through the complete set of
/// available routes and finding the ones that match a given prefix
#[derive(Default, Clone)]
pub struct Router<T> {
    pub(crate) routes: Vec<(RangeInclusive<u8>, T)>,
}

impl<T> Router<T> {
    pub fn new() -> Self {
        Self {
            routes: Vec::default(),
        }
    }

    /// add a range
    pub fn add(&mut self, start: u8, end: u8, route: T) {
        self.routes.push((start..=end, route));
    }

    /// return all stores that match a certain key
    ///
    /// TODO: maybe they need to be randomized
    pub fn route(&self, i: u8) -> impl Iterator<Item = &T> {
        self.routes
            .iter()
            .filter(move |f| f.0.contains(&i))
            .map(|v| &v.1)
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test() {
        let mut router = Router::default();

        router.add(0, 255, "a");
        router.add(0, 255, "b");
        router.add(0, 128, "c");

        let paths: Vec<&str> = router.route(200).map(|v| *v).collect();
        assert_eq!(paths.len(), 2);
        assert_eq!(paths[0], "a");
        assert_eq!(paths[1], "b");

        let paths: Vec<&str> = router.route(0).map(|v| *v).collect();
        assert_eq!(paths.len(), 3);
        assert_eq!(paths[0], "a");
        assert_eq!(paths[1], "b");
        assert_eq!(paths[2], "c");
    }
}
191
components/rfs/src/store/s3store.rs
Normal file
@@ -0,0 +1,191 @@
use super::{Error, Result, Route, Store};

use anyhow::Context;
use s3::{creds::Credentials, error::S3Error, Bucket, Region};
use url::Url;

fn get_config<U: AsRef<str>>(u: U) -> Result<(Credentials, Region, String)> {
    let url = Url::parse(u.as_ref())?;

    let access_key = url.username().to_string();
    let access_secret = url.password().map(|s| s.to_owned());

    let host = url.host_str().context("host not found")?;
    let port = url.port().context("port not found")?;
    let scheme = match url.scheme() {
        "s3" => "http://",
        "s3+tls" | "s3s" => "https://",
        _ => return Err(Error::Other(anyhow::Error::msg("invalid scheme"))),
    };

    let endpoint = format!("{}{}:{}", scheme, host, port);

    let bucket_name = url.path().trim_start_matches('/').to_string();

    let region_name = url
        .query_pairs()
        .find(|(key, _)| key == "region")
        .map(|(_, value)| value.to_string())
        .unwrap_or_default();

    Ok((
        Credentials {
            access_key: Some(access_key),
            secret_key: access_secret,
            security_token: None,
            session_token: None,
            expiration: None,
        },
        Region::Custom {
            region: region_name,
            endpoint,
        },
        bucket_name,
    ))
}

#[derive(Clone)]
pub struct S3Store {
    bucket: Bucket,
    url: String,
    // this is only here as a workaround for this bug https://github.com/durch/rust-s3/issues/337
    // because rfs uses the store in an async (and parallel) manner to upload/download blobs
    // we need to synchronize this locally in that store which will hurt performance
    // the two options for now are to either wait until this bug is fixed, or switch to another client
    // but for now we keep this workaround
}

impl S3Store {
    pub async fn make<U: AsRef<str>>(url: &U) -> Result<S3Store> {
        let (cred, region, bucket_name) = get_config(url.as_ref())?;
        Ok(S3Store::new(url.as_ref(), &bucket_name, region, cred)?)
    }
    pub fn new(url: &str, bucket_name: &str, region: Region, cred: Credentials) -> Result<Self> {
        let bucket = Bucket::new(bucket_name, region, cred)
            .context("failed to instantiate bucket")?
            .with_path_style();

        Ok(Self {
            bucket,
            url: url.to_owned(),
        })
    }
}

#[async_trait::async_trait]
impl Store for S3Store {
    async fn get(&self, key: &[u8]) -> super::Result<Vec<u8>> {
        match self.bucket.get_object(hex::encode(key)).await {
            Ok(res) => Ok(res.to_vec()),
            Err(S3Error::HttpFailWithBody(404, _)) => Err(Error::KeyNotFound),
            Err(S3Error::Io(err)) => Err(Error::IO(err)),
            Err(err) => Err(anyhow::Error::from(err).into()),
        }
    }

    async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
        self.bucket
            .put_object(hex::encode(key), blob)
            .await
            .context("put object over s3 storage")?;

        Ok(())
    }

    fn routes(&self) -> Vec<Route> {
        vec![Route::url(self.url.clone())]
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_get_config() {
        let (cred, region, bucket_name) =
            get_config("s3s://minioadmin:minioadmin@127.0.0.1:9000/mybucket?region=minio").unwrap();
        assert_eq!(
            cred,
            Credentials {
                access_key: Some("minioadmin".to_string()),
                secret_key: Some("minioadmin".to_string()),
                security_token: None,
                session_token: None,
                expiration: None,
            }
        );
        assert_eq!(
            region,
            Region::Custom {
                region: "minio".to_string(),
                endpoint: "https://127.0.0.1:9000".to_string()
            }
        );
        assert_eq!(bucket_name, "mybucket".to_string())
    }

    #[test]
    fn test_get_config_without_tls() {
        let (cred, region, bucket_name) =
            get_config("s3://minioadmin:minioadmin@127.0.0.1:9000/mybucket?region=minio").unwrap();
        assert_eq!(
            cred,
            Credentials {
                access_key: Some("minioadmin".to_string()),
                secret_key: Some("minioadmin".to_string()),
                security_token: None,
                session_token: None,
                expiration: None,
            }
        );
        assert_eq!(
            region,
            Region::Custom {
                region: "minio".to_string(),
                endpoint: "http://127.0.0.1:9000".to_string()
            }
        );
        assert_eq!(bucket_name, "mybucket".to_string())
    }

    #[ignore]
    #[tokio::test]
    async fn test_set_get() {
        let url = "s3://minioadmin:minioadmin@127.0.0.1:9000/mybucket?region=minio";
        let (cred, region, bucket_name) = get_config(url).unwrap();

        let store = S3Store::new(url, &bucket_name, region, cred);
        let store = store.unwrap();

        let key = b"test.txt";
        let blob = b"# Hello, World!";

        _ = store.set(key, blob).await;

        let get_res = store.get(key).await;
        let get_res = get_res.unwrap();

        assert_eq!(get_res, blob)
    }

    #[ignore]
    #[tokio::test]
    async fn test_set_get_without_region() {
        let url = "s3://minioadmin:minioadmin@127.0.0.1:9000/mybucket";
        let (cred, region, bucket_name) = get_config(url).unwrap();

        let store = S3Store::new(url, &bucket_name, region, cred);
        let store = store.unwrap();

        let key = b"test2.txt";
        let blob = b"# Hello, World!";

        _ = store.set(key, blob).await;

        let get_res = store.get(key).await;
        let get_res = get_res.unwrap();

        assert_eq!(get_res, blob)
    }
}
106
components/rfs/src/store/server.rs
Normal file
@@ -0,0 +1,106 @@
use super::{Error, Result, Route, Store};
use crate::server_api;
use reqwest::Client;
use std::sync::Arc;
use url;

pub const SCHEME: &str = "server";

/// ServerStore is a store that interfaces with the fl-server's API
/// It supports both uploads and downloads for blocks using the server's HTTP API
#[derive(Clone)]
pub struct ServerStore {
    /// Server URL
    server_url: String,
    /// HTTP client for making requests
    client: Arc<Client>,
    /// Authentication token
    token: Option<String>,
}

impl ServerStore {
    pub async fn make<U: AsRef<str>>(url: &U) -> Result<ServerStore> {
        let u = url::Url::parse(url.as_ref())?;
        if u.scheme() != SCHEME {
            return Err(Error::InvalidScheme(u.scheme().into(), SCHEME.into()));
        }

        // Extract the token from the query parameters
        let token = u
            .query_pairs()
            .find(|(key, _)| key == "token")
            .map(|(_, value)| value.to_string());

        // Extract the actual server URL (e.g., "http://localhost:4000")
        let server_url = u
            .host_str()
            .map(|host| format!("{}://{}", host, u.path().trim_start_matches('/')))
            .ok_or_else(|| Error::InvalidScheme("Invalid host in URL".into(), SCHEME.into()))?;

        let client = Arc::new(Client::new());

        Ok(Self {
            server_url,
            client,
            token,
        })
    }

    /// Create a new ServerStore with the given server URL
    pub fn new(server_url: String, token: Option<String>) -> Self {
        let client = Arc::new(Client::new());

        Self {
            server_url,
            client,
            token,
        }
    }
}

#[async_trait::async_trait]
impl Store for ServerStore {
    async fn get(&self, key: &[u8]) -> Result<Vec<u8>> {
        // Convert the key to a hex string
        let hash = hex::encode(key);

        // Download the block from the server
        match server_api::download_block(&hash, &self.server_url).await {
            Ok(data) => Ok(data.to_vec()),
            Err(err) => {
                // Check if the error is because the block doesn't exist
                if err.to_string().contains("404") {
                    return Err(Error::KeyNotFound);
                }
                Err(Error::Other(err))
            }
        }
    }

    async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
        // Convert the key to a hex string
        let hash = hex::encode(key);

        // Upload the block to the server
        let file_hash = "".to_string(); // No parent file to associate, so send an empty file hash
        let idx = 0; // Block index is unknown in this context, so default to 0

        server_api::upload_block(
            Arc::clone(&self.client),
            self.server_url.clone(),
            hash,
            blob.to_vec(),
            file_hash,
            idx,
            self.token.clone().unwrap_or_default(),
        )
        .await
        .map_err(|err| Error::Other(err))?;

        Ok(())
    }

    fn routes(&self) -> Vec<Route> {
        vec![Route::url(format!("{}://{}", SCHEME, self.server_url))]
    }
}
176
components/rfs/src/store/zdb.rs
Normal file
@@ -0,0 +1,176 @@
use super::{Error, Result, Route, Store};
use anyhow::Context;

use bb8_redis::{
    bb8::{CustomizeConnection, Pool},
    redis::{
        aio::Connection, cmd, AsyncCommands, ConnectionAddr, ConnectionInfo, RedisConnectionInfo,
        RedisError,
    },
    RedisConnectionManager,
};

#[derive(Debug)]
struct WithNamespace {
    namespace: Option<String>,
    password: Option<String>,
}

#[async_trait::async_trait]
impl CustomizeConnection<Connection, RedisError> for WithNamespace {
    async fn on_acquire(&self, connection: &mut Connection) -> anyhow::Result<(), RedisError> {
        match self.namespace {
            Some(ref ns) if ns != "default" => {
                let mut c = cmd("SELECT");
                let c = c.arg(ns);
                if let Some(ref password) = self.password {
                    c.arg(password);
                }

                let result = c.query_async(connection).await;
                if let Err(ref err) = result {
                    error!("failed to switch namespace to {}: {}", ns, err);
                }
                result
            }
            _ => Ok(()),
        }
    }
}

pub struct ZdbStoreFactory;

fn get_connection_info<U: AsRef<str>>(u: U) -> Result<(ConnectionInfo, Option<String>)> {
    let u = url::Url::parse(u.as_ref())?;

    let (address, namespace) = match u.host() {
        Some(host) => {
            let addr = match host {
                url::Host::Domain(domain) => domain.to_owned(),
                url::Host::Ipv4(ipv4) => ipv4.to_string(),
                url::Host::Ipv6(ipv6) => ipv6.to_string(),
            };

            let addr = ConnectionAddr::Tcp(addr, u.port().unwrap_or(9900));
            let ns: Option<String> = u
                .path_segments()
                .and_then(|s| s.last().map(|s| s.to_owned()));
            (addr, ns)
        }
        None => (ConnectionAddr::Unix(u.path().into()), None),
    };

    Ok((
        ConnectionInfo {
            addr: address,
            redis: RedisConnectionInfo {
                db: 0,
                username: if u.username().is_empty() {
                    None
                } else {
                    Some(u.username().into())
                },
                password: u.password().map(|s| s.into()),
            },
        },
        namespace,
    ))
}

#[derive(Clone)]
pub struct ZdbStore {
    url: String,
    pool: Pool<RedisConnectionManager>,
}

impl ZdbStore {
    pub async fn make<U: AsRef<str>>(url: &U) -> Result<ZdbStore> {
        let (mut info, namespace) = get_connection_info(url.as_ref())?;

        let namespace = WithNamespace {
            namespace,
            password: info.redis.password.take(),
        };

        log::debug!("connection {:#?}", info);
        log::debug!("switching namespace to: {:?}", namespace.namespace);

        let mgr = RedisConnectionManager::new(info)
            .context("failed to create redis connection manager")?;

        let pool = Pool::builder()
            .max_size(20)
            .connection_customizer(Box::new(namespace))
            .build(mgr)
            .await
            .context("failed to create connection pool")?;

        Ok(ZdbStore {
            url: url.as_ref().to_string(),
            pool,
        })
    }
}

#[async_trait::async_trait]
impl Store for ZdbStore {
    async fn get(&self, key: &[u8]) -> super::Result<Vec<u8>> {
        let mut con = self.pool.get().await.context("failed to get connection")?;

        let result: Option<Vec<u8>> = con.get(key).await.context("failed to get blob")?;
        let result = result.ok_or(Error::KeyNotFound)?;

        if result.is_empty() {
            return Err(Error::InvalidBlob);
        }

        Ok(result)
    }

    async fn set(&self, key: &[u8], blob: &[u8]) -> Result<()> {
        let mut con = self.pool.get().await.context("failed to get connection")?;

        if con
            .exists(key)
            .await
            .context("failed to check if blob exists")?
        {
            return Ok(());
        };

        con.set(key, blob).await.context("failed to set blob")?;

        Ok(())
    }

    fn routes(&self) -> Vec<Route> {
        vec![Route::url(self.url.clone())]
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_connection_info_simple() {
        let (info, ns) = get_connection_info("zdb://hub.grid.tf:9900").unwrap();
        assert_eq!(ns, None);
        assert_eq!(info.addr, ConnectionAddr::Tcp("hub.grid.tf".into(), 9900));
    }

    #[test]
    fn test_connection_info_ns() {
        let (info, ns) = get_connection_info("zdb://username@hub.grid.tf/custom").unwrap();
        assert_eq!(ns, Some("custom".into()));
        assert_eq!(info.addr, ConnectionAddr::Tcp("hub.grid.tf".into(), 9900));
        assert_eq!(info.redis.username, Some("username".into()));
    }

    #[test]
    fn test_connection_info_unix() {
        let (info, ns) = get_connection_info("zdb:///path/to/socket").unwrap();
        assert_eq!(ns, None);
        assert_eq!(info.addr, ConnectionAddr::Unix("/path/to/socket".into()));
    }
}
287
components/rfs/src/sync.rs
Normal file
@@ -0,0 +1,287 @@
|
||||
use anyhow::Result;
|
||||
use futures::{stream, StreamExt};
|
||||
use reqwest::Client;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
use crate::server_api::{self, VerifyBlock};
|
||||
|
||||
const PARALLEL_OPERATIONS: usize = 20;
|
||||
const DEFAULT_PAGE_SIZE: usize = 50;
|
||||
|
||||
/// Syncs a file or block between two servers using its hash
|
||||
pub async fn sync(
|
||||
hash: Option<&str>,
|
||||
source_server: &str,
|
||||
dest_server: &str,
|
||||
token: &str,
|
||||
) -> Result<()> {
|
||||
if token.is_empty() {
|
||||
return Err(anyhow::anyhow!("Authentication token is required. Use --token option or set RFS_TOKEN environment variable."));
|
||||
}
|
||||
|
||||
if hash.is_some() {
|
||||
return sync_blocks(hash.unwrap(), source_server, dest_server, token).await;
|
||||
}
|
||||
sync_all_blocks(source_server, dest_server, Some(DEFAULT_PAGE_SIZE), token).await
|
||||
}
|
||||
|
||||
/// Syncs all blocks of a file between two servers
|
||||
async fn sync_blocks(
|
||||
file_hash: &str,
|
||||
source_server: &str,
|
||||
dest_server: &str,
|
||||
token: &str,
|
||||
) -> Result<()> {
|
||||
// Get all blocks for the file from source server
|
||||
info!("Getting blocks for file hash: {}", file_hash);
|
||||
let blocks = server_api::get_blocks_by_hash(file_hash, source_server.to_string()).await?;
|
||||
|
||||
if blocks.is_empty() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"No blocks found for file hash: {}",
|
||||
file_hash
|
||||
));
|
||||
}
|
||||
|
||||
info!("File has {} blocks", blocks.len());
|
||||
|
||||
// Create a client for API requests
|
||||
let client = Arc::new(Client::new());
|
||||
|
||||
// Prepare blocks with metadata for verification
|
||||
let blocks_with_metadata: Vec<VerifyBlock> = blocks
|
||||
.iter()
|
||||
.map(|(hash, idx)| VerifyBlock {
|
||||
block_hash: hash.clone(),
|
||||
file_hash: file_hash.to_string(),
|
||||
block_index: *idx,
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Verify which blocks are missing on the destination server
|
||||
let missing_blocks = server_api::verify_blocks_with_server(
|
||||
&client,
|
||||
dest_server.to_string(),
|
||||
blocks_with_metadata,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if missing_blocks.is_empty() {
|
||||
info!("All blocks already exist on destination server");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
info!(
|
||||
"{} of {} blocks are missing on destination server",
|
||||
missing_blocks.len(),
|
||||
blocks.len()
|
||||
);
|
||||
|
||||
// Create a semaphore to limit concurrent operations
|
||||
let semaphore = Arc::new(Semaphore::new(PARALLEL_OPERATIONS));
|
||||
|
||||
// Download missing blocks from source and upload to destination in parallel
|
||||
let results = stream::iter(blocks.iter())
|
||||
.filter_map(|(block_hash, block_idx)| {
|
||||
let is_missing = missing_blocks.iter().any(|hash| hash == block_hash);
|
||||
|
||||
if !is_missing {
|
||||
return futures::future::ready(None);
|
||||
}
|
||||
|
||||
let block_hash = block_hash.clone();
|
||||
let source_server = source_server.to_string();
|
||||
let dest_server = dest_server.to_string();
|
||||
let file_hash = file_hash.to_string();
|
||||
let block_idx = *block_idx;
|
||||
let permit = semaphore.clone();
|
||||
let client = client.clone();
|
||||
let token = token.to_string();
|
||||
|
||||
futures::future::ready(Some(async move {
|
||||
// Acquire a permit from the semaphore
|
||||
let _permit = permit
|
||||
.acquire()
|
||||
.await
|
||||
.expect("Failed to acquire semaphore permit");
|
||||
|
||||
info!("Syncing block {} (index: {})", block_hash, block_idx);
|
||||
|
||||
// Download the block from source server
|
||||
match server_api::download_block(&block_hash, &source_server).await {
|
||||
Ok(content) => {
|
||||
// Upload the block to destination server
|
||||
server_api::upload_block(
|
||||
client,
|
||||
dest_server,
|
||||
block_hash.clone(),
|
||||
content.to_vec(),
|
||||
file_hash,
|
||||
block_idx,
|
||||
token.clone(),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| (block_hash.clone(), e))
|
||||
}
|
||||
Err(e) => Err((block_hash.clone(), e)),
|
||||
}
|
||||
}))
|
||||
})
|
||||
.buffer_unordered(PARALLEL_OPERATIONS)
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
// Check for any errors in the sync operations
|
||||
let mut has_errors = false;
|
||||
for result in results {
|
||||
if let Err((block_hash, e)) = result {
|
||||
has_errors = true;
|
||||
error!("Failed to sync block {}: {}", block_hash, e);
|
||||
}
|
||||
}
|
||||
|
||||
if has_errors {
|
||||
Err(anyhow::anyhow!("Some blocks failed to sync"))
|
||||
} else {
|
||||
info!("All blocks synced successfully");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Syncs all blocks between two servers
pub async fn sync_all_blocks(
    source_server: &str,
    dest_server: &str,
    page_size: Option<usize>,
    token: &str,
) -> Result<()> {
    info!("Starting full block sync between servers");
    info!("Source server: {}", source_server);
    info!("Destination server: {}", dest_server);

    let page_size = page_size.unwrap_or(DEFAULT_PAGE_SIZE);
    let client = Arc::new(Client::new());
    let semaphore = Arc::new(Semaphore::new(PARALLEL_OPERATIONS));

    let mut page = 1;
    let mut total_blocks = 0;
    let mut total_synced = 0;
    let mut total_failed = 0;

    loop {
        // Get a page of blocks from the source server
        info!("Fetching blocks page {} (size: {})", page, page_size);
        let (blocks, total) = match server_api::list_blocks(source_server, page_size, page).await {
            Ok(result) => result,
            Err(e) => {
                error!("Failed to list blocks from source server: {}", e);
                return Err(anyhow::anyhow!("Failed to list blocks from source server"));
            }
        };

        if blocks.is_empty() {
            info!("No more blocks to sync");
            break;
        }

        total_blocks = total;
        info!(
            "Retrieved {} blocks (page {}/{})",
            blocks.len(),
            page,
            (total_blocks as f64 / page_size as f64).ceil() as usize
        );

        // Process blocks in parallel
        let results = stream::iter(blocks.iter())
            .map(|block_hash| {
                let block_hash = block_hash.clone();
                let source_server = source_server.to_string();
                let dest_server = dest_server.to_string();
                let permit = semaphore.clone();
                let client = client.clone();
                let token = token.to_string();

                async move {
                    // Acquire a permit from the semaphore
                    let _permit = permit
                        .acquire()
                        .await
                        .expect("Failed to acquire semaphore permit");

                    // Check if block exists on destination server
                    match server_api::check_block(&dest_server, &block_hash).await {
                        Ok(exists) => {
                            if exists {
                                // Block already exists on destination server
                                debug!("Block {} already exists on destination server", block_hash);
                                return Ok(block_hash);
                            }

                            info!("Syncing block {}", block_hash);

                            // Download the block from source server
                            match server_api::download_block(&block_hash, &source_server).await {
                                Ok(content) => {
                                    // Upload the block to destination server
                                    // Note: We don't have file_hash and block_index for this block
                                    // so we use empty string and 0 as placeholders
                                    server_api::upload_block(
                                        client,
                                        dest_server,
                                        block_hash.clone(),
                                        content.to_vec(),
                                        "".to_string(),
                                        0,
                                        token.clone(),
                                    )
                                    .await
                                    .map_err(|e| (block_hash.clone(), e))
                                    .map(|_| block_hash)
                                }
                                Err(e) => Err((block_hash.clone(), e)),
                            }
                        }
                        Err(e) => {
                            error!("Failed to check if block {} exists: {}", block_hash, e);
                            Err((block_hash, e))
                        }
                    }
                }
            })
            .buffer_unordered(PARALLEL_OPERATIONS)
            .collect::<Vec<_>>()
            .await;

        // Process results
        for result in results {
            match result {
                Ok(_) => total_synced += 1,
                Err((block_hash, e)) => {
                    total_failed += 1;
                    error!("Failed to sync block {}: {}", block_hash, e);
                }
            }
        }

        info!(
            "Progress: {}/{} blocks synced ({} failed)",
            total_synced, total_blocks, total_failed
        );

        // Move to the next page
        page += 1;
    }

    info!(
        "Block sync completed: {}/{} blocks synced ({} failed)",
        total_synced, total_blocks, total_failed
    );

    if total_failed > 0 {
        Err(anyhow::anyhow!("{} blocks failed to sync", total_failed))
    } else {
        Ok(())
    }
}
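For reference, a minimal caller of the full-sync path could look like this sketch; the server URLs and token are placeholders, not values from this commit:

// Hypothetical caller; URLs and token are placeholders.
sync_all_blocks(
    "http://source.example:8080",
    "http://dest.example:8080",
    None, // fall back to DEFAULT_PAGE_SIZE
    "my-token",
)
.await?;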
40
components/rfs/src/tree_visitor.rs
Normal file
@@ -0,0 +1,40 @@
use crate::fungi::meta::{FileType, Inode, Result, Walk, WalkVisitor};
use std::path::Path;

pub struct TreeVisitor {
    // We don't need to track depth since the path already contains the structure
}

impl TreeVisitor {
    pub fn new() -> Self {
        Self {}
    }

    fn print_entry(&self, path: &Path, node: &Inode) {
        // Calculate depth from the path
        let depth = path.components().count().saturating_sub(1);
        let indent = " ".repeat(depth);
        let file_type = match node.mode.file_type() {
            FileType::Dir => "📁",
            FileType::Regular => "📄",
            FileType::Link => "🔗",
            _ => "❓",
        };

        // Get just the filename
        let name = path
            .file_name()
            .map(|n| n.to_string_lossy())
            .unwrap_or_else(|| path.to_string_lossy());

        println!("{}{} {}", indent, file_type, name);
    }
}

#[async_trait::async_trait]
impl WalkVisitor for TreeVisitor {
    async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
        self.print_entry(path, node);
        Ok(Walk::Continue)
    }
}
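A quick sketch of how this visitor is driven, assuming a fungi `Reader` named `meta` opened elsewhere (the same `walk` entry point used by `CopyVisitor` in unpack.rs below):

// Hypothetical usage; constructing `meta` is not shown in this diff.
let mut visitor = TreeVisitor::new();
meta.walk(&mut visitor).await?; // prints one indented entry per inode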
270
components/rfs/src/unpack.rs
Normal file
@@ -0,0 +1,270 @@
use crate::cache::Cache;
use crate::fungi::{
    meta::{Block, FileType, Inode, Result, Walk, WalkVisitor},
    Reader,
};
use crate::store::Store;
use anyhow::Context;
use nix::unistd::{fchownat, FchownatFlags, Gid, Uid};
use std::fs::Permissions;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::{ffi::OsStr, fs, sync::Arc};
use tokio::fs::OpenOptions;
use workers::WorkerPool;

/// Unpack an FL to the given root location. It will download the files and reconstruct
/// the filesystem.
pub async fn unpack<P: AsRef<Path>, S: Store>(
    meta: &Reader,
    cache: &Cache<S>,
    root: P,
    preserve: bool,
) -> Result<()> {
    // For now, we'll use the non-parallel version
    // TODO: Implement parallel download properly
    let mut visitor = CopyVisitor::new(meta, cache, root.as_ref(), preserve);
    meta.walk(&mut visitor).await
}

struct CopyVisitor<'a, S>
where
    S: Store,
{
    preserve: bool,
    meta: &'a Reader,
    cache: &'a Cache<S>,
    root: &'a Path,
}

impl<'a, S> CopyVisitor<'a, S>
where
    S: Store,
{
    pub fn new(meta: &'a Reader, cache: &'a Cache<S>, root: &'a Path, preserve: bool) -> Self {
        Self {
            meta,
            cache,
            root,
            preserve,
        }
    }
}

#[async_trait::async_trait]
impl<'a, S> WalkVisitor for CopyVisitor<'a, S>
where
    S: Store,
{
    async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
        let rooted = self.root.join(path.strip_prefix("/").unwrap());

        match node.mode.file_type() {
            FileType::Dir => {
                fs::create_dir_all(&rooted)
                    .with_context(|| format!("failed to create directory '{:?}'", rooted))?;
            }
            FileType::Regular => {
                let mut fd = OpenOptions::new()
                    .create_new(true)
                    .write(true)
                    .truncate(true)
                    .open(&rooted)
                    .await
                    .with_context(|| format!("failed to create file '{:?}'", rooted))?;

                let blocks = self.meta.blocks(node.ino).await?;
                self.cache
                    .direct(&blocks, &mut fd)
                    .await
                    .with_context(|| format!("failed to download file '{:?}'", rooted))?;

                fd.set_permissions(Permissions::from_mode(node.mode.mode()))
                    .await?;
            }
            FileType::Link => {
                let target = node
                    .data
                    .as_deref()
                    .ok_or_else(|| anyhow::anyhow!("link has no target path"))?;

                let target = Path::new(OsStr::from_bytes(target));
                let target = if target.is_relative() {
                    target.to_owned()
                } else {
                    self.root.join(target)
                };

                std::os::unix::fs::symlink(target, &rooted)
                    .with_context(|| format!("failed to create symlink '{:?}'", rooted))?;
            }
            _ => {
                warn!("unknown file kind: {:?}", node.mode.file_type());
                return Ok(Walk::Continue);
            }
        };

        if self.preserve {
            fchownat(
                None,
                &rooted,
                Some(Uid::from_raw(node.uid)),
                Some(Gid::from_raw(node.gid)),
                FchownatFlags::NoFollowSymlink,
            )
            .with_context(|| format!("failed to change ownership of '{:?}'", &rooted))?;
        }

        Ok(Walk::Continue)
    }
}

// Parallel download implementation
struct ParallelCopyVisitor<'a, S>
where
    S: Store,
{
    meta: &'a Reader,
    root: &'a Path,
    preserve: bool,
    pool: &'a mut WorkerPool<Downloader<S>>,
}

impl<'a, S> ParallelCopyVisitor<'a, S>
where
    S: Store,
{
    pub fn new(
        meta: &'a Reader,
        root: &'a Path,
        preserve: bool,
        pool: &'a mut WorkerPool<Downloader<S>>,
    ) -> Self {
        Self {
            meta,
            root,
            preserve,
            pool,
        }
    }
}

#[async_trait::async_trait]
impl<'a, S> WalkVisitor for ParallelCopyVisitor<'a, S>
where
    S: Store,
{
    async fn visit(&mut self, path: &Path, node: &Inode) -> Result<Walk> {
        let rooted = self.root.join(path.strip_prefix("/").unwrap());

        match node.mode.file_type() {
            FileType::Dir => {
                fs::create_dir_all(&rooted)
                    .with_context(|| format!("failed to create directory '{:?}'", rooted))?;
            }
            FileType::Regular => {
                let blocks = self.meta.blocks(node.ino).await?;
                let worker = self.pool.get().await;
                worker.send((rooted.clone(), blocks, node.mode.mode()))?;
            }
            FileType::Link => {
                let target = node
                    .data
                    .as_deref()
                    .ok_or_else(|| anyhow::anyhow!("link has no target path"))?;

                let target = Path::new(OsStr::from_bytes(target));
                let target = if target.is_relative() {
                    target.to_owned()
                } else {
                    self.root.join(target)
                };

                std::os::unix::fs::symlink(target, &rooted)
                    .with_context(|| format!("failed to create symlink '{:?}'", rooted))?;
            }
            _ => {
                warn!("unknown file kind: {:?}", node.mode.file_type());
                return Ok(Walk::Continue);
            }
        };

        if self.preserve {
            fchownat(
                None,
                &rooted,
                Some(Uid::from_raw(node.uid)),
                Some(Gid::from_raw(node.gid)),
                FchownatFlags::NoFollowSymlink,
            )
            .with_context(|| format!("failed to change ownership of '{:?}'", &rooted))?;
        }

        Ok(Walk::Continue)
    }
}

struct Downloader<S>
where
    S: Store,
{
    cache: Arc<Cache<S>>,
}

impl<S> Downloader<S>
where
    S: Store,
{
    fn new(cache: Cache<S>) -> Self {
        Self {
            cache: Arc::new(cache),
        }
    }

    async fn download(&self, path: &Path, blocks: &[Block], mode: u32) -> Result<()> {
        let mut fd = OpenOptions::new()
            .create_new(true)
            .write(true)
            .truncate(true)
            .open(&path)
            .await
            .with_context(|| format!("failed to create file '{:?}'", path))?;

        self.cache
            .direct(&blocks, &mut fd)
            .await
            .with_context(|| format!("failed to download file '{:?}'", path))?;

        fd.set_permissions(Permissions::from_mode(mode)).await?;

        Ok(())
    }
}

impl<S> Clone for Downloader<S>
where
    S: Store,
{
    fn clone(&self) -> Self {
        Self {
            cache: Arc::clone(&self.cache),
        }
    }
}

#[async_trait::async_trait]
impl<S> workers::Work for Downloader<S>
where
    S: Store,
{
    type Input = (PathBuf, Vec<Block>, u32);
    type Output = ();

    async fn run(&mut self, (path, blocks, mode): Self::Input) -> Self::Output {
        log::info!("downloading file {:?}", path);
        if let Err(err) = self.download(&path, &blocks, mode).await {
            log::error!("failed to download file {:?}: {}", path, err);
        }
    }
}
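As a usage sketch, restoring an flist goes through the sequential `CopyVisitor` path only; the parallel machinery above is wired up but unused pending the TODO in `unpack`. The `reader` and `cache` values here are assumed to be built elsewhere:

// Hypothetical usage; reader and cache construction are not part of this diff.
unpack(&reader, &cache, "/mnt/target", false).await?;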
435
components/rfs/src/upload.rs
Normal file
@@ -0,0 +1,435 @@
use anyhow::{Context, Result};
use futures::future::join_all;
use reqwest::Client;
use sha2::{Digest, Sha256};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::fs::File;
use tokio::io::AsyncReadExt;
use tokio::sync::Semaphore;

use crate::fungi;
use crate::server_api;
use crate::store;

pub const BLOCK_SIZE: usize = 1024 * 1024; // 1MB blocks, same as server
const PARALLEL_UPLOAD: usize = 20; // Number of blocks to upload in parallel

pub fn calculate_hash(data: &[u8]) -> String {
    let hash = blake2b_simd::Params::new().hash_length(32).hash(data);
    hex::encode(hash.as_bytes())
}
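// Example: block IDs are hex-encoded 32-byte BLAKE2b digests, so identical
// blocks always map to the same ID and the server can deduplicate them:
//
//     let id = calculate_hash(b"hello");
//     assert_eq!(id.len(), 64); // 32 bytes -> 64 hex characters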
/// Splits the file into blocks and calculates their hashes
pub async fn split_file_into_blocks(
    file_path: &Path,
    block_size: usize,
) -> Result<(Vec<String>, Vec<(String, Vec<u8>)>)> {
    let mut file = File::open(file_path).await.context("Failed to open file")?;
    let mut blocks = Vec::new();
    let mut block_data = Vec::new();

    loop {
        let mut buffer = vec![0; block_size];
        let bytes_read = file.read(&mut buffer).await?;

        if bytes_read == 0 {
            break;
        }

        buffer.truncate(bytes_read);

        // Calculate hash for this block
        let hash = calculate_hash(&buffer);

        blocks.push(hash.clone());
        block_data.push((hash, buffer));
    }

    Ok((blocks, block_data))
}

/// Calculates the hash of the entire file by combining the hashes of all blocks
pub fn calculate_file_hash(blocks: &[String]) -> String {
    let mut hasher = Sha256::new();
    for block_hash in blocks {
        hasher.update(block_hash.as_bytes());
    }
    format!("{:x}", hasher.finalize())
}
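// Example: the file hash commits to both block contents and block order,
// since it is SHA-256 over the concatenated hex block-hash strings:
//
//     let blocks = vec![calculate_hash(b"part1"), calculate_hash(b"part2")];
//     let file_hash = calculate_file_hash(&blocks); // 64 hex characters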
/// Uploads a file to the server, splitting it into blocks and only uploading missing blocks
/// Returns the hash of the uploaded file
pub async fn upload<P: AsRef<Path>>(
    file_path: P,
    server_url: String,
    block_size: Option<usize>,
    token: &str,
) -> Result<String> {
    if token.is_empty() {
        return Err(anyhow::anyhow!("Authentication token is required. Use --token option or set RFS_TOKEN environment variable."));
    }

    let block_size = block_size.unwrap_or(BLOCK_SIZE); // Use provided block size or default
    let file_path = file_path.as_ref();

    info!("Uploading file: {}", file_path.display());
    debug!("Using block size: {} bytes", block_size);

    // Create HTTP client
    let client = Client::new();

    // Read the file size
    let file_size = File::open(file_path).await?.metadata().await?.len();

    info!("File size: {} bytes", file_size);
    info!("Splitting file into blocks of {} bytes", block_size);

    // Split file into blocks and calculate hashes
    let (blocks, block_data) = split_file_into_blocks(file_path, block_size).await?;
    info!("File split into {} blocks", blocks.len());

    // Calculate the file hash by combining all block hashes
    let file_hash = calculate_file_hash(&blocks);
    info!("Calculated file hash: {}", file_hash);

    // Prepare blocks with metadata for verification
    let blocks_with_metadata: Vec<server_api::VerifyBlock> = blocks
        .iter()
        .enumerate()
        .map(|(idx, hash)| server_api::VerifyBlock {
            block_hash: hash.clone(),
            file_hash: file_hash.clone(),
            block_index: idx as u64,
        })
        .collect();

    // Verify which blocks are missing on the server
    let missing_blocks =
        server_api::verify_blocks_with_server(&client, server_url.clone(), blocks_with_metadata)
            .await?;
    info!(
        "{} of {} blocks are missing and need to be uploaded",
        missing_blocks.len(),
        block_data.len()
    );

    // Upload missing blocks in parallel
    let client = Arc::new(client);
    let missing_blocks = Arc::new(missing_blocks);

    // Use a semaphore to limit concurrent uploads
    let semaphore = Arc::new(Semaphore::new(PARALLEL_UPLOAD));

    // Create a vector to hold all upload tasks
    let mut upload_tasks = Vec::new();

    for (idx, (hash, data)) in block_data.into_iter().enumerate() {
        if missing_blocks.iter().any(|block| block == &hash) {
            let hash_clone = hash.clone();
            let server_url_clone = server_url.clone();
            let client_clone = Arc::clone(&client);
            let file_hash_clone = file_hash.clone();
            let token_clone = token.to_string();
            let semaphore_clone = Arc::clone(&semaphore);

            // Create a task for each block upload. The permit is acquired
            // inside the task and held until the upload finishes; acquiring
            // it in the loop body would release it at the end of the
            // iteration, before the spawned upload actually runs.
            let task: tokio::task::JoinHandle<std::result::Result<(), anyhow::Error>> =
                tokio::spawn(async move {
                    let _permit = semaphore_clone
                        .acquire_owned()
                        .await
                        .expect("Failed to acquire semaphore permit");
                    server_api::upload_block(
                        client_clone,
                        server_url_clone,
                        hash_clone,
                        data,
                        file_hash_clone,
                        idx as u64,
                        token_clone,
                    )
                    .await
                });

            upload_tasks.push(task);
        }
    }

    // Wait for all upload tasks to complete
    let results = join_all(upload_tasks).await;

    // Check for any errors in the upload tasks
    for result in results {
        match result {
            Ok(task_result) => task_result?,
            Err(e) => {
                return Err(anyhow::anyhow!("Upload task failed: {}", e));
            }
        }
    }

    info!("File upload complete");
    Ok(file_hash)
}
/// Uploads a directory to the server, processing all files recursively
pub async fn upload_dir<P: AsRef<Path>>(
    dir_path: P,
    server_url: String,
    block_size: Option<usize>,
    token: &str,
    create_flist: bool,
    flist_output: Option<&str>,
) -> Result<()> {
    if token.is_empty() {
        return Err(anyhow::anyhow!("Authentication token is required. Use --token option or set RFS_TOKEN environment variable."));
    }

    let dir_path = dir_path.as_ref().to_path_buf();

    info!("Uploading directory: {}", dir_path.display());
    debug!(
        "Using block size: {} bytes",
        block_size.unwrap_or(BLOCK_SIZE)
    );

    // Collect all files in the directory recursively
    let mut file_paths = Vec::new();
    collect_files(&dir_path, &mut file_paths).context("Failed to read directory")?;

    info!("Found {} files to upload", file_paths.len());

    if !create_flist {
        // Upload each file
        for file_path in file_paths.clone() {
            upload(&file_path, server_url.clone(), block_size, token).await?;
        }

        info!("Directory upload complete");
        return Ok(());
    }

    // Create and handle flist if requested
    info!("Creating flist for the uploaded directory");

    // Create a temporary flist file if no output path is specified
    let flist_path = match flist_output {
        Some(path) => PathBuf::from(path),
        None => {
            let temp_dir = std::env::temp_dir();
            temp_dir.join(format!(
                "{}.fl",
                dir_path.file_name().unwrap_or_default().to_string_lossy()
            ))
        }
    };

    // Create the flist
    let writer = fungi::Writer::new(&flist_path, true)
        .await
        .context("Failed to create flist file")?;

    // Create a store for the server
    let store = store::parse_router(&[format!(
        "{}://{}?token={}",
        store::server::SCHEME,
        server_url.clone(),
        token
    )])
    .await
    .context("Failed to create store")?;

    // Pack the directory into the flist iteratively to avoid stack overflow
    let result =
        tokio::task::spawn_blocking(move || crate::pack(writer, store, dir_path, false, None))
            .await
            .context("Failed to join spawned task")?;

    result.await.context("Failed to create flist")?;

    info!("Flist created at: {}", flist_path.display());

    // Upload the flist file if it was created
    if flist_path.exists() {
        info!("Uploading flist file");
        let flist_hash = upload(&flist_path, server_url.clone(), block_size, token)
            .await
            .context("Failed to upload flist file")?;

        info!("Flist uploaded successfully. Hash: {}", flist_hash);
    }

    Ok(())
}
fn collect_files(dir_path: &Path, file_paths: &mut Vec<PathBuf>) -> std::io::Result<()> {
    let mut stack = vec![dir_path.to_path_buf()];

    while let Some(current_path) = stack.pop() {
        for entry in std::fs::read_dir(&current_path)? {
            let entry = entry?;
            let path = entry.path();

            if path.is_file() {
                file_paths.push(path);
            } else if path.is_dir() {
                stack.push(path);
            }
        }
    }

    Ok(())
}
/// Publishes a website by uploading its directory to the server
pub async fn publish_website<P: AsRef<Path>>(
    dir_path: P,
    server_url: String,
    block_size: Option<usize>,
    token: &str,
) -> Result<()> {
    if token.is_empty() {
        return Err(anyhow::anyhow!("Authentication token is required. Use --token option or set RFS_TOKEN environment variable."));
    }

    let dir_path = dir_path.as_ref().to_path_buf();

    debug!("Uploading directory: {}", dir_path.display());
    debug!(
        "Using block size: {} bytes",
        block_size.unwrap_or(BLOCK_SIZE)
    );

    // Collect all files in the directory recursively
    let mut file_paths = Vec::new();
    collect_files(&dir_path, &mut file_paths).context("Failed to read directory")?;

    debug!("Found {} files to upload", file_paths.len());

    // Create and handle flist if requested
    debug!("Creating flist for the uploaded directory");

    // Create a temporary flist file
    let temp_dir = std::env::temp_dir();
    let flist_path = temp_dir.join(format!(
        "{}.fl",
        dir_path.file_name().unwrap_or_default().to_string_lossy()
    ));

    // Create the flist
    let writer = fungi::Writer::new(&flist_path, true)
        .await
        .context("Failed to create flist file")?;

    // Create a store for the server
    let store = store::parse_router(&[format!(
        "{}://{}?token={}",
        store::server::SCHEME,
        server_url.clone(),
        token
    )])
    .await
    .context("Failed to create store")?;

    // Temporarily disable logs for the upload function
    let original_level = log::max_level();
    log::set_max_level(log::LevelFilter::Off);

    // Pack the directory into the flist iteratively to avoid stack overflow
    let result =
        tokio::task::spawn_blocking(move || crate::pack(writer, store, dir_path, false, None))
            .await
            .context("Failed to join spawned task")?;

    result.await.context("Failed to create flist")?;

    debug!("Flist created at: {}", flist_path.display());

    // Upload the flist file if it was created
    if flist_path.exists() {
        debug!("Uploading flist file");

        let flist_hash = upload(&flist_path, server_url.clone(), block_size, token)
            .await
            .context("Failed to upload flist file")?;

        // Restore the original log level
        log::set_max_level(original_level);

        debug!("Flist uploaded successfully. Hash: {}", flist_hash);

        info!("Website published successfully");
        info!("Website hash: {}", flist_hash);
        info!("Website URL: {}/website/{}/", server_url, flist_hash);
    } else {
        // Restore the original log level even when no flist was produced,
        // so logging is not left disabled on this path
        log::set_max_level(original_level);
    }

    Ok(())
}
pub async fn get_token_from_server(
    server_url: &str,
    username: &str,
    password: &str,
) -> Result<String> {
    let client = reqwest::Client::new();
    server_api::signin(&client, server_url, username, password).await
}

/// Track user blocks on the server
/// Returns information about the number of blocks and their total size
pub async fn track(server_url: &str, token: &str, show_details: bool) -> Result<()> {
    if token.is_empty() {
        return Err(anyhow::anyhow!("Authentication token is required. Use --token option or set RFS_TOKEN environment variable."));
    }

    let first_page = server_api::get_user_blocks(server_url, token, Some(1), None)
        .await
        .context("Failed to get user blocks")?;

    let total_pages = (first_page.total as f64 / 50.0).ceil() as u32;

    let mut tasks = Vec::new();
    for page in 1..=total_pages {
        let server_url = server_url.to_string();
        let token = token.to_string();
        tasks.push(tokio::spawn(async move {
            server_api::get_user_blocks(&server_url, &token, Some(page), Some(50)).await
        }));
    }

    let mut user_blocks = Vec::new();
    for task in tasks {
        match task.await {
            Ok(Ok(blocks_per_page)) => {
                user_blocks.extend(blocks_per_page.blocks);
            }
            Ok(Err(err)) => {
                return Err(anyhow::anyhow!("Failed to get user blocks: {}", err));
            }
            Err(err) => {
                return Err(anyhow::anyhow!("Task failed: {}", err));
            }
        }
    }

    // Calculate total size
    let total_size: u64 = user_blocks.iter().map(|(_, size)| size).sum();

    println!("User Blocks Summary:");
    println!(
        "Usage percentage: {}%",
        (user_blocks.len() as f64 / first_page.all_blocks as f64) * 100.0
    );
    println!("Total blocks: {}", user_blocks.len());
    println!(
        "Total size: {} bytes ({:.2} MB)",
        total_size,
        total_size as f64 / (1024.0 * 1024.0)
    );

    // Print individual block details if requested
    if show_details && !user_blocks.is_empty() {
        println!("\nBlock details:");
        for (hash, size) in &user_blocks {
            println!("  {} - {} bytes", hash, size);
        }
    }

    Ok(())
}
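Taken together, a typical client flow for this module might look like the following sketch; the server URL, credentials, and paths are placeholders, not values from this commit:

// Hypothetical end-to-end flow; all literal values are placeholders.
let url = "http://server.example:8080".to_string();
let token = get_token_from_server(&url, "user", "password").await?;
let file_hash = upload("./disk.img", url.clone(), None, &token).await?;
upload_dir("./site", url.clone(), None, &token, true, None).await?;
track(&url, &token, false).await?;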