feat: Create minimal Zero-OS initramfs with console support
- Fixed build system to clone source repositories instead of downloading binaries
- Enhanced scripts/fetch-github.sh with proper git repo cloning and branch handling
- Updated scripts/compile-components.sh for RFS compilation with build-binary feature
- Added minimal firmware installation for essential network drivers (73 modules)
- Created comprehensive zinit configuration set (15 config files including getty)
- Added util-linux package for getty/agetty console support
- Optimized package selection for minimal 27MB initramfs footprint
- Successfully builds bootable vmlinuz.efi with embedded initramfs
- Confirmed working: VM boot, console login, network drivers, zinit init system

Components:
- initramfs.cpio.xz: 27MB compressed minimal Zero-OS image
- vmlinuz.efi: 35MB bootable kernel with embedded initramfs
- Complete Zero-OS toolchain: zinit, rfs, mycelium compiled from source
This commit is contained in:
181
components/rfs/src/server/auth.rs
Normal file
181
components/rfs/src/server/auth.rs
Normal file
@@ -0,0 +1,181 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use axum::{
|
||||
extract::{Json, Request, State},
|
||||
http::{self, StatusCode},
|
||||
middleware::Next,
|
||||
response::IntoResponse,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use chrono::{Duration, Utc};
|
||||
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, TokenData, Validation};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::server::{
|
||||
config,
|
||||
db::DB,
|
||||
response::{ResponseError, ResponseResult},
|
||||
};
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct Claims {
|
||||
pub exp: usize, // Expiry time of the token
|
||||
pub iat: usize, // Issued at time of the token
|
||||
pub username: String, // Username associated with the token
|
||||
}
|
||||
|
||||
#[derive(Deserialize, ToSchema)]
|
||||
pub struct SignInBody {
|
||||
pub username: String,
|
||||
pub password: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct SignInResponse {
|
||||
pub access_token: String,
|
||||
}
|
||||
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/api/v1/signin",
|
||||
tag = "Authentication",
|
||||
request_body = SignInBody,
|
||||
responses(
|
||||
(status = 201, description = "User signed in successfully", body = SignInResponse),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
(status = 401, description = "Unauthorized user", body = ResponseError),
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn sign_in_handler(
|
||||
State(state): State<Arc<config::AppState>>,
|
||||
Json(user_data): Json<SignInBody>,
|
||||
) -> impl IntoResponse {
|
||||
let user = match state.db.get_user_by_username(&user_data.username).await {
|
||||
Some(user) => user,
|
||||
None => {
|
||||
return Err(ResponseError::Unauthorized(
|
||||
"User is not authorized".to_string(),
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
if user_data.password != user.password {
|
||||
return Err(ResponseError::Unauthorized(
|
||||
"Wrong username or password".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let token = encode_jwt(
|
||||
user.username.clone(),
|
||||
state.config.jwt_secret.clone(),
|
||||
state.config.jwt_expire_hours,
|
||||
)
|
||||
.map_err(|_| ResponseError::InternalServerError)?;
|
||||
|
||||
Ok(ResponseResult::SignedIn(SignInResponse {
|
||||
access_token: token,
|
||||
}))
|
||||
}
|
||||
|
||||
pub fn encode_jwt(
|
||||
username: String,
|
||||
jwt_secret: String,
|
||||
jwt_expire: i64,
|
||||
) -> Result<String, StatusCode> {
|
||||
let now = Utc::now();
|
||||
let exp: usize = (now + Duration::hours(jwt_expire)).timestamp() as usize;
|
||||
let iat: usize = now.timestamp() as usize;
|
||||
let claim = Claims { iat, exp, username };
|
||||
|
||||
encode(
|
||||
&Header::default(),
|
||||
&claim,
|
||||
&EncodingKey::from_secret(jwt_secret.as_ref()),
|
||||
)
|
||||
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
|
||||
pub fn decode_jwt(jwt_token: String, jwt_secret: String) -> Result<TokenData<Claims>, StatusCode> {
|
||||
let result: Result<TokenData<Claims>, StatusCode> = decode(
|
||||
&jwt_token,
|
||||
&DecodingKey::from_secret(jwt_secret.as_ref()),
|
||||
&Validation::default(),
|
||||
)
|
||||
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR);
|
||||
result
|
||||
}
|
||||
|
||||
pub async fn authorize(
|
||||
State(state): State<Arc<config::AppState>>,
|
||||
mut req: Request,
|
||||
next: Next,
|
||||
) -> impl IntoResponse {
|
||||
let auth_header = match req.headers_mut().get(http::header::AUTHORIZATION) {
|
||||
Some(header) => header
|
||||
.to_str()
|
||||
.map_err(|_| ResponseError::Forbidden("Empty header is not allowed".to_string()))?,
|
||||
None => {
|
||||
return Err(ResponseError::Forbidden(
|
||||
"No JWT token is added to the header".to_string(),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
let mut header = auth_header.split_whitespace();
|
||||
let (_, token) = (header.next(), header.next());
|
||||
let token_str = match token {
|
||||
Some(t) => t.to_string(),
|
||||
None => {
|
||||
log::error!("failed to get token string");
|
||||
return Err(ResponseError::Unauthorized(
|
||||
"Authorization token is not provided".to_string(),
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let token_data = match decode_jwt(token_str, state.config.jwt_secret.clone()) {
|
||||
Ok(data) => data,
|
||||
Err(_) => {
|
||||
return Err(ResponseError::Forbidden(
|
||||
"Unable to decode JWT token".to_string(),
|
||||
))
|
||||
}
|
||||
};
|
||||
|
||||
let current_user = match state
|
||||
.db
|
||||
.get_user_by_username(&token_data.claims.username)
|
||||
.await
|
||||
{
|
||||
Some(user) => user,
|
||||
None => {
|
||||
return Err(ResponseError::Unauthorized(
|
||||
"You are not an authorized user".to_string(),
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
req.extensions_mut().insert(current_user.username.clone());
|
||||
Ok(next.run(req).await)
|
||||
}
|
||||
|
||||
/// Get the user ID from the username stored in the request extension
|
||||
pub async fn get_user_id_from_token(db: &impl DB, username: &str) -> Result<i64, ResponseError> {
|
||||
match db.get_user_by_username(username).await {
|
||||
Some(user) => match user.id {
|
||||
Some(id) => Ok(id),
|
||||
None => {
|
||||
log::error!("User ID is missing for user: {}", username);
|
||||
Err(ResponseError::Unauthorized(
|
||||
"User ID is missing".to_string(),
|
||||
))
|
||||
}
|
||||
},
|
||||
None => {
|
||||
log::error!("User not found: {}", username);
|
||||
Err(ResponseError::Unauthorized("User not found".to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
500
components/rfs/src/server/block_handlers.rs
Normal file
500
components/rfs/src/server/block_handlers.rs
Normal file
@@ -0,0 +1,500 @@
|
||||
use axum::{
|
||||
body::Bytes,
|
||||
extract::{Query, State},
|
||||
http::StatusCode,
|
||||
response::IntoResponse,
|
||||
Json,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::server::{
|
||||
auth,
|
||||
config::AppState,
|
||||
db::DB,
|
||||
models::Block,
|
||||
response::{ResponseError, ResponseResult, BlockUploadedResponse},
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
// Block API endpoints are included in the main FlistApi in handlers.rs
|
||||
|
||||
/// Query parameters for uploading a block
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct UploadBlockParams {
|
||||
/// File hash associated with the block
|
||||
pub file_hash: String,
|
||||
/// Block index within the file
|
||||
pub idx: u64,
|
||||
}
|
||||
|
||||
/// Upload a block to the server.
|
||||
/// If the block already exists, the server will return a 200 OK response.
|
||||
/// If the block is new, the server will return a 201 Created response.
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/api/v1/block",
|
||||
tag = "Block Management",
|
||||
request_body(content = [u8], description = "Block data to upload", content_type = "application/octet-stream"),
|
||||
params(
|
||||
("file_hash" = String, Query, description = "File hash associated with the block"),
|
||||
("idx" = u64, Query, description = "Block index within the file")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Block already exists", body = BlockUploadedResponse),
|
||||
(status = 201, description = "Block created successfully", body = BlockUploadedResponse),
|
||||
(status = 400, description = "Bad request", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
security(
|
||||
("bearerAuth" = [])
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn upload_block_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Query(params): Query<UploadBlockParams>,
|
||||
extension: axum::extract::Extension<String>,
|
||||
body: Bytes,
|
||||
) -> Result<(StatusCode, ResponseResult), ResponseError> {
|
||||
// Convert the body bytes to Vec<u8>
|
||||
let data = body.to_vec();
|
||||
|
||||
// Calculate the hash of the block data
|
||||
let hash = Block::calculate_hash(&data);
|
||||
|
||||
// Get the username from the extension (set by the authorize middleware)
|
||||
let username = extension.0;
|
||||
let user_id = auth::get_user_id_from_token(&*state.db, &username).await?;
|
||||
|
||||
// Store the block data in the database
|
||||
match state
|
||||
.db
|
||||
.store_block(&hash, data, ¶ms.file_hash, params.idx, user_id)
|
||||
.await
|
||||
{
|
||||
Ok(is_new) => {
|
||||
if is_new {
|
||||
// Block is new, return 201 Created
|
||||
Ok((StatusCode::CREATED, ResponseResult::BlockUploaded(hash)))
|
||||
} else {
|
||||
// Block already exists, return 200 OK
|
||||
Ok((StatusCode::OK, ResponseResult::BlockUploaded(hash)))
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to store block: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieve a block by its hash.
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/block/{hash}",
|
||||
tag = "Block Management",
|
||||
responses(
|
||||
(status = 200, description = "Block found", body = [u8], content_type = "application/octet-stream"),
|
||||
(status = 404, description = "Block not found", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("hash" = String, Path, description = "Block hash")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn get_block_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::extract::Path(hash): axum::extract::Path<String>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
// Retrieve the block from the database
|
||||
match state.db.get_block(&hash).await {
|
||||
Ok(Some(data)) => {
|
||||
// Block found, return its data
|
||||
Ok((StatusCode::OK, axum::body::Bytes::from(data)))
|
||||
}
|
||||
Ok(None) => {
|
||||
// Block not found
|
||||
Err(ResponseError::NotFound(format!(
|
||||
"Block with hash '{}' not found",
|
||||
hash
|
||||
)))
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve block: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks a block by its hash.
|
||||
#[utoipa::path(
|
||||
head,
|
||||
path = "/api/v1/block/{hash}",
|
||||
tag = "Block Management",
|
||||
responses(
|
||||
(status = 200, description = "Block found"),
|
||||
(status = 404, description = "Block not found", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("hash" = String, Path, description = "Block hash")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn check_block_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::extract::Path(hash): axum::extract::Path<String>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
// Retrieve the block from the database
|
||||
match state.db.block_exists("", 0, &hash, 0).await {
|
||||
true => {
|
||||
// Block found
|
||||
Ok(StatusCode::OK)
|
||||
}
|
||||
false => {
|
||||
log::error!("Block with hash '{}' doesn't exist", hash);
|
||||
Err(ResponseError::NotFound(format!(
|
||||
"Block with hash '{}' not found",
|
||||
hash
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Request to verify if multiple blocks exist on the server
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct VerifyBlock {
|
||||
/// Block hash to verify
|
||||
pub block_hash: String,
|
||||
/// File hash associated with the block
|
||||
pub file_hash: String,
|
||||
/// Block index within the file
|
||||
pub block_index: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct VerifyBlocksRequest {
|
||||
/// List of blocks to verify
|
||||
pub blocks: Vec<VerifyBlock>,
|
||||
}
|
||||
|
||||
/// Response with list of missing blocks
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct VerifyBlocksResponse {
|
||||
/// List of block hashes that are missing on the server
|
||||
pub missing: Vec<String>,
|
||||
}
|
||||
|
||||
/// Verify if multiple blocks exist on the server.
|
||||
/// Returns a list of missing blocks.
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/api/v1/block/verify",
|
||||
tag = "Block Management",
|
||||
request_body(content = VerifyBlocksRequest, description = "List of block hashes to verify", content_type = "application/json"),
|
||||
responses(
|
||||
(status = 200, description = "Verification completed", body = VerifyBlocksResponse),
|
||||
(status = 400, description = "Bad request", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn verify_blocks_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Json(request): Json<VerifyBlocksRequest>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
let mut missing = Vec::new();
|
||||
|
||||
// Check each block in the request
|
||||
for block in request.blocks {
|
||||
if !state
|
||||
.db
|
||||
.block_exists(&block.file_hash, block.block_index, &block.block_hash, 0)
|
||||
.await
|
||||
{
|
||||
missing.push(block.block_hash);
|
||||
}
|
||||
}
|
||||
|
||||
// Return the list of missing blocks
|
||||
Ok((
|
||||
StatusCode::OK,
|
||||
Json(VerifyBlocksResponse {
|
||||
missing, // Include missing blocks in the response
|
||||
}),
|
||||
))
|
||||
}
|
||||
|
||||
/// Block information with hash and index
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct BlockInfo {
|
||||
/// Block hash
|
||||
pub hash: String,
|
||||
/// Block index within the file
|
||||
pub index: u64,
|
||||
}
|
||||
|
||||
/// Block information with hash and size
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct UserBlockInfo {
|
||||
/// Block hash
|
||||
pub hash: String,
|
||||
/// Block size in bytes
|
||||
pub size: u64,
|
||||
}
|
||||
|
||||
/// Response for blocks by hash endpoint
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct BlocksResponse {
|
||||
/// List of blocks with their indices
|
||||
pub blocks: Vec<BlockInfo>,
|
||||
}
|
||||
|
||||
/// Retrieve blocks by hash (file hash or block hash).
|
||||
/// If the hash is a file hash, returns all blocks with their block index related to that file.
|
||||
/// If the hash is a block hash, returns the block itself.
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/blocks/{hash}",
|
||||
tag = "Block Management",
|
||||
responses(
|
||||
(status = 200, description = "Blocks found", body = BlocksResponse),
|
||||
(status = 404, description = "Hash not found", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("hash" = String, Path, description = "File hash or block hash")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn get_blocks_by_hash_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::extract::Path(hash): axum::extract::Path<String>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
// First, try to get file blocks by hash
|
||||
match state.db.get_file_blocks_ordered(&hash).await {
|
||||
Ok(blocks) if !blocks.is_empty() => {
|
||||
// This is a file hash, return all blocks with their indices
|
||||
let block_infos = blocks.into_iter()
|
||||
.map(|(hash, index)| BlockInfo { hash, index })
|
||||
.collect();
|
||||
Ok((StatusCode::OK, Json(BlocksResponse { blocks: block_infos })))
|
||||
}
|
||||
Ok(_) | Err(_) => {
|
||||
// Not a file hash or error occurred, try as block hash
|
||||
match state.db.get_block(&hash).await {
|
||||
Ok(Some(_)) => {
|
||||
// This is a block hash, return just this block with index 0
|
||||
Ok((
|
||||
StatusCode::OK,
|
||||
Json(BlocksResponse {
|
||||
blocks: vec![BlockInfo { hash: hash.clone(), index: 0 }],
|
||||
}),
|
||||
))
|
||||
}
|
||||
Ok(None) => {
|
||||
// Neither file nor block found
|
||||
Err(ResponseError::NotFound(format!(
|
||||
"No file or block with hash '{}' found",
|
||||
hash
|
||||
)))
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve block: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Query parameters for listing blocks
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct ListBlocksParams {
|
||||
/// Page number (1-indexed)
|
||||
#[schema(default = 1, minimum = 1)]
|
||||
pub page: Option<u32>,
|
||||
/// Number of items per page
|
||||
#[schema(default = 50, minimum = 1, maximum = 100)]
|
||||
pub per_page: Option<u32>,
|
||||
}
|
||||
|
||||
/// Response for listing blocks
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct ListBlocksResponse {
|
||||
/// List of block hashes
|
||||
pub blocks: Vec<String>,
|
||||
/// Total number of blocks
|
||||
pub total: u64,
|
||||
/// Current page number
|
||||
pub page: u32,
|
||||
/// Number of items per page
|
||||
pub per_page: u32,
|
||||
}
|
||||
|
||||
/// List all block hashes in the server with pagination
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/blocks",
|
||||
tag = "Block Management",
|
||||
params(
|
||||
("page" = Option<u32>, Query, description = "Page number (1-indexed)"),
|
||||
("per_page" = Option<u32>, Query, description = "Number of items per page")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "List of block hashes", body = ListBlocksResponse),
|
||||
(status = 400, description = "Bad request"),
|
||||
(status = 500, description = "Internal server error"),
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn list_blocks_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Query(params): Query<ListBlocksParams>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
let page = params.page.unwrap_or(1);
|
||||
let per_page = params.per_page.unwrap_or(50).min(100);
|
||||
|
||||
match state.db.list_blocks(page, per_page).await {
|
||||
Ok((blocks, total)) => {
|
||||
let response = ListBlocksResponse {
|
||||
blocks,
|
||||
total,
|
||||
page,
|
||||
per_page,
|
||||
};
|
||||
Ok((StatusCode::OK, Json(response)))
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to list blocks: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Response for user blocks endpoint
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct UserBlocksResponse {
|
||||
/// List of blocks with their sizes
|
||||
pub blocks: Vec<UserBlockInfo>,
|
||||
/// Total number of blocks
|
||||
pub total: u64,
|
||||
/// Total number of all blocks
|
||||
pub all_blocks: u64,
|
||||
}
|
||||
|
||||
/// Retrieve all blocks uploaded by a specific user.
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/user/blocks",
|
||||
tag = "Block Management",
|
||||
params(
|
||||
("page" = Option<u32>, Query, description = "Page number (1-indexed)"),
|
||||
("per_page" = Option<u32>, Query, description = "Number of items per page")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Blocks found", body = UserBlocksResponse),
|
||||
(status = 401, description = "Unauthorized"),
|
||||
(status = 500, description = "Internal server error"),
|
||||
),
|
||||
security(
|
||||
("bearerAuth" = [])
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn get_user_blocks_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
extension: axum::extract::Extension<String>,
|
||||
Query(params): Query<ListBlocksParams>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
let page = params.page.unwrap_or(1);
|
||||
let per_page = params.per_page.unwrap_or(50).min(100);
|
||||
|
||||
// Get the username from the extension (set by the authorize middleware)
|
||||
let username = extension.0;
|
||||
let user_id = auth::get_user_id_from_token(&*state.db, &username).await?;
|
||||
|
||||
let all_blocks = match state.db.list_blocks(1, 1).await {
|
||||
Ok((_, total)) => total,
|
||||
Err(err) => {
|
||||
log::error!("Failed to list blocks: {}", err);
|
||||
0
|
||||
}
|
||||
};
|
||||
|
||||
// Get all blocks related to the user
|
||||
match state.db.get_user_blocks(user_id, page, per_page).await {
|
||||
Ok(blocks) => {
|
||||
let total = blocks.len() as u64;
|
||||
let response = UserBlocksResponse {
|
||||
blocks: blocks.into_iter()
|
||||
.map(|(hash, size)| UserBlockInfo { hash, size })
|
||||
.collect(),
|
||||
total,
|
||||
all_blocks,
|
||||
};
|
||||
Ok((StatusCode::OK, Json(response)))
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve user blocks: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Response for block downloads endpoint
|
||||
#[derive(Debug, Serialize, Deserialize, ToSchema)]
|
||||
pub struct BlockDownloadsResponse {
|
||||
/// Block hash
|
||||
pub block_hash: String,
|
||||
/// Number of times the block has been downloaded
|
||||
pub downloads_count: u64,
|
||||
/// Size of the block in bytes
|
||||
pub block_size: u64,
|
||||
}
|
||||
|
||||
/// Retrieve the number of times a block has been downloaded.
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/block/{hash}/downloads",
|
||||
tag = "Block Management",
|
||||
responses(
|
||||
(status = 200, description = "Download count retrieved successfully", body = BlockDownloadsResponse),
|
||||
(status = 404, description = "Block not found"),
|
||||
(status = 500, description = "Internal server error"),
|
||||
),
|
||||
params(
|
||||
("hash" = String, Path, description = "Block hash")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn get_block_downloads_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::extract::Path(hash): axum::extract::Path<String>,
|
||||
) -> Result<impl IntoResponse, ResponseError> {
|
||||
// Check if the block exists
|
||||
if !state.db.block_exists("", 0, &hash, 0).await {
|
||||
return Err(ResponseError::NotFound(format!(
|
||||
"Block with hash '{}' not found",
|
||||
hash
|
||||
)));
|
||||
}
|
||||
|
||||
// Get the download count
|
||||
match state.db.get_block_downloads(&hash).await {
|
||||
Ok((count, block_size)) => {
|
||||
let response = BlockDownloadsResponse {
|
||||
block_hash: hash,
|
||||
downloads_count: count,
|
||||
block_size: block_size,
|
||||
};
|
||||
Ok((StatusCode::OK, Json(response)))
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve block download count: {}", err);
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
67
components/rfs/src/server/config.rs
Normal file
67
components/rfs/src/server/config.rs
Normal file
@@ -0,0 +1,67 @@
|
||||
use anyhow::{Context, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
fs,
|
||||
path::PathBuf,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::server::{db::DBType, handlers, models::User};
|
||||
use crate::store;
|
||||
|
||||
#[derive(Debug, ToSchema, Serialize, Clone)]
|
||||
pub struct Job {
|
||||
pub id: String,
|
||||
}
|
||||
|
||||
#[derive(ToSchema)]
|
||||
pub struct AppState {
|
||||
pub jobs_state: Mutex<HashMap<String, handlers::FlistState>>,
|
||||
pub flists_progress: Mutex<HashMap<PathBuf, f32>>,
|
||||
pub db: Arc<DBType>,
|
||||
pub config: Config,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone, Deserialize)]
|
||||
pub struct Config {
|
||||
pub host: String,
|
||||
pub port: u16,
|
||||
pub store_url: Vec<String>,
|
||||
pub flist_dir: String,
|
||||
pub sqlite_path: Option<String>,
|
||||
|
||||
pub jwt_secret: String,
|
||||
pub jwt_expire_hours: i64,
|
||||
pub users: Vec<User>,
|
||||
|
||||
pub block_size: Option<usize>, // Optional block size in bytes
|
||||
pub storage_dir: String, // Path to the storage directory
|
||||
}
|
||||
|
||||
/// Parse the config file into Config struct.
|
||||
pub async fn parse_config(filepath: &str) -> Result<Config> {
|
||||
let content = fs::read_to_string(filepath).context("failed to read config file")?;
|
||||
let mut c: Config = toml::from_str(&content).context("failed to convert toml config data")?;
|
||||
|
||||
if !hostname_validator::is_valid(&c.host) {
|
||||
anyhow::bail!("host '{}' is invalid", c.host)
|
||||
}
|
||||
|
||||
store::parse_router(&c.store_url)
|
||||
.await
|
||||
.context("failed to parse store urls")?;
|
||||
fs::create_dir_all(&c.flist_dir).context("failed to create flists directory")?;
|
||||
fs::create_dir_all(&c.storage_dir).context("failed to create storage directory")?;
|
||||
|
||||
if c.jwt_expire_hours < 1 || c.jwt_expire_hours > 24 {
|
||||
anyhow::bail!(format!(
|
||||
"jwt expiry interval in hours '{}' is invalid, must be between [1, 24]",
|
||||
c.jwt_expire_hours
|
||||
))
|
||||
}
|
||||
|
||||
c.block_size = c.block_size.or(Some(1024 * 1024));
|
||||
Ok(c)
|
||||
}
|
||||
96
components/rfs/src/server/db/map.rs
Normal file
96
components/rfs/src/server/db/map.rs
Normal file
@@ -0,0 +1,96 @@
|
||||
use std::collections::HashMap;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use super::DB;
|
||||
use crate::server::models::{File, User};
|
||||
use anyhow::Result;
|
||||
|
||||
#[derive(Debug, ToSchema)]
|
||||
pub struct MapDB {
|
||||
users: HashMap<String, User>,
|
||||
}
|
||||
|
||||
impl MapDB {
|
||||
pub fn new(users: &[User]) -> Self {
|
||||
Self {
|
||||
users: users
|
||||
.iter()
|
||||
.map(|u| (u.username.clone(), u.to_owned()))
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DB for MapDB {
|
||||
async fn get_user_by_username(&self, username: &str) -> Option<User> {
|
||||
self.users.get(username).cloned()
|
||||
}
|
||||
|
||||
async fn block_exists(
|
||||
&self,
|
||||
_file_hash: &str,
|
||||
_block_index: u64,
|
||||
_block_hash: &str,
|
||||
_user_id: i64,
|
||||
) -> bool {
|
||||
// TODO:
|
||||
true
|
||||
}
|
||||
|
||||
async fn store_block(
|
||||
&self,
|
||||
_block_hash: &str,
|
||||
_data: Vec<u8>,
|
||||
_file_hash: &str,
|
||||
_block_index: u64,
|
||||
_user_id: i64,
|
||||
) -> Result<bool, anyhow::Error> {
|
||||
// TODO: Implement block storage logic
|
||||
Ok(true) // Placeholder return value
|
||||
}
|
||||
|
||||
async fn get_block(&self, _hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error> {
|
||||
// TODO:
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn get_file_by_hash(&self, _hash: &str) -> Result<Option<File>, anyhow::Error> {
|
||||
// TODO:
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
async fn get_file_blocks_ordered(
|
||||
&self,
|
||||
_file_hash: &str,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error> {
|
||||
// TODO:
|
||||
Ok(Vec::new())
|
||||
}
|
||||
|
||||
async fn list_blocks(
|
||||
&self,
|
||||
_page: u32,
|
||||
_per_page: u32,
|
||||
) -> Result<(Vec<String>, u64), anyhow::Error> {
|
||||
// TODO:
|
||||
Ok((Vec::new(), 0))
|
||||
}
|
||||
|
||||
async fn get_user_blocks(
|
||||
&self,
|
||||
_user_id: i64,
|
||||
_page: u32,
|
||||
_per_page: u32,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error> {
|
||||
// TODO:
|
||||
Ok(Vec::new())
|
||||
}
|
||||
|
||||
async fn increment_block_downloads(&self, _hash: &str) -> Result<(), anyhow::Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_block_downloads(&self, _hash: &str) -> Result<(u64, u64), anyhow::Error> {
|
||||
Ok((0, 0))
|
||||
}
|
||||
}
|
||||
166
components/rfs/src/server/db/mod.rs
Normal file
166
components/rfs/src/server/db/mod.rs
Normal file
@@ -0,0 +1,166 @@
|
||||
pub mod map;
|
||||
pub mod sqlite;
|
||||
mod storage;
|
||||
use crate::server::models::{File, User};
|
||||
|
||||
pub trait DB: Send + Sync {
|
||||
// User methods
|
||||
async fn get_user_by_username(&self, username: &str) -> Option<User>;
|
||||
|
||||
// Block methods
|
||||
async fn block_exists(
|
||||
&self,
|
||||
file_hash: &str,
|
||||
block_index: u64,
|
||||
block_hash: &str,
|
||||
user_id: i64,
|
||||
) -> bool;
|
||||
async fn store_block(
|
||||
&self,
|
||||
block_hash: &str,
|
||||
data: Vec<u8>,
|
||||
file_hash: &str,
|
||||
block_index: u64,
|
||||
user_id: i64,
|
||||
) -> Result<bool, anyhow::Error>;
|
||||
async fn get_block(&self, hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error>;
|
||||
async fn increment_block_downloads(&self, hash: &str) -> Result<(), anyhow::Error>;
|
||||
async fn get_block_downloads(&self, hash: &str) -> Result<(u64, u64), anyhow::Error>;
|
||||
|
||||
// File methods
|
||||
async fn get_file_by_hash(&self, hash: &str) -> Result<Option<File>, anyhow::Error>;
|
||||
async fn get_file_blocks_ordered(
|
||||
&self,
|
||||
file_hash: &str,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error>;
|
||||
async fn list_blocks(
|
||||
&self,
|
||||
page: u32,
|
||||
per_page: u32,
|
||||
) -> Result<(Vec<String>, u64), anyhow::Error>;
|
||||
|
||||
// Get all blocks related to a user
|
||||
async fn get_user_blocks(
|
||||
&self,
|
||||
user_id: i64,
|
||||
page: u32,
|
||||
per_page: u32,
|
||||
) -> Result<Vec<(String, u64)>, anyhow::Error>;
|
||||
}
|
||||
|
||||
pub enum DBType {
|
||||
MapDB(map::MapDB),
|
||||
SqlDB(sqlite::SqlDB),
|
||||
}
|
||||
|
||||
/// `DB` implementation for `DBType`: every method is a thin dispatcher that
/// forwards to the wrapped backend, keeping call sites backend-agnostic.
impl DB for DBType {
    // User methods

    /// Forward user lookup to the active backend.
    async fn get_user_by_username(&self, username: &str) -> Option<User> {
        match self {
            DBType::MapDB(db) => db.get_user_by_username(username).await,
            DBType::SqlDB(db) => db.get_user_by_username(username).await,
        }
    }

    // Block methods

    /// Forward block-existence check (block data + metadata) to the backend.
    async fn block_exists(
        &self,
        file_hash: &str,
        block_index: u64,
        block_hash: &str,
        user_id: i64,
    ) -> bool {
        match self {
            DBType::MapDB(db) => {
                db.block_exists(file_hash, block_index, block_hash, user_id)
                    .await
            }
            DBType::SqlDB(db) => {
                db.block_exists(file_hash, block_index, block_hash, user_id)
                    .await
            }
        }
    }

    /// Forward block + metadata storage to the backend.
    async fn store_block(
        &self,
        block_hash: &str,
        data: Vec<u8>,
        file_hash: &str,
        block_index: u64,
        user_id: i64,
    ) -> Result<bool, anyhow::Error> {
        match self {
            DBType::MapDB(db) => {
                db.store_block(block_hash, data, file_hash, block_index, user_id)
                    .await
            }
            DBType::SqlDB(db) => {
                db.store_block(block_hash, data, file_hash, block_index, user_id)
                    .await
            }
        }
    }

    /// Forward raw block retrieval to the backend.
    async fn get_block(&self, hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.get_block(hash).await,
            DBType::SqlDB(db) => db.get_block(hash).await,
        }
    }

    /// Forward download-counter increment to the backend.
    async fn increment_block_downloads(&self, hash: &str) -> Result<(), anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.increment_block_downloads(hash).await,
            DBType::SqlDB(db) => db.increment_block_downloads(hash).await,
        }
    }

    /// Forward download-stats query (count, size) to the backend.
    async fn get_block_downloads(&self, hash: &str) -> Result<(u64, u64), anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.get_block_downloads(hash).await,
            DBType::SqlDB(db) => db.get_block_downloads(hash).await,
        }
    }

    // File methods

    /// Forward whole-file reconstruction to the backend.
    async fn get_file_by_hash(&self, hash: &str) -> Result<Option<File>, anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.get_file_by_hash(hash).await,
            DBType::SqlDB(db) => db.get_file_by_hash(hash).await,
        }
    }

    /// Forward ordered block-list lookup for a file to the backend.
    async fn get_file_blocks_ordered(
        &self,
        file_hash: &str,
    ) -> Result<Vec<(String, u64)>, anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.get_file_blocks_ordered(file_hash).await,
            DBType::SqlDB(db) => db.get_file_blocks_ordered(file_hash).await,
        }
    }

    /// Forward paginated block listing to the backend.
    async fn list_blocks(
        &self,
        page: u32,
        per_page: u32,
    ) -> Result<(Vec<String>, u64), anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.list_blocks(page, per_page).await,
            DBType::SqlDB(db) => db.list_blocks(page, per_page).await,
        }
    }

    // Get all blocks related to a user

    /// Forward paginated per-user block listing to the backend.
    async fn get_user_blocks(
        &self,
        user_id: i64,
        page: u32,
        per_page: u32,
    ) -> Result<Vec<(String, u64)>, anyhow::Error> {
        match self {
            DBType::MapDB(db) => db.get_user_blocks(user_id, page, per_page).await,
            DBType::SqlDB(db) => db.get_user_blocks(user_id, page, per_page).await,
        }
    }
}
|
||||
397
components/rfs/src/server/db/sqlite.rs
Normal file
397
components/rfs/src/server/db/sqlite.rs
Normal file
@@ -0,0 +1,397 @@
|
||||
use super::{storage::Storage, DB};
|
||||
use crate::server::models::{File, User};
|
||||
use anyhow::Result;
|
||||
use sqlx::{query, query_as, Row, SqlitePool};
|
||||
|
||||
/// SQLite-backed implementation of the `DB` trait.
///
/// Metadata (users, per-block metadata rows) lives in SQLite; raw block
/// bytes live on the file system via `Storage`.
#[derive(Debug)]
pub struct SqlDB {
    pool: SqlitePool, // Use a connection pool for efficient database access
    storage: Storage, // Directory for storing blocks
}
|
||||
|
||||
/// SQL schema applied at startup, compiled into the binary from the repo's schema file.
static SCHEMA: &str = include_str!("../../../schema/server.sql");
|
||||
|
||||
impl SqlDB {
|
||||
pub async fn new(database_filepath: &str, storage_dir: &str, users: &[User]) -> Self {
|
||||
// Check if the database file exists, and create it if it doesn't
|
||||
if !std::path::Path::new(database_filepath).exists() {
|
||||
std::fs::File::create(database_filepath).expect("Failed to create database file");
|
||||
}
|
||||
|
||||
let pool = SqlitePool::connect_lazy(database_filepath)
|
||||
.expect("Failed to create database connection pool");
|
||||
|
||||
// Initialize the database schema
|
||||
Self::init_schema(&pool)
|
||||
.await
|
||||
.expect("Failed to initialize database schema");
|
||||
|
||||
let storage = Storage::new(storage_dir);
|
||||
|
||||
for user in users {
|
||||
if let Err(err) = Self::insert_user(&pool, user).await {
|
||||
log::error!("Failed to insert user '{}': {}", user.username, err);
|
||||
}
|
||||
}
|
||||
|
||||
Self { pool, storage }
|
||||
}
|
||||
|
||||
/// Initialize the database schema
|
||||
async fn init_schema(pool: &SqlitePool) -> Result<(), anyhow::Error> {
|
||||
sqlx::query(SCHEMA)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("Failed to create database schema: {}", e))?;
|
||||
|
||||
log::info!("Database schema initialized successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn metadata_exists(
|
||||
&self,
|
||||
file_hash: &str,
|
||||
block_index: u64,
|
||||
block_hash: &str,
|
||||
user_id: i64,
|
||||
) -> bool {
|
||||
let result = query(
|
||||
"SELECT COUNT(*) as count FROM metadata WHERE file_hash = ? AND block_index = ? AND block_hash = ? AND user_id = ?",
|
||||
)
|
||||
.bind(file_hash)
|
||||
.bind(block_index as i64)
|
||||
.bind(block_hash)
|
||||
.bind(user_id)
|
||||
.fetch_one(&self.pool);
|
||||
|
||||
match result.await {
|
||||
Ok(row) => {
|
||||
let count: i64 = row.get(0);
|
||||
count > 0
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Error checking if metadata exists: {}", err);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn insert_user(pool: &SqlitePool, user: &User) -> Result<(), anyhow::Error> {
|
||||
query(
|
||||
"INSERT OR IGNORE INTO users (username, password, created_at) VALUES (?, ?, CURRENT_TIMESTAMP)",
|
||||
)
|
||||
.bind(&user.username)
|
||||
.bind(&user.password)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("Failed to insert user: {}", e))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// `DB` implementation backed by SQLite metadata plus file-system block storage.
impl DB for SqlDB {
    /// Fetch a user row by username.
    /// Returns `None` both when the user is missing and when the query fails.
    async fn get_user_by_username(&self, username: &str) -> Option<User> {
        let query = "SELECT * FROM users WHERE username = ?";
        let result = query_as::<_, User>(query)
            .bind(username)
            .fetch_one(&self.pool);

        match result.await {
            Ok(user) => Some(user),
            Err(_) => None,
        }
    }

    /// A block "exists" only when its bytes are in storage AND either its
    /// metadata row exists or `file_hash` is empty (block-only upload).
    // NOTE(review): the dedupe condition here differs from `store_block`
    // (which additionally requires `user_id == 0` alongside the empty
    // file_hash) — confirm which of the two is intended.
    async fn block_exists(
        &self,
        file_hash: &str,
        block_index: u64,
        block_hash: &str,
        user_id: i64,
    ) -> bool {
        // Check if the block already exists in storage
        let block_exists = self.storage.block_exists(block_hash);

        // Check if the metadata already exists in the database
        let metadata_exists = self
            .metadata_exists(file_hash, block_index, block_hash, user_id)
            .await;

        // If both block and metadata exist, no need to store again
        if block_exists && (metadata_exists || file_hash.is_empty()) {
            return true;
        }

        false // Block does not exist
    }

    /// Store a block's metadata row and (if needed) its bytes.
    /// Returns `Ok(false)` when everything was already present, `Ok(true)`
    /// when anything was newly written.
    async fn store_block(
        &self,
        block_hash: &str,
        data: Vec<u8>,
        file_hash: &str,
        block_index: u64,
        user_id: i64,
    ) -> Result<bool, anyhow::Error> {
        // Check if the block already exists in storage
        let block_exists = self.storage.block_exists(block_hash);

        // Check if the metadata already exists in the database
        let metadata_exists = self
            .metadata_exists(file_hash, block_index, block_hash, user_id)
            .await;

        // If both block and metadata exist, no need to store again
        if block_exists && (metadata_exists || (file_hash.is_empty() && user_id == 0)) {
            return Ok(false);
        }

        // Calculate block size
        let block_size = data.len() as i64;

        // Store metadata if it doesn't exist
        if !metadata_exists {
            if let Err(err) = query(
                "INSERT INTO metadata (file_hash, block_index, block_hash, user_id, block_size, created_at)
                VALUES (?, ?, ?, ?, ?, CURRENT_TIMESTAMP)",
            )
            .bind(file_hash)
            .bind(block_index as i64)
            .bind(block_hash)
            .bind(user_id)
            .bind(block_size)
            .execute(&self.pool)
            .await
            {
                log::error!("Error storing metadata: {}", err);
                return Err(anyhow::anyhow!("Failed to store metadata: {}", err));
            }
        }

        // Store the block data in the file system if it doesn't exist
        // (metadata may already be written at this point; storage failure
        // leaves the row in place).
        if !block_exists {
            if let Err(err) = self.storage.save_block(block_hash, &data) {
                log::error!("Error storing block in storage: {}", err);
                return Err(anyhow::anyhow!("Failed to store block in storage: {}", err));
            }
        }

        Ok(true) // Indicate that the block or metadata was newly stored
    }

    /// Read a block's bytes from storage; a successful read also bumps the
    /// block's download counter (and fails the call if that bump fails).
    async fn get_block(&self, hash: &str) -> Result<Option<Vec<u8>>, anyhow::Error> {
        // Retrieve the block data from storage
        match self.storage.get_block(hash) {
            Ok(Some(data)) => {
                if let Err(err) = self.increment_block_downloads(&hash).await {
                    return Err(anyhow::anyhow!(
                        "Failed to increment download count for block {}: {}",
                        hash,
                        err
                    ));
                }
                Ok(Some(data))
            }
            Ok(None) => Ok(None),
            Err(err) => {
                log::error!("Error retrieving block from storage: {}", err);
                Err(anyhow::anyhow!(
                    "Failed to retrieve block from storage: {}",
                    err
                ))
            }
        }
    }

    /// Reconstruct a whole file in memory by concatenating its blocks in
    /// index order, bumping each block's download counter along the way.
    // NOTE(review): buffers the full file in RAM — fine for small files,
    // confirm acceptable for the expected file sizes.
    async fn get_file_by_hash(&self, hash: &str) -> Result<Option<File>, anyhow::Error> {
        // Retrieve the blocks associated with the file hash
        let blocks = match self.get_file_blocks_ordered(hash).await {
            Ok(blocks) => blocks,
            Err(err) => {
                log::error!("Failed to retrieve file blocks: {}", err);
                return Err(anyhow::anyhow!("Failed to retrieve file blocks: {}", err));
            }
        };

        if blocks.is_empty() {
            return Ok(None); // No blocks found, file does not exist
        }

        // Combine block data to reconstruct the file
        let mut file_content = Vec::new();
        for (block_hash, _) in blocks {
            match self.storage.get_block(&block_hash) {
                Ok(Some(data)) => {
                    if let Err(err) = self.increment_block_downloads(&block_hash).await {
                        return Err(anyhow::anyhow!(
                            "Failed to increment download count for block {}: {}",
                            block_hash,
                            err
                        ));
                    }
                    file_content.extend(data)
                }
                Ok(None) => {
                    // Metadata references a block that storage no longer has.
                    log::error!("Block {} not found", block_hash);
                    return Err(anyhow::anyhow!("Block {} not found", block_hash));
                }
                Err(err) => {
                    log::error!("Failed to retrieve block {}: {}", block_hash, err);
                    return Err(anyhow::anyhow!(
                        "Failed to retrieve block {}: {}",
                        block_hash,
                        err
                    ));
                }
            }
        }

        // Return the reconstructed file
        Ok(Some(File {
            file_hash: hash.to_string(),
            file_content,
        }))
    }

    /// List a file's blocks as (block_hash, block_index) pairs, ordered by index.
    async fn get_file_blocks_ordered(
        &self,
        file_hash: &str,
    ) -> Result<Vec<(String, u64)>, anyhow::Error> {
        let result = query(
            "SELECT block_hash, block_index FROM metadata WHERE file_hash = ? ORDER BY block_index",
        )
        .bind(file_hash)
        .fetch_all(&self.pool)
        .await;

        match result {
            Ok(rows) => {
                let blocks = rows
                    .into_iter()
                    .map(|row| {
                        let block_hash: String = row.get(0);
                        let block_index: i64 = row.get(1);
                        (block_hash, block_index as u64)
                    })
                    .collect::<Vec<(String, u64)>>();

                Ok(blocks)
            }
            Err(err) => {
                log::error!("Error retrieving file blocks: {}", err);
                Err(anyhow::anyhow!("Failed to retrieve file blocks: {}", err))
            }
        }
    }

    /// Paginate over all block hashes in storage (pages are 1-based;
    /// `page == 0` trips the checked subtraction and returns an error).
    /// Returns the requested page plus the total block count.
    async fn list_blocks(
        &self,
        page: u32,
        per_page: u32,
    ) -> Result<(Vec<String>, u64), anyhow::Error> {
        let blocks = match self.storage.list_blocks() {
            Ok(blocks) => blocks,
            Err(err) => {
                log::error!("Error listing blocks: {}", err);
                return Err(anyhow::anyhow!("Failed to list blocks: {}", err));
            }
        };

        let total = blocks.len() as u64;
        // Checked arithmetic guards against page/per_page overflow (and page 0).
        let start = page
            .checked_sub(1)
            .and_then(|p| p.checked_mul(per_page))
            .ok_or_else(|| anyhow::anyhow!("Page or per_page value caused overflow"))?
            as usize;
        let end = (start + per_page as usize).min(total as usize);
        let page_blocks = blocks
            .into_iter()
            .skip(start)
            .take(end.saturating_sub(start))
            .collect();
        Ok((page_blocks, total))
    }

    /// Paginate (1-based) over a user's blocks as (block_hash, block_size) pairs.
    async fn get_user_blocks(
        &self,
        user_id: i64,
        page: u32,
        per_page: u32,
    ) -> Result<Vec<(String, u64)>, anyhow::Error> {
        let offset = page
            .checked_sub(1)
            .and_then(|p| p.checked_mul(per_page))
            .ok_or_else(|| anyhow::anyhow!("Page or per_page value caused overflow"))?
            as i64;

        let result = query(
            "SELECT block_hash, block_size FROM metadata WHERE user_id = ? ORDER BY block_index LIMIT ? OFFSET ?",
        )
        .bind(user_id)
        .bind(per_page as i64)
        .bind(offset)
        .fetch_all(&self.pool)
        .await;

        match result {
            Ok(rows) => {
                let blocks = rows
                    .into_iter()
                    .map(|row| {
                        let block_hash: String = row.get(0);
                        let block_size: i64 = row.get(1);
                        (block_hash, block_size as u64)
                    })
                    .collect::<Vec<(String, u64)>>();

                Ok(blocks)
            }
            Err(err) => {
                log::error!("Error retrieving user blocks: {}", err);
                Err(anyhow::anyhow!("Failed to retrieve user blocks: {}", err))
            }
        }
    }

    /// Increment `downloads_count` for every metadata row with this block hash.
    async fn increment_block_downloads(&self, hash: &str) -> Result<(), anyhow::Error> {
        let result =
            query("UPDATE metadata SET downloads_count = downloads_count + 1 WHERE block_hash = ?")
                .bind(hash)
                .execute(&self.pool)
                .await;

        match result {
            Ok(_) => Ok(()),
            Err(err) => {
                log::error!("Error incrementing block downloads count: {}", err);
                Err(anyhow::anyhow!(
                    "Failed to increment block downloads count: {}",
                    err
                ))
            }
        }
    }

    /// Fetch a block's (downloads_count, block_size); errors when no row matches.
    async fn get_block_downloads(&self, hash: &str) -> Result<(u64, u64), anyhow::Error> {
        let result = query("SELECT downloads_count, block_size FROM metadata WHERE block_hash = ?")
            .bind(hash)
            .fetch_one(&self.pool)
            .await;

        match result {
            Ok(row) => {
                let count: i64 = row.get(0);
                let size: i64 = row.get(1);
                Ok((count as u64, size as u64))
            }
            Err(err) => {
                log::error!("Error retrieving block downloads count and size: {}", err);
                Err(anyhow::anyhow!(
                    "Failed to retrieve block downloads count and size: {}",
                    err
                ))
            }
        }
    }
}
|
||||
95
components/rfs/src/server/db/storage.rs
Normal file
95
components/rfs/src/server/db/storage.rs
Normal file
@@ -0,0 +1,95 @@
|
||||
use std::fs;
|
||||
use std::io::{self, Write};
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Content-addressed block store on the local file system.
///
/// Blocks live under `<base_dir>/blocks/<first-4-chars-of-hash>/<hash>`;
/// the 4-character prefix shards blocks across sub-directories.
#[derive(Debug)]
pub struct Storage {
    base_dir: PathBuf,
}

impl Storage {
    /// Create a store rooted at `<base_dir>/blocks`, creating the directory
    /// tree if needed. Panics when the directory cannot be created (runs at
    /// server start-up).
    pub fn new(base_dir: &str) -> Self {
        let base_path = PathBuf::from(base_dir).join("blocks");
        fs::create_dir_all(&base_path).expect("Failed to create storage directory");
        Self {
            base_dir: base_path,
        }
    }

    /// Shard prefix for `hash`, or `None` when the hash is shorter than
    /// 4 bytes or the 4-byte cut falls inside a multi-byte character.
    ///
    /// The original code used `&hash[..4]`, which panics on such hashes;
    /// `str::get` makes short/invalid hashes an error path instead.
    fn shard_prefix(hash: &str) -> Option<&str> {
        hash.get(..4)
    }

    /// Write `content` to the block file for `hash`, creating the shard
    /// directory as needed. Overwrites any existing block with the same hash.
    ///
    /// # Errors
    /// Returns `InvalidInput` for hashes too short to shard, or any
    /// underlying I/O error.
    pub fn save_block(&self, hash: &str, content: &[u8]) -> io::Result<()> {
        let prefix = Self::shard_prefix(hash).ok_or_else(|| {
            io::Error::new(
                io::ErrorKind::InvalidInput,
                "block hash must be at least 4 ASCII characters",
            )
        })?;
        let sub_dir = self.base_dir.join(prefix);
        fs::create_dir_all(&sub_dir)?;

        let block_path = sub_dir.join(hash);
        let mut file = fs::File::create(block_path)?;
        file.write_all(content)
    }

    /// Read the bytes of the block for `hash`; `Ok(None)` when absent
    /// (including hashes too short to shard).
    pub fn get_block(&self, hash: &str) -> io::Result<Option<Vec<u8>>> {
        let prefix = match Self::shard_prefix(hash) {
            Some(p) => p,
            None => return Ok(None),
        };
        let block_path = self.base_dir.join(prefix).join(hash);
        if block_path.exists() {
            Ok(Some(fs::read(block_path)?))
        } else {
            Ok(None)
        }
    }

    /// True when a block file for `hash` exists (short hashes are `false`).
    pub fn block_exists(&self, hash: &str) -> bool {
        Self::shard_prefix(hash)
            .map(|prefix| self.base_dir.join(prefix).join(hash).exists())
            .unwrap_or(false)
    }

    /// List every stored block hash by walking the shard directories.
    /// Order follows directory iteration and is not guaranteed.
    pub fn list_blocks(&self) -> io::Result<Vec<String>> {
        let mut block_hashes = Vec::new();

        // Walk through the storage directory
        for entry in fs::read_dir(&self.base_dir)? {
            let entry = entry?;
            let path = entry.path();
            if path.is_dir() {
                // Each subdirectory represents the first 4 characters of the hash
                for block_entry in fs::read_dir(path)? {
                    let block_entry = block_entry?;
                    let block_path = block_entry.path();
                    if block_path.is_file() {
                        if let Some(file_name) = block_path.file_name() {
                            if let Some(hash) = file_name.to_str() {
                                block_hashes.push(hash.to_string());
                            }
                        }
                    }
                }
            }
        }

        Ok(block_hashes)
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// End-to-end smoke test for save / exists / get against a real
    /// directory; cleans the fixture directory up afterwards.
    #[test]
    fn test_storage() {
        let store = Storage::new("test_storage");

        // First block: save, then confirm presence.
        let first_hash = "abcd1234";
        store.save_block(first_hash, b"Hello, world!").unwrap();
        assert!(store.block_exists(first_hash));

        // Second block: save, then read the content back.
        let second_hash = "abcd12345";
        let payload = b"Hello, world!";
        store.save_block(second_hash, payload).unwrap();
        assert_eq!(store.get_block(second_hash).unwrap().unwrap(), payload);

        // Clean up the on-disk fixture directory.
        fs::remove_dir_all("test_storage").unwrap();
    }
}
|
||||
171
components/rfs/src/server/file_handlers.rs
Normal file
171
components/rfs/src/server/file_handlers.rs
Normal file
@@ -0,0 +1,171 @@
|
||||
use axum::{body::Bytes, extract::State, http::StatusCode, response::IntoResponse};
|
||||
use axum_macros::debug_handler;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::server::{
|
||||
auth,
|
||||
config::AppState,
|
||||
db::DB,
|
||||
models::{Block, File},
|
||||
response::{ResponseError, ResponseResult},
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
const BLOCK_SIZE: usize = 1024 * 1024; // 1MB
|
||||
|
||||
// File API endpoints are included in the main FlistApi in handlers.rs
|
||||
|
||||
/// Response body returned after a successful file upload.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct FileUploadResponse {
    /// Hash of the uploaded file; clients use it with
    /// `GET /api/v1/file/{hash}` to download the file back.
    pub file_hash: String,
    /// Human-readable success message.
    pub message: String,
}
|
||||
|
||||
/// Upload a file to the server.
/// The file will be split into blocks and stored in the database.
///
/// The authenticated username arrives via the `Extension<String>` set by the
/// authorize middleware; each chunk is stored as a separate block tagged with
/// the file hash, its index, and the uploader's user id.
#[utoipa::path(
    post,
    path = "/api/v1/file",
    tag = "File Management",
    request_body(content = [u8], description = "File data to upload", content_type = "application/octet-stream"),
    responses(
        (status = 201, description = "File uploaded successfully", body = FileUploadResponse),
        (status = 400, description = "Bad request", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    ),
    security(
        ("bearerAuth" = [])
    )
)]
#[debug_handler]
pub async fn upload_file_handler(
    State(state): State<Arc<AppState>>,
    extension: axum::extract::Extension<String>,
    body: Bytes,
) -> Result<(StatusCode, ResponseResult), ResponseError> {
    // Convert the request body to a byte vector
    let data = body.to_vec();

    // Create a new File record
    // NOTE(review): `data.clone()` duplicates the entire upload in memory;
    // confirm whether `File::new` could take a slice instead.
    let file = File::new(data.clone());

    // Store the file metadata in the database
    // In a real implementation, we would store this in the files table
    // For now, we'll just log it
    log::info!("Storing file metadata: hash={}", file.file_hash);

    // Get the username from the extension (set by the authorize middleware)
    let username = extension.0;
    let user_id = auth::get_user_id_from_token(&*state.db, &username).await?;

    // Store each block with a reference to the file.
    // Chunk size comes from config when set, otherwise the 1 MiB default.
    for (i, chunk) in data
        .chunks(state.config.block_size.unwrap_or(BLOCK_SIZE))
        .enumerate()
    {
        let block_hash = Block::calculate_hash(chunk);

        // TODO: parallel
        // Store each block in the storage with file hash and block index in metadata in DB
        match state
            .db
            .store_block(
                &block_hash,
                chunk.to_vec(),
                &file.file_hash,
                i as u64,
                user_id,
            )
            .await
        {
            Ok(_) => {
                log::debug!("Stored block {}", block_hash);
            }
            Err(err) => {
                // Any single failed block aborts the whole upload.
                log::error!("Failed to store block: {}", err);
                return Err(ResponseError::InternalServerError);
            }
        }
    }

    log::info!(
        "Stored file metadata and blocks for file {}",
        file.file_hash
    );

    // Return success response
    let response = FileUploadResponse {
        file_hash: file.file_hash,
        message: "File is uploaded successfully".to_string(),
    };

    Ok((StatusCode::CREATED, ResponseResult::FileUploaded(response)))
}
|
||||
|
||||
/// Optional request body for file download, carrying a custom filename to use
/// in the `Content-Disposition` header; when omitted, `<hash>.bin` is used.
#[derive(Debug, Serialize, Deserialize, ToSchema)]
pub struct FileDownloadRequest {
    /// The custom filename to use for download
    pub file_name: String,
}
|
||||
|
||||
/// Retrieve a file by its hash from path, with optional custom filename in request body.
/// The file will be reconstructed from its blocks.
// NOTE(review): this GET endpoint reads an optional JSON request body, which
// is unusual for HTTP GET — confirm clients/proxies actually support it.
#[utoipa::path(
    get,
    path = "/api/v1/file/{hash}",
    tag = "File Management",
    request_body(content = FileDownloadRequest, description = "Optional custom filename for download", content_type = "application/json"),
    responses(
        (status = 200, description = "File found", body = [u8], content_type = "application/octet-stream"),
        (status = 404, description = "File not found", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError),
    ),
    params(
        ("hash" = String, Path, description = "File hash")
    )
)]
#[debug_handler]
pub async fn get_file_handler(
    State(state): State<Arc<AppState>>,
    axum::extract::Path(hash): axum::extract::Path<String>,
    request: Option<axum::extract::Json<FileDownloadRequest>>,
) -> Result<impl IntoResponse, ResponseError> {
    // Get the file metadata using the hash; the DB reconstructs the full
    // file content from its blocks.
    let file = match state.db.get_file_by_hash(&hash).await {
        Ok(Some(file)) => file,
        Ok(None) => {
            return Err(ResponseError::NotFound(format!(
                "File with hash '{}' not found",
                hash
            )));
        }
        Err(err) => {
            log::error!("Failed to retrieve file metadata: {}", err);
            return Err(ResponseError::InternalServerError);
        }
    };

    // Set content disposition header with the custom filename from request if provided
    // Otherwise use the hash as the filename
    let filename = match request {
        Some(req) => req.0.file_name,
        None => format!("{}.bin", hash), // Default filename using hash
    };

    let headers = [(
        axum::http::header::CONTENT_DISPOSITION,
        format!("attachment; filename=\"{}\"", filename),
    )];

    // Return the file data
    Ok((
        StatusCode::OK,
        headers,
        axum::body::Bytes::from(file.file_content),
    ))
}
|
||||
595
components/rfs/src/server/handlers.rs
Normal file
595
components/rfs/src/server/handlers.rs
Normal file
@@ -0,0 +1,595 @@
|
||||
use anyhow::Error;
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
response::IntoResponse,
|
||||
Extension, Json,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
fs,
|
||||
path::PathBuf,
|
||||
sync::{mpsc, Arc},
|
||||
};
|
||||
|
||||
use bollard::auth::DockerCredentials;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::docker;
|
||||
use crate::fungi;
|
||||
use crate::server::{
|
||||
auth::{SignInBody, SignInResponse, __path_sign_in_handler},
|
||||
config::{self, Job},
|
||||
db::DB,
|
||||
response::{DirListTemplate, DirLister, ErrorTemplate, TemplateErr},
|
||||
response::{FileInfo, ResponseError, ResponseResult, FlistStateResponse, HealthResponse, BlockUploadedResponse},
|
||||
serve_flists::visit_dir_one_level,
|
||||
};
|
||||
use crate::store;
|
||||
use utoipa::{OpenApi, ToSchema, Modify};
|
||||
use utoipa::openapi::security::{SecurityScheme, HttpAuthScheme, Http};
|
||||
use uuid::Uuid;
|
||||
use crate::server::block_handlers;
|
||||
use crate::server::file_handlers;
|
||||
use crate::server::serve_flists;
|
||||
use crate::server::website_handlers;
|
||||
|
||||
// Security scheme modifier for JWT Bearer authentication
struct SecurityAddon;

/// Registers the `bearerAuth` HTTP Bearer scheme on the generated OpenAPI
/// document so endpoints can reference it via `security(("bearerAuth" = []))`.
impl Modify for SecurityAddon {
    fn modify(&self, openapi: &mut utoipa::openapi::OpenApi) {
        let components = openapi.components.as_mut().unwrap(); // Safe to unwrap since components are registered
        components.add_security_scheme(
            "bearerAuth",
            SecurityScheme::Http(Http::new(HttpAuthScheme::Bearer)),
        );
    }
}
|
||||
|
||||
/// Aggregated OpenAPI specification for the whole server: registers every
/// handler path, all request/response schemas, the tag groups, and the JWT
/// bearer security scheme (via `SecurityAddon`).
#[derive(OpenApi)]
#[openapi(
    paths(health_check_handler, create_flist_handler, get_flist_state_handler, preview_flist_handler, list_flists_handler, sign_in_handler, block_handlers::upload_block_handler, block_handlers::get_block_handler, block_handlers::check_block_handler, block_handlers::verify_blocks_handler, block_handlers::get_blocks_by_hash_handler, block_handlers::list_blocks_handler, block_handlers::get_block_downloads_handler, block_handlers::get_user_blocks_handler, file_handlers::upload_file_handler, file_handlers::get_file_handler, website_handlers::serve_website_handler, serve_flists::serve_flists),
    modifiers(&SecurityAddon),
    components(
        schemas(
            // Common schemas
            DirListTemplate, DirLister, ResponseError, ErrorTemplate, TemplateErr, ResponseResult, FileInfo, FlistStateResponse,
            // Response wrapper schemas
            HealthResponse, BlockUploadedResponse,
            // Authentication schemas
            SignInBody, SignInResponse,
            // Flist schemas
            FlistBody, Job, FlistState, FlistStateInfo, PreviewResponse,
            // Block schemas
            block_handlers::VerifyBlock, block_handlers::VerifyBlocksRequest, block_handlers::VerifyBlocksResponse,
            block_handlers::BlocksResponse, block_handlers::ListBlocksParams, block_handlers::ListBlocksResponse, block_handlers::BlockInfo,
            block_handlers::UserBlocksResponse, block_handlers::BlockDownloadsResponse, block_handlers::UploadBlockParams, block_handlers::UserBlockInfo,
            // File schemas
            file_handlers::FileUploadResponse, file_handlers::FileDownloadRequest
        )
    ),
    tags(
        (name = "System", description = "System health and status"),
        (name = "Authentication", description = "Authentication endpoints"),
        (name = "Flist Management", description = "Flist creation and management"),
        (name = "Block Management", description = "Block storage and retrieval"),
        (name = "File Management", description = "File upload and download"),
        (name = "Website Serving", description = "Website content serving")
    )
)]
pub struct FlistApi;
|
||||
|
||||
/// Request body for flist creation: the docker image to convert plus optional
/// registry credentials, all forwarded to the docker client as-is.
#[derive(Debug, Deserialize, Serialize, Clone, ToSchema)]
pub struct FlistBody {
    // Image reference; ":latest" is appended when no tag is given.
    #[schema(example = "redis")]
    pub image_name: String,

    // Optional docker registry credentials (map 1:1 onto DockerCredentials).
    pub username: Option<String>,
    pub password: Option<String>,
    pub auth: Option<String>,
    pub email: Option<String>,
    pub server_address: Option<String>,
    pub identity_token: Option<String>,
    pub registry_token: Option<String>,
}
|
||||
|
||||
/// Response for an flist preview: its content lines, metadata string,
/// and checksum.
#[derive(Debug, Deserialize, Serialize, Clone, ToSchema)]
pub struct PreviewResponse {
    pub content: Vec<String>,
    pub metadata: String,
    pub checksum: String,
}
|
||||
|
||||
/// Lifecycle states of an flist-conversion job; each `String` payload is a
/// human-readable status message stored in the shared jobs-state map.
#[derive(Debug, Clone, Serialize, PartialEq, ToSchema)]
pub enum FlistState {
    /// Job accepted and queued.
    #[schema(title = "FlistStateAccepted")]
    Accepted(String),
    /// Conversion has started.
    #[schema(title = "FlistStateStarted")]
    Started(String),
    /// Conversion running; payload carries progress percentage.
    #[schema(title = "FlistStateInProgress")]
    InProgress(FlistStateInfo),
    /// Flist created successfully.
    #[schema(title = "FlistStateCreated")]
    Created(String),
    /// Conversion failed (partial output removed by the worker).
    #[schema(title = "FlistStateFailed")]
    Failed,
}
|
||||
|
||||
/// Progress payload for `FlistState::InProgress`.
#[derive(Debug, Clone, Serialize, PartialEq, ToSchema)]
pub struct FlistStateInfo {
    // Status message shown to the client.
    msg: String,
    // Percentage in [0, 100], computed from processed/total file count.
    progress: f32,
}
|
||||
|
||||
#[utoipa::path(
    get,
    path = "/api/v1",
    tag = "System",
    responses(
        (status = 200, description = "flist server is working", body = HealthResponse)
    )
)]
/// Liveness probe: unconditionally reports the service as healthy.
pub async fn health_check_handler() -> ResponseResult {
    ResponseResult::Health
}
|
||||
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/api/v1/fl",
|
||||
tag = "Flist Management",
|
||||
request_body = FlistBody,
|
||||
responses(
|
||||
(status = 201, description = "Flist conversion started", body = Job),
|
||||
(status = 401, description = "Unauthorized user", body = ResponseError),
|
||||
(status = 403, description = "Forbidden", body = ResponseError),
|
||||
(status = 409, description = "Conflict", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
security(
|
||||
("bearerAuth" = [])
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn create_flist_handler(
|
||||
State(state): State<Arc<config::AppState>>,
|
||||
Extension(username): Extension<String>,
|
||||
Json(body): Json<FlistBody>,
|
||||
) -> impl IntoResponse {
|
||||
let cfg = state.config.clone();
|
||||
let credentials = Some(DockerCredentials {
|
||||
username: body.username,
|
||||
password: body.password,
|
||||
auth: body.auth,
|
||||
email: body.email,
|
||||
serveraddress: body.server_address,
|
||||
identitytoken: body.identity_token,
|
||||
registrytoken: body.registry_token,
|
||||
});
|
||||
|
||||
let mut docker_image = body.image_name.to_string();
|
||||
if !docker_image.contains(':') {
|
||||
docker_image.push_str(":latest");
|
||||
}
|
||||
|
||||
let fl_name = docker_image.replace([':', '/'], "-") + ".fl";
|
||||
let username_dir = std::path::Path::new(&cfg.flist_dir).join(&username);
|
||||
let fl_path = username_dir.join(&fl_name);
|
||||
|
||||
if fl_path.exists() {
|
||||
return Err(ResponseError::Conflict("flist already exists".to_string()));
|
||||
}
|
||||
|
||||
if let Err(err) = fs::create_dir_all(&username_dir) {
|
||||
log::error!(
|
||||
"failed to create user flist directory `{:?}` with error {:?}",
|
||||
&username_dir,
|
||||
err
|
||||
);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
|
||||
let meta = match fungi::Writer::new(&fl_path, true).await {
|
||||
Ok(writer) => writer,
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
"failed to create a new writer for flist `{:?}` with error {}",
|
||||
fl_path,
|
||||
err
|
||||
);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
};
|
||||
|
||||
let store = match store::parse_router(&cfg.store_url).await {
|
||||
Ok(s) => s,
|
||||
Err(err) => {
|
||||
log::error!("failed to parse router for store with error {}", err);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
};
|
||||
|
||||
// Create a new job id for the flist request
|
||||
let job: Job = Job {
|
||||
id: Uuid::new_v4().to_string(),
|
||||
};
|
||||
let current_job = job.clone();
|
||||
|
||||
state
|
||||
.jobs_state
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.insert(
|
||||
job.id.clone(),
|
||||
FlistState::Accepted(format!("flist '{}' is accepted", &fl_name)),
|
||||
);
|
||||
|
||||
let flist_download_url = std::path::Path::new(&format!("{}:{}", cfg.host, cfg.port))
|
||||
.join(cfg.flist_dir)
|
||||
.join(username)
|
||||
.join(&fl_name);
|
||||
|
||||
tokio::spawn(async move {
|
||||
state
|
||||
.jobs_state
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.insert(
|
||||
job.id.clone(),
|
||||
FlistState::Started(format!("flist '{}' is started", fl_name)),
|
||||
);
|
||||
|
||||
let container_name = Uuid::new_v4().to_string();
|
||||
let docker_tmp_dir =
|
||||
tempdir::TempDir::new(&container_name).expect("failed to create tmp dir for docker");
|
||||
|
||||
let (tx, rx) = mpsc::channel();
|
||||
let mut docker_to_fl =
|
||||
docker::DockerImageToFlist::new(meta, docker_image, credentials, docker_tmp_dir);
|
||||
|
||||
let res = docker_to_fl.prepare().await;
|
||||
if res.is_err() {
|
||||
let _ = tokio::fs::remove_file(&fl_path).await;
|
||||
state
|
||||
.jobs_state
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.insert(job.id.clone(), FlistState::Failed);
|
||||
return;
|
||||
}
|
||||
|
||||
let files_count = docker_to_fl.files_count();
|
||||
let st = state.clone();
|
||||
let job_id = job.id.clone();
|
||||
let cloned_fl_path = fl_path.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut progress: f32 = 0.0;
|
||||
|
||||
for _ in 0..files_count - 1 {
|
||||
let step = rx.recv().expect("failed to receive progress") as f32;
|
||||
progress += step;
|
||||
let progress_percentage = progress / files_count as f32 * 100.0;
|
||||
st.jobs_state.lock().expect("failed to lock state").insert(
|
||||
job_id.clone(),
|
||||
FlistState::InProgress(FlistStateInfo {
|
||||
msg: "flist is in progress".to_string(),
|
||||
progress: progress_percentage,
|
||||
}),
|
||||
);
|
||||
st.flists_progress
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.insert(cloned_fl_path.clone(), progress_percentage);
|
||||
}
|
||||
});
|
||||
|
||||
let res = docker_to_fl.pack(store, Some(tx)).await;
|
||||
|
||||
// remove the file created with the writer if fl creation failed
|
||||
if res.is_err() {
|
||||
log::error!("failed creation failed with error {:?}", res.err());
|
||||
let _ = tokio::fs::remove_file(&fl_path).await;
|
||||
state
|
||||
.jobs_state
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.insert(job.id.clone(), FlistState::Failed);
|
||||
return;
|
||||
}
|
||||
|
||||
state
|
||||
.jobs_state
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.insert(
|
||||
job.id.clone(),
|
||||
FlistState::Created(format!(
|
||||
"flist {:?} is created successfully",
|
||||
flist_download_url
|
||||
)),
|
||||
);
|
||||
state
|
||||
.flists_progress
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.insert(fl_path, 100.0);
|
||||
});
|
||||
|
||||
Ok(ResponseResult::FlistCreated(current_job))
|
||||
}
|
||||
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/fl/{job_id}",
|
||||
tag = "Flist Management",
|
||||
responses(
|
||||
(status = 200, description = "Flist state", body = FlistStateResponse),
|
||||
(status = 404, description = "Flist not found", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
(status = 401, description = "Unauthorized user", body = ResponseError),
|
||||
(status = 403, description = "Forbidden", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("job_id" = String, Path, description = "flist job id")
|
||||
),
|
||||
security(
|
||||
("bearerAuth" = [])
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn get_flist_state_handler(
|
||||
Path(flist_job_id): Path<String>,
|
||||
State(state): State<Arc<config::AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
if !&state
|
||||
.jobs_state
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.contains_key(&flist_job_id.clone())
|
||||
{
|
||||
return Err(ResponseError::NotFound("flist doesn't exist".to_string()));
|
||||
}
|
||||
|
||||
let res_state = state
|
||||
.jobs_state
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.get(&flist_job_id.clone())
|
||||
.expect("failed to get from state")
|
||||
.to_owned();
|
||||
|
||||
match res_state {
|
||||
FlistState::Accepted(_) => Ok(ResponseResult::FlistState(res_state)),
|
||||
FlistState::Started(_) => Ok(ResponseResult::FlistState(res_state)),
|
||||
FlistState::InProgress(_) => Ok(ResponseResult::FlistState(res_state)),
|
||||
FlistState::Created(_) => {
|
||||
state
|
||||
.jobs_state
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.remove(&flist_job_id.clone());
|
||||
|
||||
Ok(ResponseResult::FlistState(res_state))
|
||||
}
|
||||
FlistState::Failed => {
|
||||
state
|
||||
.jobs_state
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.remove(&flist_job_id.clone());
|
||||
|
||||
Err(ResponseError::InternalServerError)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/fl",
|
||||
tag = "Flist Management",
|
||||
responses(
|
||||
(status = 200, description = "Listing flists", body = HashMap<String, Vec<FileInfo>>),
|
||||
(status = 401, description = "Unauthorized user", body = ResponseError),
|
||||
(status = 403, description = "Forbidden", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn list_flists_handler(State(state): State<Arc<config::AppState>>) -> impl IntoResponse {
|
||||
let mut flists: HashMap<String, Vec<FileInfo>> = HashMap::new();
|
||||
|
||||
let rs: Result<Vec<FileInfo>, std::io::Error> =
|
||||
visit_dir_one_level(&state.config.flist_dir, &state).await;
|
||||
|
||||
let files = match rs {
|
||||
Ok(files) => files,
|
||||
Err(e) => {
|
||||
log::error!("failed to list flists directory with error: {}", e);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
};
|
||||
|
||||
for file in files {
|
||||
if !file.is_file {
|
||||
let flists_per_username = visit_dir_one_level(&file.path_uri, &state).await;
|
||||
match flists_per_username {
|
||||
Ok(files) => flists.insert(file.name, files),
|
||||
Err(e) => {
|
||||
log::error!("failed to list flists per username with error: {}", e);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
};
|
||||
};
|
||||
}
|
||||
|
||||
Ok(ResponseResult::Flists(flists))
|
||||
}
|
||||
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/fl/preview/{flist_path}",
|
||||
tag = "Flist Management",
|
||||
responses(
|
||||
(status = 200, description = "Flist preview result", body = PreviewResponse),
|
||||
(status = 400, description = "Bad request", body = ResponseError),
|
||||
(status = 401, description = "Unauthorized user", body = ResponseError),
|
||||
(status = 403, description = "Forbidden", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("flist_path" = String, Path, description = "flist file path")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn preview_flist_handler(
|
||||
State(state): State<Arc<config::AppState>>,
|
||||
Path(flist_path): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
let fl_path = flist_path;
|
||||
|
||||
match validate_flist_path(&state, &fl_path).await {
|
||||
Ok(_) => (),
|
||||
Err(err) => return Err(ResponseError::BadRequest(err.to_string())),
|
||||
};
|
||||
|
||||
let content = match get_flist_content(&fl_path).await {
|
||||
Ok(paths) => paths,
|
||||
Err(_) => return Err(ResponseError::InternalServerError),
|
||||
};
|
||||
|
||||
let bytes = match std::fs::read(&fl_path) {
|
||||
Ok(b) => b,
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
"failed to read flist '{}' into bytes with error {}",
|
||||
fl_path,
|
||||
err
|
||||
);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
};
|
||||
|
||||
// Convert PathBuf values to strings for OpenAPI compatibility
|
||||
let content_strings: Vec<String> = content
|
||||
.into_iter()
|
||||
.map(|path| path.to_string_lossy().to_string())
|
||||
.collect();
|
||||
|
||||
Ok(ResponseResult::PreviewFlist(PreviewResponse {
|
||||
content: content_strings,
|
||||
metadata: state.config.store_url.join("-"),
|
||||
checksum: sha256::digest(&bytes),
|
||||
}))
|
||||
}
|
||||
|
||||
async fn validate_flist_path(state: &Arc<config::AppState>, fl_path: &String) -> Result<(), Error> {
|
||||
// validate path starting with `/`
|
||||
if fl_path.starts_with("/") {
|
||||
anyhow::bail!("invalid flist path '{}', shouldn't start with '/'", fl_path);
|
||||
}
|
||||
|
||||
// path should include 3 parts [parent dir, username, flist file]
|
||||
let parts: Vec<_> = fl_path.split("/").collect();
|
||||
if parts.len() != 3 {
|
||||
anyhow::bail!(
|
||||
format!("invalid flist path '{}', should consist of 3 parts [parent directory, username and flist name", fl_path
|
||||
));
|
||||
}
|
||||
|
||||
// validate parent dir
|
||||
if parts[0] != state.config.flist_dir {
|
||||
anyhow::bail!(
|
||||
"invalid flist path '{}', parent directory should be '{}'",
|
||||
fl_path,
|
||||
state.config.flist_dir
|
||||
);
|
||||
}
|
||||
|
||||
// validate username
|
||||
match state.db.get_user_by_username(parts[1]).await {
|
||||
Some(_) => (),
|
||||
None => {
|
||||
anyhow::bail!(
|
||||
"invalid flist path '{}', username '{}' doesn't exist",
|
||||
fl_path,
|
||||
parts[1]
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
// validate flist extension
|
||||
let fl_name = parts[2].to_string();
|
||||
let ext = match std::path::Path::new(&fl_name).extension() {
|
||||
Some(ex) => ex.to_string_lossy().to_string(),
|
||||
None => "".to_string(),
|
||||
};
|
||||
|
||||
if ext != "fl" {
|
||||
anyhow::bail!(
|
||||
"invalid flist path '{}', invalid flist extension '{}' should be 'fl'",
|
||||
fl_path,
|
||||
ext
|
||||
);
|
||||
}
|
||||
|
||||
// validate flist existence
|
||||
if !std::path::Path::new(parts[0])
|
||||
.join(parts[1])
|
||||
.join(&fl_name)
|
||||
.exists()
|
||||
{
|
||||
anyhow::bail!("flist '{}' doesn't exist", fl_path);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_flist_content(fl_path: &String) -> Result<Vec<PathBuf>, Error> {
|
||||
let mut visitor = ReadVisitor::default();
|
||||
|
||||
let meta = match fungi::Reader::new(&fl_path).await {
|
||||
Ok(reader) => reader,
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
"failed to initialize metadata database for flist `{}` with error {}",
|
||||
fl_path,
|
||||
err
|
||||
);
|
||||
anyhow::bail!("Internal server error");
|
||||
}
|
||||
};
|
||||
|
||||
match meta.walk(&mut visitor).await {
|
||||
Ok(()) => return Ok(visitor.into_inner()),
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
"failed to walk through metadata for flist `{}` with error {}",
|
||||
fl_path,
|
||||
err
|
||||
);
|
||||
anyhow::bail!("Internal server error");
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Walk visitor that simply collects every path it is shown.
#[derive(Default)]
struct ReadVisitor {
    // Paths accumulated during the walk, in visit order.
    inner: Vec<PathBuf>,
}
|
||||
|
||||
impl ReadVisitor {
|
||||
pub fn into_inner(self) -> Vec<PathBuf> {
|
||||
self.inner
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
impl fungi::meta::WalkVisitor for ReadVisitor {
    /// Record the visited path and keep walking; this visitor never stops the
    /// walk early, so `inner` ends up with every entry in the flist.
    async fn visit(
        &mut self,
        path: &std::path::Path,
        _node: &fungi::meta::Inode,
    ) -> fungi::meta::Result<fungi::meta::Walk> {
        self.inner.push(path.to_path_buf());
        Ok(fungi::meta::Walk::Continue)
    }
}
|
||||
225
components/rfs/src/server/mod.rs
Normal file
225
components/rfs/src/server/mod.rs
Normal file
@@ -0,0 +1,225 @@
|
||||
mod auth;
|
||||
mod block_handlers;
|
||||
mod config;
|
||||
mod db;
|
||||
mod file_handlers;
|
||||
mod handlers;
|
||||
mod models;
|
||||
mod response;
|
||||
mod serve_flists;
|
||||
mod website_handlers;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use axum::{
|
||||
error_handling::HandleErrorLayer,
|
||||
extract::{Path, State},
|
||||
http::StatusCode,
|
||||
middleware,
|
||||
response::IntoResponse,
|
||||
routing::{get, head, post},
|
||||
BoxError, Router,
|
||||
};
|
||||
use config::AppState;
|
||||
use hyper::{
|
||||
header::{ACCEPT, AUTHORIZATION, CONTENT_TYPE},
|
||||
Method,
|
||||
};
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
collections::HashMap,
|
||||
sync::{Arc, Mutex},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::signal;
|
||||
use tower::ServiceBuilder;
|
||||
use tower_http::cors::CorsLayer;
|
||||
use tower_http::{cors::Any, trace::TraceLayer};
|
||||
|
||||
use utoipa::OpenApi;
|
||||
use utoipa_swagger_ui::SwaggerUi;
|
||||
// Using only the main FlistApi for OpenAPI documentation
|
||||
|
||||
pub async fn app(config_path: &str) -> Result<()> {
|
||||
let config = config::parse_config(config_path)
|
||||
.await
|
||||
.context("failed to parse config file")?;
|
||||
|
||||
// Initialize the database based on configuration
|
||||
let db: Arc<db::DBType> = if let Some(sqlite_path) = &config.sqlite_path {
|
||||
log::info!("Using SQLite database at: {}", sqlite_path);
|
||||
Arc::new(db::DBType::SqlDB(
|
||||
db::sqlite::SqlDB::new(sqlite_path, &config.storage_dir, &config.users.clone()).await,
|
||||
))
|
||||
} else {
|
||||
log::info!("Using in-memory MapDB database");
|
||||
Arc::new(db::DBType::MapDB(db::map::MapDB::new(
|
||||
&config.users.clone(),
|
||||
)))
|
||||
};
|
||||
|
||||
let app_state = Arc::new(config::AppState {
|
||||
jobs_state: Mutex::new(HashMap::new()),
|
||||
flists_progress: Mutex::new(HashMap::new()),
|
||||
db,
|
||||
config,
|
||||
});
|
||||
|
||||
let cors = CorsLayer::new()
|
||||
.allow_origin(Any)
|
||||
.allow_methods([Method::GET, Method::POST])
|
||||
.allow_headers([AUTHORIZATION, ACCEPT, CONTENT_TYPE]);
|
||||
|
||||
let v1_routes = Router::new()
|
||||
.route("/api/v1", get(handlers::health_check_handler))
|
||||
.route("/api/v1/signin", post(auth::sign_in_handler))
|
||||
.route(
|
||||
"/api/v1/fl",
|
||||
post(handlers::create_flist_handler).layer(middleware::from_fn_with_state(
|
||||
app_state.clone(),
|
||||
auth::authorize,
|
||||
)),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/fl/:job_id",
|
||||
get(handlers::get_flist_state_handler).layer(middleware::from_fn_with_state(
|
||||
app_state.clone(),
|
||||
auth::authorize,
|
||||
)),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/fl/preview/:flist_path",
|
||||
get(handlers::preview_flist_handler),
|
||||
)
|
||||
.route("/api/v1/fl", get(handlers::list_flists_handler))
|
||||
.route(
|
||||
"/api/v1/block",
|
||||
post(block_handlers::upload_block_handler).layer(middleware::from_fn_with_state(
|
||||
app_state.clone(),
|
||||
auth::authorize,
|
||||
)),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/block/:hash",
|
||||
get(block_handlers::get_block_handler),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/block/:hash",
|
||||
head(block_handlers::check_block_handler),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/block/verify",
|
||||
post(block_handlers::verify_blocks_handler),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/blocks/:hash",
|
||||
get(block_handlers::get_blocks_by_hash_handler),
|
||||
)
|
||||
.route("/api/v1/blocks", get(block_handlers::list_blocks_handler))
|
||||
.route(
|
||||
"/api/v1/block/:hash/downloads",
|
||||
get(block_handlers::get_block_downloads_handler),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/user/blocks",
|
||||
get(block_handlers::get_user_blocks_handler).layer(middleware::from_fn_with_state(
|
||||
app_state.clone(),
|
||||
auth::authorize,
|
||||
)),
|
||||
)
|
||||
.route(
|
||||
"/api/v1/file",
|
||||
post(file_handlers::upload_file_handler).layer(middleware::from_fn_with_state(
|
||||
app_state.clone(),
|
||||
auth::authorize,
|
||||
)),
|
||||
)
|
||||
.route("/api/v1/file/:hash", get(file_handlers::get_file_handler))
|
||||
.route(
|
||||
"/website/:website_hash/*path",
|
||||
get(website_handlers::serve_website_handler),
|
||||
)
|
||||
.route(
|
||||
"/website/:website_hash/",
|
||||
get(
|
||||
|state: State<Arc<AppState>>, path: Path<String>| async move {
|
||||
website_handlers::serve_website_handler(state, Path((path.0, "".to_string())))
|
||||
.await
|
||||
},
|
||||
),
|
||||
)
|
||||
.route("/*path", get(serve_flists::serve_flists));
|
||||
|
||||
let app = Router::new()
|
||||
.merge(
|
||||
SwaggerUi::new("/swagger-ui")
|
||||
.url("/api-docs/openapi.json", handlers::FlistApi::openapi()),
|
||||
)
|
||||
.merge(v1_routes)
|
||||
.layer(
|
||||
ServiceBuilder::new()
|
||||
.layer(HandleErrorLayer::new(handle_error))
|
||||
.load_shed()
|
||||
.concurrency_limit(1024)
|
||||
.timeout(Duration::from_secs(10))
|
||||
.layer(TraceLayer::new_for_http()),
|
||||
)
|
||||
.with_state(Arc::clone(&app_state))
|
||||
.layer(cors);
|
||||
|
||||
let address = format!("{}:{}", app_state.config.host, app_state.config.port);
|
||||
let listener = tokio::net::TcpListener::bind(address)
|
||||
.await
|
||||
.context("failed to bind address")?;
|
||||
|
||||
log::info!(
|
||||
"🚀 Server started successfully at {}:{}",
|
||||
app_state.config.host,
|
||||
app_state.config.port
|
||||
);
|
||||
|
||||
axum::serve(listener, app)
|
||||
.with_graceful_shutdown(shutdown_signal())
|
||||
.await
|
||||
.context("failed to serve listener")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn shutdown_signal() {
|
||||
let ctrl_c = async {
|
||||
signal::ctrl_c()
|
||||
.await
|
||||
.expect("failed to install Ctrl+C handler");
|
||||
};
|
||||
|
||||
#[cfg(unix)]
|
||||
let terminate = async {
|
||||
signal::unix::signal(signal::unix::SignalKind::terminate())
|
||||
.expect("failed to install signal handler")
|
||||
.recv()
|
||||
.await;
|
||||
};
|
||||
|
||||
tokio::select! {
|
||||
_ = ctrl_c => {},
|
||||
_ = terminate => {},
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_error(error: BoxError) -> impl IntoResponse {
|
||||
if error.is::<tower::timeout::error::Elapsed>() {
|
||||
return (StatusCode::REQUEST_TIMEOUT, Cow::from("request timed out"));
|
||||
}
|
||||
|
||||
if error.is::<tower::load_shed::error::Overloaded>() {
|
||||
return (
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
Cow::from("service is overloaded, try again later"),
|
||||
);
|
||||
}
|
||||
|
||||
(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
Cow::from(format!("Unhandled internal error: {}", error)),
|
||||
)
|
||||
}
|
||||
18
components/rfs/src/server/models/block.rs
Normal file
18
components/rfs/src/server/models/block.rs
Normal file
@@ -0,0 +1,18 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
/// A fixed-size chunk of a file as stored/transferred by the server.
#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
pub struct Block {
    pub index: u64,    // The index of the block in the file
    pub hash: String,  // The hash of the block's content
    pub data: Vec<u8>, // The actual data of the block
    pub size: usize,   // The size of the block's data
}

impl Block {
    /// Calculates the hash of the block's data using BLAKE2b with a 32-byte
    /// digest, hex-encoded. NOTE(review): the previous comment claimed
    /// SHA-256, which did not match the implementation below.
    pub fn calculate_hash(data: &[u8]) -> String {
        let hash = blake2b_simd::Params::new().hash_length(32).hash(data);
        hex::encode(hash.as_bytes())
    }
}
|
||||
28
components/rfs/src/server/models/file.rs
Normal file
28
components/rfs/src/server/models/file.rs
Normal file
@@ -0,0 +1,28 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use sqlx::FromRow;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
/// A whole uploaded file addressed by the SHA-256 of its content.
#[derive(Debug, Clone, FromRow, Serialize, Deserialize)]
pub struct File {
    pub file_hash: String,     // Hash of the file content
    pub file_content: Vec<u8>, // Content of the file
}

impl File {
    /// Calculates the hash of the file's content using SHA-256, returned as a
    /// lowercase hex string.
    pub fn calculate_hash(data: &[u8]) -> String {
        let mut hasher = Sha256::new();
        hasher.update(data);
        format!("{:x}", hasher.finalize())
    }

    /// Creates a new File instance by calculating the hash of the content.
    pub fn new(file_content: Vec<u8>) -> Self {
        let file_hash = Self::calculate_hash(&file_content);
        Self {
            file_hash,
            file_content,
        }
    }
}
|
||||
7
components/rfs/src/server/models/mod.rs
Normal file
7
components/rfs/src/server/models/mod.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
pub mod block;
|
||||
pub mod file;
|
||||
pub mod user;
|
||||
|
||||
pub use block::Block;
|
||||
pub use file::File;
|
||||
pub use user::User;
|
||||
9
components/rfs/src/server/models/user.rs
Normal file
9
components/rfs/src/server/models/user.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::FromRow;
|
||||
|
||||
/// A server account, loaded from config or the users table.
#[derive(Debug, Clone, FromRow, Serialize, Deserialize)]
pub struct User {
    // Database row id; None for users not yet persisted (e.g. config-defined).
    pub id: Option<i64>,
    pub username: String,
    // NOTE(review): nothing here shows hashing — confirm whether passwords
    // are stored hashed or in plain text before exposing this struct further.
    pub password: String,
}
|
||||
232
components/rfs/src/server/response.rs
Normal file
232
components/rfs/src/server/response.rs
Normal file
@@ -0,0 +1,232 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use askama::Template;
|
||||
use axum::{
|
||||
body::Body,
|
||||
http::StatusCode,
|
||||
response::{Html, IntoResponse, Response},
|
||||
Json,
|
||||
};
|
||||
use serde::Serialize;
|
||||
use utoipa::ToSchema;
|
||||
|
||||
use crate::server::{
|
||||
auth::SignInResponse,
|
||||
config::Job,
|
||||
file_handlers::FileUploadResponse,
|
||||
handlers::{FlistState, PreviewResponse},
|
||||
};
|
||||
|
||||
/// Every error shape the API can return; `IntoResponse` maps each variant to
/// its HTTP status. `TemplateError` carries a renderable HTML error page for
/// the browser-facing listing routes.
#[derive(Serialize, ToSchema)]
pub enum ResponseError {
    #[schema(title = "ResponseErrorInternalServerError")]
    InternalServerError,
    #[schema(title = "ResponseErrorConflict")]
    Conflict(String),
    #[schema(title = "ResponseErrorNotFound")]
    NotFound(String),
    #[schema(title = "ResponseErrorUnauthorized")]
    Unauthorized(String),
    #[schema(title = "ResponseErrorBadRequest")]
    BadRequest(String),
    #[schema(title = "ResponseErrorForbidden")]
    Forbidden(String),
    #[schema(title = "ResponseErrorTemplateError")]
    TemplateError(ErrorTemplate),
}
|
||||
|
||||
impl IntoResponse for ResponseError {
|
||||
fn into_response(self) -> Response<Body> {
|
||||
match self {
|
||||
ResponseError::InternalServerError => {
|
||||
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error").into_response()
|
||||
}
|
||||
ResponseError::Conflict(msg) => (StatusCode::CONFLICT, msg).into_response(),
|
||||
ResponseError::NotFound(msg) => (StatusCode::NOT_FOUND, msg).into_response(),
|
||||
ResponseError::Unauthorized(msg) => (StatusCode::UNAUTHORIZED, msg).into_response(),
|
||||
ResponseError::BadRequest(msg) => (StatusCode::BAD_REQUEST, msg).into_response(),
|
||||
ResponseError::Forbidden(msg) => (StatusCode::FORBIDDEN, msg).into_response(),
|
||||
ResponseError::TemplateError(t) => match t.render() {
|
||||
Ok(html) => {
|
||||
let mut resp = Html(html).into_response();
|
||||
match t.err {
|
||||
TemplateErr::NotFound(reason) => {
|
||||
*resp.status_mut() = StatusCode::NOT_FOUND;
|
||||
resp.headers_mut()
|
||||
.insert(FAIL_REASON_HEADER_NAME, reason.parse().unwrap());
|
||||
}
|
||||
TemplateErr::BadRequest(reason) => {
|
||||
*resp.status_mut() = StatusCode::BAD_REQUEST;
|
||||
resp.headers_mut()
|
||||
.insert(FAIL_REASON_HEADER_NAME, reason.parse().unwrap());
|
||||
}
|
||||
TemplateErr::InternalServerError(reason) => {
|
||||
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||
resp.headers_mut()
|
||||
.insert(FAIL_REASON_HEADER_NAME, reason.parse().unwrap());
|
||||
}
|
||||
}
|
||||
resp
|
||||
}
|
||||
Err(err) => {
|
||||
tracing::error!("template render failed, err={}", err);
|
||||
(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
format!("Failed to render template. Error: {}", err),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wrapper structs for OpenAPI documentation to match the actual JSON response format
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct FlistStateResponse {
|
||||
pub flist_state: FlistState,
|
||||
}
|
||||
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct HealthResponse {
|
||||
pub msg: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, ToSchema)]
|
||||
pub struct BlockUploadedResponse {
|
||||
pub hash: String,
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
|
||||
/// Every success shape the API can return; `IntoResponse` maps each variant
/// to its status code and body (JSON for API routes, HTML for the directory
/// template, raw file bytes for `Res`).
#[derive(ToSchema)]
pub enum ResponseResult {
    #[schema(title = "ResponseResultHealth")]
    Health,
    #[schema(title = "ResponseResultFlistCreated")]
    FlistCreated(Job),
    #[schema(title = "ResponseResultFlistState")]
    FlistState(FlistState),
    #[schema(title = "ResponseResultFlists")]
    Flists(HashMap<String, Vec<FileInfo>>),
    #[schema(title = "ResponseResultPreviewFlist")]
    PreviewFlist(PreviewResponse),
    #[schema(title = "ResponseResultSignedIn")]
    SignedIn(SignInResponse),
    #[schema(title = "ResponseResultDirTemplate")]
    DirTemplate(DirListTemplate),
    #[schema(title = "ResponseResultBlockUploaded")]
    BlockUploaded(String),
    #[schema(title = "ResponseResultFileUploaded")]
    FileUploaded(FileUploadResponse),
    // Raw pass-through of a static-file response from tower-http's ServeDir.
    #[schema(value_type = String, title = "ResponseResultRes", format = "binary")]
    Res(hyper::Response<tower_http::services::fs::ServeFileSystemResponseBody>),
}
|
||||
|
||||
impl IntoResponse for ResponseResult {
|
||||
fn into_response(self) -> Response<Body> {
|
||||
match self {
|
||||
ResponseResult::Health => (
|
||||
StatusCode::OK,
|
||||
Json(HealthResponse {
|
||||
msg: "flist server is working".to_string(),
|
||||
}),
|
||||
)
|
||||
.into_response(),
|
||||
ResponseResult::SignedIn(token) => (StatusCode::CREATED, Json(token)).into_response(),
|
||||
ResponseResult::FlistCreated(job) => (StatusCode::CREATED, Json(job)).into_response(),
|
||||
ResponseResult::FlistState(flist_state) => (
|
||||
StatusCode::OK,
|
||||
Json(serde_json::json!({
|
||||
"flist_state": flist_state
|
||||
})),
|
||||
)
|
||||
.into_response(),
|
||||
ResponseResult::Flists(flists) => (StatusCode::OK, Json(flists)).into_response(),
|
||||
ResponseResult::PreviewFlist(content) => {
|
||||
(StatusCode::OK, Json(content)).into_response()
|
||||
}
|
||||
ResponseResult::BlockUploaded(hash) => (
|
||||
StatusCode::OK,
|
||||
Json(BlockUploadedResponse {
|
||||
hash,
|
||||
message: "Block processed successfully".to_string(),
|
||||
}),
|
||||
)
|
||||
.into_response(),
|
||||
ResponseResult::FileUploaded(response) => {
|
||||
(StatusCode::CREATED, Json(response)).into_response()
|
||||
}
|
||||
ResponseResult::DirTemplate(t) => match t.render() {
|
||||
Ok(html) => Html(html).into_response(),
|
||||
Err(err) => {
|
||||
tracing::error!("template render failed, err={}", err);
|
||||
(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
format!("Failed to render template. Error: {}", err),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
},
|
||||
ResponseResult::Res(res) => res.map(axum::body::Body::new),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//////// TEMPLATES ////////
|
||||
|
||||
/// One entry in a directory listing, as shown by the HTML template and the
/// flist-listing API.
#[derive(Serialize, ToSchema)]
pub struct FileInfo {
    pub name: String,
    pub path_uri: String,
    pub is_file: bool,
    pub size: u64,
    // Modification time as unix seconds; rendered by the `datetime` filter.
    pub last_modified: i64,
    // Build progress percentage (0.0–100.0) for in-flight flists.
    pub progress: f32,
}

/// The set of entries rendered by the directory-listing template.
#[derive(Serialize, ToSchema)]
pub struct DirLister {
    pub files: Vec<FileInfo>,
}
|
||||
|
||||
/// Askama template for the directory-listing page (templates/index.html).
#[derive(Template, Serialize, ToSchema)]
#[template(path = "index.html")]
pub struct DirListTemplate {
    pub lister: DirLister,
    // The request path being listed, shown as the page heading/breadcrumb.
    pub cur_path: String,
}
|
||||
|
||||
mod filters {
|
||||
pub(crate) fn datetime(ts: &i64) -> ::askama::Result<String> {
|
||||
if let Ok(format) =
|
||||
time::format_description::parse("[year]-[month]-[day] [hour]:[minute]:[second] UTC")
|
||||
{
|
||||
return Ok(time::OffsetDateTime::from_unix_timestamp(*ts)
|
||||
.unwrap()
|
||||
.format(&format)
|
||||
.unwrap());
|
||||
}
|
||||
Err(askama::Error::Fmt(std::fmt::Error))
|
||||
}
|
||||
}
|
||||
|
||||
/// Askama template for the HTML error page (templates/error.html).
#[derive(Template, Serialize, ToSchema)]
#[template(path = "error.html")]
pub struct ErrorTemplate {
    // Which error class occurred; also decides the HTTP status code.
    pub err: TemplateErr,
    // The request path that triggered the error.
    pub cur_path: String,
    // Human-readable message rendered in the page body.
    pub message: String,
}
|
||||
|
||||
// Response header carrying the machine-readable failure reason alongside the
// rendered HTML error page.
const FAIL_REASON_HEADER_NAME: &str = "fl-server-fail-reason";

/// Error classes for template-rendered (HTML) responses; each maps to an
/// HTTP status in `ResponseError::into_response`.
#[derive(Serialize, ToSchema)]
pub enum TemplateErr {
    #[schema(title = "TemplateErrBadRequest")]
    BadRequest(String),
    #[schema(title = "TemplateErrNotFound")]
    NotFound(String),
    #[schema(title = "TemplateErrInternalServerError")]
    InternalServerError(String),
}
|
||||
165
components/rfs/src/server/serve_flists.rs
Normal file
165
components/rfs/src/server/serve_flists.rs
Normal file
@@ -0,0 +1,165 @@
|
||||
use axum::extract::State;
|
||||
use std::{io::Error, path::PathBuf, sync::Arc};
|
||||
use tokio::io;
|
||||
use tower::util::ServiceExt;
|
||||
use tower_http::services::ServeDir;
|
||||
|
||||
use axum::{
|
||||
body::Body,
|
||||
http::{Request, StatusCode},
|
||||
response::IntoResponse,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use percent_encoding::percent_decode;
|
||||
|
||||
use crate::server::{
|
||||
config,
|
||||
response::{
|
||||
DirListTemplate, DirLister, ErrorTemplate, FileInfo, ResponseError, ResponseResult,
|
||||
TemplateErr,
|
||||
},
|
||||
};
|
||||
|
||||
#[debug_handler]
/// Serve flist files from the server's filesystem
#[utoipa::path(
    get,
    path = "/{path}",
    tag = "Flist Management",
    params(
        ("path" = String, Path, description = "Path to the flist file or directory to serve")
    ),
    responses(
        (status = 200, description = "Successfully served the flist or directory listing", body = ResponseResult),
        (status = 404, description = "Flist not found", body = ResponseError),
        (status = 500, description = "Internal server error", body = ResponseError)
    )
)]
pub async fn serve_flists(
    State(state): State<Arc<config::AppState>>,
    req: Request<Body>,
) -> impl IntoResponse {
    // Keep the requested path for error pages / directory listings, since
    // `req` is consumed by ServeDir below.
    let path = req.uri().path().to_string();

    // First attempt: serve the path as a plain static file.
    // NOTE(review): ServeDir::new("") resolves against the process's current
    // working directory — confirm the server is always launched from the
    // directory that contains the flist tree.
    match ServeDir::new("").oneshot(req).await {
        Ok(res) => {
            let status = res.status();
            match status {
                // Not a regular file: it may be a directory we can list.
                StatusCode::NOT_FOUND => {
                    // Reject traversal attempts before touching the filesystem.
                    let full_path = match validate_path(&path) {
                        Ok(p) => p,
                        Err(_) => {
                            return Err(ResponseError::TemplateError(ErrorTemplate {
                                err: TemplateErr::BadRequest("invalid path".to_string()),
                                cur_path: path.to_string(),
                                message: "invalid path".to_owned(),
                            }));
                        }
                    };

                    let cur_path = std::path::Path::new(&full_path);

                    match cur_path.is_dir() {
                        // Directory: render an HTML listing of its entries.
                        true => {
                            let rs = visit_dir_one_level(&full_path, &state).await;
                            match rs {
                                Ok(files) => Ok(ResponseResult::DirTemplate(DirListTemplate {
                                    lister: DirLister { files },
                                    cur_path: path.to_string(),
                                })),
                                Err(e) => Err(ResponseError::TemplateError(ErrorTemplate {
                                    err: TemplateErr::InternalServerError(e.to_string()),
                                    cur_path: path.to_string(),
                                    message: e.to_string(),
                                })),
                            }
                        }
                        // Neither a servable file nor a directory: real 404.
                        false => Err(ResponseError::TemplateError(ErrorTemplate {
                            err: TemplateErr::NotFound("file not found".to_string()),
                            cur_path: path.to_string(),
                            message: "file not found".to_owned(),
                        })),
                    }
                }
                // Any other status (200, 304, ...) passes through untouched.
                _ => Ok(ResponseResult::Res(res)),
            }
        }
        Err(err) => Err(ResponseError::TemplateError(ErrorTemplate {
            err: TemplateErr::InternalServerError(format!("Unhandled error: {}", err)),
            cur_path: path.to_string(),
            message: format!("Unhandled error: {}", err),
        })),
    }
}
|
||||
|
||||
fn validate_path(path: &str) -> io::Result<PathBuf> {
|
||||
let path = path.trim_start_matches('/');
|
||||
let path = percent_decode(path.as_ref()).decode_utf8_lossy();
|
||||
|
||||
let mut full_path = PathBuf::new();
|
||||
|
||||
// validate
|
||||
for seg in path.split('/') {
|
||||
if seg.starts_with("..") || seg.contains('\\') {
|
||||
return Err(Error::other("invalid path"));
|
||||
}
|
||||
full_path.push(seg);
|
||||
}
|
||||
|
||||
Ok(full_path)
|
||||
}
|
||||
|
||||
pub async fn visit_dir_one_level<P: AsRef<std::path::Path>>(
|
||||
path: P,
|
||||
state: &Arc<config::AppState>,
|
||||
) -> io::Result<Vec<FileInfo>> {
|
||||
let path = path.as_ref();
|
||||
let mut dir = tokio::fs::read_dir(path).await?;
|
||||
let mut files: Vec<FileInfo> = Vec::new();
|
||||
|
||||
while let Some(child) = dir.next_entry().await? {
|
||||
let path_uri = child.path().to_string_lossy().to_string();
|
||||
let is_file = child.file_type().await?.is_file();
|
||||
let name = child.file_name().to_string_lossy().to_string();
|
||||
|
||||
let mut progress = 0.0;
|
||||
if is_file {
|
||||
match state
|
||||
.flists_progress
|
||||
.lock()
|
||||
.expect("failed to lock state")
|
||||
.get(&path.join(&name).to_path_buf())
|
||||
{
|
||||
Some(p) => progress = *p,
|
||||
None => progress = 100.0,
|
||||
}
|
||||
|
||||
let ext = child
|
||||
.path()
|
||||
.extension()
|
||||
.expect("failed to get path extension")
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
if ext != "fl" {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
files.push(FileInfo {
|
||||
name,
|
||||
path_uri,
|
||||
is_file,
|
||||
size: child.metadata().await?.len(),
|
||||
last_modified: child
|
||||
.metadata()
|
||||
.await?
|
||||
.modified()?
|
||||
.duration_since(std::time::SystemTime::UNIX_EPOCH)
|
||||
.expect("failed to get duration")
|
||||
.as_secs() as i64,
|
||||
progress,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(files)
|
||||
}
|
||||
197
components/rfs/src/server/website_handlers.rs
Normal file
197
components/rfs/src/server/website_handlers.rs
Normal file
@@ -0,0 +1,197 @@
|
||||
use crate::fungi::{meta, Reader};
|
||||
use aes_gcm::{
|
||||
aead::{Aead, KeyInit},
|
||||
Aes256Gcm, Nonce,
|
||||
};
|
||||
use anyhow::{Context, Result};
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
http::StatusCode,
|
||||
response::IntoResponse,
|
||||
};
|
||||
use axum_macros::debug_handler;
|
||||
use mime_guess::from_path;
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
use tempfile::NamedTempFile;
|
||||
// OpenApi is now only used in the main handlers.rs file
|
||||
|
||||
use crate::server::{config::AppState, db::DB, response::ResponseError};
|
||||
|
||||
// Website API endpoints are included in the main FlistApi in handlers.rs
|
||||
|
||||
/// Resolves a file path within a flist database to get file information
|
||||
async fn get_file_from_flist(flist_content: &[u8], file_path: &str) -> Result<Vec<meta::Block>> {
|
||||
// Create a temporary file
|
||||
let temp_file = NamedTempFile::new().context("failed to create temporary file")?;
|
||||
|
||||
// Write flist content to the temporary file
|
||||
fs::write(temp_file.path(), flist_content)
|
||||
.context("failed to write flist content to temporary file")?;
|
||||
|
||||
// Open the flist file as a database using the existing Reader
|
||||
let reader = Reader::new(temp_file.path().to_str().unwrap())
|
||||
.await
|
||||
.context("failed to open flist as a database")?;
|
||||
|
||||
// Find the root inode
|
||||
let root_inode: u64 = reader
|
||||
.root_inode()
|
||||
.await
|
||||
.context("failed to find root inode")?
|
||||
.ino;
|
||||
|
||||
// Split the path and traverse
|
||||
let mut current_inode = root_inode;
|
||||
let path_components: Vec<&str> = file_path.split('/').collect();
|
||||
|
||||
for (i, component) in path_components.iter().enumerate() {
|
||||
if component.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// If this is the last component, get file info
|
||||
if i == path_components.len() - 1 {
|
||||
let file_inode = match reader.lookup(current_inode, component).await {
|
||||
Ok(inode) => match inode {
|
||||
Some(inode) => inode.ino,
|
||||
None => {
|
||||
anyhow::bail!("file not found");
|
||||
}
|
||||
},
|
||||
Err(err) => return Err(anyhow::Error::new(err).context("failed to lookup inode")),
|
||||
};
|
||||
|
||||
// Get blocks
|
||||
let blocks: Vec<meta::Block> = reader
|
||||
.blocks(file_inode)
|
||||
.await
|
||||
.context("failed to get blocks")?;
|
||||
|
||||
return Ok(blocks);
|
||||
}
|
||||
|
||||
// Find the next inode in the path
|
||||
current_inode = match reader.lookup(current_inode, component).await {
|
||||
Ok(inode) => match inode {
|
||||
Some(inode) => inode.ino,
|
||||
None => {
|
||||
anyhow::bail!("directory not found");
|
||||
}
|
||||
},
|
||||
Err(err) => return Err(anyhow::Error::new(err).context("failed to lookup inode")),
|
||||
};
|
||||
}
|
||||
|
||||
anyhow::bail!("file not found")
|
||||
}
|
||||
|
||||
async fn decrypt_block(state: &Arc<AppState>, block: &meta::Block) -> Result<Vec<u8>> {
|
||||
let encrypted = match state.db.get_block(&hex::encode(block.id)).await {
|
||||
Ok(Some(block_content)) => block_content,
|
||||
Ok(None) => {
|
||||
anyhow::bail!("Block {:?} not found", block.id);
|
||||
}
|
||||
Err(err) => {
|
||||
anyhow::bail!("Failed to get block {:?}: {}", block.id, err);
|
||||
}
|
||||
};
|
||||
|
||||
let cipher =
|
||||
Aes256Gcm::new_from_slice(&block.key).map_err(|_| anyhow::anyhow!("key is invalid"))?;
|
||||
let nonce = Nonce::from_slice(&block.key[..12]);
|
||||
|
||||
let compressed = cipher
|
||||
.decrypt(nonce, encrypted.as_slice())
|
||||
.map_err(|_| anyhow::anyhow!("encryption error"))?;
|
||||
|
||||
let mut decoder = snap::raw::Decoder::new();
|
||||
let plain = decoder.decompress_vec(&compressed)?;
|
||||
|
||||
Ok(plain)
|
||||
}
|
||||
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/api/v1/website/{website_hash}/{path}",
|
||||
tag = "Website Serving",
|
||||
responses(
|
||||
(status = 200, description = "Website file served successfully", content_type = "application/octet-stream", body = [u8]),
|
||||
(status = 404, description = "File not found", body = ResponseError),
|
||||
(status = 500, description = "Internal server error", body = ResponseError),
|
||||
),
|
||||
params(
|
||||
("website_hash" = String, Path, description = "flist hash of the website directory"),
|
||||
("path" = String, Path, description = "Path to the file within the website directory, defaults to index.html if empty")
|
||||
)
|
||||
)]
|
||||
#[debug_handler]
|
||||
pub async fn serve_website_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((website_hash, path)): Path<(String, String)>,
|
||||
) -> impl IntoResponse {
|
||||
// If no path is provided, default to index.html
|
||||
let file_path = if path.is_empty() {
|
||||
"index.html".to_string()
|
||||
} else {
|
||||
path
|
||||
};
|
||||
|
||||
// Get the flist using the website hash
|
||||
let flist = match state.db.get_file_by_hash(&website_hash).await {
|
||||
Ok(Some(file)) => file,
|
||||
Ok(None) => {
|
||||
return Err(ResponseError::NotFound(format!(
|
||||
"Flist with hash '{}' not found",
|
||||
website_hash
|
||||
)));
|
||||
}
|
||||
Err(err) => {
|
||||
log::error!("Failed to retrieve flist metadata: {}", err);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
};
|
||||
|
||||
// Resolve the file information from the flist content
|
||||
let file_blocks = match get_file_from_flist(&flist.file_content, &file_path).await {
|
||||
Ok(blocks) => blocks,
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
"Failed to resolve file '{}' from flist '{}': {}",
|
||||
file_path,
|
||||
website_hash,
|
||||
err
|
||||
);
|
||||
return Err(ResponseError::NotFound(format!(
|
||||
"File {} not found in flist {}",
|
||||
file_path, website_hash
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let mut file_content = Vec::new();
|
||||
for block in file_blocks {
|
||||
match decrypt_block(&state, &block).await {
|
||||
Ok(block_content) => file_content.extend(block_content),
|
||||
Err(err) => {
|
||||
log::error!(
|
||||
"Failed to decrypt block {:?} for file '{}' in website '{}': {}",
|
||||
block.id,
|
||||
file_path,
|
||||
website_hash,
|
||||
err
|
||||
);
|
||||
return Err(ResponseError::InternalServerError);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mime_type = from_path(&file_path).first_or_octet_stream();
|
||||
|
||||
Ok((
|
||||
StatusCode::OK,
|
||||
[(axum::http::header::CONTENT_TYPE, mime_type.to_string())],
|
||||
file_content,
|
||||
)
|
||||
.into_response())
|
||||
}
|
||||
Reference in New Issue
Block a user