2025-09-13 18:50:03 +02:00
parent 820ef4bc49
commit 07f5b8d363
8 changed files with 1510 additions and 30 deletions


@@ -0,0 +1,303 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.hero.herofs
import time
import os
// Advanced example of using HeroFS - the Hero Filesystem
// Demonstrates more complex operations including:
// - File operations (move, rename, metadata)
// - Symlinks
// - Binary data handling
// - Directory hierarchies
// - Searching and filtering
fn main() {
// Initialize the HeroFS factory
mut fs_factory := herofs.new()!
println('HeroFS factory initialized')
// Create a new filesystem
mut my_fs := fs_factory.fs.new(
name: 'project_workspace'
description: 'Project development workspace'
quota_bytes: 5 * 1024 * 1024 * 1024 // 5GB quota
)!
// Save the filesystem to get an ID
fs_id := fs_factory.fs.set(my_fs)!
println('Created filesystem: ${my_fs.name} with ID: ${fs_id}')
// Create root directory
mut root_dir := fs_factory.fs_dir.new(
name: 'root'
fs_id: fs_id
parent_id: 0 // Root has no parent
description: 'Root directory'
)!
// Save the root directory
root_dir_id := fs_factory.fs_dir.set(root_dir)!
println('Created root directory with ID: ${root_dir_id}')
// Update the filesystem with the root directory ID
my_fs.root_dir_id = root_dir_id
fs_factory.fs.set(my_fs)!
// Create a directory hierarchy
println('\nCreating directory hierarchy...')
// Main project directories
mut src_dir := fs_factory.fs_dir.new(
name: 'src'
fs_id: fs_id
parent_id: root_dir_id
description: 'Source code'
)!
src_dir_id := fs_factory.fs_dir.set(src_dir)!
mut docs_dir := fs_factory.fs_dir.new(
name: 'docs'
fs_id: fs_id
parent_id: root_dir_id
description: 'Documentation'
)!
docs_dir_id := fs_factory.fs_dir.set(docs_dir)!
mut assets_dir := fs_factory.fs_dir.new(
name: 'assets'
fs_id: fs_id
parent_id: root_dir_id
description: 'Project assets'
)!
assets_dir_id := fs_factory.fs_dir.set(assets_dir)!
// Subdirectories
mut images_dir := fs_factory.fs_dir.new(
name: 'images'
fs_id: fs_id
parent_id: assets_dir_id
description: 'Image assets'
)!
images_dir_id := fs_factory.fs_dir.set(images_dir)!
mut api_docs_dir := fs_factory.fs_dir.new(
name: 'api'
fs_id: fs_id
parent_id: docs_dir_id
description: 'API documentation'
)!
api_docs_dir_id := fs_factory.fs_dir.set(api_docs_dir)!
println('Directory hierarchy created successfully')
// Create some files with different content types
println('\nCreating various files...')
// Text file for source code
code_content := 'fn main() {\n println("Hello, HeroFS!")\n}\n'.bytes()
mut code_blob := fs_factory.fs_blob.new(
data: code_content
mime_type: 'text/plain'
name: 'main.v blob'
)!
code_blob_id := fs_factory.fs_blob.set(code_blob)!
mut code_file := fs_factory.fs_file.new(
name: 'main.v'
fs_id: fs_id
directories: [src_dir_id]
blobs: [code_blob_id]
mime_type: 'text/plain'
metadata: {
'language': 'vlang',
'version': '0.3.3'
}
)!
code_file_id := fs_factory.fs_file.set(code_file)!
// Markdown documentation file
docs_content := '# API Documentation\n\n## Endpoints\n\n- GET /api/v1/users\n- POST /api/v1/users\n'.bytes()
mut docs_blob := fs_factory.fs_blob.new(
data: docs_content
mime_type: 'text/markdown'
name: 'api.md blob'
)!
docs_blob_id := fs_factory.fs_blob.set(docs_blob)!
mut docs_file := fs_factory.fs_file.new(
name: 'api.md'
fs_id: fs_id
directories: [api_docs_dir_id]
blobs: [docs_blob_id]
mime_type: 'text/markdown'
)!
docs_file_id := fs_factory.fs_file.set(docs_file)!
// Create a binary file (sample image)
// For this example, we'll just generate a deterministic byte pattern as stand-in image data
mut image_data := []u8{len: 1024, init: u8(index % 256)}
mut image_blob := fs_factory.fs_blob.new(
data: image_data
mime_type: 'image/png'
name: 'logo.png blob'
)!
image_blob_id := fs_factory.fs_blob.set(image_blob)!
mut image_file := fs_factory.fs_file.new(
name: 'logo.png'
fs_id: fs_id
directories: [images_dir_id]
blobs: [image_blob_id]
mime_type: 'image/png'
metadata: {
'width': '200',
'height': '100',
'format': 'PNG'
}
)!
image_file_id := fs_factory.fs_file.set(image_file)!
println('Files created successfully')
// Create symlinks
println('\nCreating symlinks...')
// Symlink to the API docs from the root directory
mut api_symlink := fs_factory.fs_symlink.new(
name: 'api-docs'
fs_id: fs_id
parent_id: root_dir_id
target_id: api_docs_dir_id
target_type: .directory
description: 'Shortcut to API documentation'
)!
api_symlink_id := fs_factory.fs_symlink.set(api_symlink)!
// Symlink to the logo from the docs directory
mut logo_symlink := fs_factory.fs_symlink.new(
name: 'logo.png'
fs_id: fs_id
parent_id: docs_dir_id
target_id: image_file_id
target_type: .file
description: 'Shortcut to project logo'
)!
logo_symlink_id := fs_factory.fs_symlink.set(logo_symlink)!
println('Symlinks created successfully')
// Demonstrate file operations
println('\nDemonstrating file operations...')
// 1. Move a file to multiple directories (hard link-like behavior)
println('Moving logo.png to both images and docs directories...')
fs_factory.fs_file.move(image_file_id, [images_dir_id, docs_dir_id])!
image_file = fs_factory.fs_file.get(image_file_id)! // refresh local copy after the move
// 2. Rename a file
println('Renaming main.v to app.v...')
fs_factory.fs_file.rename(code_file_id, 'app.v')!
code_file = fs_factory.fs_file.get(code_file_id)! // refresh local copy after the rename
// 3. Update file metadata
println('Updating file metadata...')
fs_factory.fs_file.update_metadata(docs_file_id, 'status', 'draft')!
fs_factory.fs_file.update_metadata(docs_file_id, 'author', 'HeroFS Team')!
// 4. Update file access time when "reading" it
println('Updating file access time...')
fs_factory.fs_file.update_accessed(docs_file_id)!
// 5. Add additional content to a file (append a blob)
println('Appending content to API docs...')
additional_content := '\n## Authentication\n\nUse Bearer token for authentication.\n'.bytes()
mut additional_blob := fs_factory.fs_blob.new(
data: additional_content
mime_type: 'text/markdown'
name: 'api_append.md blob'
)!
additional_blob_id := fs_factory.fs_blob.set(additional_blob)!
fs_factory.fs_file.append_blob(docs_file_id, additional_blob_id)!
// Demonstrate directory operations
println('\nDemonstrating directory operations...')
// 1. Create a new directory and move it
mut temp_dir := fs_factory.fs_dir.new(
name: 'temp'
fs_id: fs_id
parent_id: root_dir_id
description: 'Temporary directory'
)!
temp_dir_id := fs_factory.fs_dir.set(temp_dir)!
println('Moving temp directory to be under docs...')
fs_factory.fs_dir.move(temp_dir_id, docs_dir_id)!
// 2. Rename a directory
println('Renaming temp directory to drafts...')
fs_factory.fs_dir.rename(temp_dir_id, 'drafts')!
// 3. Check if a directory has children
has_children := fs_factory.fs_dir.has_children(docs_dir_id)!
println('Does docs directory have children? ${has_children}')
// Demonstrate searching and filtering
println('\nDemonstrating searching and filtering...')
// 1. List all files in the filesystem
all_files := fs_factory.fs_file.list_by_filesystem(fs_id)!
println('All files in filesystem (${all_files.len}):')
for file in all_files {
println('- ${file.name} (ID: ${file.id})')
}
// 2. List files by MIME type
markdown_files := fs_factory.fs_file.list_by_mime_type('text/markdown')!
println('\nMarkdown files (${markdown_files.len}):')
for file in markdown_files {
println('- ${file.name} (ID: ${file.id})')
}
// 3. List all symlinks
all_symlinks := fs_factory.fs_symlink.list_by_filesystem(fs_id)!
println('\nAll symlinks (${all_symlinks.len}):')
for symlink in all_symlinks {
target_type_str := if symlink.target_type == .file { 'file' } else { 'directory' }
println('- ${symlink.name} -> ${symlink.target_id} (${target_type_str})')
}
// 4. Check for broken symlinks
println('\nChecking for broken symlinks:')
for symlink in all_symlinks {
is_broken := fs_factory.fs_symlink.is_broken(symlink.id)!
status := if is_broken { 'BROKEN' } else { 'OK' }
println('- ${symlink.name}: ${status}')
}
// Demonstrate file content retrieval
println('\nDemonstrating file content retrieval:')
// Get the updated API docs file and print its content
docs_file = fs_factory.fs_file.get(docs_file_id)!
println('Content of ${docs_file.name}:')
mut full_content := ''
for blob_id in docs_file.blobs {
blob := fs_factory.fs_blob.get(blob_id)!
full_content += blob.data.bytestr()
}
println('---BEGIN CONTENT---')
println(full_content)
println('---END CONTENT---')
// Print filesystem usage
println('\nFilesystem usage:')
my_fs = fs_factory.fs.get(fs_id)!
println('Used: ${my_fs.used_bytes} bytes')
println('Quota: ${my_fs.quota_bytes} bytes')
println('Available: ${my_fs.quota_bytes - my_fs.used_bytes} bytes')
println('\nHeroFS advanced example completed successfully!')
}


@@ -0,0 +1,109 @@
#!/usr/bin/env -S v -n -w -cg -gc none -cc tcc -d use_openssl -enable-globals run
import freeflowuniverse.herolib.core.redisclient
import freeflowuniverse.herolib.hero.herofs
// Basic example of using HeroFS - the Hero Filesystem
// Demonstrates creating a filesystem, directories, and files
fn main() {
// Initialize the HeroFS factory
mut fs_factory := herofs.new()!
println('HeroFS factory initialized')
// Create a new filesystem
mut my_fs := fs_factory.fs.new(
name: 'my_documents'
description: 'Personal documents filesystem'
quota_bytes: 1024 * 1024 * 1024 // 1GB quota
)!
// Save the filesystem to get an ID
fs_id := fs_factory.fs.set(my_fs)!
println('Created filesystem: ${my_fs.name} with ID: ${fs_id}')
// Create root directory
mut root_dir := fs_factory.fs_dir.new(
name: 'root'
fs_id: fs_id
parent_id: 0 // Root has no parent
description: 'Root directory'
)!
// Save the root directory
root_dir_id := fs_factory.fs_dir.set(root_dir)!
println('Created root directory with ID: ${root_dir_id}')
// Update the filesystem with the root directory ID
my_fs.root_dir_id = root_dir_id
fs_factory.fs.set(my_fs)!
// Create some subdirectories
mut docs_dir := fs_factory.fs_dir.new(
name: 'documents'
fs_id: fs_id
parent_id: root_dir_id
description: 'Documents directory'
)!
mut pics_dir := fs_factory.fs_dir.new(
name: 'pictures'
fs_id: fs_id
parent_id: root_dir_id
description: 'Pictures directory'
)!
// Save the subdirectories
docs_dir_id := fs_factory.fs_dir.set(docs_dir)!
pics_dir_id := fs_factory.fs_dir.set(pics_dir)!
println('Created documents directory with ID: ${docs_dir_id}')
println('Created pictures directory with ID: ${pics_dir_id}')
// Create a text file blob
text_content := 'Hello, world! This is a test file in HeroFS.'.bytes()
mut text_blob := fs_factory.fs_blob.new(
data: text_content
mime_type: 'text/plain'
name: 'hello.txt blob'
)!
// Save the blob
blob_id := fs_factory.fs_blob.set(text_blob)!
println('Created text blob with ID: ${blob_id}')
// Create a file referencing the blob
mut text_file := fs_factory.fs_file.new(
name: 'hello.txt'
fs_id: fs_id
directories: [docs_dir_id]
blobs: [blob_id]
mime_type: 'text/plain'
)!
// Save the file
file_id := fs_factory.fs_file.set(text_file)!
println('Created text file with ID: ${file_id}')
// List all directories in the filesystem
dirs := fs_factory.fs_dir.list_by_filesystem(fs_id)!
println('\nAll directories in filesystem:')
for dir in dirs {
println('- ${dir.name} (ID: ${dir.id})')
}
// List all files in the documents directory
files := fs_factory.fs_file.list_by_directory(docs_dir_id)!
println('\nFiles in documents directory:')
for file in files {
println('- ${file.name} (ID: ${file.id}, Size: ${file.size_bytes} bytes)')
// Get the file's content from its blobs
if file.blobs.len > 0 {
blob := fs_factory.fs_blob.get(file.blobs[0])!
content := blob.data.bytestr()
println(' Content: "${content}"')
}
}
println('\nHeroFS basic example completed successfully!')
}

lib/hero/herofs/factory.v

@@ -0,0 +1,33 @@
module herofs
import freeflowuniverse.herolib.hero.db
pub struct FsFactory {
pub mut:
fs DBFs
fs_blob DBFsBlob
fs_dir DBFsDir
fs_file DBFsFile
fs_symlink DBFsSymlink
}
pub fn new() !FsFactory {
mut mydb := db.new()!
return FsFactory{
fs: DBFs{
db: &mydb
},
fs_blob: DBFsBlob{
db: &mydb
},
fs_dir: DBFsDir{
db: &mydb
},
fs_file: DBFsFile{
db: &mydb
},
fs_symlink: DBFsSymlink{
db: &mydb
}
}
}


@@ -3,10 +3,14 @@ module herofs
import time
import crypto.blake3
import json
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.data.ourtime
import freeflowuniverse.herolib.hero.db
// Fs represents a filesystem: the top-level container for files, directories and symlinks; blobs are shared across filesystems
@[heap]
pub struct Fs {
db.Base
pub mut:
name string
group_id u32 // Associated group for permissions
@@ -15,5 +19,120 @@ pub mut:
used_bytes u64 // Current usage in bytes
}
// We only keep the root directory ID here; other directories can be found by querying parent_id in FsDir
pub struct DBFs {
pub mut:
db &db.DB @[skip; str: skip]
}
pub fn (self Fs) type_name() string {
return 'fs'
}
pub fn (self Fs) dump(mut e &encoder.Encoder) ! {
e.add_string(self.name)
e.add_u32(self.group_id)
e.add_u32(self.root_dir_id)
e.add_u64(self.quota_bytes)
e.add_u64(self.used_bytes)
}
fn (mut self DBFs) load(mut o Fs, mut e &encoder.Decoder) ! {
o.name = e.get_string()!
o.group_id = e.get_u32()!
o.root_dir_id = e.get_u32()!
o.quota_bytes = e.get_u64()!
o.used_bytes = e.get_u64()!
}
@[params]
pub struct FsArg {
pub mut:
name string @[required]
description string
group_id u32
root_dir_id u32
quota_bytes u64
used_bytes u64
tags []string
comments []db.CommentArg
}
// create a new filesystem in memory; it is not stored in the DB until set() is called
pub fn (mut self DBFs) new(args FsArg) !Fs {
mut o := Fs{
name: args.name
group_id: args.group_id
root_dir_id: args.root_dir_id
quota_bytes: args.quota_bytes
used_bytes: args.used_bytes
}
// Set base fields
o.description = args.description
o.tags = self.db.tags_get(args.tags)!
o.comments = self.db.comments_get(args.comments)!
o.updated_at = ourtime.now().unix()
return o
}
pub fn (mut self DBFs) set(o Fs) !u32 {
return self.db.set[Fs](o)!
}
pub fn (mut self DBFs) delete(id u32) ! {
self.db.delete[Fs](id)!
}
pub fn (mut self DBFs) exist(id u32) !bool {
return self.db.exists[Fs](id)!
}
pub fn (mut self DBFs) get(id u32) !Fs {
mut o, data := self.db.get_data[Fs](id)!
mut e_decoder := encoder.decoder_new(data)
self.load(mut o, mut e_decoder)!
return o
}
pub fn (mut self DBFs) list() ![]Fs {
return self.db.list[Fs]()!.map(self.get(it)!)
}
// Additional hset operations for efficient lookups
pub fn (mut self DBFs) get_by_name(name string) !Fs {
// We'll store a mapping of name -> id in a separate hash
id_str := self.db.redis.hget('fs:names', name)!
if id_str == '' {
return error('Filesystem with name "${name}" not found')
}
return self.get(id_str.u32())!
}
// Custom method to increase used_bytes
pub fn (mut self DBFs) increase_usage(id u32, bytes u64) !u64 {
mut fs := self.get(id)!
fs.used_bytes += bytes
self.set(fs)!
return fs.used_bytes
}
// Custom method to decrease used_bytes
pub fn (mut self DBFs) decrease_usage(id u32, bytes u64) !u64 {
mut fs := self.get(id)!
if bytes > fs.used_bytes {
fs.used_bytes = 0
} else {
fs.used_bytes -= bytes
}
self.set(fs)!
return fs.used_bytes
}
// Check if quota is exceeded
pub fn (mut self DBFs) check_quota(id u32, additional_bytes u64) !bool {
fs := self.get(id)!
return (fs.used_bytes + additional_bytes) <= fs.quota_bytes
}
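Note: the quota helpers above (check_quota, increase_usage) are not exercised by the example scripts. A minimal sketch of one possible quota-aware write flow, assuming a factory from herofs.new() and the DBFs/DBFsBlob methods defined in this commit:
import freeflowuniverse.herolib.hero.herofs
fn main() {
	mut fs_factory := herofs.new()!
	my_fs := fs_factory.fs.new(name: 'quota_demo', quota_bytes: 1024 * 1024)!
	fs_id := fs_factory.fs.set(my_fs)!
	payload := 'some content'.bytes()
	// Refuse the write up front if it would push usage past the quota
	ok := fs_factory.fs.check_quota(fs_id, u64(payload.len))!
	if !ok {
		println('quota exceeded, refusing write')
		return
	}
	blob := fs_factory.fs_blob.new(data: payload, mime_type: 'text/plain')!
	fs_factory.fs_blob.set(blob)!
	// Book the written bytes against the filesystem's usage counter
	fs_factory.fs.increase_usage(fs_id, u64(payload.len))!
}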


@@ -2,42 +2,151 @@ module herofs
import time
import crypto.blake3
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.data.ourtime
import freeflowuniverse.herolib.hero.db
// FsBlob represents binary data up to 1MB
@[heap]
pub struct FsBlob {
db.Base
pub mut:
hash string // blake192 hash of content
data []u8 // Binary data (max 1MB)
size_bytes int // Size in bytes
created_at i64
mime_type string // MIME type
encoding string // Encoding type
}
pub struct DBFsBlob {
pub mut:
db &db.DB @[skip; str: skip]
}
pub fn (self FsBlob) type_name() string {
return 'fs_blob'
}
pub fn (self FsBlob) dump(mut e &encoder.Encoder) ! {
e.add_string(self.hash)
e.add_list_u8(self.data)
e.add_int(self.size_bytes)
e.add_i64(self.created_at)
e.add_string(self.mime_type)
e.add_string(self.encoding)
}
fn (mut self DBFsBlob) load(mut o FsBlob, mut e &encoder.Decoder) ! {
o.hash = e.get_string()!
o.data = e.get_list_u8()!
o.size_bytes = e.get_int()!
o.created_at = e.get_i64()!
o.mime_type = e.get_string()!
o.encoding = e.get_string()!
}
@[params]
pub struct FsBlobArg {
pub mut:
data []u8 @[required]
mime_type string
encoding string
name string
description string
tags []string
comments []db.CommentArg
}
pub fn (mut blob FsBlob) calculate_hash() {
hash := blake3.sum256(blob.data)
blob.hash = hash.hex()[..48] // blake192 = first 192 bits = 48 hex chars
}
// create a new blob in memory; it is not stored in the DB until set() is called
pub fn (mut self DBFsBlob) new(args FsBlobArg) !FsBlob {
if args.data.len > 1024 * 1024 { // 1MB limit
return error('Blob size exceeds 1MB limit')
}
mut o := FsBlob{
data: args.data
size_bytes: args.data.len
created_at: ourtime.now().unix()
mime_type: args.mime_type
encoding: if args.encoding == '' { 'none' } else { args.encoding }
}
// Calculate hash
o.calculate_hash()
// Set base fields
o.name = args.name
o.description = args.description
o.tags = self.db.tags_get(args.tags)!
o.comments = self.db.comments_get(args.comments)!
o.updated_at = ourtime.now().unix()
return o
}
// A separate hset maps the content hash back to the u32 id, so blobs can be deduplicated and looked up by hash
pub fn (mut self DBFsBlob) set(o FsBlob) !u32 {
// Check if a blob with this hash already exists
hash_id := self.db.redis.hget('fsblob:hashes', o.hash)!
if hash_id != '' {
// Blob already exists, return existing ID
return hash_id.u32()
}
// Use db set function which now returns the ID
id := self.db.set[FsBlob](o)!
// Store the hash -> id mapping for lookup
self.db.redis.hset('fsblob:hashes', o.hash, id.str())!
return id
}
pub fn (mut self DBFsBlob) delete(id u32) ! {
// Get the blob to retrieve its hash
mut blob := self.get(id)!
// Remove hash -> id mapping
self.db.redis.hdel('fsblob:hashes', blob.hash)!
// Delete the blob
self.db.delete[FsBlob](id)!
}
pub fn (mut self DBFsBlob) exist(id u32) !bool {
return self.db.exists[FsBlob](id)!
}
pub fn (mut self DBFsBlob) get(id u32) !FsBlob {
mut o, data := self.db.get_data[FsBlob](id)!
mut e_decoder := encoder.decoder_new(data)
self.load(mut o, mut e_decoder)!
return o
}
pub fn (mut self DBFsBlob) list() ![]FsBlob {
return self.db.list[FsBlob]()!.map(self.get(it)!)
}
pub fn (mut self DBFsBlob) get_by_hash(hash string) !FsBlob {
id_str := self.db.redis.hget('fsblob:hashes', hash)!
if id_str == '' {
return error('Blob with hash "${hash}" not found')
}
return self.get(id_str.u32())!
}
pub fn (mut self DBFsBlob) exists_by_hash(hash string) !bool {
id_str := self.db.redis.hget('fsblob:hashes', hash)!
return id_str != ''
}
pub fn (blob FsBlob) verify_integrity() bool {
hash := blake3.sum256(blob.data)
return hash.hex()[..48] == blob.hash
}
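Note: the hash -> id mapping kept in 'fsblob:hashes' makes blob storage content-addressed, so writing the same bytes twice returns the same ID. A minimal sketch, assuming a factory from herofs.new():
import freeflowuniverse.herolib.hero.herofs
fn main() {
	mut fs_factory := herofs.new()!
	data := 'identical payload'.bytes()
	a := fs_factory.fs_blob.new(data: data, mime_type: 'text/plain')!
	b := fs_factory.fs_blob.new(data: data, mime_type: 'text/plain')!
	id_a := fs_factory.fs_blob.set(a)!
	id_b := fs_factory.fs_blob.set(b)!
	// The second set finds the hash in 'fsblob:hashes' and returns the existing ID
	assert id_a == id_b
	// Blobs can also be fetched back by their blake192 content hash
	same := fs_factory.fs_blob.get_by_hash(a.hash)!
	assert same.verify_integrity()
}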


@@ -3,14 +3,205 @@ module herofs
import time
import crypto.blake3
import json
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.data.ourtime
import freeflowuniverse.herolib.hero.db
// FsDir represents a directory in a filesystem
@[heap]
pub struct FsDir {
db.Base
pub mut:
name string
fs_id u32 // Associated filesystem
parent_id u32 // Parent directory ID (0 for root)
}
// We only keep the parent reference, not the children; children are found by querying parent_id, with Redis set indexes keeping these lookups fast and efficient
pub struct DBFsDir {
pub mut:
db &db.DB @[skip; str: skip]
}
pub fn (self FsDir) type_name() string {
return 'fs_dir'
}
pub fn (self FsDir) dump(mut e &encoder.Encoder) ! {
e.add_string(self.name)
e.add_u32(self.fs_id)
e.add_u32(self.parent_id)
}
fn (mut self DBFsDir) load(mut o FsDir, mut e &encoder.Decoder) ! {
o.name = e.get_string()!
o.fs_id = e.get_u32()!
o.parent_id = e.get_u32()!
}
@[params]
pub struct FsDirArg {
pub mut:
name string @[required]
description string
fs_id u32 @[required]
parent_id u32
tags []string
comments []db.CommentArg
}
// create a new directory in memory; it is not stored in the DB until set() is called
pub fn (mut self DBFsDir) new(args FsDirArg) !FsDir {
mut o := FsDir{
name: args.name,
fs_id: args.fs_id,
parent_id: args.parent_id
}
// Set base fields
o.description = args.description
o.tags = self.db.tags_get(args.tags)!
o.comments = self.db.comments_get(args.comments)!
o.updated_at = ourtime.now().unix()
return o
}
pub fn (mut self DBFsDir) set(o FsDir) !u32 {
id := self.db.set[FsDir](o)!
// Store directory in filesystem's directory index
path_key := '${o.fs_id}:${o.parent_id}:${o.name}'
self.db.redis.hset('fsdir:paths', path_key, id.str())!
// Store in filesystem's directory list
self.db.redis.sadd('fsdir:fs:${o.fs_id}', id.str())!
// Store in parent's children list
if o.parent_id > 0 {
self.db.redis.sadd('fsdir:children:${o.parent_id}', id.str())!
}
return id
}
pub fn (mut self DBFsDir) delete(id u32) ! {
// Get the directory info before deleting
dir := self.get(id)!
// Check if directory has children
children := self.db.redis.smembers('fsdir:children:${id}')!
if children.len > 0 {
return error('Cannot delete directory ${dir.name} (ID: ${id}) because it has ${children.len} children')
}
// Remove from path index
path_key := '${dir.fs_id}:${dir.parent_id}:${dir.name}'
self.db.redis.hdel('fsdir:paths', path_key)!
// Remove from filesystem's directory list
self.db.redis.srem('fsdir:fs:${dir.fs_id}', id.str())!
// Remove from parent's children list
if dir.parent_id > 0 {
self.db.redis.srem('fsdir:children:${dir.parent_id}', id.str())!
}
// Delete the directory itself
self.db.delete[FsDir](id)!
}
pub fn (mut self DBFsDir) exist(id u32) !bool {
return self.db.exists[FsDir](id)!
}
pub fn (mut self DBFsDir) get(id u32) !FsDir {
mut o, data := self.db.get_data[FsDir](id)!
mut e_decoder := encoder.decoder_new(data)
self.load(mut o, mut e_decoder)!
return o
}
pub fn (mut self DBFsDir) list() ![]FsDir {
return self.db.list[FsDir]()!.map(self.get(it)!)
}
// Get directory by path components
pub fn (mut self DBFsDir) get_by_path(fs_id u32, parent_id u32, name string) !FsDir {
path_key := '${fs_id}:${parent_id}:${name}'
id_str := self.db.redis.hget('fsdir:paths', path_key)!
if id_str == '' {
return error('Directory "${name}" not found in filesystem ${fs_id} under parent ${parent_id}')
}
return self.get(id_str.u32())!
}
// Get all directories in a filesystem
pub fn (mut self DBFsDir) list_by_filesystem(fs_id u32) ![]FsDir {
dir_ids := self.db.redis.smembers('fsdir:fs:${fs_id}')!
mut dirs := []FsDir{}
for id_str in dir_ids {
dirs << self.get(id_str.u32())!
}
return dirs
}
// Get children of a directory
pub fn (mut self DBFsDir) list_children(dir_id u32) ![]FsDir {
child_ids := self.db.redis.smembers('fsdir:children:${dir_id}')!
mut dirs := []FsDir{}
for id_str in child_ids {
dirs << self.get(id_str.u32())!
}
return dirs
}
// Check if a directory has children
pub fn (mut self DBFsDir) has_children(dir_id u32) !bool {
count := self.db.redis.scard('fsdir:children:${dir_id}')!
return count > 0
}
// Rename a directory
pub fn (mut self DBFsDir) rename(id u32, new_name string) !u32 {
mut dir := self.get(id)!
// Remove old path index
old_path_key := '${dir.fs_id}:${dir.parent_id}:${dir.name}'
self.db.redis.hdel('fsdir:paths', old_path_key)!
// Update name
dir.name = new_name
// Save with new name
return self.set(dir)!
}
// Move a directory to a new parent
pub fn (mut self DBFsDir) move(id u32, new_parent_id u32) !u32 {
mut dir := self.get(id)!
// Check that new parent exists and is in the same filesystem
if new_parent_id > 0 {
parent := self.get(new_parent_id)!
if parent.fs_id != dir.fs_id {
return error('Cannot move directory across filesystems')
}
}
// Remove old path index
old_path_key := '${dir.fs_id}:${dir.parent_id}:${dir.name}'
self.db.redis.hdel('fsdir:paths', old_path_key)!
// Remove from old parent's children list
if dir.parent_id > 0 {
self.db.redis.srem('fsdir:children:${dir.parent_id}', id.str())!
}
// Update parent
dir.parent_id = new_parent_id
// Save with new parent
return self.set(dir)!
}
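Note: since only parent_id is stored, resolving a nested path such as 'docs/api' means walking its components with get_by_path. A minimal sketch, assuming a known root directory ID; resolve_dir_path is a hypothetical helper, not part of this module:
import freeflowuniverse.herolib.hero.herofs
// Walk a slash-separated path from the root directory down to the target directory.
fn resolve_dir_path(mut factory herofs.FsFactory, fs_id u32, root_id u32, path string) !herofs.FsDir {
	mut current := factory.fs_dir.get(root_id)!
	for part in path.trim('/').split('/') {
		if part == '' {
			continue
		}
		current = factory.fs_dir.get_by_path(fs_id, current.id, part)!
	}
	return current
}
With the IDs from the advanced example above, resolve_dir_path(mut fs_factory, fs_id, root_dir_id, 'docs/api') would return the API documentation directory.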


@@ -3,15 +3,19 @@ module herofs
import time
import crypto.blake3
import json
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.data.ourtime
import freeflowuniverse.herolib.hero.db
// FsFile represents a file in a filesystem
@[heap]
pub struct FsFile {
db.Base
pub mut:
name string
fs_id u32 // Associated filesystem
directories []u32 // Directory IDs where this file exists; a file can belong to multiple directories (like hard links in Linux)
blobs []u32 // IDs of file content blobs
size_bytes u64
mime_type string // e.g., "image/png"
checksum string // e.g., SHA256 checksum of the file
@@ -19,3 +23,347 @@ pub mut:
metadata map[string]string // Custom metadata
}
pub struct DBFsFile {
pub mut:
db &db.DB @[skip; str: skip]
}
pub fn (self FsFile) type_name() string {
return 'fs_file'
}
pub fn (self FsFile) dump(mut e &encoder.Encoder) ! {
e.add_string(self.name)
e.add_u32(self.fs_id)
// Handle directories
e.add_u16(u16(self.directories.len))
for dir_id in self.directories {
e.add_u32(dir_id)
}
// Handle blobs
e.add_u16(u16(self.blobs.len))
for blob_id in self.blobs {
e.add_u32(blob_id)
}
e.add_u64(self.size_bytes)
e.add_string(self.mime_type)
e.add_string(self.checksum)
e.add_i64(self.accessed_at)
// Handle metadata map
e.add_u16(u16(self.metadata.len))
for key, value in self.metadata {
e.add_string(key)
e.add_string(value)
}
}
fn (mut self DBFsFile) load(mut o FsFile, mut e &encoder.Decoder) ! {
o.name = e.get_string()!
o.fs_id = e.get_u32()!
// Load directories
dirs_count := e.get_u16()!
o.directories = []u32{cap: int(dirs_count)}
for _ in 0 .. dirs_count {
o.directories << e.get_u32()!
}
// Load blobs
blobs_count := e.get_u16()!
o.blobs = []u32{cap: int(blobs_count)}
for _ in 0 .. blobs_count {
o.blobs << e.get_u32()!
}
o.size_bytes = e.get_u64()!
o.mime_type = e.get_string()!
o.checksum = e.get_string()!
o.accessed_at = e.get_i64()!
// Load metadata map
metadata_count := e.get_u16()!
o.metadata = map[string]string{}
for _ in 0 .. metadata_count {
key := e.get_string()!
value := e.get_string()!
o.metadata[key] = value
}
}
@[params]
pub struct FsFileArg {
pub mut:
name string @[required]
description string
fs_id u32 @[required]
directories []u32 @[required]
blobs []u32
size_bytes u64
mime_type string
checksum string
metadata map[string]string
tags []string
comments []db.CommentArg
}
// create a new file in memory; it is not stored in the DB until set() is called
pub fn (mut self DBFsFile) new(args FsFileArg) !FsFile {
// Calculate size based on blobs if not provided
mut size := args.size_bytes
if size == 0 && args.blobs.len > 0 {
// We'll need to sum the sizes of all blobs
for blob_id in args.blobs {
blob_exists := self.db.exists[FsBlob](blob_id)!
if !blob_exists {
return error('Blob with ID ${blob_id} does not exist')
}
// Get blob data
mut blob_obj, blob_data := self.db.get_data[FsBlob](blob_id)!
mut e_decoder := encoder.decoder_new(blob_data)
// Skip hash
e_decoder.get_string()!
// Skip data, get size directly
e_decoder.get_list_u8()!
size += u64(e_decoder.get_int()!)
}
}
mut o := FsFile{
name: args.name,
fs_id: args.fs_id,
directories: args.directories,
blobs: args.blobs,
size_bytes: size,
mime_type: args.mime_type,
checksum: args.checksum,
accessed_at: ourtime.now().unix(),
metadata: args.metadata
}
// Set base fields
o.description = args.description
o.tags = self.db.tags_get(args.tags)!
o.comments = self.db.comments_get(args.comments)!
o.updated_at = ourtime.now().unix()
return o
}
pub fn (mut self DBFsFile) set(o FsFile) !u32 {
// Check that directories exist
for dir_id in o.directories {
dir_exists := self.db.exists[FsDir](dir_id)!
if !dir_exists {
return error('Directory with ID ${dir_id} does not exist')
}
}
// Check that blobs exist
for blob_id in o.blobs {
blob_exists := self.db.exists[FsBlob](blob_id)!
if !blob_exists {
return error('Blob with ID ${blob_id} does not exist')
}
}
id := self.db.set[FsFile](o)!
// Store file in each directory's file index
for dir_id in o.directories {
// Store by name in each directory
path_key := '${dir_id}:${o.name}'
self.db.redis.hset('fsfile:paths', path_key, id.str())!
// Add to directory's file list
self.db.redis.sadd('fsfile:dir:${dir_id}', id.str())!
}
// Store in filesystem's file list
self.db.redis.sadd('fsfile:fs:${o.fs_id}', id.str())!
// Store by mimetype
if o.mime_type != '' {
self.db.redis.sadd('fsfile:mime:${o.mime_type}', id.str())!
}
return id
}
pub fn (mut self DBFsFile) delete(id u32) ! {
// Get the file info before deleting
file := self.get(id)!
// Remove from each directory's file index
for dir_id in file.directories {
// Remove from path index
path_key := '${dir_id}:${file.name}'
self.db.redis.hdel('fsfile:paths', path_key)!
// Remove from directory's file list
self.db.redis.srem('fsfile:dir:${dir_id}', id.str())!
}
// Remove from filesystem's file list
self.db.redis.srem('fsfile:fs:${file.fs_id}', id.str())!
// Remove from mimetype index
if file.mime_type != '' {
self.db.redis.srem('fsfile:mime:${file.mime_type}', id.str())!
}
// Delete the file itself
self.db.delete[FsFile](id)!
}
pub fn (mut self DBFsFile) exist(id u32) !bool {
return self.db.exists[FsFile](id)!
}
pub fn (mut self DBFsFile) get(id u32) !FsFile {
mut o, data := self.db.get_data[FsFile](id)!
mut e_decoder := encoder.decoder_new(data)
self.load(mut o, mut e_decoder)!
return o
}
pub fn (mut self DBFsFile) list() ![]FsFile {
return self.db.list[FsFile]()!.map(self.get(it)!)
}
// Get file by path in a specific directory
pub fn (mut self DBFsFile) get_by_path(dir_id u32, name string) !FsFile {
path_key := '${dir_id}:${name}'
id_str := self.db.redis.hget('fsfile:paths', path_key)!
if id_str == '' {
return error('File "${name}" not found in directory ${dir_id}')
}
return self.get(id_str.u32())!
}
// List files in a directory
pub fn (mut self DBFsFile) list_by_directory(dir_id u32) ![]FsFile {
file_ids := self.db.redis.smembers('fsfile:dir:${dir_id}')!
mut files := []FsFile{}
for id_str in file_ids {
files << self.get(id_str.u32())!
}
return files
}
// List files in a filesystem
pub fn (mut self DBFsFile) list_by_filesystem(fs_id u32) ![]FsFile {
file_ids := self.db.redis.smembers('fsfile:fs:${fs_id}')!
mut files := []FsFile{}
for id_str in file_ids {
files << self.get(id_str.u32())!
}
return files
}
// List files by mime type
pub fn (mut self DBFsFile) list_by_mime_type(mime_type string) ![]FsFile {
file_ids := self.db.redis.smembers('fsfile:mime:${mime_type}')!
mut files := []FsFile{}
for id_str in file_ids {
files << self.get(id_str.u32())!
}
return files
}
// Update file with a new blob (append)
pub fn (mut self DBFsFile) append_blob(id u32, blob_id u32) !u32 {
// Check blob exists
blob_exists := self.db.exists[FsBlob](blob_id)!
if !blob_exists {
return error('Blob with ID ${blob_id} does not exist')
}
// Get blob size
mut blob_obj, blob_data := self.db.get_data[FsBlob](blob_id)!
mut e_decoder := encoder.decoder_new(blob_data)
// Skip hash
e_decoder.get_string()!
// Skip data, get size directly
e_decoder.get_list_u8()!
blob_size := e_decoder.get_int()!
// Get file
mut file := self.get(id)!
// Add blob if not already in the list
if blob_id !in file.blobs {
file.blobs << blob_id
file.size_bytes += u64(blob_size)
file.updated_at = ourtime.now().unix()
}
// Save file
return self.set(file)!
}
// Update file accessed timestamp
pub fn (mut self DBFsFile) update_accessed(id u32) !u32 {
mut file := self.get(id)!
file.accessed_at = ourtime.now().unix()
return self.set(file)!
}
// Update file metadata
pub fn (mut self DBFsFile) update_metadata(id u32, key string, value string) !u32 {
mut file := self.get(id)!
file.metadata[key] = value
file.updated_at = ourtime.now().unix()
return self.set(file)!
}
// Rename a file
pub fn (mut self DBFsFile) rename(id u32, new_name string) !u32 {
mut file := self.get(id)!
// Remove old path indexes
for dir_id in file.directories {
old_path_key := '${dir_id}:${file.name}'
self.db.redis.hdel('fsfile:paths', old_path_key)!
}
// Update name
file.name = new_name
// Save with new name
return self.set(file)!
}
// Move file to different directories
pub fn (mut self DBFsFile) move(id u32, new_directories []u32) !u32 {
mut file := self.get(id)!
// Check that all new directories exist
for dir_id in new_directories {
dir_exists := self.db.exists[FsDir](dir_id)!
if !dir_exists {
return error('Directory with ID ${dir_id} does not exist')
}
}
// Remove from old directories
for dir_id in file.directories {
path_key := '${dir_id}:${file.name}'
self.db.redis.hdel('fsfile:paths', path_key)!
self.db.redis.srem('fsfile:dir:${dir_id}', id.str())!
}
// Update directories
file.directories = new_directories
// Save with new directories
return self.set(file)!
}
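Note: because a file records a list of directories, the same record is reachable by name from each directory it belongs to, much like a hard link. A minimal sketch, assuming directory IDs like those in the advanced example; same_file_in_both is a hypothetical helper for illustration:
import freeflowuniverse.herolib.hero.herofs
// Verify that one file name resolves to the same record from two different directories.
fn same_file_in_both(mut factory herofs.FsFactory, dir_a u32, dir_b u32, name string) !bool {
	from_a := factory.fs_file.get_by_path(dir_a, name)!
	from_b := factory.fs_file.get_by_path(dir_b, name)!
	return from_a.id == from_b.id
}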


@@ -3,10 +3,14 @@ module herofs
import time
import crypto.blake3
import json
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.data.ourtime
import freeflowuniverse.herolib.hero.db
// FsSymlink represents a symbolic link in a filesystem
@[heap]
pub struct FsSymlink {
db.Base
pub mut:
name string
fs_id u32 // Associated filesystem
@@ -20,3 +24,267 @@ pub enum SymlinkTargetType {
directory
}
pub struct DBFsSymlink {
pub mut:
db &db.DB @[skip; str: skip]
}
pub fn (self FsSymlink) type_name() string {
return 'fs_symlink'
}
pub fn (self FsSymlink) dump(mut e &encoder.Encoder) ! {
e.add_string(self.name)
e.add_u32(self.fs_id)
e.add_u32(self.parent_id)
e.add_u32(self.target_id)
e.add_u8(u8(self.target_type))
}
fn (mut self DBFsSymlink) load(mut o FsSymlink, mut e &encoder.Decoder) ! {
o.name = e.get_string()!
o.fs_id = e.get_u32()!
o.parent_id = e.get_u32()!
o.target_id = e.get_u32()!
o.target_type = unsafe { SymlinkTargetType(e.get_u8()!) }
}
@[params]
pub struct FsSymlinkArg {
pub mut:
name string @[required]
description string
fs_id u32 @[required]
parent_id u32 @[required]
target_id u32 @[required]
target_type SymlinkTargetType @[required]
tags []string
comments []db.CommentArg
}
// create a new symlink in memory; it is not stored in the DB until set() is called
pub fn (mut self DBFsSymlink) new(args FsSymlinkArg) !FsSymlink {
mut o := FsSymlink{
name: args.name,
fs_id: args.fs_id,
parent_id: args.parent_id,
target_id: args.target_id,
target_type: args.target_type
}
// Set base fields
o.description = args.description
o.tags = self.db.tags_get(args.tags)!
o.comments = self.db.comments_get(args.comments)!
o.updated_at = ourtime.now().unix()
return o
}
pub fn (mut self DBFsSymlink) set(o FsSymlink) !u32 {
// Check parent directory exists
if o.parent_id > 0 {
parent_exists := self.db.exists[FsDir](o.parent_id)!
if !parent_exists {
return error('Parent directory with ID ${o.parent_id} does not exist')
}
}
// Check target exists based on target type
if o.target_type == .file {
target_exists := self.db.exists[FsFile](o.target_id)!
if !target_exists {
return error('Target file with ID ${o.target_id} does not exist')
}
} else if o.target_type == .directory {
target_exists := self.db.exists[FsDir](o.target_id)!
if !target_exists {
return error('Target directory with ID ${o.target_id} does not exist')
}
}
id := self.db.set[FsSymlink](o)!
// Store symlink in parent directory's symlink index
path_key := '${o.parent_id}:${o.name}'
self.db.redis.hset('fssymlink:paths', path_key, id.str())!
// Add to parent's symlinks list
self.db.redis.sadd('fssymlink:parent:${o.parent_id}', id.str())!
// Store in filesystem's symlink list
self.db.redis.sadd('fssymlink:fs:${o.fs_id}', id.str())!
// Store in target's referrers list
target_key := '${o.target_type}:${o.target_id}'
self.db.redis.sadd('fssymlink:target:${target_key}', id.str())!
return id
}
pub fn (mut self DBFsSymlink) delete(id u32) ! {
// Get the symlink info before deleting
symlink := self.get(id)!
// Remove from path index
path_key := '${symlink.parent_id}:${symlink.name}'
self.db.redis.hdel('fssymlink:paths', path_key)!
// Remove from parent's symlinks list
self.db.redis.srem('fssymlink:parent:${symlink.parent_id}', id.str())!
// Remove from filesystem's symlink list
self.db.redis.srem('fssymlink:fs:${symlink.fs_id}', id.str())!
// Remove from target's referrers list
target_key := '${symlink.target_type}:${symlink.target_id}'
self.db.redis.srem('fssymlink:target:${target_key}', id.str())!
// Delete the symlink itself
self.db.delete[FsSymlink](id)!
}
pub fn (mut self DBFsSymlink) exist(id u32) !bool {
return self.db.exists[FsSymlink](id)!
}
pub fn (mut self DBFsSymlink) get(id u32) !FsSymlink {
mut o, data := self.db.get_data[FsSymlink](id)!
mut e_decoder := encoder.decoder_new(data)
self.load(mut o, mut e_decoder)!
return o
}
pub fn (mut self DBFsSymlink) list() ![]FsSymlink {
return self.db.list[FsSymlink]()!.map(self.get(it)!)
}
// Get symlink by path in a parent directory
pub fn (mut self DBFsSymlink) get_by_path(parent_id u32, name string) !FsSymlink {
path_key := '${parent_id}:${name}'
id_str := self.db.redis.hget('fssymlink:paths', path_key)!
if id_str == '' {
return error('Symlink "${name}" not found in parent directory ${parent_id}')
}
return self.get(id_str.u32())!
}
// List symlinks in a parent directory
pub fn (mut self DBFsSymlink) list_by_parent(parent_id u32) ![]FsSymlink {
symlink_ids := self.db.redis.smembers('fssymlink:parent:${parent_id}')!
mut symlinks := []FsSymlink{}
for id_str in symlink_ids {
symlinks << self.get(id_str.u32())!
}
return symlinks
}
// List symlinks in a filesystem
pub fn (mut self DBFsSymlink) list_by_filesystem(fs_id u32) ![]FsSymlink {
symlink_ids := self.db.redis.smembers('fssymlink:fs:${fs_id}')!
mut symlinks := []FsSymlink{}
for id_str in symlink_ids {
symlinks << self.get(id_str.u32())!
}
return symlinks
}
// List symlinks pointing to a target
pub fn (mut self DBFsSymlink) list_by_target(target_type SymlinkTargetType, target_id u32) ![]FsSymlink {
target_key := '${target_type}:${target_id}'
symlink_ids := self.db.redis.smembers('fssymlink:target:${target_key}')!
mut symlinks := []FsSymlink{}
for id_str in symlink_ids {
symlinks << self.get(id_str.u32())!
}
return symlinks
}
// Rename a symlink
pub fn (mut self DBFsSymlink) rename(id u32, new_name string) !u32 {
mut symlink := self.get(id)!
// Remove old path index
old_path_key := '${symlink.parent_id}:${symlink.name}'
self.db.redis.hdel('fssymlink:paths', old_path_key)!
// Update name
symlink.name = new_name
// Save with new name
return self.set(symlink)!
}
// Move symlink to a new parent directory
pub fn (mut self DBFsSymlink) move(id u32, new_parent_id u32) !u32 {
mut symlink := self.get(id)!
// Check that new parent exists and is in the same filesystem
if new_parent_id > 0 {
parent_data, _ := self.db.get_data[FsDir](new_parent_id)!
if parent_data.fs_id != symlink.fs_id {
return error('Cannot move symlink across filesystems')
}
}
// Remove old path index
old_path_key := '${symlink.parent_id}:${symlink.name}'
self.db.redis.hdel('fssymlink:paths', old_path_key)!
// Remove from old parent's symlinks list
self.db.redis.srem('fssymlink:parent:${symlink.parent_id}', id.str())!
// Update parent
symlink.parent_id = new_parent_id
// Save with new parent
return self.set(symlink)!
}
// Redirect symlink to a new target
pub fn (mut self DBFsSymlink) redirect(id u32, new_target_id u32, new_target_type SymlinkTargetType) !u32 {
mut symlink := self.get(id)!
// Check new target exists
if new_target_type == .file {
target_exists := self.db.exists[FsFile](new_target_id)!
if !target_exists {
return error('Target file with ID ${new_target_id} does not exist')
}
} else if new_target_type == .directory {
target_exists := self.db.exists[FsDir](new_target_id)!
if !target_exists {
return error('Target directory with ID ${new_target_id} does not exist')
}
}
// Remove from old target's referrers list
old_target_key := '${symlink.target_type}:${symlink.target_id}'
self.db.redis.srem('fssymlink:target:${old_target_key}', id.str())!
// Update target
symlink.target_id = new_target_id
symlink.target_type = new_target_type
// Save with new target
return self.set(symlink)!
}
// Resolve a symlink to get its target
pub fn (mut self DBFsSymlink) resolve(id u32) !u32 {
symlink := self.get(id)!
return symlink.target_id
}
// Check if a symlink is broken (target doesn't exist)
pub fn (mut self DBFsSymlink) is_broken(id u32) !bool {
symlink := self.get(id)!
if symlink.target_type == .file {
return !self.db.exists[FsFile](symlink.target_id)!
} else if symlink.target_type == .directory {
return !self.db.exists[FsDir](symlink.target_id)!
}
return true // Unknown target type is considered broken
}