This commit is contained in:
2025-09-15 10:20:09 +02:00
parent ab1044079e
commit 9a41f9e732
22 changed files with 248 additions and 9350 deletions

View File

@@ -4,7 +4,7 @@ module redisclient
pub struct RedisURL {
address string = '127.0.0.1'
port int = 6379
// db int
db int
}
pub fn get_redis_url(url string) !RedisURL {
@@ -20,5 +20,24 @@ pub fn get_redis_url(url string) !RedisURL {
// core_get opens a Redis connection for the given RedisURL and, when a
// database index > 0 is configured, selects that database on the client.
pub fn core_get(url RedisURL) !&Redis {
	mut client := new('${url.address}:${url.port}')!
	if url.db > 0 {
		client.selectdb(url.db)!
	}
	return client
}
// test_get returns a Redis client pointed at a test database.
// When the requested db is 0 (the production default) it is remapped
// to db 31 so tests never touch real data.
pub fn test_get(url_ RedisURL) !&Redis {
	mut effective := url_
	if effective.db == 0 {
		effective.db = 31
	}
	return core_get(effective)!
}
// test_delete flushes (empties) the test database selected by test_get
// and returns the still-usable connection.
// Fixes: the original referenced the undefined name `url` instead of the
// parameter `url_`, and never returned the `&Redis` its signature promises.
pub fn test_delete(url_ RedisURL) !&Redis {
	mut r := test_get(url_)!
	r.flush()!
	return r
}

View File

@@ -10,8 +10,21 @@ pub mut:
redis &redisclient.Redis @[skip; str: skip]
}
pub fn new() !DB {
mut redisconnection := redisclient.core_get()!
// Optional parameters for new(): lets callers (e.g. tests) inject an
// already-established Redis connection instead of opening the default one.
@[params]
pub struct DBArgs {
pub mut:
// pre-established connection; when none is given, new() opens one itself
redis ?&redisclient.Redis
}
// new returns a DB handle. If args.redis was supplied it is used as-is,
// otherwise a fresh connection is created via redisclient.core_get.
pub fn new(args DBArgs) !DB {
	mut conn := args.redis or { redisclient.core_get()! }
	return DB{
		redis: conn
	}
}
pub fn new_test() !DB {
mut redisconnection := redisclient.test_get()!
return DB{
redis: redisconnection
}

View File

@@ -1,6 +1,7 @@
module herofs
import freeflowuniverse.herolib.hero.db
import freeflowuniverse.herolib.core.redisclient
@[heap]
pub struct FsFactory {
@@ -13,8 +14,14 @@ pub mut:
fs_symlink DBFsSymlink
}
pub fn new() !FsFactory {
mut mydb := db.new()!
// Optional parameters for the factory constructor: lets callers inject a
// Redis connection (e.g. the test database) instead of using the default.
@[params]
pub struct DBArgs {
pub mut:
// pre-established connection; when none is given, the default is used
redis ?&redisclient.Redis
}
pub fn new(args DBArgs) !FsFactory {
mut mydb := db.new(redis:args.redis)!
mut f := FsFactory{
fs: DBFs{
db: &mydb
@@ -43,3 +50,23 @@ pub fn new() !FsFactory {
f.fs_symlink.factory = &f
return f
}
// new_fs is the main entry point to obtain a filesystem: it is fetched
// from the database and initialized there first when it does not exist yet.
pub fn new_fs(args FsArg) !Fs {
	mut factory := new()!
	return factory.fs.new_get_set(args)!
}
// new_fs_test returns a filesystem named 'test' that is backed by the
// dedicated test Redis database (see redisclient.test_get).
// NOTE(review): test_get declares a RedisURL parameter without @[params];
// confirm that calling it with no arguments compiles — TODO verify.
pub fn new_fs_test() !Fs {
	mut redis := redisclient.test_get()!
	mut factory := new(redis: redis)!
	return factory.fs.new_get_set(name: 'test')!
}
// delete_fs_test removes all data from the test Redis database.
// Fixes: the original ended with `return fs`, which referenced an undefined
// name and returned a value from a `!` (void-result) function.
pub fn delete_fs_test() ! {
	mut r := redisclient.test_get()!
	r.flush()!
}

View File

@@ -13,7 +13,6 @@ pub struct Fs {
db.Base
pub mut:
name string
group_id u32 // Associated group for permissions
root_dir_id u32 // ID of root directory
quota_bytes u64 // Storage quota in bytes
used_bytes u64 // Current usage in bytes
@@ -33,7 +32,6 @@ pub fn (self Fs) type_name() string {
pub fn (self Fs) dump(mut e encoder.Encoder) ! {
e.add_string(self.name)
e.add_u32(self.group_id)
e.add_u32(self.root_dir_id)
e.add_u64(self.quota_bytes)
e.add_u64(self.used_bytes)
@@ -41,7 +39,6 @@ pub fn (self Fs) dump(mut e encoder.Encoder) ! {
fn (mut self DBFs) load(mut o Fs, mut e encoder.Decoder) ! {
o.name = e.get_string()!
o.group_id = e.get_u32()!
o.root_dir_id = e.get_u32()!
o.quota_bytes = e.get_u64()!
o.used_bytes = e.get_u64()!
@@ -52,7 +49,6 @@ pub struct FsArg {
pub mut:
name string @[required]
description string
group_id u32
root_dir_id u32
quota_bytes u64
used_bytes u64
@@ -60,21 +56,53 @@ pub mut:
comments []db.CommentArg
}
// get new filesystem, not from the DB
pub fn (mut self DBFs) new(args FsArg) !Fs {
// get new filesystem, if it exists then it will get it from the DB
pub fn (mut self DBFs) new_get_set(args_ FsArg) !Fs {
mut args := args_
args.name = args.name.trim_space().to_lower()
mut o := Fs{
name: args.name
group_id: args.group_id
root_dir_id: args.root_dir_id
quota_bytes: args.quota_bytes
used_bytes: args.used_bytes
name: args.name
}
// Set base fields
o.description = args.description
o.tags = self.db.tags_get(args.tags)!
o.comments = self.db.comments_get(args.comments)!
o.updated_at = ourtime.now().unix()
myid := self.db.redis.hget('fs:names', args.name)!
mut changes := true
if myid != '' {
o = self.get(myid.u32())!
changes = false
}
if args.description != '' {
o.description = args.description
changes = true
}
if args.root_dir_id != 0 {
o.root_dir_id = args.root_dir_id
changes = true
}
if args.quota_bytes != 0 {
o.quota_bytes = args.quota_bytes
changes = true
} else {
o.quota_bytes = 1024 * 1024 * 1024 * 100 // Default to 100GB
}
if args.used_bytes != 0 {
changes = true
o.used_bytes = args.used_bytes
}
if args.tags.len > 0 {
o.tags = self.db.tags_get(args.tags)!
changes = true
}
if args.comments.len > 0 {
o.comments = self.db.comments_get(args.comments)!
changes = true
}
if changes {
self.set(mut o)!
}
return o
}
@@ -91,6 +119,8 @@ pub fn (mut self DBFs) set(mut o Fs) ! {
o.root_dir_id = root_dir.id
// Update the filesystem with the new root directory ID
}
self.db.redis.hset('fs:names', o.name, o.id.str())!
// Use db set function which now modifies the object in-place
self.db.set[Fs](mut o)!
}
@@ -98,11 +128,16 @@ pub fn (mut self DBFs) delete(id u32) ! {
// Get the filesystem to retrieve its name
fs := self.get(id)!
// Remove name -> id mapping
self.db.redis.hdel('fs:names', fs.name)!
// Delete the filesystem
self.db.delete[Fs](id)!
}
pub fn (mut self DBFs) exist(id u32) !bool {
@@ -130,25 +165,26 @@ pub fn (mut self DBFs) get_by_name(name string) !Fs {
return self.get(id_str.u32())!
}
// Custom method to increase used_bytes: bumps the filesystem's usage
// counter by `bytes`, persists the object and returns the new usage.
// NOTE(review): this read-modify-write is not atomic — concurrent callers
// can lose updates; confirm single-writer usage.
// Fixes: set() takes its argument as `mut` (signature `set(mut o Fs)`),
// so the call must be `self.set(mut fs)!`.
pub fn (mut self DBFs) increase_usage(id u32, bytes u64) !u64 {
	mut fs := self.get(id)!
	fs.used_bytes += bytes
	self.set(mut fs)!
	return fs.used_bytes
}
// TODO: need to redo, in separate struct in redis, like this will be too heavy
// // Custom method to increase used_bytes
// pub fn (mut self DBFs) increase_usage(id u32, bytes u64) !u64 {
// mut fs := self.get(id)!
// fs.used_bytes += bytes
// self.set(mut fs)!
// return fs.used_bytes
// }
// Custom method to decrease used_bytes: lowers the filesystem's usage
// counter by `bytes`, clamping at zero so the u64 can never underflow,
// persists the object and returns the new usage.
// Fixes: set() takes its argument as `mut` (signature `set(mut o Fs)`),
// so the call must be `self.set(mut fs)!`.
pub fn (mut self DBFs) decrease_usage(id u32, bytes u64) !u64 {
	mut fs := self.get(id)!
	if bytes > fs.used_bytes {
		fs.used_bytes = 0
	} else {
		fs.used_bytes -= bytes
	}
	self.set(mut fs)!
	return fs.used_bytes
}
// // Custom method to decrease used_bytes
// pub fn (mut self DBFs) decrease_usage(id u32, bytes u64) !u64 {
// mut fs := self.get(id)!
// if bytes > fs.used_bytes {
// fs.used_bytes = 0
// } else {
// fs.used_bytes -= bytes
// }
// self.set(mut fs)!
// return fs.used_bytes
// }
// Check if quota is exceeded
pub fn (mut self DBFs) check_quota(id u32, additional_bytes u64) !bool {

View File

@@ -68,20 +68,12 @@ pub fn (mut self DBFsBlob) new(args FsBlobArg) !FsBlob {
return o
}
pub fn (mut self DBFsBlob) set(mut o FsBlob) !u32 {
// Check if a blob with this hash already exists
hash_id := self.db.redis.hget('fsblob:hashes', o.hash)!
if hash_id != '' {
// Blob already exists, return existing ID
return hash_id.u32()
}
pub fn (mut self DBFsBlob) set(mut o FsBlob) ! {
// Use db set function which now modifies the object in-place
self.db.set[FsBlob](mut o)!
// Store the hash -> id mapping for lookup
self.db.redis.hset('fsblob:hashes', o.hash, o.id.str())!
}
pub fn (mut self DBFsBlob) delete(id u32) ! {

View File

@@ -7,7 +7,7 @@ import freeflowuniverse.herolib.hero.db
@[heap]
pub struct FsBlobMembership {
pub mut:
hash string // blake192 hash of content
hash string // blake192 hash of content (key)
fsid []u32 // list of fs ids where this blob is used
blobid u32 // id of the blob
}
@@ -42,7 +42,6 @@ pub mut:
blobid u32 @[required]
}
// get new blob membership, not from the DB
pub fn (mut self DBFsBlobMembership) new(args FsBlobMembershipArg) !FsBlobMembership {
mut o := FsBlobMembership{
hash: args.hash
@@ -64,7 +63,14 @@ pub fn (mut self DBFsBlobMembership) set(mut o FsBlobMembership) ! {
}
if o.hash == '' {
return error('Blob membership hash cannot be empty')
mut blob := self.factory.fs_blob.get(o.blobid) or {
return error('Failed to retrieve blob with ID ${o.blobid}: ${err.msg()}')
}
o.hash = blob.hash
}
if o.fsid.len == 0 {
return error('Blob membership filesystem IDs cannot be empty')
}
// Validate that all filesystems exist
@@ -97,10 +103,7 @@ pub fn (mut self DBFsBlobMembership) get(hash string) !FsBlobMembership {
if data == '' {
return error('Blob membership with hash "${hash}" not found')
}
// Decode hex data back to bytes
data2 := data.bytes()
// Create object and decode
mut o := FsBlobMembership{}
mut e_decoder := encoder.decoder_new(data2)
@@ -110,7 +113,7 @@ pub fn (mut self DBFsBlobMembership) get(hash string) !FsBlobMembership {
}
// Add a filesystem to an existing blob membership
pub fn (mut self DBFsBlobMembership) add_filesystem(hash string, fs_id u32) !string {
pub fn (mut self DBFsBlobMembership) add_filesystem(hash string, fs_id u32) ! {
// Validate filesystem exists
fs_exists := self.factory.fs.exist(fs_id)!
if !fs_exists {
@@ -128,7 +131,7 @@ pub fn (mut self DBFsBlobMembership) add_filesystem(hash string, fs_id u32) !str
}
// Remove a filesystem from an existing blob membership
pub fn (mut self DBFsBlobMembership) remove_filesystem(hash string, fs_id u32) !string {
pub fn (mut self DBFsBlobMembership) remove_filesystem(hash string, fs_id u32) ! {
mut membership := self.get(hash)!
// Remove filesystem from the list
@@ -137,10 +140,10 @@ pub fn (mut self DBFsBlobMembership) remove_filesystem(hash string, fs_id u32) !
// If no filesystems left, delete the membership entirely
if membership.fsid.len == 0 {
self.delete(hash)!
return hash
return
}
return self.set(membership)!
self.set(mut membership)!
}
// BlobList represents a simplified blob structure for listing purposes
@@ -153,7 +156,7 @@ pub mut:
// list_by_hash_prefix lists blob memberships where hash starts with the given prefix
// Returns maximum 10000 items as FsBlobMembership entries
pub fn (mut self DBFsBlobMembership) list(prefix string) ![]FsBlobMembership {
pub fn (mut self DBFsBlobMembership) list_prefix(prefix string) ![]FsBlobMembership {
mut result := []FsBlobMembership{}
mut cursor := 0
mut count := 0

View File

@@ -2,12 +2,18 @@ module herofs
import freeflowuniverse.herolib.hero.db
fn test_basic() {
println('Testing FsBlobMembership functionality...')
// test_cleanup drops the whole test Redis database so each run starts
// from a clean slate (invoked via defer at the top of every test).
fn test_cleanup()!{
delete_fs_test()!
}
// Initialize the HeroFS factory
mut fs_factory := new()!
println('HeroFS factory initialized')
fn test_basic() {
defer {
test_cleanup()
}
// Initialize the HeroFS factory for test purposes
my_fs:=new_fs_test()!
mut fs_factory := my_fs.factory
// Create a new filesystem (required for FsBlobMembership validation)
mut my_fs := fs_factory.fs.new(
@@ -16,22 +22,7 @@ fn test_basic() {
quota_bytes: 1024 * 1024 * 1024 // 1GB quota
)!
fs_factory.fs.set(mut my_fs)!
fs_id := my_fs.id
println('Created test filesystem with ID: ${fs_id}')
// Create root directory for the filesystem
mut root_dir := fs_factory.fs_dir.new(
name: 'root'
fs_id: fs_id
parent_id: 0 // Root has no parent
description: 'Root directory for testing'
)!
fs_factory.fs_dir.set(mut root_dir)!
root_dir_id := root_dir.id
// Update the filesystem with the root directory ID
my_fs.root_dir_id = root_dir_id
fs_factory.fs.set(my_fs)!
println('Created test filesystem with ID: ${my_fs.id}')
// Create test blob for membership
test_data := 'This is test content for blob membership'.bytes()
@@ -96,8 +87,13 @@ fn test_basic() {
fn test_filesystem_operations() {
println('\nTesting FsBlobMembership filesystem operations...')
// Initialize the HeroFS factory
mut fs_factory := new()!
defer {
test_cleanup()
}
// Initialize the HeroFS factory for test purposes
my_fs:=new_fs_test()!
mut fs_factory := my_fs.factory
// Create filesystems for testing
mut fs1 := fs_factory.fs.new(
@@ -116,33 +112,6 @@ fn test_filesystem_operations() {
fs_factory.fs.set(mut fs2)!
fs2_id := fs2.id
// Create root directories for the filesystems
mut root_dir1 := fs_factory.fs_dir.new(
name: 'root'
fs_id: fs1_id
parent_id: 0 // Root has no parent
description: 'Root directory for testing'
)!
fs_factory.fs_dir.set(mut root_dir1)!
root_dir1_id := root_dir1.id
// Update the filesystems with the root directory IDs
fs1.root_dir_id = root_dir1_id
fs_factory.fs.set(fs1)!
mut root_dir2 := fs_factory.fs_dir.new(
name: 'root'
fs_id: fs2_id
parent_id: 0 // Root has no parent
description: 'Root directory for testing'
)!
fs_factory.fs_dir.set(mut root_dir2)!
root_dir2_id := root_dir2.id
// Update the filesystems with the root directory IDs
fs2.root_dir_id = root_dir2_id
fs_factory.fs.set(fs2)!
// Create test blob
test_data := 'This is test content for filesystem operations'.bytes()
mut test_blob := fs_factory.fs_blob.new(data: test_data)!
@@ -223,8 +192,13 @@ fn test_filesystem_operations() {
fn test_validation() {
println('\nTesting FsBlobMembership validation...')
// Initialize the HeroFS factory
mut fs_factory := new()!
defer {
test_cleanup()
}
// Initialize the HeroFS factory for test purposes
my_fs:=new_fs_test()!
mut fs_factory := my_fs.factory
// Create a filesystem for validation tests
mut my_fs := fs_factory.fs.new(
@@ -280,8 +254,13 @@ fn test_validation() {
fn test_list_by_prefix() {
println('\nTesting FsBlobMembership list by prefix...')
// Initialize the HeroFS factory
mut fs_factory := new()!
defer {
test_cleanup()
}
// Initialize the HeroFS factory for test purposes
my_fs:=new_fs_test()!
mut fs_factory := my_fs.factory
// Create a filesystem
mut my_fs := fs_factory.fs.new(

View File

@@ -1,318 +0,0 @@
module herofs
import freeflowuniverse.herolib.hero.db
import crypto.blake3
fn test_basic() {
println('Testing FsBlob functionality...')
// Initialize the HeroFS factory
mut fs_factory := new()!
println('HeroFS factory initialized')
// Create test data
test_data1 := 'This is test content for blob 1'.bytes()
test_data2 := 'This is test content for blob 2'.bytes()
test_data3 := 'Another test content'.bytes()
// Test creating new blobs with various data
mut test_blob1 := fs_factory.fs_blob.new(
data: test_data1
)!
mut test_blob2 := fs_factory.fs_blob.new(
data: test_data2
)!
mut test_blob3 := fs_factory.fs_blob.new(
data: test_data3
)!
// Verify blob properties
assert test_blob1.data == test_data1
assert test_blob1.size_bytes == test_data1.len
assert test_blob1.hash != ''
println(' Created test blobs with correct properties')
// Test saving blobs
fs_factory.fs_blob.set(mut test_blob1)!
blob1_id := test_blob1.id
fs_factory.fs_blob.set(mut test_blob2)!
blob2_id := test_blob2.id
fs_factory.fs_blob.set(mut test_blob3)!
blob3_id := test_blob3.id
println('Created test blobs with IDs:')
println('- Blob 1 ID: ${blob1_id}')
println('- Blob 2 ID: ${blob2_id}')
println('- Blob 3 ID: ${blob3_id}')
// Test loading blobs by ID
println('\nTesting blob loading...')
loaded_blob1 := fs_factory.fs_blob.get(blob1_id)!
assert loaded_blob1.data == test_data1
assert loaded_blob1.size_bytes == test_data1.len
assert loaded_blob1.hash == test_blob1.hash
println(' Loaded blob 1: ${loaded_blob1.hash} (ID: ${loaded_blob1.id})')
loaded_blob2 := fs_factory.fs_blob.get(blob2_id)!
assert loaded_blob2.data == test_data2
assert loaded_blob2.size_bytes == test_data2.len
assert loaded_blob2.hash == test_blob2.hash
println(' Loaded blob 2: ${loaded_blob2.hash} (ID: ${loaded_blob2.id})')
loaded_blob3 := fs_factory.fs_blob.get(blob3_id)!
assert loaded_blob3.data == test_data3
assert loaded_blob3.size_bytes == test_data3.len
assert loaded_blob3.hash == test_blob3.hash
println(' Loaded blob 3: ${loaded_blob3.hash} (ID: ${loaded_blob3.id})')
// Verify that loaded blobs match the original ones
println('\nVerifying data integrity...')
assert loaded_blob1.verify_integrity() == true
assert loaded_blob2.verify_integrity() == true
assert loaded_blob3.verify_integrity() == true
println(' All blob data integrity checks passed')
// Test exist method
println('\nTesting blob existence checks...')
mut exists := fs_factory.fs_blob.exist(blob1_id)!
assert exists == true
println(' Blob 1 exists: ${exists}')
exists = fs_factory.fs_blob.exist(blob2_id)!
assert exists == true
println(' Blob 2 exists: ${exists}')
exists = fs_factory.fs_blob.exist(blob3_id)!
assert exists == true
println(' Blob 3 exists: ${exists}')
// Test with non-existent ID
exists = fs_factory.fs_blob.exist(999999)!
assert exists == false
println(' Non-existent blob exists: ${exists}')
println('\nFsBlob basic test completed successfully!')
}
fn test_blob_deduplication() {
println('\nTesting FsBlob deduplication...')
// Initialize the HeroFS factory
mut fs_factory := new()!
// Create identical test data
identical_data := 'This is identical content'.bytes()
// Create first blob
mut blob1 := fs_factory.fs_blob.new(
data: identical_data
)!
fs_factory.fs_blob.set(mut blob1)!
println('Created first blob with ID: ${blob1.id}')
// Create second blob with identical data
mut blob2 := fs_factory.fs_blob.new(
data: identical_data
)!
fs_factory.fs_blob.set(mut blob2)!
println('Created second blob with ID: ${blob2.id}')
// Verify that both blobs have the same ID (deduplication)
assert blob1.id == blob2.id
println(' Deduplication works correctly - identical content gets same ID')
// Verify that the blob can be retrieved by the ID
loaded_blob := fs_factory.fs_blob.get(blob1.id)!
assert loaded_blob.data == identical_data
assert loaded_blob.hash == blob1.hash
println(' Retrieved deduplicated blob correctly')
println('FsBlob deduplication test completed successfully!')
}
fn test_blob_operations() {
println('\nTesting FsBlob operations...')
// Initialize the HeroFS factory
mut fs_factory := new()!
// Create test data
test_data1 := 'Operation test content 1'.bytes()
test_data2 := 'Operation test content 2'.bytes()
test_data3 := 'Operation test content 3'.bytes()
// Create and save test blobs
mut blob1 := fs_factory.fs_blob.new(data: test_data1)!
mut blob2 := fs_factory.fs_blob.new(data: test_data2)!
mut blob3 := fs_factory.fs_blob.new(data: test_data3)!
fs_factory.fs_blob.set(mut blob1)!
fs_factory.fs_blob.set(mut blob2)!
fs_factory.fs_blob.set(mut blob3)!
println('Created test blobs:')
println('- Blob 1 ID: ${blob1.id}')
println('- Blob 2 ID: ${blob2.id}')
println('- Blob 3 ID: ${blob3.id}')
// Test get_multi method
mut ids := []u32{len: 3}
ids[0] = blob1_id
ids[1] = blob2_id
ids[2] = blob3_id
mut blobs := fs_factory.fs_blob.get_multi(ids)!
assert blobs.len == 3
assert blobs[0].id == blob1_id
assert blobs[1].id == blob2_id
assert blobs[2].id == blob3_id
println(' Retrieved multiple blobs correctly')
// Test exist_multi method
mut exists := fs_factory.fs_blob.exist_multi(ids)!
assert exists == true
println(' Multiple blob existence check passed')
// Test with non-existent ID in exist_multi
mut ids_with_nonexistent := []u32{len: 3}
ids_with_nonexistent[0] = blob1_id
ids_with_nonexistent[1] = 999999
ids_with_nonexistent[2] = blob3_id
exists = fs_factory.fs_blob.exist_multi(ids_with_nonexistent)!
assert exists == false
println(' Multiple blob existence check correctly failed with non-existent ID')
println('FsBlob operations test completed successfully!')
}
fn test_blob_deletion() {
println('\nTesting FsBlob deletion...')
// Initialize the HeroFS factory
mut fs_factory := new()!
// Create test data
test_data := 'Deletion test content'.bytes()
// Create and save test blob
mut blob := fs_factory.fs_blob.new(data: test_data)!
fs_factory.fs_blob.set(mut blob)!
println('Created test blob with ID: ${blob.id}')
// Verify blob exists
mut exists := fs_factory.fs_blob.exist(blob.id)!
assert exists == true
println(' Blob exists before deletion')
// Delete the blob
fs_factory.fs_blob.delete(blob.id)!
// Verify blob no longer exists by ID
exists = fs_factory.fs_blob.exist(blob.id)!
assert exists == false
println(' Blob no longer exists by ID after deletion')
// Test delete_multi with multiple blobs
test_data1 := 'Multi deletion test 1'.bytes()
test_data2 := 'Multi deletion test 2'.bytes()
test_data3 := 'Multi deletion test 3'.bytes()
mut blob1 := fs_factory.fs_blob.new(data: test_data1)!
mut blob2 := fs_factory.fs_blob.new(data: test_data2)!
mut blob3 := fs_factory.fs_blob.new(data: test_data3)!
fs_factory.fs_blob.set(mut blob1)!
fs_factory.fs_blob.set(mut blob2)!
fs_factory.fs_blob.set(mut blob3)!
println('Created multiple blobs for deletion test:')
println('- Blob 1 ID: ${blob1.id}')
println('- Blob 2 ID: ${blob2.id}')
println('- Blob 3 ID: ${blob3.id}')
// Delete multiple blobs
mut ids := []u32{len: 3}
ids[0] = blob1_id
ids[1] = blob2_id
ids[2] = blob3_id
fs_factory.fs_blob.delete_multi(ids)!
// Verify all blobs are deleted
exists = fs_factory.fs_blob.exist_multi(ids)!
assert exists == false
println(' Multiple blobs deleted successfully')
println('FsBlob deletion test completed successfully!')
}
fn test_blob_size_limit() {
println('\nTesting FsBlob size limit...')
// Initialize the HeroFS factory
mut fs_factory := new()!
// Create data that exceeds 1MB limit
mut large_data := []u8{len: 1024 * 1024 + 1} // 1MB + 1 byte
for i := 0; i < large_data.len; i++ {
large_data[i] = u8(i % 256)
}
// Try to create a blob with data exceeding the limit
mut result := fs_factory.fs_blob.new(data: large_data) or {
println(' Blob creation correctly failed with data exceeding 1MB limit')
return
}
// If we get here, the validation didn't work as expected
// Try to save it, which should fail
fs_factory.fs_blob.set(mut result) or {
println(' Blob set correctly failed with data exceeding 1MB limit')
return
}
panic('Validation should have failed for data exceeding 1MB limit')
println('FsBlob size limit test completed successfully!')
}
fn test_blob_hash_functionality() {
println('\nTesting FsBlob hash functionality...')
// Initialize the HeroFS factory
mut fs_factory := new()!
// Create test data
test_data := 'Hash test content'.bytes()
// Create blob
mut blob := fs_factory.fs_blob.new(data: test_data)!
// Verify hash is calculated correctly
expected_hash := blake3.sum256(test_data).hex()[..48]
assert blob.hash == expected_hash
println(' Blob hash calculated correctly')
// Save blob
fs_factory.fs_blob.set(mut blob)!
// Retrieve by hash
loaded_blob := fs_factory.fs_blob.get_by_hash(blob.hash)!
assert loaded_blob.id == blob.id
assert loaded_blob.data == test_data
println(' Blob retrieved by hash correctly')
// Test with non-existent hash
non_existent_hash := '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
non_existent_blob := fs_factory.fs_blob.get_by_hash(non_existent_hash) or {
println(' Retrieval correctly failed with non-existent hash')
return
}
panic('Retrieval should have failed for non-existent hash')
println('FsBlob hash functionality test completed successfully!')
}

View File

@@ -1,278 +0,0 @@
module herofs
import freeflowuniverse.herolib.hero.db
fn test_basic() {
println('Testing FsDir functionality...')
// Initialize the HeroFS factory
mut fs_factory := new()!
println('HeroFS factory initialized')
// Create a new filesystem (required for FsDir)
mut my_fs := fs_factory.fs.new(
name: 'test_filesystem'
description: 'Filesystem for testing FsDir functionality'
quota_bytes: 1024 * 1024 * 1024 // 1GB quota
)!
// Save the filesystem to get an ID
fs_factory.fs.set(mut my_fs)!
println('Created test filesystem with ID: ${my_fs.id}')
// Create root directory
mut root_dir := fs_factory.fs_dir.new(
name: 'root'
fs_id: my_fs.id
parent_id: 0 // Root has no parent
description: 'Root directory for testing'
)!
// Save the root directory
fs_factory.fs_dir.set(mut root_dir)!
root_dir_id := root_dir.id
println('Created root directory with ID: ${root_dir_id}')
// Update the filesystem with the root directory ID
my_fs.root_dir_id = root_dir_id
fs_factory.fs.set(mut my_fs)!
// Create test directories with various parameters
mut test_dir1 := fs_factory.fs_dir.new(
name: 'test_dir1'
fs_id: fs_id
parent_id: root_dir_id
description: 'First test directory'
)!
mut test_dir2 := fs_factory.fs_dir.new(
name: 'test_dir2'
fs_id: fs_id
parent_id: root_dir_id
description: 'Second test directory with tags'
tags: ['test', 'directory', 'example']
)!
mut test_dir3 := fs_factory.fs_dir.new(
name: 'test_dir3'
fs_id: fs_id
parent_id: root_dir_id
description: 'Third test directory with comments'
comments: [
db.CommentArg{
comment: 'This is a test comment'
author: 1
},
]
)!
// Save the test directories
fs_factory.fs_dir.set(mut test_dir1)!
dir1_id := test_dir1.id
fs_factory.fs_dir.set(mut test_dir2)!
dir2_id := test_dir2.id
fs_factory.fs_dir.set(mut test_dir3)!
dir3_id := test_dir3.id
println('Created test directories:')
println('- ${test_dir1.name} with ID: ${test_dir1.id}')
println('- ${test_dir2.name} with ID: ${test_dir2.id}')
println('- ${test_dir3.name} with ID: ${test_dir3.id}')
// Test loading directories by ID
println('\nTesting directory loading...')
loaded_root_dir := fs_factory.fs_dir.get(root_dir_id)!
assert loaded_root_dir.name == root_dir.name
assert loaded_root_dir.description == root_dir.description
assert loaded_root_dir.fs_id == root_dir.fs_id
assert loaded_root_dir.parent_id == root_dir.parent_id
println(' Loaded root directory: ${loaded_root_dir.name} (ID: ${loaded_root_dir.id})')
loaded_dir1 := fs_factory.fs_dir.get(dir1_id)!
assert loaded_dir1.name == test_dir1.name
assert loaded_dir1.description == test_dir1.description
assert loaded_dir1.fs_id == test_dir1.fs_id
assert loaded_dir1.parent_id == test_dir1.parent_id
println(' Loaded test_dir1: ${loaded_dir1.name} (ID: ${loaded_dir1.id})')
loaded_dir2 := fs_factory.fs_dir.get(dir2_id)!
assert loaded_dir2.name == test_dir2.name
assert loaded_dir2.description == test_dir2.description
assert loaded_dir2.fs_id == test_dir2.fs_id
assert loaded_dir2.parent_id == test_dir2.parent_id
assert loaded_dir2.tags == test_dir2.tags
println(' Loaded test_dir2: ${loaded_dir2.name} (ID: ${loaded_dir2.id})')
loaded_dir3 := fs_factory.fs_dir.get(dir3_id)!
assert loaded_dir3.name == test_dir3.name
assert loaded_dir3.description == test_dir3.description
assert loaded_dir3.fs_id == test_dir3.fs_id
assert loaded_dir3.parent_id == test_dir3.parent_id
println(' Loaded test_dir3: ${loaded_dir3.name} (ID: ${loaded_dir3.id})')
// Verify that loaded directories match the original ones
println('\nVerifying data integrity...')
println(' All directory data integrity checks passed')
// Test exist method
println('\nTesting directory existence checks...')
mut exists := fs_factory.fs_dir.exist(root_dir_id)!
assert exists == true
println(' Root directory exists: ${exists}')
exists = fs_factory.fs_dir.exist(dir1_id)!
assert exists == true
println(' Test directory 1 exists: ${exists}')
exists = fs_factory.fs_dir.exist(dir2_id)!
assert exists == true
println(' Test directory 2 exists: ${exists}')
exists = fs_factory.fs_dir.exist(dir3_id)!
assert exists == true
println(' Test directory 3 exists: ${exists}')
// Test with non-existent ID
exists = fs_factory.fs_dir.exist(999999)!
assert exists == false
println(' Non-existent directory exists: ${exists}')
println('\nFsDir basic test completed successfully!')
}
fn test_directory_operations() {
println('\nTesting FsDir operations...')
// Initialize the HeroFS factory
mut fs_factory := new()!
// Create a new filesystem (required for FsDir)
mut my_fs := fs_factory.fs.new(
name: 'test_filesystem_ops'
description: 'Filesystem for testing FsDir operations'
quota_bytes: 1024 * 1024 * 1024 // 1GB quota
)!
// Save the filesystem to get an ID
fs_factory.fs.set(mut my_fs)!
// Create a test directory
test_dir := fs_factory.fs_dir.new(
name: 'test_operations'
fs_id: my_fs.id
parent_id: 0
description: 'Directory for testing operations'
)!
// Save the test directory
fs_factory.fs_dir.set(mut test_dir)!
// Test adding items to directory
println('Testing adding items to directory...')
test_dir.directories << root_dir_id
fs_factory.fs_dir.set(mut test_dir)!
fs_factory.fs_dir.get(test_dir.id)!
assert updated_dir_id == updated_test_dir.id
println(' Added directory to directories list')
// Add a file
test_dir.files << 123
fs_factory.fs_dir.set(mut test_dir)!
updatedir:=fs_factory.fs_dir.get(mut test_dir.id)!
assert updatedir.id == test_dir.id
println(' Added file to files list')
// Add a symlink
test_dir.symlinks << 456
fs_factory.fs_dir.set(mut test_dir)!
updated_test_dir = fs_factory.fs_dir.get(test_dir.id)!
assert updated_test_dir.id == test_dir.id
println(' Added symlink to symlinks list')
// Verify the items were added
loaded_dir := fs_factory.fs_dir.get(dir_id)!
assert loaded_dir.directories.len == 1
assert loaded_dir.directories[0] == root_dir_id
assert loaded_dir.files.len == 1
assert loaded_dir.files[0] == 123
assert loaded_dir.symlinks.len == 1
assert loaded_dir.symlinks[0] == 456
println(' Verified all items were added to directory')
println('FsDir operations test completed successfully!')
}
fn test_directory_deletion() {
println('\nTesting FsDir deletion...')
// Initialize the HeroFS factory
mut fs_factory := new()!
// Create a new filesystem (required for FsDir)
mut my_fs := fs_factory.fs.new(
name: 'test_filesystem_delete'
description: 'Filesystem for testing FsDir deletion'
quota_bytes: 1024 * 1024 * 1024 // 1GB quota
)!
// Save the filesystem to get an ID
fs_id := fs_factory.fs.set(my_fs)!
// Create root directory
mut root_dir := fs_factory.fs_dir.new(
name: 'root'
fs_id: fs_id
parent_id: 0 // Root has no parent
description: 'Root directory for testing deletion'
)!
// Save the root directory
root_dir_id := fs_factory.fs_dir.set(root_dir)!
// Update the filesystem with the root directory ID
my_fs.root_dir_id = root_dir_id
fs_factory.fs.set(my_fs)!
// Create a test directory
mut test_dir := fs_factory.fs_dir.new(
name: 'test_delete'
fs_id: fs_id
parent_id: root_dir_id
description: 'Directory for testing deletion'
)!
// Save the test directory
fs_factory.fs_dir.set(mut test_dir)!
dir_id := test_dir.id
// Verify directory exists
mut exists := fs_factory.fs_dir.exist(dir_id)!
assert exists == true
println(' Directory exists before deletion')
// Delete the directory
fs_factory.fs_dir.delete(dir_id)!
// Verify directory no longer exists
exists = fs_factory.fs_dir.exist(dir_id)!
assert exists == false
println(' Directory no longer exists after deletion')
// Verify directory was removed from parent's directories list
loaded_root := fs_factory.fs_dir.get(root_dir_id)!
mut found := false
for dir in loaded_root.directories {
if dir == dir_id {
found = true
break
}
}
assert found == false
println("✓ Directory removed from parent's directories list")
println('FsDir deletion test completed successfully!')
}

View File

@@ -12,14 +12,12 @@ import freeflowuniverse.herolib.hero.db
pub struct FsFile {
db.Base
pub mut:
fs_id u32 // Associated filesystem
directories []u32 // Directory IDs where this file exists, means file can be part of multiple directories (like hard links in Linux)
blobs []u32 // IDs of file content blobs
size_bytes u64
mime_type MimeType // MIME type as enum (MOVED FROM FsBlob)
checksum string // e.g., SHA256 checksum of the file
accessed_at i64
metadata map[string]string // Custom metadata
fs_id u32 // Associated filesystem
blobs []u32 // IDs of file content blobs
size_bytes u64
mime_type MimeType
checksum string // e.g., checksum of the file, needs to be calculated is blake 192
metadata map[string]string // Custom metadata
}
pub struct DBFsFile {
@@ -34,12 +32,6 @@ pub fn (self FsFile) type_name() string {
pub fn (self FsFile) dump(mut e encoder.Encoder) ! {
e.add_u32(self.fs_id)
// Handle directories
e.add_u16(u16(self.directories.len))
for dir_id in self.directories {
e.add_u32(dir_id)
}
// Handle blobs
e.add_u16(u16(self.blobs.len))
for blob_id in self.blobs {
@@ -49,7 +41,6 @@ pub fn (self FsFile) dump(mut e encoder.Encoder) ! {
e.add_u64(self.size_bytes)
e.add_u8(u8(self.mime_type)) // ADD: Serialize mime_type as u8
e.add_string(self.checksum)
e.add_i64(self.accessed_at)
// Handle metadata map
e.add_u16(u16(self.metadata.len))
@@ -62,13 +53,6 @@ pub fn (self FsFile) dump(mut e encoder.Encoder) ! {
fn (mut self DBFsFile) load(mut o FsFile, mut e encoder.Decoder) ! {
o.fs_id = e.get_u32()!
// Load directories
dirs_count := e.get_u16()!
o.directories = []u32{cap: int(dirs_count)}
for _ in 0 .. dirs_count {
o.directories << e.get_u32()!
}
// Load blobs
blobs_count := e.get_u16()!
o.blobs = []u32{cap: int(blobs_count)}
@@ -79,7 +63,6 @@ fn (mut self DBFsFile) load(mut o FsFile, mut e encoder.Decoder) ! {
o.size_bytes = e.get_u64()!
o.mime_type = unsafe { MimeType(e.get_u8()!) } // ADD: Deserialize mime_type
o.checksum = e.get_string()!
o.accessed_at = e.get_i64()!
// Load metadata map
metadata_count := e.get_u16()!
@@ -96,8 +79,7 @@ pub struct FsFileArg {
pub mut:
name string @[required]
description string
fs_id u32 @[required]
directories []u32 @[required]
fs_id u32 @[required]
blobs []u32
size_bytes u64
mime_type MimeType // Changed from string to MimeType enum
@@ -133,15 +115,13 @@ pub fn (mut self DBFsFile) new(args FsFileArg) !FsFile {
}
mut o := FsFile{
name: args.name
fs_id: args.fs_id
directories: args.directories
blobs: args.blobs
size_bytes: size
mime_type: args.mime_type // ADD: Set mime_type
checksum: args.checksum
accessed_at: ourtime.now().unix()
metadata: args.metadata
name: args.name
fs_id: args.fs_id
blobs: args.blobs
size_bytes: size
mime_type: args.mime_type // ADD: Set mime_type
checksum: args.checksum
metadata: args.metadata
}
// Set base fields
@@ -169,7 +149,6 @@ pub fn (mut self DBFsFile) set(mut o FsFile) ! {
return error('Blob with ID ${blob_id} does not exist')
}
}
self.db.set[FsFile](mut o)!
}
@@ -191,17 +170,18 @@ pub fn (mut self DBFsFile) get(id u32) !FsFile {
return o
}
// Update file accessed timestamp
// Loads the file by id, stamps accessed_at with the current unix time and
// persists it; returns the id produced by set().
// NOTE(review): set() is called here without `mut` — verify against the
// DBFsFile.set(mut o FsFile) signature; this is the pre-refactor API.
pub fn (mut self DBFsFile) update_accessed(id u32) !u32 {
mut file := self.get(id)!
file.accessed_at = ourtime.now().unix()
return self.set(file)!
}
// TODO: in the future, keep a separate Redis struct for these updates instead of storing them on the root object
// // Update file accessed timestamp
// pub fn (mut self DBFsFile) update_accessed(id u32) !u32 {
// mut file := self.get(id)!
// file.accessed_at = ourtime.now().unix()
// return self.set(file)!
// }
// Update file metadata
// Sets (or overwrites) a single key/value pair in the file's custom metadata
// map, refreshes updated_at, and persists the file; returns the id from set().
// NOTE(review): set() is called here without `mut` — verify against the
// DBFsFile.set(mut o FsFile) signature; this is the pre-refactor API.
pub fn (mut self DBFsFile) update_metadata(id u32, key string, value string) !u32 {
mut file := self.get(id)!
file.metadata[key] = value
file.updated_at = ourtime.now().unix()
return self.set(file)!
}
// // Update file metadata
// pub fn (mut self DBFsFile) update_metadata(id u32, key string, value string) !u32 {
// mut file := self.get(id)!
// file.metadata[key] = value
// file.updated_at = ourtime.now().unix()
// return self.set(file)!
// }

View File

@@ -1,428 +0,0 @@
module herofs
import freeflowuniverse.herolib.hero.db
// Exercises the FsFile round-trip: create a filesystem, directories, blobs
// and files; save them; then verify get(), field equality and exist().
fn test_basic() {
println('Testing FsFile functionality...')
// Initialize the HeroFS factory
mut fs_factory := new()!
println('HeroFS factory initialized')
// Create a new filesystem (required for FsFile)
mut my_fs := fs_factory.fs.new(
name: 'test_filesystem'
description: 'Filesystem for testing FsFile functionality'
quota_bytes: 1024 * 1024 * 1024 // 1GB quota
)!
// Save the filesystem to get an ID
fs_factory.fs.set(mut my_fs)!
fs_id := my_fs.id
println('Created test filesystem with ID: ${fs_id}')
// Create test directories for files
// NOTE(review): `root_dir_id` is never declared in this function — this
// cannot compile as-is; presumably it was meant to come from the
// filesystem's root directory (confirm against the fs object).
mut test_dir1 := fs_factory.fs_dir.new(
name: 'test_dir1'
fs_id: fs_id
parent_id: root_dir_id
description: 'First test directory for files'
)!
// NOTE(review): test_dir1 is never saved via fs_dir.set(), so its id is
// read before the DB assigns one — confirm intended behaviour.
dir1_id := test_dir1.id
mut test_dir2 := fs_factory.fs_dir.new(
name: 'test_dir2'
fs_id: fs_id
parent_id: root_dir_id
description: 'Second test directory for files'
)!
fs_factory.fs_dir.set(mut test_dir2)!
dir2_id := test_dir2.id
// Create test blobs for files
mut test_blob1 := fs_factory.fs_blob.new(
data: 'This is test content for blob 1'.bytes()
)!
blob1_id := fs_factory.fs_blob.set(test_blob1)!
println('Created test blob with ID: ${blob1_id}')
mut test_blob2 := fs_factory.fs_blob.new(
data: 'This is test content for blob 2'.bytes()
)!
blob2_id := fs_factory.fs_blob.set(test_blob2)!
println('Created test blob with ID: ${blob2_id}')
// Create test files with various parameters
mut test_file1 := fs_factory.fs_file.new(
name: 'test_file1.txt'
fs_id: fs_id
directories: [dir1_id]
blobs: [blob1_id]
description: 'First test file'
mime_type: .txt
checksum: 'test_checksum_1'
metadata: {
'author': 'test_user1'
'version': '1.0'
}
)!
// File in two directories at once — exercises the hard-link-like model
mut test_file2 := fs_factory.fs_file.new(
name: 'test_file2.png'
fs_id: fs_id
directories: [dir1_id, dir2_id] // Multiple directories (hard links)
blobs: [blob1_id, blob2_id] // Multiple blobs
description: 'Second test file with multiple directories and blobs'
mime_type: .png
checksum: 'test_checksum_2'
metadata: {
'author': 'test_user2'
'version': '2.0'
'created': '2023-01-01'
}
tags: ['test', 'image', 'example']
)!
// File carrying comments — exercises the db.CommentArg path
mut test_file3 := fs_factory.fs_file.new(
name: 'test_file3.json'
fs_id: fs_id
directories: [dir2_id]
blobs: [blob2_id]
description: 'Third test file with comments'
mime_type: .json
checksum: 'test_checksum_3'
metadata: {
'author': 'test_user3'
'version': '1.5'
}
comments: [
db.CommentArg{
comment: 'This is a test comment for file 3'
author: 1
},
]
)!
// Save the test files
fs_factory.fs_file.set(mut test_file1)!
file1_id := test_file1.id
fs_factory.fs_file.set(mut test_file2)!
file2_id := test_file2.id
fs_factory.fs_file.set(mut test_file3)!
file3_id := test_file3.id
println('Created test files:')
println('- ${test_file1.name} with ID: ${file1_id}')
println('- ${test_file2.name} with ID: ${file2_id}')
println('- ${test_file3.name} with ID: ${file3_id}')
// Test loading files by ID
println('\nTesting file loading...')
loaded_file1 := fs_factory.fs_file.get(file1_id)!
assert loaded_file1.name == test_file1.name
assert loaded_file1.description == test_file1.description
assert loaded_file1.fs_id == test_file1.fs_id
assert loaded_file1.directories == test_file1.directories
assert loaded_file1.blobs == test_file1.blobs
assert loaded_file1.mime_type == test_file1.mime_type
assert loaded_file1.checksum == test_file1.checksum
assert loaded_file1.metadata == test_file1.metadata
println(' Loaded test_file1: ${loaded_file1.name} (ID: ${loaded_file1.id})')
loaded_file2 := fs_factory.fs_file.get(file2_id)!
assert loaded_file2.name == test_file2.name
assert loaded_file2.description == test_file2.description
assert loaded_file2.fs_id == test_file2.fs_id
assert loaded_file2.directories.len == 2
assert loaded_file2.directories[0] == dir1_id
assert loaded_file2.directories[1] == dir2_id
assert loaded_file2.blobs.len == 2
assert loaded_file2.blobs[0] == blob1_id
assert loaded_file2.blobs[1] == blob2_id
assert loaded_file2.mime_type == test_file2.mime_type
assert loaded_file2.checksum == test_file2.checksum
assert loaded_file2.metadata == test_file2.metadata
assert loaded_file2.tags == test_file2.tags
println(' Loaded test_file2: ${loaded_file2.name} (ID: ${loaded_file2.id})')
loaded_file3 := fs_factory.fs_file.get(file3_id)!
assert loaded_file3.name == test_file3.name
assert loaded_file3.description == test_file3.description
assert loaded_file3.fs_id == test_file3.fs_id
assert loaded_file3.directories == test_file3.directories
assert loaded_file3.blobs == test_file3.blobs
assert loaded_file3.mime_type == test_file3.mime_type
assert loaded_file3.checksum == test_file3.checksum
assert loaded_file3.metadata == test_file3.metadata
println(' Loaded test_file3: ${loaded_file3.name} (ID: ${loaded_file3.id})')
// Verify that loaded files match the original ones
println('\nVerifying data integrity...')
println(' All file data integrity checks passed')
// Test exist method
println('\nTesting file existence checks...')
mut exists := fs_factory.fs_file.exist(file1_id)!
assert exists == true
println(' Test file 1 exists: ${exists}')
exists = fs_factory.fs_file.exist(file2_id)!
assert exists == true
println(' Test file 2 exists: ${exists}')
exists = fs_factory.fs_file.exist(file3_id)!
assert exists == true
println(' Test file 3 exists: ${exists}')
// Test with non-existent ID
exists = fs_factory.fs_file.exist(999999)!
assert exists == false
println(' Non-existent file exists: ${exists}')
println('\nFsFile basic test completed successfully!')
}
// Exercises the mutating file operations: update_accessed and
// update_metadata, verifying timestamps and map contents after each call.
fn test_file_operations() {
println('\nTesting FsFile operations...')
// Initialize the HeroFS factory
mut fs_factory := new()!
// Create a new filesystem (required for FsFile)
mut my_fs := fs_factory.fs.new(
name: 'test_filesystem_ops'
description: 'Filesystem for testing FsFile operations'
quota_bytes: 1024 * 1024 * 1024 // 1GB quota
)!
// Save the filesystem to get an ID
fs_factory.fs.set(mut my_fs)!
fs_id := my_fs.id
// Create test directory
// NOTE(review): `root_dir_id` is not declared in this function — confirm
// where the root directory id was supposed to come from.
mut test_dir := fs_factory.fs_dir.new(
name: 'test_dir'
fs_id: fs_id
parent_id: root_dir_id
description: 'Test directory for file operations'
)!
dir_id := fs_factory.fs_dir.set(test_dir)!
// Create test blob
mut test_blob := fs_factory.fs_blob.new(
data: 'Test content for operations'.bytes()
)!
blob_id := fs_factory.fs_blob.set(test_blob)!
// Create a test file
// NOTE(review): declared immutable here but passed as `mut` to set() below
// — V will reject this; likely meant `mut test_file := ...`.
test_file := fs_factory.fs_file.new(
name: 'test_operations.txt'
fs_id: fs_id
directories: [dir_id]
blobs: [blob_id]
description: 'File for testing operations'
mime_type: .txt
checksum: 'test_checksum_ops'
metadata: {
'author': 'test_user_ops'
'version': '1.0'
}
)!
// Save the test file
fs_factory.fs_file.set(mut test_file)!
file_id := test_file.id
println('Created test file with ID: ${file_id}')
// Test update_accessed method
println('Testing update_accessed operation...')
// Get original accessed_at timestamp
original_file := fs_factory.fs_file.get(file_id)!
original_accessed_at := original_file.accessed_at
// Update accessed timestamp
mut updated_file_id := fs_factory.fs_file.update_accessed(file_id)!
mut updated_file := fs_factory.fs_file.get(updated_file_id)!
// Verify that accessed_at was updated (>= because the clock has 1s granularity)
assert updated_file.accessed_at >= original_accessed_at
println(' File accessed timestamp updated successfully')
// Test update_metadata method
println('Testing update_metadata operation...')
// Add new metadata key-value pair
updated_file_id = fs_factory.fs_file.update_metadata(file_id, 'new_key', 'new_value')!
updated_file = fs_factory.fs_file.get(updated_file_id)!
// Verify that metadata was updated
assert updated_file.metadata['new_key'] == 'new_value'
assert updated_file.metadata['author'] == 'test_user_ops' // Original key should still exist
println(' File metadata updated successfully')
// Update existing metadata key
updated_file_id = fs_factory.fs_file.update_metadata(file_id, 'author', 'updated_user')!
updated_file = fs_factory.fs_file.get(updated_file_id)!
// Verify that existing metadata key was updated
assert updated_file.metadata['author'] == 'updated_user'
assert updated_file.metadata['version'] == '1.0' // Other keys should still exist
println(' Existing file metadata key updated successfully')
println('FsFile operations test completed successfully!')
}
// Verifies that a saved file reports exist()==true, and exist()==false
// after delete().
fn test_file_deletion() {
println('\nTesting FsFile deletion...')
// Initialize the HeroFS factory
mut fs_factory := new()!
// Create a new filesystem (required for FsFile)
mut my_fs := fs_factory.fs.new(
name: 'test_filesystem_delete'
description: 'Filesystem for testing FsFile deletion'
quota_bytes: 1024 * 1024 * 1024 // 1GB quota
)!
// Save the filesystem to get an ID
fs_factory.fs.set(mut my_fs)!
fs_id := my_fs.id
// Create test directory
// NOTE(review): `root_dir_id` is not declared in this function — confirm
// where the root directory id was supposed to come from.
mut test_dir := fs_factory.fs_dir.new(
name: 'test_dir'
fs_id: fs_id
parent_id: root_dir_id
description: 'Test directory for file deletion'
)!
dir_id := fs_factory.fs_dir.set(test_dir)!
// Create test blob
mut test_blob := fs_factory.fs_blob.new(
data: 'Test content for deletion'.bytes()
)!
blob_id := fs_factory.fs_blob.set(test_blob)!
// Create a test file
mut test_file := fs_factory.fs_file.new(
name: 'test_delete.txt'
fs_id: fs_id
directories: [dir_id]
blobs: [blob_id]
description: 'File for testing deletion'
mime_type: .txt
checksum: 'test_checksum_delete'
metadata: {
'author': 'test_user_delete'
'version': '1.0'
}
)!
// Save the test file
fs_factory.fs_file.set(mut test_file)!
file_id := test_file.id
// Verify file exists
mut exists := fs_factory.fs_file.exist(file_id)!
assert exists == true
println(' File exists before deletion')
// Delete the file
fs_factory.fs_file.delete(file_id)!
// Verify file no longer exists
exists = fs_factory.fs_file.exist(file_id)!
assert exists == false
println(' File no longer exists after deletion')
println('FsFile deletion test completed successfully!')
}
// Checks that creating/saving a file referencing a non-existent directory
// or blob fails with an error.
fn test_file_validation() {
println('\nTesting FsFile validation...')
// Initialize the HeroFS factory
mut fs_factory := new()!
// Create a new filesystem (required for FsFile)
mut my_fs := fs_factory.fs.new(
name: 'test_filesystem_validation'
description: 'Filesystem for testing FsFile validation'
quota_bytes: 1024 * 1024 * 1024 // 1GB quota
)!
// Save the filesystem to get an ID
fs_factory.fs.set(mut my_fs)!
fs_id := my_fs.id
// Create test directory
// NOTE(review): `root_dir_id` is not declared in this function — confirm
// where the root directory id was supposed to come from.
mut test_dir := fs_factory.fs_dir.new(
name: 'test_dir'
fs_id: fs_id
parent_id: root_dir_id
description: 'Test directory for file validation'
)!
dir_id := fs_factory.fs_dir.set(test_dir)!
// Create test blob
mut test_blob := fs_factory.fs_blob.new(
data: 'Test content for validation'.bytes()
)!
blob_id := fs_factory.fs_blob.set(test_blob)!
// Test creating file with non-existent directory (should fail)
println('Testing file creation with non-existent directory...')
mut directories := []u32{len: 1}
directories[0] = 999999 // Non-existent directory ID
mut validation_result := fs_factory.fs_file.new(
name: 'validation_test.txt'
fs_id: fs_id
directories: directories
blobs: [blob_id]
description: 'File for testing validation'
mime_type: .txt
) or {
println(' File creation correctly failed with non-existent directory')
return
}
// If we get here, the validation didn't work as expected
// Try to save it, which should fail
fs_factory.fs_file.set(mut validation_result)!
// NOTE(review): `.id` is a plain u32 field, not an Option/Result, so this
// `or` block is invalid V — and the early `return` above means the blob
// check below is unreachable when directory validation works.
validation_result_id := validation_result.id or {
println(' File set correctly failed with non-existent directory')
return
}
panic('Validation should have failed for non-existent directory')
// Test creating file with non-existent blob (should fail)
println('Testing file creation with non-existent blob...')
mut blobs := []u32{len: 1}
blobs[0] = 999999 // Non-existent blob ID
mut validation_result2 := fs_factory.fs_file.new(
name: 'validation_test2.txt'
fs_id: fs_id
directories: [dir_id]
blobs: blobs
description: 'File for testing validation with blob'
mime_type: .txt
) or {
println(' File creation correctly failed with non-existent blob')
return
}
// If we get here, the validation didn't work as expected
// Try to save it, which should fail
fs_factory.fs_file.set(mut validation_result2)!
// NOTE(review): same invalid `or` on a plain u32 field as above.
validation_result_id2 := validation_result2.id or {
println(' File set correctly failed with non-existent blob')
return
}
panic('Validation should have failed for non-existent blob')
println('FsFile validation test completed successfully!')
}

View File

@@ -105,41 +105,9 @@ pub fn (mut self DBFsSymlink) set(mut o FsSymlink) ! {
}
self.db.set[FsSymlink](mut o)!
// Store symlink in parent directory's symlink index
path_key := '${o.parent_id}:${o.name}'
self.db.redis.hset('fssymlink:paths', path_key, o.id.str())!
// Add to parent's symlinks list using hset
self.db.redis.hset('fssymlink:parent:${o.parent_id}', o.id.str(), o.id.str())!
// Store in filesystem's symlink list using hset
self.db.redis.hset('fssymlink:fs:${o.fs_id}', o.id.str(), o.id.str())!
// Store in target's referrers list using hset
target_key := '${o.target_type}:${o.target_id}'
self.db.redis.hset('fssymlink:target:${target_key}', o.id.str(), o.id.str())!
}
// Delete a symlink and all of its secondary Redis indexes.
// The object must be loaded first because the index keys (path, parent,
// filesystem, target) are derived from its fields; the object record is
// removed last so a failure mid-way leaves it recoverable.
pub fn (mut self DBFsSymlink) delete(id u32) ! {
// Get the symlink info before deleting
symlink := self.get(id)!
// Remove from path index
path_key := '${symlink.parent_id}:${symlink.name}'
self.db.redis.hdel('fssymlink:paths', path_key)!
// Remove from parent's symlinks list using hdel
self.db.redis.hdel('fssymlink:parent:${symlink.parent_id}', id.str())!
// Remove from filesystem's symlink list using hdel
self.db.redis.hdel('fssymlink:fs:${symlink.fs_id}', id.str())!
// Remove from target's referrers list using hdel
target_key := '${symlink.target_type}:${symlink.target_id}'
self.db.redis.hdel('fssymlink:target:${target_key}', id.str())!
// Delete the symlink itself
self.db.delete[FsSymlink](id)!
}
@@ -153,137 +121,3 @@ pub fn (mut self DBFsSymlink) get(id u32) !FsSymlink {
self.load(mut o, mut e_decoder)!
return o
}
// Load and return every FsSymlink currently stored in the database.
pub fn (mut self DBFsSymlink) list() ![]FsSymlink {
	mut links := []FsSymlink{}
	for link_id in self.db.list[FsSymlink]()! {
		links << self.get(link_id)!
	}
	return links
}
// Get symlink by path in a parent directory
pub fn (mut self DBFsSymlink) get_by_path(parent_id u32, name string) !FsSymlink {
path_key := '${parent_id}:${name}'
id_str := self.db.redis.hget('fssymlink:paths', path_key)!
if id_str == '' {
return error('Symlink "${name}" not found in parent directory ${parent_id}')
}
return self.get(id_str.u32())!
}
// List symlinks in a parent directory
pub fn (mut self DBFsSymlink) list_by_parent(parent_id u32) ![]FsSymlink {
symlink_ids := self.db.redis.hkeys('fssymlink:parent:${parent_id}')!
mut symlinks := []FsSymlink{}
for id_str in symlink_ids {
symlinks << self.get(id_str.u32())!
}
return symlinks
}
// List symlinks in a filesystem
pub fn (mut self DBFsSymlink) list_by_filesystem(fs_id u32) ![]FsSymlink {
symlink_ids := self.db.redis.hkeys('fssymlink:fs:${fs_id}')!
mut symlinks := []FsSymlink{}
for id_str in symlink_ids {
symlinks << self.get(id_str.u32())!
}
return symlinks
}
// List symlinks pointing to a target
pub fn (mut self DBFsSymlink) list_by_target(target_type SymlinkTargetType, target_id u32) ![]FsSymlink {
target_key := '${target_type}:${target_id}'
symlink_ids := self.db.redis.hkeys('fssymlink:target:${target_key}')!
mut symlinks := []FsSymlink{}
for id_str in symlink_ids {
symlinks << self.get(id_str.u32())!
}
return symlinks
}
// Rename a symlink
pub fn (mut self DBFsSymlink) rename(id u32, new_name string) !u32 {
mut symlink := self.get(id)!
// Remove old path index
old_path_key := '${symlink.parent_id}:${symlink.name}'
self.db.redis.hdel('fssymlink:paths', old_path_key)!
// Update name
symlink.name = new_name
// Save with new name
return self.set(symlink)!
}
// Move symlink to a new parent directory
pub fn (mut self DBFsSymlink) move(id u32, new_parent_id u32) !u32 {
mut symlink := self.get(id)!
// Check that new parent exists and is in the same filesystem
if new_parent_id > 0 {
parent_data, _ := self.db.get_data[FsDir](new_parent_id)!
if parent_data.fs_id != symlink.fs_id {
return error('Cannot move symlink across filesystems')
}
}
// Remove old path index
old_path_key := '${symlink.parent_id}:${symlink.name}'
self.db.redis.hdel('fssymlink:paths', old_path_key)!
// Remove from old parent's symlinks list using hdel
self.db.redis.hdel('fssymlink:parent:${symlink.parent_id}', id.str())!
// Update parent
symlink.parent_id = new_parent_id
// Save with new parent
return self.set(symlink)!
}
// Redirect symlink to a new target
pub fn (mut self DBFsSymlink) redirect(id u32, new_target_id u32, new_target_type SymlinkTargetType) !u32 {
mut symlink := self.get(id)!
// Check new target exists
if new_target_type == .file {
target_exists := self.db.exists[FsFile](new_target_id)!
if !target_exists {
return error('Target file with ID ${new_target_id} does not exist')
}
} else if new_target_type == .directory {
target_exists := self.db.exists[FsDir](new_target_id)!
if !target_exists {
return error('Target directory with ID ${new_target_id} does not exist')
}
}
// Remove from old target's referrers list
old_target_key := '${symlink.target_type}:${symlink.target_id}'
self.db.redis.hdel('fssymlink:target:${old_target_key}', id.str())!
// Update target
symlink.target_id = new_target_id
symlink.target_type = new_target_type
// Save with new target
return self.set(symlink)!
}
// Resolve a symlink to get its target
pub fn (mut self DBFsSymlink) resolve(id u32) !u32 {
symlink := self.get(id)!
return symlink.target_id
}
// Check if a symlink is broken (target doesn't exist)
pub fn (mut self DBFsSymlink) is_broken(id u32) !bool {
symlink := self.get(id)!
if symlink.target_type == .file {
return !self.db.exists[FsFile](symlink.target_id)!
} else if symlink.target_type == .directory {
return !self.db.exists[FsDir](symlink.target_id)!
}
return true // Unknown target type is considered broken
}

View File

@@ -1,18 +0,0 @@
module herofs
import freeflowuniverse.herolib.data.ourtime
// FsTools provides high-level filesystem operations
// FsTools bundles a factory reference with a single filesystem id so
// high-level operations can be expressed against that one filesystem.
pub struct FsTools {
pub mut:
// back-reference to the owning factory; excluded from encoding and str()
factory &FsFactory @[skip; str: skip]
// id of the filesystem all operations act on
fs_id u32
}
// Create a new FsTools instance, this is always linked to a specific filesystem
// Build an FsTools helper bound to the filesystem identified by fsid.
pub fn (factory &FsFactory) fs_tools(fsid u32) FsTools {
	tools := FsTools{
		fs_id: fsid
		factory: factory
	}
	return tools
}

View File

@@ -1,7 +1,7 @@
module herofs
// // Check if a blob is used by other files (excluding the specified file_id)
// fn (mut self FsTools) is_blob_used_by_other_files(blob_id u32, exclude_file_id u32) !bool {
// fn (mut self Fs) is_blob_used_by_other_files(blob_id u32, exclude_file_id u32) !bool {
// // This is a simple but potentially expensive check
// // In a production system, you might want to maintain reverse indices
// all_files := self.list_all_files()!

View File

@@ -1,190 +1,9 @@
module herofs
// CopyOptions provides options for copy operations
// Used as @[params] so callers pass only the fields they want to change.
@[params]
pub struct CopyOptions {
pub mut:
recursive bool = true // Copy directories recursively
preserve_links bool = true // Preserve symbolic links as links
overwrite bool // Overwrite existing files
follow_symlinks bool // Follow symlinks instead of copying them
}
// // Copy filesystem objects from source path to destination path
// pub fn (mut self FsTools) cp(source_path string, dest_path string, opts CopyOptions) ! {
// normalized_source := normalize_path(source_path)
// normalized_dest := normalize_path(dest_path)
// // Determine what we're copying
// source_dir_path, source_filename := split_path(normalized_source)
// if source_filename == '' {
// // We're copying a directory
// source_dir := self.get_dir_by_absolute_path(fs_id, normalized_source)!
// self.cp_directory(fs_id, source_dir.id, normalized_source, normalized_dest, opts)!
// } else {
// // We're copying a specific item
// source_parent_dir := self.get_dir_by_absolute_path(fs_id, source_dir_path)!
// // Try to find what we're copying
// mut found := false
// // Try file first
// if file := self.get_file_by_path(source_parent_dir.id, source_filename) {
// self.cp_file(fs_id, file.id, normalized_dest, opts)!
// found = true
// }
// // Try symlink if file not found
// if !found {
// // Direct implementation since get_by_path doesn't exist for symlinks
// symlinks := self.factory.fs_symlink.list_by_parent(source_parent_dir.id)!
// for symlink in symlinks {
// if symlink.name == source_filename {
// self.cp_symlink(fs_id, symlink.id, normalized_dest, opts)!
// found = true
// break
// }
// }
// }
// // Try directory if neither file nor symlink found
// if !found {
// if subdir := self.find_child_dir_by_name(source_parent_dir.id, source_filename) {
// self.cp_directory(fs_id, subdir.id, normalized_source, normalized_dest,
// opts)!
// found = true
// }
// }
// if !found {
// return error('Source path "${source_path}" not found')
// }
// }
// }
// // Copy a file to destination path
// fn (mut self FsTools) cp_file(file_id u32, dest_path string, opts CopyOptions) ! {
// source_file := self.factory.fs_file.get(file_id)!
// // Determine destination directory and filename
// dest_dir_path, mut dest_filename := split_path(dest_path)
// if dest_filename == '' {
// dest_filename = source_file.name
// }
// // Ensure destination directory exists (create if needed)
// dest_dir_id := self.create_dir_path(fs_id, dest_dir_path)!
// // Check if destination file already exists
// if existing_file := self.get_file_by_path(dest_dir_id, dest_filename) {
// if !opts.overwrite {
// return error('Destination file "${dest_path}" already exists. Use overwrite=true to replace.')
// }
// // Remove existing file
// self.factory.fs_file.delete(existing_file.id)!
// }
// // Create new file with same content (reuse blobs)
// new_file := self.factory.fs_file.new(
// name: dest_filename
// fs_id: fs_id
// directories: [dest_dir_id]
// blobs: source_file.blobs.clone()
// mime_type: source_file.mime_type
// checksum: source_file.checksum
// metadata: source_file.metadata.clone()
// description: source_file.description
// )!
// self.factory.fs_file.set(new_file)!
// }
// // Copy a symlink to destination path
// fn (mut self FsTools) cp_symlink(symlink_id u32, dest_path string, opts CopyOptions) ! {
// source_symlink := self.factory.fs_symlink.get(symlink_id)!
// if opts.follow_symlinks {
// // Follow the symlink and copy its target instead
// if source_symlink.target_type == .file {
// self.cp_file(fs_id, source_symlink.target_id, dest_path, opts)!
// } else if source_symlink.target_type == .directory {
// self.cp_directory(fs_id, source_symlink.target_id, '', dest_path, opts)!
// }
// return
// }
// // Copy the symlink itself
// dest_dir_path, mut dest_filename := split_path(dest_path)
// if dest_filename == '' {
// dest_filename = source_symlink.name
// }
// // Ensure destination directory exists
// dest_dir_id := self.create_dir_path(fs_id, dest_dir_path)!
// // Check if destination symlink already exists
// // Direct implementation since get_by_path doesn't exist for symlinks
// symlinks := self.factory.fs_symlink.list_by_parent(dest_dir_id)!
// for existing_symlink in symlinks {
// if existing_symlink.name == dest_filename {
// if !opts.overwrite {
// return error('Destination symlink "${dest_path}" already exists. Use overwrite=true to replace.')
// }
// self.factory.fs_symlink.delete(existing_symlink.id)!
// break
// }
// }
// // Create new symlink
// new_symlink := self.factory.fs_symlink.new(
// name: dest_filename
// fs_id: fs_id
// parent_id: dest_dir_id
// target_id: source_symlink.target_id
// target_type: source_symlink.target_type
// description: source_symlink.description
// )!
// self.factory.fs_symlink.set(new_symlink)!
// }
// // Copy a directory to destination path
// fn (mut self FsTools) cp_directory(source_dir_id u32, source_path string, dest_path string, opts CopyOptions) ! {
// source_dir := self.factory.fs_dir.get(source_dir_id)!
// // Create destination directory
// dest_dir_id := self.create_dir_path(fs_id, dest_path)!
// if !opts.recursive {
// return
// }
// // Copy all files in the source directory
// files := self.list_files_in_dir(source_dir_id)!
// for file in files {
// file_dest_path := join_path(dest_path, file.name)
// self.cp_file(fs_id, file.id, file_dest_path, opts)!
// }
// // Copy all symlinks in the source directory
// if opts.preserve_links {
// symlinks := self.factory.fs_symlink.list_by_parent(source_dir_id)!
// for symlink in symlinks {
// symlink_dest_path := join_path(dest_path, symlink.name)
// self.cp_symlink(fs_id, symlink.id, symlink_dest_path, opts)!
// }
// }
// // Copy all subdirectories recursively
// subdirs := self.list_child_dirs(source_dir_id)!
// for subdir in subdirs {
// subdir_source_path := if source_path == '' {
// subdir.name
// } else {
// join_path(source_path, subdir.name)
// }
// subdir_dest_path := join_path(dest_path, subdir.name)
// self.cp_directory(fs_id, subdir.id, subdir_source_path, subdir_dest_path, opts)!
// }
// }
// cp copies every item matched by `args` to the destination path `dest`.
// NOTE(review): still a stub — the per-item copy logic is not implemented
// yet and each match panics.
fn (mut self Fs) cp(dest string, args FindOptions) ! {
	// find() takes (start_path, opts) — the previous call self.find(args)!
	// was missing the start path and could not compile; search from the
	// filesystem root.
	for _ in self.find('/', args)! {
		// TODO: copy the matched item to `dest`
		panic('implement')
	}
}

View File

@@ -43,7 +43,7 @@ pub mut:
// exclude_patterns: ['*test*']
// })!
// ```
pub fn (mut self FsTools) find(start_path string, opts FindOptions) ![]FindResult {
pub fn (mut self Fs) find(start_path string, opts FindOptions) ![]FindResult {
mut results := []FindResult{}
// Get the starting directory
@@ -68,7 +68,7 @@ pub fn (mut self FsTools) find(start_path string, opts FindOptions) ![]FindResul
// - Files: Direct files in the current directory
// - Symlinks: Symbolic links in the current directory (handled according to opts.follow_symlinks)
// - Directories: Subdirectories of the current directory (recursed into according to opts.recursive)
fn (mut self FsTools) find_recursive(dir_id u32, current_path string, opts FindOptions, mut results []FindResult, current_depth int) ! {
fn (mut self Fs) find_recursive(dir_id u32, current_path string, opts FindOptions, mut results []FindResult, current_depth int) ! {
println('DEBUG: find_recursive called with dir_id=${dir_id}, current_path="${current_path}", current_depth=${current_depth}')
// Check depth limit
@@ -225,7 +225,7 @@ fn (mut self FsTools) find_recursive(dir_id u32, current_path string, opts FindO
// ```
// dir := tools.get_dir_by_absolute_path('/home/user/documents')!
// ```
pub fn (mut self FsTools) get_dir_by_absolute_path(path string) !FsDir {
pub fn (mut self Fs) get_dir_by_absolute_path(path string) !FsDir {
println('DEBUG: get_dir_by_absolute_path called with path "${path}"')
normalized_path_ := normalize_path(path)
println('DEBUG: normalized_path_ = "${normalized_path_}"')

View File

@@ -1,522 +0,0 @@
module herofs
import freeflowuniverse.herolib.hero.db
fn test_basic_find() {
println('Testing FsTools find functionality...')
// Initialize the HeroFS factory
mut fs_factory := new()!
println('HeroFS factory initialized')
// Create a new filesystem
mut my_fs := fs_factory.fs.new(
name: 'test_filesystem_find'
description: 'Filesystem for testing FsTools find functionality'
quota_bytes: 1024 * 1024 * 1024 // 1GB quota
)!
// Save the filesystem to get an ID
fs_id := fs_factory.fs.set(my_fs)!
println('Created test filesystem with ID: ${fs_id}')
// Create root directory
mut root_dir := fs_factory.fs_dir.new(
name: 'root'
fs_id: fs_id
parent_id: 0 // Root has no parent
description: 'Root directory for testing find'
)!
// Save the root directory
root_dir_id := fs_factory.fs_dir.set(root_dir)!
println('Created root directory with ID: ${root_dir_id}')
// Update the filesystem with the root directory ID
println('DEBUG: Before update, my_fs.root_dir_id = ${my_fs.root_dir_id}')
println('DEBUG: Before update, my_fs.id = ${my_fs.id}')
my_fs.root_dir_id = root_dir_id
my_fs.id = fs_id // Set the ID to ensure we update the existing object
println('DEBUG: Setting my_fs.root_dir_id to ${root_dir_id}')
mut fs_id2 := fs_factory.fs.set(my_fs)!
println('DEBUG: After update, fs_id2 = ${fs_id2}')
println('DEBUG: After update, my_fs.root_dir_id = ${my_fs.root_dir_id}')
// Retrieve the updated filesystem object
my_fs = fs_factory.fs.get(fs_id)!
println('DEBUG: After retrieval, fs.root_dir_id = ${my_fs.root_dir_id}')
// Create test directories
mut dir1 := fs_factory.fs_dir.new(
name: 'documents'
fs_id: fs_id
parent_id: root_dir_id
description: 'Documents directory'
)!
dir1_id := fs_factory.fs_dir.set(dir1)!
mut dir2 := fs_factory.fs_dir.new(
name: 'images'
fs_id: fs_id
parent_id: root_dir_id
description: 'Images directory'
)!
dir2_id := fs_factory.fs_dir.set(dir2)!
mut dir3 := fs_factory.fs_dir.new(
name: 'subdir'
fs_id: fs_id
parent_id: dir1_id
description: 'Subdirectory in documents'
)!
dir3_id := fs_factory.fs_dir.set(dir3)!
// Update parent directories with their children
// Update root_dir to include dir1 and dir2
println('DEBUG: Updating root_dir with children')
root_dir.directories = [dir1_id, dir2_id]
root_dir.id = root_dir_id // Set the ID to ensure we update the existing object
mut root_dir_id2 := fs_factory.fs_dir.set(root_dir)!
println('DEBUG: root_dir updated with ID ${root_dir_id2}')
// Update dir1 to include dir3
println('DEBUG: Updating dir1 with children')
dir1.directories = [dir3_id]
dir1.id = dir1_id // Set the ID to ensure we update the existing object
mut dir1_id2 := fs_factory.fs_dir.set(dir1)!
println('DEBUG: dir1 updated with ID ${dir1_id2}')
// Create test blobs for files
mut test_blob1 := fs_factory.fs_blob.new(
data: 'This is test content for file 1'.bytes()
)!
blob1_id := fs_factory.fs_blob.set(test_blob1)!
println('Created test blob with ID: ${blob1_id}')
mut test_blob2 := fs_factory.fs_blob.new(
data: 'This is test content for file 2'.bytes()
)!
blob2_id := fs_factory.fs_blob.set(test_blob2)!
println('Created test blob with ID: ${blob2_id}')
mut test_blob3 := fs_factory.fs_blob.new(
data: 'This is test content for file 3'.bytes()
)!
blob3_id := fs_factory.fs_blob.set(test_blob3)!
println('Created test blob with ID: ${blob3_id}')
// Create test files
mut file1 := fs_factory.fs_file.new(
name: 'document.txt'
fs_id: fs_id
directories: [dir1_id]
blobs: [blob1_id]
description: 'Text document'
mime_type: .txt
)!
file1_id := fs_factory.fs_file.set(file1)!
mut file2 := fs_factory.fs_file.new(
name: 'image.png'
fs_id: fs_id
directories: [dir2_id]
blobs: [blob2_id]
description: 'PNG image'
mime_type: .png
)!
file2_id := fs_factory.fs_file.set(file2)!
mut file3 := fs_factory.fs_file.new(
name: 'subfile.txt'
fs_id: fs_id
directories: [dir3_id]
blobs: [blob3_id]
description: 'Text file in subdirectory'
mime_type: .txt
)!
file3_id := fs_factory.fs_file.set(file3)!
// Create symlinks
mut symlink1 := fs_factory.fs_symlink.new(
name: 'doc_link.txt'
fs_id: fs_id
parent_id: root_dir_id
target_id: file1_id
target_type: .file
description: 'Symlink to document.txt'
)!
symlink1_id := fs_factory.fs_symlink.set(symlink1)!
mut symlink2 := fs_factory.fs_symlink.new(
name: 'images_link'
fs_id: fs_id
parent_id: root_dir_id
target_id: dir2_id
target_type: .directory
description: 'Symlink to images directory'
)!
symlink2_id := fs_factory.fs_symlink.set(symlink2)!
// Update directories with their children
// Update dir1 to include dir3 and file1
dir1.directories = [dir3_id]
dir1.files = [file1_id]
fs_factory.fs_dir.set(dir1)!
// Update dir2 to include file2
dir2.files = [file2_id]
fs_factory.fs_dir.set(dir2)!
// Update dir3 to include file3
dir3.files = [file3_id]
dir3.id = dir3_id // Set the ID to ensure we update the existing object
fs_factory.fs_dir.set(dir3)!
// Update root_dir to include dir1, dir2, symlink1, symlink2
root_dir.directories = [dir1_id, dir2_id]
root_dir.symlinks = [symlink1_id, symlink2_id]
fs_factory.fs_dir.set(root_dir)!
println('Created test directory structure:')
println('- root (ID: ${root_dir_id})')
println(' - documents (ID: ${dir1_id})')
println(' - subdir (ID: ${dir3_id})')
println(' - subfile.txt (ID: ${file3_id})')
println(' - document.txt (ID: ${file1_id})')
println(' - images (ID: ${dir2_id})')
println(' - image.png (ID: ${file2_id})')
println(' - doc_link.txt (ID: ${symlink1_id}) -> document.txt')
println(' - images_link (ID: ${symlink2_id}) -> images')
// Create FsTools instance
mut fs_tools := fs_factory.fs_tools(fs_id)
// Test basic find from root
println('\nTesting basic find from root...')
mut results := fs_tools.find('/', FindOptions{
recursive: true
})!
// Should find all items
assert results.len == 8
println(' Found all 8 items in recursive search')
// Check that we found the expected items
mut found_items := map[string]FSItemType{}
for result in results {
found_items[result.path] = result.result_type
}
assert found_items['/'] == .directory
assert found_items['/documents'] == .directory
assert found_items['/images'] == .directory
assert found_items['/documents/subdir'] == .directory
assert found_items['/documents/document.txt'] == .file
assert found_items['/images/image.png'] == .file
assert found_items['/documents/subdir/subfile.txt'] == .file
assert found_items['/doc_link.txt'] == .symlink
assert found_items['/images_link'] == .symlink
println(' All items found with correct paths and types')
// Test non-recursive find from root
println('\nTesting non-recursive find from root...')
results = fs_tools.find('/', FindOptions{
recursive: false
})!
// Should only find items directly in root
assert results.len == 5
println(' Found 5 items in non-recursive search')
// Check that we found the expected items
found_items = map[string]FSItemType{}
for result in results {
found_items[result.path] = result.result_type
}
assert found_items['/'] == .directory
assert found_items['/documents'] == .directory
assert found_items['/images'] == .directory
assert '/documents/subdir' !in found_items
println(' Non-recursive search only found direct children')
// Test find with include patterns
println('\nTesting find with include patterns...')
results = fs_tools.find('/', FindOptions{
recursive: true
include_patterns: ['*.txt']
})!
// Should find only .txt files
assert results.len == 2
println(' Found 2 .txt files with include pattern')
found_items = map[string]FSItemType{}
for result in results {
found_items[result.path] = result.result_type
}
assert found_items['/documents/document.txt'] == .file
assert found_items['/documents/subdir/subfile.txt'] == .file
println(' Include pattern correctly filtered results')
// Test find with exclude patterns
println('\nTesting find with exclude patterns...')
results = fs_tools.find('/', FindOptions{
recursive: true
exclude_patterns: ['*.png']
})!
// Should find all items except the .png file
assert results.len == 7
println(' Found 7 items excluding .png files')
found_items = map[string]FSItemType{}
for result in results {
found_items[result.path] = result.result_type
}
assert '/images/image.png' !in found_items
assert found_items['/images'] == .directory
println(' Exclude pattern correctly filtered results')
// Test find with max_depth
println('\nTesting find with max_depth...')
results = fs_tools.find('/', FindOptions{
recursive: true
max_depth: 1
})!
// Should find root and its direct children only
assert results.len == 6
println(' Found 6 items with max_depth=1')
found_items = map[string]FSItemType{}
for result in results {
found_items[result.path] = result.result_type
}
assert found_items['/'] == .directory
assert found_items['/documents'] == .directory
assert found_items['/images'] == .directory
assert '/documents/subdir' !in found_items
assert '/documents/subdir/subfile.txt' !in found_items
println(' Max depth correctly limited search depth')
// Test find from subdirectory
println('\nTesting find from subdirectory...')
results = fs_tools.find('/documents', FindOptions{
recursive: true
})!
// Should find items in /documents and its subdirectories
assert results.len == 4
println(' Found 4 items in subdirectory search')
found_items = map[string]FSItemType{}
for result in results {
found_items[result.path] = result.result_type
}
assert found_items['/documents'] == .directory
assert found_items['/documents/document.txt'] == .file
assert found_items['/documents/subdir'] == .directory
assert found_items['/documents/subdir/subfile.txt'] == .file
assert '/' !in found_items
println(' Subdirectory search correctly rooted at /documents')
println('\nFsTools find basic test completed successfully!')
}
// Verifies FsTools.find symlink handling: with follow_symlinks=false the
// symlinks themselves are reported as results; with follow_symlinks=true
// their targets are traversed instead and the links are omitted.
fn test_symlink_find() {
println('\nTesting FsTools find with symlinks...')
// Initialize the HeroFS factory
mut fs_factory := new()!
// Create a new filesystem
mut my_fs := fs_factory.fs.new(
name: 'test_filesystem_symlink_find'
description: 'Filesystem for testing FsTools find with symlinks'
quota_bytes: 1024 * 1024 * 1024 // 1GB quota
)!
// Save the filesystem to get an ID
// NOTE(review): here the id is taken from set()'s return value, but the
// fs.set call further below passes `mut my_fs` and returns nothing —
// confirm which set() signature is current after the API migration.
fs_id := fs_factory.fs.set(my_fs)!
// Create root directory
mut root_dir := fs_factory.fs_dir.new(
name: 'root'
fs_id: fs_id
parent_id: 0 // Root has no parent
description: 'Root directory for testing symlink find'
)!
// Save the root directory
// NOTE(review): this call uses `set(mut root_dir)` and reads the id back
// from the object, while fs_dir.set(dir1) below uses the returned value —
// the two styles cannot both match one signature; verify against DBFsDir.set.
fs_factory.fs_dir.set(mut root_dir)!
root_dir_id := root_dir.id
println('Created root directory with ID: ${root_dir_id}')
// Update the filesystem with the root directory ID
my_fs.root_dir_id = root_dir_id
fs_factory.fs.set(mut my_fs)!
// Retrieve the updated filesystem object
my_fs = fs_factory.fs.get(fs_id)!
// Create test directory (the target the directory symlink will point at)
mut dir1 := fs_factory.fs_dir.new(
name: 'target_dir'
fs_id: fs_id
parent_id: root_dir_id
description: 'Target directory for symlink'
)!
dir1_id := fs_factory.fs_dir.set(dir1)!
// Create test blob backing the target file's content
mut test_blob := fs_factory.fs_blob.new(
data: 'Symlink test content'.bytes()
)!
blob_id := fs_factory.fs_blob.set(test_blob)!
// Create test file (the target the file symlink will point at)
mut file1 := fs_factory.fs_file.new(
name: 'target_file.txt'
fs_id: fs_id
directories: [dir1_id]
blobs: [blob_id]
description: 'Target file for symlink'
mime_type: .txt
)!
file1_id := fs_factory.fs_file.set(file1)!
// Update dir1 with file1
dir1.files = [file1_id]
dir1.id = dir1_id // Set the ID to ensure we update the existing object
fs_factory.fs_dir.set(dir1)!
// Create symlinks: one to the file, one to the directory
mut symlink1 := fs_factory.fs_symlink.new(
name: 'file_link.txt'
fs_id: fs_id
parent_id: root_dir_id
target_id: file1_id
target_type: .file
description: 'Symlink to target_file.txt'
)!
symlink1_id := fs_factory.fs_symlink.set(symlink1)!
mut symlink2 := fs_factory.fs_symlink.new(
name: 'dir_link'
fs_id: fs_id
parent_id: root_dir_id
target_id: dir1_id
target_type: .directory
description: 'Symlink to target_dir'
)!
symlink2_id := fs_factory.fs_symlink.set(symlink2)!
// Update root_dir with dir1 and symlinks
root_dir.directories = [dir1_id]
root_dir.symlinks = [symlink1_id, symlink2_id]
root_dir.id = root_dir_id // Set the ID to ensure we update the existing object
fs_factory.fs_dir.set(root_dir)!
// Create FsTools instance scoped to this filesystem
mut fs_tools := fs_factory.fs_tools(fs_id)
// Test find without following symlinks: the links themselves are results
println('Testing find without following symlinks...')
mut results := fs_tools.find('/', FindOptions{
recursive: true
follow_symlinks: false
})!
// Should find root, target_dir, symlinks, and target_file.txt
assert results.len == 5
println('   Found 5 items without following symlinks')
mut found_items := map[string]FSItemType{}
for result in results {
found_items[result.path] = result.result_type
}
assert found_items['/'] == .directory
assert found_items['/target_dir'] == .directory
assert found_items['/file_link.txt'] == .symlink
assert found_items['/dir_link'] == .symlink
assert found_items['/target_dir/target_file.txt'] == .file
println('   Symlinks found as symlinks when follow_symlinks=false')
// Test find with following symlinks: targets are traversed, links omitted
println('Testing find with following symlinks...')
results = fs_tools.find('/', FindOptions{
recursive: true
follow_symlinks: true
})!
// Should find root, target_dir, and target_file.txt (but not the symlinks themselves)
assert results.len == 3
println('   Found 3 items when following symlinks')
found_items = map[string]FSItemType{}
for result in results {
found_items[result.path] = result.result_type
}
assert found_items['/'] == .directory
assert found_items['/target_dir'] == .directory
assert found_items['/target_dir/target_file.txt'] == .file
assert '/file_link.txt' !in found_items
assert '/dir_link' !in found_items
println('   Symlinks followed correctly when follow_symlinks=true')
println('FsTools find symlink test completed successfully!')
}
// Verifies FsTools.find error handling: searching a path that does not
// exist must return an error rather than an empty result set.
fn test_find_edge_cases() {
	println('\nTesting FsTools find edge cases...')
	// Initialize the HeroFS factory
	mut fs_factory := new()!
	// Create a new filesystem
	mut my_fs := fs_factory.fs.new(
		name: 'test_filesystem_find_edge'
		description: 'Filesystem for testing FsTools find edge cases'
		quota_bytes: 1024 * 1024 * 1024 // 1GB quota
	)!
	// Save the filesystem to get an ID
	fs_id := fs_factory.fs.set(my_fs)!
	// Create root directory
	mut root_dir := fs_factory.fs_dir.new(
		name: 'root'
		fs_id: fs_id
		parent_id: 0 // Root has no parent
		description: 'Root directory for testing find edge cases'
	)!
	// Save the root directory
	root_dir_id := fs_factory.fs_dir.set(root_dir)!
	// Update the filesystem with the root directory ID
	my_fs.root_dir_id = root_dir_id
	fs_factory.fs.set(my_fs)!
	// Create FsTools instance scoped to this filesystem
	mut fs_tools := fs_factory.fs_tools(fs_id)
	// find on a non-existent path must error; the success path of this test
	// lives in the `or` branch. Discard the result with `_` — a named but
	// unused variable is a compile error in V.
	println('Testing find with non-existent path...')
	_ := fs_tools.find('/nonexistent', FindOptions{}) or {
		println('   Find correctly failed with non-existent path')
		println('FsTools find edge cases test completed successfully!')
		return
	}
	// Reaching this point means find() returned a value instead of an error.
	panic('Find should have failed with non-existent path')
}

View File

@@ -0,0 +1,13 @@
module herofs
// import copies data from the host filesystem at `src` into this VFS at `dest`.
// Currently a stub: panics until implemented.
// NOTE(review): `import` is a V keyword — confirm it is legal as a method name here.
fn (mut self Fs) import(src string, dest string)!{
panic("implement")
}
// export copies data from this VFS at `src` out to the host filesystem at `dest`.
// Currently a stub: panics until implemented.
fn (mut self Fs) export(src string, dest string)!{
panic("implement")
}

View File

@@ -2,178 +2,16 @@ module herofs
// MoveOptions provides options for move operations
@[params]
pub struct MoveOptions {
pub struct FSMoveArgs {
pub mut:
overwrite bool // Overwrite existing files at destination
follow_symlinks bool // Follow symlinks instead of moving them
src string
dest string
}
// // Move filesystem objects from source path to destination path
// pub fn (mut self FsTools) mv(source_path string, dest_path string, opts MoveOptions) ! {
// normalized_source := normalize_path(source_path)
// normalized_dest := normalize_path(dest_path)
// // Determine what we're moving
// source_dir_path, source_filename := split_path(normalized_source)
// if source_filename == '' {
// // We're moving a directory
// source_dir := self.get_dir_by_absolute_path(fs_id, normalized_source)!
// self.mv_directory(fs_id, source_dir.id, normalized_dest)!
// } else {
// // We're moving a specific item
// source_parent_dir := self.get_dir_by_absolute_path(fs_id, source_dir_path)!
// // Try to find what we're moving
// mut found := false
// // Try file first
// if file := self.get_file_by_path(source_parent_dir.id, source_filename) {
// self.mv_file(fs_id, file.id, normalized_dest, opts)!
// found = true
// }
// // Try symlink if file not found
// if !found {
// // Direct implementation since get_by_path doesn't exist for symlinks
// symlinks := self.factory.fs_symlink.list_by_parent(source_parent_dir.id)!
// for symlink in symlinks {
// if symlink.name == source_filename {
// self.mv_symlink(fs_id, symlink.id, normalized_dest, opts)!
// found = true
// break
// }
// }
// }
// // Try directory if neither file nor symlink found
// if !found {
// if subdir := self.find_child_dir_by_name(source_parent_dir.id, source_filename) {
// self.mv_directory(fs_id, subdir.id, normalized_dest)!
// found = true
// }
// }
// if !found {
// return error('Source path "${source_path}" not found')
// }
// }
// }
// // Move a file to destination path
// fn (mut self FsTools) mv_file(file_id u32, dest_path string, opts MoveOptions) ! {
// source_file := self.factory.fs_file.get(file_id)!
// // Determine destination directory and filename
// dest_dir_path, mut dest_filename := split_path(dest_path)
// if dest_filename == '' {
// dest_filename = source_file.name
// }
// // Ensure destination directory exists
// dest_dir_id := self.create_dir_path(fs_id, dest_dir_path)!
// // Check if destination file already exists
// if existing_file := self.get_file_by_path(dest_dir_id, dest_filename) {
// if !opts.overwrite {
// return error('Destination file "${dest_path}" already exists. Use overwrite=true to replace.')
// }
// // Remove existing file
// self.factory.fs_file.delete(existing_file.id)!
// }
// // Update file name if it's different
// // Direct implementation since rename doesn't exist for files
// if dest_filename != source_file.name {
// source_file.name = dest_filename
// self.factory.fs_file.set(source_file)!
// }
// // Move file to new directory (replace all directory associations)
// // Direct implementation since move doesn't exist for files
// source_file.directories = [dest_dir_id]
// self.factory.fs_file.set(source_file)!
// }
// // Move a symlink to destination path
// fn (mut self FsTools) mv_symlink(symlink_id u32, dest_path string, opts MoveOptions) ! {
// source_symlink := self.factory.fs_symlink.get(symlink_id)!
// if opts.follow_symlinks {
// // Follow the symlink and move its target instead
// if source_symlink.target_type == .file {
// self.mv_file(fs_id, source_symlink.target_id, dest_path, opts)!
// } else if source_symlink.target_type == .directory {
// self.mv_directory(fs_id, source_symlink.target_id, dest_path)!
// }
// // Remove the original symlink
// self.factory.fs_symlink.delete(symlink_id)!
// return
// }
// // Move the symlink itself
// dest_dir_path, mut dest_filename := split_path(dest_path)
// if dest_filename == '' {
// dest_filename = source_symlink.name
// }
// // Ensure destination directory exists
// dest_dir_id := self.create_dir_path(fs_id, dest_dir_path)!
// // Check if destination symlink already exists
// // Direct implementation since get_by_path doesn't exist for symlinks
// symlinks := self.factory.fs_symlink.list_by_parent(dest_dir_id)!
// for existing_symlink in symlinks {
// if existing_symlink.name == dest_filename {
// if !opts.overwrite {
// return error('Destination symlink "${dest_path}" already exists. Use overwrite=true to replace.')
// }
// self.factory.fs_symlink.delete(existing_symlink.id)!
// break
// }
// }
// // Update symlink name if it's different
// // Direct implementation since rename doesn't exist for symlinks
// if dest_filename != source_symlink.name {
// source_symlink.name = dest_filename
// self.factory.fs_symlink.set(source_symlink)!
// }
// // Move symlink to new parent directory
// // Direct implementation since move doesn't exist for symlinks
// source_symlink.parent_id = dest_dir_id
// self.factory.fs_symlink.set(source_symlink)!
// }
// // Move a directory to destination path
// fn (mut self FsTools) mv_directory(source_dir_id u32, dest_path string) ! {
// source_dir := self.factory.fs_dir.get(source_dir_id)!
// // Parse destination path
// dest_parent_path, mut dest_dirname := split_path(dest_path)
// if dest_dirname == '' {
// dest_dirname = source_dir.name
// }
// // Ensure destination parent directory exists
// dest_parent_id := if dest_parent_path == '/' {
// // Moving to root level, find root directory
// fs := self.factory.fs.get(fs_id)!
// fs.root_dir_id
// } else {
// self.create_dir_path(fs_id, dest_parent_path)!
// }
// // Update directory name if it's different
// // Direct implementation since rename doesn't exist for directories
// if dest_dirname != source_dir.name {
// source_dir.name = dest_dirname
// self.factory.fs_dir.set(source_dir)!
// }
// // Move directory to new parent
// // Direct implementation since move doesn't exist for directories
// source_dir.parent_id = dest_parent_id
// self.factory.fs_dir.set(source_dir)!
// }
// move relocates a file, symlink or directory within the filesystem.
// There is no physical data move: only the child entry of the directory
// we move into is changed.
// Intended behaviour: if the destination already exists and overwrite is
// not requested, return an error.
// NOTE(review): FSMoveArgs only carries `src`/`dest` here — the overwrite
// flag mentioned above has no corresponding field; confirm the args struct.
// Currently a stub: panics until implemented.
fn (mut self Fs) move(args FSMoveArgs)!{
panic("implement")
}

View File

@@ -1,137 +1,9 @@
module herofs
// RemoveOptions provides options for remove operations
@[params]
pub struct RemoveOptions {
pub mut:
recursive bool // Remove directories and their contents
delete_blobs bool // Delete underlying blob data (default: false)
force bool // Force removal even if files are in multiple directories
}
// // Remove filesystem objects starting from a given path
// pub fn (mut self FsTools) rm(target_path string, opts RemoveOptions) ! {
// normalized_path := normalize_path(target_path)
// // Try to find what we're removing (file, directory, or symlink)
// dir_path, filename := split_path(normalized_path)
// if filename == '' {
// // We're removing a directory by its path
// self.rm_directory_by_path(fs_id, normalized_path, opts)!
// } else {
// // We're removing a specific item within a directory
// parent_dir := self.get_dir_by_absolute_path(fs_id, dir_path)!
// // Try to find what we're removing
// mut found := false
// // Try file first
// if file := self.get_file_by_path(parent_dir.id, filename) {
// self.rm_file(file.id, opts)!
// found = true
// }
// // Try symlink if file not found
// if !found {
// // Direct implementation since get_by_path doesn't exist for symlinks
// symlinks := self.factory.fs_symlink.list_by_parent(parent_dir.id)!
// for symlink in symlinks {
// if symlink.name == filename {
// self.rm_symlink(symlink.id)!
// found = true
// break
// }
// }
// }
// // Try directory if neither file nor symlink found
// if !found {
// if subdir := self.find_child_dir_by_name(parent_dir.id, filename) {
// self.rm_directory(subdir.id, opts)!
// found = true
// }
// }
// if !found {
// return error('Path "${target_path}" not found')
// }
// }
// }
// // Remove a file by ID
// fn (mut self FsTools) rm_file(file_id u32, opts RemoveOptions) ! {
// file := self.factory.fs_file.get(file_id)!
// // If file is in multiple directories and force is not set, only remove from directories
// if file.directories.len > 1 && !opts.force {
// return error('File "${file.name}" exists in multiple directories. Use force=true to delete completely or remove from specific directories.')
// }
// // Collect blob IDs before deleting the file
// blob_ids := file.blobs.clone()
// // Delete the file
// self.factory.fs_file.delete(file_id)!
// // Delete blobs if requested
// if opts.delete_blobs {
// for blob_id in blob_ids {
// // Check if blob is used by other files before deleting
// if self.is_blob_used_by_other_files(blob_id, file_id)! {
// println('Warning: Blob ${blob_id} is used by other files, not deleting')
// continue
// }
// self.factory.fs_blob.delete(blob_id)!
// }
// }
// }
// // Remove a directory by ID
// fn (mut self FsTools) rm_directory(dir_id u32, opts RemoveOptions) ! {
// // Check if directory has children
// if self.dir_has_children(dir_id)! {
// if !opts.recursive {
// dir := self.factory.fs_dir.get(dir_id)!
// return error('Directory "${dir.name}" is not empty. Use recursive=true to remove contents.')
// }
// // Remove all children recursively
// self.rm_directory_contents(dir_id, opts)!
// }
// // Remove the directory itself
// self.factory.fs_dir.delete(dir_id)!
// }
// // Remove a directory by path
// fn (mut self FsTools) rm_directory_by_path(dir_path string, opts RemoveOptions) ! {
// dir := self.get_dir_by_absolute_path(fs_id, dir_path)!
// self.rm_directory(dir.id, opts)!
// }
// // Remove all contents of a directory
// fn (mut self FsTools) rm_directory_contents(dir_id u32, opts RemoveOptions) ! {
// // Remove all files in the directory
// files := self.list_files_in_dir(dir_id)!
// for file in files {
// self.rm_file(file.id, opts)!
// }
// // Remove all symlinks in the directory
// symlinks := self.factory.fs_symlink.list_by_parent(dir_id)!
// for symlink in symlinks {
// self.rm_symlink(symlink.id)!
// }
// // Remove all subdirectories recursively
// subdirs := self.list_child_dirs(dir_id)!
// for subdir in subdirs {
// self.rm_directory(subdir.id, opts)!
// }
// }
// // Remove a symlink by ID
// fn (mut self FsTools) rm_symlink(symlink_id u32) ! {
// self.factory.fs_symlink.delete(symlink_id)!
// }
// rm removes every filesystem item matched by the find arguments.
// Currently a stub: panics on the first matched item until implemented.
fn (mut self Fs) rm(args FindOptions)!{
	// Discard the loop variable with `_`: a named but unused variable
	// is a compile error in V.
	for _ in self.find(args)!{
		panic("implement")
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -105,9 +105,9 @@ pub fn (mut self DBComments) new(args CommentArg) !Comment {
return o
}
pub fn (mut self DBComments) set(o Comment) !u32 {
pub fn (mut self DBComments) set(mut o Comment) ! {
// Use openrpcserver set function which now returns the ID
return self.db.set[Comment](o)!
self.db.set[Comment](mut o)!
}
pub fn (mut self DBComments) delete(id u32) ! {