refactor: Update Fs and DBFs structures for new fields

- Add `group_id` to Fs and DBFs structures
- Update `FsFile` to include `directories` and `accessed_at` fields
- Update `FsBlobArg` with `mime_type`, `encoding`, and `created_at` fields
- Add usage tracking methods `increase_usage` and `decrease_usage` to DBFs
- Rename `comments` (`db.CommentArg`) to `messages` (`db.MessageArg`) in FsArg, FsDirArg, FsFileArg, and FsSymlinkArg
- Remove DEBUG println statements from `find_recursive` and `get_dir_by_absolute_path`
This commit is contained in:
Mahmoud-Emad
2025-09-28 11:57:24 +03:00
parent 61487902d6
commit f0efca563e
10 changed files with 147 additions and 109 deletions

View File

@@ -18,7 +18,7 @@ fn main() {
)!
// Save the filesystem
fs_factory.fs.set(mut my_fs)!
my_fs = fs_factory.fs.set(my_fs)!
println('Created filesystem: ${my_fs.name} with ID: ${my_fs.id}')
// Create root directory
@@ -28,11 +28,11 @@ fn main() {
parent_id: 0
description: 'Root directory'
)!
fs_factory.fs_dir.set(mut root_dir)!
root_dir = fs_factory.fs_dir.set(root_dir)!
// Update the filesystem with the root directory ID
my_fs.root_dir_id = root_dir.id
fs_factory.fs.set(mut my_fs)!
my_fs = fs_factory.fs.set(my_fs)!
// Create some sample directory structure
println('\nCreating sample directory structure...')
@@ -49,15 +49,15 @@ fn main() {
// Create blobs for file content
v_code := 'fn main() {\n println("Hello from V!")\n}\n'.bytes()
mut v_blob := fs_factory.fs_blob.new(data: v_code)!
fs_factory.fs_blob.set(mut v_blob)!
v_blob = fs_factory.fs_blob.set(v_blob)!
readme_content := '# My Project\n\nThis is a sample project.\n\n## Features\n\n- Feature 1\n- Feature 2\n'.bytes()
mut readme_blob := fs_factory.fs_blob.new(data: readme_content)!
fs_factory.fs_blob.set(mut readme_blob)!
readme_blob = fs_factory.fs_blob.set(readme_blob)!
test_content := 'fn test_main() {\n assert 1 == 1\n}\n'.bytes()
mut test_blob := fs_factory.fs_blob.new(data: test_content)!
fs_factory.fs_blob.set(mut test_blob)!
test_blob = fs_factory.fs_blob.set(test_blob)!
// Create files
mut main_file := fs_factory.fs_file.new(
@@ -66,7 +66,7 @@ fn main() {
blobs: [v_blob.id]
mime_type: .txt
)!
fs_factory.fs_file.set(mut main_file)!
main_file = fs_factory.fs_file.set(main_file)!
fs_factory.fs_file.add_to_directory(main_file.id, src_dir_id)!
mut readme_file := fs_factory.fs_file.new(
@@ -75,7 +75,7 @@ fn main() {
blobs: [readme_blob.id]
mime_type: .md
)!
fs_factory.fs_file.set(mut readme_file)!
readme_file = fs_factory.fs_file.set(readme_file)!
fs_factory.fs_file.add_to_directory(readme_file.id, root_dir.id)!
mut test_file := fs_factory.fs_file.new(
@@ -84,7 +84,7 @@ fn main() {
blobs: [test_blob.id]
mime_type: .txt
)!
fs_factory.fs_file.set(mut test_file)!
test_file = fs_factory.fs_file.set(test_file)!
fs_factory.fs_file.add_to_directory(test_file.id, test_dir_id)!
// Create a symbolic link
@@ -96,7 +96,7 @@ fn main() {
target_type: .file
description: 'Link to main.v'
)!
fs_factory.fs_symlink.set(mut main_symlink)!
main_symlink = fs_factory.fs_symlink.set(main_symlink)!
println('Sample filesystem structure created!')

View File

@@ -23,7 +23,7 @@ fn main() {
)!
// Save the filesystem
fs_factory.fs.set(mut my_fs)!
my_fs = fs_factory.fs.set(my_fs)!
println('Created filesystem: ${my_fs.name} with ID: ${my_fs.id}')
// Create root directory
@@ -35,12 +35,12 @@ fn main() {
)!
// Save the root directory
fs_factory.fs_dir.set(mut root_dir)!
root_dir = fs_factory.fs_dir.set(root_dir)!
println('Created root directory with ID: ${root_dir.id}')
// Update the filesystem with the root directory ID
my_fs.root_dir_id = root_dir.id
fs_factory.fs.set(mut my_fs)!
my_fs = fs_factory.fs.set(my_fs)!
// Create a directory hierarchy
println('\nCreating directory hierarchy...')
@@ -52,7 +52,7 @@ fn main() {
parent_id: root_dir.id
description: 'Source code'
)!
fs_factory.fs_dir.set(mut src_dir)!
src_dir = fs_factory.fs_dir.set(src_dir)!
mut docs_dir := fs_factory.fs_dir.new(
name: 'docs'
@@ -60,7 +60,7 @@ fn main() {
parent_id: root_dir.id
description: 'Documentation'
)!
fs_factory.fs_dir.set(mut docs_dir)!
docs_dir = fs_factory.fs_dir.set(docs_dir)!
mut assets_dir := fs_factory.fs_dir.new(
name: 'assets'
@@ -68,7 +68,7 @@ fn main() {
parent_id: root_dir.id
description: 'Project assets'
)!
fs_factory.fs_dir.set(mut assets_dir)!
assets_dir = fs_factory.fs_dir.set(assets_dir)!
// Subdirectories
mut images_dir := fs_factory.fs_dir.new(
@@ -77,7 +77,7 @@ fn main() {
parent_id: assets_dir.id
description: 'Image assets'
)!
fs_factory.fs_dir.set(mut images_dir)!
images_dir = fs_factory.fs_dir.set(images_dir)!
mut api_docs_dir := fs_factory.fs_dir.new(
name: 'api'
@@ -85,19 +85,19 @@ fn main() {
parent_id: docs_dir.id
description: 'API documentation'
)!
fs_factory.fs_dir.set(mut api_docs_dir)!
api_docs_dir = fs_factory.fs_dir.set(api_docs_dir)!
// Add directories to their parents
root_dir.directories << src_dir.id
root_dir.directories << docs_dir.id
root_dir.directories << assets_dir.id
fs_factory.fs_dir.set(mut root_dir)!
root_dir = fs_factory.fs_dir.set(root_dir)!
assets_dir.directories << images_dir.id
fs_factory.fs_dir.set(mut assets_dir)!
assets_dir = fs_factory.fs_dir.set(assets_dir)!
docs_dir.directories << api_docs_dir.id
fs_factory.fs_dir.set(mut docs_dir)!
docs_dir = fs_factory.fs_dir.set(docs_dir)!
println('Directory hierarchy created successfully')
@@ -107,7 +107,7 @@ fn main() {
// Text file for source code
code_content := 'fn main() {\n println("Hello, HeroFS!")\n}\n'.bytes()
mut code_blob := fs_factory.fs_blob.new(data: code_content)!
fs_factory.fs_blob.set(mut code_blob)!
code_blob = fs_factory.fs_blob.set(code_blob)!
mut code_file := fs_factory.fs_file.new(
name: 'main.v'
@@ -119,13 +119,13 @@ fn main() {
'version': '0.3.3'
}
)!
fs_factory.fs_file.set(mut code_file)!
code_file = fs_factory.fs_file.set(code_file)!
fs_factory.fs_file.add_to_directory(code_file.id, src_dir.id)!
// Markdown documentation file
docs_content := '# API Documentation\n\n## Endpoints\n\n- GET /api/v1/users\n- POST /api/v1/users\n'.bytes()
mut docs_blob := fs_factory.fs_blob.new(data: docs_content)!
fs_factory.fs_blob.set(mut docs_blob)!
docs_blob = fs_factory.fs_blob.set(docs_blob)!
mut docs_file := fs_factory.fs_file.new(
name: 'api.md'
@@ -133,14 +133,14 @@ fn main() {
blobs: [docs_blob.id]
mime_type: .md
)!
fs_factory.fs_file.set(mut docs_file)!
docs_file = fs_factory.fs_file.set(docs_file)!
fs_factory.fs_file.add_to_directory(docs_file.id, api_docs_dir.id)!
// Create a binary file (sample image)
// For this example, we'll just create random bytes
mut image_data := []u8{len: 1024, init: u8(index % 256)}
mut image_blob := fs_factory.fs_blob.new(data: image_data)!
fs_factory.fs_blob.set(mut image_blob)!
image_blob = fs_factory.fs_blob.set(image_blob)!
mut image_file := fs_factory.fs_file.new(
name: 'logo.png'
@@ -153,7 +153,7 @@ fn main() {
'format': 'PNG'
}
)!
fs_factory.fs_file.set(mut image_file)!
image_file = fs_factory.fs_file.set(image_file)!
fs_factory.fs_file.add_to_directory(image_file.id, images_dir.id)!
println('Files created successfully')
@@ -170,7 +170,7 @@ fn main() {
target_type: .directory
description: 'Shortcut to API documentation'
)!
fs_factory.fs_symlink.set(mut api_symlink)!
api_symlink = fs_factory.fs_symlink.set(api_symlink)!
// Symlink to the logo from the docs directory
mut logo_symlink := fs_factory.fs_symlink.new(
@@ -181,14 +181,14 @@ fn main() {
target_type: .file
description: 'Shortcut to project logo'
)!
fs_factory.fs_symlink.set(mut logo_symlink)!
logo_symlink = fs_factory.fs_symlink.set(logo_symlink)!
// Add symlinks to their parent directories
root_dir.symlinks << api_symlink.id
fs_factory.fs_dir.set(mut root_dir)!
root_dir = fs_factory.fs_dir.set(root_dir)!
docs_dir.symlinks << logo_symlink.id
fs_factory.fs_dir.set(mut docs_dir)!
docs_dir = fs_factory.fs_dir.set(docs_dir)!
println('Symlinks created successfully')
@@ -252,7 +252,7 @@ fn main() {
println('Appending content to API docs...')
additional_content := '\n## Authentication\n\nUse Bearer token for authentication.\n'.bytes()
mut additional_blob := fs_factory.fs_blob.new(data: additional_content)!
fs_factory.fs_blob.set(mut additional_blob)!
additional_blob = fs_factory.fs_blob.set(additional_blob)!
fs_factory.fs_file.append_blob(docs_file.id, additional_blob.id)!
// Demonstrate directory operations
@@ -265,11 +265,11 @@ fn main() {
parent_id: root_dir.id
description: 'Temporary directory'
)!
fs_factory.fs_dir.set(mut temp_dir)!
temp_dir = fs_factory.fs_dir.set(temp_dir)!
// Add to parent
root_dir.directories << temp_dir.id
fs_factory.fs_dir.set(mut root_dir)!
root_dir = fs_factory.fs_dir.set(root_dir)!
// Move temp directory under docs
println('Moving temp directory under docs...')

44
examples/hero/herofs/import_export_example.vsh Normal file → Executable file
View File

@@ -19,7 +19,7 @@ fn main() {
)!
// Save the filesystem
fs_factory.fs.set(mut my_fs)!
my_fs = fs_factory.fs.set(my_fs)!
println('Created filesystem: ${my_fs.name} with ID: ${my_fs.id}')
// Create root directory
@@ -28,9 +28,9 @@ fn main() {
fs_id: my_fs.id
parent_id: 0 // Root has no parent
)!
fs_factory.fs_dir.set(mut root_dir)!
root_dir = fs_factory.fs_dir.set(root_dir)!
my_fs.root_dir_id = root_dir.id
fs_factory.fs.set(mut my_fs)!
my_fs = fs_factory.fs.set(my_fs)!
// Get filesystem instance for operations
mut fs := fs_factory.fs.get(my_fs.id)!
@@ -47,7 +47,7 @@ fn main() {
test_file1 := os.join_path(test_dir, 'hello.txt')
test_file2 := os.join_path(test_dir, 'example.v')
test_file3 := os.join_path(test_dir, 'README.md')
// Create subdirectory with files
sub_dir := os.join_path(test_dir, 'docs')
os.mkdir_all(sub_dir)!
@@ -64,15 +64,15 @@ fn main() {
// Import single file
println('Importing single file: ${test_file1}')
fs.import(test_file1, '/imported_hello.txt', herofs.ImportOptions{
overwrite: true
overwrite: true
preserve_meta: true
})!
// Import entire directory recursively
println('Importing directory: ${test_dir}')
fs.import(test_dir, '/imported_files', herofs.ImportOptions{
recursive: true
overwrite: true
recursive: true
overwrite: true
preserve_meta: true
})!
@@ -112,20 +112,18 @@ fn main() {
// Export single file
println('Exporting single file to: ${export_dir}/exported_hello.txt')
fs.export('/imported_hello.txt', os.join_path(export_dir, 'exported_hello.txt'),
herofs.ExportOptions{
overwrite: true
preserve_meta: true
})!
fs.export('/imported_hello.txt', os.join_path(export_dir, 'exported_hello.txt'), herofs.ExportOptions{
overwrite: true
preserve_meta: true
})!
// Export entire directory
println('Exporting directory to: ${export_dir}/exported_files')
fs.export('/imported_files', os.join_path(export_dir, 'exported_files'),
herofs.ExportOptions{
recursive: true
overwrite: true
preserve_meta: true
})!
fs.export('/imported_files', os.join_path(export_dir, 'exported_files'), herofs.ExportOptions{
recursive: true
overwrite: true
preserve_meta: true
})!
// Verify exports
println('\nVerifying exported files...')
@@ -173,9 +171,7 @@ fn main() {
println('Testing import without overwrite (should fail)...')
fs.import(test_overwrite_file, '/overwrite_test.txt', herofs.ImportOptions{
overwrite: false
}) or {
println(' Import correctly failed when overwrite=false: ${err}')
}
}) or { println(' Import correctly failed when overwrite=false: ${err}') }
// Update file content and import with overwrite
os.write_file(test_overwrite_file, 'Updated content')!
@@ -186,7 +182,7 @@ fn main() {
// Test export overwrite behavior
export_test_file := os.join_path(export_dir, 'overwrite_export_test.txt')
// Export first time
fs.export('/overwrite_test.txt', export_test_file, herofs.ExportOptions{
overwrite: false
@@ -196,9 +192,7 @@ fn main() {
println('Testing export without overwrite (should fail)...')
fs.export('/overwrite_test.txt', export_test_file, herofs.ExportOptions{
overwrite: false
}) or {
println(' Export correctly failed when overwrite=false: ${err}')
}
}) or { println(' Export correctly failed when overwrite=false: ${err}') }
// Export with overwrite
fs.export('/overwrite_test.txt', export_test_file, herofs.ExportOptions{

View File

@@ -1,6 +1,7 @@
module herofs
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.data.ourtime
import freeflowuniverse.herolib.hero.db
// Fs represents a filesystem, is the top level container for files and directories and symlinks, blobs are used over filesystems
@@ -9,6 +10,7 @@ pub struct Fs {
db.Base
pub mut:
name string
group_id u32 // Associated group for permissions
root_dir_id u32 // ID of root directory
quota_bytes u64 // Storage quota in bytes
used_bytes u64 // Current usage in bytes
@@ -29,6 +31,7 @@ pub fn (self Fs) type_name() string {
pub fn (self Fs) dump(mut e encoder.Encoder) ! {
e.add_string(self.name)
e.add_u32(self.group_id)
e.add_u32(self.root_dir_id)
e.add_u64(self.quota_bytes)
e.add_u64(self.used_bytes)
@@ -36,6 +39,7 @@ pub fn (self Fs) dump(mut e encoder.Encoder) ! {
fn (mut self DBFs) load(mut o Fs, mut e encoder.Decoder) ! {
o.name = e.get_string()!
o.group_id = e.get_u32()!
o.root_dir_id = e.get_u32()!
o.quota_bytes = e.get_u64()!
o.used_bytes = e.get_u64()!
@@ -46,11 +50,12 @@ pub struct FsArg {
pub mut:
name string @[required]
description string
group_id u32
root_dir_id u32
quota_bytes u64
used_bytes u64
tags []string
comments []db.CommentArg
messages []db.MessageArg
}
// get new filesystem, not from the DB
@@ -63,6 +68,9 @@ pub fn (mut self DBFs) new(args FsArg) !Fs {
if args.description != '' {
o.description = args.description
}
if args.group_id != 0 {
o.group_id = args.group_id
}
if args.root_dir_id != 0 {
o.root_dir_id = args.root_dir_id
}
@@ -77,8 +85,8 @@ pub fn (mut self DBFs) new(args FsArg) !Fs {
if args.tags.len > 0 {
o.tags = self.db.tags_get(args.tags)!
}
if args.comments.len > 0 {
o.comments = self.db.comments_get(args.comments)!
if args.messages.len > 0 {
o.messages = self.db.messages_get(args.messages)!
}
return o
@@ -124,13 +132,13 @@ pub fn (mut self DBFs) new_get_set(args_ FsArg) !Fs {
o.tags = self.db.tags_get(args.tags)!
changes = true
}
if args.comments.len > 0 {
o.comments = self.db.comments_get(args.comments)!
if args.messages.len > 0 {
o.messages = self.db.messages_get(args.messages)!
changes = true
}
if changes {
o=self.set(o)!
o = self.set(o)!
}
return o
@@ -138,8 +146,8 @@ pub fn (mut self DBFs) new_get_set(args_ FsArg) !Fs {
pub fn (mut self DBFs) set(o Fs) !Fs {
mut o_mut := o
if o_mut.id==0{
o_mut.id=self.db.new_id()!
if o_mut.id == 0 {
o_mut.id = self.db.new_id()!
}
if o_mut.root_dir_id == 0 {
// If no root directory is set, create one
@@ -202,9 +210,25 @@ pub fn (mut self DBFs) get_by_name(name string) !Fs {
return self.get(id_str.u32())!
}
// Note: Filesystem usage tracking methods are not implemented yet
// These would be used for quota enforcement and storage monitoring
// Future implementation should use separate Redis structures for performance
// Increase used bytes counter
pub fn (mut self DBFs) increase_usage(id u32, bytes u64) ! {
mut fs := self.get(id)!
fs.used_bytes += bytes
fs.updated_at = ourtime.now().unix()
self.set(fs)!
}
// Decrease used bytes counter
pub fn (mut self DBFs) decrease_usage(id u32, bytes u64) ! {
mut fs := self.get(id)!
if fs.used_bytes >= bytes {
fs.used_bytes -= bytes
} else {
fs.used_bytes = 0
}
fs.updated_at = ourtime.now().unix()
self.set(fs)!
}
// Check if quota is exceeded
pub fn (mut self DBFs) check_quota(id u32, additional_bytes u64) !bool {

View File

@@ -13,6 +13,9 @@ pub mut:
hash string // blake192 hash of content
data []u8 // Binary data (max 1MB)
size_bytes int // Size in bytes
created_at i64
mime_type string // MIME type
encoding string // Encoding type
}
pub struct DBFsBlob {
@@ -29,18 +32,27 @@ pub fn (self FsBlob) dump(mut e encoder.Encoder) ! {
e.add_string(self.hash)
e.add_list_u8(self.data)
e.add_int(self.size_bytes)
e.add_i64(self.created_at)
e.add_string(self.mime_type)
e.add_string(self.encoding)
}
fn (mut self DBFsBlob) load(mut o FsBlob, mut e encoder.Decoder) ! {
o.hash = e.get_string()!
o.data = e.get_list_u8()!
o.size_bytes = e.get_int()!
o.created_at = e.get_i64()!
o.mime_type = e.get_string()!
o.encoding = e.get_string()!
}
@[params]
pub struct FsBlobArg {
pub mut:
data []u8 @[required]
data []u8 @[required]
mime_type string
encoding string
created_at i64
}
pub fn (mut blob FsBlob) calculate_hash() {
@@ -57,6 +69,9 @@ pub fn (mut self DBFsBlob) new(args FsBlobArg) !FsBlob {
mut o := FsBlob{
data: args.data
size_bytes: args.data.len
created_at: if args.created_at != 0 { args.created_at } else { ourtime.now().unix() }
mime_type: args.mime_type
encoding: args.encoding
}
// Calculate hash

View File

@@ -33,7 +33,6 @@ fn test_basic() ! {
println(root_dir)
panic('sd')
// Create test blob for membership
test_data := 'This is test content for blob membership'.bytes()
mut test_blob := fs_factory.fs_blob.new(data: test_data)!
@@ -241,7 +240,7 @@ fn test_validation() ! {
)!
// Try to save it, which should fail
test_membership=fs_factory.fs_blob_membership.set(test_membership) or {
test_membership = fs_factory.fs_blob_membership.set(test_membership) or {
println(' Membership set correctly failed with non-existent blob')
return
}
@@ -264,7 +263,7 @@ fn test_validation() ! {
)!
// Try to save it, which should fail
test_membership2=fs_factory.fs_blob_membership.set(test_membership2) or {
test_membership2 = fs_factory.fs_blob_membership.set(test_membership2) or {
println(' Membership set correctly failed with non-existent filesystem')
return
}

View File

@@ -83,7 +83,7 @@ pub mut:
fs_id u32 @[required]
parent_id u32
tags []string
comments []db.CommentArg
messages []db.MessageArg
directories []u32
files []u32
symlinks []u32
@@ -103,7 +103,7 @@ pub fn (mut self DBFsDir) new(args FsDirArg) !FsDir {
// Set base fields
o.tags = self.db.tags_get(args.tags)!
o.comments = self.db.comments_get(args.comments)!
o.messages = self.db.messages_get(args.messages)!
o.created_at = ourtime.now().unix()
o.updated_at = o.created_at

View File

@@ -9,12 +9,14 @@ import freeflowuniverse.herolib.hero.db
pub struct FsFile {
db.Base
pub mut:
fs_id u32 // Associated filesystem
blobs []u32 // IDs of file content blobs
size_bytes u64
mime_type MimeType
checksum string // e.g., checksum of the file, needs to be calculated is blake 192
metadata map[string]string // Custom metadata
fs_id u32 // Associated filesystem
directories []u32 // Directory IDs where this file exists
blobs []u32 // IDs of file content blobs
size_bytes u64
mime_type MimeType
checksum string // e.g., checksum of the file, needs to be calculated is blake 192
accessed_at i64
metadata map[string]string // Custom metadata
}
pub struct DBFsFile {
@@ -29,6 +31,13 @@ pub fn (self FsFile) type_name() string {
pub fn (self FsFile) dump(mut e encoder.Encoder) ! {
e.add_u32(self.fs_id)
// Handle directories
e.add_u16(u16(self.directories.len))
for dir_id in self.directories {
e.add_u32(dir_id)
}
// Handle blobs
e.add_u16(u16(self.blobs.len))
for blob_id in self.blobs {
@@ -38,6 +47,7 @@ pub fn (self FsFile) dump(mut e encoder.Encoder) ! {
e.add_u64(self.size_bytes)
e.add_u8(u8(self.mime_type)) // ADD: Serialize mime_type as u8
e.add_string(self.checksum)
e.add_i64(self.accessed_at)
// Handle metadata map
e.add_u16(u16(self.metadata.len))
@@ -50,6 +60,13 @@ pub fn (self FsFile) dump(mut e encoder.Encoder) ! {
fn (mut self DBFsFile) load(mut o FsFile, mut e encoder.Decoder) ! {
o.fs_id = e.get_u32()!
// Load directories
directories_count := e.get_u16()!
o.directories = []u32{cap: int(directories_count)}
for _ in 0 .. directories_count {
o.directories << e.get_u32()!
}
// Load blobs
blobs_count := e.get_u16()!
o.blobs = []u32{cap: int(blobs_count)}
@@ -60,6 +77,7 @@ fn (mut self DBFsFile) load(mut o FsFile, mut e encoder.Decoder) ! {
o.size_bytes = e.get_u64()!
o.mime_type = unsafe { MimeType(e.get_u8()!) } // ADD: Deserialize mime_type
o.checksum = e.get_string()!
o.accessed_at = e.get_i64()!
// Load metadata map
metadata_count := e.get_u16()!
@@ -77,13 +95,15 @@ pub mut:
name string @[required]
description string
fs_id u32 @[required]
directories []u32
blobs []u32
size_bytes u64
mime_type MimeType // Changed from string to MimeType enum
checksum string
accessed_at i64
metadata map[string]string
tags []string
comments []db.CommentArg
messages []db.MessageArg
}
// get new file, not from the DB
@@ -112,19 +132,21 @@ pub fn (mut self DBFsFile) new(args FsFileArg) !FsFile {
}
mut o := FsFile{
name: args.name
fs_id: args.fs_id
blobs: args.blobs
size_bytes: size
mime_type: args.mime_type // ADD: Set mime_type
checksum: args.checksum
metadata: args.metadata
name: args.name
fs_id: args.fs_id
directories: args.directories
blobs: args.blobs
size_bytes: size
mime_type: args.mime_type // ADD: Set mime_type
checksum: args.checksum
accessed_at: if args.accessed_at != 0 { args.accessed_at } else { ourtime.now().unix() }
metadata: args.metadata
}
// Set base fields
o.description = args.description
o.tags = self.db.tags_get(args.tags)!
o.comments = self.db.comments_get(args.comments)!
o.messages = self.db.messages_get(args.messages)!
o.updated_at = ourtime.now().unix()
return o

View File

@@ -57,7 +57,7 @@ pub mut:
target_id u32 @[required]
target_type SymlinkTargetType @[required]
tags []string
comments []db.CommentArg
messages []db.MessageArg
}
// get new symlink, not from the DB
@@ -73,7 +73,7 @@ pub fn (mut self DBFsSymlink) new(args FsSymlinkArg) !FsSymlink {
// Set base fields
o.description = args.description
o.tags = self.db.tags_get(args.tags)!
o.comments = self.db.comments_get(args.comments)!
o.messages = self.db.messages_get(args.messages)!
o.updated_at = ourtime.now().unix()
return o

View File

@@ -92,21 +92,16 @@ pub fn (mut self Fs) find(start_path string, opts FindOptions) ![]FindResult {
// - Symlinks: Symbolic links in the current directory (handled according to opts.follow_symlinks)
// - Directories: Subdirectories of the current directory (recursed into according to opts.recursive)
fn (mut self Fs) find_recursive(dir_id u32, current_path string, opts FindOptions, mut results []FindResult, current_depth int) ! {
println('DEBUG: find_recursive called with dir_id=${dir_id}, current_path="${current_path}", current_depth=${current_depth}')
// Check depth limit
if opts.max_depth >= 0 && current_depth > opts.max_depth {
println('DEBUG: Max depth reached, returning')
return
}
// Get current directory info
current_dir := self.factory.fs_dir.get(dir_id)!
println('DEBUG: Got directory "${current_dir.name}" with ${current_dir.files.len} files, ${current_dir.directories.len} directories, ${current_dir.symlinks.len} symlinks')
// Check if current directory matches search criteria
if should_include(current_dir.name, opts.include_patterns, opts.exclude_patterns) {
println('DEBUG: Including directory "${current_dir.name}" in results')
results << FindResult{
result_type: .directory
id: dir_id
@@ -116,11 +111,9 @@ fn (mut self Fs) find_recursive(dir_id u32, current_path string, opts FindOption
// Get files in current directory
for file_id in current_dir.files {
println('DEBUG: Processing file ID ${file_id}')
file := self.factory.fs_file.get(file_id)!
if should_include(file.name, opts.include_patterns, opts.exclude_patterns) {
file_path := join_path(current_path, file.name)
println('DEBUG: Including file "${file.name}" in results')
results << FindResult{
result_type: .file
id: file.id
@@ -201,14 +194,12 @@ fn (mut self Fs) find_recursive(dir_id u32, current_path string, opts FindOption
}
for dir_id2 in current_dir.directories {
println('DEBUG: Found child directory ID ${dir_id2} in directory ${dir_id}')
subdir := self.factory.fs_dir.get(dir_id2)!
subdir_path := join_path(current_path, subdir.name)
// Include child directories in results if they match patterns
if should_include(subdir.name, opts.include_patterns, opts.exclude_patterns) {
if !opts.recursive {
println('DEBUG: Including directory "${subdir.name}" in results')
results << FindResult{
result_type: .directory
id: subdir.id
@@ -220,12 +211,9 @@ fn (mut self Fs) find_recursive(dir_id u32, current_path string, opts FindOption
// Always recurse into directories when recursive is true, regardless of patterns
// The patterns apply to what gets included in results, not to traversal
if opts.recursive {
println('DEBUG: Processing directory "${subdir.name}"')
self.find_recursive(dir_id2, subdir_path, opts, mut results, current_depth + 1)!
}
}
println('DEBUG: find_recursive finished with ${results.len} results')
}
// get_dir_by_absolute_path resolves an absolute path to a directory ID
@@ -241,15 +229,11 @@ fn (mut self Fs) find_recursive(dir_id u32, current_path string, opts FindOption
// dir := tools.get_dir_by_absolute_path('/home/user/documents')!
// ```
pub fn (mut self Fs) get_dir_by_absolute_path(path string) !FsDir {
println('DEBUG: get_dir_by_absolute_path called with path "${path}"')
normalized_path_ := normalize_path(path)
println('DEBUG: normalized_path_ = "${normalized_path_}"')
// Handle root directory case
if normalized_path_ == '/' {
println('DEBUG: Handling root directory case')
fs := self.factory.fs.get(self.id)!
println('DEBUG: fs.root_dir_id = ${fs.root_dir_id}')
return self.factory.fs_dir.get(fs.root_dir_id)!
}