webdav completion wip

Author: timurgordon
Date: 2025-03-13 04:34:10 +01:00
Parent: ff430c2e4d
Commit: a974091442
12 changed files with 765 additions and 156 deletions

View File

@@ -12,6 +12,7 @@ mut:
	file_create(path string) !FSEntry
	file_read(path string) ![]u8
	file_write(path string, data []u8) !
+	file_concatenate(path string, data []u8) !
	file_delete(path string) !
	// Directory operations
@@ -34,6 +35,8 @@ mut:
	// FSEntry Operations
	get_path(entry &FSEntry) !string
	print() !
+	// Cleanup operation
+	destroy() !
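
For context, a minimal sketch of how a caller might exercise the two new interface members (new_vfs is a hypothetical constructor; any type implementing this interface would do):

	mut fs := new_vfs()! // hypothetical constructor returning an implementation
	// append bytes to an existing file (implementations may create it if missing)
	fs.file_concatenate('/logs/app.log', 'one more line\n'.bytes())!
	// release underlying resources when done
	fs.destroy()!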

View File

@@ -164,9 +164,9 @@ pub fn (mut fs DatabaseVFS) root_get_as_dir() !&Directory {
	id: fs.get_next_id()
	file_type: .directory
	name: ''
-	created_at: time.now().unix()
-	modified_at: time.now().unix()
-	accessed_at: time.now().unix()
+	created_at: time.utc().unix()
+	modified_at: time.utc().unix()
+	accessed_at: time.utc().unix()
	mode: 0o755 // default directory permissions
	owner: 'user' // TODO: get from system
	group: 'user' // TODO: get from system
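
The move from time.now() to time.utc() plausibly exists because V's Time value holds broken-down fields without an offset, so unix() on a local time can be shifted by the host timezone (this rationale is inferred, not stated in the commit). A small illustration of the difference:

	import time

	fn main() {
		// On a host at UTC+01:00 these two values can differ by 3600 seconds
		println(time.now().unix()) // local wall-clock fields
		println(time.utc().unix()) // timezone-independent
	}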

View File

@@ -48,7 +48,7 @@ fn test_directory_encoder_decoder() ! {
fn test_file_encoder_decoder() ! {
	println('Testing encoding/decoding files...')
-	current_time := time.now().unix()
+	current_time := time.utc().unix()
	file := File{
		metadata: vfs.Metadata{
			id: u32(current_time)

View File

@@ -144,7 +144,10 @@ pub fn (mut fs DatabaseVFS) directory_rm(mut dir Directory, name string) ! {
	// delete file chunks in data_db
	for id in file.chunk_ids {
		log.debug('[DatabaseVFS] Deleting chunk ${id}')
-		fs.db_data.delete(id)!
+		fs.db_data.delete(id) or {
+			log.error('Failed to delete chunk ${id}: ${err}')
+			return error('Failed to delete chunk ${id}: ${err}')
+		}
	}
	log.debug('[DatabaseVFS] Deleting file metadata ${file.metadata.id}')
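
The `!` suffix propagates a failure unchanged, while the explicit `or` block exposes `err` so the chunk id can be logged and wrapped with context. A generic sketch of the two styles, with delete_one standing in for fs.db_data.delete:

	fn delete_one(id u32) ! {
		// stand-in for a fallible delete
	}

	fn delete_all(ids []u32) ! {
		for id in ids {
			// delete_one(id)! // terse form: propagates err as-is
			delete_one(id) or { return error('Failed to delete chunk ${id}: ${err}') }
		}
	}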
@@ -288,15 +291,26 @@ pub fn (mut fs DatabaseVFS) directory_copy(mut dir Directory, args_ CopyDirArgs)
	found = true
	if entry is File {
		mut file_entry := entry as File
+		mut file_data := []u8{}
+		// log.debug('[DatabaseVFS] Got database chunk ids ${chunk_ids}')
+		for id in file_entry.chunk_ids {
+			// there were chunk ids stored with file so file has data
+			if chunk_bytes := fs.db_data.get(id) {
+				file_data << chunk_bytes
+			} else {
+				return error('Failed to fetch file data: ${err}')
+			}
+		}
		mut new_file := File{
			...file_entry,
			metadata: Metadata{...file_entry.metadata,
				id: fs.get_next_id()
				name: args.dst_entry_name
			}
			parent_id: args.dst_parent_dir.metadata.id
		}
-		fs.save_entry(new_file)!
+		fs.save_file(new_file, file_data)!
		args.dst_parent_dir.children << new_file.metadata.id
		fs.save_entry(args.dst_parent_dir)!
		return args.dst_parent_dir
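
Since new_file is built by spreading file_entry, the old save_entry call left the copy referencing the source's chunk ids, so deleting either file's chunks would corrupt the other; the new code reads the bytes back and passes them to save_file, which appears to re-store them under chunk ids owned by the copy:

	// old: fs.save_entry(new_file)!            // metadata only; chunk ids still shared with the source
	// new: fs.save_file(new_file, file_data)!  // bytes re-stored; source and copy are independent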

View File

@@ -2,6 +2,7 @@ module vfs_db
import freeflowuniverse.herolib.vfs
import freeflowuniverse.herolib.core.texttools
+import arrays
import log
import os
import time
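
The new import supports chunked writes: arrays.chunk splits a slice into fixed-size pieces, with the last piece possibly shorter. A quick self-contained illustration:

	import arrays

	fn main() {
		parts := arrays.chunk([1, 2, 3, 4, 5], 2)
		println(parts) // [[1, 2], [3, 4], [5]]
	}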
@@ -49,7 +50,6 @@ pub fn (mut self DatabaseVFS) file_read(path_ string) ![]u8 {
pub fn (mut self DatabaseVFS) file_write(path_ string, data []u8) ! {
	path := texttools.path_fix(path_)
	if mut entry := self.get_entry(path) {
		if mut entry is File {
			log.info('[DatabaseVFS] Writing ${data.len} bytes to ${path}')
@@ -59,7 +59,7 @@ pub fn (mut self DatabaseVFS) file_write(path_ string, data []u8) ! {
		} else {
			panic('handle error')
		}
-		} else {
+	} else {
		self.file_create(path) or {
			return error('Failed to create file: ${err}')
		}
@@ -67,6 +67,62 @@ pub fn (mut self DatabaseVFS) file_write(path_ string, data []u8) ! {
	}
}
+pub fn (mut self DatabaseVFS) file_concatenate(path_ string, data []u8) ! {
+	path := texttools.path_fix(path_)
+	if data.len == 0 {
+		return // Nothing to append
+	}
+	if mut entry := self.get_entry(path) {
+		if mut entry is File {
+			log.info('[DatabaseVFS] Appending ${data.len} bytes to ${path}')
+			// Split new data into chunks just under 64 KB (65535 bytes)
+			chunks := arrays.chunk(data, (64 * 1024) - 1)
+			mut chunk_ids := entry.chunk_ids.clone() // Start with existing chunk IDs
+			// Add new chunks
+			for chunk in chunks {
+				chunk_id := self.db_data.set(data: chunk) or {
+					return error('Failed to save file data chunk: ${err}')
+				}
+				chunk_ids << chunk_id
+				log.debug('[DatabaseVFS] Added chunk ${chunk_id} to ${path}')
+			}
+			// Update the file with new chunk IDs and updated size
+			updated_file := File{
+				metadata: vfs.Metadata{
+					...entry.metadata
+					size: entry.metadata.size + u64(data.len)
+					modified_at: time.utc().unix()
+				}
+				chunk_ids: chunk_ids
+				parent_id: entry.parent_id
+			}
+			// Encode the file with all its metadata
+			metadata_bytes := updated_file.encode()
+			// Save the metadata_bytes to metadata_db
+			metadata_db_id := self.db_metadata.set(data: metadata_bytes) or {
+				return error('Failed to save file metadata on id:${entry.metadata.id}: ${err}')
+			}
+			self.id_table[entry.metadata.id] = metadata_db_id
+		} else {
+			return error('Not a file: ${path}')
+		}
+	} else {
+		// If file doesn't exist, create it first
+		self.file_create(path) or {
+			return error('Failed to create file: ${err}')
+		}
+		// Then write data to it
+		self.file_write(path, data)!
+	}
+}
pub fn (mut self DatabaseVFS) file_delete(path string) ! {
	log.info('[DatabaseVFS] Deleting file ${path}')
	parent_path := os.dir(path)
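
A minimal usage sketch for the new append path (the constructor name is hypothetical; only the method calls appear in this commit):

	mut fs := new_database_vfs()! // hypothetical DatabaseVFS constructor
	fs.file_write('/notes.txt', 'hello'.bytes())!
	fs.file_concatenate('/notes.txt', ' world'.bytes())!
	content := fs.file_read('/notes.txt')!
	assert content.bytestr() == 'hello world'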