fixes to chunked file data processing

This commit is contained in:
timurgordon
2025-03-08 01:52:53 +01:00
parent afad769066
commit 01cac0f741
10 changed files with 447 additions and 388 deletions

View File

@@ -0,0 +1,77 @@
module vfs_db
import arrays
import freeflowuniverse.herolib.vfs
import freeflowuniverse.herolib.data.ourdb
import freeflowuniverse.herolib.data.encoder
import time
import log
// DatabaseVFS represents the virtual filesystem
// Backed by two Database instances: one for raw file-data chunks (db_data)
// and one for entry metadata — directories, files, symlinks (db_metadata).
@[heap]
pub struct DatabaseVFS {
pub mut:
	root_id          u32 // ID of root directory
	block_size       u32 // Size of data blocks in bytes
	db_data          &Database @[str: skip] // Database instance for file data storage
	db_metadata      &Database @[str: skip] // Database instance for metadata storage
	last_inserted_id u32 // highest VFS id handed out so far; advanced by get_next_id
	id_table         map[u32]u32 // maps VFS entry ids to database record ids
}
// Database is the minimal key-value storage interface DatabaseVFS needs.
// set takes ourdb.OurDBSetArgs (optional id + data) and returns the record
// id the data was stored under.
pub interface Database {
mut:
	get(id u32) ![]u8 // fetch record bytes by id; error if missing
	set(ourdb.OurDBSetArgs) !u32 // store data, returns the record id
	delete(id u32) ! // remove a record by id
}
// get_next_id hands out the next auto-incrementing VFS entry id.
// Not safe for concurrent use: the counter is a plain field on the struct.
pub fn (mut fs DatabaseVFS) get_next_id() u32 {
	fs.last_inserted_id++
	return fs.last_inserted_id
}
// load_entry loads an entry from the database by ID and sets up parent references
// loads without data
// The VFS id is first translated to a database record id via get_database_id,
// then the record's type byte (see decode_entry_type) selects the decoder.
// File entries come back with chunk ids only; the chunk bytes stay in db_data.
fn (mut fs DatabaseVFS) load_entry(vfs_id u32) !FSEntry {
	if metadata := fs.db_metadata.get(fs.get_database_id(vfs_id)!) {
		match decode_entry_type(metadata)! {
			.directory {
				mut dir := decode_directory(metadata) or {
					return error('Failed to decode directory: ${err}')
				}
				return dir
			}
			.file {
				return decode_file_metadata(metadata) or { return error('Failed to decode file: ${err}') }
			}
			.symlink {
				mut symlink := decode_symlink(metadata) or {
					return error('Failed to decode symlink: ${err}')
				}
				return symlink
			}
		}
	} else {
		// get failed: either the id mapping or the metadata record is missing
		return error('Entry ${vfs_id} not found ${err}')
	}
}
// fn (mut fs DatabaseVFS) file_read(file File) ![]u8 {
// metadata := fs.db_metadata.get(fs.get_database_id(file.metadata.id)!) or {
// return error('Failed to get file metadata ${err}')
// }
// _, chunk_ids := decode_file_metadata(metadata) or { return error('Failed to decode file: ${err}') }
// mut file_data := []u8{}
// // log.debug('[DatabaseVFS] Got database chunk ids ${chunk_ids}')
// for id in chunk_ids {
// // there were chunk ids stored with file so file has data
// if chunk_bytes := fs.db_data.get(id) {
// file_data << chunk_bytes
// } else {
// return error('Failed to fetch file data: ${err}')
// }
// }
// return file_data
// }

View File

@@ -0,0 +1,84 @@
module vfs_db
import arrays
import freeflowuniverse.herolib.vfs
import freeflowuniverse.herolib.data.ourdb
import freeflowuniverse.herolib.data.encoder
import time
import log
// save_entry saves an entry to the database
// Encodes the entry, stores it in db_metadata, records the VFS-id ->
// database-record-id mapping, and returns the entry's VFS id.
// Note: for File entries only the metadata (incl. chunk ids) is written here;
// the data chunks themselves are stored by save_file.
pub fn (mut fs DatabaseVFS) save_entry(entry FSEntry) !u32 {
	match entry {
		Directory {
			encoded := entry.encode()
			db_id := fs.db_metadata.set(id: entry.metadata.id, data: encoded) or {
				return error('Failed to save directory on id:${entry.metadata.id}: ${err}')
			}
			// sanity check: every child id the directory references must resolve
			for child_id in entry.children {
				_ := fs.db_metadata.get(fs.get_database_id(child_id)!) or {
					return error('Failed to get entry for directory child ${child_id} missing.\n${err}')
				}
			}
			log.debug('[DatabaseVFS] Saving dir entry with children ${entry.children}')
			fs.set_database_id(entry.metadata.id, db_id)!
			return entry.metadata.id
		}
		File {
			metadata_bytes := entry.encode()
			// Save the metadata_bytes to metadata_db
			metadata_db_id := fs.db_metadata.set(id: entry.metadata.id, data: metadata_bytes) or {
				return error('Failed to save file metadata on id:${entry.metadata.id}: ${err}')
			}
			fs.set_database_id(entry.metadata.id, metadata_db_id)!
			return entry.metadata.id
		}
		Symlink {
			encoded := entry.encode()
			db_id := fs.db_metadata.set(id: entry.metadata.id, data: encoded) or {
				return error('Failed to save symlink on id:${entry.metadata.id}: ${err}')
			}
			fs.set_database_id(entry.metadata.id, db_id)!
			return entry.metadata.id
		}
	}
}
// save_file saves a file and its data to the database, returning the file's
// new VFS id. The file is assigned a fresh id; non-empty data is split into
// 64KB chunks stored in db_data, and the resulting chunk ids (plus the real
// size) are encoded with the file metadata in db_metadata.
pub fn (mut fs DatabaseVFS) save_file(file_ File, data []u8) !u32 {
	// copy the incoming file with a freshly allocated VFS id
	file := File{
		...file_
		metadata: vfs.Metadata{
			...file_.metadata
			id: fs.get_next_id()
		}
	}
	metadata_bytes := if data.len == 0 {
		// empty file: no chunks to store, encode metadata as-is
		file.encode()
	} else {
		// file has data so that will be stored in data_db
		// its corresponding id stored with file metadata
		// split data_encoded into chunks of 64 kb
		chunks := arrays.chunk(data, 64 * 1024)
		mut chunk_ids := []u32{}
		for chunk in chunks {
			chunk_ids << fs.db_data.set(data: chunk) or {
				return error('Failed to save file data on id:${file.metadata.id}: ${err}')
			}
		}
		// record the chunk ids and the actual data size in the metadata
		new_file := File{
			...file
			metadata: vfs.Metadata{
				...file.metadata
				size: u64(data.len)
			}
			chunk_ids: chunk_ids
		}
		// BUGFIX: encode the updated file (with chunk ids and size), not the
		// original `file` — encoding `file` here discarded the chunk ids, so
		// the data could never be located again on read.
		new_file.encode()
	}
	// Save the metadata_bytes to metadata_db
	metadata_db_id := fs.db_metadata.set(id: file.metadata.id, data: metadata_bytes) or {
		return error('Failed to save file metadata on id:${file.metadata.id}: ${err}')
	}
	fs.set_database_id(file.metadata.id, metadata_db_id)!
	return file.metadata.id
}

View File

@@ -3,73 +3,6 @@ module vfs_db
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.vfs
// encode_metadata encodes the common metadata structure
fn encode_metadata(mut e encoder.Encoder, m vfs.Metadata) {
e.add_u32(m.id)
e.add_string(m.name)
e.add_string(m.path)
e.add_u8(u8(m.file_type)) // FileType enum as u8
e.add_u64(m.size)
e.add_i64(m.created_at)
e.add_i64(m.modified_at)
e.add_i64(m.accessed_at)
e.add_u32(m.mode)
e.add_string(m.owner)
e.add_string(m.group)
}
// decode_metadata decodes the common metadata structure
fn decode_metadata(mut d encoder.Decoder) !vfs.Metadata {
id := d.get_u32()!
name := d.get_string()!
path := d.get_string()!
file_type_byte := d.get_u8()!
size := d.get_u64()!
created_at := d.get_i64()!
modified_at := d.get_i64()!
accessed_at := d.get_i64()!
mode := d.get_u32()!
owner := d.get_string()!
group := d.get_string()!
return vfs.Metadata{
id: id
name: name
path: path
file_type: unsafe { vfs.FileType(file_type_byte) }
size: size
created_at: created_at
modified_at: modified_at
accessed_at: accessed_at
mode: mode
owner: owner
group: group
}
}
// Directory encoding/decoding
// encode encodes a Directory to binary format
pub fn (dir Directory) encode() []u8 {
mut e := encoder.new()
e.add_u8(1) // version byte
e.add_u8(u8(vfs.FileType.directory)) // type byte
// Encode metadata
encode_metadata(mut e, dir.metadata)
// Encode parent_id
e.add_u32(dir.parent_id)
// Encode children IDs
e.add_u16(u16(dir.children.len))
for child_id in dir.children {
e.add_u32(child_id)
}
return e.data
}
// decode_directory decodes a binary format back to Directory
pub fn decode_directory(data []u8) !Directory {
mut d := encoder.decoder_new(data)
@@ -104,34 +37,10 @@ pub fn decode_directory(data []u8) !Directory {
}
}
// File encoding/decoding
// encode encodes a File metadata to binary format (without the actual file data)
pub fn (f File) encode(data_db_id ?u32) []u8 {
mut e := encoder.new()
e.add_u8(1) // version byte
e.add_u8(u8(vfs.FileType.file)) // type byte
// Encode metadata
encode_metadata(mut e, f.metadata)
// Encode parent_id
e.add_u32(f.parent_id)
if id := data_db_id {
// only encode data_db_id if it's given
// if file has no data, it also doesn't have corresponding id in data_db
e.add_u32(id)
}
// Note: We no longer encode file data here
// The data ID will be appended by the save_entry function
return e.data
}
// decode_file decodes a binary format back to File (without the actual file data)
// returns file without data and the key of data in data db
pub fn decode_file_metadata(data []u8) !(File, ?u32) {
// returns file without data and the sequence of keys of chunks of data in data db
pub fn decode_file_metadata(data []u8) !File {
mut d := encoder.decoder_new(data)
version := d.get_u8()!
if version != 1 {
@@ -149,41 +58,34 @@ pub fn decode_file_metadata(data []u8) !(File, ?u32) {
// Decode parent_id
parent_id := d.get_u32()!
decoded_file := File{
mut chunk_ids := []u32{}
if metadata.size == 0 {
blocksize := d.get_u16() or {
return error('Failed to get block size ${err}')
}
if blocksize != 0 {
return error('File data is empty, expected zero block size')
}
// means there was no data_db ids stored with file, so is empty file
} else {
// Decode data_db block ID's
// if data isn't empty, we expect a blocksize byte
// blocksize is max 2 bytes, so max 4gb entry size
blocksize := d.get_u16()!
for i in 0 .. blocksize {
chunk_ids << d.get_u32() or {
return error('Failed to get block id ${err}')
}
}
}
return File{
metadata: metadata
parent_id: parent_id
data: '' // Empty data, will be loaded separately
chunk_ids: chunk_ids
}
if d.data.len == 0 {
return decoded_file, none
// means there was no data_db id stored with file, so is empty file
}
// Decode data ID reference
// This will be used to fetch the actual data from db_data
data_id := d.get_u32()!
return decoded_file, data_id
}
// Symlink encoding/decoding
// encode encodes a Symlink to binary format
pub fn (sl Symlink) encode() []u8 {
mut e := encoder.new()
e.add_u8(1) // version byte
e.add_u8(u8(vfs.FileType.symlink)) // type byte
// Encode metadata
encode_metadata(mut e, sl.metadata)
// Encode parent_id
e.add_u32(sl.parent_id)
// Encode target path
e.add_string(sl.target)
return e.data
}
// decode_symlink decodes a binary format back to Symlink
pub fn decode_symlink(data []u8) !Symlink {
@@ -213,3 +115,40 @@ pub fn decode_symlink(data []u8) !Symlink {
target: target
}
}
// decode_metadata decodes the common metadata structure
// Field order must mirror encode_metadata exactly — this is the wire format.
fn decode_metadata(mut d encoder.Decoder) !vfs.Metadata {
	id := d.get_u32()!
	name := d.get_string()!
	path := d.get_string()!
	file_type_byte := d.get_u8()!
	size := d.get_u64()!
	created_at := d.get_i64()!
	modified_at := d.get_i64()!
	accessed_at := d.get_i64()!
	mode := d.get_u32()!
	owner := d.get_string()!
	group := d.get_string()!
	return vfs.Metadata{
		id: id
		name: name
		path: path
		// NOTE(review): unchecked cast — a corrupt byte yields an invalid
		// FileType value; consider validating the range first
		file_type: unsafe { vfs.FileType(file_type_byte) }
		size: size
		created_at: created_at
		modified_at: modified_at
		accessed_at: accessed_at
		mode: mode
		owner: owner
		group: group
	}
}
// decode_entry_type reads the entry type from an encoded record.
// Byte 0 is the version byte, byte 1 is the FileType tag (see the encode
// functions); only the length is validated here.
fn decode_entry_type(data []u8) !vfs.FileType {
	if data.len < 2 {
		return error('Corrupt metadata bytes')
	}
	// NOTE(review): unchecked cast — an out-of-range tag byte produces an
	// invalid FileType; confirm callers tolerate that or add a range check
	return unsafe { vfs.FileType(data[1]) }
}

80
lib/vfs/vfs_db/encode.v Normal file
View File

@@ -0,0 +1,80 @@
module vfs_db
import freeflowuniverse.herolib.data.encoder
import freeflowuniverse.herolib.vfs
// encode_metadata encodes the common metadata structure
// The field order below defines the wire format and must match
// decode_metadata exactly; do not reorder.
fn encode_metadata(mut e encoder.Encoder, m vfs.Metadata) {
	e.add_u32(m.id)
	e.add_string(m.name)
	e.add_string(m.path)
	e.add_u8(u8(m.file_type)) // FileType enum as u8
	e.add_u64(m.size)
	e.add_i64(m.created_at)
	e.add_i64(m.modified_at)
	e.add_i64(m.accessed_at)
	e.add_u32(m.mode)
	e.add_string(m.owner)
	e.add_string(m.group)
}
// encode encodes a Directory to binary format
// Layout: version byte, type byte, metadata, parent_id, u16 child count,
// then one u32 per child id. Must stay in sync with decode_directory.
pub fn (dir Directory) encode() []u8 {
	mut e := encoder.new()
	e.add_u8(1) // version byte
	e.add_u8(u8(vfs.FileType.directory)) // type byte
	// Encode metadata
	encode_metadata(mut e, dir.metadata)
	// Encode parent_id
	e.add_u32(dir.parent_id)
	// Encode children IDs
	// NOTE(review): child count is truncated to u16 — directories with more
	// than 65535 children would encode incorrectly; confirm this bound
	e.add_u16(u16(dir.children.len))
	for child_id in dir.children {
		e.add_u32(child_id)
	}
	return e.data
}
// File encoding/decoding
// encode encodes a File metadata to binary format (without the actual file data)
// Layout: version byte, type byte, metadata, parent_id, u16 chunk count,
// then one u32 db_data id per chunk. Must stay in sync with
// decode_file_metadata; the chunk bytes themselves live in db_data.
pub fn (f File) encode() []u8 {
	mut e := encoder.new()
	e.add_u8(1) // version byte
	e.add_u8(u8(vfs.FileType.file)) // type byte
	// Encode metadata
	encode_metadata(mut e, f.metadata)
	// Encode parent_id
	e.add_u32(f.parent_id)
	// Encode blocksize and block ids
	// if file has no data, it also should have zero block size
	e.add_u16(u16(f.chunk_ids.len))
	for id in f.chunk_ids {
		e.add_u32(id)
	}
	return e.data
}
// encode encodes a Symlink to binary format
// Layout: version byte, type byte, metadata, parent_id, target path string.
// Must stay in sync with decode_symlink.
pub fn (sl Symlink) encode() []u8 {
	mut e := encoder.new()
	e.add_u8(1) // version byte
	e.add_u8(u8(vfs.FileType.symlink)) // type byte
	// Encode metadata
	encode_metadata(mut e, sl.metadata)
	// Encode parent_id
	e.add_u32(sl.parent_id)
	// Encode target path
	e.add_string(sl.target)
	return e.data
}

View File

@@ -11,6 +11,7 @@ fn test_directory_encoder_decoder() ! {
dir := Directory{
metadata: vfs.Metadata{
id: u32(current_time)
path: '/root'
name: 'root'
file_type: .directory
created_at: current_time
@@ -53,6 +54,7 @@ fn test_file_encoder_decoder() ! {
metadata: vfs.Metadata{
id: u32(current_time)
name: 'test.txt'
path: '/test.txt'
file_type: .file
created_at: current_time
modified_at: current_time
@@ -61,13 +63,14 @@ fn test_file_encoder_decoder() ! {
owner: 'user'
group: 'user'
}
data: 'Hello, world!'
// data: 'Hello, world!'
parent_id: 0
chunk_ids: [u32(12), u32(27)]
}
encoded := file.encode()
mut decoded := decode_file(encoded) or { return error('Failed to decode file: ${err}') }
decoded := decode_file_metadata(encoded) or { return error('Failed to decode file: ${err}') }
assert decoded.metadata.id == file.metadata.id
assert decoded.metadata.name == file.metadata.name
@@ -78,8 +81,8 @@ fn test_file_encoder_decoder() ! {
assert decoded.metadata.mode == file.metadata.mode
assert decoded.metadata.owner == file.metadata.owner
assert decoded.metadata.group == file.metadata.group
assert decoded.data == file.data
assert decoded.parent_id == file.parent_id
assert decoded.chunk_ids == [u32(12), u32(27)]
println('Test completed successfully!')
}
@@ -92,6 +95,7 @@ fn test_symlink_encoder_decoder() ! {
metadata: vfs.Metadata{
id: u32(current_time)
name: 'test.txt'
path: '/test.txt'
file_type: .symlink
created_at: current_time
modified_at: current_time

View File

@@ -6,15 +6,8 @@ import freeflowuniverse.herolib.vfs
pub struct File {
pub mut:
metadata vfs.Metadata // vfs.Metadata from models_common.v
data string // File content stored in DB
parent_id u32 // ID of parent directory
}
// write updates the file's content and returns updated file
pub fn (mut file File) write(content string) {
file.data = content
file.metadata.size = u64(content.len)
file.metadata.modified()
chunk_ids []u32 // a list of data addresses for chunks of 64 kb in data_db
}
// Rename the file
@@ -22,11 +15,6 @@ fn (mut f File) rename(name string) {
f.metadata.name = name
}
// read returns the file's content
pub fn (mut f File) read() string {
return f.data
}
fn (f &File) get_metadata() vfs.Metadata {
return f.metadata
}
@@ -61,36 +49,36 @@ pub:
parent_id u32
}
// mkdir creates a new directory with default permissions
pub fn (mut fs DatabaseVFS) new_file(file NewFile) !&File {
f := File{
data: file.data
parent_id: file.parent_id
metadata: fs.new_metadata(NewMetadata{
name: file.name
path: file.path
mode: file.mode
owner: file.owner
group: file.group
size: u64(file.data.len)
file_type: .file
})
}
// // mkdir creates a new directory with default permissions
// pub fn (mut fs DatabaseVFS) new_file(file NewFile) !&File {
// f := File{
// parent_id: file.parent_id
// parent_id: file.parent_id
// metadata: fs.new_metadata(NewMetadata{
// name: file.name
// path: file.path
// mode: file.mode
// owner: file.owner
// group: file.group
// size: u64(file.data.len)
// file_type: .file
// })
// }
// Save new directory to DB
fs.save_entry(f)!
return &f
}
// // Save new directory to DB
// fs.save_entry(f)!
// return &f
// }
// mkdir creates a new directory with default permissions
pub fn (mut fs DatabaseVFS) copy_file(file File) !&File {
return fs.new_file(
data: file.data
name: file.metadata.name
path: file.metadata.path
mode: file.metadata.mode
owner: file.metadata.owner
group: file.metadata.group
parent_id: file.parent_id
)
}
// // mkdir creates a new directory with default permissions
// pub fn (mut fs DatabaseVFS) copy_file(file File) !&File {
// return fs.new_file(
// data: file.data
// name: file.metadata.name
// path: file.metadata.path
// mode: file.metadata.mode
// owner: file.metadata.owner
// group: file.metadata.group
// parent_id: file.parent_id
// )
// }

View File

@@ -1,113 +0,0 @@
module vfs_db
import freeflowuniverse.herolib.vfs
import freeflowuniverse.herolib.data.ourdb
import freeflowuniverse.herolib.data.encoder
import time
import log
// DatabaseVFS represents the virtual filesystem
@[heap]
pub struct DatabaseVFS {
pub mut:
root_id u32 // ID of root directory
block_size u32 // Size of data blocks in bytes
db_data &Database @[str: skip] // Database instance for file data storage
db_metadata &Database @[str: skip] // Database instance for metadata storage
last_inserted_id u32
id_table map[u32]u32
}
pub interface Database {
mut:
get(id u32) ![]u8
set(ourdb.OurDBSetArgs) !u32
delete(id u32) !
}
// Get the next ID, it should be some kind of auto-incrementing ID
pub fn (mut fs DatabaseVFS) get_next_id() u32 {
fs.last_inserted_id = fs.last_inserted_id + 1
return fs.last_inserted_id
}
// load_entry loads an entry from the database by ID and sets up parent references
fn (mut fs DatabaseVFS) load_entry(vfs_id u32) !FSEntry {
if metadata := fs.db_metadata.get(fs.get_database_id(vfs_id)!) {
// First byte is version, second byte indicates the type
// TODO: check we dont overflow filetype (u8 in boundaries of filetype)
entry_type := unsafe { vfs.FileType(metadata[1]) }
match entry_type {
.directory {
mut dir := decode_directory(metadata) or {
return error('Failed to decode directory: ${err}')
}
return dir
}
.file {
mut file, data_id := decode_file_metadata(metadata) or { return error('Failed to decode file: ${err}') }
if id := data_id {
// there was a data_db index stored with file so file has data
if file_data := fs.db_data.get(id) {
file.data = file_data.bytestr()
} else {
return error('This should never happen, data is not where its supposed to be')
}
}
return file
}
.symlink {
mut symlink := decode_symlink(metadata) or {
return error('Failed to decode symlink: ${err}')
}
return symlink
}
}
}
return error('Entry not found')
}
// save_entry saves an entry to the database
pub fn (mut fs DatabaseVFS) save_entry(entry FSEntry) !u32 {
match entry {
Directory {
encoded := entry.encode()
db_id := fs.db_metadata.set(id: entry.metadata.id, data: encoded) or {
return error('Failed to save directory on id:${entry.metadata.id}: ${err}')
}
fs.set_database_id(entry.metadata.id, db_id)!
return entry.metadata.id
}
File {
// First encode file data and store in db_data
metadata_bytes := if entry.data.len == 0 {
entry.encode(none)
} else {
// file has data so that will be stored in data_db
// its corresponding id stored with file metadata
data_encoded := entry.data.bytes()
data_db_id := fs.db_data.set(id: entry.metadata.id, data: data_encoded) or {
return error('Failed to save file data on id:${entry.metadata.id}: ${err}')
}
// Encode the db_data ID in with the file metadata
entry.encode(data_db_id)
}
// Save the metadata_bytes to metadata_db
metadata_db_id := fs.db_metadata.set(id: entry.metadata.id, data: metadata_bytes) or {
return error('Failed to save file metadata on id:${entry.metadata.id}: ${err}')
}
fs.set_database_id(entry.metadata.id, metadata_db_id)!
return entry.metadata.id
}
Symlink {
encoded := entry.encode()
db_id := fs.db_metadata.set(id: entry.metadata.id, data: encoded) or {
return error('Failed to save symlink on id:${entry.metadata.id}: ${err}')
}
fs.set_database_id(entry.metadata.id, db_id)!
return entry.metadata.id
}
}
}

View File

@@ -2,6 +2,7 @@ module vfs_db
import freeflowuniverse.herolib.vfs { Metadata }
import time
import log
// mkdir creates a new directory with default permissions
pub fn (mut fs DatabaseVFS) directory_mkdir(mut dir Directory, name_ string) !&Directory {
@@ -88,17 +89,9 @@ pub fn (mut fs DatabaseVFS) copy_directory(dir Directory) !&Directory {
}
// touch creates a new empty file with default permissions
pub fn (mut fs DatabaseVFS) directory_touch(dir_ Directory, name_ string) !&File {
pub fn (mut fs DatabaseVFS) directory_touch(mut dir Directory, name_ string) !&File {
name := name_.trim('/')
mut dir := dir_
// First, make sure we're working with the latest version of the directory
if updated_dir := fs.load_entry(dir.metadata.id) {
if updated_dir is Directory {
dir = updated_dir
}
}
// Check if file already exists
for child_id in dir.children {
if entry := fs.load_entry(child_id) {
@@ -115,85 +108,49 @@ pub fn (mut fs DatabaseVFS) directory_touch(dir_ Directory, name_ string) !&File
}
// Create new file with correct parent_id
mut new_file := fs.new_file(
mut new_file := fs.save_file(File{
parent_id: dir.metadata.id
name: name
path: path
)!
metadata: vfs.Metadata {
name: name
path: path
}
}, [])!
// Ensure parent_id is set correctly
if new_file.parent_id != dir.metadata.id {
new_file.parent_id = dir.metadata.id
fs.save_entry(new_file)!
}
// Update children list
dir.children << new_file.metadata.id
fs.save_entry(dir)!
// Reload the directory to ensure we have the latest version
if updated_dir := fs.load_entry(dir.metadata.id) {
if updated_dir is Directory {
dir = updated_dir
}
}
return new_file
}
// rm removes a file or directory by name
pub fn (mut fs DatabaseVFS) directory_rm(mut dir Directory, name string) ! {
mut found := false
mut found_id := u32(0)
mut found_idx := 0
// First, make sure we're working with the latest version of the directory
if updated_dir := fs.load_entry(dir.metadata.id) {
if updated_dir is Directory {
dir = updated_dir
}
}
for i, child_id in dir.children {
if entry := fs.load_entry(child_id) {
if entry.metadata.name == name {
found = true
found_id = child_id
found_idx = i
if entry is Directory {
if entry.children.len > 0 {
return error('Directory not empty')
}
}
break
}
}
}
if !found {
entry := fs.directory_get_entry(dir, name) or {
return error('${name} not found')
}
if entry is Directory {
if entry.children.len > 0 {
return error('Directory not empty')
}
}
// get entry from db_metadata
metadata_bytes := fs.db_metadata.get(fs.get_database_id(found_id)!) or { return error('Failed to delete entry: ${err}') }
file, data_id := decode_file_metadata(metadata_bytes)!
metadata_bytes := fs.db_metadata.get(fs.get_database_id(entry.metadata.id)!) or { return error('Failed to delete entry: ${err}') }
file, chunk_ids := decode_file_metadata(metadata_bytes)!
if id := data_id {
// means file has associated data in db_data
// delete file chunks in data_db
for id in chunk_ids {
log.debug('[DatabaseVFS] Deleting chunk ${id}')
fs.db_data.delete(id)!
}
log.debug('[DatabaseVFS] Deleting file metadata ${file.metadata.id}')
fs.db_metadata.delete(fs.get_database_id(entry.metadata.id)!) or { return error('Failed to delete entry: ${err}') }
fs.db_metadata.delete(file.metadata.id) or { return error('Failed to delete entry: ${err}') }
// Update children list - make sure we don't remove the wrong child
dir.children.delete(found_idx)
fs.save_entry(dir)!
// Reload the directory to ensure we have the latest version
if updated_dir := fs.load_entry(dir.metadata.id) {
if updated_dir is Directory {
dir = updated_dir
}
dir.children = dir.children.filter(it != entry.metadata.id).clone()
fs.save_entry(dir) or {
return error('Failed to save updated directory.\n${err}')
}
}
@@ -483,13 +440,6 @@ pub fn (mut fs DatabaseVFS) directory_rename(dir Directory, src_name string, dst
pub fn (mut fs DatabaseVFS) directory_children(mut dir Directory, recursive bool) ![]FSEntry {
mut entries := []FSEntry{}
// Make sure we're working with the latest version of the directory
if updated_dir := fs.load_entry(dir.metadata.id) {
if updated_dir is Directory {
dir = updated_dir
}
}
for child_id in dir.children {
if entry := fs.load_entry(child_id) {
entries << entry
@@ -497,6 +447,8 @@ pub fn (mut fs DatabaseVFS) directory_children(mut dir Directory, recursive bool
mut d := entry as Directory
entries << fs.directory_children(mut d, true)!
}
} else {
panic('Filesystem is corrupted, this should never happen ${err}')
}
}
return entries.clone()

View File

@@ -37,29 +37,45 @@ pub fn (mut fs DatabaseVFS) root_get_as_dir() !&Directory {
}
fn (mut self DatabaseVFS) get_entry(path_ string) !FSEntry {
path := '/${path_.trim_left('/').trim_right('/')}'
path := '${path_.trim_left('/').trim_right('/')}'
if path == '/' || path == '' || path == '.' {
return FSEntry(self.root_get_as_dir()!)
}
mut current := *self.root_get_as_dir()!
return self.directory_get_entry(mut current, path) or {
return error('Path not found: ${path}')
parts := path.split('/')
mut parent_dir := *self.root_get_as_dir()!
for i, part in parts {
entry := self.directory_get_entry(parent_dir, part) or {
return error('Failed to get entry ${err}')
}
if i == parts.len - 1 {
// last part, means entry is found
return entry
}
if entry is Directory {
parent_dir = entry
} else {
return error('Failed to get entry, expected dir')
}
}
// mut current := *self.root_get_as_dir()!
// return self.directory_get_entry(mut current, path) or {
return error('Path not found: ${path}')
// }
}
fn (mut self DatabaseVFS) directory_get_entry(mut dir Directory, path string) ?FSEntry {
mut children := self.directory_children(mut dir, false) or {
panic('this should never happen')
}
for mut child in children {
if child.metadata.path == path {
return child
} else if child is Directory {
mut child_dir := child as Directory
return self.directory_get_entry(mut child_dir, path) or {
continue
// internal function to get an entry of some name from a directory
fn (mut self DatabaseVFS) directory_get_entry(dir Directory, name string) ?FSEntry {
// mut children := self.directory_children(mut dir, false) or {
// panic('this should never happen')
// }
for child_id in dir.children {
if entry := self.load_entry(child_id) {
if entry.metadata.name == name {
return entry
}
} else {
panic('Filesystem is corrupted, this should never happen ${err}')
}
}
return none

View File

@@ -13,33 +13,50 @@ pub fn (mut fs DatabaseVFS) root_get() !vfs.FSEntry {
pub fn (mut self DatabaseVFS) file_create(path_ string) !vfs.FSEntry {
path := '/${path_.trim_left('/').trim_right('/')}'
log.info('[DatabaseVFS] Creating file ${path}')
// Get parent directory
parent_path := os.dir(path)
file_name := os.base(path)
mut parent_dir := self.get_directory(parent_path)!
return self.directory_touch(parent_dir, file_name)!
log.info('[DatabaseVFS] Creating file ${file_name} in ${parent_path}')
entry := self.directory_touch(mut parent_dir, file_name)!
log.info('[DatabaseVFS] Created file ${file_name} in ${parent_path}')
return entry
}
pub fn (mut self DatabaseVFS) file_read(path_ string) ![]u8 {
path := '/${path_.trim_left('/').trim_right('/')}'
log.info('[DatabaseVFS] Reading file ${path}')
mut entry := self.get_entry(path)!
if mut entry is File {
return entry.read().bytes()
mut file := self.get_entry(path)!
if mut file is File {
metadata := self.db_metadata.get(self.get_database_id(file.metadata.id)!) or {
return error('Failed to get file metadata ${err}')
}
_, chunk_ids := decode_file_metadata(metadata) or { return error('Failed to decode file: ${err}') }
println('debugzo-1 ${chunk_ids}')
mut file_data := []u8{}
// log.debug('[DatabaseVFS] Got database chunk ids ${chunk_ids}')
for id in chunk_ids {
log.debug('[DatabaseVFS] Getting chunk ${id}')
// there were chunk ids stored with file so file has data
if chunk_bytes := self.db_data.get(id) {
file_data << chunk_bytes
} else {
return error('Failed to fetch file data: ${err}')
}
}
return file_data
}
return error('Not a file: ${path}')
}
pub fn (mut self DatabaseVFS) file_write(path_ string, data []u8) ! {
log.info('[DatabaseVFS] Writing file ${path_}')
path := '/${path_.trim_left('/').trim_right('/')}'
path := texttools.path_fix_absolute(path_)
if mut entry := self.get_entry(path) {
if mut entry is File {
log.info('[DatabaseVFS] Writing to file ${path}')
entry.write(data.bytestr())
self.save_entry(entry)!
log.info('[DatabaseVFS] Writing ${data.len} bytes to ${path}')
self.save_file(entry, data)!
} else {
panic('handle error')
}
@@ -55,7 +72,10 @@ pub fn (mut self DatabaseVFS) file_delete(path string) ! {
file_name := os.base(path)
mut parent_dir := self.get_directory(parent_path)!
self.directory_rm(mut parent_dir, file_name)!
self.directory_rm(mut parent_dir, file_name) or {
log.error(err.msg())
return err
}
}
pub fn (mut self DatabaseVFS) dir_create(path_ string) !vfs.FSEntry {
@@ -138,7 +158,7 @@ pub fn (mut self DatabaseVFS) link_delete(path string) ! {
self.directory_rm(mut parent_dir, file_name)!
}
pub fn (mut self DatabaseVFS) exists(path_ string) bool {
pub fn (mut self DatabaseVFS) exists(path_ string) bool {
path := if !path_.starts_with('/') {
'/${path_}'
} else {
@@ -229,8 +249,20 @@ pub fn (mut self DatabaseVFS) move(src_path string, dst_path string) !vfs.FSEntr
)!
}
pub fn (mut self DatabaseVFS) delete(path string) ! {
// TODO: implement
pub fn (mut self DatabaseVFS) delete(path_ string) ! {
if path_ == '/' || path_ == '' || path_ == '.' {
return error('cant delete root')
}
path := '/${path_.trim_left('/').trim_right('/')}'
parent_path := os.dir(path)
file_name := os.base(path)
mut parent_dir := self.get_directory(parent_path)!
self.directory_rm(mut parent_dir, file_name) or {
log.error('[DatabaseVFS] Failed to remove ${file_name} from ${parent_dir.metadata.path}\n${err}')
return err
}
}
pub fn (mut self DatabaseVFS) destroy() ! {