@@ -11,13 +11,14 @@ pub struct VFSDedupeDB {
 }

 pub fn (mut db VFSDedupeDB) set(args ourdb.OurDBSetArgs) !u32 {
-	return db.store(args.data,
-		dedupestor.Reference{owner: u16(1), id: args.id or {panic('VFS Must provide id')}}
-	)!
+	return db.store(args.data, dedupestor.Reference{
+		owner: u16(1)
+		id: args.id or { panic('VFS Must provide id') }
+	})!
 }

 pub fn (mut db VFSDedupeDB) delete(id u32) ! {
-	db.DedupeStore.delete(id, dedupestor.Reference{owner: u16(1), id: id})!
+	db.DedupeStore.delete(id, dedupestor.Reference{ owner: u16(1), id: id })!
 }

 example_data_dir := os.join_path(os.dir(@FILE), 'example_db')
@@ -33,35 +34,23 @@ mut db_data := VFSDedupeDB{
 }

 mut db_metadata := ourdb.new(
-	path: os.join_path(example_data_dir, 'metadata')
+	path: os.join_path(example_data_dir, 'metadata')
 	incremental_mode: false
 )!

 // Create VFS with separate databases for data and metadata
-mut vfs := vfs_db.new(mut db_data, mut db_metadata) or {
-	panic('Failed to create VFS: ${err}')
-}
+mut vfs := vfs_db.new(mut db_data, mut db_metadata) or { panic('Failed to create VFS: ${err}') }

 println('\n---------BEGIN EXAMPLE')
 println('---------WRITING FILES')
-vfs.file_create('/some_file.txt') or {
-	panic('Failed to create file: ${err}')
-}
-vfs.file_create('/another_file.txt') or {
-	panic('Failed to create file: ${err}')
-}
+vfs.file_create('/some_file.txt') or { panic('Failed to create file: ${err}') }
+vfs.file_create('/another_file.txt') or { panic('Failed to create file: ${err}') }

-vfs.file_write('/some_file.txt', 'gibberish'.bytes()) or {
-	panic('Failed to write file: ${err}')
-}
-vfs.file_write('/another_file.txt', 'abcdefg'.bytes()) or {
-	panic('Failed to write file: ${err}')
-}
+vfs.file_write('/some_file.txt', 'gibberish'.bytes()) or { panic('Failed to write file: ${err}') }
+vfs.file_write('/another_file.txt', 'abcdefg'.bytes()) or { panic('Failed to write file: ${err}') }

 println('\n---------READING FILES')
-some_file_content := vfs.file_read('/some_file.txt') or {
-	panic('Failed to read file: ${err}')
-}
+some_file_content := vfs.file_read('/some_file.txt') or { panic('Failed to read file: ${err}') }
 println(some_file_content.bytestr())

 another_file_content := vfs.file_read('/another_file.txt') or {
@@ -69,19 +58,15 @@ another_file_content := vfs.file_read('/another_file.txt') or {
 }
 println(another_file_content.bytestr())

-println("\n---------WRITING DUPLICATE FILE (DB SIZE: ${os.file_size(os.join_path(example_data_dir, 'data/0.db'))})")
-vfs.file_create('/duplicate.txt') or {
-	panic('Failed to create file: ${err}')
-}
-vfs.file_write('/duplicate.txt', 'gibberish'.bytes()) or {
-	panic('Failed to write file: ${err}')
-}
+println('\n---------WRITING DUPLICATE FILE (DB SIZE: ${os.file_size(os.join_path(example_data_dir,
+	'data/0.db'))})')
+vfs.file_create('/duplicate.txt') or { panic('Failed to create file: ${err}') }
+vfs.file_write('/duplicate.txt', 'gibberish'.bytes()) or { panic('Failed to write file: ${err}') }

-println("\n---------WROTE DUPLICATE FILE (DB SIZE: ${os.file_size(os.join_path(example_data_dir, 'data/0.db'))})")
+println('\n---------WROTE DUPLICATE FILE (DB SIZE: ${os.file_size(os.join_path(example_data_dir,
+	'data/0.db'))})')
 println('---------READING FILES')
-some_file_content3 := vfs.file_read('/some_file.txt') or {
-	panic('Failed to read file: ${err}')
-}
+some_file_content3 := vfs.file_read('/some_file.txt') or { panic('Failed to read file: ${err}') }
 println(some_file_content3.bytestr())

 another_file_content3 := vfs.file_read('/another_file.txt') or {
@@ -89,22 +74,21 @@ another_file_content3 := vfs.file_read('/another_file.txt') or {
 }
 println(another_file_content3.bytestr())

-duplicate_content := vfs.file_read('/duplicate.txt') or {
-	panic('Failed to read file: ${err}')
-}
+duplicate_content := vfs.file_read('/duplicate.txt') or { panic('Failed to read file: ${err}') }
 println(duplicate_content.bytestr())

-println("\n---------DELETING DUPLICATE FILE (DB SIZE: ${os.file_size(os.join_path(example_data_dir, 'data/0.db'))})")
-vfs.file_delete('/duplicate.txt') or {
-	panic('Failed to delete file: ${err}')
-}
+println('\n---------DELETING DUPLICATE FILE (DB SIZE: ${os.file_size(os.join_path(example_data_dir,
+	'data/0.db'))})')
+vfs.file_delete('/duplicate.txt') or { panic('Failed to delete file: ${err}') }

-data_path := os.join_path(example_data_dir, 'data/0.db')
-db_file_path := os.join_path(data_path, '0.db')
-println("---------READING FILES (DB SIZE: ${if os.exists(db_file_path) { os.file_size(db_file_path) } else { 0 }})")
-some_file_content2 := vfs.file_read('/some_file.txt') or {
-	panic('Failed to read file: ${err}')
-}
+data_path2 := os.join_path(example_data_dir, 'data/0.db')
+db_file_path := os.join_path(data_path2, '0.db')
+println('---------READING FILES (DB SIZE: ${if os.exists(db_file_path) {
+	os.file_size(db_file_path)
+} else {
+	0
+}})')
+some_file_content2 := vfs.file_read('/some_file.txt') or { panic('Failed to read file: ${err}') }
 println(some_file_content2.bytestr())

 another_file_content2 := vfs.file_read('/another_file.txt') or {
@@ -119,4 +103,4 @@ println(another_file_content2.bytestr())
 // }
 // if duplicate_content.len > 0 {
 // println(duplicate_content.bytestr())
-// }
+// }
@@ -16,26 +16,24 @@ os.mkdir_all(metadata_dir)!

 // Create separate databases for data and metadata
 mut db_data := ourdb.new(
-	path: data_dir
+	path: data_dir
 	incremental_mode: false
 )!

 mut db_metadata := ourdb.new(
-	path: metadata_dir
+	path: metadata_dir
 	incremental_mode: false
 )!

 // Create VFS with separate databases for data and metadata
-mut vfs := vfs_db.new_with_separate_dbs(
-	mut db_data,
-	mut db_metadata,
-	data_dir: data_dir,
+mut vfs := vfs_db.new_with_separate_dbs(mut db_data, mut db_metadata,
+	data_dir: data_dir
 	metadata_dir: metadata_dir
 )!

 // Create a root directory if it doesn't exist
 if !vfs.exists('/') {
-	vfs.dir_create('/')!
+	vfs.dir_create('/')!
 }

 // Create some files and directories
@@ -55,13 +53,13 @@ println('Nested file content: ${vfs.file_read('/test_dir/nested_file.txt')!.byte
 println('Root directory contents:')
 root_entries := vfs.dir_list('/')!
 for entry in root_entries {
-	println('- ${entry.get_metadata().name} (${entry.get_metadata().file_type})')
+	println('- ${entry.get_metadata().name} (${entry.get_metadata().file_type})')
 }

 println('Test directory contents:')
 test_dir_entries := vfs.dir_list('/test_dir')!
 for entry in test_dir_entries {
-	println('- ${entry.get_metadata().name} (${entry.get_metadata().file_type})')
+	println('- ${entry.get_metadata().name} (${entry.get_metadata().file_type})')
 }

 // Create a duplicate file with the same content
@@ -16,59 +16,43 @@ os.mkdir_all(example_data_dir)!

 // Create separate databases for data and metadata
 mut db_data := ourdb.new(
-	path: os.join_path(example_data_dir, 'data')
+	path: os.join_path(example_data_dir, 'data')
 	incremental_mode: false
 )!

 mut db_metadata := ourdb.new(
-	path: os.join_path(example_data_dir, 'metadata')
+	path: os.join_path(example_data_dir, 'metadata')
 	incremental_mode: false
 )!

 // Create VFS with separate databases for data and metadata
-mut vfs := vfs_db.new(mut db_data, mut db_metadata) or {
-	panic('Failed to create VFS: ${err}')
-}
+mut vfs := vfs_db.new(mut db_data, mut db_metadata) or { panic('Failed to create VFS: ${err}') }

 println('\n---------BEGIN DIRECTORY OPERATIONS EXAMPLE')

 // Create directories with subdirectories
 println('\n---------CREATING DIRECTORIES')
-vfs.dir_create('/dir1') or {
-	panic('Failed to create directory: ${err}')
-}
+vfs.dir_create('/dir1') or { panic('Failed to create directory: ${err}') }
 println('Created directory: /dir1')

-vfs.dir_create('/dir1/subdir1') or {
-	panic('Failed to create directory: ${err}')
-}
+vfs.dir_create('/dir1/subdir1') or { panic('Failed to create directory: ${err}') }
 println('Created directory: /dir1/subdir1')

-vfs.dir_create('/dir1/subdir2') or {
-	panic('Failed to create directory: ${err}')
-}
+vfs.dir_create('/dir1/subdir2') or { panic('Failed to create directory: ${err}') }
 println('Created directory: /dir1/subdir2')

-vfs.dir_create('/dir2') or {
-	panic('Failed to create directory: ${err}')
-}
+vfs.dir_create('/dir2') or { panic('Failed to create directory: ${err}') }
 println('Created directory: /dir2')

-vfs.dir_create('/dir2/subdir1') or {
-	panic('Failed to create directory: ${err}')
-}
+vfs.dir_create('/dir2/subdir1') or { panic('Failed to create directory: ${err}') }
 println('Created directory: /dir2/subdir1')

-vfs.dir_create('/dir2/subdir1/subsubdir1') or {
-	panic('Failed to create directory: ${err}')
-}
+vfs.dir_create('/dir2/subdir1/subsubdir1') or { panic('Failed to create directory: ${err}') }
 println('Created directory: /dir2/subdir1/subsubdir1')

 // List directories
 println('\n---------LISTING ROOT DIRECTORY')
-root_entries := vfs.dir_list('/') or {
-	panic('Failed to list directory: ${err}')
-}
+root_entries := vfs.dir_list('/') or { panic('Failed to list directory: ${err}') }
 println('Root directory contains:')
 for entry in root_entries {
 	entry_type := if entry.get_metadata().file_type == .directory { 'directory' } else { 'file' }
@@ -76,9 +60,7 @@ for entry in root_entries {
 }

 println('\n---------LISTING /dir1 DIRECTORY')
-dir1_entries := vfs.dir_list('/dir1') or {
-	panic('Failed to list directory: ${err}')
-}
+dir1_entries := vfs.dir_list('/dir1') or { panic('Failed to list directory: ${err}') }
 println('/dir1 directory contains:')
 for entry in dir1_entries {
 	entry_type := if entry.get_metadata().file_type == .directory { 'directory' } else { 'file' }
@@ -87,9 +69,7 @@ for entry in dir1_entries {

 // Write a file in a subdirectory
 println('\n---------WRITING FILE IN SUBDIRECTORY')
-vfs.file_create('/dir1/subdir1/test_file.txt') or {
-	panic('Failed to create file: ${err}')
-}
+vfs.file_create('/dir1/subdir1/test_file.txt') or { panic('Failed to create file: ${err}') }
 println('Created file: /dir1/subdir1/test_file.txt')

 test_content := 'This is a test file in a subdirectory'
@@ -104,13 +84,15 @@ file_content := vfs.file_read('/dir1/subdir1/test_file.txt') or {
 	panic('Failed to read file: ${err}')
 }
 println('File content: ${file_content.bytestr()}')
-println('Content verification: ${if file_content.bytestr() == test_content { 'SUCCESS' } else { 'FAILED' }}')
+println('Content verification: ${if file_content.bytestr() == test_content {
+	'SUCCESS'
+} else {
+	'FAILED'
+}}')

 // List the subdirectory to see the file
 println('\n---------LISTING /dir1/subdir1 DIRECTORY')
-subdir1_entries := vfs.dir_list('/dir1/subdir1') or {
-	panic('Failed to list directory: ${err}')
-}
+subdir1_entries := vfs.dir_list('/dir1/subdir1') or { panic('Failed to list directory: ${err}') }
 println('/dir1/subdir1 directory contains:')
 for entry in subdir1_entries {
 	entry_type := if entry.get_metadata().file_type == .directory { 'directory' } else { 'file' }
@@ -119,9 +101,7 @@ for entry in subdir1_entries {

 // Delete the file
 println('\n---------DELETING FILE')
-vfs.file_delete('/dir1/subdir1/test_file.txt') or {
-	panic('Failed to delete file: ${err}')
-}
+vfs.file_delete('/dir1/subdir1/test_file.txt') or { panic('Failed to delete file: ${err}') }
 println('Deleted file: /dir1/subdir1/test_file.txt')

 // List the subdirectory again to verify the file is gone
@@ -158,7 +138,11 @@ deep_file_content := vfs.file_read('/dir2/subdir1/subsubdir1/deep_file.txt') or
 	panic('Failed to read file: ${err}')
 }
 println('File content: ${deep_file_content.bytestr()}')
-println('Content verification: ${if deep_file_content.bytestr() == deep_content { 'SUCCESS' } else { 'FAILED' }}')
+println('Content verification: ${if deep_file_content.bytestr() == deep_content {
+	'SUCCESS'
+} else {
+	'FAILED'
+}}')

 // Clean up by deleting directories (optional)
 println('\n---------CLEANING UP')