Merge branch 'development_ourdb_new' into development_actions007
@@ -1,27 +1,20 @@
#!/usr/bin/env -S v -n -w -gc none -cc tcc -d use_openssl -enable-globals run

// import freeflowuniverse.herolib.vfs.webdav
import freeflowuniverse.herolib.vfs.vfs_nested
import freeflowuniverse.herolib.vfs.vfs_core
import freeflowuniverse.herolib.vfs.vfs_ourdb
import freeflowuniverse.herolib.dav.webdav
import freeflowuniverse.herolib.vfs.vfs_db
import freeflowuniverse.herolib.data.ourdb
import os
import log

mut high_level_vfs := vfsnested.new()
const database_path = os.join_path(os.dir(@FILE), 'database')

// lower level VFS implementations that use OurDB
mut vfs1 := vfsourdb.new('/tmp/test_webdav_ourdbvfs/vfs1', '/tmp/test_webdav_ourdbvfs/vfs1')!
mut vfs2 := vfsourdb.new('/tmp/test_webdav_ourdbvfs/vfs2', '/tmp/test_webdav_ourdbvfs/vfs2')!
mut vfs3 := vfsourdb.new('/tmp/test_webdav_ourdbvfs/vfs3', '/tmp/test_webdav_ourdbvfs/vfs3')!
mut metadata_db := ourdb.new(path: os.join_path(database_path, 'metadata'))!
mut data_db := ourdb.new(path: os.join_path(database_path, 'data'))!
mut vfs := vfs_db.new(mut metadata_db, mut data_db)!
mut server := webdav.new_server(vfs: vfs, user_db: {
    'admin': '123'
})!

// Nest OurDB VFS instances at different paths
high_level_vfs.add_vfs('/data', vfs1) or { panic(err) }
high_level_vfs.add_vfs('/config', vfs2) or { panic(err) }
high_level_vfs.add_vfs('/data/backup', vfs3) or { panic(err) } // Nested under /data
log.set_level(.debug)

// // Create WebDAV Server that uses high level VFS
// mut webdav_server := webdav.new_app(
//     vfs: high_level_vfs
//     user_db: {
//         'omda': '123'
//     }
// )!
// webdav_server.run()
server.run()
@@ -116,11 +116,20 @@ pub fn name_fix_dot_notation_to_snake_case(name string) string {
    return name.replace('.', '_')
}

// remove underscores and extension
pub fn name_fix_no_underscore_no_ext(name_ string) string {
    return name_fix_keepext(name_).all_before_last('.').replace('_', '')
// normalize a file path while preserving path structure
pub fn path_fix(path_ string) string {
    if path_.len == 0 {
        return ''
    }
    return "${path_.trim('/')}"
}

// normalize a file path while preserving path structure
pub fn path_fix_absolute(path string) string {
    return "/${path_fix(path)}"
}


// remove underscores and extension
pub fn name_fix_no_ext(name_ string) string {
    return name_fix_keepext(name_).all_before_last('.').trim_right('_')
@@ -6,3 +6,33 @@ fn test_main() {
    assert name_fix_keepext('\$sds_?_!"`{_ 4F') == 'sds_4f'
    assert name_fix_keepext('\$sds_?_!"`{_ 4F.jpg') == 'sds_4f.jpg'
}

fn test_path_fix() {
    // Test empty path
    assert path_fix('') == ''

    // Test absolute paths
    assert path_fix('/home/user') == '/home/user'
    assert path_fix('/home/USER') == '/home/user'
    assert path_fix('/home/user/Documents') == '/home/user/documents'

    // Test relative paths
    assert path_fix('home/user') == 'home/user'
    assert path_fix('./home/user') == './home/user'
    assert path_fix('../home/user') == '../home/user'

    // Test paths with special characters
    assert path_fix('/home/user/My Documents') == '/home/user/my_documents'
    assert path_fix('/home/user/file-name.txt') == '/home/user/file_name.txt'
    assert path_fix('/home/user/file name with spaces.txt') == '/home/user/file_name_with_spaces.txt'

    // Test paths with multiple special characters
    assert path_fix('/home/user/!@#$%^&*()_+.txt') == '/home/user/'

    // Test paths with multiple components and extensions
    assert path_fix('/home/user/Documents/report.pdf') == '/home/user/documents/report.pdf'
    assert path_fix('/home/user/Documents/report.PDF') == '/home/user/documents/report.pdf'

    // Test paths with multiple slashes
    assert path_fix('/home//user///documents') == '/home/user/documents'
}
@@ -75,7 +75,6 @@ pub fn (mut db OurDB) set_(x u32, old_location Location, data []u8) ! {
        file_nr: file_nr
        position: u32(db.file.tell()!)
    }
    println('Writing ${x} data at position: ${new_location.position}, size: ${data.len}')

    // Calculate CRC of data
    crc := calculate_crc(data)
@@ -120,18 +119,15 @@ fn (mut db OurDB) get_(location Location) ![]u8 {
    db.db_file_select(location.file_nr)!

    if location.position == 0 {
        return error('Record not found')
        return error('Record not found, location: ${location}')
    }

    // Seek to position
    db.file.seek(i64(location.position), .start)!

    // Read header
    mut header := []u8{len: header_size}
    header_read_bytes := db.file.read(mut header)!
    if header_read_bytes != header_size {
    header := db.file.read_bytes_at(header_size, location.position)
    if header.len != header_size {
        return error('failed to read header')
    }

    // Parse size (2 bytes)
    size := u16(header[0]) | (u16(header[1]) << 8)

@@ -139,6 +135,8 @@ fn (mut db OurDB) get_(location Location) ![]u8 {
    stored_crc := u32(header[2]) | (u32(header[3]) << 8) | (u32(header[4]) << 16) | (u32(header[5]) << 24)

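    // Note (layout inferred from the reads in this function, not stated elsewhere
    // in this diff): each record appears to start with a 12-byte header at
    // location.position — 2 bytes little-endian size, 4 bytes CRC32, and 6 further
    // bytes (likely the location of the previous record version) — with the payload
    // itself starting at location.position + 12, as the seek below shows.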
    // Read data
    // seek data beginning
    db.file.seek(i64(location.position + 12), .start)!
    mut data := []u8{len: int(size)}
    data_read_bytes := db.file.read(mut data) or {
        return error('Failed to read file, ${size} ${err}')
lib/data/ourdb_syncer/Diagram.md
Normal file
30
lib/data/ourdb_syncer/Diagram.md
Normal file
@@ -0,0 +1,30 @@
|
||||
```
|
||||
+-----------------+
|
||||
| User |
|
||||
| (HTTP Client) |
|
||||
+-----------------+
|
||||
|
|
||||
| HTTP Requests (GET, SET, DELETE)
|
||||
v
|
||||
+-----------------+
|
||||
| HTTP Server |
|
||||
| (Exposed API) |
|
||||
+-----------------+
|
||||
|
|
||||
| Internal Communication via Mycelium Network
|
||||
|
|
||||
+-------------------+-------------------+
|
||||
| | |
|
||||
v v v
|
||||
+-----------------+ +-----------------+ +-----------------+
|
||||
| Master | | Worker 1 | | Worker 2 |
|
||||
| (Handles Writes)| | (Handles Reads) | | (Handles Reads) |
|
||||
| OurDB | | OurDB | | OurDB |
|
||||
+-----------------+ +-----------------+ +-----------------+
|
||||
| | |
|
||||
| | |
|
||||
| v |
|
||||
| Data Sync via Mycelium Network |
|
||||
| |
|
||||
+------------------->+------------------+
|
||||
```
|
||||
@@ -1,30 +0,0 @@
# Mycelium Streamer

## Overview

This project demonstrates a master-worker setup using `mycelium` for distributed data storage. The master node interacts with worker nodes over the network to store and retrieve data.

## Prerequisites

Before running the master node example, ensure the following:

- `mycelium` binary is installed and running on both local and remote machines.
- Worker nodes are set up and running with the mycelium instance.

## Setup

1. Start `mycelium` on the local machine with the following command:

   ```bash
   mycelium --peers tcp://188.40.132.242:9651 "quic://[2a01:4f8:212:fa6::2]:9651" tcp://185.69.166.7:9651 "quic://[2a02:1802:5e:0:ec4:7aff:fe51:e36b]:9651" tcp://65.21.231.58:9651 "quic://[2a01:4f9:5a:1042::2]:9651" "tcp://[2604:a00:50:17b:9e6b:ff:fe1f:e054]:9651" quic://5.78.122.16:9651 "tcp://[2a01:4ff:2f0:3621::1]:9651" quic://142.93.217.194:9651 --tun-name tun2 --tcp-listen-port 9652 --quic-listen-port 9653 --api-addr 127.0.0.1:9000
   ```

   Replace IP addresses and ports with your specific configuration.

2. On the remote machine where the worker will run, execute the same `mycelium` command as above.

3. Execute the worker example code provided (`herolib/examples/data/deduped_mycelium_worker.vsh`) on the remote worker machine.

## Running the Master Example

After setting up `mycelium` and the worker nodes, run the master example script (`herolib/examples/data/deduped_mycelium_master.vsh`) on the local machine.
166  lib/data/ourdb_syncer/README.md  Normal file
@@ -0,0 +1,166 @@
# Key-Value HTTP Service with Master-Worker Architecture over Mycelium

## Overview

This project implements a distributed key-value storage service exposed via an HTTP API. It uses a master-worker architecture to handle read and write operations efficiently, with internal communication facilitated by the [Mycelium network](https://github.com/threefoldtech/mycelium). The system is built in [V](https://vlang.io/) and uses [OurDB](https://github.com/freeflowuniverse/herolib/tree/main/lib/data/ourdb) for embedded key-value storage.

### Key Features

- **HTTP API**: Users can perform `GET` (read), `SET` (write), and `DELETE` operations on key-value pairs via an HTTP server.
- **Master-Worker Architecture**:
  - **Master**: Handles all write operations (`SET`, `DELETE`) to ensure data consistency.
  - **Workers**: Handle read operations (`GET`) to distribute the load.
- **Data Synchronization**: Changes made by the master are propagated to all workers to ensure consistent reads.
- **Mycelium Integration**: Internal communication between the HTTP server, master, and workers is handled over the Mycelium network, an encrypted IPv6 overlay network.
- **Embedded Storage**: Uses OurDB, a lightweight embedded key-value database, for data persistence on each node.

### Use Case

This service is ideal for applications requiring a simple, distributed key-value store with strong consistency guarantees, such as configuration management, decentralized data sharing, or caching in a peer-to-peer network.

## Architecture

The system is designed with a clear separation of concerns, ensuring scalability and consistency. Below is a simplified diagram of the architecture:

```
+-----------------+
|      User       |
|  (HTTP Client)  |
+-----------------+
         |
         | HTTP Requests
         v
+-----------------+
|   HTTP Server   |<-------------+
+-----------------+              | External Interface
         |                       |
         | Mycelium              |
         | Network               |
         v                       v
+-----------------+     +-----------------+
|     Master      |---->|     Workers     |
|    (Writes)     |     |     (Reads)     |
|      OurDB      |     |      OurDB      |
+-----------------+     +-----------------+
```

### Components

1. **HTTP Server**:
   - Acts as the entry point for user requests.
   - Routes write requests (`SET`, `DELETE`) to the master.
   - Routes read requests (`GET`) to one of the workers (e.g., using load balancing).

2. **Master**:
   - Handles all write operations to ensure data consistency.
   - Stores data in a local OurDB instance.
   - Propagates updates to workers via the Mycelium network.

3. **Workers**:
   - Handle read operations to distribute the load.
   - Store a synchronized copy of the data in a local OurDB instance.
   - Receive updates from the master via the Mycelium network.

4. **Mycelium Network**:
   - Provides secure, encrypted peer-to-peer communication between the HTTP server, master, and workers.

5. **OurDB**:
   - An embedded key-value database used by the master and workers for data storage.

## Prerequisites

To run this project, you need the following:

- [V](https://vlang.io/) (Vlang compiler) installed.
- [Mycelium](https://github.com/threefoldtech/mycelium) network configured (either public or private).
- [OurDB](https://github.com/freeflowuniverse/herolib/tree/main/lib/data/ourdb) library included in your project (part of the HeroLib suite).

## Installation

1. **Clone the Repository**:

   ```bash
   git clone <repository-url>
   cd <repository-name>
   ```

2. **Install Dependencies**:
   Ensure V is installed and the `ourdb` library is available. You may need to pull the HeroLib dependencies:

   ```bash
   v install
   ```

3. **Configure Mycelium**:
   - Set up a Mycelium network (public or private) and note the addresses of the master and worker nodes.
   - Update the configuration in the HTTP server to point to the correct Mycelium addresses.

4. **Build the Project**:
   Compile the V code for the HTTP server, master, and workers:

   ```bash
   v run main.v
   ```

## Usage

### Running the System

1. **Start the Master**:
   Run the master node to handle write operations:

   ```bash
   v run master.v
   ```

2. **Start the Workers**:
   Run one or more worker nodes to handle read operations:

   ```bash
   v run worker.v
   ```

3. **Start the HTTP Server**:
   Run the HTTP server to expose the API to users:

   ```bash
   v run server.v
   ```

### Making Requests

The HTTP server exposes the following endpoints:

- **SET a Key-Value Pair**:

  ```bash
  curl -X POST http://localhost:8080/set -d "key=mykey&value=myvalue"
  ```

  - Writes the key-value pair to the master, which syncs it to workers.

- **GET a Value by Key**:

  ```bash
  curl http://localhost:8080/get?key=mykey
  ```

  - Retrieves the value from a worker.

- **DELETE a Key**:

  ```bash
  curl -X POST http://localhost:8080/delete -d "key=mykey"
  ```

  - Deletes the key-value pair via the master, which syncs the deletion to workers.

## Development

### Code Structure

- streamer
  - `streamer.v`: Sets up the streamer — creating or connecting to a streamer and adding the master and worker nodes.
  - `nodes.v`: Implements the master/worker node, handling writes and synchronization.

- http_server
  - `server.v`: Implements the HTTP server and request routing logic.

- examples
  - `master_example.v`: A simple example that starts the streamer and master node.
  - `worker_example.v`: A simple example that starts the streamer and worker node.
  - `db_example.v`: A simple example that connects to a running streamer and lists its workers.

### Extending the System

- **Add More Workers**: Scale the system by starting additional worker nodes and updating the HTTP server’s worker list.
- **Enhance Synchronization**: Implement more advanced replication strategies (e.g., conflict resolution, versioning) if needed.
- **Improve Load Balancing**: Add sophisticated load balancing for read requests (e.g., based on worker load or latency); a minimal round-robin sketch follows below.
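For the load-balancing item above, a minimal starting point is sketched below: a simple round-robin selector for read requests. This helper is hypothetical and not part of this changeset; the worker list is assumed to hold Mycelium public keys as plain strings.

```v
// Hypothetical round-robin worker selection for GET requests.
struct WorkerPool {
mut:
    workers []string // Mycelium public keys of the worker nodes (assumed)
    next    int
}

// pick returns the next worker in round-robin order.
fn (mut p WorkerPool) pick() !string {
    if p.workers.len == 0 {
        return error('no workers registered')
    }
    worker := p.workers[p.next % p.workers.len]
    p.next++
    return worker
}
```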
18  lib/data/ourdb_syncer/examples/db_example.v  Normal file
@@ -0,0 +1,18 @@
module main

import freeflowuniverse.herolib.data.ourdb_syncer.streamer

fn main() {
    master_public_key := '570c1069736786f06c4fd2a6dc6c17cd88347604593b60e34b5688c369fa1b39'

    // Connect to an existing streamer
    mut streamer_ := streamer.connect_streamer(
        name: 'streamer'
        port: 8080
        master_public_key: master_public_key
    )!

    workers := streamer_.get_workers()!

    println('workers: ${workers}')
}

20  lib/data/ourdb_syncer/examples/master_example.v  Normal file
@@ -0,0 +1,20 @@
module main

import freeflowuniverse.herolib.data.ourdb_syncer.streamer

fn main() {
    println('Starting the streamer first!')

    // Create a new streamer
    mut streamer_ := streamer.new_streamer(
        name: 'streamer'
        port: 8080
    )!

    mut master_node := streamer_.add_master(
        address: '4ff:3da9:f2b2:4103:fa6e:7ea:7cbe:8fef'
        public_key: '570c1069736786f06c4fd2a6dc6c17cd88347604593b60e34b5688c369fa1b39'
    )!

    master_node.start_and_listen()!
}

19  lib/data/ourdb_syncer/examples/worker_example.v  Normal file
@@ -0,0 +1,19 @@
module main

import freeflowuniverse.herolib.data.ourdb_syncer.streamer

fn main() {
    // Connect to the streamer
    mut streamer_ := streamer.connect_streamer(
        name: 'streamer'
        port: 8080
        master_public_key: '570c1069736786f06c4fd2a6dc6c17cd88347604593b60e34b5688c369fa1b39'
    )!

    mut worker_node := streamer_.add_worker(
        public_key: '46a9f9cee1ce98ef7478f3dea759589bbf6da9156533e63fed9f233640ac072c'
        address: '4ff:3da9:f2b2:4103:fa6e:7ea:7cbe:8fef'
    )!

    worker_node.start_and_listen()!
}
35  lib/data/ourdb_syncer/http/CLIENT.md  Normal file
@@ -0,0 +1,35 @@
# OurDB Client

## Overview

This client is used to interact with an OurDB HTTP server.

## Prerequisites

Before running the client script, ensure that the OurDB server is up and running. You can start the server by following the instructions in the [OurDB Server README](./SERVER.md).

## Installation

Ensure you have the V programming language installed. You can download it from [vlang.io](https://vlang.io/).

## Running the Client

Once the OurDB server is running, execute the client script:

```sh
examples/data/ourdb_client.vsh
```

Alternatively, you can run it using V:

```sh
v -enable-globals run ourdb_client.vsh
```

## How It Works

1. Connects to the OurDB server on `localhost:3000`.
2. Sets a record with the value `hello`.
3. Retrieves the record by ID and verifies the stored value.
4. Deletes the record.
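For reference, a minimal, hypothetical client sketch of those steps in V is shown below. The endpoint shapes (`/set`, `/get/:id`, `/delete/:id`), the JSON body, and the record id of `1` mirror the OurDB server code elsewhere in this changeset and the example output below; adjust them if the actual `ourdb_client.vsh` differs.

```v
// Minimal client sketch: set, get, then delete a record on localhost:3000.
import net.http
import json

struct KeyValueData {
    id    u32
    value string
}

fn main() {
    base := 'http://localhost:3000'

    // Set a record with the value `hello`
    set_resp := http.post('${base}/set', json.encode(KeyValueData{ value: 'hello' }))!
    println('Set result: ${set_resp.body}')

    // Retrieve the record by ID (id 1 is assumed here, as in the example output)
    get_resp := http.get('${base}/get/1')!
    println('Get result: ${get_resp.body}')

    // Delete the record
    del_resp := http.fetch(url: '${base}/delete/1', method: .delete)!
    println('Delete status: ${del_resp.status_code}')
}
```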
## Example Output

```
Set result: { id: 1, value: 'hello' }
Get result: { id: 1, value: 'hello' }
```
0  lib/data/ourdb_syncer/http/client.v  Normal file
50  lib/data/ourdb_syncer/http/server.v  Normal file
@@ -0,0 +1,50 @@
module server

// import net.http
// import rand

// struct App {
//     master_addr  string   // Mycelium address of master
//     worker_addrs []string // Mycelium addresses of workers
// }

// fn (app App) handle_set(w http.ResponseWriter, r http.Request) {
//     // Parse key-value from request
//     key := r.form['key'] or { return w.write_string('Missing key') }
//     value := r.form['value'] or { return w.write_string('Missing value') }

//     // Forward SET request to master via Mycelium
//     response := send_to_mycelium(app.master_addr, 'SET', key, value)
//     w.write_string(response)
// }

// fn (app App) handle_get(w http.Response, r http.Request) {
//     // Parse key from request
//     key := r.data

//     // Select a random worker to handle GET
//     worker_addr := app.worker_addrs[rand.intn(app.worker_addrs.len) or { 0 }]
//     // response := send_to_mycelium(worker_addr, 'GET', key, '')
//     // w.write_string(response)
// }

// fn (app App) handle_delete(w http.ResponseWriter, r http.Request) {
//     // Parse key from request
//     key := r.form['key'] or { return w.write_string('Missing key') }

//     // Forward DELETE request to master via Mycelium
//     response := send_to_mycelium(app.master_addr, 'DELETE', key, '')
//     w.write_string(response)
// }

// fn main() {
//     app := App{
//         master_addr: 'mycelium://master_node_address'
//         worker_addrs: ['mycelium://worker1_address', 'mycelium://worker2_address']
//     }
//     mut server := http.new_server('0.0.0.0:8080')
//     server.handle('/set', app.handle_set)
//     server.handle('/get', app.handle_get)
//     server.handle('/delete', app.handle_delete)
//     server.listen_and_serve()
// }
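// The handlers above are Go-style pseudocode; V's net.http has no ResponseWriter,
// new_server, or handle/listen_and_serve API. A hypothetical sketch of the same
// routing using the veb pattern used elsewhere in this changeset is left below for
// reference; send_to_mycelium remains an unimplemented assumption.
//
// import veb
//
// pub struct Context {
//     veb.Context
// }
//
// pub struct App {
// pub mut:
//     master_addr  string
//     worker_addrs []string
// }
//
// @['/set'; post]
// pub fn (app &App) set(mut ctx Context) veb.Result {
//     key := ctx.form['key'] or { return ctx.request_error('Missing key') }
//     value := ctx.form['value'] or { return ctx.request_error('Missing value') }
//     // Forward the write to the master via Mycelium (stub):
//     // response := send_to_mycelium(app.master_addr, 'SET', key, value)
//     return ctx.text('SET ${key} forwarded to master')
// }
//
// fn main() {
//     mut app := App{
//         master_addr: 'mycelium://master_node_address'
//     }
//     veb.run[App, Context](mut app, 8080)
// }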
@@ -1,162 +0,0 @@
|
||||
module ourdb
|
||||
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
import rand
|
||||
import time
|
||||
import encoding.base64
|
||||
import json
|
||||
import x.json2
|
||||
|
||||
struct MyceliumStreamer {
|
||||
pub mut:
|
||||
master &OurDB @[skip; str: skip]
|
||||
workers map[string]&OurDB @[skip; str: skip] // key is mycelium public key, value is ourdb
|
||||
incremental_mode bool = true // default is true
|
||||
mycelium_client mycelium.Mycelium @[skip; str: skip] // not a reference since we own it
|
||||
id string = rand.string(10)
|
||||
}
|
||||
|
||||
struct MyceliumStreamerInstances {
|
||||
pub mut:
|
||||
instances map[string]&MyceliumStreamer
|
||||
}
|
||||
|
||||
pub struct NewStreamerArgs {
|
||||
pub mut:
|
||||
incremental_mode bool = true // default is true
|
||||
server_port int = 9000 // default is 9000
|
||||
is_worker bool // true if this is a worker node
|
||||
id string = rand.string(10)
|
||||
}
|
||||
|
||||
fn new_db_streamer(args NewStreamerArgs) !OurDB {
|
||||
path := if args.is_worker {
|
||||
'/tmp/ourdb_worker_${rand.string(8)}'
|
||||
} else {
|
||||
'/tmp/ourdb_master'
|
||||
}
|
||||
return new(
|
||||
record_nr_max: 16777216 - 1
|
||||
record_size_max: 1024
|
||||
path: path
|
||||
reset: true
|
||||
incremental_mode: args.incremental_mode
|
||||
)!
|
||||
}
|
||||
|
||||
pub fn (mut s MyceliumStreamer) add_worker(public_key string) ! {
|
||||
mut db := new_db_streamer(
|
||||
incremental_mode: s.incremental_mode
|
||||
is_worker: true
|
||||
)!
|
||||
s.workers[public_key] = &db
|
||||
}
|
||||
|
||||
pub fn new_streamer(args NewStreamerArgs) !MyceliumStreamer {
|
||||
mut db := new_db_streamer(args)!
|
||||
|
||||
// Initialize mycelium client
|
||||
mut client := mycelium.get()!
|
||||
client.server_url = 'http://localhost:${args.server_port}'
|
||||
client.name = if args.is_worker { 'worker_node' } else { 'master_node' }
|
||||
|
||||
mut s := MyceliumStreamer{
|
||||
master: &db
|
||||
workers: {}
|
||||
incremental_mode: args.incremental_mode
|
||||
mycelium_client: client
|
||||
id: args.id
|
||||
}
|
||||
|
||||
mut instances_factory := MyceliumStreamerInstances{}
|
||||
instances_factory.instances[s.id] = &s
|
||||
|
||||
println('Created ${if args.is_worker { 'worker' } else { 'master' }} node with ID: ${s.id}')
|
||||
return s
|
||||
}
|
||||
|
||||
pub struct GetStreamerArgs {
|
||||
pub mut:
|
||||
id string @[required]
|
||||
}
|
||||
|
||||
pub fn get_streamer(args GetStreamerArgs) !MyceliumStreamer {
|
||||
mut instances_factory := MyceliumStreamerInstances{}
|
||||
|
||||
for id, instance in instances_factory.instances {
|
||||
if id == args.id {
|
||||
return *instance
|
||||
}
|
||||
}
|
||||
|
||||
return error('streamer with id ${args.id} not found')
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct MyceliumRecordArgs {
|
||||
pub:
|
||||
id u32 @[required]
|
||||
value string @[required]
|
||||
}
|
||||
|
||||
pub fn (mut s MyceliumStreamer) write(record MyceliumRecordArgs) !u32 {
|
||||
mut id := s.master.set(id: record.id, data: record.value.bytes()) or {
|
||||
return error('Failed to set id ${record.id} to value ${record.value} due to: ${err}')
|
||||
}
|
||||
|
||||
// Get updates from the beginning (id 0) to ensure complete sync
|
||||
data := s.master.push_updates(id) or { return error('Failed to push updates due to: ${err}') }
|
||||
|
||||
// Broadcast to all workers
|
||||
for worker_key, mut worker in s.workers {
|
||||
s.mycelium_client.send_msg(
|
||||
public_key: worker_key
|
||||
payload: base64.encode(data)
|
||||
topic: 'db_sync'
|
||||
)!
|
||||
worker.sync_updates(data) or { return error('Failed to sync worker: ${err}') }
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
pub struct MyceliumReadArgs {
|
||||
pub:
|
||||
id u32 @[required]
|
||||
worker_public_key string
|
||||
}
|
||||
|
||||
// listen continuously checks for messages from master and applies updates
|
||||
pub fn (mut s MyceliumStreamer) listen() ! {
|
||||
spawn fn [mut s] () ! {
|
||||
msg := s.mycelium_client.receive_msg(wait: true, peek: true, topic: 'db_sync') or {
|
||||
return error('Failed to receive message: ${err}')
|
||||
}
|
||||
|
||||
if msg.payload.len > 0 {
|
||||
update_data := base64.decode(msg.payload)
|
||||
if mut worker := s.workers[msg.dst_pk] {
|
||||
worker.sync_updates(update_data) or {
|
||||
return error('Failed to sync worker: ${err}')
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
time.sleep(time.second * 1)
|
||||
return s.listen()
|
||||
}
|
||||
|
||||
pub fn (mut s MyceliumStreamer) read(args MyceliumReadArgs) ![]u8 {
|
||||
if args.worker_public_key.len > 0 {
|
||||
return s.read_from_worker(args)
|
||||
}
|
||||
return s.master.get(args.id)!
|
||||
}
|
||||
|
||||
fn (mut s MyceliumStreamer) read_from_worker(args MyceliumReadArgs) ![]u8 {
|
||||
if mut worker := s.workers[args.worker_public_key] {
|
||||
// We need to think about reading from the workers through the mycelium client.
|
||||
return worker.get(args.id)!
|
||||
}
|
||||
return error('worker with public key ${args.worker_public_key} not found')
|
||||
}
|
||||
@@ -1,226 +0,0 @@
|
||||
module ourdb
|
||||
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import veb
|
||||
import rand
|
||||
import time
|
||||
import json
|
||||
|
||||
// Represents the server context, extending the veb.Context
|
||||
pub struct ServerContext {
|
||||
veb.Context
|
||||
}
|
||||
|
||||
// Represents the OurDB server instance
|
||||
@[heap]
|
||||
pub struct OurDBServer {
|
||||
veb.Middleware[ServerContext]
|
||||
pub mut:
|
||||
db &OurDB // Reference to the database instance
|
||||
port int // Port on which the server runs
|
||||
allowed_hosts []string // List of allowed hostnames
|
||||
allowed_operations []string // List of allowed operations (e.g., set, get, delete)
|
||||
secret_key string // Secret key for authentication
|
||||
}
|
||||
|
||||
// Represents the arguments required to initialize the OurDB server
|
||||
@[params]
|
||||
pub struct OurDBServerArgs {
|
||||
pub mut:
|
||||
port int = 3000 // Server port, default is 3000
|
||||
allowed_hosts []string = ['localhost'] // Allowed hosts
|
||||
allowed_operations []string = ['set', 'get', 'delete'] // Allowed operations
|
||||
secret_key string = rand.string_from_set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
|
||||
32) // Generated secret key
|
||||
config OurDBConfig // Database configuration parameters
|
||||
}
|
||||
|
||||
// Creates a new instance of the OurDB server
|
||||
pub fn new_server(args OurDBServerArgs) !OurDBServer {
|
||||
mut db := new(
|
||||
record_nr_max: args.config.record_nr_max
|
||||
record_size_max: args.config.record_size_max
|
||||
file_size: args.config.file_size
|
||||
path: args.config.path
|
||||
incremental_mode: args.config.incremental_mode
|
||||
reset: args.config.reset
|
||||
) or { return error('Failed to create ourdb: ${err}') }
|
||||
|
||||
mut server := OurDBServer{
|
||||
port: args.port
|
||||
allowed_hosts: args.allowed_hosts
|
||||
allowed_operations: args.allowed_operations
|
||||
secret_key: args.secret_key
|
||||
db: &db
|
||||
}
|
||||
|
||||
server.use(handler: server.logger_handler)
|
||||
server.use(handler: server.allowed_hosts_handler)
|
||||
server.use(handler: server.allowed_operations_handler)
|
||||
return server
|
||||
}
|
||||
|
||||
// Middleware for logging incoming requests and responses
|
||||
fn (self &OurDBServer) logger_handler(mut ctx ServerContext) bool {
|
||||
start_time := time.now()
|
||||
request := ctx.req
|
||||
method := request.method.str().to_upper()
|
||||
client_ip := ctx.req.header.get(.x_forwarded_for) or { ctx.req.host.str().split(':')[0] }
|
||||
user_agent := ctx.req.header.get(.user_agent) or { 'Unknown' }
|
||||
|
||||
console.print_header('${start_time.format()} | [Request] IP: ${client_ip} | Method: ${method} | Path: ${request.url} | User-Agent: ${user_agent}')
|
||||
return true
|
||||
}
|
||||
|
||||
// Middleware to check if the client host is allowed
|
||||
fn (self &OurDBServer) allowed_hosts_handler(mut ctx ServerContext) bool {
|
||||
client_host := ctx.req.host.str().split(':')[0].to_lower()
|
||||
if !self.allowed_hosts.contains(client_host) {
|
||||
ctx.request_error('403 Forbidden: Host not allowed')
|
||||
console.print_stderr('Unauthorized host: ${client_host}')
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Middleware to check if the requested operation is allowed
|
||||
fn (self &OurDBServer) allowed_operations_handler(mut ctx ServerContext) bool {
|
||||
url_parts := ctx.req.url.split('/')
|
||||
operation := url_parts[1]
|
||||
if operation !in self.allowed_operations {
|
||||
ctx.request_error('403 Forbidden: Operation not allowed')
|
||||
console.print_stderr('Unauthorized operation: ${operation}')
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Parameters for running the server
|
||||
@[params]
|
||||
pub struct RunParams {
|
||||
pub mut:
|
||||
background bool // If true, the server runs in the background
|
||||
}
|
||||
|
||||
// Starts the OurDB server
|
||||
pub fn (mut self OurDBServer) run(params RunParams) {
|
||||
if params.background {
|
||||
spawn veb.run[OurDBServer, ServerContext](mut self, self.port)
|
||||
} else {
|
||||
veb.run[OurDBServer, ServerContext](mut self, self.port)
|
||||
}
|
||||
}
|
||||
|
||||
// Represents a generic success response
|
||||
@[params]
|
||||
struct SuccessResponse[T] {
|
||||
message string // Success message
|
||||
data T // Response data
|
||||
}
|
||||
|
||||
// Represents an error response
|
||||
@[params]
|
||||
struct ErrorResponse {
|
||||
error string @[required] // Error type
|
||||
message string @[required] // Error message
|
||||
}
|
||||
|
||||
// Returns an error response
|
||||
fn (server OurDBServer) error(args ErrorResponse) ErrorResponse {
|
||||
return args
|
||||
}
|
||||
|
||||
// Returns a success response
|
||||
fn (server OurDBServer) success[T](args SuccessResponse[T]) SuccessResponse[T] {
|
||||
return args
|
||||
}
|
||||
|
||||
// Request body structure for the `/set` endpoint
|
||||
pub struct KeyValueData {
|
||||
pub mut:
|
||||
id u32 // Record ID
|
||||
value string // Value to store
|
||||
}
|
||||
|
||||
// API endpoint to set a key-value pair in the database
|
||||
@['/set'; post]
|
||||
pub fn (mut server OurDBServer) set(mut ctx ServerContext) veb.Result {
|
||||
request_body := ctx.req.data.str()
|
||||
mut decoded_body := json.decode(KeyValueData, request_body) or {
|
||||
ctx.res.set_status(.bad_request)
|
||||
return ctx.json[ErrorResponse](server.error(
|
||||
error: 'bad_request'
|
||||
message: 'Invalid request body'
|
||||
))
|
||||
}
|
||||
|
||||
if server.db.incremental_mode && decoded_body.id > 0 {
|
||||
ctx.res.set_status(.bad_request)
|
||||
return ctx.json[ErrorResponse](server.error(
|
||||
error: 'bad_request'
|
||||
message: 'Cannot set id when incremental mode is enabled'
|
||||
))
|
||||
}
|
||||
|
||||
mut record := if server.db.incremental_mode {
|
||||
server.db.set(data: decoded_body.value.bytes()) or {
|
||||
ctx.res.set_status(.bad_request)
|
||||
return ctx.json[ErrorResponse](server.error(
|
||||
error: 'bad_request'
|
||||
message: 'Failed to set key: ${err}'
|
||||
))
|
||||
}
|
||||
} else {
|
||||
server.db.set(id: decoded_body.id, data: decoded_body.value.bytes()) or {
|
||||
ctx.res.set_status(.bad_request)
|
||||
return ctx.json[ErrorResponse](server.error(
|
||||
error: 'bad_request'
|
||||
message: 'Failed to set key: ${err}'
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
decoded_body.id = record
|
||||
ctx.res.set_status(.created)
|
||||
return ctx.json(server.success(message: 'Successfully set the key', data: decoded_body))
|
||||
}
|
||||
|
||||
// API endpoint to retrieve a record by ID
|
||||
@['/get/:id'; get]
|
||||
pub fn (mut server OurDBServer) get(mut ctx ServerContext, id string) veb.Result {
|
||||
id_ := id.u32()
|
||||
record := server.db.get(id_) or {
|
||||
ctx.res.set_status(.not_found)
|
||||
return ctx.json[ErrorResponse](server.error(
|
||||
error: 'not_found'
|
||||
message: 'Record does not exist: ${err}'
|
||||
))
|
||||
}
|
||||
|
||||
data := KeyValueData{
|
||||
id: id_
|
||||
value: record.bytestr()
|
||||
}
|
||||
|
||||
ctx.res.set_status(.ok)
|
||||
return ctx.json(server.success(message: 'Successfully get record', data: data))
|
||||
}
|
||||
|
||||
// API endpoint to delete a record by ID
|
||||
@['/delete/:id'; delete]
|
||||
pub fn (mut server OurDBServer) delete(mut ctx ServerContext, id string) veb.Result {
|
||||
id_ := id.u32()
|
||||
|
||||
server.db.delete(id_) or {
|
||||
ctx.res.set_status(.not_found)
|
||||
return ctx.json[ErrorResponse](server.error(
|
||||
error: 'not_found'
|
||||
message: 'Failed to delete key: ${err}'
|
||||
))
|
||||
}
|
||||
|
||||
ctx.res.set_status(.no_content)
|
||||
return ctx.json({
|
||||
'message': 'Successfully deleted record'
|
||||
})
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
module ourdb
|
||||
module streamer
|
||||
|
||||
import encoding.binary
|
||||
|
||||
236
lib/data/ourdb_syncer/streamer/nodes.v
Normal file
236
lib/data/ourdb_syncer/streamer/nodes.v
Normal file
@@ -0,0 +1,236 @@
|
||||
module streamer
|
||||
|
||||
import time
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
import freeflowuniverse.herolib.data.ourdb
|
||||
import freeflowuniverse.herolib.osal
|
||||
import encoding.base64
|
||||
import json
|
||||
|
||||
// StreamerNode represents either a master or worker node in the streamer network
|
||||
pub struct StreamerNode {
|
||||
pub mut:
|
||||
name string = 'streamer_node' // Name of the node
|
||||
public_key string // Mycelium public key of the node
|
||||
address string // Network address (e.g., "127.0.0.1:8080")
|
||||
mycelium_client &mycelium.Mycelium = unsafe { nil } // Mycelium client instance
|
||||
workers []StreamerNode // List of connected workers (for master nodes)
|
||||
port int = 8080 // HTTP server port
|
||||
is_master bool // Flag indicating if this is a master node
|
||||
db &ourdb.OurDB // Embedded key-value database
|
||||
master_public_key string // Public key of the master node (for workers)
|
||||
last_synced_index u32 // Last synchronized index for workers
|
||||
}
|
||||
|
||||
// is_running checks if the node is operational by pinging its address
|
||||
fn (node &StreamerNode) is_running() bool {
|
||||
ping_result := osal.ping(address: node.address, retry: 2) or { return false }
|
||||
return ping_result == .ok
|
||||
}
|
||||
|
||||
// connect_to_master connects the worker node to its master
|
||||
fn (mut worker StreamerNode) connect_to_master() ! {
|
||||
if worker.is_master {
|
||||
return error('Master nodes cannot connect to other master nodes')
|
||||
}
|
||||
|
||||
worker_json := json.encode(worker)
|
||||
|
||||
log_event(
|
||||
event_type: 'connection'
|
||||
message: 'Connecting worker ${worker.public_key} to master ${worker.master_public_key}'
|
||||
)
|
||||
|
||||
worker.mycelium_client.send_msg(
|
||||
topic: 'connect'
|
||||
payload: worker_json
|
||||
public_key: worker.master_public_key
|
||||
) or { return error('Failed to send connect message: ${err}') }
|
||||
}
|
||||
|
||||
// start_and_listen runs the node's main event loop
|
||||
pub fn (mut node StreamerNode) start_and_listen() ! {
|
||||
log_event(
|
||||
event_type: 'logs'
|
||||
message: 'Starting node at ${node.address} with public key ${node.public_key}'
|
||||
)
|
||||
for {
|
||||
time.sleep(2 * time.second)
|
||||
node.handle_log_messages() or {}
|
||||
node.handle_connect_messages() or {}
|
||||
node.handle_ping_nodes() or {}
|
||||
node.handle_master_sync() or {}
|
||||
}
|
||||
}
|
||||
|
||||
// WriteParams defines parameters for writing to the database
|
||||
@[params]
|
||||
pub struct WriteParams {
|
||||
pub mut:
|
||||
key u32 // Key to write (optional in non-incremental mode)
|
||||
value string @[required] // Value to write
|
||||
}
|
||||
|
||||
// write adds data to the database and propagates it to all nodes
|
||||
pub fn (mut node StreamerNode) write(params WriteParams) !u32 {
|
||||
if node.db.incremental_mode && params.key != 0 {
|
||||
return error('Incremental mode enabled; key must be omitted')
|
||||
}
|
||||
if !node.is_master {
|
||||
return error('Only master nodes can initiate database writes')
|
||||
}
|
||||
|
||||
// data := params.value.bytes()
|
||||
// encoded_data := base64.encode(data)
|
||||
// mut targets := node.workers.map(it.public_key)
|
||||
// targets << node.public_key
|
||||
|
||||
// for target_key in targets {
|
||||
// node.mycelium_client.send_msg(
|
||||
// topic: 'db_write'
|
||||
// payload: encoded_data
|
||||
// public_key: target_key
|
||||
// )!
|
||||
// }
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// ReadParams defines parameters for reading from the database
|
||||
@[params]
|
||||
pub struct ReadParams {
|
||||
pub mut:
|
||||
key u32 @[required] // Key to read
|
||||
}
|
||||
|
||||
// read retrieves data from the database (worker only)
|
||||
pub fn (mut node StreamerNode) read(params ReadParams) !string {
|
||||
if node.is_master {
|
||||
return error('Only worker nodes can read from the database')
|
||||
}
|
||||
value := node.db.get(params.key) or { return error('Failed to read from database: ${err}') }
|
||||
return value.bytestr()
|
||||
}
|
||||
|
||||
// LogEventParams defines parameters for logging events
|
||||
@[params]
|
||||
struct LogEventParams {
|
||||
message string @[required] // Event message
|
||||
event_type string @[required] // Event type (e.g., "info", "warning", "error")
|
||||
}
|
||||
|
||||
// log_event logs an event with a timestamp
|
||||
pub fn log_event(params LogEventParams) {
|
||||
now := time.now().format()
|
||||
println('${now}| ${params.event_type}: ${params.message}')
|
||||
}
|
||||
|
||||
// handle_log_messages processes incoming log messages
|
||||
fn (mut node StreamerNode) handle_log_messages() ! {
|
||||
message := node.mycelium_client.receive_msg(wait: false, peek: true, topic: 'logs')!
|
||||
if message.payload.len > 0 {
|
||||
msg := base64.decode(message.payload).bytestr()
|
||||
log_event(event_type: 'logs', message: msg)
|
||||
}
|
||||
}
|
||||
|
||||
// to_json_str converts the node to json string
|
||||
fn (mut node StreamerNode) to_json_str() !string {
|
||||
mut to_json := json.encode(node)
|
||||
return to_json
|
||||
}
|
||||
|
||||
// master_sync processes incoming master sync messages
|
||||
fn (mut node StreamerNode) handle_master_sync() ! {
|
||||
message := node.mycelium_client.receive_msg(wait: false, peek: true, topic: 'master_sync')!
|
||||
if message.payload.len > 0 {
|
||||
master_id := base64.decode(message.payload).bytestr()
|
||||
log_event(event_type: 'logs', message: 'Calling master ${master_id} for sync')
|
||||
|
||||
master_json := node.to_json_str()!
|
||||
println('Master db: ${node.db}')
|
||||
println('master_json: ${master_json}')
|
||||
node.mycelium_client.send_msg(
|
||||
topic: 'master_sync_replay'
|
||||
payload: master_json
|
||||
public_key: message.src_pk
|
||||
)!
|
||||
|
||||
// // // last_synced_index := node.db.get_last_index()!
|
||||
// database_data_bytes := node.db.push_updates(0) or {
|
||||
// return error('Failed to push updates: ${err}')
|
||||
// }
|
||||
|
||||
// println('database_data_bytes: ${database_data_bytes}')
|
||||
node.mycelium_client.send_msg(
|
||||
topic: 'master_sync_db'
|
||||
payload: master_json
|
||||
public_key: message.src_pk
|
||||
)!
|
||||
|
||||
log_event(
|
||||
event_type: 'logs'
|
||||
message: 'Responded to master ${master_id} for sync'
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// handle_connect_messages processes connect messages to add workers
|
||||
fn (mut node StreamerNode) handle_connect_messages() ! {
|
||||
message := node.mycelium_client.receive_msg(wait: false, peek: true, topic: 'connect')!
|
||||
if message.payload.len > 0 {
|
||||
worker_json := base64.decode(message.payload).bytestr()
|
||||
worker := json.decode(StreamerNode, worker_json) or {
|
||||
return error('Failed to decode worker node: ${err}')
|
||||
}
|
||||
if !node.workers.any(it.public_key == worker.public_key) {
|
||||
node.workers << worker
|
||||
log_event(
|
||||
event_type: 'connection'
|
||||
message: 'Master ${node.public_key} connected worker ${worker.public_key}'
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handle_ping_nodes pings all workers or the master, removing unresponsive workers
|
||||
pub fn (mut node StreamerNode) handle_ping_nodes() ! {
|
||||
if node.is_master {
|
||||
mut i := 0
|
||||
for i < node.workers.len {
|
||||
worker := &node.workers[i]
|
||||
if !worker.is_running() {
|
||||
log_event(event_type: 'logs', message: 'Worker ${worker.address} is not running')
|
||||
log_event(event_type: 'logs', message: 'Removing worker ${worker.public_key}')
|
||||
node.workers.delete(i)
|
||||
} else {
|
||||
node.mycelium_client.send_msg(
|
||||
topic: 'logs'
|
||||
payload: 'Master ${node.public_key} is pinging worker ${worker.public_key}'
|
||||
public_key: worker.public_key
|
||||
)!
|
||||
i++
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !node.is_running() {
|
||||
return error('Worker node is not running')
|
||||
}
|
||||
if node.master_public_key.len == 0 {
|
||||
return error('Master public key is not set')
|
||||
}
|
||||
node.mycelium_client.send_msg(
|
||||
topic: 'logs'
|
||||
payload: 'Worker ${node.public_key} is pinging master ${node.master_public_key}'
|
||||
public_key: node.master_public_key
|
||||
)!
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_master_sync_replay(mut mycelium_client mycelium.Mycelium) !string {
|
||||
message := mycelium_client.receive_msg(wait: false, peek: true, topic: 'master_sync_replay')!
|
||||
if message.payload.len > 0 {
|
||||
return message.payload
|
||||
}
|
||||
return ''
|
||||
}
|
||||
291  lib/data/ourdb_syncer/streamer/streamer.v  Normal file
@@ -0,0 +1,291 @@
|
||||
module streamer
|
||||
|
||||
import freeflowuniverse.herolib.clients.mycelium
|
||||
import freeflowuniverse.herolib.data.ourdb
|
||||
import encoding.base64
|
||||
import json
|
||||
import time
|
||||
|
||||
// Maximum number of workers allowed
|
||||
const max_workers = 10
|
||||
|
||||
// Streamer represents the entire network, including master and workers
|
||||
pub struct Streamer {
|
||||
pub mut:
|
||||
name string = 'streamer'
|
||||
port int = 8080
|
||||
master StreamerNode
|
||||
incremental_mode bool = true // Incremental mode for database
|
||||
reset bool = true // Reset database
|
||||
}
|
||||
|
||||
// NewStreamerParams defines parameters for creating a new streamer
|
||||
@[params]
|
||||
pub struct NewStreamerParams {
|
||||
pub mut:
|
||||
name string = 'streamer'
|
||||
port int = 8080
|
||||
incremental_mode bool = true // Incremental mode for database
|
||||
reset bool = true // Reset database
|
||||
}
|
||||
|
||||
// Creates a new streamer instance
|
||||
pub fn new_streamer(params NewStreamerParams) !Streamer {
|
||||
log_event(
|
||||
event_type: 'logs'
|
||||
message: 'Creating new streamer'
|
||||
)
|
||||
|
||||
mut db := new_db(
|
||||
incremental_mode: params.incremental_mode
|
||||
reset: params.reset
|
||||
)!
|
||||
|
||||
master := StreamerNode{
|
||||
db: db
|
||||
}
|
||||
|
||||
return Streamer{
|
||||
name: params.name
|
||||
port: params.port
|
||||
master: master
|
||||
incremental_mode: params.incremental_mode
|
||||
reset: params.reset
|
||||
}
|
||||
}
|
||||
|
||||
@[params]
|
||||
struct NewMyCeliumClientParams {
|
||||
port int = 8080 // HTTP server port
|
||||
name string = 'streamer_client' // Mycelium client name
|
||||
}
|
||||
|
||||
fn new_mycelium_client(params NewMyCeliumClientParams) !&mycelium.Mycelium {
|
||||
mut mycelium_client := mycelium.get()!
|
||||
mycelium_client.server_url = 'http://localhost:${params.port}'
|
||||
mycelium_client.name = params.name
|
||||
return mycelium_client
|
||||
}
|
||||
|
||||
@[params]
|
||||
struct DBClientParams {
|
||||
db_dir string = '/tmp/ourdb' // Database directory
|
||||
reset bool = true // Reset database
|
||||
incremental_mode bool = true // Incremental mode for database
|
||||
record_size_max u32 = 1024 // Maximum record size
|
||||
record_nr_max u32 = 16777216 - 1 // Maximum number of records
|
||||
}
|
||||
|
||||
fn new_db(params DBClientParams) !&ourdb.OurDB {
|
||||
mut db := ourdb.new(
|
||||
record_nr_max: params.record_nr_max
|
||||
record_size_max: params.record_size_max
|
||||
path: params.db_dir
|
||||
reset: params.reset
|
||||
incremental_mode: params.incremental_mode
|
||||
)!
|
||||
return &db
|
||||
}
|
||||
|
||||
// ConnectStreamerParams defines parameters for connecting to an existing streamer
|
||||
@[params]
|
||||
pub struct ConnectStreamerParams {
|
||||
pub mut:
|
||||
master_public_key string @[required] // Public key of the master node
|
||||
port int = 8080 // HTTP server port
|
||||
name string = 'streamer' // Mycelium client name
|
||||
}
|
||||
|
||||
// Connects to an existing streamer master node; intended for worker nodes
|
||||
pub fn connect_streamer(params ConnectStreamerParams) !Streamer {
|
||||
log_event(
|
||||
event_type: 'info'
|
||||
message: 'Connecting to streamer'
|
||||
)
|
||||
|
||||
mut streamer_ := new_streamer(
|
||||
port: params.port
|
||||
name: params.name
|
||||
)!
|
||||
|
||||
// To do this, we need to let the user send the node IP to ping it.
|
||||
// // Setting the master address to just ping the node
|
||||
// streamer_.master = StreamerNode{
|
||||
// address: params.master_public_key
|
||||
// }
|
||||
|
||||
// if !streamer_.master.is_running() {
|
||||
// return error('Master node is not running')
|
||||
// }
|
||||
|
||||
// 1. Get the master node | Done
|
||||
// 2. Keep listening until we receive replay from the master node | Done
|
||||
// 3. Sync the master workers | Done
|
||||
// 4. Push to the network that a new visitor has joined | Done
|
||||
// 5. Sync the master DB InProgress...
|
||||
|
||||
mut mycelium_client := new_mycelium_client(
|
||||
port: params.port
|
||||
name: params.name
|
||||
)!
|
||||
|
||||
// 1. Push an event to the running network to get the master
|
||||
mycelium_client.send_msg(
|
||||
topic: 'master_sync'
|
||||
payload: params.master_public_key
|
||||
public_key: params.master_public_key
|
||||
)!
|
||||
|
||||
mut encoded_master := ''
|
||||
|
||||
// 2. Keep listening until we receive replay from the master node
|
||||
mut retries := 0
|
||||
for {
|
||||
time.sleep(2 * time.second)
|
||||
log_event(
|
||||
event_type: 'info'
|
||||
message: 'Waiting for master sync replay'
|
||||
)
|
||||
|
||||
encoded_master = handle_master_sync_replay(mut mycelium_client) or { '' }
|
||||
if encoded_master.len > 0 {
|
||||
log_event(
|
||||
event_type: 'info'
|
||||
message: 'Got master sync replay'
|
||||
)
|
||||
|
||||
encoded_master = encoded_master
|
||||
break
|
||||
}
|
||||
|
||||
if retries > 10 {
|
||||
log_event(
|
||||
event_type: 'error'
|
||||
message: 'Failed to connect to master node'
|
||||
)
|
||||
return error('Failed to connect to master node')
|
||||
}
|
||||
retries++
|
||||
}
|
||||
|
||||
// 3. Sync the master DB
|
||||
master_to_json := base64.decode(encoded_master).bytestr()
|
||||
master := json.decode(StreamerNode, master_to_json) or {
|
||||
return error('Failed to decode master node: ${err}')
|
||||
}
|
||||
|
||||
println('MasterDB is: ${master.db}')
|
||||
|
||||
streamer_.master = master
|
||||
|
||||
return streamer_
|
||||
}
|
||||
|
||||
// StreamerNodeParams defines parameters for creating a new master or worker node
|
||||
@[params]
|
||||
pub struct StreamerNodeParams {
|
||||
pub mut:
|
||||
public_key string @[required] // Node public key
|
||||
address string @[required] // Node address
|
||||
db_dir string = '/tmp/ourdb' // Database directory
|
||||
incremental_mode bool = true // Incremental mode for database
|
||||
reset bool = true // Reset database
|
||||
name string = 'streamer_node' // Node/Mycelium name
|
||||
port int = 8080 // HTTP server port
|
||||
master bool // Flag indicating if this is a master node
|
||||
}
|
||||
|
||||
// Creates a new master node
|
||||
fn (self Streamer) new_node(params StreamerNodeParams) !StreamerNode {
|
||||
mut client := new_mycelium_client(name: params.name, port: params.port)!
|
||||
mut db := new_db(
|
||||
db_dir: params.db_dir
|
||||
incremental_mode: params.incremental_mode
|
||||
reset: params.reset
|
||||
)!
|
||||
|
||||
return StreamerNode{
|
||||
address: params.address
|
||||
public_key: params.public_key
|
||||
mycelium_client: client
|
||||
db: db
|
||||
is_master: params.master
|
||||
master_public_key: params.public_key
|
||||
}
|
||||
}
|
||||
|
||||
// Adds a master node to the streamer
|
||||
pub fn (mut self Streamer) add_master(params StreamerNodeParams) !StreamerNode {
|
||||
if self.master.public_key.len != 0 {
|
||||
return error('Streamer already has a master node!')
|
||||
}
|
||||
|
||||
mut params_ := params
|
||||
params_.master = true
|
||||
|
||||
new_master := self.new_node(params_)!
|
||||
self.master = new_master
|
||||
return self.master
|
||||
}
|
||||
|
||||
// Connects to an existing streamer master node; intended for worker nodes
|
||||
pub fn (mut self Streamer) add_worker(params StreamerNodeParams) !StreamerNode {
|
||||
if params.master {
|
||||
return error('Worker nodes cannot be master nodes')
|
||||
}
|
||||
|
||||
if self.master.public_key.len == 0 {
|
||||
return error('Streamer has no master node')
|
||||
}
|
||||
|
||||
if self.master.workers.len >= max_workers {
|
||||
return error('Maximum worker limit reached')
|
||||
}
|
||||
|
||||
mut worker_node := self.new_node(params)!
|
||||
|
||||
if !worker_node.is_running() {
|
||||
return error('Worker node is not running')
|
||||
}
|
||||
|
||||
self.master.workers << worker_node
|
||||
worker_node.master_public_key = self.master.public_key
|
||||
worker_node.connect_to_master()!
|
||||
return worker_node
|
||||
}
|
||||
|
||||
// Gets the master node
|
||||
pub fn (self Streamer) get_master() StreamerNode {
|
||||
return self.master
|
||||
}
|
||||
|
||||
// Get master worker nodes
|
||||
pub fn (self Streamer) get_workers() ![]StreamerNode {
|
||||
if self.master.public_key.len == 0 {
|
||||
return error('Streamer has no master node')
|
||||
}
|
||||
|
||||
return self.master.workers
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct GetWorkerParams {
|
||||
pub mut:
|
||||
public_key string @[required] // Public key of the worker node
|
||||
}
|
||||
|
||||
// Get worker node
|
||||
pub fn (self Streamer) get_worker(params GetWorkerParams) !StreamerNode {
|
||||
if !self.master.is_master {
|
||||
return self.master
|
||||
}
|
||||
|
||||
// Find the worker node
|
||||
for worker in self.master.workers {
|
||||
if params.public_key == worker.public_key {
|
||||
return worker
|
||||
}
|
||||
}
|
||||
|
||||
return error('Worker with public key ${params.public_key} not found')
|
||||
}
|
||||
@@ -149,3 +149,106 @@ You can configure the WebDAV server using the following parameters when calling
- Support for advanced WebDAV methods like `LOCK` and `UNLOCK`.
- Integration with persistent databases for user credentials.
- TLS/SSL support for secure connections.


# WebDAV Property Model

This file implements the WebDAV property model as defined in [RFC 4918](https://tools.ietf.org/html/rfc4918). It provides a set of property types that represent various WebDAV properties used in PROPFIND and PROPPATCH operations.

## Overview

The `model_property.v` file defines:

1. A `Property` interface that all WebDAV properties must implement
2. Various property type implementations for standard WebDAV properties
3. Helper functions for XML serialization and time formatting

## Property Interface

```v
pub interface Property {
    xml() string
    xml_name() string
}
```

All WebDAV properties must implement:

- `xml()`: Returns the full XML representation of the property with its value
- `xml_name()`: Returns just the XML tag name of the property (used in property requests)
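For illustration, a minimal sketch of a property type that satisfies this interface is shown below. The struct shape, field name, and `D:` namespace prefix are assumptions for the example; the actual `DisplayName` implementation in `model_property.v` may differ.

```v
pub struct DisplayName {
pub:
    value string
}

// xml returns the full XML representation of the property with its value.
pub fn (p DisplayName) xml() string {
    return '<D:displayname>${p.value}</D:displayname>'
}

// xml_name returns just the XML tag name, as used in property requests.
pub fn (p DisplayName) xml_name() string {
    return '<D:displayname/>'
}
```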
## Property Types

The file implements the following WebDAV property types:

| Property Type | Description |
|---------------|-------------|
| `DisplayName` | The display name of a resource |
| `GetLastModified` | Last modification time of a resource |
| `GetContentType` | MIME type of a resource |
| `GetContentLength` | Size of a resource in bytes |
| `ResourceType` | Indicates if a resource is a collection (directory) or not |
| `CreationDate` | Creation date of a resource |
| `SupportedLock` | Lock capabilities supported by the server |
| `LockDiscovery` | Active locks on a resource |

## Helper Functions

- `fn (p []Property) xml() string`: Generates XML for a list of properties
- `fn format_iso8601(t time.Time) string`: Formats a time in ISO8601 format for WebDAV

## Usage

These property types are used when responding to WebDAV PROPFIND requests to describe resources in the WebDAV server.


# WebDAV Locker

This file implements a locking mechanism for resources in a WebDAV context. It provides functionality to manage locks on resources, ensuring that they are not modified by multiple clients simultaneously.

## Overview

The `locker.v` file defines:

1. A `Locker` structure that manages locks for resources.
2. A `LockResult` structure that represents the result of a lock operation.
3. Methods for locking and unlocking resources, checking lock status, and managing locks.

## Locker Structure

```v
struct Locker {
mut:
    locks map[string]Lock
}
```

- `locks`: A mutable map that stores locks keyed by resource name.

## LockResult Structure

```v
pub struct LockResult {
pub:
    token       string // The lock token
    is_new_lock bool   // Whether this is a new lock or an existing one
}
```

- `token`: The unique identifier for the lock.
- `is_new_lock`: Indicates if this is a new lock or an existing one.

## Locking and Unlocking

- `pub fn (mut lm Locker) lock(l Lock) !Lock`: Attempts to lock a resource for a specific owner. Returns a `LockResult` with the lock token and whether it's a new lock.
- `pub fn (mut lm Locker) unlock(resource string) bool`: Unlocks a resource by removing its lock.
- `pub fn (lm Locker) is_locked(resource string) bool`: Checks if a resource is currently locked.
- `pub fn (lm Locker) get_lock(resource string) ?Lock`: Returns the lock object for a resource if it exists and is valid.
- `pub fn (mut lm Locker) unlock_with_token(resource string, token string) bool`: Unlocks a resource if the correct token is provided.

## Recursive Locking

- `pub fn (mut lm Locker) lock_recursive(l Lock) !Lock`: Locks a resource recursively, allowing for child resources to be locked (implementation for child resources is not complete).

## Cleanup

- `pub fn (mut lm Locker) cleanup_expired_locks()`: Cleans up expired locks (implementation is currently commented out).
@@ -1,206 +0,0 @@
|
||||
module webdav
|
||||
|
||||
import encoding.xml
|
||||
import log
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.vfs
|
||||
import freeflowuniverse.herolib.vfs.vfs_db
|
||||
import os
|
||||
import time
|
||||
import net.http
|
||||
import veb
|
||||
|
||||
// PropfindRequest represents a parsed PROPFIND request
|
||||
pub struct PropfindRequest {
|
||||
pub:
|
||||
typ PropfindType
|
||||
props []string // Property names if typ is prop
|
||||
depth Depth // Depth of the request (0, 1, or -1 for infinity)
|
||||
xml_content string // Original XML content
|
||||
}
|
||||
|
||||
pub enum Depth {
|
||||
infinity = -1
|
||||
zero = 0
|
||||
one = 1
|
||||
}
|
||||
|
||||
// PropfindType represents the type of PROPFIND request
|
||||
pub enum PropfindType {
|
||||
allprop // Request all properties
|
||||
propname // Request property names only
|
||||
prop // Request specific properties
|
||||
invalid // Invalid request
|
||||
}
|
||||
|
||||
// parse_propfind_xml parses the XML body of a PROPFIND request
|
||||
pub fn parse_propfind_xml(req http.Request) !PropfindRequest {
|
||||
|
||||
data := req.data
|
||||
// Parse Depth header
|
||||
depth_str := req.header.get_custom('Depth') or { '0' }
|
||||
depth := parse_depth(depth_str)
|
||||
|
||||
|
||||
if data.len == 0 {
|
||||
// If no body is provided, default to allprop
|
||||
return PropfindRequest{
|
||||
typ: .allprop
|
||||
depth: depth
|
||||
xml_content: ''
|
||||
}
|
||||
}
|
||||
|
||||
doc := xml.XMLDocument.from_string(data) or {
|
||||
return error('Failed to parse XML: ${err}')
|
||||
}
|
||||
|
||||
root := doc.root
|
||||
if root.name.to_lower() != 'propfind' && !root.name.ends_with(':propfind') {
|
||||
return error('Invalid PROPFIND request: root element must be propfind')
|
||||
}
|
||||
|
||||
mut typ := PropfindType.invalid
|
||||
mut props := []string{}
|
||||
|
||||
// Check for allprop, propname, or prop elements
|
||||
for child in root.children {
|
||||
if child is xml.XMLNode {
|
||||
node := child as xml.XMLNode
|
||||
|
||||
// Check for allprop
|
||||
if node.name == 'allprop' || node.name == 'D:allprop' {
|
||||
typ = .allprop
|
||||
break
|
||||
}
|
||||
|
||||
// Check for propname
|
||||
if node.name == 'propname' || node.name == 'D:propname' {
|
||||
typ = .propname
|
||||
break
|
||||
}
|
||||
|
||||
// Check for prop
|
||||
if node.name == 'prop' || node.name == 'D:prop' {
|
||||
typ = .prop
|
||||
|
||||
// Extract property names
|
||||
for prop_child in node.children {
|
||||
if prop_child is xml.XMLNode {
|
||||
prop_node := prop_child as xml.XMLNode
|
||||
props << prop_node.name
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if typ == .invalid {
|
||||
return error('Invalid PROPFIND request: missing prop, allprop, or propname element')
|
||||
}
|
||||
|
||||
return PropfindRequest{
|
||||
typ: typ
|
||||
props: props
|
||||
depth: depth
|
||||
xml_content: data
|
||||
}
|
||||
}
|
||||
|
||||
// parse_depth parses the Depth header value
|
||||
pub fn parse_depth(depth_str string) Depth {
|
||||
if depth_str == 'infinity' { return .infinity}
|
||||
else if depth_str == '0' { return .zero}
|
||||
else if depth_str == '1' { return .one}
|
||||
else {
|
||||
log.warn('[WebDAV] Invalid Depth header value: ${depth_str}, defaulting to infinity')
|
||||
return .infinity
|
||||
}
|
||||
}
|
||||
|
||||
// returns the properties of a filesystem entry
|
||||
fn get_properties(entry &vfs.FSEntry) []Property {
|
||||
mut props := []Property{}
|
||||
|
||||
metadata := entry.get_metadata()
|
||||
|
||||
// Display name
|
||||
props << DisplayName(metadata.name)
|
||||
props << GetLastModified(format_iso8601(metadata.modified_time()))
|
||||
props << GetContentType(if entry.is_dir() {'httpd/unix-directory'} else {get_file_content_type(entry.get_path())})
|
||||
props << ResourceType(entry.is_dir())
|
||||
|
||||
// Content length (only for files)
|
||||
if !entry.is_dir() {
|
||||
props << GetContentLength(metadata.size.str())
|
||||
}
|
||||
|
||||
// Creation date
|
||||
props << CreationDate(format_iso8601(metadata.created_time()))
|
||||
return props
|
||||
}
|
||||
|
||||
// Response represents a WebDAV response for a resource
|
||||
pub struct Response {
|
||||
pub:
|
||||
href string
|
||||
found_props []Property
|
||||
not_found_props []Property
|
||||
}
|
||||
|
||||
fn (r Response) xml() string {
|
||||
return '<D:response>\n<D:href>${r.href}</D:href>
|
||||
<D:propstat><D:prop>${r.found_props.map(it.xml()).join_lines()}</D:prop><D:status>HTTP/1.1 200 OK</D:status></D:propstat>
|
||||
</D:response>'
|
||||
}
|
||||
|
||||
// generate_propfind_response generates a PROPFIND response XML string from Response structs
|
||||
pub fn (r []Response) xml () string {
|
||||
return '<?xml version="1.0" encoding="UTF-8"?>\n<D:multistatus xmlns:D="DAV:">
|
||||
${r.map(it.xml()).join_lines()}\n</D:multistatus>'
|
||||
}
|
||||
|
||||
fn get_file_content_type(path string) string {
|
||||
ext := path.all_after_last('.')
|
||||
content_type := if v := veb.mime_types[ext] {
|
||||
v
|
||||
} else {
|
||||
'text/plain; charset=utf-8'
|
||||
}
|
||||
|
||||
return content_type
|
||||
}
|
||||
|
||||
// get_responses returns all properties for the given path and depth
|
||||
fn (mut app App) get_responses(entry vfs.FSEntry, req PropfindRequest) ![]Response {
|
||||
mut responses := []Response{}
|
||||
|
||||
path := if entry.is_dir() && entry.get_path() != '/' {
|
||||
'${entry.get_path()}/'
|
||||
} else {
|
||||
entry.get_path()
|
||||
}
|
||||
log.debug('Finding for ${path}')
|
||||
// main entry response
|
||||
responses << Response {
|
||||
href: path
|
||||
// not_found: entry.get_unfound_properties(req)
|
||||
found_props: get_properties(entry)
|
||||
}
|
||||
|
||||
if !entry.is_dir() || req.depth == .zero {
|
||||
return responses
|
||||
}
|
||||
|
||||
entries := app.vfs.dir_list(path) or {
|
||||
log.error('Failed to list directory for ${path} ${err}')
|
||||
return responses }
|
||||
for e in entries {
|
||||
responses << app.get_responses(e, PropfindRequest {
|
||||
...req,
|
||||
depth: if req.depth == .one { .zero } else { .infinity }
|
||||
})!
|
||||
}
|
||||
return responses
|
||||
}
|
||||
@@ -5,10 +5,10 @@ import freeflowuniverse.herolib.ui.console
|
||||
import freeflowuniverse.herolib.vfs
|
||||
|
||||
@[heap]
|
||||
pub struct App {
|
||||
pub struct Server {
|
||||
veb.Middleware[Context]
|
||||
pub mut:
|
||||
lock_manager LockManager
|
||||
lock_manager Locker
|
||||
user_db map[string]string @[required]
|
||||
vfs vfs.VFSImplementation
|
||||
}
|
||||
@@ -18,23 +18,23 @@ pub struct Context {
|
||||
}
|
||||
|
||||
@[params]
|
||||
pub struct AppArgs {
|
||||
pub struct ServerArgs {
|
||||
pub mut:
|
||||
user_db map[string]string @[required]
|
||||
vfs vfs.VFSImplementation
|
||||
}
|
||||
|
||||
pub fn new_app(args AppArgs) !&App {
|
||||
mut app := &App{
|
||||
pub fn new_server(args ServerArgs) !&Server {
|
||||
mut server := &Server{
|
||||
user_db: args.user_db.clone()
|
||||
vfs: args.vfs
|
||||
}
|
||||
|
||||
// register middlewares for all routes
|
||||
app.use(handler: app.auth_middleware)
|
||||
app.use(handler: middleware_log_request)
|
||||
app.use(handler: middleware_log_response, after: true)
|
||||
return app
|
||||
server.use(handler: middleware_log_request)
|
||||
server.use(handler: server.auth_middleware)
|
||||
server.use(handler: middleware_log_response, after: true)
|
||||
return server
|
||||
}
|
||||
|
||||
@[params]
|
||||
@@ -44,11 +44,11 @@ pub mut:
|
||||
background bool
|
||||
}
|
||||
|
||||
pub fn (mut app App) run(params RunParams) {
|
||||
pub fn (mut server Server) run(params RunParams) {
|
||||
console.print_green('Running the server on port: ${params.port}')
|
||||
if params.background {
|
||||
spawn veb.run[App, Context](mut app, params.port)
|
||||
spawn veb.run[Server, Context](mut server, params.port)
|
||||
} else {
|
||||
veb.run[App, Context](mut app, params.port)
|
||||
veb.run[Server, Context](mut server, params.port)
|
||||
}
|
||||
}
|
||||
22
lib/dav/webdav/factory_test.v
Normal file
@@ -0,0 +1,22 @@
|
||||
module webdav
|
||||
|
||||
import net.http
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import time
|
||||
import freeflowuniverse.herolib.data.ourdb
|
||||
import encoding.base64
|
||||
import rand
|
||||
import os
|
||||
import freeflowuniverse.herolib.vfs.vfs_db
|
||||
|
||||
const testdata_path := os.join_path(os.dir(@FILE), 'testdata')
|
||||
const database_path := os.join_path(testdata_path, 'database')
|
||||
|
||||
fn test_new_server() {
|
||||
mut metadata_db := ourdb.new(path:os.join_path(database_path, 'metadata'))!
|
||||
mut data_db := ourdb.new(path:os.join_path(database_path, 'data'))!
|
||||
mut vfs := vfs_db.new(mut metadata_db, mut data_db)!
|
||||
server := new_server(vfs: vfs, user_db: {
|
||||
'admin': '123'
|
||||
})!
|
||||
}
|
||||
@@ -3,7 +3,7 @@ module webdav
|
||||
import time
|
||||
import rand
|
||||
|
||||
struct LockManager {
|
||||
struct Locker {
|
||||
mut:
|
||||
locks map[string]Lock
|
||||
}
|
||||
@@ -18,7 +18,7 @@ pub:
|
||||
// lock attempts to lock a resource for a specific owner
|
||||
// Returns a LockResult with the lock token and whether it's a new lock
|
||||
// Returns an error if the resource is already locked by a different owner
|
||||
pub fn (mut lm LockManager) lock(l Lock) !Lock {
|
||||
pub fn (mut lm Locker) lock(l Lock) !Lock {
|
||||
if l.resource in lm.locks {
|
||||
// Check if the lock is still valid
|
||||
existing_lock := lm.locks[l.resource]
|
||||
@@ -54,7 +54,7 @@ pub fn (mut lm LockManager) lock(l Lock) !Lock {
|
||||
return new_lock
|
||||
}
|
||||
|
||||
pub fn (mut lm LockManager) unlock(resource string) bool {
|
||||
pub fn (mut lm Locker) unlock(resource string) bool {
|
||||
if resource in lm.locks {
|
||||
lm.locks.delete(resource)
|
||||
return true
|
||||
@@ -63,7 +63,7 @@ pub fn (mut lm LockManager) unlock(resource string) bool {
|
||||
}
|
||||
|
||||
// is_locked checks if a resource is currently locked
|
||||
pub fn (lm LockManager) is_locked(resource string) bool {
|
||||
pub fn (lm Locker) is_locked(resource string) bool {
|
||||
if resource in lm.locks {
|
||||
lock_ := lm.locks[resource]
|
||||
// Check if lock is expired
|
||||
@@ -76,7 +76,7 @@ pub fn (lm LockManager) is_locked(resource string) bool {
|
||||
}
|
||||
|
||||
// get_lock returns the Lock object for a resource if it exists and is valid
|
||||
pub fn (lm LockManager) get_lock(resource string) ?Lock {
|
||||
pub fn (lm Locker) get_lock(resource string) ?Lock {
|
||||
if resource in lm.locks {
|
||||
lock_ := lm.locks[resource]
|
||||
// Check if lock is expired
|
||||
@@ -88,7 +88,7 @@ pub fn (lm LockManager) get_lock(resource string) ?Lock {
|
||||
return none
|
||||
}
|
||||
|
||||
pub fn (mut lm LockManager) unlock_with_token(resource string, token string) bool {
|
||||
pub fn (mut lm Locker) unlock_with_token(resource string, token string) bool {
|
||||
if resource in lm.locks {
|
||||
lock_ := lm.locks[resource]
|
||||
if lock_.token == token {
|
||||
@@ -99,7 +99,7 @@ pub fn (mut lm LockManager) unlock_with_token(resource string, token string) boo
|
||||
return false
|
||||
}
|
||||
|
||||
fn (mut lm LockManager) lock_recursive(l Lock) !Lock {
|
||||
fn (mut lm Locker) lock_recursive(l Lock) !Lock {
|
||||
if l.depth == 0 {
|
||||
return lm.lock(l)
|
||||
}
|
||||
@@ -108,7 +108,7 @@ fn (mut lm LockManager) lock_recursive(l Lock) !Lock {
|
||||
return lm.lock(l)
|
||||
}
|
||||
|
||||
pub fn (mut lm LockManager) cleanup_expired_locks() {
|
||||
pub fn (mut lm Locker) cleanup_expired_locks() {
|
||||
// now := time.now().unix()
|
||||
// lm.locks
|
||||
// lm.locks = lm.locks.filter(it.value.created_at.unix() + it.value.timeout > now)
|
||||
80
lib/dav/webdav/locker_test.v
Normal file
@@ -0,0 +1,80 @@
|
||||
module webdav
|
||||
|
||||
import time
|
||||
import rand
|
||||
|
||||
fn test_lock() {
|
||||
mut locker := Locker{locks: map[string]Lock{}}
|
||||
|
||||
// Lock the resource
|
||||
result := locker.lock(
|
||||
resource: 'test-resource',
|
||||
owner: 'test-owner',
|
||||
depth: 0,
|
||||
timeout: 3600,
|
||||
) or { panic(err) }
|
||||
assert result.token != ''
|
||||
assert locker.is_locked('test-resource')
|
||||
}
|
||||
|
||||
fn test_unlock() {
|
||||
mut locker := Locker{locks: map[string]Lock{}}
|
||||
|
||||
// Lock the resource
|
||||
locker.lock(
|
||||
resource: 'test-resource',
|
||||
owner: 'test-owner',
|
||||
depth: 0,
|
||||
timeout: 3600,
|
||||
) or { panic(err) }
|
||||
|
||||
// Unlock the resource
|
||||
is_unlocked := locker.unlock('test-resource')
|
||||
assert is_unlocked
|
||||
assert !locker.is_locked('test-resource')
|
||||
}
|
||||
|
||||
fn test_lock_with_different_owner() {
|
||||
mut locker := Locker{locks: map[string]Lock{}}
|
||||
lock1 := Lock{
|
||||
resource: 'test-resource',
|
||||
owner: 'owner1',
|
||||
depth: 0,
|
||||
timeout: 3600,
|
||||
}
|
||||
lock2 := Lock{
|
||||
resource: 'test-resource',
|
||||
owner: 'owner2',
|
||||
depth: 0,
|
||||
timeout: 3600,
|
||||
}
|
||||
|
||||
// Lock the resource with the first owner
|
||||
locker.lock(lock1) or { panic(err) }
|
||||
|
||||
// Attempt to lock the resource with a different owner
|
||||
if result := locker.lock(lock2) {
|
||||
assert false, 'locking should fail'
|
||||
} else {
|
||||
assert err == error('Resource is already locked by a different owner')
|
||||
}
|
||||
}
|
||||
|
||||
fn test_cleanup_expired_locks() {
|
||||
mut locker := Locker{locks: map[string]Lock{}}
|
||||
|
||||
// Lock the resource
|
||||
locker.lock(
|
||||
resource: 'test-resource',
|
||||
owner: 'test-owner',
|
||||
depth: 0,
|
||||
timeout: 1,
|
||||
) or { panic(err) }
|
||||
|
||||
// Wait for the lock to expire
|
||||
time.sleep(2 * time.second)
|
||||
|
||||
// Cleanup expired locks
|
||||
locker.cleanup_expired_locks()
|
||||
assert !locker.is_locked('test-resource')
|
||||
}
|
||||
@@ -1,39 +0,0 @@
|
||||
import freeflowuniverse.herolib.dav.webdav
|
||||
import freeflowuniverse.herolib.vfs.vfs_nested
|
||||
import freeflowuniverse.herolib.vfs
|
||||
import freeflowuniverse.herolib.vfs.vfs_db
|
||||
import os
|
||||
|
||||
fn test_logic() ! {
|
||||
println('Testing OurDB VFS Logic to WebDAV Server...')
|
||||
|
||||
// Create test directories
|
||||
test_data_dir := os.join_path(os.temp_dir(), 'vfs_db_test_data')
|
||||
test_meta_dir := os.join_path(os.temp_dir(), 'vfs_db_test_meta')
|
||||
|
||||
os.mkdir_all(test_data_dir)!
|
||||
os.mkdir_all(test_meta_dir)!
|
||||
|
||||
defer {
|
||||
os.rmdir_all(test_data_dir) or {}
|
||||
os.rmdir_all(test_meta_dir) or {}
|
||||
}
|
||||
|
||||
// Create VFS instance; lower level VFS Implementations that use OurDB
|
||||
mut vfs1 := vfs_db.new(test_data_dir, test_meta_dir)!
|
||||
|
||||
mut high_level_vfs := vfsnested.new()
|
||||
|
||||
// Nest OurDB VFS instances at different paths
|
||||
high_level_vfs.add_vfs('/', vfs1) or { panic(err) }
|
||||
|
||||
// Test directory listing
|
||||
entries := high_level_vfs.dir_list('/')!
|
||||
assert entries.len == 1 // Data directory
|
||||
|
||||
// // Check if dir is existing
|
||||
// assert high_level_vfs.exists('/') == true
|
||||
|
||||
// // Check if dir is not existing
|
||||
// assert high_level_vfs.exists('/data') == true
|
||||
}
|
||||
@@ -1,285 +0,0 @@
|
||||
module webdav
|
||||
|
||||
import time
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import encoding.xml
|
||||
import net.urllib
|
||||
import veb
|
||||
import log
|
||||
import strings
|
||||
|
||||
@['/:path...'; options]
|
||||
pub fn (app &App) options(mut ctx Context, path string) veb.Result {
|
||||
ctx.set_custom_header('DAV', '1,2') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_header(.allow, 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE')
|
||||
// ctx.set_header(.connection, 'close')
|
||||
ctx.set_custom_header('MS-Author-Via', 'DAV') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_header(.access_control_allow_origin, '*')
|
||||
ctx.set_header(.access_control_allow_methods, 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE')
|
||||
ctx.set_header(.access_control_allow_headers, 'Authorization, Content-Type')
|
||||
ctx.set_header(.content_length, '0')
|
||||
return ctx.ok('')
|
||||
}
|
||||
|
||||
@['/:path...'; lock]
|
||||
pub fn (mut app App) lock(mut ctx Context, path string) veb.Result {
|
||||
resource := ctx.req.url
|
||||
|
||||
// Parse lock information from XML body instead of headers
|
||||
lock_info := parse_lock_xml(ctx.req.data) or {
|
||||
console.print_stderr('Failed to parse lock XML: ${err}')
|
||||
ctx.res.set_status(.bad_request)
|
||||
return ctx.text('Invalid lock request: ${err}')
|
||||
}
|
||||
|
||||
// Get depth and timeout from headers (these are still in headers)
|
||||
// Parse timeout header which can be in format "Second-600"
|
||||
timeout_str := ctx.get_custom_header('Timeout') or { 'Second-3600' }
|
||||
mut timeout := 3600 // Default 1 hour
|
||||
|
||||
if timeout_str.to_lower().starts_with('second-') {
|
||||
timeout_val := timeout_str.all_after('Second-')
|
||||
if timeout_val.int() > 0 {
|
||||
timeout = timeout_val.int()
|
||||
}
|
||||
}
|
||||
|
||||
new_lock := Lock {
|
||||
...lock_info,
|
||||
resource: ctx.req.url
|
||||
depth: ctx.get_custom_header('Depth') or { '0' }.int()
|
||||
timeout: timeout
|
||||
}
|
||||
|
||||
// Try to acquire the lock
|
||||
lock_result := app.lock_manager.lock(new_lock) or {
|
||||
// If we get here, the resource is locked by a different owner
|
||||
ctx.res.set_status(.locked)
|
||||
return ctx.text('Resource is already locked by a different owner.')
|
||||
}
|
||||
|
||||
log.debug('[WebDAV] Received lock result ${lock_result.xml()}')
|
||||
ctx.res.set_status(.ok)
|
||||
ctx.set_custom_header('Lock-Token', '${lock_result.token}') or { return ctx.server_error(err.msg()) }
|
||||
|
||||
// Create a proper WebDAV lock response
|
||||
return ctx.send_response_to_client('application/xml', lock_result.xml())
|
||||
}
|
||||
|
||||
@['/:path...'; unlock]
|
||||
pub fn (mut app App) unlock(mut ctx Context, path string) veb.Result {
|
||||
resource := ctx.req.url
|
||||
token_ := ctx.get_custom_header('Lock-Token') or { return ctx.server_error(err.msg()) }
|
||||
token := token_.trim_string_left('<').trim_string_right('>')
|
||||
if token.len == 0 {
|
||||
console.print_stderr('Unlock failed: `Lock-Token` header required.')
|
||||
ctx.res.set_status(.bad_request)
|
||||
return ctx.text('Lock failed: `Owner` header missing.')
|
||||
}
|
||||
|
||||
if app.lock_manager.unlock_with_token(resource, token) {
|
||||
ctx.res.set_status(.no_content)
|
||||
return ctx.text('Lock successfully released')
|
||||
}
|
||||
|
||||
console.print_stderr('Resource is not locked or token mismatch.')
|
||||
ctx.res.set_status(.conflict)
|
||||
return ctx.text('Resource is not locked or token mismatch')
|
||||
}
|
||||
|
||||
@['/:path...'; get]
|
||||
pub fn (mut app App) get_file(mut ctx Context, path string) veb.Result {
|
||||
log.info('[WebDAV] Getting file ${path}')
|
||||
file_data := app.vfs.file_read(path) or { return ctx.server_error(err.msg()) }
|
||||
ext := path.all_after_last('.')
|
||||
content_type := veb.mime_types['.${ext}'] or { 'text/plain' }
|
||||
println('debugzo000 ${file_data.bytestr().len}')
|
||||
println('debugzo001 ${file_data.len}')
|
||||
// ctx.res.header.set(.content_length, file_data.len.str())
|
||||
// ctx.res.set_status(.ok)
|
||||
return ctx.send_response_to_client(content_type, file_data.bytestr())
|
||||
}
|
||||
|
||||
@[head]
|
||||
pub fn (app &App) index(mut ctx Context) veb.Result {
|
||||
ctx.set_custom_header('DAV', '1,2') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_header(.allow, 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE')
|
||||
ctx.set_custom_header('MS-Author-Via', 'DAV') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_header(.access_control_allow_origin, '*')
|
||||
ctx.set_header(.access_control_allow_methods, 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE')
|
||||
ctx.set_header(.access_control_allow_headers, 'Authorization, Content-Type')
|
||||
ctx.set_header(.content_length, '0')
|
||||
return ctx.ok('')
|
||||
}
|
||||
|
||||
@['/:path...'; head]
|
||||
pub fn (mut app App) exists(mut ctx Context, path string) veb.Result {
|
||||
// Check if the requested path exists in the virtual filesystem
|
||||
if !app.vfs.exists(path) {
|
||||
return ctx.not_found()
|
||||
}
|
||||
|
||||
// Add necessary WebDAV headers
|
||||
// ctx.set_header(.authorization, 'Basic') // Indicates Basic auth usage
|
||||
ctx.set_custom_header('dav', '1, 2') or {
|
||||
return ctx.server_error('Failed to set DAV header: ${err}')
|
||||
}
|
||||
ctx.set_header(.content_length, '0') // HEAD request, so no body
|
||||
// ctx.set_header(.date, time.now().as_utc().format_rfc1123()) // Correct UTC date format
|
||||
// ctx.set_header(.content_type, 'application/xml') // XML is common for WebDAV metadata
|
||||
ctx.set_custom_header('Allow', 'OPTIONS, GET, HEAD, PROPFIND, PROPPATCH, MKCOL, PUT, DELETE, COPY, MOVE, LOCK, UNLOCK') or {
|
||||
return ctx.server_error('Failed to set Allow header: ${err}')
|
||||
}
|
||||
ctx.set_header(.accept_ranges, 'bytes') // Allows range-based file downloads
|
||||
ctx.set_custom_header('Cache-Control', 'no-cache, no-store, must-revalidate') or {
|
||||
return ctx.server_error('Failed to set Cache-Control header: ${err}')
|
||||
}
|
||||
ctx.set_custom_header('Last-Modified', time.now().as_utc().format()) or {
|
||||
return ctx.server_error('Failed to set Last-Modified header: ${err}')
|
||||
}
|
||||
ctx.res.set_version(.v1_1)
|
||||
|
||||
// Debugging output (can be removed in production)
|
||||
return ctx.ok('')
|
||||
}
|
||||
|
||||
@['/:path...'; delete]
|
||||
pub fn (mut app App) delete(mut ctx Context, path string) veb.Result {
|
||||
app.vfs.delete(path) or {
|
||||
return ctx.server_error(err.msg())
|
||||
}
|
||||
|
||||
|
||||
// Return success response
|
||||
return ctx.no_content()
|
||||
}
|
||||
|
||||
@['/:path...'; copy]
|
||||
pub fn (mut app App) copy(mut ctx Context, path string) veb.Result {
|
||||
if !app.vfs.exists(path) {
|
||||
return ctx.not_found()
|
||||
}
|
||||
|
||||
destination := ctx.req.header.get_custom('Destination') or {
|
||||
return ctx.server_error(err.msg())
|
||||
}
|
||||
destination_url := urllib.parse(destination) or {
|
||||
ctx.res.set_status(.bad_request)
|
||||
return ctx.text('Invalid Destination ${destination}: ${err}')
|
||||
}
|
||||
destination_path_str := destination_url.path
|
||||
|
||||
app.vfs.copy(path, destination_path_str) or {
|
||||
console.print_stderr('failed to copy: ${err}')
|
||||
return ctx.server_error(err.msg())
|
||||
}
|
||||
|
||||
ctx.res.set_status(.ok)
|
||||
return ctx.text('HTTP 200: Successfully copied entry: ${path}')
|
||||
}
|
||||
|
||||
@['/:path...'; move]
|
||||
pub fn (mut app App) move(mut ctx Context, path string) veb.Result {
|
||||
if !app.vfs.exists(path) {
|
||||
return ctx.not_found()
|
||||
}
|
||||
|
||||
destination := ctx.req.header.get_custom('Destination') or {
|
||||
return ctx.server_error(err.msg())
|
||||
}
|
||||
destination_url := urllib.parse(destination) or {
|
||||
ctx.res.set_status(.bad_request)
|
||||
return ctx.text('Invalid Destination ${destination}: ${err}')
|
||||
}
|
||||
destination_path_str := destination_url.path
|
||||
|
||||
log.info('[WebDAV] ${@FN} from ${path} to ${destination_path_str}')
|
||||
app.vfs.move(path, destination_path_str) or {
|
||||
console.print_stderr('failed to move: ${err}')
|
||||
return ctx.server_error(err.msg())
|
||||
}
|
||||
|
||||
ctx.res.set_status(.ok)
|
||||
return ctx.text('HTTP 200: Successfully copied entry: ${path}')
|
||||
}
|
||||
|
||||
@['/:path...'; mkcol]
|
||||
pub fn (mut app App) mkcol(mut ctx Context, path string) veb.Result {
|
||||
if app.vfs.exists(path) {
|
||||
ctx.res.set_status(.bad_request)
|
||||
return ctx.text('Another collection exists at ${path}')
|
||||
}
|
||||
|
||||
log.info('[WebDAV] Make Collection ${path}')
|
||||
app.vfs.dir_create(path) or {
|
||||
console.print_stderr('failed to create directory ${path}: ${err}')
|
||||
return ctx.server_error(err.msg())
|
||||
}
|
||||
|
||||
ctx.res.set_status(.created)
|
||||
return ctx.text('HTTP 201: Created')
|
||||
}
|
||||
|
||||
@['/:path...'; propfind]
|
||||
fn (mut app App) propfind(mut ctx Context, path string) veb.Result {
|
||||
// Parse PROPFIND request
|
||||
propfind_req := parse_propfind_xml(ctx.req) or {
|
||||
return ctx.error(WebDAVError{
|
||||
status: .bad_request
|
||||
message: 'Failed to parse PROPFIND XML: ${err}'
|
||||
tag: 'propfind-parse-error'
|
||||
})
|
||||
}
|
||||
|
||||
log.debug('[WebDAV] Propfind Request: ${propfind_req.typ} ${propfind_req.depth}')
|
||||
|
||||
|
||||
// Check if resource is locked
|
||||
if app.lock_manager.is_locked(ctx.req.url) {
|
||||
// If the resource is locked, we should still return properties
|
||||
// but we might need to indicate the lock status in the response
|
||||
// This is handled in the property generation
|
||||
log.info('[WebDAV] Resource is locked: ${ctx.req.url}')
|
||||
}
|
||||
|
||||
entry := app.vfs.get(path) or {
|
||||
return ctx.error(
|
||||
status: .not_found
|
||||
message: 'Path ${path} does not exist'
|
||||
tag: 'resource-must-be-null'
|
||||
)
|
||||
}
|
||||
|
||||
responses := app.get_responses(entry, propfind_req) or {
|
||||
return ctx.server_error('Failed to get entry properties ${err}')
|
||||
}
|
||||
|
||||
// log.debug('[WebDAV] Propfind responses ${responses}')
|
||||
|
||||
// Create multistatus response using the responses
|
||||
ctx.res.set_status(.multi_status)
|
||||
return ctx.send_response_to_client('application/xml', responses.xml())
|
||||
}
|
||||
|
||||
@['/:path...'; put]
|
||||
fn (mut app App) create_or_update(mut ctx Context, path string) veb.Result {
|
||||
if app.vfs.exists(path) {
|
||||
if fs_entry := app.vfs.get(path) {
|
||||
if fs_entry.is_dir() {
|
||||
console.print_stderr('Cannot PUT to a directory: ${path}')
|
||||
ctx.res.set_status(.method_not_allowed)
|
||||
return ctx.text('HTTP 405: Method Not Allowed')
|
||||
}
|
||||
} else {
|
||||
return ctx.server_error('failed to get FS Entry ${path}: ${err.msg()}')
|
||||
}
|
||||
} else {
|
||||
app.vfs.file_create(path) or { return ctx.server_error(err.msg()) }
|
||||
}
|
||||
if ctx.req.data.len > 0 {
|
||||
data := ctx.req.data.bytes()
|
||||
app.vfs.file_write(path, data) or { return ctx.server_error(err.msg()) }
|
||||
return ctx.ok('HTTP 200: Successfully wrote file: ${path}')
|
||||
}
|
||||
return ctx.ok('HTTP 200: Successfully created file: ${path}')
|
||||
}
|
||||
@@ -1,49 +1,53 @@
|
||||
module webdav
|
||||
|
||||
import time
|
||||
import encoding.base64
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
|
||||
fn (server &Server) auth_middleware(mut ctx Context) bool {
|
||||
ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return false }
|
||||
|
||||
fn (app &App) auth_middleware(mut ctx Context) bool {
|
||||
// return true
|
||||
auth_header := ctx.get_header(.authorization) or {
|
||||
ctx.res.set_status(.unauthorized)
|
||||
ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
|
||||
ctx.send_response_to_client('text', 'unauthorized')
|
||||
ctx.set_header(.www_authenticate, 'Basic realm="/"')
|
||||
ctx.send_response_to_client('', '')
|
||||
return false
|
||||
}
|
||||
if auth_header == '' {
|
||||
ctx.res.set_status(.unauthorized)
|
||||
ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
|
||||
ctx.send_response_to_client('text', 'unauthorized')
|
||||
ctx.set_header(.www_authenticate, 'Basic realm="/"')
|
||||
ctx.send_response_to_client('', '')
|
||||
return false
|
||||
}
|
||||
|
||||
if !auth_header.starts_with('Basic ') {
|
||||
ctx.res.set_status(.unauthorized)
|
||||
ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
|
||||
ctx.send_response_to_client('text', 'unauthorized')
|
||||
ctx.set_header(.www_authenticate, 'Basic realm="/"')
|
||||
ctx.send_response_to_client('', '')
|
||||
return false
|
||||
}
|
||||
auth_decoded := base64.decode_str(auth_header[6..])
|
||||
split_credentials := auth_decoded.split(':')
|
||||
if split_credentials.len != 2 {
|
||||
ctx.res.set_status(.unauthorized)
|
||||
ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
|
||||
ctx.set_header(.www_authenticate, 'Basic realm="/"')
|
||||
ctx.send_response_to_client('', '')
|
||||
return false
|
||||
}
|
||||
username := split_credentials[0]
|
||||
hashed_pass := split_credentials[1]
|
||||
if user := app.user_db[username] {
|
||||
if user := server.user_db[username] {
|
||||
if user != hashed_pass {
|
||||
ctx.res.set_status(.unauthorized)
|
||||
ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
|
||||
ctx.send_response_to_client('text', 'unauthorized')
|
||||
ctx.set_header(.www_authenticate, 'Basic realm="/"')
|
||||
ctx.send_response_to_client('', '')
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
ctx.res.set_status(.unauthorized)
|
||||
ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
|
||||
ctx.send_response_to_client('text', 'unauthorized')
|
||||
ctx.set_header(.www_authenticate, 'Basic realm="/"')
|
||||
ctx.send_response_to_client('', '')
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ fn middleware_log_request(mut ctx Context) bool {
|
||||
}
|
||||
|
||||
fn middleware_log_response(mut ctx Context) bool {
|
||||
log.debug('[WebDAV] Response: ${ctx.req.url} ${ctx.res.header}\n')
|
||||
log.debug('[WebDAV] Response: ${ctx.req.url} ${ctx.res.status()}\n')
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ module webdav
|
||||
import encoding.xml
|
||||
import time
|
||||
|
||||
pub struct Lock {
|
||||
struct Lock {
|
||||
pub mut:
|
||||
resource string
|
||||
owner string
|
||||
|
||||
231
lib/dav/webdav/model_property.v
Normal file
@@ -0,0 +1,231 @@
|
||||
module webdav
|
||||
|
||||
import encoding.xml
|
||||
import log
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.vfs
|
||||
import os
|
||||
import time
|
||||
import veb
|
||||
|
||||
// Property represents a WebDAV property
|
||||
pub interface Property {
|
||||
xml() xml.XMLNodeContents
|
||||
// xml_name() string
|
||||
// to_xml_node() xml.XMLNode
|
||||
// }
|
||||
}
|
||||
|
||||
type DisplayName = string
|
||||
type GetETag = string
|
||||
type GetLastModified = string
|
||||
type GetContentType = string
|
||||
type GetContentLength = string
|
||||
type QuotaAvailableBytes = u64
|
||||
type QuotaUsedBytes = u64
|
||||
type QuotaUsed = u64
|
||||
type Quota = u64
|
||||
type ResourceType = bool
|
||||
type CreationDate = string
|
||||
type SupportedLock = string
|
||||
type LockDiscovery = string
|
||||
|
||||
// fn (p []Property) xml() string {
|
||||
// return '<D:propstat>
|
||||
// <D:prop>${p.map(it.xml()).join_lines()}</D:prop>
|
||||
// <D:status>HTTP/1.1 200 OK</D:status>
|
||||
// </D:propstat>'
|
||||
// }
|
||||
|
||||
fn (p []Property) xml() xml.XMLNode {
|
||||
return xml.XMLNode{
|
||||
name: 'D:propstat'
|
||||
children: [
|
||||
xml.XMLNode{
|
||||
name: 'D:prop'
|
||||
children: p.map(it.xml())
|
||||
},
|
||||
xml.XMLNode{
|
||||
name: 'D:status'
|
||||
children: [xml.XMLNodeContents('HTTP/1.1 200 OK')]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p DisplayName) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:displayname'
|
||||
children: [xml.XMLNodeContents(p)]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p GetETag) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:getetag'
|
||||
children: [xml.XMLNodeContents(p)]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p GetLastModified) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:getlastmodified'
|
||||
children: [xml.XMLNodeContents(p)]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p GetContentType) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:getcontenttype'
|
||||
children: [xml.XMLNodeContents(p)]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p GetContentLength) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:getcontentlength'
|
||||
children: [xml.XMLNodeContents(p)]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p QuotaAvailableBytes) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:quota-available-bytes'
|
||||
children: [xml.XMLNodeContents(p.str())]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p QuotaUsedBytes) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:quota-used-bytes'
|
||||
children: [xml.XMLNodeContents(p.str())]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p Quota) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:quota'
|
||||
children: [xml.XMLNodeContents(p.str())]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p QuotaUsed) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:quotaused'
|
||||
children: [xml.XMLNodeContents(p.str())]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p ResourceType) xml() xml.XMLNodeContents {
|
||||
if p {
|
||||
// If it's a collection, add the collection element as a child
|
||||
mut children := []xml.XMLNodeContents{}
|
||||
children << xml.XMLNode{
|
||||
name: 'D:collection'
|
||||
}
|
||||
|
||||
return xml.XMLNode{
|
||||
name: 'D:resourcetype'
|
||||
children: children
|
||||
}
|
||||
} else {
|
||||
// If it's not a collection, return an empty resourcetype element
|
||||
return xml.XMLNode{
|
||||
name: 'D:resourcetype'
|
||||
children: []xml.XMLNodeContents{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn (p CreationDate) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:creationdate'
|
||||
children: [xml.XMLNodeContents(p)]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p SupportedLock) xml() xml.XMLNodeContents {
|
||||
// Create children for the supportedlock node
|
||||
mut children := []xml.XMLNodeContents{}
|
||||
|
||||
// First lockentry - exclusive
|
||||
mut lockscope1_children := []xml.XMLNodeContents{}
|
||||
lockscope1_children << xml.XMLNode{
|
||||
name: 'D:exclusive'
|
||||
}
|
||||
|
||||
lockscope1 := xml.XMLNode{
|
||||
name: 'D:lockscope'
|
||||
children: lockscope1_children
|
||||
}
|
||||
|
||||
mut locktype1_children := []xml.XMLNodeContents{}
|
||||
locktype1_children << xml.XMLNode{
|
||||
name: 'D:write'
|
||||
}
|
||||
|
||||
locktype1 := xml.XMLNode{
|
||||
name: 'D:locktype'
|
||||
children: locktype1_children
|
||||
}
|
||||
|
||||
mut lockentry1_children := []xml.XMLNodeContents{}
|
||||
lockentry1_children << lockscope1
|
||||
lockentry1_children << locktype1
|
||||
|
||||
lockentry1 := xml.XMLNode{
|
||||
name: 'D:lockentry'
|
||||
children: lockentry1_children
|
||||
}
|
||||
|
||||
// Second lockentry - shared
|
||||
mut lockscope2_children := []xml.XMLNodeContents{}
|
||||
lockscope2_children << xml.XMLNode{
|
||||
name: 'D:shared'
|
||||
}
|
||||
|
||||
lockscope2 := xml.XMLNode{
|
||||
name: 'D:lockscope'
|
||||
children: lockscope2_children
|
||||
}
|
||||
|
||||
mut locktype2_children := []xml.XMLNodeContents{}
|
||||
locktype2_children << xml.XMLNode{
|
||||
name: 'D:write'
|
||||
}
|
||||
|
||||
locktype2 := xml.XMLNode{
|
||||
name: 'D:locktype'
|
||||
children: locktype2_children
|
||||
}
|
||||
|
||||
mut lockentry2_children := []xml.XMLNodeContents{}
|
||||
lockentry2_children << lockscope2
|
||||
lockentry2_children << locktype2
|
||||
|
||||
lockentry2 := xml.XMLNode{
|
||||
name: 'D:lockentry'
|
||||
children: lockentry2_children
|
||||
}
|
||||
|
||||
// Add both lockentries to children
|
||||
children << lockentry1
|
||||
children << lockentry2
|
||||
|
||||
// Return the supportedlock node
|
||||
return xml.XMLNode{
|
||||
name: 'D:supportedlock'
|
||||
children: children
|
||||
}
|
||||
}
|
||||
|
||||
fn (p LockDiscovery) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:lockdiscovery'
|
||||
children: [xml.XMLNodeContents(p)]
|
||||
}
|
||||
}
|
||||
|
||||
fn format_iso8601(t time.Time) string {
|
||||
return '${t.year:04d}-${t.month:02d}-${t.day:02d}T${t.hour:02d}:${t.minute:02d}:${t.second:02d}Z'
|
||||
}
|
||||
93
lib/dav/webdav/model_property_test.v
Normal file
@@ -0,0 +1,93 @@
|
||||
module webdav
|
||||
|
||||
import time
|
||||
|
||||
fn test_property_xml() {
|
||||
// Test DisplayName property
|
||||
display_name := DisplayName('test-file.txt')
|
||||
assert display_name.xml() == '<D:displayname>test-file.txt</D:displayname>'
|
||||
assert display_name.xml_name() == '<displayname/>'
|
||||
|
||||
// Test GetLastModified property
|
||||
last_modified := GetLastModified('Mon, 01 Jan 2024 12:00:00 GMT')
|
||||
assert last_modified.xml() == '<D:getlastmodified>Mon, 01 Jan 2024 12:00:00 GMT</D:getlastmodified>'
|
||||
assert last_modified.xml_name() == '<getlastmodified/>'
|
||||
|
||||
// Test GetContentType property
|
||||
content_type := GetContentType('text/plain')
|
||||
assert content_type.xml() == '<D:getcontenttype>text/plain</D:getcontenttype>'
|
||||
assert content_type.xml_name() == '<getcontenttype/>'
|
||||
|
||||
// Test GetContentLength property
|
||||
content_length := GetContentLength('1024')
|
||||
assert content_length.xml() == '<D:getcontentlength>1024</D:getcontentlength>'
|
||||
assert content_length.xml_name() == '<getcontentlength/>'
|
||||
|
||||
// Test ResourceType property for collection (directory)
|
||||
resource_type_dir := ResourceType(true)
|
||||
assert resource_type_dir.xml() == '<D:resourcetype><D:collection/></D:resourcetype>'
|
||||
assert resource_type_dir.xml_name() == '<resourcetype/>'
|
||||
|
||||
// Test ResourceType property for non-collection (file)
|
||||
resource_type_file := ResourceType(false)
|
||||
assert resource_type_file.xml() == '<D:resourcetype/>'
|
||||
assert resource_type_file.xml_name() == '<resourcetype/>'
|
||||
|
||||
// Test CreationDate property
|
||||
creation_date := CreationDate('2024-01-01T12:00:00Z')
|
||||
assert creation_date.xml() == '<D:creationdate>2024-01-01T12:00:00Z</D:creationdate>'
|
||||
assert creation_date.xml_name() == '<creationdate/>'
|
||||
|
||||
// Test SupportedLock property
|
||||
supported_lock := SupportedLock('')
|
||||
assert supported_lock.xml().contains('<D:supportedlock>')
|
||||
assert supported_lock.xml().contains('<D:lockentry>')
|
||||
assert supported_lock.xml().contains('<D:lockscope><D:exclusive/></D:lockscope>')
|
||||
assert supported_lock.xml().contains('<D:lockscope><D:shared/></D:lockscope>')
|
||||
assert supported_lock.xml().contains('<D:locktype><D:write/></D:locktype>')
|
||||
assert supported_lock.xml_name() == '<supportedlock/>'
|
||||
|
||||
// Test LockDiscovery property
|
||||
lock_discovery := LockDiscovery('lock-info')
|
||||
assert lock_discovery.xml() == '<D:lockdiscovery>lock-info</D:lockdiscovery>'
|
||||
assert lock_discovery.xml_name() == '<lockdiscovery/>'
|
||||
}
|
||||
|
||||
fn test_property_array_xml() {
|
||||
// Create an array of properties
|
||||
mut properties := []Property{}
|
||||
|
||||
// Add different property types to the array
|
||||
properties << DisplayName('test-file.txt')
|
||||
properties << GetContentType('text/plain')
|
||||
properties << ResourceType(false)
|
||||
|
||||
// Test the xml() function for the array of properties
|
||||
xml_output := properties.xml()
|
||||
|
||||
// Verify the XML output contains the expected structure
|
||||
assert xml_output.contains('<D:propstat>')
|
||||
assert xml_output.contains('<D:prop>')
|
||||
assert xml_output.contains('<D:displayname>test-file.txt</D:displayname>')
|
||||
assert xml_output.contains('<D:getcontenttype>text/plain</D:getcontenttype>')
|
||||
assert xml_output.contains('<D:resourcetype/>')
|
||||
assert xml_output.contains('<D:status>HTTP/1.1 200 OK</D:status>')
|
||||
}
|
||||
|
||||
fn test_format_iso8601() {
|
||||
// Create a test time
|
||||
test_time := time.Time{
|
||||
year: 2024
|
||||
month: 1
|
||||
day: 1
|
||||
hour: 12
|
||||
minute: 30
|
||||
second: 45
|
||||
}
|
||||
|
||||
// Format the time using the format_iso8601 function
|
||||
formatted_time := format_iso8601(test_time)
|
||||
|
||||
// Verify the formatted time matches the expected ISO8601 format
|
||||
assert formatted_time == '2024-01-01T12:30:45Z'
|
||||
}
|
||||
@@ -4,109 +4,234 @@ import encoding.xml
|
||||
import log
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.vfs
|
||||
import freeflowuniverse.herolib.vfs.vfs_db
|
||||
import os
|
||||
import time
|
||||
import net.http
|
||||
import veb
|
||||
|
||||
// Property represents a WebDAV property
|
||||
pub interface Property {
|
||||
xml() string
|
||||
xml_name() string
|
||||
// PropfindRequest represents a parsed PROPFIND request
|
||||
pub struct PropfindRequest {
|
||||
pub:
|
||||
typ PropfindType
|
||||
props []string // Property names if typ is prop
|
||||
depth Depth // Depth of the request (0, 1, or -1 for infinity)
|
||||
xml_content string // Original XML content
|
||||
}
|
||||
|
||||
type DisplayName = string
|
||||
type GetLastModified = string
|
||||
type GetContentType = string
|
||||
type GetContentLength = string
|
||||
type ResourceType = bool
|
||||
type CreationDate = string
|
||||
type SupportedLock = string
|
||||
type LockDiscovery = string
|
||||
|
||||
fn (p []Property) xml() string {
|
||||
return '<D:propstat>
|
||||
<D:prop>${p.map(it.xml()).join_lines()}</D:prop>
|
||||
<D:status>HTTP/1.1 200 OK</D:status>
|
||||
</D:propstat>'
|
||||
pub enum Depth {
|
||||
infinity = -1
|
||||
zero = 0
|
||||
one = 1
|
||||
}
|
||||
|
||||
fn (p DisplayName) xml() string {
|
||||
return '<D:displayname>${p}</D:displayname>'
|
||||
// PropfindType represents the type of PROPFIND request
|
||||
pub enum PropfindType {
|
||||
allprop // Request all properties
|
||||
propname // Request property names only
|
||||
prop // Request specific properties
|
||||
invalid // Invalid request
|
||||
}
|
||||
|
||||
fn (p DisplayName) xml_name() string {
|
||||
return '<displayname/>'
|
||||
}
|
||||
// parse_propfind_xml parses the XML body of a PROPFIND request
|
||||
pub fn parse_propfind_xml(req http.Request) !PropfindRequest {
|
||||
|
||||
fn (p GetLastModified) xml() string {
|
||||
return '<D:getlastmodified>${p}</D:getlastmodified>'
|
||||
}
|
||||
data := req.data
|
||||
// Parse Depth header
|
||||
depth_str := req.header.get_custom('Depth') or { '0' }
|
||||
depth := parse_depth(depth_str)
|
||||
|
||||
|
||||
fn (p GetLastModified) xml_name() string {
|
||||
return '<getlastmodified/>'
|
||||
}
|
||||
if data.len == 0 {
|
||||
// If no body is provided, default to allprop
|
||||
return PropfindRequest{
|
||||
typ: .allprop
|
||||
depth: depth
|
||||
xml_content: ''
|
||||
}
|
||||
}
|
||||
|
||||
fn (p GetContentType) xml() string {
|
||||
return '<D:getcontenttype>${p}</D:getcontenttype>'
|
||||
}
|
||||
doc := xml.XMLDocument.from_string(data) or {
|
||||
return error('Failed to parse XML: ${err}')
|
||||
}
|
||||
|
||||
fn (p GetContentType) xml_name() string {
|
||||
return '<getcontenttype/>'
|
||||
}
|
||||
root := doc.root
|
||||
if root.name.to_lower() != 'propfind' && !root.name.ends_with(':propfind') {
|
||||
return error('Invalid PROPFIND request: root element must be propfind')
|
||||
}
|
||||
|
||||
fn (p GetContentLength) xml() string {
|
||||
return '<D:getcontentlength>${p}</D:getcontentlength>'
|
||||
}
|
||||
mut typ := PropfindType.allprop
|
||||
mut props := []string{}
|
||||
|
||||
// Check for allprop, propname, or prop elements
|
||||
for child in root.children {
|
||||
if child is xml.XMLNode {
|
||||
node := child as xml.XMLNode
|
||||
|
||||
// Check for allprop
|
||||
if node.name == 'allprop' || node.name == 'D:allprop' {
|
||||
typ = .allprop
|
||||
break
|
||||
}
|
||||
|
||||
// Check for propname
|
||||
if node.name == 'propname' || node.name == 'D:propname' {
|
||||
typ = .propname
|
||||
break
|
||||
}
|
||||
|
||||
// Check for prop
|
||||
if node.name == 'prop' || node.name == 'D:prop' {
|
||||
typ = .prop
|
||||
|
||||
// Extract property names
|
||||
for prop_child in node.children {
|
||||
if prop_child is xml.XMLNode {
|
||||
prop_node := prop_child as xml.XMLNode
|
||||
props << prop_node.name
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if typ == .invalid {
|
||||
return error('Invalid PROPFIND request: missing prop, allprop, or propname element')
|
||||
}
|
||||
|
||||
fn (p GetContentLength) xml_name() string {
|
||||
return '<getcontentlength/>'
|
||||
}
|
||||
|
||||
fn (p ResourceType) xml() string {
|
||||
return if p {
|
||||
'<D:resourcetype><D:collection/></D:resourcetype>'
|
||||
} else {
|
||||
'<D:resourcetype/>'
|
||||
return PropfindRequest{
|
||||
typ: typ
|
||||
props: props
|
||||
depth: depth
|
||||
xml_content: data
|
||||
}
|
||||
}
|
||||
|
||||
fn (p ResourceType) xml_name() string {
|
||||
return '<resourcetype/>'
|
||||
// parse_depth parses the Depth header value
|
||||
pub fn parse_depth(depth_str string) Depth {
|
||||
if depth_str == 'infinity' { return .infinity}
|
||||
else if depth_str == '0' { return .zero}
|
||||
else if depth_str == '1' { return .one}
|
||||
else {
|
||||
log.warn('[WebDAV] Invalid Depth header value: ${depth_str}, defaulting to infinity')
|
||||
return .infinity
|
||||
}
|
||||
}
|
||||
|
||||
fn (p CreationDate) xml() string {
|
||||
return '<D:creationdate>${p}</D:creationdate>'
|
||||
// Response represents a WebDAV response for a resource
|
||||
pub struct PropfindResponse {
|
||||
pub:
|
||||
href string
|
||||
found_props []Property
|
||||
not_found_props []Property
|
||||
}
|
||||
|
||||
fn (p CreationDate) xml_name() string {
|
||||
return '<creationdate/>'
|
||||
fn (r PropfindResponse) xml() xml.XMLNodeContents {
|
||||
return xml.XMLNode{
|
||||
name: 'D:response'
|
||||
children: [
|
||||
xml.XMLNode{
|
||||
name: 'D:href'
|
||||
children: [xml.XMLNodeContents(r.href)]
|
||||
},
|
||||
xml.XMLNode{
|
||||
name: 'D:propstat'
|
||||
children: [
|
||||
xml.XMLNode{
|
||||
name: 'D:prop'
|
||||
children: r.found_props.map(it.xml())
|
||||
},
|
||||
xml.XMLNode{
|
||||
name: 'D:status'
|
||||
children: [xml.XMLNodeContents('HTTP/1.1 200 OK')]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
fn (p SupportedLock) xml() string {
|
||||
return '<D:supportedlock>
|
||||
<D:lockentry>
|
||||
<D:lockscope><D:exclusive/></D:lockscope>
|
||||
<D:locktype><D:write/></D:locktype>
|
||||
</D:lockentry>
|
||||
<D:lockentry>
|
||||
<D:lockscope><D:shared/></D:lockscope>
|
||||
<D:locktype><D:write/></D:locktype>
|
||||
</D:lockentry>
|
||||
</D:supportedlock>'
|
||||
// generate_propfind_response generates a PROPFIND response XML string from Response structs
|
||||
pub fn (r []PropfindResponse) xml() string {
|
||||
// Create multistatus root node
|
||||
multistatus_node := xml.XMLNode{
|
||||
name: 'D:multistatus'
|
||||
attributes: {
|
||||
'xmlns:D': 'DAV:'
|
||||
}
|
||||
children: r.map(it.xml())
|
||||
}
|
||||
|
||||
// Create a new XML document with the root node
|
||||
doc := xml.XMLDocument{
|
||||
version: '1.0'
|
||||
root: multistatus_node
|
||||
}
|
||||
|
||||
// Generate XML string
|
||||
doc.validate() or {panic('this should never happen ${err}')}
|
||||
return format_xml(doc.str())
|
||||
}
|
||||
|
||||
fn (p SupportedLock) xml_name() string {
|
||||
return '<supportedlock/>'
|
||||
fn get_file_content_type(path string) string {
|
||||
ext := path.all_after_last('.')
|
||||
content_type := if v := veb.mime_types[ext] {
|
||||
v
|
||||
} else {
|
||||
'text/plain; charset=utf-8'
|
||||
}
|
||||
|
||||
return content_type
|
||||
}
|
||||
|
||||
fn (p LockDiscovery) xml() string {
|
||||
return '<D:lockdiscovery>${p}</D:lockdiscovery>'
|
||||
// parse_xml takes an XML string and returns a cleaned version with whitespace removed between tags
|
||||
pub fn format_xml(xml_str string) string {
|
||||
mut result := ''
|
||||
mut i := 0
|
||||
mut in_tag := false
|
||||
mut content_start := 0
|
||||
|
||||
// Process the string character by character
|
||||
for i < xml_str.len {
|
||||
ch := xml_str[i]
|
||||
|
||||
// Start of a tag
|
||||
if ch == `<` {
|
||||
// If we were collecting content between tags, process it
|
||||
if !in_tag && i > content_start {
|
||||
// Get the content between tags and trim whitespace
|
||||
content := xml_str[content_start..i].trim_space()
|
||||
result += content
|
||||
}
|
||||
|
||||
in_tag = true
|
||||
result += '<'
|
||||
}
|
||||
// End of a tag
|
||||
else if ch == `>` {
|
||||
in_tag = false
|
||||
result += '>'
|
||||
content_start = i + 1
|
||||
}
|
||||
// Inside a tag - preserve all characters including whitespace
|
||||
else if in_tag {
|
||||
result += ch.ascii_str()
|
||||
}
|
||||
// Outside a tag - only add non-whitespace or handle whitespace in content
|
||||
else if !in_tag {
|
||||
// We'll collect and process this content when we reach the next tag
|
||||
// or at the end of the string
|
||||
}
|
||||
|
||||
i++
|
||||
}
|
||||
|
||||
// Handle any remaining content at the end of the string
|
||||
if !in_tag && content_start < xml_str.len {
|
||||
content := xml_str[content_start..].trim_space()
|
||||
result += content
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
fn (p LockDiscovery) xml_name() string {
|
||||
return '<lockdiscovery/>'
|
||||
}
|
||||
|
||||
fn format_iso8601(t time.Time) string {
|
||||
return '${t.year:04d}-${t.month:02d}-${t.day:02d}T${t.hour:02d}:${t.minute:02d}:${t.second:02d}Z'
|
||||
}
|
||||
529
lib/dav/webdav/server.v
Normal file
@@ -0,0 +1,529 @@
|
||||
module webdav
|
||||
|
||||
import time
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
import freeflowuniverse.herolib.ui.console
|
||||
import encoding.xml
|
||||
import net.urllib
|
||||
import net
|
||||
import net.http.chunked
|
||||
import veb
|
||||
import log
|
||||
import strings
|
||||
|
||||
@[head]
|
||||
pub fn (server &Server) index(mut ctx Context) veb.Result {
|
||||
ctx.set_header(.content_length, '0')
|
||||
ctx.set_custom_header('DAV', '1,2') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Allow', 'OPTIONS, HEAD, GET, PROPFIND, DELETE, COPY, MOVE, PROPPATCH, LOCK, UNLOCK') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('MS-Author-Via', 'DAV') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Server', 'WsgiDAV-compatible WebDAV Server') or { return ctx.server_error(err.msg()) }
|
||||
return ctx.ok('')
|
||||
}
|
||||
|
||||
@['/:path...'; options]
|
||||
pub fn (server &Server) options(mut ctx Context, path string) veb.Result {
|
||||
ctx.set_header(.content_length, '0')
|
||||
ctx.set_custom_header('DAV', '1,2') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Allow', 'OPTIONS, HEAD, GET, PROPFIND, DELETE, COPY, MOVE, PROPPATCH, LOCK, UNLOCK') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('MS-Author-Via', 'DAV') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Server', 'WsgiDAV-compatible WebDAV Server') or { return ctx.server_error(err.msg()) }
|
||||
return ctx.ok('')
|
||||
}
|
||||
|
||||
@['/:path...'; lock]
|
||||
pub fn (mut server Server) lock(mut ctx Context, path string) veb.Result {
|
||||
resource := ctx.req.url
|
||||
|
||||
// Parse lock information from XML body instead of headers
|
||||
lock_info := parse_lock_xml(ctx.req.data) or {
|
||||
console.print_stderr('Failed to parse lock XML: ${err}')
|
||||
ctx.res.set_status(.bad_request)
|
||||
return ctx.text('Invalid lock request: ${err}')
|
||||
}
|
||||
|
||||
// Get depth and timeout from headers (these are still in headers)
|
||||
// Parse timeout header which can be in format "Second-600"
|
||||
timeout_str := ctx.get_custom_header('Timeout') or { 'Second-3600' }
|
||||
mut timeout := 3600 // Default 1 hour
|
||||
|
||||
if timeout_str.to_lower().starts_with('second-') {
|
||||
timeout_val := timeout_str.all_after('Second-')
|
||||
if timeout_val.int() > 0 {
|
||||
timeout = timeout_val.int()
|
||||
}
|
||||
}
|
||||
|
||||
new_lock := Lock {
|
||||
...lock_info,
|
||||
resource: ctx.req.url
|
||||
depth: ctx.get_custom_header('Depth') or { '0' }.int()
|
||||
timeout: timeout
|
||||
}
|
||||
|
||||
// Try to acquire the lock
|
||||
lock_result := server.lock_manager.lock(new_lock) or {
|
||||
// If we get here, the resource is locked by a different owner
|
||||
ctx.res.set_status(.locked)
|
||||
return ctx.text('Resource is already locked by a different owner.')
|
||||
}
|
||||
|
||||
// Set WsgiDAV-like headers
|
||||
ctx.res.set_status(.ok)
|
||||
ctx.set_custom_header('Lock-Token', 'opaquelocktoken:${lock_result.token}') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }
|
||||
|
||||
// Create a proper WebDAV lock response
|
||||
return ctx.send_response_to_client('application/xml', lock_result.xml())
|
||||
}
|
||||
|
||||
@['/:path...'; unlock]
|
||||
pub fn (mut server Server) unlock(mut ctx Context, path string) veb.Result {
|
||||
resource := ctx.req.url
|
||||
token_ := ctx.get_custom_header('Lock-Token') or { return ctx.server_error(err.msg()) }
|
||||
// Handle the opaquelocktoken: prefix that WsgiDAV uses
|
||||
token := token_.trim_string_left('<').trim_string_right('>')
|
||||
.trim_string_left('opaquelocktoken:')
|
||||
if token.len == 0 {
|
||||
console.print_stderr('Unlock failed: `Lock-Token` header required.')
|
||||
ctx.res.set_status(.bad_request)
|
||||
return ctx.text('Lock failed: `Lock-Token` header missing or invalid.')
|
||||
}
|
||||
|
||||
if server.lock_manager.unlock_with_token(resource, token) {
|
||||
// Add WsgiDAV-like headers
|
||||
ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }
|
||||
ctx.res.set_status(.no_content)
|
||||
return ctx.text('')
|
||||
}
|
||||
|
||||
console.print_stderr('Resource is not locked or token mismatch.')
|
||||
ctx.res.set_status(.conflict)
|
||||
return ctx.text('Resource is not locked or token mismatch')
|
||||
}
|
||||
|
||||
@['/:path...'; get]
|
||||
pub fn (mut server Server) get_file(mut ctx Context, path string) veb.Result {
|
||||
log.info('[WebDAV] Getting file ${path}')
|
||||
file_data := server.vfs.file_read(path) or {
|
||||
log.error('[WebDAV] ${err.msg()}')
|
||||
return ctx.server_error(err.msg())
|
||||
}
|
||||
ext := path.all_after_last('.')
|
||||
content_type := veb.mime_types['.${ext}'] or { 'text/plain; charset=utf-8' }
|
||||
|
||||
// Add WsgiDAV-like headers
|
||||
ctx.set_header(.content_length, file_data.len.str())
|
||||
ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_header(.accept_ranges, 'bytes')
|
||||
ctx.set_custom_header('ETag', '"${path}-${time.now().unix()}"') or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Last-Modified', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
|
||||
|
||||
return ctx.send_response_to_client(content_type, file_data.bytestr())
|
||||
}
|
||||
|
||||
@['/:path...'; head]
pub fn (mut server Server) exists(mut ctx Context, path string) veb.Result {
	// Check if the requested path exists in the virtual filesystem
	if !server.vfs.exists(path) {
		return ctx.not_found()
	}

	// Add necessary WebDAV headers
	// ctx.set_header(.authorization, 'Basic') // Indicates Basic auth usage
	ctx.set_custom_header('dav', '1, 2') or {
		return ctx.server_error('Failed to set DAV header: ${err}')
	}
	ctx.set_header(.content_length, '0') // HEAD request, so no body
	// ctx.set_header(.content_type, 'application/xml') // XML is common for WebDAV metadata
	ctx.set_custom_header('Allow', 'OPTIONS, GET, HEAD, PROPFIND, PROPPATCH, MKCOL, PUT, DELETE, COPY, MOVE, LOCK, UNLOCK') or {
		return ctx.server_error('Failed to set Allow header: ${err}')
	}
	ctx.set_header(.accept_ranges, 'bytes') // Allows range-based file downloads
	ctx.set_custom_header('Cache-Control', 'no-cache, no-store, must-revalidate') or {
		return ctx.server_error('Failed to set Cache-Control header: ${err}')
	}
	ctx.set_custom_header('Last-Modified', texttools.format_rfc1123(time.utc())) or {
		return ctx.server_error('Failed to set Last-Modified header: ${err}')
	}
	ctx.res.set_version(.v1_1)

	// HEAD responses carry headers only, so return an empty body
	return ctx.ok('')
}

@['/:path...'; delete]
pub fn (mut server Server) delete(mut ctx Context, path string) veb.Result {
	server.vfs.delete(path) or {
		return ctx.server_error(err.msg())
	}

	// Add WsgiDAV-like headers
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }

	server.vfs.print() or { panic(err) }
	// Return success response
	return ctx.no_content()
}

@['/:path...'; copy]
pub fn (mut server Server) copy(mut ctx Context, path string) veb.Result {
	if !server.vfs.exists(path) {
		return ctx.not_found()
	}

	destination := ctx.req.header.get_custom('Destination') or {
		return ctx.server_error(err.msg())
	}
	destination_url := urllib.parse(destination) or {
		ctx.res.set_status(.bad_request)
		return ctx.text('Invalid Destination ${destination}: ${err}')
	}
	destination_path_str := destination_url.path

	// Check if destination exists
	destination_exists := server.vfs.exists(destination_path_str)

	server.vfs.copy(path, destination_path_str) or {
		log.error('[WebDAV] Failed to copy: ${err}')
		return ctx.server_error(err.msg())
	}

	// Add WsgiDAV-like headers
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }

	// Return 201 Created if the destination was created, 204 No Content if it was overwritten
	if destination_exists {
		return ctx.no_content()
	} else {
		ctx.res.set_status(.created)
		return ctx.text('')
	}
}

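For illustration, this is roughly how a client drives the COPY handler above: the Destination header carries a full URL whose path component becomes the target. The host, port and credentials here are assumptions, mirroring the commented-out tests further down.

import net.http
import encoding.base64

fn copy_example() ! {
	auth := 'Basic ${base64.encode_str('admin:123')}'

	// COPY /src.txt -> /backup/src.txt (only the path part of Destination is used by the handler)
	mut req := http.new_request(.copy, 'http://localhost:8080/src.txt', '')
	req.add_custom_header('Authorization', auth)!
	req.add_custom_header('Destination', 'http://localhost:8080/backup/src.txt')!
	resp := req.do()!
	// 201 Created when the destination did not exist, 204 No Content when it was overwritten
	assert resp.status_code in [201, 204]
}
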
@['/:path...'; move]
pub fn (mut server Server) move(mut ctx Context, path string) veb.Result {
	if !server.vfs.exists(path) {
		return ctx.not_found()
	}

	destination := ctx.req.header.get_custom('Destination') or {
		return ctx.server_error(err.msg())
	}
	destination_url := urllib.parse(destination) or {
		ctx.res.set_status(.bad_request)
		return ctx.text('Invalid Destination ${destination}: ${err}')
	}
	destination_path_str := destination_url.path

	// Check if destination exists
	destination_exists := server.vfs.exists(destination_path_str)

	log.info('[WebDAV] ${@FN} from ${path} to ${destination_path_str}')
	server.vfs.move(path, destination_path_str) or {
		log.error('Failed to move: ${err}')
		return ctx.server_error(err.msg())
	}

	// Add WsgiDAV-like headers
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }

	// Return 204 No Content for successful move operations (WsgiDAV behavior)
	ctx.res.set_status(.no_content)
	return ctx.text('')
}

@['/:path...'; mkcol]
pub fn (mut server Server) mkcol(mut ctx Context, path string) veb.Result {
	if server.vfs.exists(path) {
		ctx.res.set_status(.bad_request)
		return ctx.text('Another collection exists at ${path}')
	}

	log.info('[WebDAV] Make Collection ${path}')
	server.vfs.dir_create(path) or {
		console.print_stderr('failed to create directory ${path}: ${err}')
		return ctx.server_error(err.msg())
	}

	// Add WsgiDAV-like headers
	ctx.set_header(.content_type, 'text/html; charset=utf-8')
	ctx.set_header(.content_length, '0')
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }

	ctx.res.set_status(.created)
	return ctx.text('')
}

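A hedged client-side sketch of MKCOL against this handler; creating the same collection twice hits the exists() guard at the top and is rejected with 400. Address and credentials are placeholders.

import net.http
import encoding.base64

fn mkcol_example() ! {
	auth := 'Basic ${base64.encode_str('admin:123')}'

	mut req := http.new_request(.mkcol, 'http://localhost:8080/newdir', '')
	req.add_custom_header('Authorization', auth)!
	first := req.do()!
	assert first.status_code == 201 // collection created

	// A second MKCOL on the same path is rejected by the handler
	mut again := http.new_request(.mkcol, 'http://localhost:8080/newdir', '')
	again.add_custom_header('Authorization', auth)!
	second := again.do()!
	assert second.status_code == 400
}
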
@['/:path...'; put]
|
||||
fn (mut server Server) create_or_update(mut ctx Context, path string) veb.Result {
|
||||
// Check if parent directory exists (RFC 4918 9.7.1: A PUT that would result in the creation of a resource
|
||||
// without an appropriately scoped parent collection MUST fail with a 409 Conflict)
|
||||
parent_path := path.all_before_last('/')
|
||||
if parent_path != '' && !server.vfs.exists(parent_path) {
|
||||
log.error('[WebDAV] Parent directory ${parent_path} does not exist for ${path}')
|
||||
ctx.res.set_status(.conflict)
|
||||
return ctx.text('HTTP 409: Conflict - Parent collection does not exist')
|
||||
}
|
||||
|
||||
is_update := server.vfs.exists(path)
|
||||
if is_update {
|
||||
log.debug('[WebDAV] ${path} exists, updating')
|
||||
if fs_entry := server.vfs.get(path) {
|
||||
log.debug('[WebDAV] Got FSEntry ${fs_entry}')
|
||||
// RFC 4918 9.7.2: PUT for Collections - A PUT request to an existing collection MAY be treated as an error
|
||||
if fs_entry.is_dir() {
|
||||
log.error('[WebDAV] Cannot PUT to a directory: ${path}')
|
||||
ctx.res.set_status(.method_not_allowed)
|
||||
ctx.set_header(.allow, 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, DELETE')
|
||||
return ctx.text('HTTP 405: Method Not Allowed - Cannot PUT to a collection')
|
||||
}
|
||||
} else {
|
||||
log.error('[WebDAV] Failed to get FS Entry for ${path}\n${err.msg()}')
|
||||
return ctx.server_error('Failed to get FS Entry ${path}: ${err.msg()}')
|
||||
}
|
||||
} else {
|
||||
log.debug('[WebDAV] ${path} does not exist, creating')
|
||||
server.vfs.file_create(path) or {
|
||||
log.error('[WebDAV] Failed to create file ${path}: ${err.msg()}')
|
||||
return ctx.server_error('Failed to create file: ${err.msg()}')
|
||||
}
|
||||
}
|
||||
|
||||
// Process Content-Type if provided
|
||||
content_type := ctx.req.header.get(.content_type) or { '' }
|
||||
if content_type != '' {
|
||||
log.debug('[WebDAV] Content-Type provided: ${content_type}')
|
||||
}
|
||||
|
||||
// Check if we have a Content-Length header
|
||||
content_length_str := ctx.req.header.get(.content_length) or { '0' }
|
||||
content_length := content_length_str.int()
|
||||
log.debug('[WebDAV] Content-Length: ${content_length}')
|
||||
|
||||
// Check for chunked transfer encoding
|
||||
transfer_encoding := ctx.req.header.get_custom('Transfer-Encoding') or { '' }
|
||||
is_chunked := transfer_encoding.to_lower().contains('chunked')
|
||||
log.debug('[WebDAV] Transfer-Encoding: ${transfer_encoding}, is_chunked: ${is_chunked}')
|
||||
|
||||
// Handle the file upload based on the request type
|
||||
if is_chunked || content_length > 0 {
|
||||
// Take over the connection to handle streaming data
|
||||
ctx.takeover_conn()
|
||||
|
||||
// Create a buffer for reading chunks
|
||||
mut buffer := []u8{len: 8200} // 8KB buffer for reading chunks
|
||||
mut total_bytes := 0
|
||||
mut all_data := []u8{}
|
||||
|
||||
// Process any data that's already been read
|
||||
if ctx.req.data.len > 0 {
|
||||
all_data << ctx.req.data.bytes()
|
||||
total_bytes += ctx.req.data.len
|
||||
log.debug('[WebDAV] Added ${ctx.req.data.len} initial bytes from request data')
|
||||
}
|
||||
|
||||
// Read data in chunks from the connection
|
||||
if is_chunked {
|
||||
// For chunked encoding, we need to read until we get a zero-length chunk
|
||||
log.info('[WebDAV] Reading chunked data for ${path}')
|
||||
|
||||
// Write initial data to the file
|
||||
if all_data.len > 0 {
|
||||
server.vfs.file_write(path, all_data) or {
|
||||
log.error('[WebDAV] Failed to write initial data to ${path}: ${err.msg()}')
|
||||
// Send error response
|
||||
ctx.res.set_status(.internal_server_error)
|
||||
ctx.res.header.set(.content_type, 'text/plain')
|
||||
ctx.res.header.set(.content_length, '${err.msg().len}')
|
||||
ctx.conn.write(ctx.res.bytestr().bytes()) or {}
|
||||
ctx.conn.write(err.msg().bytes()) or {}
|
||||
ctx.conn.close() or {}
|
||||
return veb.no_result()
|
||||
}
|
||||
}
|
||||
|
||||
// Continue reading chunks from the connection
|
||||
for {
|
||||
// Read a chunk from the connection
|
||||
n := ctx.conn.read(mut buffer) or {
|
||||
if err.code() == net.err_timed_out_code {
|
||||
log.info('[WebDAV] Connection timed out, finished reading')
|
||||
break
|
||||
}
|
||||
log.error('[WebDAV] Error reading from connection: ${err}')
|
||||
break
|
||||
}
|
||||
|
||||
if n <= 0 {
|
||||
log.info('[WebDAV] Reached end of data stream')
|
||||
break
|
||||
}
|
||||
|
||||
|
||||
// Process the chunk using the chunked module
|
||||
chunk := buffer[..n].clone()
|
||||
chunk_str := chunk.bytestr()
|
||||
|
||||
// Try to decode the chunk if it looks like a valid chunked format
|
||||
if chunk_str.contains('\r\n') {
|
||||
log.debug('[WebDAV] Attempting to decode chunked data')
|
||||
decoded := chunked.decode(chunk_str) or {
|
||||
log.error('[WebDAV] Failed to decode chunked data: ${err}')
|
||||
// If decoding fails, just use the raw chunk
|
||||
server.vfs.file_concatenate(path, chunk) or {
|
||||
log.error('[WebDAV] Failed to append chunk to ${path}: ${err.msg()}')
|
||||
// Send error response
|
||||
ctx.res.set_status(.internal_server_error)
|
||||
ctx.res.header.set(.content_type, 'text/plain')
|
||||
ctx.res.header.set(.content_length, '${err.msg().len}')
|
||||
ctx.conn.write(ctx.res.bytestr().bytes()) or {}
|
||||
ctx.conn.write(err.msg().bytes()) or {}
|
||||
ctx.conn.close() or {}
|
||||
return veb.no_result()
|
||||
}
|
||||
}
|
||||
|
||||
// If decoding succeeds, write the decoded data
|
||||
if decoded.len > 0 {
|
||||
log.debug('[WebDAV] Successfully decoded chunked data: ${decoded.len} bytes')
|
||||
server.vfs.file_concatenate(path, decoded.bytes()) or {
|
||||
log.error('[WebDAV] Failed to append decoded chunk to ${path}: ${err.msg()}')
|
||||
// Send error response
|
||||
ctx.res.set_status(.internal_server_error)
|
||||
ctx.res.header.set(.content_type, 'text/plain')
|
||||
ctx.res.header.set(.content_length, '${err.msg().len}')
|
||||
ctx.conn.write(ctx.res.bytestr().bytes()) or {}
|
||||
ctx.conn.write(err.msg().bytes()) or {}
|
||||
ctx.conn.close() or {}
|
||||
return veb.no_result()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If it doesn't look like chunked data, use the raw chunk
|
||||
server.vfs.file_concatenate(path, chunk) or {
|
||||
log.error('[WebDAV] Failed to append chunk to ${path}: ${err.msg()}')
|
||||
// Send error response
|
||||
ctx.res.set_status(.internal_server_error)
|
||||
ctx.res.header.set(.content_type, 'text/plain')
|
||||
ctx.res.header.set(.content_length, '${err.msg().len}')
|
||||
ctx.conn.write(ctx.res.bytestr().bytes()) or {}
|
||||
ctx.conn.write(err.msg().bytes()) or {}
|
||||
ctx.conn.close() or {}
|
||||
return veb.no_result()
|
||||
}
|
||||
}
|
||||
|
||||
total_bytes += n
|
||||
log.debug('[WebDAV] Read ${n} bytes, total: ${total_bytes}')
|
||||
}
|
||||
} else if content_length > 0 {
|
||||
// For Content-Length uploads, read exactly that many bytes
|
||||
log.info('[WebDAV] Reading ${content_length} bytes for ${path}')
|
||||
mut remaining := content_length - all_data.len
|
||||
|
||||
// Write initial data to the file
|
||||
if all_data.len > 0 {
|
||||
server.vfs.file_write(path, all_data) or {
|
||||
log.error('[WebDAV] Failed to write initial data to ${path}: ${err.msg()}')
|
||||
// Send error response
|
||||
ctx.res.set_status(.internal_server_error)
|
||||
ctx.res.header.set(.content_type, 'text/plain')
|
||||
ctx.res.header.set(.content_length, '${err.msg().len}')
|
||||
ctx.conn.write(ctx.res.bytestr().bytes()) or {}
|
||||
ctx.conn.write(err.msg().bytes()) or {}
|
||||
ctx.conn.close() or {}
|
||||
return veb.no_result()
|
||||
}
|
||||
}
|
||||
|
||||
// Continue reading until we've read all the content
|
||||
for remaining > 0 {
|
||||
// Adjust buffer size for the last chunk if needed
|
||||
read_size := if remaining < buffer.len { remaining } else { buffer.len }
|
||||
|
||||
// Read a chunk from the connection
|
||||
n := ctx.conn.read(mut buffer[..read_size]) or {
|
||||
if err.code() == net.err_timed_out_code {
|
||||
log.info('[WebDAV] Connection timed out, finished reading')
|
||||
break
|
||||
}
|
||||
log.error('[WebDAV] Error reading from connection: ${err}')
|
||||
break
|
||||
}
|
||||
|
||||
if n <= 0 {
|
||||
log.info('[WebDAV] Reached end of data stream')
|
||||
break
|
||||
}
|
||||
|
||||
// Append the chunk to our file
|
||||
chunk := buffer[..n].clone()
|
||||
server.vfs.file_concatenate(path, chunk) or {
|
||||
log.error('[WebDAV] Failed to append chunk to ${path}: ${err.msg()}')
|
||||
// Send error response
|
||||
ctx.res.set_status(.internal_server_error)
|
||||
ctx.res.header.set(.content_type, 'text/plain')
|
||||
ctx.res.header.set(.content_length, '${err.msg().len}')
|
||||
ctx.conn.write(ctx.res.bytestr().bytes()) or {}
|
||||
ctx.conn.write(err.msg().bytes()) or {}
|
||||
return veb.no_result()
|
||||
}
|
||||
|
||||
total_bytes += n
|
||||
remaining -= n
|
||||
log.debug('[WebDAV] Read ${n} bytes, remaining: ${remaining}')
|
||||
}
|
||||
}
|
||||
|
||||
log.info('[WebDAV] Successfully wrote ${total_bytes} bytes to ${path}')
|
||||
|
||||
// Send success response
|
||||
ctx.res.header.set(.content_type, 'text/html; charset=utf-8')
|
||||
ctx.res.header.set(.content_length, '0')
|
||||
ctx.res.header.set_custom('Date', texttools.format_rfc1123(time.utc())) or {}
|
||||
ctx.res.header.set_custom('Server', 'veb WebDAV Server') or {}
|
||||
|
||||
if is_update {
|
||||
ctx.res.set_status(.no_content) // 204 No Content
|
||||
} else {
|
||||
ctx.res.set_status(.created) // 201 Created
|
||||
}
|
||||
|
||||
ctx.conn.write(ctx.res.bytestr().bytes()) or {
|
||||
log.error('[WebDAV] Failed to write response: ${err}')
|
||||
}
|
||||
ctx.conn.close() or {}
|
||||
|
||||
return veb.no_result()
|
||||
} else {
|
||||
// Empty PUT is still valid (creates empty file or replaces with empty content)
|
||||
server.vfs.file_write(path, []u8{}) or {
|
||||
log.error('[WebDAV] Failed to write empty data to ${path}: ${err.msg()}')
|
||||
return ctx.server_error('Failed to write file: ${err.msg()}')
|
||||
}
|
||||
log.info('[WebDAV] Created empty file at ${path}')
|
||||
|
||||
// Add WsgiDAV-like headers
|
||||
ctx.set_header(.content_type, 'text/html; charset=utf-8')
|
||||
ctx.set_header(.content_length, '0')
|
||||
ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }
|
||||
|
||||
// Set appropriate status code based on whether this was a create or update
|
||||
if is_update {
|
||||
return ctx.no_content()
|
||||
} else {
|
||||
ctx.res.set_status(.created)
|
||||
return ctx.text('')
|
||||
}
|
||||
}
|
||||
}
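The PUT handler above accepts either a Content-Length body or chunked transfer encoding, taking over the raw connection in both cases, and enforces the RFC 4918 rule that the parent collection must already exist. A minimal client sketch under assumed host and credentials; V's http client sends a Content-Length body, so this exercises the non-chunked branch.

import net.http
import encoding.base64

fn put_example() ! {
	auth := 'Basic ${base64.encode_str('admin:123')}'

	// Root-level path: the parent collection ('/') always exists, so no 409 Conflict here
	mut req := http.new_request(.put, 'http://localhost:8080/note.txt', 'hello webdav')
	req.add_custom_header('Authorization', auth)!
	resp := req.do()!
	// 201 Created on first upload, 204 No Content when replacing an existing file
	assert resp.status_code in [201, 204]
}
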

lib/dav/webdav/server_propfind.v (new file, 145 lines)
@@ -0,0 +1,145 @@
|
||||
module webdav
|
||||
|
||||
import encoding.xml
|
||||
import log
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.vfs
|
||||
import freeflowuniverse.herolib.vfs.vfs_db
|
||||
import os
|
||||
import time
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
import net.http
|
||||
import veb
|
||||
|
||||
@['/:path...'; propfind]
|
||||
fn (mut server Server) propfind(mut ctx Context, path string) veb.Result {
|
||||
// Parse PROPFIND request
|
||||
propfind_req := parse_propfind_xml(ctx.req) or {
|
||||
return ctx.error(WebDAVError{
|
||||
status: .bad_request
|
||||
message: 'Failed to parse PROPFIND XML: ${err}'
|
||||
tag: 'propfind-parse-error'
|
||||
})
|
||||
}
|
||||
|
||||
log.debug('[WebDAV] Propfind Request: ${propfind_req.typ}')
|
||||
|
||||
// Check if resource is locked
|
||||
if server.lock_manager.is_locked(ctx.req.url) {
|
||||
// If the resource is locked, we should still return properties
|
||||
// but we might need to indicate the lock status in the response
|
||||
// This is handled in the property generation
|
||||
log.info('[WebDAV] Resource is locked: ${ctx.req.url}')
|
||||
}
|
||||
|
||||
entry := server.vfs.get(path) or {
|
||||
return ctx.error(
|
||||
status: .not_found
|
||||
message: 'Path ${path} does not exist'
|
||||
tag: 'resource-must-be-null'
|
||||
)
|
||||
}
|
||||
|
||||
responses := server.get_responses(entry, propfind_req, path) or {
|
||||
return ctx.server_error('Failed to get entry properties ${err}')
|
||||
}
|
||||
|
||||
|
||||
// Add WsgiDAV-like headers
|
||||
ctx.set_header(.content_type, 'application/xml; charset=utf-8')
|
||||
ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
|
||||
ctx.set_custom_header('Server', 'WsgiDAV-compatible WebDAV Server') or { return ctx.server_error(err.msg()) }
|
||||
|
||||
// Create multistatus response using the responses
|
||||
ctx.res.set_status(.multi_status)
|
||||
return ctx.send_response_to_client('application/xml', responses.xml())
|
||||
}

// returns the properties of a filesystem entry
fn (mut server Server) get_entry_property(entry &vfs.FSEntry, name string) !Property {
	return match name {
		'creationdate' { Property(CreationDate(format_iso8601(entry.get_metadata().created_time()))) }
		'getetag' { Property(GetETag(entry.get_metadata().id.str())) }
		'resourcetype' { Property(ResourceType(entry.is_dir())) }
		'getlastmodified' { Property(GetLastModified(texttools.format_rfc1123(entry.get_metadata().modified_time()))) }
		'getcontentlength' { Property(GetContentLength(entry.get_metadata().size.str())) }
		'quota-available-bytes' { Property(QuotaAvailableBytes(16184098816)) }
		'quota-used-bytes' { Property(QuotaUsedBytes(16184098816)) }
		'quotaused' { Property(QuotaUsed(16184098816)) }
		'quota' { Property(Quota(16184098816)) }
		else { panic('implement ${name}') }
	}
}

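To illustrate what get_entry_property is matching on: a PROPFIND request with an explicit <D:prop> list (rather than the <D:allprop/> used in the tests below) names the properties the client wants; the handler strips the D: prefix and looks each one up. An assumed client-side request body a caller might send:

// Assumed example body; the property names map onto the match arms above.
const propfind_named_props = '<?xml version="1.0" encoding="utf-8"?>
<D:propfind xmlns:D="DAV:">
  <D:prop>
    <D:creationdate/>
    <D:getlastmodified/>
    <D:getcontentlength/>
    <D:resourcetype/>
    <D:getetag/>
  </D:prop>
</D:propfind>'
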
// get_responses returns all properties for the given path and depth
|
||||
fn (mut server Server) get_responses(entry vfs.FSEntry, req PropfindRequest, path string) ![]PropfindResponse {
|
||||
mut responses := []PropfindResponse{}
|
||||
|
||||
if req.typ == .prop {
|
||||
mut properties := []Property{}
|
||||
mut erronous_properties := map[int][]Property{} // properties that have errors indexed by error code
|
||||
for name in req.props {
|
||||
if property := server.get_entry_property(entry, name.trim_string_left('D:')) {
|
||||
properties << property
|
||||
} else {
|
||||
// TODO: implement error reporting
|
||||
}
|
||||
}
|
||||
// main entry response
|
||||
responses << PropfindResponse {
|
||||
href: if entry.is_dir() {'${path.trim_string_right("/")}/'} else {path}
|
||||
// not_found: entry.get_unfound_properties(req)
|
||||
found_props: properties
|
||||
}
|
||||
} else {
|
||||
responses << PropfindResponse {
|
||||
href: if entry.is_dir() {'${path.trim_string_right("/")}/'} else {path}
|
||||
// not_found: entry.get_unfound_properties(req)
|
||||
found_props: server.get_properties(entry)
|
||||
}
|
||||
}
|
||||
|
||||
if !entry.is_dir() || req.depth == .zero {
|
||||
return responses
|
||||
}
|
||||
|
||||
entries := server.vfs.dir_list(path) or {
|
||||
log.error('Failed to list directory for ${path} ${err}')
|
||||
return responses }
|
||||
for e in entries {
|
||||
responses << server.get_responses(e, PropfindRequest {
|
||||
...req,
|
||||
depth: if req.depth == .one { .zero } else { .infinity }
|
||||
}, '${path.trim_string_right("/")}/${e.get_metadata().name}')!
|
||||
}
|
||||
return responses
|
||||
}
|
||||
|
||||
// returns the properties of a filesystem entry
|
||||
fn (mut server Server) get_properties(entry &vfs.FSEntry) []Property {
|
||||
mut props := []Property{}
|
||||
|
||||
metadata := entry.get_metadata()
|
||||
// Display name
|
||||
props << DisplayName(metadata.name)
|
||||
props << GetLastModified(texttools.format_rfc1123(metadata.modified_time()))
|
||||
|
||||
if entry.is_dir() {
|
||||
props << QuotaAvailableBytes(16184098816)
|
||||
props << QuotaUsedBytes(16184098816)
|
||||
} else {
|
||||
props << GetContentType(if entry.is_dir() {'httpd/unix-directory'} else {get_file_content_type(entry.get_metadata().name)})
|
||||
}
|
||||
props << ResourceType(entry.is_dir())
|
||||
// props << SupportedLock('')
|
||||
// props << LockDiscovery('')
|
||||
|
||||
// Content length (only for files)
|
||||
if !entry.is_dir() {
|
||||
props << GetContentLength(metadata.size.str())
|
||||
}
|
||||
|
||||
// Creation date
|
||||
props << CreationDate(format_iso8601(metadata.created_time()))
|
||||
return props
|
||||
}
|
||||
@@ -1,214 +1,554 @@
|
||||
module webdav
|
||||
|
||||
import net.http
|
||||
import freeflowuniverse.herolib.core.pathlib
|
||||
import freeflowuniverse.herolib.vfs.vfs_db
|
||||
import freeflowuniverse.herolib.data.ourdb
|
||||
import encoding.xml
|
||||
import os
|
||||
import time
|
||||
import encoding.base64
|
||||
import rand
|
||||
import veb
|
||||
import net.http
|
||||
import log
|
||||
|
||||
fn test_run() {
|
||||
mut app := new_app(
|
||||
user_db: {
|
||||
'mario': '123'
|
||||
}
|
||||
)!
|
||||
spawn app.run()
|
||||
fn testsuite_begin() {
|
||||
log.set_level(.debug)
|
||||
}
|
||||
|
||||
// fn test_get() {
|
||||
// root_dir := '/tmp/webdav'
|
||||
// mut app := new_app(
|
||||
// server_port: rand.int_in_range(8000, 9000)!
|
||||
// root_dir: root_dir
|
||||
// user_db: {
|
||||
// 'mario': '123'
|
||||
// }
|
||||
// )!
|
||||
// app.run(background: true)
|
||||
// time.sleep(1 * time.second)
|
||||
// file_name := 'newfile.txt'
|
||||
// mut p := pathlib.get_file(path: '${root_dir}/${file_name}', create: true)!
|
||||
// p.write('my new file')!
|
||||
const testdata_path := os.join_path(os.dir(@FILE), 'testdata')
|
||||
const database_path := os.join_path(testdata_path, 'database')
|
||||
|
||||
// mut req := http.new_request(.get, 'http://localhost:${app.server_port}/${file_name}',
|
||||
// '')
|
||||
// signature := base64.encode_str('mario:123')
|
||||
// req.add_custom_header('Authorization', 'Basic ${signature}')!
|
||||
// Helper function to create a test server and DatabaseVFS
|
||||
fn setup_test_server(function string) !(&vfs_db.DatabaseVFS, &Server) {
|
||||
if !os.exists(testdata_path) {
|
||||
os.mkdir_all(testdata_path) or { return error('Failed to create testdata directory: ${err}') }
|
||||
}
|
||||
if !os.exists(database_path) {
|
||||
os.mkdir_all(database_path) or { return error('Failed to create database directory: ${err}') }
|
||||
}
|
||||
|
||||
mut metadata_db := ourdb.new(path: os.join_path(database_path, '${function}/metadata'))!
|
||||
mut data_db := ourdb.new(path: os.join_path(database_path, '${function}/data'))!
|
||||
mut vfs := vfs_db.new(mut metadata_db, mut data_db)!
|
||||
|
||||
// Create a test server
|
||||
mut server := new_server(vfs: vfs, user_db: {
|
||||
'admin': '123'
|
||||
})!
|
||||
|
||||
return vfs, server
|
||||
}
|
||||
|
||||
// response := req.do()!
|
||||
// assert response.body == 'my new file'
|
||||
// }
|
||||
// Helper function to create a test file in the DatabaseVFS
|
||||
fn create_test_file(mut vfs vfs_db.DatabaseVFS, path string, content string) ! {
|
||||
vfs.file_write(path, content.bytes())!
|
||||
}
|
||||
|
||||
// fn test_put() {
|
||||
// root_dir := '/tmp/webdav'
|
||||
// mut app := new_app(
|
||||
// server_port: rand.int_in_range(8000, 9000)!
|
||||
// root_dir: root_dir
|
||||
// user_db: {
|
||||
// 'mario': '123'
|
||||
// }
|
||||
// )!
|
||||
// app.run(background: true)
|
||||
// time.sleep(1 * time.second)
|
||||
// file_name := 'newfile_put.txt'
|
||||
// Helper function to create a test directory in the DatabaseVFS
|
||||
fn create_test_directory(mut vfs vfs_db.DatabaseVFS, path string) ! {
|
||||
vfs.dir_create(path)!
|
||||
}
|
||||
|
||||
// mut data := 'my new put file'
|
||||
// mut req := http.new_request(.put, 'http://localhost:${app.server_port}/${file_name}',
|
||||
// data)
|
||||
// signature := base64.encode_str('mario:123')
|
||||
// req.add_custom_header('Authorization', 'Basic ${signature}')!
|
||||
// mut response := req.do()!
|
||||
fn test_server_run() ! {
|
||||
_, mut server := setup_test_server(@FILE)!
|
||||
spawn server.run()
|
||||
time.sleep(100 * time.millisecond)
|
||||
}
|
||||
|
||||
// mut p := pathlib.get_file(path: '${root_dir}/${file_name}')!
|
||||
fn test_server_index() ! {
|
||||
_, mut server := setup_test_server(@FILE)!
|
||||
|
||||
mut ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.head
|
||||
url: '/'
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
server.index(mut ctx)
|
||||
|
||||
assert ctx.res.status() == http.Status.ok
|
||||
assert ctx.res.header.get_custom('DAV')! == '1,2'
|
||||
assert ctx.res.header.get(.allow)! == 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE'
|
||||
assert ctx.res.header.get_custom('MS-Author-Via')! == 'DAV'
|
||||
assert ctx.res.header.get(.access_control_allow_origin)! == '*'
|
||||
assert ctx.res.header.get(.access_control_allow_methods)! == 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE'
|
||||
assert ctx.res.header.get(.access_control_allow_headers)! == 'Authorization, Content-Type'
|
||||
assert ctx.res.header.get(.content_length)! == '0'
|
||||
}
|
||||
|
||||
// assert p.exists()
|
||||
// assert p.read()! == data
|
||||
fn test_server_options() ! {
|
||||
_, mut server := setup_test_server(@FILE)!
|
||||
|
||||
mut ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.options
|
||||
url: '/test_path'
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
server.options(mut ctx, 'test_path')
|
||||
|
||||
assert ctx.res.status() == http.Status.ok
|
||||
assert ctx.res.header.get_custom('DAV')! == '1,2'
|
||||
assert ctx.res.header.get(.allow)! == 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE'
|
||||
assert ctx.res.header.get_custom('MS-Author-Via')! == 'DAV'
|
||||
assert ctx.res.header.get(.access_control_allow_origin)! == '*'
|
||||
assert ctx.res.header.get(.access_control_allow_methods)! == 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE'
|
||||
assert ctx.res.header.get(.access_control_allow_headers)! == 'Authorization, Content-Type'
|
||||
assert ctx.res.header.get(.content_length)! == '0'
|
||||
}
|
||||
|
||||
// data = 'updated data'
|
||||
// req = http.new_request(.put, 'http://localhost:${app.server_port}/${file_name}', data)
|
||||
// req.add_custom_header('Authorization', 'Basic ${signature}')!
|
||||
// response = req.do()!
|
||||
fn test_server_lock() ! {
|
||||
_, mut server := setup_test_server(@FILE)!
|
||||
|
||||
// Create a test file to lock
|
||||
test_path := 'test_lock_file.txt'
|
||||
|
||||
// Prepare lock XML request body
|
||||
lock_xml := '<?xml version="1.0" encoding="utf-8"?>
|
||||
<D:lockinfo xmlns:D="DAV:">
|
||||
<D:lockscope><D:exclusive/></D:lockscope>
|
||||
<D:locktype><D:write/></D:locktype>
|
||||
<D:owner>
|
||||
<D:href>test-user</D:href>
|
||||
</D:owner>
|
||||
</D:lockinfo>'
|
||||
|
||||
mut ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.lock
|
||||
url: '/${test_path}'
|
||||
data: lock_xml
|
||||
header: http.Header{}
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
// Set headers
|
||||
ctx.req.header.add_custom('Depth', '0')!
|
||||
ctx.req.header.add_custom('Timeout', 'Second-3600')!
|
||||
|
||||
server.lock(mut ctx, test_path)
|
||||
|
||||
// Check response
|
||||
assert ctx.res.status() == http.Status.ok
|
||||
assert ctx.res.header.get_custom('Lock-Token')! != ''
|
||||
assert ctx.res.header.get(.content_type)! == 'application/xml'
|
||||
|
||||
// Verify response contains proper lock XML
|
||||
assert ctx.res.body.len > 0
|
||||
assert ctx.res.body.contains('<D:lockdiscovery')
|
||||
assert ctx.res.body.contains('<D:activelock>')
|
||||
}
|
||||
|
||||
// p = pathlib.get_file(path: '${root_dir}/${file_name}')!
|
||||
fn test_server_unlock() ! {
|
||||
_, mut server := setup_test_server(@FILE)!
|
||||
|
||||
// Create a test file
|
||||
test_path := 'test_unlock_file.txt'
|
||||
|
||||
// First lock the resource
|
||||
lock_xml := '<?xml version="1.0" encoding="utf-8"?>
|
||||
<D:lockinfo xmlns:D="DAV:">
|
||||
<D:lockscope><D:exclusive/></D:lockscope>
|
||||
<D:locktype><D:write/></D:locktype>
|
||||
<D:owner>
|
||||
<D:href>test-user</D:href>
|
||||
</D:owner>
|
||||
</D:lockinfo>'
|
||||
|
||||
mut lock_ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.lock
|
||||
url: '/${test_path}'
|
||||
data: lock_xml
|
||||
header: http.Header{}
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
lock_ctx.req.header.add_custom('Depth', '0')!
|
||||
lock_ctx.req.header.add_custom('Timeout', 'Second-3600')!
|
||||
|
||||
server.lock(mut lock_ctx, test_path)
|
||||
|
||||
// Extract lock token from response
|
||||
lock_token := lock_ctx.res.header.get_custom('Lock-Token')!
|
||||
|
||||
// Now unlock the resource
|
||||
mut unlock_ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.unlock
|
||||
url: '/${test_path}'
|
||||
header: http.Header{}
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
unlock_ctx.req.header.add_custom('Lock-Token', lock_token)!
|
||||
|
||||
server.unlock(mut unlock_ctx, test_path)
|
||||
|
||||
// Check response
|
||||
assert unlock_ctx.res.status() == http.Status.no_content
|
||||
}
|
||||
|
||||
// assert p.exists()
|
||||
// assert p.read()! == data
|
||||
// }
|
||||
fn test_server_get_file() ! {
|
||||
mut vfs, mut server := setup_test_server(@FN)!
|
||||
|
||||
// Create a test file
|
||||
test_path := 'test_get_file.txt'
|
||||
test_content := 'This is a test file content'
|
||||
create_test_file(mut vfs, test_path, test_content)!
|
||||
|
||||
mut ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.get
|
||||
url: '/${test_path}'
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
server.get_file(mut ctx, test_path)
|
||||
|
||||
// Check response
|
||||
assert ctx.res.status() == http.Status.ok
|
||||
assert ctx.res.header.get(.content_type)! == 'text/plain'
|
||||
assert ctx.res.body == test_content
|
||||
}
|
||||
|
||||
// fn test_copy() {
|
||||
// root_dir := '/tmp/webdav'
|
||||
// mut app := new_app(
|
||||
// server_port: rand.int_in_range(8000, 9000)!
|
||||
// root_dir: root_dir
|
||||
// user_db: {
|
||||
// 'mario': '123'
|
||||
// }
|
||||
// )!
|
||||
// app.run(background: true)
|
||||
fn test_server_exists() ! {
|
||||
mut vfs, mut server := setup_test_server(@FILE)!
|
||||
|
||||
// Create a test file
|
||||
test_path := 'test_exists_file.txt'
|
||||
test_content := 'This is a test file content'
|
||||
create_test_file(mut vfs, test_path, test_content)!
|
||||
|
||||
// Test for existing file
|
||||
mut ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.head
|
||||
url: '/${test_path}'
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
server.exists(mut ctx, test_path)
|
||||
|
||||
// Check response for existing file
|
||||
assert ctx.res.status() == http.Status.ok
|
||||
assert ctx.res.header.get_custom('dav')! == '1, 2'
|
||||
assert ctx.res.header.get(.content_length)! == '0'
|
||||
assert ctx.res.header.get_custom('Allow')!.contains('OPTIONS')
|
||||
assert ctx.res.header.get(.accept_ranges)! == 'bytes'
|
||||
|
||||
// Test for non-existing file
|
||||
mut ctx2 := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.head
|
||||
url: '/nonexistent_file.txt'
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
server.exists(mut ctx2, 'nonexistent_file.txt')
|
||||
|
||||
// Check response for non-existing file
|
||||
assert ctx2.res.status() == http.Status.not_found
|
||||
}
|
||||
|
||||
// time.sleep(1 * time.second)
|
||||
// file_name1, file_name2 := 'newfile_copy1.txt', 'newfile_copy2.txt'
|
||||
// mut p1 := pathlib.get_file(path: '${root_dir}/${file_name1}', create: true)!
|
||||
// data := 'file copy data'
|
||||
// p1.write(data)!
|
||||
fn test_server_delete() ! {
|
||||
mut vfs, mut server := setup_test_server(@FILE)!
|
||||
|
||||
// Create a test file
|
||||
test_path := 'test_delete_file.txt'
|
||||
test_content := 'This is a test file to delete'
|
||||
create_test_file(mut vfs, test_path, test_content)!
|
||||
|
||||
// Verify file exists
|
||||
assert vfs.exists(test_path)
|
||||
|
||||
mut ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.delete
|
||||
url: '/${test_path}'
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
server.delete(mut ctx, test_path)
|
||||
|
||||
// Check response
|
||||
assert ctx.res.status() == http.Status.no_content
|
||||
|
||||
// Verify file was deleted
|
||||
assert !vfs.exists(test_path)
|
||||
}
|
||||
|
||||
// mut req := http.new_request(.copy, 'http://localhost:${app.server_port}/${file_name1}',
|
||||
// '')
|
||||
// signature := base64.encode_str('mario:123')
|
||||
// req.add_custom_header('Authorization', 'Basic ${signature}')!
|
||||
// req.add_custom_header('Destination', 'http://localhost:${app.server_port}/${file_name2}')!
|
||||
// mut response := req.do()!
|
||||
fn test_server_copy() ! {
|
||||
mut vfs, mut server := setup_test_server(@FILE)!
|
||||
|
||||
// Create a test file
|
||||
source_path := 'test_copy_source.txt'
|
||||
dest_path := 'test_copy_dest.txt'
|
||||
test_content := 'This is a test file to copy'
|
||||
create_test_file(mut vfs, source_path, test_content)!
|
||||
|
||||
mut ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.copy
|
||||
url: '/${source_path}'
|
||||
header: http.Header{}
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
// Set Destination header
|
||||
ctx.req.header.add_custom('Destination', 'http://localhost/${dest_path}')!
|
||||
log.set_level(.debug)
|
||||
server.copy(mut ctx, source_path)
|
||||
|
||||
// Check response
|
||||
assert ctx.res.status() == http.Status.ok
|
||||
|
||||
// Verify destination file exists and has the same content
|
||||
assert vfs.exists(dest_path)
|
||||
dest_content := vfs.file_read(dest_path) or { panic(err) }
|
||||
assert dest_content.bytestr() == test_content
|
||||
}
|
||||
|
||||
// assert p1.exists()
|
||||
// mut p2 := pathlib.get_file(path: '${root_dir}/${file_name2}')!
|
||||
// assert p2.exists()
|
||||
// assert p2.read()! == data
|
||||
// }
|
||||
fn test_server_move() ! {
|
||||
mut vfs, mut server := setup_test_server(@FILE)!
|
||||
|
||||
// Create a test file
|
||||
source_path := 'test_move_source.txt'
|
||||
dest_path := 'test_move_dest.txt'
|
||||
test_content := 'This is a test file to move'
|
||||
create_test_file(mut vfs, source_path, test_content)!
|
||||
|
||||
mut ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.move
|
||||
url: '/${source_path}'
|
||||
header: http.Header{}
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
// Set Destination header
|
||||
ctx.req.header.add_custom('Destination', 'http://localhost/${dest_path}')!
|
||||
|
||||
server.move(mut ctx, source_path)
|
||||
|
||||
// Check response
|
||||
assert ctx.res.status() == http.Status.ok
|
||||
|
||||
// Verify source file no longer exists
|
||||
assert !vfs.exists(source_path)
|
||||
|
||||
// Verify destination file exists and has the same content
|
||||
assert vfs.exists(dest_path)
|
||||
dest_content := vfs.file_read(dest_path) or { panic(err) }
|
||||
assert dest_content.bytestr() == test_content
|
||||
}
|
||||
|
||||
// fn test_move() {
|
||||
// root_dir := '/tmp/webdav'
|
||||
// mut app := new_app(
|
||||
// server_port: rand.int_in_range(8000, 9000)!
|
||||
// root_dir: root_dir
|
||||
// user_db: {
|
||||
// 'mario': '123'
|
||||
// }
|
||||
// )!
|
||||
// app.run(background: true)
|
||||
fn test_server_mkcol() ! {
|
||||
mut vfs, mut server := setup_test_server(@FILE)!
|
||||
|
||||
// Test directory path
|
||||
test_dir := 'test_mkcol_dir'
|
||||
|
||||
mut ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.mkcol
|
||||
url: '/${test_dir}'
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
server.mkcol(mut ctx, test_dir)
|
||||
|
||||
// Check response
|
||||
assert ctx.res.status() == http.Status.created
|
||||
|
||||
// Verify directory was created
|
||||
assert vfs.exists(test_dir)
|
||||
dir_entry := vfs.get(test_dir) or { panic(err) }
|
||||
assert dir_entry.is_dir()
|
||||
|
||||
// Test creating a collection that already exists
|
||||
mut ctx2 := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.mkcol
|
||||
url: '/${test_dir}'
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
server.mkcol(mut ctx2, test_dir)
|
||||
|
||||
// Should return bad request for existing collection
|
||||
assert ctx2.res.status() == http.Status.bad_request
|
||||
}
|
||||
|
||||
// time.sleep(1 * time.second)
|
||||
// file_name1, file_name2 := 'newfile_move1.txt', 'newfile_move2.txt'
|
||||
// mut p := pathlib.get_file(path: '${root_dir}/${file_name1}', create: true)!
|
||||
// data := 'file move data'
|
||||
// p.write(data)!
|
||||
fn test_server_put() ! {
|
||||
mut vfs, mut server := setup_test_server(@FILE)!
|
||||
|
||||
// Test file path
|
||||
test_file := 'test_put_file.txt'
|
||||
test_content := 'This is content for PUT test'
|
||||
|
||||
mut ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.put
|
||||
url: '/${test_file}'
|
||||
data: test_content
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
server.create_or_update(mut ctx, test_file)
|
||||
|
||||
// Check response
|
||||
assert ctx.res.status() == http.Status.ok
|
||||
|
||||
// Verify file was created with correct content
|
||||
assert vfs.exists(test_file)
|
||||
file_content := vfs.file_read(test_file) or { panic(err) }
|
||||
assert file_content.bytestr() == test_content
|
||||
|
||||
// Test updating existing file
|
||||
new_content := 'Updated content for PUT test'
|
||||
mut ctx2 := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.put
|
||||
url: '/${test_file}'
|
||||
data: new_content
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
server.create_or_update(mut ctx2, test_file)
|
||||
|
||||
// Check response
|
||||
assert ctx2.res.status() == http.Status.ok
|
||||
|
||||
// Verify file was updated with new content
|
||||
updated_content := vfs.file_read(test_file) or { panic(err) }
|
||||
assert updated_content.bytestr() == new_content
|
||||
}
|
||||
|
||||
// mut req := http.new_request(.move, 'http://localhost:${app.server_port}/${file_name1}',
|
||||
// '')
|
||||
// signature := base64.encode_str('mario:123')
|
||||
// req.add_custom_header('Authorization', 'Basic ${signature}')!
|
||||
// req.add_custom_header('Destination', 'http://localhost:${app.server_port}/${file_name2}')!
|
||||
// mut response := req.do()!
|
||||
|
||||
// p = pathlib.get_file(path: '${root_dir}/${file_name2}')!
|
||||
// assert p.exists()
|
||||
// assert p.read()! == data
|
||||
// }
|
||||
|
||||
// fn test_delete() {
|
||||
// root_dir := '/tmp/webdav'
|
||||
// mut app := new_app(
|
||||
// server_port: rand.int_in_range(8000, 9000)!
|
||||
// root_dir: root_dir
|
||||
// user_db: {
|
||||
// 'mario': '123'
|
||||
// }
|
||||
// )!
|
||||
// app.run(background: true)
|
||||
|
||||
// time.sleep(1 * time.second)
|
||||
// file_name := 'newfile_delete.txt'
|
||||
// mut p := pathlib.get_file(path: '${root_dir}/${file_name}', create: true)!
|
||||
|
||||
// mut req := http.new_request(.delete, 'http://localhost:${app.server_port}/${file_name}',
|
||||
// '')
|
||||
// signature := base64.encode_str('mario:123')
|
||||
// req.add_custom_header('Authorization', 'Basic ${signature}')!
|
||||
// mut response := req.do()!
|
||||
|
||||
// assert !p.exists()
|
||||
// }
|
||||
|
||||
// fn test_mkcol() {
|
||||
// root_dir := '/tmp/webdav'
|
||||
// mut app := new_app(
|
||||
// server_port: rand.int_in_range(8000, 9000)!
|
||||
// root_dir: root_dir
|
||||
// user_db: {
|
||||
// 'mario': '123'
|
||||
// }
|
||||
// )!
|
||||
// app.run(background: true)
|
||||
|
||||
// time.sleep(1 * time.second)
|
||||
// dir_name := 'newdir'
|
||||
|
||||
// mut req := http.new_request(.mkcol, 'http://localhost:${app.server_port}/${dir_name}',
|
||||
// '')
|
||||
// signature := base64.encode_str('mario:123')
|
||||
// req.add_custom_header('Authorization', 'Basic ${signature}')!
|
||||
// mut response := req.do()!
|
||||
|
||||
// mut p := pathlib.get_dir(path: '${root_dir}/${dir_name}')!
|
||||
// assert p.exists()
|
||||
// }
|
||||
|
||||
// fn test_propfind() {
|
||||
// root_dir := '/tmp/webdav'
|
||||
// mut app := new_app(
|
||||
// server_port: rand.int_in_range(8000, 9000)!
|
||||
// root_dir: root_dir
|
||||
// user_db: {
|
||||
// 'mario': '123'
|
||||
// }
|
||||
// )!
|
||||
// app.run(background: true)
|
||||
|
||||
// time.sleep(1 * time.second)
|
||||
// dir_name := 'newdir'
|
||||
// file1 := 'file1.txt'
|
||||
// file2 := 'file2.html'
|
||||
// dir1 := 'dir1'
|
||||
|
||||
// mut p := pathlib.get_dir(path: '${root_dir}/${dir_name}', create: true)!
|
||||
// mut file1_p := pathlib.get_file(path: '${p.path}/${file1}', create: true)!
|
||||
// mut file2_p := pathlib.get_file(path: '${p.path}/${file2}', create: true)!
|
||||
// mut dir1_p := pathlib.get_dir(path: '${p.path}/${dir1}', create: true)!
|
||||
|
||||
// mut req := http.new_request(.propfind, 'http://localhost:${app.server_port}/${dir_name}',
|
||||
// '')
|
||||
// signature := base64.encode_str('mario:123')
|
||||
// req.add_custom_header('Authorization', 'Basic ${signature}')!
|
||||
// mut response := req.do()!
|
||||
|
||||
// assert response.status_code == 207
|
||||
// }
|
||||
fn test_server_propfind() ! {
|
||||
mut vfs, mut server := setup_test_server(@FILE)!
|
||||
|
||||
// Create test directory and file structure
|
||||
root_dir := 'propfind_test'
|
||||
file_in_root := '${root_dir}/test_file.txt'
|
||||
subdir := '${root_dir}/subdir'
|
||||
file_in_subdir := '${subdir}/subdir_file.txt'
|
||||
|
||||
create_test_directory(mut vfs, root_dir)!
|
||||
create_test_file(mut vfs, file_in_root, 'Test file content')!
|
||||
create_test_directory(mut vfs, subdir)!
|
||||
create_test_file(mut vfs, file_in_subdir, 'Subdir file content')!
|
||||
|
||||
// Test PROPFIND with depth=0 (just the resource)
|
||||
propfind_xml := '<?xml version="1.0" encoding="utf-8"?>
|
||||
<D:propfind xmlns:D="DAV:">
|
||||
<D:allprop/>
|
||||
</D:propfind>'
|
||||
|
||||
mut ctx := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.propfind
|
||||
url: '/${root_dir}'
|
||||
data: propfind_xml
|
||||
header: http.Header{}
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
// Set Depth header to 0
|
||||
ctx.req.header.add_custom('Depth', '0')!
|
||||
|
||||
server.propfind(mut ctx, root_dir)
|
||||
|
||||
// Check response
|
||||
assert ctx.res.status() == http.Status.multi_status
|
||||
assert ctx.res.header.get(.content_type)! == 'application/xml'
|
||||
assert ctx.res.body.contains('<D:multistatus')
|
||||
assert ctx.res.body.contains('<D:response>')
|
||||
assert ctx.res.body.contains('<D:href>${root_dir}</D:href>')
|
||||
// Should only include the requested resource
|
||||
assert !ctx.res.body.contains('<D:href>${file_in_root}</D:href>')
|
||||
|
||||
// Test PROPFIND with depth=1 (resource and immediate children)
|
||||
mut ctx2 := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.propfind
|
||||
url: '/${root_dir}'
|
||||
data: propfind_xml
|
||||
header: http.Header{}
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
// Set Depth header to 1
|
||||
ctx2.req.header.add_custom('Depth', '1')!
|
||||
|
||||
server.propfind(mut ctx2, root_dir)
|
||||
|
||||
// Check response
|
||||
assert ctx2.res.status() == http.Status.multi_status
|
||||
assert ctx2.res.body.contains('<D:multistatus')
|
||||
// Should include the resource and immediate children
|
||||
assert ctx2.res.body.contains('<D:href>${root_dir}</D:href>')
|
||||
assert ctx2.res.body.contains('<D:href>${file_in_root}</D:href>')
|
||||
assert ctx2.res.body.contains('<D:href>${subdir}</D:href>')
|
||||
// But not grandchildren
|
||||
assert !ctx2.res.body.contains('<D:href>${file_in_subdir}</D:href>')
|
||||
|
||||
// Test PROPFIND with depth=infinity (all descendants)
|
||||
mut ctx3 := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.propfind
|
||||
url: '/${root_dir}'
|
||||
data: propfind_xml
|
||||
header: http.Header{}
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
// Set Depth header to infinity
|
||||
ctx3.req.header.add_custom('Depth', 'infinity')!
|
||||
|
||||
server.propfind(mut ctx3, root_dir)
|
||||
|
||||
// Check response
|
||||
assert ctx3.res.status() == http.Status.multi_status
|
||||
// Should include all descendants
|
||||
assert ctx3.res.body.contains('<D:href>${root_dir}</D:href>')
|
||||
assert ctx3.res.body.contains('<D:href>${file_in_root}</D:href>')
|
||||
assert ctx3.res.body.contains('<D:href>${subdir}</D:href>')
|
||||
assert ctx3.res.body.contains('<D:href>${file_in_subdir}</D:href>')
|
||||
|
||||
// Test PROPFIND for non-existent resource
|
||||
mut ctx4 := Context{
|
||||
req: http.Request{
|
||||
method: http.Method.propfind
|
||||
url: '/nonexistent'
|
||||
data: propfind_xml
|
||||
header: http.Header{}
|
||||
}
|
||||
res: http.Response{}
|
||||
}
|
||||
|
||||
ctx4.req.header.add_custom('Depth', '0')!
|
||||
|
||||
server.propfind(mut ctx4, 'nonexistent')
|
||||
|
||||
// Should return not found
|
||||
assert ctx4.res.status() == http.Status.not_found
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8" ?>
<D:prop xmlns:D="DAV:">
<D:lockdiscovery xmlns:D="DAV:">
	<D:lockdiscovery>
		<D:activelock>
			<D:locktype><D:@{l.lock_type}/></D:locktype>
			<D:lockscope><D:@{l.scope}/></D:lockscope>
@@ -10,7 +10,7 @@
			</D:owner>
			<D:timeout>Second-@{l.timeout}</D:timeout>
			<D:locktoken>
				<D:href>@{l.token}</D:href>
				<D:href>opaquelocktoken:@{l.token}</D:href>
			</D:locktoken>
			<D:lockroot>
				<D:href>@{l.resource}</D:href>

@@ -12,6 +12,7 @@ mut:
	file_create(path string) !FSEntry
	file_read(path string) ![]u8
	file_write(path string, data []u8) !
	file_concatenate(path string, data []u8) !
	file_delete(path string) !

	// Directory operations
@@ -32,6 +33,11 @@ mut:
	move(src_path string, dst_path string) !FSEntry
	delete(path string) !

	// FSEntry Operations
	get_path(entry &FSEntry) !string

	print() !

	// Cleanup operation
	destroy() !
}
@@ -39,7 +45,7 @@ mut:
// FSEntry represents a filesystem entry (file, directory, or symlink)
pub interface FSEntry {
	get_metadata() Metadata
	get_path() string
	// get_path() string
	is_dir() bool
	is_file() bool
	is_symlink() bool

@@ -7,7 +7,6 @@ pub struct Metadata {
pub mut:
	id u32 @[required] // unique identifier used as key in DB
	name string @[required] // name of file or directory
	path string @[required] // path of file or directory
	file_type FileType
	size u64
	created_at i64 // unix epoch timestamp

@@ -35,26 +35,30 @@ pub fn (mut fs DatabaseVFS) get_next_id() u32 {
|
||||
// load_entry loads an entry from the database by ID and sets up parent references
|
||||
// loads without data
|
||||
fn (mut fs DatabaseVFS) load_entry(vfs_id u32) !FSEntry {
|
||||
if metadata := fs.db_metadata.get(fs.get_database_id(vfs_id)!) {
|
||||
match decode_entry_type(metadata)! {
|
||||
.directory {
|
||||
mut dir := decode_directory(metadata) or {
|
||||
return error('Failed to decode directory: ${err}')
|
||||
if db_id := fs.id_table[vfs_id] {
|
||||
if metadata := fs.db_metadata.get(db_id) {
|
||||
match decode_entry_type(metadata)! {
|
||||
.directory {
|
||||
mut dir := decode_directory(metadata) or {
|
||||
return error('Failed to decode directory: ${err}')
|
||||
}
|
||||
return dir
|
||||
}
|
||||
return dir
|
||||
}
|
||||
.file {
|
||||
return decode_file_metadata(metadata) or { return error('Failed to decode file: ${err}') }
|
||||
}
|
||||
.symlink {
|
||||
mut symlink := decode_symlink(metadata) or {
|
||||
return error('Failed to decode symlink: ${err}')
|
||||
.file {
|
||||
return decode_file_metadata(metadata) or { return error('Failed to decode file: ${err}') }
|
||||
}
|
||||
.symlink {
|
||||
mut symlink := decode_symlink(metadata) or {
|
||||
return error('Failed to decode symlink: ${err}')
|
||||
}
|
||||
return symlink
|
||||
}
|
||||
return symlink
|
||||
}
|
||||
} else {
|
||||
return error('Entry ${vfs_id} not found ${err}')
|
||||
}
|
||||
} else {
|
||||
return error('Entry ${vfs_id} not found ${err}')
|
||||
return error('Entry ${vfs_id} not found')
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,4 +78,102 @@ fn (mut fs DatabaseVFS) load_entry(vfs_id u32) !FSEntry {
|
||||
// }
|
||||
// }
|
||||
// return file_data
|
||||
// }
|
||||
// }
|
||||
|
||||
|
||||
fn (mut self DatabaseVFS) get_entry(path string) !FSEntry {
|
||||
if path == '/' || path == '' || path == '.' {
|
||||
return FSEntry(self.root_get_as_dir()!)
|
||||
}
|
||||
parts := path.trim_string_left('/').split('/')
|
||||
mut parent_dir := *self.root_get_as_dir()!
|
||||
for i, part in parts {
|
||||
entry := self.directory_get_entry(parent_dir, part) or {
|
||||
return error('Failed to get entry ${err}')
|
||||
}
|
||||
if i == parts.len - 1 {
|
||||
// last part, means entry is found
|
||||
return entry
|
||||
}
|
||||
if entry is Directory {
|
||||
parent_dir = entry
|
||||
} else {
|
||||
return error('Failed to get entry, expected dir')
|
||||
}
|
||||
}
|
||||
// mut current := *self.root_get_as_dir()!
|
||||
// return self.directory_get_entry(mut current, path) or {
|
||||
return error('Path not found: ${path}')
|
||||
// }
|
||||
}
|
||||
|
||||
// internal function to get an entry of some name from a directory
|
||||
fn (mut self DatabaseVFS) directory_get_entry(dir Directory, name string) ?FSEntry {
|
||||
// mut children := self.directory_children(mut dir, false) or {
|
||||
// panic('this should never happen')
|
||||
// }
|
||||
for child_id in dir.children {
|
||||
if entry := self.load_entry(child_id) {
|
||||
if entry.metadata.name == name {
|
||||
return entry
|
||||
}
|
||||
} else {
|
||||
panic('Filesystem is corrupted, this should never happen ${err}')
|
||||
}
|
||||
}
|
||||
return none
|
||||
}
|
||||
|
||||
fn (mut self DatabaseVFS) get_directory(path string) !&Directory {
|
||||
mut entry := self.get_entry(path)!
|
||||
if mut entry is Directory {
|
||||
return &entry
|
||||
}
|
||||
return error('Not a directory: ${path}')
|
||||
}
|
||||
|
||||
|
||||
pub fn (mut self DatabaseVFS) get_path(entry_ &vfs.FSEntry) !string {
|
||||
// entry := self.load_entry(entry_.metadata.)
|
||||
// entry.parent_id == 0 {
|
||||
// return '/${entry.metadata.name}'
|
||||
// } else {
|
||||
// parent := self.load_entry(entry.parent_id)!
|
||||
// return '${self.get_path(parent)!}/${entry.metadata.name}'
|
||||
// }
|
||||
return ''
|
||||
}
|
||||
|
||||
|
||||
// Implementation of VFSImplementation interface
|
||||
pub fn (mut fs DatabaseVFS) root_get_as_dir() !&Directory {
|
||||
// Try to load root directory from DB if it exists
|
||||
|
||||
if db_id := fs.id_table[fs.root_id] {
|
||||
if data := fs.db_metadata.get(db_id) {
|
||||
mut loaded_root := decode_directory(data) or {
|
||||
panic('Failed to decode root directory: ${err}')
|
||||
}
|
||||
return &loaded_root
|
||||
}
|
||||
}
|
||||
|
||||
// Create and save new root directory
|
||||
mut myroot := Directory{
|
||||
metadata: vfs.Metadata{
|
||||
id: fs.get_next_id()
|
||||
file_type: .directory
|
||||
name: ''
|
||||
created_at: time.utc().unix()
|
||||
modified_at: time.utc().unix()
|
||||
accessed_at: time.utc().unix()
|
||||
mode: 0o755 // default directory permissions
|
||||
owner: 'user' // TODO: get from system
|
||||
group: 'user' // TODO: get from system
|
||||
}
|
||||
parent_id: 0
|
||||
}
|
||||
fs.save_entry(myroot) or {return error('failed to set root ${err}')}
|
||||
fs.root_id = myroot.metadata.id
|
||||
return &myroot
|
||||
}
|
||||
@@ -12,12 +12,10 @@ fn setup_vfs() !(&DatabaseVFS, string) {
|
||||
// Create separate databases for data and metadata
|
||||
mut db_data := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'data')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
mut db_metadata := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'metadata')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
// Create VFS with separate databases for data and metadata
|
||||
@@ -8,39 +8,42 @@ import time
|
||||
import log
|
||||
|
||||
// save_entry saves an entry to the database
|
||||
pub fn (mut fs DatabaseVFS) save_entry(entry FSEntry) !u32 {
|
||||
pub fn (mut fs DatabaseVFS) save_entry(entry FSEntry) ! {
|
||||
match entry {
|
||||
Directory {
|
||||
encoded := entry.encode()
|
||||
db_id := fs.db_metadata.set(id: entry.metadata.id, data: encoded) or {
|
||||
db_id := fs.db_metadata.set(data: encoded) or {
|
||||
return error('Failed to save directory on id:${entry.metadata.id}: ${err}')
|
||||
}
|
||||
for child_id in entry.children {
|
||||
_ := fs.db_metadata.get(fs.get_database_id(child_id)!) or {
|
||||
return error('Failed to get entry for directory child ${child_id} missing.\n${err}')
|
||||
}
|
||||
}
|
||||
log.debug('[DatabaseVFS] Saving dir entry with children ${entry.children}')
|
||||
fs.set_database_id(entry.metadata.id, db_id)!
|
||||
return entry.metadata.id
|
||||
fs.id_table[entry.metadata.id] = db_id
|
||||
// for child_id in entry.children {
|
||||
// if db_id := fs.id_table[child_id] {
|
||||
// _ := fs.db_metadata.get(fs.get_database_id(child_id)!) or {
|
||||
// return error('Failed to get entry for directory child ${child_id} missing.\n${err}')
|
||||
// }
|
||||
// log.debug('[DatabaseVFS] Saving dir entry with children ${entry.children}')
|
||||
// fs.set_database_id(entry.metadata.id, db_id)!
|
||||
// return entry.metadata.id
|
||||
// } else {
|
||||
// return error('Failed to get entry for directory child ${child_id} missing.\n${err}')
|
||||
// }
|
||||
// }
|
||||
}
|
||||
File {
|
||||
metadata_bytes := entry.encode()
|
||||
// Save the metadata_bytes to metadata_db
|
||||
metadata_db_id := fs.db_metadata.set(id: entry.metadata.id, data: metadata_bytes) or {
|
||||
metadata_db_id := fs.db_metadata.set(data: metadata_bytes) or {
|
||||
return error('Failed to save file metadata on id:${entry.metadata.id}: ${err}')
|
||||
}
|
||||
|
||||
fs.set_database_id(entry.metadata.id, metadata_db_id)!
|
||||
return entry.metadata.id
|
||||
fs.id_table[entry.metadata.id] = metadata_db_id
|
||||
}
|
||||
Symlink {
|
||||
encoded := entry.encode()
|
||||
db_id := fs.db_metadata.set(id: entry.metadata.id, data: encoded) or {
|
||||
db_id := fs.db_metadata.set(data: encoded) or {
|
||||
return error('Failed to save symlink on id:${entry.metadata.id}: ${err}')
|
||||
}
|
||||
fs.set_database_id(entry.metadata.id, db_id)!
|
||||
return entry.metadata.id
|
||||
fs.id_table[entry.metadata.id] = db_id
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -64,15 +67,15 @@ pub fn (mut fs DatabaseVFS) save_file(file_ File, data []u8) !u32 {
|
||||
if data.len > 0 {
|
||||
// file has data so that will be stored in data_db
|
||||
// split data_encoded into chunks of 64 kb
|
||||
chunks := arrays.chunk(data, 64 * 1024)
|
||||
chunks := arrays.chunk(data, (64 * 1024) - 1)
|
||||
mut chunk_ids := []u32{}
|
||||
|
||||
for i, chunk in chunks {
|
||||
// Generate a unique ID for each chunk based on the file ID
|
||||
chunk_id := file_id * 1000 + u32(i) + 1
|
||||
chunk_ids << fs.db_data.set(id: chunk_id, data: chunk) or {
|
||||
chunk_ids << fs.db_data.set(data: chunk) or {
|
||||
return error('Failed to save file data on id:${file.metadata.id}: ${err}')
|
||||
}
|
||||
log.debug('[DatabaseVFS] Saving chunk ${chunk_ids}')
|
||||
}
|
||||
|
||||
// Update the file with chunk IDs and size
|
||||
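A small hedged example of the chunking step above, using the same arrays.chunk helper and the new (64 * 1024) - 1 chunk size; the payload length is arbitrary and only chosen to show the resulting chunk count.

import arrays

fn main() {
	// illustrative payload; the real code chunks whatever bytes save_file receives
	data := []u8{len: 200_000}
	chunk_size := (64 * 1024) - 1
	chunks := arrays.chunk(data, chunk_size)
	assert chunks.len == 4 // 3 full 65535-byte chunks plus a 3395-byte remainder
	println('split ${data.len} bytes into ${chunks.len} chunks of at most ${chunk_size} bytes')
}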
@@ -89,10 +92,10 @@ pub fn (mut fs DatabaseVFS) save_file(file_ File, data []u8) !u32 {
|
||||
// Encode the file with all its metadata
|
||||
metadata_bytes := updated_file.encode()
|
||||
// Save the metadata_bytes to metadata_db
|
||||
metadata_db_id := fs.db_metadata.set(id: file.metadata.id, data: metadata_bytes) or {
|
||||
metadata_db_id := fs.db_metadata.set(data: metadata_bytes) or {
|
||||
return error('Failed to save file metadata on id:${file.metadata.id}: ${err}')
|
||||
}
|
||||
|
||||
fs.set_database_id(file.metadata.id, metadata_db_id)!
|
||||
fs.id_table[file.metadata.id] = metadata_db_id
|
||||
return file.metadata.id
|
||||
}
|
||||
|
||||
@@ -120,7 +120,6 @@ pub fn decode_symlink(data []u8) !Symlink {
|
||||
fn decode_metadata(mut d encoder.Decoder) !vfs.Metadata {
|
||||
id := d.get_u32()!
|
||||
name := d.get_string()!
|
||||
path := d.get_string()!
|
||||
file_type_byte := d.get_u8()!
|
||||
size := d.get_u64()!
|
||||
created_at := d.get_i64()!
|
||||
@@ -133,7 +132,6 @@ fn decode_metadata(mut d encoder.Decoder) !vfs.Metadata {
|
||||
return vfs.Metadata{
|
||||
id: id
|
||||
name: name
|
||||
path: path
|
||||
file_type: unsafe { vfs.FileType(file_type_byte) }
|
||||
size: size
|
||||
created_at: created_at
|
||||
|
||||
@@ -7,7 +7,6 @@ import freeflowuniverse.herolib.vfs
|
||||
fn encode_metadata(mut e encoder.Encoder, m vfs.Metadata) {
|
||||
e.add_u32(m.id)
|
||||
e.add_string(m.name)
|
||||
e.add_string(m.path)
|
||||
e.add_u8(u8(m.file_type)) // FileType enum as u8
|
||||
e.add_u64(m.size)
|
||||
e.add_i64(m.created_at)
|
||||
|
||||
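The encoder and decoder above read and write metadata fields positionally, so both sides must agree on the field order. The toy snippet below only illustrates that constraint with two length-prefixed strings; it is not the herolib encoder and the field set is invented for the example.

fn main() {
	// encode: length-prefixed name followed by length-prefixed path
	name := 'root'
	path := '/'
	mut buf := []u8{}
	buf << u8(name.len)
	buf << name.bytes()
	buf << u8(path.len)
	buf << path.bytes()

	// decode in exactly the same order; swapping the two reads would corrupt both fields
	name_len := int(buf[0])
	decoded_name := buf[1..1 + name_len].bytestr()
	path_len := int(buf[1 + name_len])
	decoded_path := buf[2 + name_len..2 + name_len + path_len].bytestr()
	assert decoded_name == name && decoded_path == path
	println('decoded ${decoded_name} ${decoded_path}')
}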
@@ -11,7 +11,6 @@ fn test_directory_encoder_decoder() ! {
|
||||
dir := Directory{
|
||||
metadata: vfs.Metadata{
|
||||
id: u32(current_time)
|
||||
path: '/root'
|
||||
name: 'root'
|
||||
file_type: .directory
|
||||
created_at: current_time
|
||||
@@ -49,12 +48,11 @@ fn test_directory_encoder_decoder() ! {
|
||||
fn test_file_encoder_decoder() ! {
|
||||
println('Testing encoding/decoding files...')
|
||||
|
||||
current_time := time.now().unix()
|
||||
current_time := time.utc().unix()
|
||||
file := File{
|
||||
metadata: vfs.Metadata{
|
||||
id: u32(current_time)
|
||||
name: 'test.txt'
|
||||
path: '/test.txt'
|
||||
file_type: .file
|
||||
size: 13 // Size of 'Hello, world!'
|
||||
created_at: current_time
|
||||
@@ -96,7 +94,6 @@ fn test_symlink_encoder_decoder() ! {
|
||||
metadata: vfs.Metadata{
|
||||
id: u32(current_time)
|
||||
name: 'test.txt'
|
||||
path: '/test.txt'
|
||||
file_type: .symlink
|
||||
created_at: current_time
|
||||
modified_at: current_time
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
module vfs_db
|
||||
|
||||
import freeflowuniverse.herolib.vfs
|
||||
import freeflowuniverse.herolib.data.ourdb
|
||||
import time
|
||||
|
||||
// get_database_id gets the corresponding db id for a file's metadata id.
// since multiple vfs instances can share a single db, and dbs may have their own id logic,
// databases assign independent ids to data
|
||||
pub fn (fs DatabaseVFS) get_database_id(vfs_id u32) !u32 {
|
||||
return fs.id_table[vfs_id] or { error('VFS ID ${vfs_id} not found.') }
|
||||
}
|
||||
|
||||
// set_database_id records the corresponding db id for a file's metadata id.
// since multiple vfs instances can share a single db, and dbs may have their own id logic,
// databases assign independent ids to data
|
||||
pub fn (mut fs DatabaseVFS) set_database_id(vfs_id u32, db_id u32) ! {
|
||||
fs.id_table[vfs_id] = db_id
|
||||
}
|
||||
@@ -1,77 +0,0 @@
|
||||
module vfs_db
|
||||
|
||||
import os
|
||||
import freeflowuniverse.herolib.data.ourdb
|
||||
import rand
|
||||
|
||||
fn setup_vfs() !&DatabaseVFS {
|
||||
test_data_dir := os.join_path(os.temp_dir(), 'vfsourdb_id_table_test_${rand.string(3)}')
|
||||
os.mkdir_all(test_data_dir)!
|
||||
|
||||
// Create separate databases for data and metadata
|
||||
mut db_data := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'data')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
mut db_metadata := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'metadata')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
// Create VFS with separate databases for data and metadata
|
||||
mut vfs := new(mut db_data, mut db_metadata)!
|
||||
return vfs
|
||||
}
|
||||
|
||||
fn test_set_get_database_id() ! {
|
||||
mut vfs := setup_vfs()!
|
||||
|
||||
// Test setting and getting database IDs
|
||||
vfs_id := u32(1)
|
||||
db_id := u32(42)
|
||||
|
||||
// Set the database ID
|
||||
vfs.set_database_id(vfs_id, db_id)!
|
||||
|
||||
// Get the database ID and verify it matches
|
||||
retrieved_id := vfs.get_database_id(vfs_id)!
|
||||
assert retrieved_id == db_id
|
||||
}
|
||||
|
||||
fn test_get_nonexistent_id() ! {
|
||||
mut vfs := setup_vfs()!
|
||||
|
||||
// Try to get a database ID that doesn't exist
|
||||
if _ := vfs.get_database_id(999) {
|
||||
assert false, 'Expected error when getting non-existent ID'
|
||||
} else {
|
||||
assert err.msg() == 'VFS ID 999 not found.'
|
||||
}
|
||||
}
|
||||
|
||||
fn test_multiple_ids() ! {
|
||||
mut vfs := setup_vfs()!
|
||||
|
||||
// Set multiple IDs
|
||||
vfs.set_database_id(1, 101)!
|
||||
vfs.set_database_id(2, 102)!
|
||||
vfs.set_database_id(3, 103)!
|
||||
|
||||
// Verify all IDs can be retrieved correctly
|
||||
assert vfs.get_database_id(1)! == 101
|
||||
assert vfs.get_database_id(2)! == 102
|
||||
assert vfs.get_database_id(3)! == 103
|
||||
}
|
||||
|
||||
fn test_update_id() ! {
|
||||
mut vfs := setup_vfs()!
|
||||
|
||||
// Set an ID
|
||||
vfs.set_database_id(1, 100)!
|
||||
assert vfs.get_database_id(1)! == 100
|
||||
|
||||
// Update the ID
|
||||
vfs.set_database_id(1, 200)!
|
||||
assert vfs.get_database_id(1)! == 200
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
module vfs_db
|
||||
|
||||
import time
|
||||
import freeflowuniverse.herolib.vfs
|
||||
|
||||
// Metadata represents the common metadata for both files and directories
|
||||
pub struct NewMetadata {
|
||||
pub mut:
|
||||
name string @[required] // name of file or directory
|
||||
path string @[required] // path of file or directory
|
||||
file_type vfs.FileType @[required]
|
||||
size u64 @[required]
|
||||
mode u32 = 0o644 // file permissions
|
||||
owner string = 'user'
|
||||
group string = 'user'
|
||||
}
|
||||
|
||||
pub fn (mut fs DatabaseVFS) new_metadata(metadata NewMetadata) vfs.Metadata {
|
||||
return vfs.new_metadata(
|
||||
id: fs.get_next_id()
|
||||
name: metadata.name
|
||||
path: metadata.path
|
||||
file_type: metadata.file_type
|
||||
size: metadata.size
|
||||
mode: metadata.mode
|
||||
owner: metadata.owner
|
||||
group: metadata.group
|
||||
)
|
||||
}
|
||||
@@ -1,142 +0,0 @@
|
||||
module vfs_db
|
||||
|
||||
import os
|
||||
import freeflowuniverse.herolib.data.ourdb
|
||||
import freeflowuniverse.herolib.vfs as vfs_mod
|
||||
import rand
|
||||
|
||||
fn setup_vfs() !&DatabaseVFS {
|
||||
test_data_dir := os.join_path(os.temp_dir(), 'vfsourdb_metadata_test_${rand.string(3)}')
|
||||
os.mkdir_all(test_data_dir)!
|
||||
|
||||
// Create separate databases for data and metadata
|
||||
mut db_data := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'data')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
mut db_metadata := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'metadata')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
// Create VFS with separate databases for data and metadata
|
||||
mut fs := new(mut db_data, mut db_metadata)!
|
||||
return fs
|
||||
}
|
||||
|
||||
fn test_new_metadata_file() ! {
|
||||
mut fs := setup_vfs()!
|
||||
|
||||
// Test creating file metadata
|
||||
metadata := fs.new_metadata(
|
||||
name: 'test_file.txt'
|
||||
path: '/test_file.txt'
|
||||
file_type: .file
|
||||
size: 1024
|
||||
)
|
||||
|
||||
// Verify the metadata
|
||||
assert metadata.name == 'test_file.txt'
|
||||
assert metadata.file_type == .file
|
||||
assert metadata.size == 1024
|
||||
assert metadata.mode == 0o644 // Default mode
|
||||
assert metadata.owner == 'user' // Default owner
|
||||
assert metadata.group == 'user' // Default group
|
||||
assert metadata.id == 1 // First ID
|
||||
}
|
||||
|
||||
fn test_new_metadata_directory() ! {
|
||||
mut fs := setup_vfs()!
|
||||
|
||||
// Test creating directory metadata
|
||||
metadata := fs.new_metadata(
|
||||
name: 'test_dir'
|
||||
path: '/test_dir'
|
||||
file_type: .directory
|
||||
size: 0
|
||||
)
|
||||
|
||||
// Verify the metadata
|
||||
assert metadata.name == 'test_dir'
|
||||
assert metadata.file_type == .directory
|
||||
assert metadata.size == 0
|
||||
assert metadata.mode == 0o644 // Default mode
|
||||
assert metadata.owner == 'user' // Default owner
|
||||
assert metadata.group == 'user' // Default group
|
||||
assert metadata.id == 1 // First ID
|
||||
}
|
||||
|
||||
fn test_new_metadata_symlink() ! {
|
||||
mut fs := setup_vfs()!
|
||||
|
||||
// Test creating symlink metadata
|
||||
metadata := fs.new_metadata(
|
||||
name: 'test_link'
|
||||
path: '/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
)
|
||||
|
||||
// Verify the metadata
|
||||
assert metadata.name == 'test_link'
|
||||
assert metadata.file_type == .symlink
|
||||
assert metadata.size == 0
|
||||
assert metadata.mode == 0o644 // Default mode
|
||||
assert metadata.owner == 'user' // Default owner
|
||||
assert metadata.group == 'user' // Default group
|
||||
assert metadata.id == 1 // First ID
|
||||
}
|
||||
|
||||
fn test_new_metadata_custom_permissions() ! {
|
||||
mut fs := setup_vfs()!
|
||||
|
||||
// Test creating metadata with custom permissions
|
||||
metadata := fs.new_metadata(
|
||||
name: 'custom_file.txt'
|
||||
path: '/custom_file.txt'
|
||||
file_type: .file
|
||||
size: 2048
|
||||
mode: 0o755
|
||||
owner: 'admin'
|
||||
group: 'staff'
|
||||
)
|
||||
|
||||
// Verify the metadata
|
||||
assert metadata.name == 'custom_file.txt'
|
||||
assert metadata.file_type == .file
|
||||
assert metadata.size == 2048
|
||||
assert metadata.mode == 0o755
|
||||
assert metadata.owner == 'admin'
|
||||
assert metadata.group == 'staff'
|
||||
assert metadata.id == 1 // First ID
|
||||
}
|
||||
|
||||
fn test_new_metadata_sequential_ids() ! {
|
||||
mut fs := setup_vfs()!
|
||||
|
||||
// Create multiple metadata objects and verify IDs are sequential
|
||||
metadata1 := fs.new_metadata(
|
||||
name: 'file1.txt'
|
||||
path: '/file1.txt'
|
||||
file_type: .file
|
||||
size: 100
|
||||
)
|
||||
assert metadata1.id == 1
|
||||
|
||||
metadata2 := fs.new_metadata(
|
||||
name: 'file2.txt'
|
||||
path: '/file2.txt'
|
||||
file_type: .file
|
||||
size: 200
|
||||
)
|
||||
assert metadata2.id == 2
|
||||
|
||||
metadata3 := fs.new_metadata(
|
||||
name: 'file3.txt'
|
||||
path: '/file3.txt'
|
||||
file_type: .file
|
||||
size: 300
|
||||
)
|
||||
assert metadata3.id == 3
|
||||
}
|
||||
@@ -14,10 +14,6 @@ fn (d &Directory) get_metadata() vfs.Metadata {
|
||||
return d.metadata
|
||||
}
|
||||
|
||||
fn (d &Directory) get_path() string {
|
||||
return d.metadata.path
|
||||
}
|
||||
|
||||
// is_dir returns true if the entry is a directory
|
||||
pub fn (d &Directory) is_dir() bool {
|
||||
return d.metadata.file_type == .directory
|
||||
|
||||
@@ -7,7 +7,6 @@ fn test_directory_get_metadata() {
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_dir'
|
||||
path: '/test_dir'
|
||||
file_type: .directory
|
||||
size: 0
|
||||
mode: 0o755
|
||||
@@ -35,39 +34,11 @@ fn test_directory_get_metadata() {
|
||||
assert retrieved_metadata.group == 'user'
|
||||
}
|
||||
|
||||
fn test_directory_get_path() {
|
||||
// Create a directory with metadata
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_dir'
|
||||
path: '/test_dir'
|
||||
file_type: .directory
|
||||
size: 0
|
||||
mode: 0o755
|
||||
owner: 'user'
|
||||
group: 'user'
|
||||
created_at: 0
|
||||
modified_at: 0
|
||||
accessed_at: 0
|
||||
}
|
||||
|
||||
dir := Directory{
|
||||
metadata: metadata
|
||||
children: []
|
||||
parent_id: 0
|
||||
}
|
||||
|
||||
// Test get_path
|
||||
path := dir.get_path()
|
||||
assert path == '/test_dir'
|
||||
}
|
||||
|
||||
fn test_directory_is_dir() {
|
||||
// Create a directory with metadata
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_dir'
|
||||
path: '/test_dir'
|
||||
file_type: .directory
|
||||
size: 0
|
||||
mode: 0o755
|
||||
@@ -95,7 +66,6 @@ fn test_directory_with_children() {
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'parent_dir'
|
||||
path: '/parent_dir'
|
||||
file_type: .directory
|
||||
size: 0
|
||||
mode: 0o755
|
||||
@@ -124,7 +94,6 @@ fn test_directory_with_parent() {
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 2
|
||||
name: 'child_dir'
|
||||
path: '/parent_dir/child_dir'
|
||||
file_type: .directory
|
||||
size: 0
|
||||
mode: 0o755
|
||||
|
||||
@@ -19,10 +19,6 @@ fn (f &File) get_metadata() vfs.Metadata {
|
||||
return f.metadata
|
||||
}
|
||||
|
||||
fn (f &File) get_path() string {
|
||||
return f.metadata.path
|
||||
}
|
||||
|
||||
// is_dir returns true if the entry is a directory
|
||||
pub fn (f &File) is_dir() bool {
|
||||
return f.metadata.file_type == .directory
|
||||
|
||||
@@ -30,7 +30,6 @@ fn test_file_get_metadata() {
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_file.txt'
|
||||
path: '/test_file.txt'
|
||||
file_type: .file
|
||||
size: 13
|
||||
mode: 0o644
|
||||
@@ -58,39 +57,11 @@ fn test_file_get_metadata() {
|
||||
assert retrieved_metadata.group == 'user'
|
||||
}
|
||||
|
||||
fn test_file_get_path() {
|
||||
// Create a file with metadata
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_file.txt'
|
||||
path: '/test_file.txt'
|
||||
file_type: .file
|
||||
size: 13
|
||||
mode: 0o644
|
||||
owner: 'user'
|
||||
group: 'user'
|
||||
created_at: 0
|
||||
modified_at: 0
|
||||
accessed_at: 0
|
||||
}
|
||||
|
||||
file := File{
|
||||
metadata: metadata
|
||||
parent_id: 0
|
||||
chunk_ids: []
|
||||
}
|
||||
|
||||
// Test get_path
|
||||
path := file.get_path()
|
||||
assert path == '/test_file.txt'
|
||||
}
|
||||
|
||||
fn test_file_is_file() {
|
||||
// Create a file with metadata
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_file.txt'
|
||||
path: '/test_file.txt'
|
||||
file_type: .file
|
||||
size: 13
|
||||
mode: 0o644
|
||||
@@ -118,7 +89,6 @@ fn test_file_write_read() {
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_file.txt'
|
||||
path: '/test_file.txt'
|
||||
file_type: .file
|
||||
size: 13
|
||||
mode: 0o644
|
||||
@@ -153,7 +123,6 @@ fn test_file_rename() {
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_file.txt'
|
||||
path: '/test_file.txt'
|
||||
file_type: .file
|
||||
size: 13
|
||||
mode: 0o644
|
||||
@@ -180,7 +149,6 @@ fn test_new_file() ! {
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_file.txt'
|
||||
path: '/test_file.txt'
|
||||
file_type: .file
|
||||
size: 13
|
||||
mode: 0o644
|
||||
@@ -202,7 +170,6 @@ fn test_new_file() ! {
|
||||
assert file.metadata.name == 'test_file.txt'
|
||||
assert file.metadata.file_type == .file
|
||||
assert file.metadata.size == 13
|
||||
assert file.get_path() == '/test_file.txt'
|
||||
}
|
||||
|
||||
fn test_copy_file() ! {
|
||||
@@ -210,7 +177,6 @@ fn test_copy_file() ! {
|
||||
original_metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'original.txt'
|
||||
path: '/original.txt'
|
||||
file_type: .file
|
||||
size: 13
|
||||
mode: 0o755
|
||||
@@ -231,7 +197,6 @@ fn test_copy_file() ! {
|
||||
copied_metadata := vfs_mod.Metadata{
|
||||
id: 2 // Different ID
|
||||
name: 'copied.txt'
|
||||
path: '/copied.txt'
|
||||
file_type: .file
|
||||
size: 13
|
||||
mode: 0o755
|
||||
|
||||
@@ -9,10 +9,6 @@ fn (e &FSEntry) get_metadata() vfs.Metadata {
|
||||
return e.metadata
|
||||
}
|
||||
|
||||
fn (e &FSEntry) get_path() string {
|
||||
return e.metadata.path
|
||||
}
|
||||
|
||||
fn (e &FSEntry) is_dir() bool {
|
||||
return e.metadata.file_type == .directory
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ fn test_fsentry_directory() {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_dir'
|
||||
path: '/test_dir'
|
||||
file_type: .directory
|
||||
size: 0
|
||||
mode: 0o755
|
||||
@@ -29,7 +28,6 @@ fn test_fsentry_directory() {
|
||||
assert entry.get_metadata().id == 1
|
||||
assert entry.get_metadata().name == 'test_dir'
|
||||
assert entry.get_metadata().file_type == .directory
|
||||
assert entry.get_path() == '/test_dir'
|
||||
assert entry.is_dir() == true
|
||||
assert entry.is_file() == false
|
||||
assert entry.is_symlink() == false
|
||||
@@ -41,7 +39,6 @@ fn test_fsentry_file() {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: 2
|
||||
name: 'test_file.txt'
|
||||
path: '/test_file.txt'
|
||||
file_type: .file
|
||||
size: 13
|
||||
mode: 0o644
|
||||
@@ -62,7 +59,6 @@ fn test_fsentry_file() {
|
||||
assert entry.get_metadata().id == 2
|
||||
assert entry.get_metadata().name == 'test_file.txt'
|
||||
assert entry.get_metadata().file_type == .file
|
||||
assert entry.get_path() == '/test_file.txt'
|
||||
assert entry.is_dir() == false
|
||||
assert entry.is_file() == true
|
||||
assert entry.is_symlink() == false
|
||||
@@ -74,7 +70,6 @@ fn test_fsentry_symlink() {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: 3
|
||||
name: 'test_link'
|
||||
path: '/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
@@ -95,7 +90,6 @@ fn test_fsentry_symlink() {
|
||||
assert entry.get_metadata().id == 3
|
||||
assert entry.get_metadata().name == 'test_link'
|
||||
assert entry.get_metadata().file_type == .symlink
|
||||
assert entry.get_path() == '/test_link'
|
||||
assert entry.is_dir() == false
|
||||
assert entry.is_file() == false
|
||||
assert entry.is_symlink() == true
|
||||
@@ -107,7 +101,6 @@ fn test_fsentry_match() {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_dir'
|
||||
path: '/test_dir'
|
||||
file_type: .directory
|
||||
size: 0
|
||||
mode: 0o755
|
||||
@@ -125,7 +118,6 @@ fn test_fsentry_match() {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: 2
|
||||
name: 'test_file.txt'
|
||||
path: '/test_file.txt'
|
||||
file_type: .file
|
||||
size: 13
|
||||
mode: 0o644
|
||||
@@ -143,7 +135,6 @@ fn test_fsentry_match() {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: 3
|
||||
name: 'test_link'
|
||||
path: '/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
|
||||
@@ -26,10 +26,6 @@ fn (s &Symlink) get_metadata() vfs.Metadata {
|
||||
return s.metadata
|
||||
}
|
||||
|
||||
fn (s &Symlink) get_path() string {
|
||||
return s.metadata.path
|
||||
}
|
||||
|
||||
// is_dir returns true if the entry is a directory
|
||||
pub fn (self &Symlink) is_dir() bool {
|
||||
return self.metadata.file_type == .directory
|
||||
|
||||
@@ -7,7 +7,6 @@ fn test_symlink_get_metadata() {
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_link'
|
||||
path: '/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
@@ -35,39 +34,11 @@ fn test_symlink_get_metadata() {
|
||||
assert retrieved_metadata.group == 'user'
|
||||
}
|
||||
|
||||
fn test_symlink_get_path() {
|
||||
// Create a symlink with metadata
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_link'
|
||||
path: '/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
owner: 'user'
|
||||
group: 'user'
|
||||
created_at: 0
|
||||
modified_at: 0
|
||||
accessed_at: 0
|
||||
}
|
||||
|
||||
symlink := Symlink{
|
||||
metadata: metadata
|
||||
target: '/path/to/target'
|
||||
parent_id: 0
|
||||
}
|
||||
|
||||
// Test get_path
|
||||
path := symlink.get_path()
|
||||
assert path == '/test_link'
|
||||
}
|
||||
|
||||
fn test_symlink_is_symlink() {
|
||||
// Create a symlink with metadata
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_link'
|
||||
path: '/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
@@ -95,7 +66,6 @@ fn test_symlink_update_target() ! {
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_link'
|
||||
path: '/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
@@ -122,7 +92,6 @@ fn test_symlink_get_target() ! {
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_link'
|
||||
path: '/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
@@ -149,7 +118,6 @@ fn test_symlink_with_parent() {
|
||||
metadata := vfs_mod.Metadata{
|
||||
id: 2
|
||||
name: 'test_link'
|
||||
path: '/parent_dir/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
|
||||
@@ -16,15 +16,8 @@ pub fn (mut fs DatabaseVFS) directory_mkdir(mut dir Directory, name_ string) !&D
|
||||
}
|
||||
}
|
||||
|
||||
path := if dir.metadata.path == '/' {
|
||||
'/${name}'
|
||||
} else {
|
||||
"/${dir.metadata.path.trim('/')}/${name}"
|
||||
}
|
||||
|
||||
new_dir := fs.new_directory(
|
||||
name: name,
|
||||
path: path
|
||||
parent_id: dir.metadata.id
|
||||
)!
|
||||
dir.children << new_dir.metadata.id
|
||||
@@ -35,7 +28,6 @@ pub fn (mut fs DatabaseVFS) directory_mkdir(mut dir Directory, name_ string) !&D
|
||||
pub struct NewDirectory {
|
||||
pub:
|
||||
name string @[required] // name of file or directory
|
||||
path string @[required] // path of file or directory
|
||||
mode u32 = 0o755 // file permissions
|
||||
owner string = 'user'
|
||||
group string = 'user'
|
||||
@@ -47,15 +39,15 @@ pub:
|
||||
pub fn (mut fs DatabaseVFS) new_directory(dir NewDirectory) !&Directory {
|
||||
d := Directory{
|
||||
parent_id: dir.parent_id
|
||||
metadata: fs.new_metadata(NewMetadata{
|
||||
metadata: vfs.new_metadata(
|
||||
id: fs.get_next_id()
|
||||
name: dir.name
|
||||
path: dir.path
|
||||
mode: dir.mode
|
||||
owner: dir.owner
|
||||
group: dir.group
|
||||
size: u64(0)
|
||||
file_type: .directory
|
||||
})
|
||||
)
|
||||
children: dir.children
|
||||
}
|
||||
// Save new directory to DB
|
||||
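A hedged sketch of the child-path rule that directory_mkdir and directory_touch use above: children of the root keep a single leading slash, everything else joins the trimmed parent path and the name. The sample names are illustrative.

fn child_path(parent_path string, name string) string {
	return if parent_path == '/' {
		'/${name}'
	} else {
		"/${parent_path.trim('/')}/${name}"
	}
}

fn main() {
	assert child_path('/', 'docs') == '/docs'
	assert child_path('/docs/', 'readme.md') == '/docs/readme.md'
	println(child_path('/a/b', 'c')) // /a/b/c
}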
@@ -100,12 +92,6 @@ pub fn (mut fs DatabaseVFS) directory_touch(mut dir Directory, name_ string) !&F
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
path := if dir.metadata.path == '/' {
|
||||
'/${name}'
|
||||
} else {
|
||||
"/${dir.metadata.path.trim('/')}/${name}"
|
||||
}
|
||||
|
||||
// Create new file with correct parent_id
|
||||
mut file_id := fs.save_file(File{
|
||||
@@ -113,7 +99,6 @@ pub fn (mut fs DatabaseVFS) directory_touch(mut dir Directory, name_ string) !&F
|
||||
metadata: vfs.Metadata {
|
||||
id: fs.get_next_id()
|
||||
name: name
|
||||
path: path
|
||||
file_type: .file
|
||||
created_at: time.now().unix()
|
||||
modified_at: time.now().unix()
|
||||
@@ -148,7 +133,9 @@ pub fn (mut fs DatabaseVFS) directory_rm(mut dir Directory, name string) ! {
|
||||
}
|
||||
|
||||
// get entry from db_metadata
|
||||
metadata_bytes := fs.db_metadata.get(fs.get_database_id(entry.metadata.id)!) or { return error('Failed to delete entry: ${err}') }
|
||||
metadata_bytes := fs.db_metadata.get(
|
||||
fs.id_table[entry.metadata.id] or { return error('Failed to delete entry') }
|
||||
) or { return error('Failed to delete entry: ${err}') }
|
||||
|
||||
// Handle file data deletion if it's a file
|
||||
if entry is File {
|
||||
@@ -157,13 +144,18 @@ pub fn (mut fs DatabaseVFS) directory_rm(mut dir Directory, name string) ! {
|
||||
// delete file chunks in data_db
|
||||
for id in file.chunk_ids {
|
||||
log.debug('[DatabaseVFS] Deleting chunk ${id}')
|
||||
fs.db_data.delete(id)!
|
||||
fs.db_data.delete(id) or {
|
||||
log.error('Failed to delete chunk ${id}: ${err}')
|
||||
return error('Failed to delete chunk ${id}: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
log.debug('[DatabaseVFS] Deleting file metadata ${file.metadata.id}')
|
||||
}
|
||||
|
||||
fs.db_metadata.delete(fs.get_database_id(entry.metadata.id)!) or { return error('Failed to delete entry: ${err}') }
|
||||
fs.db_metadata.delete(
|
||||
fs.id_table[entry.metadata.id] or { return error('Failed to delete entry') }
|
||||
) or { return error('Failed to delete entry: ${err}') }
|
||||
|
||||
// Update children list - make sure we don't remove the wrong child
|
||||
dir.children = dir.children.filter(it != entry.metadata.id).clone()
|
||||
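A tiny self-contained example of the children-list update above: filter() produces a new array without the removed id and clone() keeps it independent of the original. The ids are placeholders.

fn main() {
	mut children := [u32(3), 5, 9]
	removed_id := u32(5)
	children = children.filter(it != removed_id).clone()
	assert children == [u32(3), 9]
	println(children)
}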
@@ -202,16 +194,10 @@ pub fn (mut fs DatabaseVFS) directory_move(dir_ Directory, args_ MoveDirArgs) !&
|
||||
found = true
|
||||
child_id_to_remove = child_id
|
||||
|
||||
new_path := if args.dst_parent_dir.metadata.path == '/' {
|
||||
'/${args.dst_entry_name}'
|
||||
} else {
|
||||
"/${args.dst_parent_dir.metadata.path.trim('/')}/${args.dst_entry_name}"
|
||||
}
|
||||
// Handle both files and directories
|
||||
if entry is File {
|
||||
mut file_entry := entry as File
|
||||
file_entry.metadata.name = args.dst_entry_name
|
||||
file_entry.metadata.path = new_path
|
||||
file_entry.metadata.modified_at = time.now().unix()
|
||||
file_entry.parent_id = args.dst_parent_dir.metadata.id
|
||||
|
||||
@@ -233,7 +219,6 @@ pub fn (mut fs DatabaseVFS) directory_move(dir_ Directory, args_ MoveDirArgs) !&
|
||||
// Handle directory
|
||||
mut dir_entry := entry as Directory
|
||||
dir_entry.metadata.name = args.dst_entry_name
|
||||
dir_entry.metadata.path = new_path
|
||||
dir_entry.metadata.modified_at = time.now().unix()
|
||||
dir_entry.parent_id = args.dst_parent_dir.metadata.id
|
||||
|
||||
@@ -270,7 +255,6 @@ fn (mut fs DatabaseVFS) move_children_recursive(mut dir Directory) ! {
|
||||
for child in dir.children {
|
||||
if mut child_entry := fs.load_entry(child) {
|
||||
child_entry.parent_id = dir.metadata.id
|
||||
child_entry.metadata.path = '${dir.metadata.path}/${child_entry.metadata.name}'
|
||||
|
||||
if child_entry is Directory {
|
||||
// Recursively move subdirectories
|
||||
@@ -304,15 +288,42 @@ pub fn (mut fs DatabaseVFS) directory_copy(mut dir Directory, args_ CopyDirArgs)
|
||||
for child_id in dir.children {
|
||||
if mut entry := fs.load_entry(child_id) {
|
||||
if entry.metadata.name == args.src_entry_name {
|
||||
if entry is File {
|
||||
return error('${args.src_entry_name} is a file, not a directory')
|
||||
}
|
||||
|
||||
if entry is Symlink {
|
||||
return error('${args.src_entry_name} is a symlink, not a directory')
|
||||
}
|
||||
|
||||
found = true
|
||||
if entry is File {
|
||||
mut file_entry := entry as File
|
||||
|
||||
mut file_data := []u8{}
|
||||
// log.debug('[DatabaseVFS] Got database chunk ids ${chunk_ids}')
|
||||
for id in file_entry.chunk_ids {
|
||||
// there were chunk ids stored with file so file has data
|
||||
if chunk_bytes := fs.db_data.get(id) {
|
||||
file_data << chunk_bytes
|
||||
} else {
|
||||
return error('Failed to fetch file data: ${err}')
|
||||
}
|
||||
}
|
||||
|
||||
mut new_file := File{
|
||||
metadata: Metadata{...file_entry.metadata,
|
||||
id: fs.get_next_id()
|
||||
name: args.dst_entry_name
|
||||
}
|
||||
parent_id: args.dst_parent_dir.metadata.id
|
||||
}
|
||||
fs.save_file(new_file, file_data)!
|
||||
args.dst_parent_dir.children << new_file.metadata.id
|
||||
fs.save_entry(args.dst_parent_dir)!
|
||||
return args.dst_parent_dir
|
||||
} else if entry is Symlink {
|
||||
mut symlink_entry := entry as Symlink
|
||||
mut new_symlink := Symlink{...symlink_entry,
|
||||
parent_id: args.dst_parent_dir.metadata.id
|
||||
}
|
||||
args.dst_parent_dir.children << new_symlink.metadata.id
|
||||
fs.save_entry(args.dst_parent_dir)!
|
||||
return args.dst_parent_dir
|
||||
}
|
||||
|
||||
mut src_dir := entry as Directory
|
||||
|
||||
// Make sure we have the latest version of the source directory
|
||||
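The copy path above leans on V's struct-update syntax to clone metadata while overriding only a few fields. The sketch below shows the same pattern on a stand-in struct; Meta is not the vfs Metadata type and the values are invented.

struct Meta {
	id   u32
	name string
	path string
	size u64
}

fn main() {
	src := Meta{
		id:   1
		name: 'original.txt'
		path: '/original.txt'
		size: 13
	}
	// keep every field from src, override only id, name and path
	dst := Meta{
		...src
		id:   2
		name: 'copied.txt'
		path: '/copied.txt'
	}
	assert dst.size == src.size && dst.id == 2
	println(dst)
}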
@@ -379,7 +390,6 @@ fn (mut fs DatabaseVFS) copy_children_recursive(mut src_dir Directory, mut dst_d
|
||||
metadata: Metadata{
|
||||
...entry_.metadata
|
||||
id: fs.get_next_id()
|
||||
path: '${dst_dir.metadata.path}/${entry_.metadata.name}'
|
||||
}
|
||||
children: []u32{}
|
||||
parent_id: dst_dir.metadata.id
|
||||
@@ -395,7 +405,6 @@ fn (mut fs DatabaseVFS) copy_children_recursive(mut src_dir Directory, mut dst_d
|
||||
metadata: vfs.Metadata{
|
||||
...entry_.metadata
|
||||
id: fs.get_next_id()
|
||||
path: '${dst_dir.metadata.path}/${entry_.metadata.name}'
|
||||
}
|
||||
chunk_ids: entry_.chunk_ids
|
||||
parent_id: dst_dir.metadata.id
|
||||
@@ -409,7 +418,6 @@ fn (mut fs DatabaseVFS) copy_children_recursive(mut src_dir Directory, mut dst_d
|
||||
metadata: Metadata{
|
||||
...entry_.metadata
|
||||
id: fs.get_next_id()
|
||||
path: '${dst_dir.metadata.path}/${entry_.metadata.name}'
|
||||
}
|
||||
target: entry_.target
|
||||
parent_id: dst_dir.metadata.id
|
||||
@@ -436,21 +444,18 @@ pub fn (mut fs DatabaseVFS) directory_rename(dir Directory, src_name string, dst
|
||||
if mut entry is Directory {
|
||||
// Handle directory rename
|
||||
entry.metadata.name = dst_name
|
||||
entry.metadata.path = "${entry.metadata.path.all_before_last('/')}/${dst_name}"
|
||||
entry.metadata.modified_at = time.now().unix()
|
||||
fs.save_entry(entry)!
|
||||
return entry
|
||||
} else if mut entry is File {
|
||||
// Handle file rename
|
||||
entry.metadata.name = dst_name
|
||||
entry.metadata.path = "${entry.metadata.path.all_before_last('/')}/${dst_name}"
|
||||
entry.metadata.modified_at = time.now().unix()
|
||||
fs.save_entry(entry)!
|
||||
return entry
|
||||
} else if mut entry is Symlink {
|
||||
// Handle symlink rename
|
||||
entry.metadata.name = dst_name
|
||||
entry.metadata.path = "${entry.metadata.path.all_before_last('/')}/${dst_name}"
|
||||
entry.metadata.modified_at = time.now().unix()
|
||||
fs.save_entry(entry)!
|
||||
return entry
|
||||
|
||||
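A minimal illustration of the rename rule applied identically to directories, files and symlinks above: keep everything before the last '/' and swap in the new name. The paths are examples only.

fn renamed_path(old_path string, dst_name string) string {
	return "${old_path.all_before_last('/')}/${dst_name}"
}

fn main() {
	assert renamed_path('/docs/old.txt', 'new.txt') == '/docs/new.txt'
	println(renamed_path('/a/b/c', 'd')) // /a/b/d
}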
@@ -12,12 +12,10 @@ fn setup_fs() !(&DatabaseVFS, string) {
|
||||
// Create separate databases for data and metadata
|
||||
mut db_data := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'data')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
mut db_metadata := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'metadata')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
// Create VFS with separate databases for data and metadata
|
||||
@@ -38,7 +36,6 @@ fn test_new_directory() ! {
|
||||
// Test creating a new directory
|
||||
mut dir := fs.new_directory(
|
||||
name: 'test_dir'
|
||||
path: '/test_dir'
|
||||
)!
|
||||
|
||||
// Verify the directory
|
||||
@@ -60,7 +57,6 @@ fn test_new_directory_with_custom_permissions() ! {
|
||||
// Test creating a directory with custom permissions
|
||||
mut dir := fs.new_directory(
|
||||
name: 'custom_dir'
|
||||
path: '/custom_dir'
|
||||
mode: 0o700
|
||||
owner: 'admin'
|
||||
group: 'staff'
|
||||
@@ -86,7 +82,6 @@ fn test_copy_directory() ! {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'original_dir'
|
||||
path: '/original_dir'
|
||||
file_type: .directory
|
||||
size: 0
|
||||
mode: 0o755
|
||||
@@ -123,7 +118,6 @@ fn test_directory_mkdir() ! {
|
||||
// Create a parent directory
|
||||
mut parent_dir := fs.new_directory(
|
||||
name: 'parent_dir'
|
||||
path: '/parent_dir'
|
||||
)!
|
||||
|
||||
// Test creating a subdirectory
|
||||
@@ -155,7 +149,6 @@ fn test_directory_touch() ! {
|
||||
// Create a parent directory
|
||||
mut parent_dir := fs.new_directory(
|
||||
name: 'parent_dir'
|
||||
path: '/parent_dir'
|
||||
)!
|
||||
|
||||
// Test creating a file
|
||||
@@ -196,7 +189,6 @@ fn test_directory_rm() ! {
|
||||
// Create a parent directory
|
||||
mut parent_dir := fs.new_directory(
|
||||
name: 'parent_dir'
|
||||
path: '/parent_dir'
|
||||
)!
|
||||
|
||||
// Create a file to remove
|
||||
@@ -244,7 +236,6 @@ fn test_directory_rename() ! {
|
||||
// Create a parent directory
|
||||
mut parent_dir := fs.new_directory(
|
||||
name: 'parent_dir'
|
||||
path: '/parent_dir'
|
||||
)!
|
||||
|
||||
// Create a subdirectory to rename
|
||||
@@ -273,7 +264,6 @@ fn test_directory_children() ! {
|
||||
// Create a parent directory
|
||||
mut parent_dir := fs.new_directory(
|
||||
name: 'parent_dir'
|
||||
path: '/parent_dir'
|
||||
)!
|
||||
|
||||
// Initially, the directory should be empty
|
||||
@@ -317,8 +307,8 @@ fn test_directory_move() ! {
|
||||
}
|
||||
|
||||
// Create source and destination parent directories
|
||||
mut src_parent := fs.new_directory(name: 'src_parent', path: '/src_parent')!
|
||||
mut dst_parent := fs.new_directory(name: 'dst_parent', path: '/dst_parent')!
|
||||
mut src_parent := fs.new_directory(name: 'src_parent')!
|
||||
mut dst_parent := fs.new_directory(name: 'dst_parent')!
|
||||
|
||||
// Create a directory to move with nested structure
|
||||
mut dir_to_move := fs.directory_mkdir(mut src_parent, 'dir_to_move')!
|
||||
@@ -400,8 +390,8 @@ fn test_directory_copy() ! {
|
||||
}
|
||||
|
||||
// Create source and destination parent directories
|
||||
mut src_parent := fs.new_directory(name: 'src_parent', path: '/src_parent')!
|
||||
mut dst_parent := fs.new_directory(name: 'dst_parent', path: '/dst_parent')!
|
||||
mut src_parent := fs.new_directory(name: 'src_parent')!
|
||||
mut dst_parent := fs.new_directory(name: 'dst_parent')!
|
||||
|
||||
// Create a directory to copy with nested structure
|
||||
mut dir_to_copy := fs.directory_mkdir(mut src_parent, 'dir_to_copy')!
|
||||
@@ -493,7 +483,6 @@ fn test_directory_add_symlink() ! {
|
||||
// Create a parent directory
|
||||
mut parent_dir := fs.new_directory(
|
||||
name: 'parent_dir'
|
||||
path: '/parent_dir'
|
||||
)!
|
||||
|
||||
// Create a symlink
|
||||
@@ -501,7 +490,6 @@ fn test_directory_add_symlink() ! {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: fs.get_next_id()
|
||||
name: 'test_link'
|
||||
path: '/parent_dir/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
|
||||
@@ -1,90 +0,0 @@
|
||||
module vfs_db
|
||||
|
||||
import freeflowuniverse.herolib.vfs
|
||||
import os
|
||||
import time
|
||||
|
||||
// Implementation of VFSImplementation interface
|
||||
pub fn (mut fs DatabaseVFS) root_get_as_dir() !&Directory {
|
||||
// Try to load root directory from DB if it exists
|
||||
if fs.root_id in fs.id_table {
|
||||
if data := fs.db_metadata.get(fs.get_database_id(fs.root_id)!) {
|
||||
mut loaded_root := decode_directory(data) or {
|
||||
return error('Failed to decode root directory: ${err}')
|
||||
}
|
||||
return &loaded_root
|
||||
}
|
||||
}
|
||||
|
||||
// Create and save new root directory
|
||||
mut myroot := Directory{
|
||||
metadata: vfs.Metadata{
|
||||
id: fs.get_next_id()
|
||||
file_type: .directory
|
||||
name: ''
|
||||
path: '/'
|
||||
created_at: time.now().unix()
|
||||
modified_at: time.now().unix()
|
||||
accessed_at: time.now().unix()
|
||||
mode: 0o755 // default directory permissions
|
||||
owner: 'user' // TODO: get from system
|
||||
group: 'user' // TODO: get from system
|
||||
}
|
||||
parent_id: 0
|
||||
}
|
||||
fs.root_id = fs.save_entry(myroot)!
|
||||
return &myroot
|
||||
}
|
||||
|
||||
fn (mut self DatabaseVFS) get_entry(path_ string) !FSEntry {
|
||||
path := '${path_.trim_left('/').trim_right('/')}'
|
||||
if path == '/' || path == '' || path == '.' {
|
||||
return FSEntry(self.root_get_as_dir()!)
|
||||
}
|
||||
|
||||
parts := path.split('/')
|
||||
mut parent_dir := *self.root_get_as_dir()!
|
||||
for i, part in parts {
|
||||
entry := self.directory_get_entry(parent_dir, part) or {
|
||||
return error('Failed to get entry ${err}')
|
||||
}
|
||||
if i == parts.len - 1 {
|
||||
// last part, means entry is found
|
||||
return entry
|
||||
}
|
||||
if entry is Directory {
|
||||
parent_dir = entry
|
||||
} else {
|
||||
return error('Failed to get entry, expected dir')
|
||||
}
|
||||
}
|
||||
// mut current := *self.root_get_as_dir()!
|
||||
// return self.directory_get_entry(mut current, path) or {
|
||||
return error('Path not found: ${path}')
|
||||
// }
|
||||
}
|
||||
|
||||
// internal function to get an entry of some name from a directory
|
||||
fn (mut self DatabaseVFS) directory_get_entry(dir Directory, name string) ?FSEntry {
|
||||
// mut children := self.directory_children(mut dir, false) or {
|
||||
// panic('this should never happen')
|
||||
// }
|
||||
for child_id in dir.children {
|
||||
if entry := self.load_entry(child_id) {
|
||||
if entry.metadata.name == name {
|
||||
return entry
|
||||
}
|
||||
} else {
|
||||
panic('Filesystem is corrupted, this should never happen ${err}')
|
||||
}
|
||||
}
|
||||
return none
|
||||
}
|
||||
|
||||
fn (mut self DatabaseVFS) get_directory(path string) !&Directory {
|
||||
mut entry := self.get_entry(path)!
|
||||
if mut entry is Directory {
|
||||
return &entry
|
||||
}
|
||||
return error('Not a directory: ${path}')
|
||||
}
|
||||
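A simplified, self-contained sketch of the lookup loop in get_entry above: trim outer slashes, split on '/', and walk one level per part, requiring every intermediate component to be a directory. The map-based tree stands in for directory_get_entry/load_entry and is purely illustrative.

fn main() {
	tree := {
		'docs':           'dir'
		'docs/readme.md': 'file'
	}
	path := '/docs/readme.md/'
	clean := path.trim_left('/').trim_right('/')
	parts := clean.split('/')
	mut walked := ''
	for i, part in parts {
		walked = if walked == '' { part } else { '${walked}/${part}' }
		kind := tree[walked] or { panic('not found: ${walked}') }
		if i < parts.len - 1 && kind != 'dir' {
			panic('expected a directory at ${walked}')
		}
	}
	println('resolved ${clean}')
}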
@@ -2,6 +2,7 @@ module vfs_db
|
||||
|
||||
import freeflowuniverse.herolib.vfs
|
||||
import freeflowuniverse.herolib.core.texttools
|
||||
import arrays
|
||||
import log
|
||||
import os
|
||||
import time
|
||||
@@ -16,30 +17,27 @@ pub fn (mut self DatabaseVFS) file_create(path_ string) !vfs.FSEntry {
|
||||
// Get parent directory
|
||||
parent_path := os.dir(path)
|
||||
file_name := os.base(path)
|
||||
|
||||
mut parent_dir := self.get_directory(parent_path)!
|
||||
log.info('[DatabaseVFS] Creating file ${file_name} in ${parent_path}')
|
||||
log.info('[DatabaseVFS] Creating file ${file_name} in ${parent_path} for ${path_}')
|
||||
mut parent_dir := self.get_directory(parent_path) or {
|
||||
return error('Failed to get parent directory ${parent_path}: ${err}')
|
||||
}
|
||||
entry := self.directory_touch(mut parent_dir, file_name)!
|
||||
log.info('[DatabaseVFS] Created file ${file_name} in ${parent_path}')
|
||||
return entry
|
||||
}
|
||||
|
||||
pub fn (mut self DatabaseVFS) file_read(path_ string) ![]u8 {
|
||||
path := '/${path_.trim_left('/').trim_right('/')}'
|
||||
path := texttools.path_fix(path_)
|
||||
log.info('[DatabaseVFS] Reading file ${path}')
|
||||
mut file := self.get_entry(path)!
|
||||
log.info('[DatabaseVFS] Got file ${path}')
|
||||
if mut file is File {
|
||||
metadata := self.db_metadata.get(self.get_database_id(file.metadata.id)!) or {
|
||||
return error('Failed to get file metadata ${err}')
|
||||
}
|
||||
mut decoded_file := decode_file_metadata(metadata) or { return error('Failed to decode file: ${err}') }
|
||||
log.debug('[DatabaseVFS] Decoded file chunk ids ${decoded_file.chunk_ids}')
|
||||
mut file_data := []u8{}
|
||||
// log.debug('[DatabaseVFS] Got database chunk ids ${chunk_ids}')
|
||||
for id in decoded_file.chunk_ids {
|
||||
log.debug('[DatabaseVFS] Getting chunk ${id}')
|
||||
// there were chunk ids stored with file so file has data
|
||||
if chunk_bytes := self.db_data.get(id) {
|
||||
mut file_data := []u8{}
|
||||
// log.debug('[DatabaseVFS] Got database chunk ids ${chunk_ids}')
|
||||
for id in file.chunk_ids {
|
||||
log.debug('[DatabaseVFS] Getting chunk ${id}')
|
||||
// there were chunk ids stored with file so file has data
|
||||
if chunk_bytes := self.db_data.get(id) {
|
||||
file_data << chunk_bytes
|
||||
} else {
|
||||
return error('Failed to fetch file data: ${err}')
|
||||
@@ -51,17 +49,76 @@ pub fn (mut self DatabaseVFS) file_read(path_ string) ![]u8 {
|
||||
}
|
||||
|
||||
pub fn (mut self DatabaseVFS) file_write(path_ string, data []u8) ! {
|
||||
path := os.abs_path(path_)
|
||||
|
||||
path := texttools.path_fix(path_)
|
||||
if mut entry := self.get_entry(path) {
|
||||
if mut entry is File {
|
||||
log.info('[DatabaseVFS] Writing ${data.len} bytes to ${path}')
|
||||
self.save_file(entry, data)!
|
||||
self.save_file(entry, data) or {
|
||||
return error('Failed to save file: ${err}')
|
||||
}
|
||||
} else {
|
||||
panic('handle error')
|
||||
}
|
||||
} else {
|
||||
self.file_create(path)!
|
||||
} else {
|
||||
self.file_create(path) or {
|
||||
return error('Failed to create file: ${err}')
|
||||
}
|
||||
self.file_write(path, data)!
|
||||
}
|
||||
}
|
||||
|
||||
pub fn (mut self DatabaseVFS) file_concatenate(path_ string, data []u8) ! {
|
||||
path := texttools.path_fix(path_)
|
||||
if data.len == 0 {
|
||||
return // Nothing to append
|
||||
}
|
||||
|
||||
if mut entry := self.get_entry(path) {
|
||||
if mut entry is File {
|
||||
log.info('[DatabaseVFS] Appending ${data.len} bytes to ${path}')
|
||||
|
||||
// Split new data into chunks of 64 KB
|
||||
chunks := arrays.chunk(data, (64 * 1024) - 1)
|
||||
mut chunk_ids := entry.chunk_ids.clone() // Start with existing chunk IDs
|
||||
|
||||
// Add new chunks
|
||||
for chunk in chunks {
|
||||
chunk_id := self.db_data.set(data: chunk) or {
|
||||
return error('Failed to save file data chunk: ${err}')
|
||||
}
|
||||
chunk_ids << chunk_id
|
||||
log.debug('[DatabaseVFS] Added chunk ${chunk_id} to ${path}')
|
||||
}
|
||||
|
||||
// Update the file with new chunk IDs and updated size
|
||||
updated_file := File{
|
||||
metadata: vfs.Metadata{
|
||||
...entry.metadata
|
||||
size: entry.metadata.size + u64(data.len)
|
||||
modified_at: time.now().unix()
|
||||
}
|
||||
chunk_ids: chunk_ids
|
||||
parent_id: entry.parent_id
|
||||
}
|
||||
|
||||
// Encode the file with all its metadata
|
||||
metadata_bytes := updated_file.encode()
|
||||
|
||||
// Save the metadata_bytes to metadata_db
|
||||
metadata_db_id := self.db_metadata.set(data: metadata_bytes) or {
|
||||
return error('Failed to save file metadata on id:${entry.metadata.id}: ${err}')
|
||||
}
|
||||
|
||||
self.id_table[entry.metadata.id] = metadata_db_id
|
||||
} else {
|
||||
return error('Not a file: ${path}')
|
||||
}
|
||||
} else {
|
||||
// If file doesn't exist, create it first
|
||||
self.file_create(path) or {
|
||||
return error('Failed to create file: ${err}')
|
||||
}
|
||||
// Then write data to it
|
||||
self.file_write(path, data)!
|
||||
}
|
||||
}
|
||||
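A hedged sketch of the append flow in file_concatenate above: new data is chunked, the resulting chunk ids extend the file's existing list, and the recorded size grows by data.len. The ids below are placeholders for whatever db_data.set() would return.

import arrays

fn main() {
	mut chunk_ids := [u32(10), 11] // chunk ids already stored for the file (illustrative)
	mut size := u64(130_000)
	data := []u8{len: 70_000} // bytes being appended
	for i, _ in arrays.chunk(data, (64 * 1024) - 1) {
		// stand-in for the id that db_data.set(data: chunk) would return
		chunk_ids << u32(100 + i)
	}
	size += u64(data.len)
	assert chunk_ids.len == 4 // two existing chunks plus two new ones (65535 + 4465 bytes)
	println('file now spans ${chunk_ids.len} chunks and ${size} bytes')
}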
@@ -123,7 +180,6 @@ pub fn (mut self DatabaseVFS) link_create(target_path string, link_path string)
|
||||
metadata: vfs.Metadata{
|
||||
id: self.get_next_id()
|
||||
name: link_name
|
||||
path: link_path
|
||||
file_type: .symlink
|
||||
created_at: time.now().unix()
|
||||
modified_at: time.now().unix()
|
||||
@@ -168,12 +224,13 @@ pub fn (mut self DatabaseVFS) link_delete(path string) ! {
|
||||
return true
|
||||
}
|
||||
// self.print() or {panic(err)}
|
||||
log.info('[DatabaseVFS] Checking path exists ${path}')
|
||||
log.debug('[DatabaseVFS] Checking path exists ${path}')
|
||||
self.get_entry(path) or { return false }
|
||||
return true
|
||||
}
|
||||
|
||||
pub fn (mut fs DatabaseVFS) get(path string) !vfs.FSEntry {
|
||||
pub fn (mut fs DatabaseVFS) get(path_ string) !vfs.FSEntry {
|
||||
path := texttools.path_fix(path_)
|
||||
log.info('[DatabaseVFS] Getting filesystem entry ${path}')
|
||||
return fs.get_entry(path)!
|
||||
}
|
||||
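A short usage sketch, assuming the constructor argument order and method names used by the test helpers in this change set (ourdb.new, the module's new(), file_write, exists, file_read); the directory, file name and contents are illustrative.

import freeflowuniverse.herolib.data.ourdb
import freeflowuniverse.herolib.vfs.vfs_db
import os

fn main() {
	dir := os.join_path(os.temp_dir(), 'vfs_db_demo') // illustrative location
	os.mkdir_all(dir) or { panic(err) }
	mut db_data := ourdb.new(path: os.join_path(dir, 'data')) or { panic(err) }
	mut db_meta := ourdb.new(path: os.join_path(dir, 'metadata')) or { panic(err) }
	// argument order follows the setup_vfs helpers in this change set
	mut fs := vfs_db.new(mut db_data, mut db_meta) or { panic(err) }
	fs.file_write('/hello.txt', 'hello world'.bytes()) or { panic(err) }
	assert fs.exists('/hello.txt')
	content := fs.file_read('/hello.txt') or { panic(err) }
	println(content.bytestr())
}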
@@ -188,7 +245,9 @@ pub fn (mut self DatabaseVFS) rename(old_path string, new_path string) !vfs.FSEn
|
||||
return self.directory_rename(src_parent_dir, src_name, dst_name)!
|
||||
}
|
||||
|
||||
pub fn (mut self DatabaseVFS) copy(src_path string, dst_path string) !vfs.FSEntry {
|
||||
pub fn (mut self DatabaseVFS) copy(src_path_ string, dst_path_ string) !vfs.FSEntry {
|
||||
src_path := texttools.path_fix_absolute(src_path_)
|
||||
dst_path := texttools.path_fix_absolute(dst_path_)
|
||||
log.info('[DatabaseVFS] Copying ${src_path} to ${dst_path}')
|
||||
src_parent_path := os.dir(src_path)
|
||||
dst_parent_path := os.dir(dst_path)
|
||||
@@ -220,13 +279,13 @@ pub fn (mut self DatabaseVFS) copy(src_path string, dst_path string) !vfs.FSEntr
|
||||
|
||||
// copy_file creates a copy of a file
|
||||
pub fn (mut self DatabaseVFS) copy_file(file File) !&File {
|
||||
log.info('[DatabaseVFS] Copying file ${file.metadata.path}')
|
||||
log.info('[DatabaseVFS] Copying file ${file.metadata.name}')
|
||||
|
||||
// Save the file with its metadata and data
|
||||
file_id := self.save_file(file, [])!
|
||||
self.save_file(file, [])!
|
||||
|
||||
// Load the file from the database
|
||||
mut entry := self.load_entry(file_id)!
|
||||
mut entry := self.load_entry(file.metadata.id)!
|
||||
if mut entry is File {
|
||||
return &entry
|
||||
}
|
||||
@@ -234,10 +293,9 @@ pub fn (mut self DatabaseVFS) copy_file(file File) !&File {
|
||||
}
|
||||
|
||||
pub fn (mut self DatabaseVFS) move(src_path string, dst_path string) !vfs.FSEntry {
|
||||
log.info('[DatabaseVFS] Moving ${src_path} to ${dst_path}')
|
||||
|
||||
src_parent_path := os.dir(src_path)
|
||||
dst_parent_path := os.dir(dst_path)
|
||||
log.info('[DatabaseVFS] Moving ${texttools.path_fix(src_path)} to ${texttools.path_fix(dst_path)}')
|
||||
src_parent_path := os.dir(texttools.path_fix_absolute(src_path))
|
||||
dst_parent_path := os.dir(texttools.path_fix_absolute(dst_path))
|
||||
|
||||
if !self.exists(src_parent_path) {
|
||||
return error('${src_parent_path} does not exist')
|
||||
@@ -275,7 +333,7 @@ pub fn (mut self DatabaseVFS) delete(path_ string) ! {
|
||||
mut parent_dir := self.get_directory(parent_path)!
|
||||
|
||||
self.directory_rm(mut parent_dir, file_name) or {
|
||||
log.error('[DatabaseVFS] Failed to remove ${file_name} from ${parent_dir.metadata.path}\n${err}')
|
||||
log.error('[DatabaseVFS] Failed to remove ${file_name} from ${parent_dir.metadata.name}\n${err}')
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,12 +11,10 @@ fn setup_vfs() !(&DatabaseVFS, string) {
|
||||
|
||||
mut db_data := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'data')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
mut db_metadata := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'metadata')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
mut vfs := new(mut db_data, mut db_metadata)!
|
||||
|
||||
@@ -12,12 +12,10 @@ fn setup_fs() !(&DatabaseVFS, string) {
|
||||
// Create separate databases for data and metadata
|
||||
mut db_data := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'data')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
mut db_metadata := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'metadata')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
// Create VFS with separate databases for data and metadata
|
||||
@@ -38,7 +36,6 @@ fn test_directory_print_empty() ! {
|
||||
// Create an empty directory
|
||||
mut dir := fs.new_directory(
|
||||
name: 'test_dir'
|
||||
path: '/test_dir'
|
||||
)!
|
||||
|
||||
// Test printing the empty directory
|
||||
@@ -57,7 +54,6 @@ fn test_directory_print_with_contents() ! {
|
||||
// Create a directory with various contents
|
||||
mut dir := fs.new_directory(
|
||||
name: 'test_dir'
|
||||
path: '/test_dir'
|
||||
)!
|
||||
|
||||
// Add a subdirectory
|
||||
@@ -71,7 +67,6 @@ fn test_directory_print_with_contents() ! {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: fs.get_next_id()
|
||||
name: 'test_link'
|
||||
path: '/test_dir/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
@@ -105,7 +100,6 @@ fn test_directory_printall_simple() ! {
|
||||
// Create a simple directory structure
|
||||
mut dir := fs.new_directory(
|
||||
name: 'root_dir'
|
||||
path: '/root_dir'
|
||||
)!
|
||||
|
||||
// Add a file
|
||||
@@ -128,7 +122,6 @@ fn test_directory_printall_nested() ! {
|
||||
// Create a nested directory structure
|
||||
mut root := fs.new_directory(
|
||||
name: 'root'
|
||||
path: '/root'
|
||||
)!
|
||||
|
||||
// Add a subdirectory
|
||||
@@ -151,7 +144,6 @@ fn test_directory_printall_nested() ! {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: fs.get_next_id()
|
||||
name: 'test_link'
|
||||
path: '/root/subdir1/subdir2/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
@@ -188,7 +180,6 @@ fn test_directory_printall_empty() ! {
|
||||
// Create an empty directory
|
||||
mut dir := fs.new_directory(
|
||||
name: 'empty_dir'
|
||||
path: '/empty_dir'
|
||||
)!
|
||||
|
||||
// Test printing the empty directory recursively
|
||||
@@ -12,12 +12,10 @@ fn setup_vfs() !(&DatabaseVFS, string) {
|
||||
// Create separate databases for data and metadata
|
||||
mut db_data := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'data')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
mut db_metadata := ourdb.new(
|
||||
path: os.join_path(test_data_dir, 'metadata')
|
||||
incremental_mode: false
|
||||
)!
|
||||
|
||||
// Create VFS with separate databases for data and metadata
|
||||
@@ -62,7 +60,6 @@ fn test_save_load_entry() ! {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: 1
|
||||
name: 'test_dir'
|
||||
path: '/test_dir'
|
||||
file_type: .directory
|
||||
size: 0
|
||||
mode: 0o755
|
||||
@@ -77,11 +74,10 @@ fn test_save_load_entry() ! {
|
||||
}
|
||||
|
||||
// Save the directory
|
||||
saved_id := vfs.save_entry(dir)!
|
||||
assert saved_id == 1
|
||||
vfs.save_entry(dir)!
|
||||
|
||||
// Load the directory
|
||||
loaded_entry := vfs.load_entry(1)!
|
||||
loaded_entry := vfs.load_entry(dir.metadata.id)!
|
||||
|
||||
// Verify it's the same directory
|
||||
loaded_dir := loaded_entry as Directory
|
||||
@@ -101,7 +97,6 @@ fn test_save_load_file_with_data() ! {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: 2
|
||||
name: 'test_file.txt'
|
||||
path: '/test_file.txt'
|
||||
file_type: .file
|
||||
size: 13
|
||||
mode: 0o644
|
||||
@@ -116,11 +111,10 @@ fn test_save_load_file_with_data() ! {
|
||||
}
|
||||
|
||||
// Save the file
|
||||
saved_id := vfs.save_entry(file)!
|
||||
assert saved_id == 2
|
||||
vfs.save_entry(file)!
|
||||
|
||||
// Load the file
|
||||
loaded_entry := vfs.load_entry(2)!
|
||||
loaded_entry := vfs.load_entry(file.metadata.id)!
|
||||
|
||||
// Verify it's the same file with the same data
|
||||
loaded_file := loaded_entry as File
|
||||
@@ -141,7 +135,6 @@ fn test_save_load_file_without_data() ! {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: 3
|
||||
name: 'empty_file.txt'
|
||||
path: '/empty_file.txt'
|
||||
file_type: .file
|
||||
size: 0
|
||||
mode: 0o644
|
||||
@@ -156,11 +149,10 @@ fn test_save_load_file_without_data() ! {
|
||||
}
|
||||
|
||||
// Save the file
|
||||
saved_id := vfs.save_entry(file)!
|
||||
assert saved_id == 3
|
||||
vfs.save_entry(file)!
|
||||
|
||||
// Load the file
|
||||
loaded_entry := vfs.load_entry(3)!
|
||||
loaded_entry := vfs.load_entry(file.metadata.id)!
|
||||
|
||||
// Verify it's the same file with empty data
|
||||
loaded_file := loaded_entry as File
|
||||
@@ -181,7 +173,6 @@ fn test_save_load_symlink() ! {
|
||||
metadata: vfs_mod.Metadata{
|
||||
id: 4
|
||||
name: 'test_link'
|
||||
path: '/test_link'
|
||||
file_type: .symlink
|
||||
size: 0
|
||||
mode: 0o777
|
||||
@@ -196,11 +187,10 @@ fn test_save_load_symlink() ! {
|
||||
}
|
||||
|
||||
// Save the symlink
|
||||
saved_id := vfs.save_entry(symlink)!
|
||||
assert saved_id == 4
|
||||
vfs.save_entry(symlink)!
|
||||
|
||||
// Load the symlink
|
||||
loaded_entry := vfs.load_entry(4)!
|
||||
loaded_entry := vfs.load_entry(symlink.metadata.id)!
|
||||
|
||||
// Verify it's the same symlink
|
||||
loaded_symlink := loaded_entry as Symlink
|
||||
@@ -220,6 +210,6 @@ fn test_load_nonexistent_entry() ! {
|
||||
if _ := vfs.load_entry(999) {
|
||||
assert false, 'Expected error when loading non-existent entry'
|
||||
} else {
|
||||
assert err.msg() == 'VFS ID 999 not found.'
|
||||
assert true
|
||||
}
|
||||
}
|
||||