webdav completion wip
@@ -31,8 +31,8 @@ pub fn new_server(args ServerArgs) !&Server {
	}

	// register middlewares for all routes
	server.use(handler: server.auth_middleware)
	server.use(handler: middleware_log_request)
	server.use(handler: server.auth_middleware)
	server.use(handler: middleware_log_response, after: true)
	return server
}

@@ -1,33 +1,37 @@
module webdav

import time
import encoding.base64
import freeflowuniverse.herolib.core.texttools

fn (server &Server) auth_middleware(mut ctx Context) bool {
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return false }
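	// the Date header above uses the RFC 1123 form HTTP requires, e.g. 'Sun, 06 Nov 1994 08:49:37 GMT'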

	// return true
	auth_header := ctx.get_header(.authorization) or {
		ctx.res.set_status(.unauthorized)
		ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
		ctx.send_response_to_client('text', 'unauthorized')
		ctx.set_header(.www_authenticate, 'Basic realm="/"')
		ctx.send_response_to_client('', '')
		return false
	}
	if auth_header == '' {
		ctx.res.set_status(.unauthorized)
		ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
		ctx.send_response_to_client('text', 'unauthorized')
		ctx.set_header(.www_authenticate, 'Basic realm="/"')
		ctx.send_response_to_client('', '')
		return false
	}

	if !auth_header.starts_with('Basic ') {
		ctx.res.set_status(.unauthorized)
		ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
		ctx.send_response_to_client('text', 'unauthorized')
		ctx.set_header(.www_authenticate, 'Basic realm="/"')
		ctx.send_response_to_client('', '')
		return false
	}
	auth_decoded := base64.decode_str(auth_header[6..])
	split_credentials := auth_decoded.split(':')
	if split_credentials.len != 2 {
		ctx.res.set_status(.unauthorized)
		ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
		ctx.set_header(.www_authenticate, 'Basic realm="/"')
		ctx.send_response_to_client('', '')
		return false
	}
@@ -36,14 +40,14 @@ fn (server &Server) auth_middleware(mut ctx Context) bool {
	if user := server.user_db[username] {
		if user != hashed_pass {
			ctx.res.set_status(.unauthorized)
			ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
			ctx.send_response_to_client('text', 'unauthorized')
			ctx.set_header(.www_authenticate, 'Basic realm="/"')
			ctx.send_response_to_client('', '')
			return false
		}
		return true
	}
	ctx.res.set_status(.unauthorized)
	ctx.res.header.add(.www_authenticate, 'Basic realm="WebDAV Server"')
	ctx.send_response_to_client('text', 'unauthorized')
	ctx.set_header(.www_authenticate, 'Basic realm="/"')
	ctx.send_response_to_client('', '')
	return false
}

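Aside: the middleware above follows RFC 7617 Basic authentication, where the header value is base64("username:password") after the "Basic " prefix. A minimal sketch of the decode round-trip it relies on (illustrative credentials):

	import encoding.base64

	header := 'Basic ' + base64.encode_str('alice:secret')
	credentials := base64.decode_str(header[6..]).split(':')
	assert credentials == ['alice', 'secret']
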
@@ -10,101 +10,220 @@ import veb

// Property represents a WebDAV property
pub interface Property {
	xml() string
	xml_name() string
	xml() xml.XMLNodeContents
	// xml_name() string
	// to_xml_node() xml.XMLNode
	// }
}

type DisplayName = string
type GetETag = string
type GetLastModified = string
type GetContentType = string
type GetContentLength = string
type QuotaAvailableBytes = u64
type QuotaUsedBytes = u64
type QuotaUsed = u64
type Quota = u64
type ResourceType = bool
type CreationDate = string
type SupportedLock = string
type LockDiscovery = string

fn (p []Property) xml() string {
	return '<D:propstat>
	<D:prop>${p.map(it.xml()).join_lines()}</D:prop>
	<D:status>HTTP/1.1 200 OK</D:status>
	</D:propstat>'
}
// fn (p []Property) xml() string {
// 	return '<D:propstat>
// 	<D:prop>${p.map(it.xml()).join_lines()}</D:prop>
// 	<D:status>HTTP/1.1 200 OK</D:status>
// 	</D:propstat>'
// }

fn (p DisplayName) xml() string {
	return '<D:displayname>${p}</D:displayname>'
}

fn (p DisplayName) xml_name() string {
	return '<displayname/>'
}

fn (p GetLastModified) xml() string {
	return '<D:getlastmodified>${p}</D:getlastmodified>'
}

fn (p GetLastModified) xml_name() string {
	return '<getlastmodified/>'
}

fn (p GetContentType) xml() string {
	return '<D:getcontenttype>${p}</D:getcontenttype>'
}

fn (p GetContentType) xml_name() string {
	return '<getcontenttype/>'
}

fn (p GetContentLength) xml() string {
	return '<D:getcontentlength>${p}</D:getcontentlength>'
}

fn (p GetContentLength) xml_name() string {
	return '<getcontentlength/>'
}

fn (p ResourceType) xml() string {
	return if p {
		'<D:resourcetype><D:collection/></D:resourcetype>'
	} else {
		'<D:resourcetype/>'
fn (p []Property) xml() xml.XMLNode {
	return xml.XMLNode{
		name: 'D:propstat'
		children: [
			xml.XMLNode{
				name: 'D:prop'
				children: p.map(it.xml())
			},
			xml.XMLNode{
				name: 'D:status'
				children: [xml.XMLNodeContents('HTTP/1.1 200 OK')]
			}
		]
	}
}

fn (p ResourceType) xml_name() string {
	return '<resourcetype/>'
fn (p DisplayName) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:displayname'
		children: [xml.XMLNodeContents(p)]
	}
}

fn (p CreationDate) xml() string {
	return '<D:creationdate>${p}</D:creationdate>'
fn (p GetETag) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:getetag'
		children: [xml.XMLNodeContents(p)]
	}
}

fn (p CreationDate) xml_name() string {
	return '<creationdate/>'
fn (p GetLastModified) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:getlastmodified'
		children: [xml.XMLNodeContents(p)]
	}
}

fn (p SupportedLock) xml() string {
	return '<D:supportedlock>
	<D:lockentry>
	<D:lockscope><D:exclusive/></D:lockscope>
	<D:locktype><D:write/></D:locktype>
	</D:lockentry>
	<D:lockentry>
	<D:lockscope><D:shared/></D:lockscope>
	<D:locktype><D:write/></D:locktype>
	</D:lockentry>
	</D:supportedlock>'
fn (p GetContentType) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:getcontenttype'
		children: [xml.XMLNodeContents(p)]
	}
}

fn (p SupportedLock) xml_name() string {
	return '<supportedlock/>'
fn (p GetContentLength) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:getcontentlength'
		children: [xml.XMLNodeContents(p)]
	}
}

fn (p LockDiscovery) xml() string {
	return '<D:lockdiscovery>${p}</D:lockdiscovery>'
fn (p QuotaAvailableBytes) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:quota-available-bytes'
		children: [xml.XMLNodeContents(p.str())]
	}
}

fn (p LockDiscovery) xml_name() string {
	return '<lockdiscovery/>'
fn (p QuotaUsedBytes) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:quota-used-bytes'
		children: [xml.XMLNodeContents(p.str())]
	}
}

fn (p Quota) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:quota'
		children: [xml.XMLNodeContents(p.str())]
	}
}

fn (p QuotaUsed) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:quotaused'
		children: [xml.XMLNodeContents(p.str())]
	}
}

fn (p ResourceType) xml() xml.XMLNodeContents {
	if p {
		// If it's a collection, add the collection element as a child
		mut children := []xml.XMLNodeContents{}
		children << xml.XMLNode{
			name: 'D:collection'
		}

		return xml.XMLNode{
			name: 'D:resourcetype'
			children: children
		}
	} else {
		// If it's not a collection, return an empty resourcetype element
		return xml.XMLNode{
			name: 'D:resourcetype'
			children: []xml.XMLNodeContents{}
		}
	}
}

fn (p CreationDate) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:creationdate'
		children: [xml.XMLNodeContents(p)]
	}
}

fn (p SupportedLock) xml() xml.XMLNodeContents {
	// Create children for the supportedlock node
	mut children := []xml.XMLNodeContents{}

	// First lockentry - exclusive
	mut lockscope1_children := []xml.XMLNodeContents{}
	lockscope1_children << xml.XMLNode{
		name: 'D:exclusive'
	}

	lockscope1 := xml.XMLNode{
		name: 'D:lockscope'
		children: lockscope1_children
	}

	mut locktype1_children := []xml.XMLNodeContents{}
	locktype1_children << xml.XMLNode{
		name: 'D:write'
	}

	locktype1 := xml.XMLNode{
		name: 'D:locktype'
		children: locktype1_children
	}

	mut lockentry1_children := []xml.XMLNodeContents{}
	lockentry1_children << lockscope1
	lockentry1_children << locktype1

	lockentry1 := xml.XMLNode{
		name: 'D:lockentry'
		children: lockentry1_children
	}

	// Second lockentry - shared
	mut lockscope2_children := []xml.XMLNodeContents{}
	lockscope2_children << xml.XMLNode{
		name: 'D:shared'
	}

	lockscope2 := xml.XMLNode{
		name: 'D:lockscope'
		children: lockscope2_children
	}

	mut locktype2_children := []xml.XMLNodeContents{}
	locktype2_children << xml.XMLNode{
		name: 'D:write'
	}

	locktype2 := xml.XMLNode{
		name: 'D:locktype'
		children: locktype2_children
	}

	mut lockentry2_children := []xml.XMLNodeContents{}
	lockentry2_children << lockscope2
	lockentry2_children << locktype2

	lockentry2 := xml.XMLNode{
		name: 'D:lockentry'
		children: lockentry2_children
	}

	// Add both lockentries to children
	children << lockentry1
	children << lockentry2

	// Return the supportedlock node
	return xml.XMLNode{
		name: 'D:supportedlock'
		children: children
	}
}

fn (p LockDiscovery) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:lockdiscovery'
		children: [xml.XMLNodeContents(p)]
	}
}

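For reference, the node tree built by the new SupportedLock.xml() serializes to the standard DAV lock-capability block:

	<D:supportedlock>
		<D:lockentry><D:lockscope><D:exclusive/></D:lockscope><D:locktype><D:write/></D:locktype></D:lockentry>
		<D:lockentry><D:lockscope><D:shared/></D:lockscope><D:locktype><D:write/></D:locktype></D:lockentry>
	</D:supportedlock>
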
fn format_iso8601(t time.Time) string {

@@ -60,7 +60,7 @@ pub fn parse_propfind_xml(req http.Request) !PropfindRequest {
		return error('Invalid PROPFIND request: root element must be propfind')
	}

	mut typ := PropfindType.invalid
	mut typ := PropfindType.allprop
	mut props := []string{}

	// Check for allprop, propname, or prop elements
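For context, a typical client body this parser has to classify (allprop by the new default above, otherwise the named properties) looks like:

	<?xml version="1.0" encoding="utf-8"?>
	<D:propfind xmlns:D="DAV:">
		<D:prop><D:displayname/><D:getcontentlength/><D:resourcetype/></D:prop>
	</D:propfind>
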
@@ -120,23 +120,58 @@ pub fn parse_depth(depth_str string) Depth {
}

// Response represents a WebDAV response for a resource
pub struct Response {
pub struct PropfindResponse {
pub:
	href            string
	found_props     []Property
	not_found_props []Property
}

fn (r Response) xml() string {
	return '<D:response>\n<D:href>${r.href}</D:href>
	<D:propstat><D:prop>${r.found_props.map(it.xml()).join_lines()}</D:prop><D:status>HTTP/1.1 200 OK</D:status></D:propstat>
	</D:response>'
fn (r PropfindResponse) xml() xml.XMLNodeContents {
	return xml.XMLNode{
		name: 'D:response'
		children: [
			xml.XMLNode{
				name: 'D:href'
				children: [xml.XMLNodeContents(r.href)]
			},
			xml.XMLNode{
				name: 'D:propstat'
				children: [
					xml.XMLNode{
						name: 'D:prop'
						children: r.found_props.map(it.xml())
					},
					xml.XMLNode{
						name: 'D:status'
						children: [xml.XMLNodeContents('HTTP/1.1 200 OK')]
					}
				]
			}
		]
	}
}

// generate_propfind_response generates a PROPFIND response XML string from Response structs
pub fn (r []Response) xml() string {
	return '<?xml version="1.0" encoding="UTF-8"?>\n<D:multistatus xmlns:D="DAV:">
	${r.map(it.xml()).join_lines()}\n</D:multistatus>'
pub fn (r []PropfindResponse) xml() string {
	// Create multistatus root node
	multistatus_node := xml.XMLNode{
		name: 'D:multistatus'
		attributes: {
			'xmlns:D': 'DAV:'
		}
		children: r.map(it.xml())
	}

	// Create a new XML document with the root node
	doc := xml.XMLDocument{
		version: '1.0'
		root: multistatus_node
	}

	// Generate XML string
	doc.validate() or { panic('this should never happen ${err}') }
	return format_xml(doc.str())
}

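A minimal multistatus document produced by this builder (one response with a single found property; illustrative values):

	<?xml version="1.0" encoding="UTF-8"?>
	<D:multistatus xmlns:D="DAV:"><D:response><D:href>/docs/</D:href><D:propstat><D:prop><D:displayname>docs</D:displayname></D:prop><D:status>HTTP/1.1 200 OK</D:status></D:propstat></D:response></D:multistatus>
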
fn get_file_content_type(path string) string {
@@ -149,3 +184,54 @@ fn get_file_content_type(path string) string {

	return content_type
}

// format_xml takes an XML string and returns a cleaned version with whitespace removed between tags
pub fn format_xml(xml_str string) string {
	mut result := ''
	mut i := 0
	mut in_tag := false
	mut content_start := 0

	// Process the string character by character
	for i < xml_str.len {
		ch := xml_str[i]

		// Start of a tag
		if ch == `<` {
			// If we were collecting content between tags, process it
			if !in_tag && i > content_start {
				// Get the content between tags and trim whitespace
				content := xml_str[content_start..i].trim_space()
				result += content
			}

			in_tag = true
			result += '<'
		}
		// End of a tag
		else if ch == `>` {
			in_tag = false
			result += '>'
			content_start = i + 1
		}
		// Inside a tag - preserve all characters including whitespace
		else if in_tag {
			result += ch.ascii_str()
		}
		// Outside a tag - only add non-whitespace or handle whitespace in content
		else if !in_tag {
			// We'll collect and process this content when we reach the next tag
			// or at the end of the string
		}

		i++
	}

	// Handle any remaining content at the end of the string
	if !in_tag && content_start < xml_str.len {
		content := xml_str[content_start..].trim_space()
		result += content
	}

	return result
}
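Expected behaviour, going by the logic above (whitespace between tags is dropped, whitespace inside tags is kept):

	assert format_xml('<D:prop>\n  <D:href>/a</D:href>\n</D:prop>') == '<D:prop><D:href>/a</D:href></D:prop>'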

@@ -1,34 +1,35 @@
module webdav

import time
import freeflowuniverse.herolib.core.texttools
import freeflowuniverse.herolib.ui.console
import encoding.xml
import net.urllib
import net
import net.http.chunked
import veb
import log
import strings

@[head]
pub fn (server &Server) index(mut ctx Context) veb.Result {
	ctx.set_custom_header('DAV', '1,2') or { return ctx.server_error(err.msg()) }
	ctx.set_header(.allow, 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE')
	ctx.set_custom_header('MS-Author-Via', 'DAV') or { return ctx.server_error(err.msg()) }
	ctx.set_header(.access_control_allow_origin, '*')
	ctx.set_header(.access_control_allow_methods, 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE')
	ctx.set_header(.access_control_allow_headers, 'Authorization, Content-Type')
	ctx.set_header(.content_length, '0')
	ctx.set_custom_header('DAV', '1,2') or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Allow', 'OPTIONS, HEAD, GET, PROPFIND, DELETE, COPY, MOVE, PROPPATCH, LOCK, UNLOCK') or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('MS-Author-Via', 'DAV') or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'WsgiDAV-compatible WebDAV Server') or { return ctx.server_error(err.msg()) }
	return ctx.ok('')
}

@['/:path...'; options]
pub fn (server &Server) options(mut ctx Context, path string) veb.Result {
	ctx.set_custom_header('DAV', '1,2') or { return ctx.server_error(err.msg()) }
	ctx.set_header(.allow, 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE')
	ctx.set_custom_header('MS-Author-Via', 'DAV') or { return ctx.server_error(err.msg()) }
	ctx.set_header(.access_control_allow_origin, '*')
	ctx.set_header(.access_control_allow_methods, 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, POST, PUT, DELETE, COPY, MOVE')
	ctx.set_header(.access_control_allow_headers, 'Authorization, Content-Type')
	ctx.set_header(.content_length, '0')
	ctx.set_custom_header('DAV', '1,2') or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Allow', 'OPTIONS, HEAD, GET, PROPFIND, DELETE, COPY, MOVE, PROPPATCH, LOCK, UNLOCK') or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('MS-Author-Via', 'DAV') or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'WsgiDAV-compatible WebDAV Server') or { return ctx.server_error(err.msg()) }
	return ctx.ok('')
}

@@ -69,9 +70,11 @@ pub fn (mut server Server) lock(mut ctx Context, path string) veb.Result {
		return ctx.text('Resource is already locked by a different owner.')
	}

	// log.debug('[WebDAV] Received lock result ${lock_result.xml()}')
	// Set WsgiDAV-like headers
	ctx.res.set_status(.ok)
	ctx.set_custom_header('Lock-Token', '${lock_result.token}') or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Lock-Token', 'opaquelocktoken:${lock_result.token}') or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }

	// Create a proper WebDAV lock response
	return ctx.send_response_to_client('application/xml', lock_result.xml())
@@ -81,16 +84,21 @@ pub fn (mut server Server) lock(mut ctx Context, path string) veb.Result {
pub fn (mut server Server) unlock(mut ctx Context, path string) veb.Result {
	resource := ctx.req.url
	token_ := ctx.get_custom_header('Lock-Token') or { return ctx.server_error(err.msg()) }
	// Handle the opaquelocktoken: prefix that WsgiDAV uses
	token := token_.trim_string_left('<').trim_string_right('>')
		.trim_string_left('opaquelocktoken:')
	if token.len == 0 {
		console.print_stderr('Unlock failed: `Lock-Token` header required.')
		ctx.res.set_status(.bad_request)
		return ctx.text('Lock failed: `Owner` header missing.')
		return ctx.text('Lock failed: `Lock-Token` header missing or invalid.')
	}

	if server.lock_manager.unlock_with_token(resource, token) {
		// Add WsgiDAV-like headers
		ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
		ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }
		ctx.res.set_status(.no_content)
		return ctx.text('Lock successfully released')
		return ctx.text('')
	}

	console.print_stderr('Resource is not locked or token mismatch.')

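The token normalization in unlock accepts the common client formats; an illustrative walk-through with a made-up token:

	raw := '<opaquelocktoken:9f1a-44b0>'
	token := raw.trim_string_left('<').trim_string_right('>').trim_string_left('opaquelocktoken:')
	assert token == '9f1a-44b0'
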
@@ -106,9 +114,15 @@ pub fn (mut server Server) get_file(mut ctx Context, path string) veb.Result {
		return ctx.server_error(err.msg())
	}
	ext := path.all_after_last('.')
	content_type := veb.mime_types['.${ext}'] or { 'text/plain' }
	// ctx.res.header.set(.content_length, file_data.len.str())
	// ctx.res.set_status(.ok)
	content_type := veb.mime_types['.${ext}'] or { 'text/plain; charset=utf-8' }

	// Add WsgiDAV-like headers
	ctx.set_header(.content_length, file_data.len.str())
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_header(.accept_ranges, 'bytes')
	ctx.set_custom_header('ETag', '"${path}-${time.now().unix()}"') or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Last-Modified', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }

	return ctx.send_response_to_client(content_type, file_data.bytestr())
}

@@ -125,7 +139,6 @@ pub fn (mut server Server) exists(mut ctx Context, path string) veb.Result {
		return ctx.server_error('Failed to set DAV header: ${err}')
	}
	ctx.set_header(.content_length, '0') // HEAD request, so no body
	// ctx.set_header(.date, time.now().as_utc().format_rfc1123()) // Correct UTC date format
	// ctx.set_header(.content_type, 'application/xml') // XML is common for WebDAV metadata
	ctx.set_custom_header('Allow', 'OPTIONS, GET, HEAD, PROPFIND, PROPPATCH, MKCOL, PUT, DELETE, COPY, MOVE, LOCK, UNLOCK') or {
		return ctx.server_error('Failed to set Allow header: ${err}')
@@ -134,7 +147,7 @@ pub fn (mut server Server) exists(mut ctx Context, path string) veb.Result {
	ctx.set_custom_header('Cache-Control', 'no-cache, no-store, must-revalidate') or {
		return ctx.server_error('Failed to set Cache-Control header: ${err}')
	}
	ctx.set_custom_header('Last-Modified', time.now().as_utc().format()) or {
	ctx.set_custom_header('Last-Modified', texttools.format_rfc1123(time.utc())) or {
		return ctx.server_error('Failed to set Last-Modified header: ${err}')
	}
	ctx.res.set_version(.v1_1)
@@ -149,6 +162,11 @@ pub fn (mut server Server) delete(mut ctx Context, path string) veb.Result {
		return ctx.server_error(err.msg())
	}

	// Add WsgiDAV-like headers
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }

	server.vfs.print() or { panic(err) }
	// Return success response
	return ctx.no_content()
}
@@ -168,15 +186,25 @@ pub fn (mut server Server) copy(mut ctx Context, path string) veb.Result {
	}
	destination_path_str := destination_url.path

	// Check if destination exists
	destination_exists := server.vfs.exists(destination_path_str)

	server.vfs.copy(path, destination_path_str) or {
		log.set_level(.debug)

		println('[WebDAV] Failed to copy: ${err}')
		log.error('[WebDAV] Failed to copy: ${err}')
		return ctx.server_error(err.msg())
	}

	ctx.res.set_status(.ok)
	return ctx.text('HTTP 200: Successfully copied entry: ${path}')
	// Add WsgiDAV-like headers
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }

	// Return 201 Created if the destination was created, 204 No Content if it was overwritten
	if destination_exists {
		return ctx.no_content()
	} else {
		ctx.res.set_status(.created)
		return ctx.text('')
	}
}

@['/:path...'; move]
@@ -194,14 +222,22 @@ pub fn (mut server Server) move(mut ctx Context, path string) veb.Result {
	}
	destination_path_str := destination_url.path

	// Check if destination exists
	destination_exists := server.vfs.exists(destination_path_str)

	log.info('[WebDAV] ${@FN} from ${path} to ${destination_path_str}')
	server.vfs.move(path, destination_path_str) or {
		log.error('Failed to move: ${err}')
		return ctx.server_error(err.msg())
	}

	ctx.res.set_status(.ok)
	return ctx.text('HTTP 200: Successfully copied entry: ${path}')
	// Add WsgiDAV-like headers
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }

	// Return 204 No Content for successful move operations (WsgiDAV behavior)
	ctx.res.set_status(.no_content)
	return ctx.text('')
}

@['/:path...'; mkcol]
@@ -217,29 +253,277 @@ pub fn (mut server Server) mkcol(mut ctx Context, path string) veb.Result {
		return ctx.server_error(err.msg())
	}

	// Add WsgiDAV-like headers
	ctx.set_header(.content_type, 'text/html; charset=utf-8')
	ctx.set_header(.content_length, '0')
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }

	ctx.res.set_status(.created)
	return ctx.text('HTTP 201: Created')
	return ctx.text('')
}

@['/:path...'; put]
fn (mut server Server) create_or_update(mut ctx Context, path string) veb.Result {
	if server.vfs.exists(path) {
	// Check if parent directory exists (RFC 4918 9.7.1: a PUT that would result in the creation of a resource
	// without an appropriately scoped parent collection MUST fail with a 409 Conflict)
	parent_path := path.all_before_last('/')
	if parent_path != '' && !server.vfs.exists(parent_path) {
		log.error('[WebDAV] Parent directory ${parent_path} does not exist for ${path}')
		ctx.res.set_status(.conflict)
		return ctx.text('HTTP 409: Conflict - Parent collection does not exist')
	}

	is_update := server.vfs.exists(path)
	if is_update {
		log.debug('[WebDAV] ${path} exists, updating')
		if fs_entry := server.vfs.get(path) {
			log.debug('[WebDAV] Got FSEntry ${fs_entry}')
			// RFC 4918 9.7.2: PUT for Collections - a PUT request to an existing collection MAY be treated as an error
			if fs_entry.is_dir() {
				console.print_stderr('Cannot PUT to a directory: ${path}')
				log.error('[WebDAV] Cannot PUT to a directory: ${path}')
				ctx.res.set_status(.method_not_allowed)
				return ctx.text('HTTP 405: Method Not Allowed')
				ctx.set_header(.allow, 'OPTIONS, PROPFIND, MKCOL, GET, HEAD, DELETE')
				return ctx.text('HTTP 405: Method Not Allowed - Cannot PUT to a collection')
			}
		} else {
			return ctx.server_error('failed to get FS Entry ${path}: ${err.msg()}')
			log.error('[WebDAV] Failed to get FS Entry for ${path}\n${err.msg()}')
			return ctx.server_error('Failed to get FS Entry ${path}: ${err.msg()}')
		}
	} else {
		server.vfs.file_create(path) or { return ctx.server_error(err.msg()) }
		log.debug('[WebDAV] ${path} does not exist, creating')
		server.vfs.file_create(path) or {
			log.error('[WebDAV] Failed to create file ${path}: ${err.msg()}')
			return ctx.server_error('Failed to create file: ${err.msg()}')
		}
	}
	if ctx.req.data.len > 0 {
		data := ctx.req.data.bytes()
		server.vfs.file_write(path, data) or { return ctx.server_error(err.msg()) }
		return ctx.ok('HTTP 200: Successfully wrote file: ${path}')

		// Process Content-Type if provided
		content_type := ctx.req.header.get(.content_type) or { '' }
		if content_type != '' {
			log.debug('[WebDAV] Content-Type provided: ${content_type}')
		}
		return ctx.ok('HTTP 200: Successfully created file: ${path}')
	}

	// Check if we have a Content-Length header
	content_length_str := ctx.req.header.get(.content_length) or { '0' }
	content_length := content_length_str.int()
	log.debug('[WebDAV] Content-Length: ${content_length}')

	// Check for chunked transfer encoding
	transfer_encoding := ctx.req.header.get_custom('Transfer-Encoding') or { '' }
	is_chunked := transfer_encoding.to_lower().contains('chunked')
	log.debug('[WebDAV] Transfer-Encoding: ${transfer_encoding}, is_chunked: ${is_chunked}')

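	// For orientation: a chunked body arrives framed as '<hex-size>\r\n<bytes>\r\n ... 0\r\n\r\n',
	// e.g. '5\r\nhello\r\n0\r\n\r\n' carries the five bytes 'hello'; chunked.decode below
	// strips that framing when it is present.
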
	// Handle the file upload based on the request type
	if is_chunked || content_length > 0 {
		// Take over the connection to handle streaming data
		ctx.takeover_conn()

		// Create a buffer for reading chunks
		mut buffer := []u8{len: 8200} // ~8 KB buffer for reading chunks
		mut total_bytes := 0
		mut all_data := []u8{}

		// Process any data that's already been read
		if ctx.req.data.len > 0 {
			all_data << ctx.req.data.bytes()
			total_bytes += ctx.req.data.len
			log.debug('[WebDAV] Added ${ctx.req.data.len} initial bytes from request data')
		}

		// Read data in chunks from the connection
		if is_chunked {
			// For chunked encoding, we need to read until we get a zero-length chunk
			log.info('[WebDAV] Reading chunked data for ${path}')

			// Write initial data to the file
			if all_data.len > 0 {
				server.vfs.file_write(path, all_data) or {
					log.error('[WebDAV] Failed to write initial data to ${path}: ${err.msg()}')
					// Send error response
					ctx.res.set_status(.internal_server_error)
					ctx.res.header.set(.content_type, 'text/plain')
					ctx.res.header.set(.content_length, '${err.msg().len}')
					ctx.conn.write(ctx.res.bytestr().bytes()) or {}
					ctx.conn.write(err.msg().bytes()) or {}
					ctx.conn.close() or {}
					return veb.no_result()
				}
			}

			// Continue reading chunks from the connection
			for {
				// Read a chunk from the connection
				n := ctx.conn.read(mut buffer) or {
					if err.code() == net.err_timed_out_code {
						log.info('[WebDAV] Connection timed out, finished reading')
						break
					}
					log.error('[WebDAV] Error reading from connection: ${err}')
					break
				}

				if n <= 0 {
					log.info('[WebDAV] Reached end of data stream')
					break
				}

				// Process the chunk using the chunked module
				chunk := buffer[..n].clone()
				chunk_str := chunk.bytestr()

				// Try to decode the chunk if it looks like a valid chunked format
				if chunk_str.contains('\r\n') {
					log.debug('[WebDAV] Attempting to decode chunked data')
					decoded := chunked.decode(chunk_str) or {
						log.error('[WebDAV] Failed to decode chunked data: ${err}')
						// If decoding fails, just use the raw chunk
						server.vfs.file_concatenate(path, chunk) or {
							log.error('[WebDAV] Failed to append chunk to ${path}: ${err.msg()}')
							// Send error response
							ctx.res.set_status(.internal_server_error)
							ctx.res.header.set(.content_type, 'text/plain')
							ctx.res.header.set(.content_length, '${err.msg().len}')
							ctx.conn.write(ctx.res.bytestr().bytes()) or {}
							ctx.conn.write(err.msg().bytes()) or {}
							ctx.conn.close() or {}
							return veb.no_result()
						}
					}

					// If decoding succeeds, write the decoded data
					if decoded.len > 0 {
						log.debug('[WebDAV] Successfully decoded chunked data: ${decoded.len} bytes')
						server.vfs.file_concatenate(path, decoded.bytes()) or {
							log.error('[WebDAV] Failed to append decoded chunk to ${path}: ${err.msg()}')
							// Send error response
							ctx.res.set_status(.internal_server_error)
							ctx.res.header.set(.content_type, 'text/plain')
							ctx.res.header.set(.content_length, '${err.msg().len}')
							ctx.conn.write(ctx.res.bytestr().bytes()) or {}
							ctx.conn.write(err.msg().bytes()) or {}
							ctx.conn.close() or {}
							return veb.no_result()
						}
					}
				} else {
					// If it doesn't look like chunked data, use the raw chunk
					server.vfs.file_concatenate(path, chunk) or {
						log.error('[WebDAV] Failed to append chunk to ${path}: ${err.msg()}')
						// Send error response
						ctx.res.set_status(.internal_server_error)
						ctx.res.header.set(.content_type, 'text/plain')
						ctx.res.header.set(.content_length, '${err.msg().len}')
						ctx.conn.write(ctx.res.bytestr().bytes()) or {}
						ctx.conn.write(err.msg().bytes()) or {}
						ctx.conn.close() or {}
						return veb.no_result()
					}
				}

				total_bytes += n
				log.debug('[WebDAV] Read ${n} bytes, total: ${total_bytes}')
			}
		} else if content_length > 0 {
			// For Content-Length uploads, read exactly that many bytes
			log.info('[WebDAV] Reading ${content_length} bytes for ${path}')
			mut remaining := content_length - all_data.len

			// Write initial data to the file
			if all_data.len > 0 {
				server.vfs.file_write(path, all_data) or {
					log.error('[WebDAV] Failed to write initial data to ${path}: ${err.msg()}')
					// Send error response
					ctx.res.set_status(.internal_server_error)
					ctx.res.header.set(.content_type, 'text/plain')
					ctx.res.header.set(.content_length, '${err.msg().len}')
					ctx.conn.write(ctx.res.bytestr().bytes()) or {}
					ctx.conn.write(err.msg().bytes()) or {}
					ctx.conn.close() or {}
					return veb.no_result()
				}
			}

			// Continue reading until we've read all the content
			for remaining > 0 {
				// Adjust buffer size for the last chunk if needed
				read_size := if remaining < buffer.len { remaining } else { buffer.len }

				// Read a chunk from the connection
				n := ctx.conn.read(mut buffer[..read_size]) or {
					if err.code() == net.err_timed_out_code {
						log.info('[WebDAV] Connection timed out, finished reading')
						break
					}
					log.error('[WebDAV] Error reading from connection: ${err}')
					break
				}

				if n <= 0 {
					log.info('[WebDAV] Reached end of data stream')
					break
				}

				// Append the chunk to our file
				chunk := buffer[..n].clone()
				server.vfs.file_concatenate(path, chunk) or {
					log.error('[WebDAV] Failed to append chunk to ${path}: ${err.msg()}')
					// Send error response
					ctx.res.set_status(.internal_server_error)
					ctx.res.header.set(.content_type, 'text/plain')
					ctx.res.header.set(.content_length, '${err.msg().len}')
					ctx.conn.write(ctx.res.bytestr().bytes()) or {}
					ctx.conn.write(err.msg().bytes()) or {}
					return veb.no_result()
				}

				total_bytes += n
				remaining -= n
				log.debug('[WebDAV] Read ${n} bytes, remaining: ${remaining}')
			}
		}

		log.info('[WebDAV] Successfully wrote ${total_bytes} bytes to ${path}')

		// Send success response
		ctx.res.header.set(.content_type, 'text/html; charset=utf-8')
		ctx.res.header.set(.content_length, '0')
		ctx.res.header.set_custom('Date', texttools.format_rfc1123(time.utc())) or {}
		ctx.res.header.set_custom('Server', 'veb WebDAV Server') or {}

		if is_update {
			ctx.res.set_status(.no_content) // 204 No Content
		} else {
			ctx.res.set_status(.created) // 201 Created
		}

		ctx.conn.write(ctx.res.bytestr().bytes()) or {
			log.error('[WebDAV] Failed to write response: ${err}')
		}
		ctx.conn.close() or {}

		return veb.no_result()
	} else {
		// Empty PUT is still valid (creates empty file or replaces with empty content)
		server.vfs.file_write(path, []u8{}) or {
			log.error('[WebDAV] Failed to write empty data to ${path}: ${err.msg()}')
			return ctx.server_error('Failed to write file: ${err.msg()}')
		}
		log.info('[WebDAV] Created empty file at ${path}')

		// Add WsgiDAV-like headers
		ctx.set_header(.content_type, 'text/html; charset=utf-8')
		ctx.set_header(.content_length, '0')
		ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
		ctx.set_custom_header('Server', 'veb WebDAV Server') or { return ctx.server_error(err.msg()) }

		// Set appropriate status code based on whether this was a create or update
		if is_update {
			return ctx.no_content()
		} else {
			ctx.res.set_status(.created)
			return ctx.text('')
		}
	}
}

@@ -7,6 +7,7 @@ import freeflowuniverse.herolib.vfs
import freeflowuniverse.herolib.vfs.vfs_db
import os
import time
import freeflowuniverse.herolib.core.texttools
import net.http
import veb

@@ -21,7 +22,7 @@ fn (mut server Server) propfind(mut ctx Context, path string) veb.Result {
		})
	}

	log.debug('[WebDAV] Propfind Request: ${propfind_req.typ} ${propfind_req.depth}')
	log.debug('[WebDAV] Propfind Request: ${propfind_req.typ}')

	// Check if resource is locked
	if server.lock_manager.is_locked(ctx.req.url) {
@@ -38,29 +39,64 @@ fn (mut server Server) propfind(mut ctx Context, path string) veb.Result {
			tag: 'resource-must-be-null'
		)
	}

	responses := server.get_responses(entry, propfind_req, path) or {
		return ctx.server_error('Failed to get entry properties ${err}')
	}

	// log.debug('[WebDAV] Propfind responses ${responses}')

	// Add WsgiDAV-like headers
	ctx.set_header(.content_type, 'application/xml; charset=utf-8')
	ctx.set_custom_header('Date', texttools.format_rfc1123(time.utc())) or { return ctx.server_error(err.msg()) }
	ctx.set_custom_header('Server', 'WsgiDAV-compatible WebDAV Server') or { return ctx.server_error(err.msg()) }

	// Create multistatus response using the responses
	ctx.res.set_status(.multi_status)
	return ctx.send_response_to_client('application/xml', responses.xml())
}

// get_responses returns all properties for the given path and depth
fn (mut server Server) get_responses(entry vfs.FSEntry, req PropfindRequest, path string) ![]Response {
	mut responses := []Response{}

	// path := server.vfs.get_path(entry)!
// returns the properties of a filesystem entry
fn (mut server Server) get_entry_property(entry &vfs.FSEntry, name string) !Property {
	return match name {
		'creationdate' { Property(CreationDate(format_iso8601(entry.get_metadata().created_time()))) }
		'getetag' { Property(GetETag(entry.get_metadata().id.str())) }
		'resourcetype' { Property(ResourceType(entry.is_dir())) }
		'getlastmodified' { Property(GetLastModified(texttools.format_rfc1123(entry.get_metadata().modified_time()))) }
		'getcontentlength' { Property(GetContentLength(entry.get_metadata().size.str())) }
		'quota-available-bytes' { Property(QuotaAvailableBytes(16184098816)) }
		'quota-used-bytes' { Property(QuotaUsedBytes(16184098816)) }
		'quotaused' { Property(QuotaUsed(16184098816)) }
		'quota' { Property(Quota(16184098816)) }
		else { panic('implement ${name}') }
	}
}

	// main entry response
	responses << Response{
		href: path
		// not_found: entry.get_unfound_properties(req)
		found_props: server.get_properties(entry)
// get_responses returns all properties for the given path and depth
fn (mut server Server) get_responses(entry vfs.FSEntry, req PropfindRequest, path string) ![]PropfindResponse {
	mut responses := []PropfindResponse{}

	if req.typ == .prop {
		mut properties := []Property{}
		mut erroneous_properties := map[int][]Property{} // properties that have errors, indexed by error code
		for name in req.props {
			if property := server.get_entry_property(entry, name.trim_string_left('D:')) {
				properties << property
			} else {
				// TODO: implement error reporting
			}
		}
		// main entry response
		responses << PropfindResponse{
			href: if entry.is_dir() { '${path.trim_string_right("/")}/' } else { path }
			// not_found: entry.get_unfound_properties(req)
			found_props: properties
		}
	} else {
		responses << PropfindResponse{
			href: if entry.is_dir() { '${path.trim_string_right("/")}/' } else { path }
			// not_found: entry.get_unfound_properties(req)
			found_props: server.get_properties(entry)
		}
	}

	if !entry.is_dir() || req.depth == .zero {
@@ -84,12 +120,19 @@ fn (mut server Server) get_properties(entry &vfs.FSEntry) []Property {
	mut props := []Property{}

	metadata := entry.get_metadata()

	// Display name
	props << DisplayName(metadata.name)
	props << GetLastModified(format_iso8601(metadata.modified_time()))
	props << GetContentType(if entry.is_dir() { 'httpd/unix-directory' } else { get_file_content_type(entry.get_metadata().name) })
	props << GetLastModified(texttools.format_rfc1123(metadata.modified_time()))

	if entry.is_dir() {
		props << QuotaAvailableBytes(16184098816)
		props << QuotaUsedBytes(16184098816)
	} else {
		props << GetContentType(if entry.is_dir() { 'httpd/unix-directory' } else { get_file_content_type(entry.get_metadata().name) })
	}
	props << ResourceType(entry.is_dir())
	// props << SupportedLock('')
	// props << LockDiscovery('')

	// Content length (only for files)
	if !entry.is_dir() {

@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8" ?>
<D:prop xmlns:D="DAV:">
	<D:lockdiscovery xmlns:D="DAV:">
	<D:lockdiscovery>
		<D:activelock>
			<D:locktype><D:@{l.lock_type}/></D:locktype>
			<D:lockscope><D:@{l.scope}/></D:lockscope>
@@ -10,7 +10,7 @@
			</D:owner>
			<D:timeout>Second-@{l.timeout}</D:timeout>
			<D:locktoken>
				<D:href>@{l.token}</D:href>
				<D:href>opaquelocktoken:@{l.token}</D:href>
			</D:locktoken>
			<D:lockroot>
				<D:href>@{l.resource}</D:href>

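Rendered with illustrative values (a write lock, token 9f1a-44b0, 600-second timeout), the template now emits:

	<D:locktoken><D:href>opaquelocktoken:9f1a-44b0</D:href></D:locktoken>
	<D:timeout>Second-600</D:timeout>
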
@@ -12,6 +12,7 @@ mut:
	file_create(path string) !FSEntry
	file_read(path string) ![]u8
	file_write(path string, data []u8) !
	file_concatenate(path string, data []u8) !
	file_delete(path string) !

	// Directory operations
@@ -34,6 +35,8 @@ mut:

	// FSEntry Operations
	get_path(entry &FSEntry) !string

	print() !

	// Cleanup operation
	destroy() !

@@ -164,9 +164,9 @@ pub fn (mut fs DatabaseVFS) root_get_as_dir() !&Directory {
		id: fs.get_next_id()
		file_type: .directory
		name: ''
		created_at: time.now().unix()
		modified_at: time.now().unix()
		accessed_at: time.now().unix()
		created_at: time.utc().unix()
		modified_at: time.utc().unix()
		accessed_at: time.utc().unix()
		mode: 0o755 // default directory permissions
		owner: 'user' // TODO: get from system
		group: 'user' // TODO: get from system

@@ -48,7 +48,7 @@ fn test_directory_encoder_decoder() ! {
fn test_file_encoder_decoder() ! {
	println('Testing encoding/decoding files...')

	current_time := time.now().unix()
	current_time := time.utc().unix()
	file := File{
		metadata: vfs.Metadata{
			id: u32(current_time)

@@ -144,7 +144,10 @@ pub fn (mut fs DatabaseVFS) directory_rm(mut dir Directory, name string) ! {
	// delete file chunks in data_db
	for id in file.chunk_ids {
		log.debug('[DatabaseVFS] Deleting chunk ${id}')
		fs.db_data.delete(id)!
		fs.db_data.delete(id) or {
			log.error('Failed to delete chunk ${id}: ${err}')
			return error('Failed to delete chunk ${id}: ${err}')
		}
	}

	log.debug('[DatabaseVFS] Deleting file metadata ${file.metadata.id}')
@@ -288,15 +291,26 @@ pub fn (mut fs DatabaseVFS) directory_copy(mut dir Directory, args_ CopyDirArgs)
			found = true
			if entry is File {
				mut file_entry := entry as File

				mut file_data := []u8{}
				// log.debug('[DatabaseVFS] Got database chunk ids ${chunk_ids}')
				for id in file_entry.chunk_ids {
					// there were chunk ids stored with file so file has data
					if chunk_bytes := fs.db_data.get(id) {
						file_data << chunk_bytes
					} else {
						return error('Failed to fetch file data: ${err}')
					}
				}

				mut new_file := File{
					...file_entry,
					metadata: Metadata{...file_entry.metadata,
						id: fs.get_next_id()
						name: args.dst_entry_name
					}
					parent_id: args.dst_parent_dir.metadata.id
				}
				fs.save_entry(new_file)!
				fs.save_file(new_file, file_data)!
				args.dst_parent_dir.children << new_file.metadata.id
				fs.save_entry(args.dst_parent_dir)!
				return args.dst_parent_dir

@@ -2,6 +2,7 @@ module vfs_db

import freeflowuniverse.herolib.vfs
import freeflowuniverse.herolib.core.texttools
import arrays
import log
import os
import time
@@ -49,7 +50,6 @@ pub fn (mut self DatabaseVFS) file_read(path_ string) ![]u8 {

pub fn (mut self DatabaseVFS) file_write(path_ string, data []u8) ! {
	path := texttools.path_fix(path_)

	if mut entry := self.get_entry(path) {
		if mut entry is File {
			log.info('[DatabaseVFS] Writing ${data.len} bytes to ${path}')
@@ -59,7 +59,7 @@ pub fn (mut self DatabaseVFS) file_write(path_ string, data []u8) ! {
		} else {
			panic('handle error')
		}
	} else {
		self.file_create(path) or {
			return error('Failed to create file: ${err}')
		}
@@ -67,6 +67,62 @@ pub fn (mut self DatabaseVFS) file_write(path_ string, data []u8) ! {
	}
}

pub fn (mut self DatabaseVFS) file_concatenate(path_ string, data []u8) ! {
	path := texttools.path_fix(path_)
	if data.len == 0 {
		return // Nothing to append
	}

	if mut entry := self.get_entry(path) {
		if mut entry is File {
			log.info('[DatabaseVFS] Appending ${data.len} bytes to ${path}')

			// Split new data into chunks just under 64 KB
			chunks := arrays.chunk(data, (64 * 1024) - 1)
			mut chunk_ids := entry.chunk_ids.clone() // Start with existing chunk IDs

			// Add new chunks
			for chunk in chunks {
				chunk_id := self.db_data.set(data: chunk) or {
					return error('Failed to save file data chunk: ${err}')
				}
				chunk_ids << chunk_id
				log.debug('[DatabaseVFS] Added chunk ${chunk_id} to ${path}')
			}

			// Update the file with new chunk IDs and updated size
			updated_file := File{
				metadata: vfs.Metadata{
					...entry.metadata
					size: entry.metadata.size + u64(data.len)
					modified_at: time.now().unix()
				}
				chunk_ids: chunk_ids
				parent_id: entry.parent_id
			}

			// Encode the file with all its metadata
			metadata_bytes := updated_file.encode()

			// Save the metadata_bytes to metadata_db
			metadata_db_id := self.db_metadata.set(data: metadata_bytes) or {
				return error('Failed to save file metadata on id:${entry.metadata.id}: ${err}')
			}

			self.id_table[entry.metadata.id] = metadata_db_id
		} else {
			return error('Not a file: ${path}')
		}
	} else {
		// If file doesn't exist, create it first
		self.file_create(path) or {
			return error('Failed to create file: ${err}')
		}
		// Then write data to it
		self.file_write(path, data)!
	}
}

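Usage sketch for the new append path (the vfs handle and constructor are illustrative, not part of this commit):

	mut fs := new_database_vfs()! // hypothetical constructor
	fs.file_write('/logs/a.txt', 'one'.bytes())!
	fs.file_concatenate('/logs/a.txt', 'two'.bytes())! // /logs/a.txt now holds 'onetwo'
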
pub fn (mut self DatabaseVFS) file_delete(path string) ! {
	log.info('[DatabaseVFS] Deleting file ${path}')
	parent_path := os.dir(path)