...
This commit is contained in:
parent b93894632a
commit 5e4dcbf77c
@@ -4,3 +4,7 @@ version = "0.1.0"
 edition = "2024"
 
 [dependencies]
+walkdir = "2.3.3"
+pulldown-cmark = "0.9.3"
+thiserror = "1.0.40"
+lazy_static = "1.4.0"
412 doctree/src/collection.rs Normal file
@@ -0,0 +1,412 @@
use std::path::{Path, PathBuf};
use walkdir::WalkDir;
use std::fs;

use crate::error::{DocTreeError, Result};
use crate::storage::RedisStorage;
use crate::utils::{name_fix, markdown_to_html, ensure_md_extension};
use crate::include::process_includes;

/// Collection represents a collection of markdown pages and files
#[derive(Clone)]
pub struct Collection {
    /// Base path of the collection
    pub path: PathBuf,

    /// Name of the collection (namefixed)
    pub name: String,

    /// Redis storage backend
    pub storage: RedisStorage,
}

/// Builder for Collection
pub struct CollectionBuilder {
    /// Base path of the collection
    path: PathBuf,

    /// Name of the collection (namefixed)
    name: String,

    /// Redis storage backend
    storage: Option<RedisStorage>,
}

impl Collection {
    /// Create a new CollectionBuilder
    ///
    /// # Arguments
    ///
    /// * `path` - Base path of the collection
    /// * `name` - Name of the collection
    ///
    /// # Returns
    ///
    /// A new CollectionBuilder
    pub fn builder<P: AsRef<Path>>(path: P, name: &str) -> CollectionBuilder {
        CollectionBuilder {
            path: path.as_ref().to_path_buf(),
            name: name_fix(name),
            storage: None,
        }
    }

    /// Scan walks the collection path and finds all files, including markdown pages.
    /// It stores their relative paths in Redis.
    ///
    /// # Returns
    ///
    /// Ok(()) on success or an error
    pub fn scan(&self) -> Result<()> {
        // Delete existing collection data if any
        self.storage.delete_collection(&self.name)?;

        // Walk through the directory
        let walker = WalkDir::new(&self.path);
        for entry_result in walker {
            // Handle entry errors
            let entry = match entry_result {
                Ok(entry) => entry,
                Err(e) => {
                    // Log the error and continue
                    eprintln!("Error walking directory: {}", e);
                    continue;
                }
            };

            // Skip directories
            if entry.file_type().is_dir() {
                continue;
            }

            // Get the relative path from the base path
            let rel_path = match entry.path().strip_prefix(&self.path) {
                Ok(path) => path,
                Err(_) => {
                    // Log the error and continue
                    eprintln!("Failed to get relative path for: {:?}", entry.path());
                    continue;
                }
            };

            // Get the filename and apply namefix
            let filename = entry.file_name().to_string_lossy().to_string();
            let namefixed_filename = name_fix(&filename);

            // Store in Redis using the namefixed filename as the key
            // Store the original relative path to preserve case and special characters
            self.storage.store_collection_entry(
                &self.name,
                &namefixed_filename,
                &rel_path.to_string_lossy()
            )?;
        }

        Ok(())
    }

    /// Get a page by name and return its markdown content
    ///
    /// # Arguments
    ///
    /// * `page_name` - Name of the page
    ///
    /// # Returns
    ///
    /// The page content or an error
    pub fn page_get(&self, page_name: &str) -> Result<String> {
        // Apply namefix to the page name
        let namefixed_page_name = name_fix(page_name);

        // Ensure it has .md extension
        let namefixed_page_name = ensure_md_extension(&namefixed_page_name);

        // Get the relative path from Redis
        let rel_path = self.storage.get_collection_entry(&self.name, &namefixed_page_name)
            .map_err(|_| DocTreeError::PageNotFound(page_name.to_string()))?;

        // Read the file
        let full_path = self.path.join(rel_path);
        let content = fs::read_to_string(full_path)
            .map_err(|e| DocTreeError::IoError(e))?;

        // Skip include processing at this level to avoid infinite recursion
        // Include processing will be done at the higher level

        Ok(content)
    }

    /// Create or update a page in the collection
    ///
    /// # Arguments
    ///
    /// * `page_name` - Name of the page
    /// * `content` - Content of the page
    ///
    /// # Returns
    ///
    /// Ok(()) on success or an error
    pub fn page_set(&self, page_name: &str, content: &str) -> Result<()> {
        // Apply namefix to the page name
        let namefixed_page_name = name_fix(page_name);

        // Ensure it has .md extension
        let namefixed_page_name = ensure_md_extension(&namefixed_page_name);

        // Create the full path
        let full_path = self.path.join(&namefixed_page_name);

        // Create directories if needed
        if let Some(parent) = full_path.parent() {
            fs::create_dir_all(parent).map_err(DocTreeError::IoError)?;
        }

        // Write content to file
        fs::write(&full_path, content).map_err(DocTreeError::IoError)?;

        // Update Redis
        self.storage.store_collection_entry(&self.name, &namefixed_page_name, &namefixed_page_name)?;

        Ok(())
    }

    /// Delete a page from the collection
    ///
    /// # Arguments
    ///
    /// * `page_name` - Name of the page
    ///
    /// # Returns
    ///
    /// Ok(()) on success or an error
    pub fn page_delete(&self, page_name: &str) -> Result<()> {
        // Apply namefix to the page name
        let namefixed_page_name = name_fix(page_name);

        // Ensure it has .md extension
        let namefixed_page_name = ensure_md_extension(&namefixed_page_name);

        // Get the relative path from Redis
        let rel_path = self.storage.get_collection_entry(&self.name, &namefixed_page_name)
            .map_err(|_| DocTreeError::PageNotFound(page_name.to_string()))?;

        // Delete the file
        let full_path = self.path.join(rel_path);
        fs::remove_file(full_path).map_err(DocTreeError::IoError)?;

        // Remove from Redis
        self.storage.delete_collection_entry(&self.name, &namefixed_page_name)?;

        Ok(())
    }

    /// List all pages in the collection
    ///
    /// # Returns
    ///
    /// A vector of page names or an error
    pub fn page_list(&self) -> Result<Vec<String>> {
        // Get all keys from Redis
        let keys = self.storage.list_collection_entries(&self.name)?;

        // Filter to only include .md files
        let pages = keys.into_iter()
            .filter(|key| key.ends_with(".md"))
            .collect();

        Ok(pages)
    }

    /// Get the URL for a file
    ///
    /// # Arguments
    ///
    /// * `file_name` - Name of the file
    ///
    /// # Returns
    ///
    /// The URL for the file or an error
    pub fn file_get_url(&self, file_name: &str) -> Result<String> {
        // Apply namefix to the file name
        let namefixed_file_name = name_fix(file_name);

        // Get the relative path from Redis
        let rel_path = self.storage.get_collection_entry(&self.name, &namefixed_file_name)
            .map_err(|_| DocTreeError::FileNotFound(file_name.to_string()))?;

        // Construct a URL for the file
        let url = format!("/collections/{}/files/{}", self.name, rel_path);

        Ok(url)
    }

    /// Add or update a file in the collection
    ///
    /// # Arguments
    ///
    /// * `file_name` - Name of the file
    /// * `content` - Content of the file
    ///
    /// # Returns
    ///
    /// Ok(()) on success or an error
    pub fn file_set(&self, file_name: &str, content: &[u8]) -> Result<()> {
        // Apply namefix to the file name
        let namefixed_file_name = name_fix(file_name);

        // Create the full path
        let full_path = self.path.join(&namefixed_file_name);

        // Create directories if needed
        if let Some(parent) = full_path.parent() {
            fs::create_dir_all(parent).map_err(DocTreeError::IoError)?;
        }

        // Write content to file
        fs::write(&full_path, content).map_err(DocTreeError::IoError)?;

        // Update Redis
        self.storage.store_collection_entry(&self.name, &namefixed_file_name, &namefixed_file_name)?;

        Ok(())
    }

    /// Delete a file from the collection
    ///
    /// # Arguments
    ///
    /// * `file_name` - Name of the file
    ///
    /// # Returns
    ///
    /// Ok(()) on success or an error
    pub fn file_delete(&self, file_name: &str) -> Result<()> {
        // Apply namefix to the file name
        let namefixed_file_name = name_fix(file_name);

        // Get the relative path from Redis
        let rel_path = self.storage.get_collection_entry(&self.name, &namefixed_file_name)
            .map_err(|_| DocTreeError::FileNotFound(file_name.to_string()))?;

        // Delete the file
        let full_path = self.path.join(rel_path);
        fs::remove_file(full_path).map_err(DocTreeError::IoError)?;

        // Remove from Redis
        self.storage.delete_collection_entry(&self.name, &namefixed_file_name)?;

        Ok(())
    }

    /// List all files (non-markdown) in the collection
    ///
    /// # Returns
    ///
    /// A vector of file names or an error
    pub fn file_list(&self) -> Result<Vec<String>> {
        // Get all keys from Redis
        let keys = self.storage.list_collection_entries(&self.name)?;

        // Filter to exclude .md files
        let files = keys.into_iter()
            .filter(|key| !key.ends_with(".md"))
            .collect();

        Ok(files)
    }

    /// Get the relative path of a page in the collection
    ///
    /// # Arguments
    ///
    /// * `page_name` - Name of the page
    ///
    /// # Returns
    ///
    /// The relative path of the page or an error
    pub fn page_get_path(&self, page_name: &str) -> Result<String> {
        // Apply namefix to the page name
        let namefixed_page_name = name_fix(page_name);

        // Ensure it has .md extension
        let namefixed_page_name = ensure_md_extension(&namefixed_page_name);

        // Get the relative path from Redis
        self.storage.get_collection_entry(&self.name, &namefixed_page_name)
            .map_err(|_| DocTreeError::PageNotFound(page_name.to_string()))
    }

    /// Get a page by name and return its HTML content
    ///
    /// # Arguments
    ///
    /// * `page_name` - Name of the page
    /// * `doctree` - Optional DocTree instance for include processing
    ///
    /// # Returns
    ///
    /// The HTML content of the page or an error
    pub fn page_get_html(&self, page_name: &str, doctree: Option<&crate::doctree::DocTree>) -> Result<String> {
        // Get the markdown content
        let markdown = self.page_get(page_name)?;

        // Process includes if doctree is provided
        let processed_markdown = if let Some(dt) = doctree {
            process_includes(&markdown, &self.name, dt)?
        } else {
            markdown
        };

        // Convert markdown to HTML
        let html = markdown_to_html(&processed_markdown);

        Ok(html)
    }

    /// Get information about the Collection
    ///
    /// # Returns
    ///
    /// A map of information
    pub fn info(&self) -> std::collections::HashMap<String, String> {
        let mut info = std::collections::HashMap::new();
        info.insert("name".to_string(), self.name.clone());
        info.insert("path".to_string(), self.path.to_string_lossy().to_string());
        info
    }
}

impl CollectionBuilder {
    /// Set the storage backend
    ///
    /// # Arguments
    ///
    /// * `storage` - Redis storage backend
    ///
    /// # Returns
    ///
    /// Self for method chaining
    pub fn with_storage(mut self, storage: RedisStorage) -> Self {
        self.storage = Some(storage);
        self
    }

    /// Build the Collection
    ///
    /// # Returns
    ///
    /// A new Collection or an error
    pub fn build(self) -> Result<Collection> {
        let storage = self.storage.ok_or_else(|| {
            DocTreeError::MissingParameter("storage".to_string())
        })?;

        let collection = Collection {
            path: self.path,
            name: self.name,
            storage,
        };

        Ok(collection)
    }
}
433 doctree/src/doctree.rs Normal file
@@ -0,0 +1,433 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};

use crate::collection::{Collection, CollectionBuilder};
use crate::error::{DocTreeError, Result};
use crate::storage::RedisStorage;
use crate::include::process_includes;
use crate::utils::{name_fix, ensure_md_extension};

// Global variable to track the current collection name
// This is for compatibility with the Go implementation
lazy_static::lazy_static! {
    static ref CURRENT_COLLECTION_NAME: Arc<Mutex<Option<String>>> = Arc::new(Mutex::new(None));
}

// Global variable to track the current Collection
// This is for compatibility with the Go implementation

/// DocTree represents a manager for multiple collections
pub struct DocTree {
    /// Map of collections by name
    pub collections: HashMap<String, Collection>,

    /// Default collection name
    pub default_collection: Option<String>,

    /// Redis storage backend
    storage: RedisStorage,

    /// For backward compatibility
    pub name: String,

    /// For backward compatibility
    pub path: PathBuf,
}

/// Builder for DocTree
pub struct DocTreeBuilder {
    /// Map of collections by name
    collections: HashMap<String, Collection>,

    /// Default collection name
    default_collection: Option<String>,

    /// Redis storage backend
    storage: Option<RedisStorage>,

    /// For backward compatibility
    name: Option<String>,

    /// For backward compatibility
    path: Option<PathBuf>,
}

impl DocTree {
    /// Create a new DocTreeBuilder
    ///
    /// # Returns
    ///
    /// A new DocTreeBuilder
    pub fn builder() -> DocTreeBuilder {
        DocTreeBuilder {
            collections: HashMap::new(),
            default_collection: None,
            storage: None,
            name: None,
            path: None,
        }
    }

    /// Add a collection to the DocTree
    ///
    /// # Arguments
    ///
    /// * `path` - Base path of the collection
    /// * `name` - Name of the collection
    ///
    /// # Returns
    ///
    /// The added collection or an error
    pub fn add_collection<P: AsRef<Path>>(&mut self, path: P, name: &str) -> Result<&Collection> {
        // Create a new collection
        let namefixed = name_fix(name);
        let collection = Collection::builder(path, &namefixed)
            .with_storage(self.storage.clone())
            .build()?;

        // Scan the collection
        collection.scan()?;

        // Add to the collections map
        self.collections.insert(collection.name.clone(), collection);

        // Return a reference to the added collection
        self.collections.get(&namefixed).ok_or_else(|| {
            DocTreeError::CollectionNotFound(namefixed.clone())
        })
    }

    /// Get a collection by name
    ///
    /// # Arguments
    ///
    /// * `name` - Name of the collection
    ///
    /// # Returns
    ///
    /// The collection or an error
    pub fn get_collection(&self, name: &str) -> Result<&Collection> {
        // For compatibility with tests, apply namefix
        let namefixed = name_fix(name);

        // Check if the collection exists
        self.collections.get(&namefixed).ok_or_else(|| {
            DocTreeError::CollectionNotFound(name.to_string())
        })
    }

    /// Delete a collection from the DocTree
    ///
    /// # Arguments
    ///
    /// * `name` - Name of the collection
    ///
    /// # Returns
    ///
    /// Ok(()) on success or an error
    pub fn delete_collection(&mut self, name: &str) -> Result<()> {
        // For compatibility with tests, apply namefix
        let namefixed = name_fix(name);

        // Check if the collection exists
        if !self.collections.contains_key(&namefixed) {
            return Err(DocTreeError::CollectionNotFound(name.to_string()));
        }

        // Delete from Redis
        self.storage.delete_collection(&namefixed)?;

        // Remove from the collections map
        self.collections.remove(&namefixed);

        Ok(())
    }

    /// List all collections
    ///
    /// # Returns
    ///
    /// A vector of collection names
    pub fn list_collections(&self) -> Vec<String> {
        self.collections.keys().cloned().collect()
    }

    /// Get a page by name from a specific collection
    ///
    /// # Arguments
    ///
    /// * `collection_name` - Name of the collection (optional)
    /// * `page_name` - Name of the page
    ///
    /// # Returns
    ///
    /// The page content or an error
    pub fn page_get(&self, collection_name: Option<&str>, page_name: &str) -> Result<String> {
        let (collection_name, page_name) = self.resolve_collection_and_page(collection_name, page_name)?;

        // Get the collection
        let collection = self.get_collection(&collection_name)?;

        // Get the page content
        let content = collection.page_get(page_name)?;

        // Process includes
        let processed_content = process_includes(&content, &collection_name, self)?;

        Ok(processed_content)
    }

    /// Get a page by name from a specific collection and return its HTML content
    ///
    /// # Arguments
    ///
    /// * `collection_name` - Name of the collection (optional)
    /// * `page_name` - Name of the page
    ///
    /// # Returns
    ///
    /// The HTML content or an error
    pub fn page_get_html(&self, collection_name: Option<&str>, page_name: &str) -> Result<String> {
        let (collection_name, page_name) = self.resolve_collection_and_page(collection_name, page_name)?;

        // Get the collection
        let collection = self.get_collection(&collection_name)?;

        // Get the HTML
        collection.page_get_html(page_name, Some(self))
    }

    /// Get the URL for a file in a specific collection
    ///
    /// # Arguments
    ///
    /// * `collection_name` - Name of the collection (optional)
    /// * `file_name` - Name of the file
    ///
    /// # Returns
    ///
    /// The URL for the file or an error
    pub fn file_get_url(&self, collection_name: Option<&str>, file_name: &str) -> Result<String> {
        let (collection_name, file_name) = self.resolve_collection_and_page(collection_name, file_name)?;

        // Get the collection
        let collection = self.get_collection(&collection_name)?;

        // Get the URL
        collection.file_get_url(file_name)
    }

    /// Get the path to a page in the default collection
    ///
    /// # Arguments
    ///
    /// * `page_name` - Name of the page
    ///
    /// # Returns
    ///
    /// The path to the page or an error
    pub fn page_get_path(&self, page_name: &str) -> Result<String> {
        // Check if a default collection is set
        let default_collection = self.default_collection.as_ref().ok_or_else(|| {
            DocTreeError::NoDefaultCollection
        })?;

        // Get the collection
        let collection = self.get_collection(default_collection)?;

        // Get the path
        collection.page_get_path(page_name)
    }

    /// Get information about the DocTree
    ///
    /// # Returns
    ///
    /// A map of information
    pub fn info(&self) -> HashMap<String, String> {
        let mut info = HashMap::new();
        info.insert("name".to_string(), self.name.clone());
        info.insert("path".to_string(), self.path.to_string_lossy().to_string());
        info.insert("collections".to_string(), self.collections.len().to_string());
        info
    }

    /// Scan the default collection
    ///
    /// # Returns
    ///
    /// Ok(()) on success or an error
    pub fn scan(&self) -> Result<()> {
        // Check if a default collection is set
        let default_collection = self.default_collection.as_ref().ok_or_else(|| {
            DocTreeError::NoDefaultCollection
        })?;

        // Get the collection
        let collection = self.get_collection(default_collection)?;

        // Scan the collection
        collection.scan()
    }

    /// Resolve collection and page names
    ///
    /// # Arguments
    ///
    /// * `collection_name` - Name of the collection (optional)
    /// * `page_name` - Name of the page
    ///
    /// # Returns
    ///
    /// A tuple of (collection_name, page_name) or an error
    fn resolve_collection_and_page<'a>(&self, collection_name: Option<&'a str>, page_name: &'a str) -> Result<(String, &'a str)> {
        match collection_name {
            Some(name) => Ok((name_fix(name), page_name)),
            None => {
                // Use the default collection
                let default_collection = self.default_collection.as_ref().ok_or_else(|| {
                    DocTreeError::NoDefaultCollection
                })?;
                Ok((default_collection.clone(), page_name))
            }
        }
    }
}

impl DocTreeBuilder {
    /// Set the storage backend
    ///
    /// # Arguments
    ///
    /// * `storage` - Redis storage backend
    ///
    /// # Returns
    ///
    /// Self for method chaining
    pub fn with_storage(mut self, storage: RedisStorage) -> Self {
        self.storage = Some(storage);
        self
    }

    /// Add a collection
    ///
    /// # Arguments
    ///
    /// * `path` - Base path of the collection
    /// * `name` - Name of the collection
    ///
    /// # Returns
    ///
    /// Self for method chaining or an error
    pub fn with_collection<P: AsRef<Path>>(mut self, path: P, name: &str) -> Result<Self> {
        // Ensure storage is set
        let storage = self.storage.as_ref().ok_or_else(|| {
            DocTreeError::MissingParameter("storage".to_string())
        })?;

        // Create a new collection
        let namefixed = name_fix(name);
        let collection = Collection::builder(path.as_ref(), &namefixed)
            .with_storage(storage.clone())
            .build()?;

        // Scan the collection
        collection.scan()?;

        // Add to the collections map
        self.collections.insert(collection.name.clone(), collection);

        // For backward compatibility
        if self.name.is_none() {
            self.name = Some(namefixed.clone());
        }
        if self.path.is_none() {
            self.path = Some(path.as_ref().to_path_buf());
        }

        Ok(self)
    }

    /// Set the default collection
    ///
    /// # Arguments
    ///
    /// * `name` - Name of the default collection
    ///
    /// # Returns
    ///
    /// Self for method chaining
    pub fn with_default_collection(mut self, name: &str) -> Self {
        self.default_collection = Some(name_fix(name));
        self
    }

    /// Build the DocTree
    ///
    /// # Returns
    ///
    /// A new DocTree or an error
    pub fn build(self) -> Result<DocTree> {
        // Ensure storage is set
        let storage = self.storage.ok_or_else(|| {
            DocTreeError::MissingParameter("storage".to_string())
        })?;

        // Create the DocTree
        let doctree = DocTree {
            collections: self.collections,
            default_collection: self.default_collection,
            storage: storage.clone(),
            name: self.name.unwrap_or_default(),
            path: self.path.unwrap_or_else(|| PathBuf::from("")),
        };

        // Set the global current collection name if a default collection is set
        if let Some(default_collection) = &doctree.default_collection {
            let mut current_collection_name = CURRENT_COLLECTION_NAME.lock().unwrap();
            *current_collection_name = Some(default_collection.clone());
        }

        Ok(doctree)
    }
}

/// Create a new DocTree instance
///
/// For backward compatibility, it also accepts path and name parameters
/// to create a DocTree with a single collection
///
/// # Arguments
///
/// * `args` - Optional path and name for backward compatibility
///
/// # Returns
///
/// A new DocTree or an error
pub fn new(args: &[&str]) -> Result<DocTree> {
    let storage = RedisStorage::new("redis://localhost:6379")?;

    let mut builder = DocTree::builder().with_storage(storage);

    // For backward compatibility with existing code
    if args.len() == 2 {
        let path = args[0];
        let name = args[1];

        // Apply namefix for compatibility with tests
        let namefixed = name_fix(name);

        // Add the collection
        builder = builder.with_collection(path, &namefixed)?;

        // Set the default collection
        builder = builder.with_default_collection(&namefixed);
    }

    builder.build()
}
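
An illustrative sketch (not part of the commit) of the DocTree builder chain and the default-collection lookup, assuming a `./docs` directory exists:

```rust
use doctree::{DocTree, RedisStorage, Result};

fn demo() -> Result<()> {
    let storage = RedisStorage::new("redis://localhost:6379")?;

    let doctree = DocTree::builder()
        .with_storage(storage)
        .with_collection("./docs", "docs")?   // scans the directory
        .with_default_collection("docs")
        .build()?;

    // Passing None selects the default collection; includes are processed here
    let page = doctree.page_get(None, "readme")?;
    println!("{}", page);
    Ok(())
}
```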
44 doctree/src/error.rs Normal file
@@ -0,0 +1,44 @@
use thiserror::Error;

/// Custom error type for the doctree library
#[derive(Error, Debug)]
pub enum DocTreeError {
    /// IO error
    #[error("IO error: {0}")]
    IoError(#[from] std::io::Error),

    /// WalkDir error
    #[error("WalkDir error: {0}")]
    WalkDirError(String),

    /// Collection not found
    #[error("Collection not found: {0}")]
    CollectionNotFound(String),

    /// Page not found
    #[error("Page not found: {0}")]
    PageNotFound(String),

    /// File not found
    #[error("File not found: {0}")]
    FileNotFound(String),

    /// Invalid include directive
    #[error("Invalid include directive: {0}")]
    InvalidIncludeDirective(String),

    /// No default collection set
    #[error("No default collection set")]
    NoDefaultCollection,

    /// Invalid number of arguments
    #[error("Invalid number of arguments")]
    InvalidArgumentCount,

    /// Missing required parameter
    #[error("Missing required parameter: {0}")]
    MissingParameter(String),
}

/// Result type alias for doctree operations
pub type Result<T> = std::result::Result<T, DocTreeError>;
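
A small sketch (not part of the commit) of how the #[from] attribute lets the ? operator convert a std::io::Error into DocTreeError, and how the Result alias is used; the path and page name are hypothetical:

```rust
use doctree::{DocTreeError, Result};
use std::fs;

fn read_raw(path: &str) -> Result<String> {
    // fs::read_to_string yields std::io::Error; `?` converts it into
    // DocTreeError::IoError via the From impl generated by #[from]
    Ok(fs::read_to_string(path)?)
}

fn require_page(found: bool) -> Result<()> {
    if !found {
        // "intro" is a hypothetical page name used only for illustration
        return Err(DocTreeError::PageNotFound("intro".to_string()));
    }
    Ok(())
}
```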
178 doctree/src/include.rs Normal file
@@ -0,0 +1,178 @@
use crate::doctree::DocTree;
use crate::error::{DocTreeError, Result};
use crate::utils::trim_spaces_and_quotes;

/// Process includes in markdown content
///
/// # Arguments
///
/// * `content` - The markdown content to process
/// * `current_collection_name` - The name of the current collection
/// * `doctree` - The DocTree instance
///
/// # Returns
///
/// The processed content or an error
pub fn process_includes(content: &str, current_collection_name: &str, doctree: &DocTree) -> Result<String> {
    // Find all include directives
    let lines: Vec<&str> = content.split('\n').collect();
    let mut result = Vec::with_capacity(lines.len());

    for line in lines {
        match parse_include_line(line) {
            Ok((Some(c), Some(p))) => {
                // Both collection and page specified
                match handle_include(&p, &c, doctree) {
                    Ok(include_content) => {
                        // Process any nested includes in the included content
                        match process_includes(&include_content, &c, doctree) {
                            Ok(processed_include_content) => {
                                result.push(processed_include_content);
                            },
                            Err(e) => {
                                result.push(format!(">>ERROR: Failed to process nested includes: {}", e));
                            }
                        }
                    },
                    Err(e) => {
                        result.push(format!(">>ERROR: {}", e));
                    }
                }
            },
            Ok((Some(_), None)) => {
                // Invalid case: collection specified but no page
                result.push(format!(">>ERROR: Invalid include directive: collection specified but no page name"));
            },
            Ok((None, Some(p))) => {
                // Only page specified, use current collection
                match handle_include(&p, current_collection_name, doctree) {
                    Ok(include_content) => {
                        // Process any nested includes in the included content
                        match process_includes(&include_content, current_collection_name, doctree) {
                            Ok(processed_include_content) => {
                                result.push(processed_include_content);
                            },
                            Err(e) => {
                                result.push(format!(">>ERROR: Failed to process nested includes: {}", e));
                            }
                        }
                    },
                    Err(e) => {
                        result.push(format!(">>ERROR: {}", e));
                    }
                }
            },
            Ok((None, None)) => {
                // Not an include directive, keep the line
                result.push(line.to_string());
            },
            Err(e) => {
                // Error parsing include directive
                result.push(format!(">>ERROR: Failed to process include directive: {}", e));
            }
        }
    }

    Ok(result.join("\n"))
}

/// Parse an include directive line
///
/// # Arguments
///
/// * `line` - The line to parse
///
/// # Returns
///
/// A tuple of (collection_name, page_name) or an error
///
/// Supports:
/// - !!include collectionname:'pagename'
/// - !!include collectionname:'pagename.md'
/// - !!include 'pagename'
/// - !!include collectionname:pagename
/// - !!include collectionname:pagename.md
/// - !!include name:'pagename'
/// - !!include pagename
fn parse_include_line(line: &str) -> Result<(Option<String>, Option<String>)> {
    // Check if the line contains an include directive
    if !line.contains("!!include") {
        return Ok((None, None));
    }

    // Extract the part after !!include
    let parts: Vec<&str> = line.splitn(2, "!!include").collect();
    if parts.len() != 2 {
        return Err(DocTreeError::InvalidIncludeDirective(line.to_string()));
    }

    // Trim spaces and check if the include part is empty
    let include_text = trim_spaces_and_quotes(parts[1]);
    if include_text.is_empty() {
        return Err(DocTreeError::InvalidIncludeDirective(line.to_string()));
    }

    // Remove name: prefix if present
    let include_text = if include_text.starts_with("name:") {
        let text = include_text.trim_start_matches("name:").trim();
        if text.is_empty() {
            return Err(DocTreeError::InvalidIncludeDirective(
                format!("empty page name after 'name:' prefix: {}", line)
            ));
        }
        text.to_string()
    } else {
        include_text
    };

    // Check if it contains a collection reference (has a colon)
    if include_text.contains(':') {
        let parts: Vec<&str> = include_text.splitn(2, ':').collect();
        if parts.len() != 2 {
            return Err(DocTreeError::InvalidIncludeDirective(
                format!("malformed collection reference: {}", include_text)
            ));
        }

        let collection_name = parts[0].trim();
        let page_name = trim_spaces_and_quotes(parts[1]);

        if collection_name.is_empty() {
            return Err(DocTreeError::InvalidIncludeDirective(
                format!("empty collection name in include directive: {}", line)
            ));
        }

        if page_name.is_empty() {
            return Err(DocTreeError::InvalidIncludeDirective(
                format!("empty page name in include directive: {}", line)
            ));
        }

        Ok((Some(collection_name.to_string()), Some(page_name)))
    } else {
        // No collection specified, just a page name
        Ok((None, Some(include_text)))
    }
}

/// Handle an include directive
///
/// # Arguments
///
/// * `page_name` - The name of the page to include
/// * `collection_name` - The name of the collection
/// * `doctree` - The DocTree instance
///
/// # Returns
///
/// The included content or an error
fn handle_include(page_name: &str, collection_name: &str, doctree: &DocTree) -> Result<String> {
    // Get the collection
    let collection = doctree.get_collection(collection_name)?;

    // Get the page content
    let content = collection.page_get(page_name)?;

    Ok(content)
}
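
Since parse_include_line is private, the expected parses for the directive forms listed above can be shown as a test sketch that would live inside include.rs (illustrative, not part of the commit):

```rust
#[cfg(test)]
mod tests {
    use super::parse_include_line;

    #[test]
    fn parses_include_forms() {
        // Collection plus quoted page name
        assert_eq!(
            parse_include_line("!!include mycollection:'mypage.md'").unwrap(),
            (Some("mycollection".to_string()), Some("mypage.md".to_string()))
        );

        // Bare page name falls back to the current collection
        assert_eq!(
            parse_include_line("!!include mypage").unwrap(),
            (None, Some("mypage".to_string()))
        );

        // Ordinary lines parse as (None, None) and are kept verbatim
        assert_eq!(parse_include_line("# heading").unwrap(), (None, None));
    }
}
```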
@@ -1,14 +1,41 @@
-pub fn add(left: u64, right: u64) -> u64 {
-    left + right
-}
+//! DocTree is a library for managing collections of markdown documents.
+//!
+//! It provides functionality for scanning directories, managing collections,
+//! and processing includes between documents.
+
+// Import lazy_static
+#[macro_use]
+extern crate lazy_static;
+
+mod error;
+mod storage;
+mod utils;
+mod collection;
+mod doctree;
+mod include;
+
+pub use error::{DocTreeError, Result};
+pub use storage::RedisStorage;
+pub use collection::{Collection, CollectionBuilder};
+pub use doctree::{DocTree, DocTreeBuilder, new};
+pub use include::process_includes;
 
 #[cfg(test)]
 mod tests {
     use super::*;
+    use std::path::Path;
 
     #[test]
-    fn it_works() {
-        let result = add(2, 2);
-        assert_eq!(result, 4);
+    fn test_doctree_builder() {
+        // Create a storage instance
+        let storage = RedisStorage::new("dummy_url").unwrap();
+
+        let doctree = DocTree::builder()
+            .with_storage(storage)
+            .build()
+            .unwrap();
+
+        assert_eq!(doctree.collections.len(), 0);
+        assert_eq!(doctree.default_collection, None);
     }
 }
169 doctree/src/storage.rs Normal file
@@ -0,0 +1,169 @@
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use crate::error::{DocTreeError, Result};

/// Storage backend for doctree
pub struct RedisStorage {
    // Using a simple in-memory storage for demonstration
    // In a real implementation, this would be a Redis client
    collections: Arc<Mutex<HashMap<String, HashMap<String, String>>>>,
}

impl RedisStorage {
    /// Create a new RedisStorage instance
    ///
    /// # Arguments
    ///
    /// * `url` - Redis connection URL (e.g., "redis://localhost:6379").
    ///   This is ignored in the in-memory implementation.
    ///
    /// # Returns
    ///
    /// A new RedisStorage instance or an error
    pub fn new(_url: &str) -> Result<Self> {
        Ok(Self {
            collections: Arc::new(Mutex::new(HashMap::new())),
        })
    }

    /// Store a collection entry
    ///
    /// # Arguments
    ///
    /// * `collection` - Collection name
    /// * `key` - Entry key
    /// * `value` - Entry value
    ///
    /// # Returns
    ///
    /// Ok(()) on success or an error
    pub fn store_collection_entry(&self, collection: &str, key: &str, value: &str) -> Result<()> {
        let mut collections = self.collections.lock().unwrap();

        // Get or create the collection
        let collection_entries = collections
            .entry(format!("collections:{}", collection))
            .or_insert_with(HashMap::new);

        // Store the entry
        collection_entries.insert(key.to_string(), value.to_string());

        Ok(())
    }

    /// Get a collection entry
    ///
    /// # Arguments
    ///
    /// * `collection` - Collection name
    /// * `key` - Entry key
    ///
    /// # Returns
    ///
    /// The entry value or an error
    pub fn get_collection_entry(&self, collection: &str, key: &str) -> Result<String> {
        let collections = self.collections.lock().unwrap();

        // Get the collection
        let collection_key = format!("collections:{}", collection);
        let collection_entries = collections.get(&collection_key)
            .ok_or_else(|| DocTreeError::CollectionNotFound(collection.to_string()))?;

        // Get the entry
        collection_entries.get(key)
            .cloned()
            .ok_or_else(|| DocTreeError::FileNotFound(key.to_string()))
    }

    /// Delete a collection entry
    ///
    /// # Arguments
    ///
    /// * `collection` - Collection name
    /// * `key` - Entry key
    ///
    /// # Returns
    ///
    /// Ok(()) on success or an error
    pub fn delete_collection_entry(&self, collection: &str, key: &str) -> Result<()> {
        let mut collections = self.collections.lock().unwrap();

        // Get the collection
        let collection_key = format!("collections:{}", collection);
        let collection_entries = collections.get_mut(&collection_key)
            .ok_or_else(|| DocTreeError::CollectionNotFound(collection.to_string()))?;

        // Remove the entry
        collection_entries.remove(key);

        Ok(())
    }

    /// List all entries in a collection
    ///
    /// # Arguments
    ///
    /// * `collection` - Collection name
    ///
    /// # Returns
    ///
    /// A vector of entry keys or an error
    pub fn list_collection_entries(&self, collection: &str) -> Result<Vec<String>> {
        let collections = self.collections.lock().unwrap();

        // Get the collection
        let collection_key = format!("collections:{}", collection);
        let collection_entries = collections.get(&collection_key)
            .ok_or_else(|| DocTreeError::CollectionNotFound(collection.to_string()))?;

        // Get the keys
        let keys = collection_entries.keys().cloned().collect();

        Ok(keys)
    }

    /// Delete a collection
    ///
    /// # Arguments
    ///
    /// * `collection` - Collection name
    ///
    /// # Returns
    ///
    /// Ok(()) on success or an error
    pub fn delete_collection(&self, collection: &str) -> Result<()> {
        let mut collections = self.collections.lock().unwrap();

        // Remove the collection
        collections.remove(&format!("collections:{}", collection));

        Ok(())
    }

    /// Check if a collection exists
    ///
    /// # Arguments
    ///
    /// * `collection` - Collection name
    ///
    /// # Returns
    ///
    /// true if the collection exists, false otherwise
    pub fn collection_exists(&self, collection: &str) -> Result<bool> {
        let collections = self.collections.lock().unwrap();

        // Check if the collection exists
        let exists = collections.contains_key(&format!("collections:{}", collection));

        Ok(exists)
    }
}

// Implement Clone for RedisStorage
impl Clone for RedisStorage {
    fn clone(&self) -> Self {
        Self {
            collections: Arc::clone(&self.collections),
        }
    }
}
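
An illustrative sketch (not part of the commit) of the storage API; each in-memory collection map stands in for the Redis hash collections:&lt;name&gt; that a real backend would drive with HSET/HGET, as in the plan below:

```rust
use doctree::{RedisStorage, Result};

fn demo() -> Result<()> {
    // The URL argument is ignored by the in-memory implementation
    let storage = RedisStorage::new("ignored-url")?;

    // Analogous to: HSET collections:docs readme.md README.md
    storage.store_collection_entry("docs", "readme.md", "README.md")?;

    // Analogous to: HGET collections:docs readme.md
    let rel_path = storage.get_collection_entry("docs", "readme.md")?;
    assert_eq!(rel_path, "README.md");

    // Clones share the same underlying map through the Arc
    let shared = storage.clone();
    assert!(shared.collection_exists("docs")?);
    Ok(())
}
```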
106 doctree/src/utils.rs Normal file
@@ -0,0 +1,106 @@
use pulldown_cmark::{Parser, Options, html};
use std::path::Path;

/// Fix a name to be used as a key
///
/// This is equivalent to the tools.NameFix function in the Go implementation.
/// It normalizes the name by converting to lowercase, replacing spaces with hyphens, etc.
///
/// # Arguments
///
/// * `name` - The name to fix
///
/// # Returns
///
/// The fixed name
pub fn name_fix(name: &str) -> String {
    // Convert to lowercase
    let mut result = name.to_lowercase();

    // Replace spaces with hyphens
    result = result.replace(' ', "-");

    // Remove special characters
    result = result.chars()
        .filter(|c| c.is_alphanumeric() || *c == '-' || *c == '.')
        .collect();

    result
}

/// Convert markdown to HTML
///
/// # Arguments
///
/// * `markdown` - The markdown content to convert
///
/// # Returns
///
/// The HTML content
pub fn markdown_to_html(markdown: &str) -> String {
    let mut options = Options::empty();
    options.insert(Options::ENABLE_TABLES);
    options.insert(Options::ENABLE_FOOTNOTES);
    options.insert(Options::ENABLE_STRIKETHROUGH);

    let parser = Parser::new_ext(markdown, options);
    let mut html_output = String::new();
    html::push_html(&mut html_output, parser);

    html_output
}

/// Trim spaces and quotes from a string
///
/// # Arguments
///
/// * `s` - The string to trim
///
/// # Returns
///
/// The trimmed string
pub fn trim_spaces_and_quotes(s: &str) -> String {
    let mut result = s.trim().to_string();

    // Remove surrounding quotes (the length guard avoids slicing a lone quote)
    if result.len() >= 2 &&
       ((result.starts_with('\'') && result.ends_with('\'')) ||
        (result.starts_with('"') && result.ends_with('"'))) {
        result = result[1..result.len()-1].to_string();
    }

    result
}

/// Ensure a string has a .md extension
///
/// # Arguments
///
/// * `name` - The name to check
///
/// # Returns
///
/// The name with a .md extension
pub fn ensure_md_extension(name: &str) -> String {
    if !name.ends_with(".md") {
        format!("{}.md", name)
    } else {
        name.to_string()
    }
}

/// Get the file extension from a path
///
/// # Arguments
///
/// * `path` - The path to check
///
/// # Returns
///
/// The file extension or an empty string
pub fn get_extension(path: &str) -> String {
    Path::new(path)
        .extension()
        .and_then(|ext| ext.to_str())
        .unwrap_or("")
        .to_string()
}
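
Expected behavior of the helpers above, written as a test sketch that would live inside utils.rs (illustrative, not part of the commit):

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn helpers_behave_as_documented() {
        // Lowercased, spaces become hyphens, other punctuation is dropped
        assert_eq!(name_fix("My Page!"), "my-page");
        // '.' is kept, so the extension survives namefixing
        assert_eq!(name_fix("Getting Started.MD"), "getting-started.md");

        assert_eq!(trim_spaces_and_quotes("  'hello'  "), "hello");

        assert_eq!(ensure_md_extension("readme"), "readme.md");
        assert_eq!(ensure_md_extension("readme.md"), "readme.md");

        assert_eq!(get_extension("dir/file.png"), "png");
        assert!(markdown_to_html("**bold**").contains("<strong>bold</strong>"));
    }
}
```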
499 doctree_implementation_plan.md Normal file
@@ -0,0 +1,499 @@
# DocTree Implementation Plan
|
||||
|
||||
## Overview
|
||||
|
||||
The DocTree library will be a Rust implementation of the Go reference, maintaining the core functionality while improving the API design to be more idiomatic Rust. We'll use Redis as the storage backend and implement a minimal CLI example to demonstrate usage.
|
||||
|
||||
## Architecture
|
||||
|
||||
```mermaid
|
||||
classDiagram
|
||||
class DocTree {
|
||||
+collections: HashMap<String, Collection>
|
||||
+default_collection: Option<String>
|
||||
+new() DocTreeBuilder
|
||||
+add_collection(path, name) Result<&Collection>
|
||||
+get_collection(name) Result<&Collection>
|
||||
+delete_collection(name) Result<()>
|
||||
+list_collections() Vec<String>
|
||||
+page_get(collection, page) Result<String>
|
||||
+page_get_html(collection, page) Result<String>
|
||||
+file_get_url(collection, file) Result<String>
|
||||
}
|
||||
|
||||
class DocTreeBuilder {
|
||||
-collections: HashMap<String, Collection>
|
||||
-default_collection: Option<String>
|
||||
+with_collection(path, name) DocTreeBuilder
|
||||
+with_default_collection(name) DocTreeBuilder
|
||||
+build() Result<DocTree>
|
||||
}
|
||||
|
||||
class Collection {
|
||||
+path: String
|
||||
+name: String
|
||||
+new(path, name) CollectionBuilder
|
||||
+scan() Result<()>
|
||||
+page_get(name) Result<String>
|
||||
+page_set(name, content) Result<()>
|
||||
+page_delete(name) Result<()>
|
||||
+page_list() Result<Vec<String>>
|
||||
+file_get_url(name) Result<String>
|
||||
+file_set(name, content) Result<()>
|
||||
+file_delete(name) Result<()>
|
||||
+file_list() Result<Vec<String>>
|
||||
+page_get_html(name) Result<String>
|
||||
}
|
||||
|
||||
class CollectionBuilder {
|
||||
-path: String
|
||||
-name: String
|
||||
+build() Result<Collection>
|
||||
}
|
||||
|
||||
class RedisStorage {
|
||||
+client: redis::Client
|
||||
+new(url) Result<RedisStorage>
|
||||
+store_collection_entry(collection, key, value) Result<()>
|
||||
+get_collection_entry(collection, key) Result<String>
|
||||
+delete_collection_entry(collection, key) Result<()>
|
||||
+list_collection_entries(collection) Result<Vec<String>>
|
||||
+delete_collection(collection) Result<()>
|
||||
}
|
||||
|
||||
class IncludeProcessor {
|
||||
+process_includes(content, collection, doctree) Result<String>
|
||||
}
|
||||
|
||||
DocTree --> DocTreeBuilder : creates
|
||||
DocTree --> "0..*" Collection : contains
|
||||
Collection --> CollectionBuilder : creates
|
||||
DocTree --> RedisStorage : uses
|
||||
Collection --> RedisStorage : uses
|
||||
DocTree --> IncludeProcessor : uses
|
||||
```
|
||||
|
||||
## Implementation Steps
|
||||
|
||||
### 1. Project Setup and Dependencies
|
||||
|
||||
1. Update the Cargo.toml files with necessary dependencies:
|
||||
- redis (for Redis client)
|
||||
- walkdir (for directory traversal)
|
||||
- pulldown-cmark (for Markdown to HTML conversion)
|
||||
- thiserror (for error handling)
|
||||
- clap (for CLI argument parsing in doctreecmd)
|
||||
|
||||
### 2. Core Library Structure
|
||||
|
||||
1. **Error Module**
|
||||
- Create a custom error type using thiserror
|
||||
- Define specific error variants for different failure scenarios
|
||||
|
||||
2. **Storage Module**
|
||||
- Implement the RedisStorage struct to handle Redis operations
|
||||
- Provide methods for storing, retrieving, and deleting collection entries
|
||||
- Implement connection pooling for efficient Redis access
|
||||
|
||||
3. **Utils Module**
|
||||
- Implement utility functions like name_fix (equivalent to tools.NameFix in Go)
|
||||
- Implement markdown to HTML conversion using pulldown-cmark
|
||||
|
||||
### 3. Collection Implementation
|
||||
|
||||
1. **Collection Module**
|
||||
- Implement the Collection struct to represent a collection of documents
|
||||
- Implement the CollectionBuilder for creating Collection instances
|
||||
- Implement methods for scanning directories, managing pages and files
|
||||
|
||||
2. **Collection Builder Pattern**
|
||||
- Create a builder pattern for Collection creation
|
||||
- Allow configuration of Collection properties before building
|
||||
|
||||
### 4. DocTree Implementation
|
||||
|
||||
1. **DocTree Module**
|
||||
- Implement the DocTree struct to manage multiple collections
|
||||
- Implement the DocTreeBuilder for creating DocTree instances
|
||||
- Implement methods for managing collections and accessing documents
|
||||
|
||||
2. **DocTree Builder Pattern**
|
||||
- Create a builder pattern for DocTree creation
|
||||
- Allow adding collections and setting default collection before building
|
||||
|
||||
### 5. Include Processor Implementation
|
||||
|
||||
1. **Include Module**
|
||||
- Implement the IncludeProcessor to handle include directives
|
||||
- Implement parsing of include directives
|
||||
- Implement recursive processing of includes
|
||||
|
||||
### 6. CLI Example
|
||||
|
||||
1. **Update doctreecmd**
|
||||
- Implement a minimal CLI interface using clap
|
||||
- Provide commands for basic operations:
|
||||
- Scanning a directory
|
||||
- Listing collections
|
||||
- Getting page content
|
||||
- Getting HTML content
|
||||
|
||||
## Detailed Module Breakdown
|
||||
|
||||
### Error Module (src/error.rs)
|
||||
|
||||
```rust
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum DocTreeError {
|
||||
#[error("IO error: {0}")]
|
||||
IoError(#[from] std::io::Error),
|
||||
|
||||
#[error("Redis error: {0}")]
|
||||
RedisError(#[from] redis::RedisError),
|
||||
|
||||
#[error("Collection not found: {0}")]
|
||||
CollectionNotFound(String),
|
||||
|
||||
#[error("Page not found: {0}")]
|
||||
PageNotFound(String),
|
||||
|
||||
#[error("File not found: {0}")]
|
||||
FileNotFound(String),
|
||||
|
||||
#[error("Invalid include directive: {0}")]
|
||||
InvalidIncludeDirective(String),
|
||||
|
||||
#[error("No default collection set")]
|
||||
NoDefaultCollection,
|
||||
|
||||
#[error("Invalid number of arguments")]
|
||||
InvalidArgumentCount,
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, DocTreeError>;
|
||||
```
|
||||
|
||||
### Storage Module (src/storage.rs)
|
||||
|
||||
```rust
|
||||
use redis::{Client, Commands, Connection};
|
||||
use crate::error::{DocTreeError, Result};
|
||||
|
||||
pub struct RedisStorage {
|
||||
client: Client,
|
||||
}
|
||||
|
||||
impl RedisStorage {
|
||||
pub fn new(url: &str) -> Result<Self> {
|
||||
let client = Client::open(url)?;
|
||||
Ok(Self { client })
|
||||
}
|
||||
|
||||
pub fn get_connection(&self) -> Result<Connection> {
|
||||
Ok(self.client.get_connection()?)
|
||||
}
|
||||
|
||||
pub fn store_collection_entry(&self, collection: &str, key: &str, value: &str) -> Result<()> {
|
||||
let mut conn = self.get_connection()?;
|
||||
let collection_key = format!("collections:{}", collection);
|
||||
conn.hset(collection_key, key, value)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_collection_entry(&self, collection: &str, key: &str) -> Result<String> {
|
||||
let mut conn = self.get_connection()?;
|
||||
let collection_key = format!("collections:{}", collection);
|
||||
let value: String = conn.hget(collection_key, key)?;
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
// Additional methods for Redis operations
|
||||
}
|
||||
```
|
||||
|
||||
### Utils Module (src/utils.rs)
|
||||
|
||||
```rust
|
||||
use pulldown_cmark::{Parser, Options, html};
|
||||
|
||||
pub fn name_fix(name: &str) -> String {
|
||||
// Implementation of name_fix similar to tools.NameFix in Go
|
||||
// Normalize the name by converting to lowercase, replacing spaces with hyphens, etc.
|
||||
}
|
||||
|
||||
pub fn markdown_to_html(markdown: &str) -> String {
|
||||
let mut options = Options::empty();
|
||||
options.insert(Options::ENABLE_TABLES);
|
||||
options.insert(Options::ENABLE_FOOTNOTES);
|
||||
options.insert(Options::ENABLE_STRIKETHROUGH);
|
||||
|
||||
let parser = Parser::new_ext(markdown, options);
|
||||
let mut html_output = String::new();
|
||||
html::push_html(&mut html_output, parser);
|
||||
|
||||
html_output
|
||||
}
|
||||
```
|
||||
|
||||
### Collection Module (src/collection.rs)
|
||||
|
||||
```rust
|
||||
use std::path::{Path, PathBuf};
|
||||
use walkdir::WalkDir;
|
||||
use crate::error::Result;
|
||||
use crate::storage::RedisStorage;
|
||||
use crate::utils::name_fix;
|
||||
|
||||
pub struct Collection {
|
||||
pub path: PathBuf,
|
||||
pub name: String,
|
||||
storage: RedisStorage,
|
||||
}
|
||||
|
||||
pub struct CollectionBuilder {
|
||||
path: PathBuf,
|
||||
name: String,
|
||||
storage: Option<RedisStorage>,
|
||||
}
|
||||
|
||||
impl Collection {
|
||||
pub fn builder<P: AsRef<Path>>(path: P, name: &str) -> CollectionBuilder {
|
||||
CollectionBuilder {
|
||||
path: path.as_ref().to_path_buf(),
|
||||
name: name_fix(name),
|
||||
storage: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn scan(&self) -> Result<()> {
|
||||
// Implementation of scanning directory and storing in Redis
|
||||
}
|
||||
|
||||
pub fn page_get(&self, page_name: &str) -> Result<String> {
|
||||
// Implementation of getting page content
|
||||
}
|
||||
|
||||
// Additional methods for Collection
|
||||
}
|
||||
|
||||
impl CollectionBuilder {
|
||||
pub fn with_storage(mut self, storage: RedisStorage) -> Self {
|
||||
self.storage = Some(storage);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn build(self) -> Result<Collection> {
|
||||
let storage = self.storage.ok_or_else(|| {
|
||||
std::io::Error::new(std::io::ErrorKind::Other, "Storage not provided")
|
||||
})?;
|
||||
|
||||
let collection = Collection {
|
||||
path: self.path,
|
||||
name: self.name,
|
||||
storage,
|
||||
};
|
||||
|
||||
Ok(collection)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### DocTree Module (src/doctree.rs)
|
||||
|
||||
```rust
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use crate::collection::{Collection, CollectionBuilder};
|
||||
use crate::error::{DocTreeError, Result};
|
||||
use crate::storage::RedisStorage;
|
||||
|
||||
pub struct DocTree {
|
||||
collections: HashMap<String, Collection>,
|
||||
default_collection: Option<String>,
|
||||
storage: RedisStorage,
|
||||
}
|
||||
|
||||
pub struct DocTreeBuilder {
|
||||
collections: HashMap<String, Collection>,
|
||||
default_collection: Option<String>,
|
||||
storage: Option<RedisStorage>,
|
||||
}
|
||||
|
||||
impl DocTree {
|
||||
pub fn builder() -> DocTreeBuilder {
|
||||
DocTreeBuilder {
|
||||
collections: HashMap::new(),
|
||||
default_collection: None,
|
||||
storage: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_collection<P: AsRef<Path>>(&mut self, path: P, name: &str) -> Result<&Collection> {
|
||||
// Implementation of adding a collection
|
||||
}
|
||||
|
||||
// Additional methods for DocTree
|
||||
}
|
||||
|
||||
impl DocTreeBuilder {
|
||||
pub fn with_storage(mut self, storage: RedisStorage) -> Self {
|
||||
self.storage = Some(storage);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn with_collection<P: AsRef<Path>>(mut self, path: P, name: &str) -> Result<Self> {
|
||||
// Implementation of adding a collection during building
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
pub fn with_default_collection(mut self, name: &str) -> Self {
|
||||
self.default_collection = Some(name.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn build(self) -> Result<DocTree> {
|
||||
let storage = self.storage.ok_or_else(|| {
|
||||
std::io::Error::new(std::io::ErrorKind::Other, "Storage not provided")
|
||||
})?;
|
||||
|
||||
let doctree = DocTree {
|
||||
collections: self.collections,
|
||||
default_collection: self.default_collection,
|
||||
storage,
|
||||
};
|
||||
|
||||
Ok(doctree)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Include Module (src/include.rs)
|
||||
|
||||
```rust
|
||||
use crate::doctree::DocTree;
|
||||
use crate::error::Result;
|
||||
|
||||
pub fn process_includes(content: &str, collection_name: &str, doctree: &DocTree) -> Result<String> {
|
||||
// Implementation of processing include directives
|
||||
}
|
||||
|
||||
fn parse_include_line(line: &str) -> Result<(Option<String>, Option<String>)> {
|
||||
// Implementation of parsing include directives
|
||||
}
|
||||
|
||||
fn handle_include(page_name: &str, collection_name: &str, doctree: &DocTree) -> Result<String> {
|
||||
// Implementation of handling include directives
|
||||
}
|
||||
```
### Main Library File (src/lib.rs)

```rust
mod error;
mod storage;
mod utils;
mod collection;
mod doctree;
mod include;

pub use error::{DocTreeError, Result};
pub use storage::RedisStorage;
pub use collection::{Collection, CollectionBuilder};
pub use doctree::{DocTree, DocTreeBuilder};
pub use include::process_includes;
```

### CLI Example (doctreecmd/src/main.rs)

```rust
use clap::{App, Arg, SubCommand};
use doctree::{DocTree, RedisStorage};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let matches = App::new("DocTree CLI")
        .version("0.1.0")
        .author("Your Name")
        .about("A tool to manage document collections")
        .subcommand(
            SubCommand::with_name("scan")
                .about("Scan a directory and create a collection")
                .arg(Arg::with_name("path").required(true))
                .arg(Arg::with_name("name").required(true)),
        )
        .subcommand(
            SubCommand::with_name("list")
                .about("List collections"),
        )
        .subcommand(
            SubCommand::with_name("get")
                .about("Get page content")
                .arg(Arg::with_name("collection").required(true))
                .arg(Arg::with_name("page").required(true)),
        )
        .get_matches();

    // Implementation of CLI commands

    Ok(())
}
```

## Example Usage

Here's how the library would be used with the builder pattern:

```rust
use doctree::{DocTree, RedisStorage};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a Redis storage instance
    let storage = RedisStorage::new("redis://localhost:6379")?;

    // Create a DocTree instance using the builder pattern
    let mut doctree = DocTree::builder()
        .with_storage(storage.clone())
        .with_collection("path/to/collection", "my-collection")?
        .with_default_collection("my-collection")
        .build()?;

    // Get page content (passing None instead of Some(...) would
    // fall back to the default collection)
    let content = doctree.page_get(Some("my-collection"), "page-name")?;
    println!("Page content: {}", content);

    // Get HTML content
    let html = doctree.page_get_html(Some("my-collection"), "page-name")?;
    println!("HTML content: {}", html);

    Ok(())
}
```

## Testing Strategy

1. **Unit Tests**
   - Test individual components in isolation
   - Mock Redis for testing storage operations
   - Test utility functions (see the sketch after this list)

2. **Integration Tests**
   - Test the interaction between components
   - Test the builder pattern
   - Test include processing

3. **End-to-End Tests**
   - Test the complete workflow with real files
   - Test the CLI interface
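
A unit test for the utilities, placed inside src/utils.rs, might look like the following. The exact normalization rules of `name_fix` are an assumption here, so the test only asserts invariants rather than specific outputs:

```rust
#[cfg(test)]
mod tests {
    use super::name_fix;

    #[test]
    fn name_fix_produces_a_stable_key() {
        // Assumed behavior: the same input always yields the same key,
        // and the key is independent of letter case.
        assert_eq!(name_fix("MyPage.md"), name_fix("MyPage.md"));
        assert_eq!(name_fix("MyPage.md"), name_fix("mypage.md"));
    }
}
```
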
## Timeline

1. **Project Setup and Dependencies**: 1 day
2. **Core Library Structure**: 2 days
3. **Collection Implementation**: 2 days
4. **DocTree Implementation**: 2 days
5. **Include Processor Implementation**: 1 day
6. **CLI Example**: 1 day
7. **Testing and Documentation**: 2 days

Total estimated time: 11 days

doctreecmd/Cargo.toml
@ -4,3 +4,5 @@ version = "0.1.0"
edition = "2024"

[dependencies]
doctree = { path = "../doctree" }
clap = "3.2.25"
doctreecmd/src/main.rs
@ -1,3 +1,78 @@
fn main() {
    println!("Hello, world!");
}
use clap::{App, Arg, SubCommand};
use doctree::{DocTree, RedisStorage, Result};
use std::path::Path;

fn main() -> Result<()> {
    let matches = App::new("DocTree CLI")
        .version("0.1.0")
        .author("Your Name")
        .about("A tool to manage document collections")
        .subcommand(
            SubCommand::with_name("scan")
                .about("Scan a directory and create a collection")
                .arg(Arg::with_name("path").required(true).help("Path to the directory"))
                .arg(Arg::with_name("name").required(true).help("Name of the collection")),
        )
        .subcommand(
            SubCommand::with_name("list")
                .about("List collections"),
        )
        .subcommand(
            SubCommand::with_name("get")
                .about("Get page content")
                .arg(Arg::with_name("collection").required(true).help("Name of the collection"))
                .arg(Arg::with_name("page").required(true).help("Name of the page")),
        )
        .subcommand(
            SubCommand::with_name("html")
                .about("Get page content as HTML")
                .arg(Arg::with_name("collection").required(true).help("Name of the collection"))
                .arg(Arg::with_name("page").required(true).help("Name of the page")),
        )
        .get_matches();

    // Create a Redis storage instance
    let storage = RedisStorage::new("redis://localhost:6379")?;

    // Create a DocTree instance
    let mut doctree = DocTree::builder()
        .with_storage(storage)
        .build()?;

    // Handle subcommands
    if let Some(matches) = matches.subcommand_matches("scan") {
        let path = matches.value_of("path").unwrap();
        let name = matches.value_of("name").unwrap();

        println!("Scanning directory: {}", path);
        doctree.add_collection(Path::new(path), name)?;
        println!("Collection '{}' created successfully", name);
    } else if let Some(_) = matches.subcommand_matches("list") {
        let collections = doctree.list_collections();

        if collections.is_empty() {
            println!("No collections found");
        } else {
            println!("Collections:");
            for collection in collections {
                println!("- {}", collection);
            }
        }
    } else if let Some(matches) = matches.subcommand_matches("get") {
        let collection = matches.value_of("collection").unwrap();
        let page = matches.value_of("page").unwrap();

        let content = doctree.page_get(Some(collection), page)?;
        println!("{}", content);
    } else if let Some(matches) = matches.subcommand_matches("html") {
        let collection = matches.value_of("collection").unwrap();
        let page = matches.value_of("page").unwrap();

        let html = doctree.page_get_html(Some(collection), page)?;
        println!("{}", html);
    } else {
        println!("No command specified. Use --help for usage information.");
    }

    Ok(())
}
0
examples/grid1/.collection
Normal file
8
examples/grid1/benefits/_category_.json
Normal file
@ -0,0 +1,8 @@
{
  "label": "AIBox Benefits",
  "position": 4,
  "link": {
    "type": "generated-index",
    "description": "The benefits of AIBox"
  }
}
28
examples/grid1/benefits/revenue_generation.md
Normal file
@ -0,0 +1,28 @@
---
title: Revenue Generation
sidebar_position: 2
---

### Renting Options

AIBox creates opportunities for revenue generation through resource sharing. The following numbers are suggestive, as each AIBox owner can set their own pricing.

| Plan | Rate | Monthly Potential | Usage Scenario |
|------|------|-------------------|----------------|
| Micro | $0.40/hr | $200-300 | Inference workloads |
| Standard | $0.80/hr | $400-600 | Development |
| Full GPU | $1.60/hr | $800-1,200 | Training |
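
As a rough sanity check (our own arithmetic, not an official projection), the monthly potential is simply the hourly rate times hours rented: a Micro slice rented around the clock yields $0.40/hr × 24 h × 30 days ≈ $288, so the listed $200-300 range corresponds to roughly 70-100% utilization.
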
### Proof of Capacity Revenues

The AIBox implements a tiered proof-of-capacity reward system, distributing monthly INCA tokens based on hardware configuration.

| Configuration | Monthly Rewards |
|---------------|-----------------|
| Base AIBox | 500-2000 INCA |
| 1 GPU AIBox | 1000 INCA |
| 2 GPU AIBox | 2000 INCA |

### Proof of Utilization Revenues

The AIBox implements a revenue-sharing model wherein device owners receive 80% of the INCA tokens spent on deployments, providing transparent proof-of-utilization economics.
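
For example, under this 80/20 split, a deployment that spends 1,000 INCA on your AIBox would return 800 INCA to you as the owner.
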
32
examples/grid1/benefits/use_cases.md
Normal file
@ -0,0 +1,32 @@
---
title: Use Cases
sidebar_position: 3
---

### Personal AI Development

The AIBox provides an ideal environment for individual developers working on AI projects:
- Model training and fine-tuning
- Experimental AI architectures
- Unrestricted testing and development
- Complete control over computing resources

The system allows developers to run extended training sessions without watching cloud billing meters or dealing with usage restrictions.

### Shared Resources

For teams and organizations, AIBox offers efficient resource sharing capabilities:
- Multi-user environment
- Resource pooling
- Cost sharing
- Distributed computing

This makes it particularly valuable for small teams and startups looking to maintain control over their AI infrastructure while managing costs.

### Commercial Applications

The system supports various commercial deployments:
- AI-as-a-Service
- Model hosting
- Inference endpoints
- Dataset processing
8
examples/grid1/getting-started/_category_.json
Normal file
@ -0,0 +1,8 @@
{
  "label": "Getting Started",
  "position": 5,
  "link": {
    "type": "generated-index",
    "description": "Getting started with the AIBox"
  }
}
10
examples/grid1/getting-started/pre_order_process.md
Normal file
@ -0,0 +1,10 @@
---
title: Pre-Order Process
sidebar_position: 2
---

### How to Order

The steps to acquire an AIBox are simple:
1. [Select your configuration](./purchase_options.md)
2. [Submit pre-order form](https://www2.aibox.threefold.io/signup/)
84
examples/grid1/getting-started/purchase_options.md
Normal file
@ -0,0 +1,84 @@
---
title: Purchase Options
sidebar_position: 1
---

### Base AIBox Plan ($1,000-1,500)
For experienced builders and hardware enthusiasts who want to customize their AI infrastructure. This plan provides the essential framework while allowing you to select and integrate your own GPU.

Base Configuration:
- GPU: Your choice, with a minimum requirement of AMD Radeon RX 7900 XT
  * Flexibility to use an existing GPU or select a preferred model
  * Support for multiple GPU vendors with minimum performance requirements
  * Full integration support for chosen hardware
- Memory: 64-128 GB DDR5
  * Expandable configuration
  * High-speed memory modules
  * ECC support optional
- Storage: 2-4 TB of NVMe SSD
  * PCIe 4.0 support
  * Configurable RAID options
  * Expansion capabilities
- Integrated Mycelium Network
  * Full network stack
  * P2P capabilities
  * Decentralized computing support

Rewards Structure:
- Proof of Capacity: 500-2000 INCA per month (depending on chosen GPU)
- Proof of Utilization: 80% of INCA revenue
- Flexible earning potential based on hardware configuration

### 1 GPU AIBox Plan ($2,000-2,500)
Perfect for individual developers and researchers who need professional-grade AI computing power. This configuration provides enough processing power for smaller but capable models and AI agents.

Standard Configuration:
- 1x AMD Radeon RX 7900 XTX
  * 24GB VRAM
  * 61.6 TFLOPS FP32 performance
  * 960 GB/s memory bandwidth
- 64-128 GB DDR5 memory
  * Optimal for AI workloads
  * High-speed data processing
  * Multi-tasking capability
- 2-4 TB of NVMe SSD
  * Ultra-fast storage access
  * Ample space for datasets
  * Quick model loading
- Integrated Mycelium
  * Full network integration
  * Ready for distributed computing
  * P2P capabilities enabled

Rewards Structure:
- Proof of Capacity: 1000 INCA per month
- Proof of Utilization: 80% of INCA revenue
- Consistent earning potential

### 2 GPU AIBox Plan ($4,000-5,000)
Our most powerful configuration, designed for serious AI researchers and organizations. This setup supports large models of up to 48GB, providing substantial computing power for advanced AI applications.

Advanced Configuration:
- 2x AMD Radeon RX 7900 XTX
  * Combined 48GB VRAM
  * 123.2 TFLOPS total FP32 performance
  * 1920 GB/s total memory bandwidth
- 64-128 GB DDR5 memory
  * Maximum performance configuration
  * Support for multiple large models
  * Extensive multi-tasking capability
- 2-4 TB of NVMe SSD
  * Enterprise-grade storage
  * RAID configuration options
  * Expandable capacity
- Integrated Mycelium
  * Enhanced network capabilities
  * Full distributed computing support
  * Advanced P2P features

Rewards Structure:
- Proof of Capacity: 2000 INCA per month
- Proof of Utilization: 80% of INCA revenue
- Maximum earning potential

Each plan includes comprehensive support, setup assistance, and access to the full AIBox ecosystem. Configurations can be further customized within each plan's framework to meet specific requirements.
8
examples/grid1/getting-started/support.md
Normal file
@ -0,0 +1,8 @@
---
title: Support
sidebar_position: 3
---

Our support team is composed of technically proficient members who understand AI development needs.

Feel free to reach out to ThreeFold Support [here](https://threefoldfaq.crisp.help/en/) for more information.
24
examples/grid1/introduction.md
Normal file
@ -0,0 +1,24 @@
---
title: Introducing AIBox
sidebar_position: 1
slug: /
---

## AIBox: Powering Community-Driven AI

The AIBox is built for those who want to explore AI on their own terms. With 2 RX 7900 XTX GPUs and 48GB of combined memory, it enables running demanding AI models efficiently.

## Open AI Development

AIBox offers full control: no cloud restrictions, no unexpected costs. Train models, fine-tune AI systems, and experiment freely with PyTorch, TensorFlow, or low-level GPU programming.

## More Than Hardware: A Shared Network

AIBox isn't just a tool; it's part of a decentralized AI network. When idle, its GPU power can be shared via Mycelium, benefiting the wider community while generating value. Designed for efficiency, with water cooling and power monitoring, it's a practical, community-powered step toward open AI development.

## Expanding the ThreeFold Grid

Each AIBox integrates into the ThreeFold Grid, a decentralized Internet infrastructure active in over 50 countries. By connecting your AIBox, you contribute to this global network, enhancing its capacity and reach. This integration not only supports your AI endeavors but also strengthens a community-driven Internet ecosystem.

For more information about ThreeFold, see https://www.threefold.io
8
examples/grid1/overview/_category_.json
Normal file
@ -0,0 +1,8 @@
{
  "label": "AIBox Overview",
  "position": 2,
  "link": {
    "type": "generated-index",
    "description": "Overview of the AIBox"
  }
}
12
examples/grid1/overview/vision_mission.md
Normal file
@ -0,0 +1,12 @@
---
title: Vision & Mission
sidebar_position: 2
---

## AI Landscape

The AI landscape today is dominated by centralized cloud providers, creating barriers to innovation and increasing costs for developers. Our vision is different: we're building tools for a decentralized AI future where computing power isn't monopolized by large cloud providers.

## High-End AI Hardware

Our technical goal is straightforward: provide enterprise-grade AI hardware that's both powerful and profitable through resource sharing. We believe that AI development should be accessible to anyone with the technical skills to push boundaries.
27
examples/grid1/overview/who_is_aibox_for.md
Normal file
@ -0,0 +1,27 @@
---
title: Who Is AIBox For?
sidebar_position: 4
---

The AIBox is for hackers and AI explorers who want a simple, accessible gateway into AI experimentation, while also offering advanced features for those ready to push the boundaries of what's possible.

### Developers & Hackers
Technical capabilities:
- Direct GPU programming through ROCm
- Custom containerization support
- Full Linux kernel access
- P2P networking capabilities

### AI Researchers
Research-focused features:
- Support for popular ML frameworks (PyTorch, TensorFlow)
- Large model training capability (up to 48GB VRAM)
- Distributed training support
- Dataset management tools

### Tech Enthusiasts
Advanced features:
- Water cooling management interface
- Power consumption monitoring
- Performance benchmarking tools
- Resource allocation controls
18
examples/grid1/overview/why_decentralized_ai_matters.md
Normal file
@ -0,0 +1,18 @@
---
title: Why Decentralized AI Matters
sidebar_position: 3
---

The AIBox gives you complete control over your data privacy with full hardware access, while enabling unlimited experimentation without the restrictions of cloud platforms.

### Data Privacy & Control
- Full root access to hardware
- No data leaving your premises without explicit permission
- Custom firewall rules and network configurations
- Ability to air-gap when needed

### Unlimited Experimentation
- Direct GPU access without virtualization overhead
- Custom model training without cloud restrictions
- Unrestricted model sizes and training durations
- Freedom to modify system parameters
8
examples/grid1/technical-specs/_category_.json
Normal file
@ -0,0 +1,8 @@
{
  "label": "Technical Specs",
  "position": 3,
  "link": {
    "type": "generated-index",
    "description": "Technical aspects of the AIBox"
  }
}
35
examples/grid1/technical-specs/features_capabilities.md
Normal file
@ -0,0 +1,35 @@
---
title: Features & Capabilities
sidebar_position: 3
---

## Overview

AIBox combines enterprise-grade hardware capabilities with flexible resource management, creating a powerful platform for AI development and deployment. Each feature is designed to meet the demanding needs of developers and researchers who require both raw computing power and precise control over their resources.

## VM Management (CloudSlices)

CloudSlices transforms your AIBox into a multi-tenant powerhouse, enabling you to run multiple isolated environments simultaneously. Unlike traditional virtualization, CloudSlices is optimized for AI workloads, ensuring minimal overhead and maximum GPU utilization.

Each slice operates as a fully isolated virtual machine with guaranteed resources. The AIBox can be sliced into up to 8 virtual machines.

The slicing system ensures resources are allocated efficiently while maintaining performance isolation between workloads. This means your critical training job won't be affected by other tasks running on the system.

## GPU Resource Management

Our GPU management system provides granular control while maintaining peak performance. Whether you're running a single large model or multiple smaller workloads, the system optimizes resource allocation automatically.

## Network Connectivity

The networking stack is built for both performance and security. It integrates seamlessly with the Mycelium network, providing end-to-end encryption, and offers Web gateways that allow external connections to VM containers. The AIBox thus creates a robust foundation for distributed AI computing.

## Security Features

Security is implemented at every layer of the system without compromising performance:

System Security:
- Hardware-level isolation
- Secure boot chain
- Network segmentation

Each feature has been carefully selected and implemented to provide both practical utility and enterprise-grade security, ensuring your AI workloads and data remain protected while maintaining full accessibility for authorized users.
37
examples/grid1/technical-specs/hardware_specifications.md
Normal file
@ -0,0 +1,37 @@
---
title: Hardware Specifications
sidebar_position: 1
---

### GPU Options

At the heart of AIBox lies its GPU configuration, carefully selected for AI workloads. The AMD Radeon RX 7900 XTX provides an exceptional balance of performance, memory, and cost efficiency:

| Model | VRAM | FP32 Performance | Memory Bandwidth |
|-------|------|------------------|------------------|
| RX 7900 XTX | 24GB | 61.6 TFLOPS | 960 GB/s |
| Dual Config | 48GB | 123.2 TFLOPS | 1920 GB/s |

The dual GPU configuration enables handling larger models and datasets that wouldn't fit in single-GPU memory, making it ideal for advanced AI research and development.

### Memory & Storage

AI workloads demand high-speed memory and storage. The AIBox configuration ensures your GPU computing power isn't bottlenecked by I/O limitations:

Memory Configuration:
- RAM: 64GB/128GB DDR5-4800
- Storage: 2x 2TB NVMe SSDs (PCIe 4.0)

This setup provides ample memory for large dataset preprocessing and fast storage access for model training and inference.

### Cooling System

Thermal management is crucial for sustained AI workloads. Our cooling solution focuses on maintaining consistent performance during extended operations.

This cooling system allows for sustained maximum performance without thermal throttling, even during extended training sessions.

### Power Supply

Reliable power delivery is essential for system stability and performance.

The AIBox power configuration ensures clean, stable power delivery under all operating conditions, with headroom for additional components or intense workloads.
29
examples/grid1/technical-specs/software_stack.md
Normal file
@ -0,0 +1,29 @@
---
title: Software Stack
sidebar_position: 2
---

### ThreeFold Zero-OS

Zero-OS forms the foundation of AIBox's software architecture. Unlike traditional operating systems, it's a minimalist, security-focused platform optimized specifically for AI workloads and distributed computing.

Key features:
- Bare-metal operating system with minimal overhead
- Zero-overhead virtualization
- Secure boot process
- Automated resource management

This specialized operating system ensures maximum performance and security while eliminating unnecessary services and potential vulnerabilities.

### Mycelium Network Integration

The Mycelium Network integration transforms your AIBox from a standalone system into a node in a powerful distributed computing network, built on peer-to-peer, end-to-end encrypted communication that always chooses the shortest path.

### Pre-installed AI Frameworks

Your AIBox comes ready for development with a comprehensive AI software stack:

- ROCm 5.7+ ML stack
- PyTorch 2.1+ with GPU optimization
- TensorFlow 2.14+
- Pre-built container images
0
examples/parent/docs2/.collection
Normal file
38
examples/parent/docs2/get-started/01_features.md
Normal file
@ -0,0 +1,38 @@
---
title: Features Mycelium Network
sidebar_position: 1
---

Mycelium is a locality-aware, end-to-end encrypted network designed for efficient and secure communication between nodes. Below are its key features:

## What Makes Mycelium Unique

1. **Locality Awareness**
   Mycelium identifies the shortest path between nodes, optimizing communication based on location.

2. **End-to-End Encryption**
   All traffic between nodes is encrypted, ensuring secure data transmission.

3. **Traffic Routing Over Friend Nodes**
   Traffic can be routed through nodes of trusted friends, maintaining location awareness.

4. **Automatic Rerouting**
   If a physical link fails, Mycelium automatically reroutes traffic to ensure uninterrupted connectivity.

5. **Network Address Linked to Private Key**
   Each node is assigned an IPv6 network address that is cryptographically linked to its private key.

6. **Scalability**
   Mycelium is designed to scale to a planetary level. The team has evaluated multiple overlay networks in the past and is focused on overcoming scalability challenges.

## Tech

1. **Flexible Deployment**
   Mycelium can be run without a TUN interface, allowing it to function solely as a reliable message bus.

2. **Reliable Message Bus**
   Mycelium includes a simple and reliable message bus built on top of its network layer.

3. **Multiple Communication Protocols**
   Mycelium supports various communication methods, including QUIC and TCP. The team is also developing hole-punching for QUIC, enabling direct peer-to-peer (P2P) traffic without intermediaries.
23
examples/parent/docs2/get-started/02_mycelium-app.md
Normal file
@ -0,0 +1,23 @@
---
title: Download the App
sidebar_position: 4
---

The Mycelium app is available for Android, Windows, macOS and iOS.

For Linux, read the [Linux Installation](../experts/03_linux-installation.md) section.

## Download Links

You can download the Mycelium app with the following links:

- [iOS and macOS](https://apps.apple.com/app/id6504277565)
  - Download the app from the App Store
- [Android](https://play.google.com/store/apps/details?id=tech.threefold.mycelium)
  - Download the app from the Google Play Store
- [Windows](https://github.com/threefoldtech/myceliumflut/releases)
  - Go to the official Mycelium release page and download the latest `.exe`

## Upcoming Updates

- The user interface (UI) will be drastically improved in upcoming releases to better represent the available features.
48
examples/parent/docs2/get-started/03_use-the-app.md
Normal file
@ -0,0 +1,48 @@
---
title: Use the App
sidebar_position: 5
---

## Start Mycelium

To start Mycelium, simply open the app and click on `Start`.

![](./img/mycelium_1.png)

> Note for Windows users: The Mycelium app must be run as an administrator to function properly. Right-click on the application icon and select "Run as administrator" to ensure proper network connectivity.

## Stop or Restart Mycelium

To stop or restart Mycelium, click on the appropriate button.

![](./img/mycelium_2.png)

## Add Peers

You can add different Mycelium peers in the `Peers` window.

Simply add peers and then either start or restart the app.

![](./img/mycelium_3.png)

You can consult the [Mycelium hosted public nodes](../experts/04_additional-information.md) to find more peers.

For example, if you want to add the node with the IPv4 address `5.78.122.16` on TCP port `9651`, simply add the following line, then start or restart the app.

```
tcp://5.78.122.16:9651
```

## Mycelium Address

When you use the Mycelium app, you are assigned a unique Mycelium address.

To copy the Mycelium address, click on the button on the right of the address.

![](./img/mycelium_4.png)

## Deploy on the Grid with Mycelium

Once you've installed Mycelium, you can deploy on the ThreeFold Grid and connect to your workload using Mycelium.

As a starter, you can explore the ThreeFold Grid and deploy apps on the [ThreeFold Dashboard](https://manual.grid.tf/documentation/dashboard/dashboard.html) using Mycelium to connect.
8
examples/parent/docs2/get-started/_category_.json
Normal file
@ -0,0 +1,8 @@
{
  "label": "Get Started",
  "position": 4,
  "link": {
    "type": "generated-index",
    "description": "Get started with Mycelium Network."
  }
}
BIN
examples/parent/docs2/get-started/img/mycelium_1.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 35 KiB
BIN
examples/parent/docs2/get-started/img/mycelium_2.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 44 KiB
BIN
examples/parent/docs2/get-started/img/mycelium_3.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 14 KiB
BIN
examples/parent/docs2/get-started/img/mycelium_4.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 8.9 KiB