refactor: Normalize page and collection names

- Use `name_fix_no_underscore_no_ext` for consistent naming
- Remove underscores and special characters from names
- Add tests for name normalization functions
- Ensure page and collection names are consistently formatted
- Update link parsing to use normalized names
This commit is contained in:
Mahmoud-Emad
2025-11-04 12:28:13 +02:00
parent ef14bc6d82
commit ecfe77a2dc
4 changed files with 132 additions and 22 deletions

View File

@@ -15,7 +15,7 @@ pub mut:
// Create a new collection
fn (mut self Atlas) add_collection(mut path pathlib.Path) !Collection {
mut name := path.name_fix_no_ext()
mut name := path.name_fix_no_underscore_no_ext()
mut filepath := path.file_get('.collection')!
content := filepath.read()!
if content.trim_space() != '' {
@@ -24,7 +24,9 @@ fn (mut self Atlas) add_collection(mut path pathlib.Path) !Collection {
name = params.get('name')!
}
}
name = texttools.name_fix(name)
// Normalize collection name to remove underscores and special chars
// This ensures collection_a and collectiona both become 'collectiona'
name = texttools.name_fix_no_underscore_no_ext(name)
console.print_item("Adding collection '${name}' to Atlas '${self.name}' at path '${path.path}'")
if name in self.collections {
@@ -90,6 +92,7 @@ pub fn (a Atlas) groups_get(session Session) []&Group {
return matching
}
//////////////////SCAN
// Scan a path for collections
@@ -117,7 +120,7 @@ fn (mut a Atlas) scan_(mut dir pathlib.Path, ignore_ []string) ! {
// Check if this directory is a collection
if dir.file_exists('.collection') {
collname := dir.name_fix_no_ext()
collname := dir.name_fix_no_underscore_no_ext()
if collname.to_lower() in ignore_ {
return
}

View File

@@ -45,12 +45,13 @@ fn (mut c Collection) init_post() ! {
c.init_git_info()!
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Add a page to the collection
fn (mut c Collection) add_page(mut path pathlib.Path) ! {
name := path.name_fix_no_ext()
// Use name_fix_no_underscore_no_ext to ensure consistent naming
// This ensures token_system.md and tokensystem.md both become 'tokensystem'
name := path.name_fix_no_underscore_no_ext()
if name in c.pages {
return error('Page ${name} already exists in collection ${c.name}')
}
@@ -129,7 +130,6 @@ pub fn (c Collection) file_or_image_get(name string) !&File {
return f
}
// Check if page exists
pub fn (c Collection) page_exists(name string) bool {
return name in c.pages
@@ -152,9 +152,6 @@ pub fn (c Collection) file_or_image_exists(name string) bool {
return true
}
@[params]
pub struct CollectionErrorArgs {
pub mut:
@@ -243,7 +240,7 @@ pub fn (c Collection) print_errors() {
pub fn (mut c Collection) validate_links() ! {
for _, mut page in c.pages {
content := page.content(include: true)!
page.links=page.find_links(content)! // will walk over links see if errors and add errors
page.links = page.find_links(content)! // will walk over links see if errors and add errors
}
}

View File

@@ -97,20 +97,20 @@ fn (mut p Page) find_links(content string) ![]Link {
mut is_file_link := false
//if no . in file then it means it's a page link (binaries with . are not supported in other words)
if target.contains(".") && (! target.trim_space().to_lower().ends_with(".md")) {
// if no . in file then it means it's a page link (binaries with . are not supported in other words)
if target.contains('.') && (!target.trim_space().to_lower().ends_with('.md')) {
is_file_link = true
is_image_link = false //means it's a file link, not an image link
is_image_link = false // means it's a file link, not an image link
}
mut link := Link{
src: line[open_bracket..close_paren + 1]
text: text
target: target.trim_space()
line: line_idx + 1
is_file_link: is_file_link
src: line[open_bracket..close_paren + 1]
text: text
target: target.trim_space()
line: line_idx + 1
is_file_link: is_file_link
is_image_link: is_image_link
page: &p
page: &p
}
p.parse_link_target(mut link)
@@ -155,7 +155,8 @@ fn (mut p Page) parse_link_target(mut link Link) {
if target.contains(':') {
parts := target.split(':')
if parts.len >= 2 {
link.target_collection_name = texttools.name_fix(parts[0])
// Normalize collection name to remove underscores
link.target_collection_name = texttools.name_fix_no_underscore_no_ext(parts[0])
link.target_item_name = normalize_page_name(parts[1])
}
} else {
@@ -273,11 +274,14 @@ fn (mut p Page) process_links(mut export_dir pathlib.Path) !string {
/////////////TOOLS//////////////////////////////////
// Normalize page name (remove .md, underscores, and apply name_fix)
// This ensures consistent naming: token_system, token-system, TokenSystem all become tokensystem
fn normalize_page_name(name string) string {
	mut clean := name
	// Strip a trailing '.md' extension before normalizing
	if clean.ends_with('.md') {
		clean = clean[0..clean.len - 3]
	}
	// Use name_fix_no_underscore_no_ext to remove underscores and normalize.
	// This ensures token_system and tokensystem both become tokensystem.
	// (A stale `return texttools.name_fix(clean)` left over from the old
	// implementation was removed here — it made this line unreachable.)
	return texttools.name_fix_no_underscore_no_ext(clean)
}

View File

@@ -0,0 +1,106 @@
module atlas
import incubaid.herolib.core.texttools
// Test that normalize_page_name removes underscores and normalizes consistently
// Verify normalize_page_name collapses separators, casing and the .md
// extension into one canonical lowercase form.
fn test_normalize_page_name() {
	// Different separators and casing all collapse to the same name.
	for input in ['token_system', 'token-system', 'TokenSystem', 'Token System'] {
		assert normalize_page_name(input) == 'tokensystem'
	}
	// A trailing .md extension is stripped before normalizing.
	for input in ['token_system.md', 'token-system.md', 'TokenSystem.md'] {
		assert normalize_page_name(input) == 'tokensystem'
	}
	// Repeated, leading and trailing underscores are all removed.
	for input in ['token__system', 'token___system', '_token_system_'] {
		assert normalize_page_name(input) == 'tokensystem'
	}
	// Other special characters are stripped as well.
	assert normalize_page_name('token@system') == 'tokensystem'
	assert normalize_page_name('token!system') == 'tokensystem'
	// '#' behaves like a URL anchor: everything after it is dropped.
	assert normalize_page_name('token#system') == 'token'
}
// Test collection name normalization
// Every spelling variant of a collection name must share one canonical form.
fn test_collection_name_normalization() {
	variants := ['my_collection', 'my-collection', 'MyCollection', 'My Collection',
		'my collection']
	for variant in variants {
		assert texttools.name_fix_no_underscore_no_ext(variant) == 'mycollection'
	}
}
// Test that different link formats resolve to the same target
// Links written in any of these spellings must resolve to the same page target.
fn test_link_target_normalization() {
	variants := [
		'token_system',
		'token-system',
		'TokenSystem',
		'token_system.md',
		'token-system.md',
		'TokenSystem.md',
		'TOKEN_SYSTEM',
		'Token_System',
	]
	for variant in variants {
		got := normalize_page_name(variant)
		assert got == 'tokensystem', 'Expected "${variant}" to normalize to "tokensystem", got "${got}"'
	}
}
// Test collection name in links
// Collection references inside links normalize to one canonical name.
fn test_collection_name_in_links() {
	variants := [
		'collection_a',
		'collection-a',
		'CollectionA',
		'Collection_A',
		'COLLECTION_A',
	]
	for variant in variants {
		got := texttools.name_fix_no_underscore_no_ext(variant)
		assert got == 'collectiona', 'Expected "${variant}" to normalize to "collectiona", got "${got}"'
	}
}
// Test real-world examples
// Spot-check normalization against realistic documentation page names.
fn test_real_world_examples() {
	// input filename -> expected canonical page name
	cases := {
		'getting_started.md': 'gettingstarted'
		'api_reference.md':   'apireference'
		'user-guide.md':      'userguide'
		'FAQ.md':             'faq'
		'README.md':          'readme'
		'token_system.md':    'tokensystem'
		'mycelium_cloud.md':  'myceliumcloud'
		'tf_grid.md':         'tfgrid'
	}
	for input, want in cases {
		assert normalize_page_name(input) == want
	}
}
// Test that normalization is idempotent (applying it twice gives same result)
// Normalizing an already-normalized name must be a no-op (idempotence).
fn test_normalization_idempotent() {
	for original in ['token_system', 'TokenSystem', 'token-system', 'Token System'] {
		once := normalize_page_name(original)
		twice := normalize_page_name(once)
		assert once == twice, 'Normalization should be idempotent: "${original}" -> "${once}" -> "${twice}"'
	}
}