From 09eb85be6c57dc5030b4fdb6a04b14be4842d0b8 Mon Sep 17 00:00:00 2001
From: octospacc
Date: Thu, 15 Dec 2022 16:29:22 +0100
Subject: [PATCH] Fixes and attempted fixes

---
 Source/Modules/Elements.py |  33 +++++++++---
 Source/Modules/Site.py     | 103 +++++++++++++++++++++++--------------
 TODO                       |   4 +-
 3 files changed, 95 insertions(+), 45 deletions(-)

diff --git a/Source/Modules/Elements.py b/Source/Modules/Elements.py
index e3e6ad2..9d65181 100644
--- a/Source/Modules/Elements.py
+++ b/Source/Modules/Elements.py
@@ -46,6 +46,28 @@ HTMLCommentsBlock = '<br><h3>{StrComments}</h3>
 
+# Generate HTML tree/nested list from our internal metaformat, such as:
+# :Item 1 \\ <li>Item 1</li>
+# :Item 4 // <li>Item 4</li>
+def GenHTMLTreeList(MetaList:str, Type:str='ul'):
+	HTML = ''
+	Lines = MetaList.splitlines()
+	CurDepth, NextDepth, PrevDepth = 0, 0, 0
+	for i,e in enumerate(Lines):
+		CurDepth = e.find(':')
+		NextDepth = Lines[i+1].find(':') if i+1 < len(Lines) else 0
+		HTML += '\n<li>' + e[CurDepth+1:]
+		if NextDepth == CurDepth:
+			HTML += '</li>'
+		elif NextDepth > CurDepth:
+			HTML += f'\n<{Type}>' * (NextDepth - CurDepth)
+		elif NextDepth < CurDepth:
+			HTML += f'\n</{Type}>' * (CurDepth - NextDepth) + '</li>'
+		PrevDepth = CurDepth
+	return f'<{Type}>{HTML}\n</{Type}>'
+
 def MakeLinkableTitle(Line, Title, DashTitle, Type):
 	if Type == 'md':
 		Index = Title.split(' ')[0].count('#')
@@ -68,7 +90,7 @@ def GetTitle(FileName, Meta, Titles, Prefer='MetaTitle', BlogName=None):
 		Title = Meta['Title'] if Meta['Title'] else Titles[0].lstrip('#') if Titles else FileName
 	elif Prefer == 'HTMLTitle':
 		Title = Meta['HTMLTitle'] if Meta['HTMLTitle'] else Meta['Title'] if Meta['Title'] else Titles[0].lstrip('#') if Titles else FileName
-	if BlogName and 'Blog' in Meta['Categories']:
+	if Meta['Type'] == 'Post' and BlogName and 'Blog' in Meta['Categories']:
 		Title += ' - ' + BlogName
 	return Title
 
@@ -114,16 +136,15 @@ def MakeListTitle(File, Meta, Titles, Prefer, SiteRoot, BlogName, PathPrefix='')
 
 def FormatTitles(Titles, Flatten=False):
 	# TODO: Somehow titles written in Pug can end up here and don't work, they should be handled
-	HTMLTitles, DashyTitles = '', []
+	List, DashyTitles = '', []
 	for t in Titles:
 		n = 0 if Flatten else t.split(' ')[0].count('#')
+		Level = '.' * (n-1) + ':'
 		Title = MkSoup(t.lstrip('#')).get_text()
 		DashyTitle = DashifyTitle(Title, DashyTitles)
 		DashyTitles += [DashyTitle]
-		Start = '<ul>' * (n - 1)
-		End = '</ul>' * (n - 1)
-		HTMLTitles += f'<li>{Start}{html.escape(Title)}{End}</li>'
-	return f'<ul>{HTMLTitles}</ul>'
+		List += f'{Level}{html.escape(Title)}\n'
+	return GenHTMLTreeList(List)
 
 # Clean up a generic HTML tree such that it's compliant with the HTML Journal standard
 # (https://m15o.ichi.city/site/subscribing-to-a-journal-page.html);
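For reference, a rough sketch (not part of the patch) of how the reworked FormatTitles feeds the new GenHTMLTreeList helper: each line of the internal metaformat encodes its depth as a run of dots before the colon, and the helper expands those lines into nested <ul>/<li> markup. The table-of-contents entries below are made up, and the import path assumes the repository's Source/ layout.

from Modules.Elements import GenHTMLTreeList  # helper added by this patch

# Hypothetical page titles in the internal metaformat:
# '.' repeated (depth - 1) times, then ':', then the title text.
Toc = ':Introduction\n' \
      '.:Background\n' \
      '.:Goals\n' \
      ':Usage\n'

# Expected shape of the result: Introduction with Background and Goals
# nested one level deeper, then Usage back at the top level.
print(GenHTMLTreeList(Toc, Type='ul'))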
diff --git a/Source/Modules/Site.py b/Source/Modules/Site.py
index 1d80042..aef0bf0 100644
--- a/Source/Modules/Site.py
+++ b/Source/Modules/Site.py
@@ -19,28 +19,6 @@ from Modules.Markdown import *
 from Modules.Pug import *
 from Modules.Utils import *
 
-# Generate HTML tree/nested list from our internal metaformat, such as:
-# :Item 1 \\ <li>Item 1</li>
-# :Item 4 // <li>Item 4</li>
-def GenHTMLTreeList(MetaList:str, Type:str='ul'):
-	HTML = ''
-	Lines = MetaList.splitlines()
-	CurDepth, NextDepth, PrevDepth = 0, 0, 0
-	for i,e in enumerate(Lines):
-		CurDepth = e.find(':')
-		NextDepth = Lines[i+1].find(':') if i+1 < len(Lines) else 0
-		HTML += '\n<li>' + e[CurDepth+1:]
-		if NextDepth == CurDepth:
-			HTML += '</li>'
-		elif NextDepth > CurDepth:
-			HTML += f'\n<{Type}>' * (NextDepth - CurDepth)
-		elif NextDepth < CurDepth:
-			HTML += f'\n</{Type}>' * (CurDepth - NextDepth) + '</li>'
-		PrevDepth = CurDepth
-	return f'<{Type}>{HTML}\n</{Type}>'
-
 # Menu styles:
 # - Simple: Default, Flat, Line
 # - Others: Excerpt, Image, Preview (Excerpt + Image), Full
@@ -134,6 +112,22 @@ def TemplatePreprocessor(Text):
 			Meta.update({i:MetaDefault[i]})
 	return Meta
 
+def FindPreprocLine(Line, Meta, Macros):
+	Changed = False
+	Line = Line.lstrip().rstrip()
+	lll = CheckHTMLCommentLine(Line)
+	if Line.startswith('//') or lll: # Find preprocessor lines
+		lll = Line[2:].lstrip()
+		if lll.startswith('%'):
+			Meta += lll[1:].lstrip() + '\n'
+			Changed = True
+		elif lll.startswith('$'):
+			Macros += lll[1:].lstrip() + '\n'
+			Changed = True
+	#if ll.startswith('<!--') or ll.startswith('-->'): # Find comment and code blocks
+	#	IgnoreBlocksStart += [l]
+	return (Meta, Macros, Changed)
+
 def PagePreprocessor(Path, TempPath, Type, SiteTemplate, SiteRoot, GlobalMacros, CategoryUncategorized, LightRun=False):
 	File = ReadFile(Path)
 	Path = Path.lower()
@@ -154,23 +148,30 @@ def PagePreprocessor(Path, TempPath, Type, SiteTemplate, SiteRoot, GlobalMacros,
 		'UpdatedOn': '',
 		'EditedOn': '',
 		'Order': None,
-		'Language': None}
+		'Language': None,
+		'Downsync': None}
 	# Find all positions of '<!--', '-->', add them in a list=[[pos0,pos1,line0,line1],...]
 	for l in File.splitlines():
 		ll = l.lstrip().rstrip()
-		lll = CheckHTMLCommentLine(ll)
-		if ll.startswith('//') or lll: # Find preprocessor lines
-			lll = ll[2:].lstrip()
-			if lll.startswith('%'):
-				Meta += lll[1:].lstrip() + '\n'
-			elif lll.startswith('$'):
-				Macros += lll[1:].lstrip() + '\n'
-		#if ll.startswith('<!--') or ll.startswith('-->'): # Find comment and code blocks
-		#	IgnoreBlocksStart += [l]
-		else: # Find headings
+		Meta, Macros, Changed = FindPreprocLine(ll, Meta, Macros)
+		if not Changed: # Find headings
 			#if line in ignore block:
 			#	continue
 			Headings = ('h1', 'h2', 'h3', 'h4', 'h5', 'h6')
+			#if Path.endswith(FileExtensions['HTML']):
+			#	if ll[1:].startswith(Headings):
+			#		if ll[3:].startswith((" class='NoTitle", ' class="NoTitle')):
+			#			Content += l + '\n'
+			#		elif ll.replace(' ', ' ').startswith('// %'):
+			#			pass
+			#		else:
+			#			Title = '#'*int(ll[2]) + ' ' + ll[4:]
+			#			DashTitle = DashifyTitle(Title.lstrip('#'), DashyTitles)
+			#			DashyTitles += [DashTitle]
+			#			Titles += [Title]
+			#			Content += MakeLinkableTitle(l, Title, DashTitle, 'pug') + '\n'
+			#	else:
+			#		Content += l + '\n'
 			if Path.endswith(FileExtensions['HTML']) and not HTMLTitlesFound:
 				Soup = BeautifulSoup(File, 'html.parser')
 				Tags = Soup.find_all()
@@ -181,8 +182,15 @@ def PagePreprocessor(Path, TempPath, Type, SiteTemplate, SiteRoot, GlobalMacros,
 						DashyTitles += [DashTitle]
 						Titles += [Title]
 						t.replace_with(MakeLinkableTitle(None, Title, DashTitle, 'md'))
-				Content = str(Soup.prettify(formatter=None))
 				HTMLTitlesFound = True
+				Content = ''
+				TmpContent = str(Soup.prettify(formatter=None))
+				for cl in TmpContent.splitlines():
+					_, _, IsMetaLine = FindPreprocLine(cl, Meta, Macros)
+					if not IsMetaLine:
+						#print(cl)
+						Content += cl + '\n'
+				break
 			elif Path.endswith(FileExtensions['Markdown']):
 				lsuffix = ''
 				if ll.startswith(('-', '+', '*')):
@@ -196,13 +204,13 @@
 						Content += l + '\n'
 						continue
 				else:
-					Title = '#'*h + str(ll[3:])
+					Title = '#'*int(ll[2]) + ' ' + ll[4:]
 					DashTitle = DashifyTitle(MkSoup(Title.lstrip('#')).get_text(), DashyTitles)
 					DashyTitles += [DashTitle]
 					Titles += [Title]
 					Title = MakeLinkableTitle(None, Title, DashTitle, 'md')
-					Title = Title.replace('> ', Item[4])
+					# print(f'[staticoso:Transclude:{SrcPrefix}{Item[0]}]', Item[4])
+					# Operated = True
+			# if not Operated:
+			#	break
 	return MadePages
diff --git a/TODO b/TODO
index 13bd08b..faaf592 100644
--- a/TODO
+++ b/TODO
@@ -1,3 +1,5 @@
+- Misskey for ActivityPub
+- Section marking in pages (for use with external translators)
 - Choosing to use HTML or CSS styling for default internal snippets
 - Pages transclusion + probably drop StaticParts (would be redundant)
 - User macros with arguments
@@ -22,7 +24,7 @@
 - Override internal HTML snippets (meta lines, page lists, redirects, ...) with config file in Templates/NAME.ini
 - Specify input folder(s)
 - Show page size/words/time in meta line
-- Add feed support for diary-like pages
+- Add feed support for HTML Journal pages
 - Fix excess whitespace in some section/menu titles
 - Parity presence for [] and <> internal macro enclosure, + streamline the code for that
 - Investigate a strange bug with Macros
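A usage note, also not part of the patch: FindPreprocLine factors preprocessor-line detection out of PagePreprocessor so the same check can be reused on the prettified HTML content. A line whose payload (after '//' or an HTML comment wrapper) starts with '%' is appended to the page meta, one starting with '$' to the macros, and the returned Changed flag tells the caller to drop the line from the page body. The page snippet and key names below are illustrative only, and the import assumes the Source/ layout.

from Modules.Site import FindPreprocLine  # added by this patch

# Hypothetical page source: two preprocessor lines, then normal content.
Page = '// % Title: Example page\n// $ Greeting = Hello\n# A heading\nSome text.\n'

Meta, Macros = '', ''
for Line in Page.splitlines():
	Meta, Macros, Changed = FindPreprocLine(Line, Meta, Macros)
	# Changed is True only for the '// %' and '// $' lines, which would
	# therefore be skipped as content by PagePreprocessor.

# Meta now ends with 'Title: Example page\n', Macros with 'Greeting = Hello\n'.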