""" ================================= | | This file is part of | | staticoso | | Just a simple Static Site Generator | | | | Licensed under the AGPLv3 license | | Copyright (C) 2022, OctoSpacc | | ================================= """ from base64 import b64encode from Modules.Globals import * from Modules.HTML import * from Modules.Utils import * JournalHeadings = ('h2','h3','h4','h5') JournalTitleDecorators = {'(':')', '[':']', '{':'}'} #JournalStyles = { # "Default": {}, # "details": {} #} HTMLSectionTitleLine = '» {Title}' PugSectionTitleLine = "{Start}{Heading}.SectionHeading.staticoso-SectionHeading #[span.SectionLink.staticoso-SectionLink #[a(href='#{DashTitle}') #[span »]] ]#[span#{DashTitle}.SectionTitle.staticoso-SectionTitle {Rest}]" CategoryPageTemplate = """\ // Title: {Name} // Type: Page // Index: True # {Name}
""" RedirectPageTemplate = """\ {TitlePrefix}Redirect

{StrClick} {StrRedirect}.

""" HTMLCommentsBlock = '

{StrComments}

{StrOpen} ↗️' def DashifyTitle(Title:str, Done:list=[]): return UndupeStr(DashifyStr(Title.lstrip(' ').rstrip(' ')), Done, '-') # Generate HTML tree list (nested list) from our internal metaformat, such as: # :Item 1 \\
  • Item 1
  • # :Item 4 //
  • Item 4
def GenHTMLTreeList(MetaList:str, Type:str='ul', Class:str=""):
	"""Build a nested HTML list (<ul>/<ol> per Type) from the internal metaformat.

	Each line's depth is the index of its first ':' (leading dots set depth);
	deeper following lines open nested lists, shallower ones close them.
	NOTE(review): the list-item markup inside the plain string literals below
	was stripped by text extraction (only '•' residue remains); restore the
	exact strings from version control.
	"""
	HTML = ''
	Lines = MetaList.splitlines()
	CurDepth, NextDepth, PrevDepth = 0, 0, 0
	for i,e in enumerate(Lines):
		CurDepth = e.find(':')
		# Depth of the following line decides whether to nest or unwind; 0 at the end.
		NextDepth = Lines[i+1].find(':') if i+1 < len(Lines) else 0
		HTML += '\n
• ' + e[CurDepth+1:]
		if NextDepth == CurDepth:
			HTML += '
• '
		elif NextDepth > CurDepth:
			# Open one nested list per extra depth level.
			HTML += f'\n<{Type}>' * (NextDepth - CurDepth)
		elif NextDepth < CurDepth:
			# Close the lists we are leaving.
			HTML += f'\n' * (CurDepth - NextDepth) + ''
		PrevDepth = CurDepth
	return f'<{Type} class="staticoso-TreeList {Class}">{HTML}\n'

def MakeLinkableTitle(Line:str, Title:str, DashTitle:str, Type:str):
	"""Format a section heading with a self-link anchor.

	Type 'md' uses the number of leading '#' as the heading level and fills
	HTMLSectionTitleLine; Type 'pug' splits the line at the 'h' tag and fills
	PugSectionTitleLine. Returns None for any other Type.
	"""
	if Type == 'md':
		Index = Title.split(' ')[0].count('#')
		return HTMLSectionTitleLine.format(Index=Index, DashTitle=DashTitle, Title=Title[Index+1:])
	elif Type == 'pug':
		Index = Line.find('h')
		return PugSectionTitleLine.format(Start=Line[:Index], Heading=Line[Index:Index+2], Rest=Line[Index+2:], DashTitle=DashTitle)

def GetTitle(FileName:str, Meta:dict, Titles:list, Prefer:str='MetaTitle', BlogName:str=None):
	"""Choose a page title according to Prefer ('BodyTitle' | 'MetaTitle' | 'HTMLTitle').

	Falls back through meta title, first body heading (with '#' stripped),
	then the file name. Posts categorized under 'Blog' get ' - BlogName'
	appended when BlogName is set.
	"""
	if Prefer == 'BodyTitle':
		Title = Titles[0].lstrip('#') if Titles else Meta['Title'] if Meta['Title'] else FileName
	elif Prefer == 'MetaTitle':
		Title = Meta['Title'] if Meta['Title'] else Titles[0].lstrip('#') if Titles else FileName
	elif Prefer == 'HTMLTitle':
		Title = Meta['HTMLTitle'] if Meta['HTMLTitle'] else Meta['Title'] if Meta['Title'] else Titles[0].lstrip('#') if Titles else FileName
	if Meta['Type'] == 'Post' and BlogName and 'Blog' in Meta['Categories']:
		Title += ' - ' + BlogName
	return Title

def GetDescription(Meta:dict, BodyDescription:str, Prefer:str='MetaDescription'):
	"""Choose a page description, preferring meta or body text per Prefer; '' if neither."""
	if Prefer == 'BodyDescription':
		Description = BodyDescription if BodyDescription else Meta['Description'] if Meta['Description'] else ''
	elif Prefer == 'MetaDescription':
		Description = Meta['Description'] if Meta['Description'] else BodyDescription if BodyDescription else ''
	return Description

def GetImage(Meta:dict, BodyImage:str, Prefer:str='MetaImage'):
	"""Choose a page image, preferring meta or body value per Prefer; '' if neither."""
	if Prefer == 'BodyImage':
		Image = BodyImage if BodyImage else Meta['Image'] if Meta['Image'] else ''
	elif Prefer == 'MetaImage':
		Image = Meta['Image'] if Meta['Image'] else BodyImage if BodyImage else ''
	return Image

def MakeContentHeader(Meta:dict, Locale:dict, Categories:str=''):
	"""Render the content header block: creation/edit dates, category links,
	and an 'Unlisted' notice when the page index is negative.

	NOTE(review): the HTML markup in the f-strings below appears stripped by
	text extraction; restore from version control.
	"""
	Header = ''
	for e in ['CreatedOn', 'EditedOn']:
		if Meta[e]:
			Header += f'{Locale[e]}: {Meta[e]}
	'
	if Categories:
		Header += f'{Locale["Categories"]}:{Categories.removesuffix(" ")}
	'
	# PageIndexStrNeg presumably lists the "don't index" values — defined in Modules.Globals; confirm.
	if Meta['Index'].lower() in PageIndexStrNeg:
		Header += f'{Locale["Unlisted"]}
	'
	return f'

	{Header}

	'

def MakeCategoryLine(File:str, Meta:dict):
	"""Render the category list for a page, one escaped entry per category.
	NOTE(review): link markup in the f-string appears stripped by extraction."""
	Categories = ''
	for Cat in Meta['Categories']:
		Categories += f' {html.escape(Cat)} '
	return Categories

def MakeListTitle(File:str, Meta:dict, Titles:list, Prefer:str, BlogName:str, PathPrefix:str=''):
	"""Render a page's title as a list entry, linked unless Meta['Index'] is
	'Unlinked'; posts get their creation date prepended.
	NOTE(review): anchor/date markup in the f-strings appears stripped by extraction."""
	Title = GetTitle(File.split('/')[-1], Meta, Titles, Prefer, BlogName).lstrip().rstrip()
	Link = False if Meta['Index'] == 'Unlinked' else True
	if Link:
		Href = f'{PathPrefix}{StripExt(File)}.html'
		Title = f'{Title}'
	#else:
	#	Title = f'{Title}'
	if Meta['Type'] == 'Post':
		CreatedOn = Meta['CreatedOn'] if Meta['CreatedOn'] else '?'
		Title = f"[] {Title}"
	return Title

def FormatTitles(Titles:list, Flatten=False):
	"""Convert raw Markdown headings into a nested HTML tree list of unique,
	dashified title anchors (flat when Flatten is True)."""
	# TODO: Somehow titles written in Pug can end up here and don't work, they should be handled
	List, DashyTitles = '', []
	for t in Titles:
		# Heading depth from the leading '#' run; 0 when flattening.
		n = 0 if Flatten else t.split(' ')[0].count('#')
		Level = '.' * (n-1) + ':'
		Title = MkSoup(t.lstrip('#')).get_text()
		DashyTitle = DashifyTitle(Title, DashyTitles)
		DashyTitles += [DashyTitle]
		List += f'{Level}{html.escape(Title)}\n'
	return GenHTMLTreeList(List)

# Clean up a generic HTML tree such that it's compliant with the HTML Journal standard
# (https://m15o.ichi.city/site/subscribing-to-a-journal-page.html);
# basis is: find an element with the JournalBody attr., and group its direct children as
def MakeHTMLJournal(Flags, Locale, FilePath, HTML):
	"""Rebuild a page as an HTML Journal standard page
	(https://m15o.ichi.city/site/subscribing-to-a-journal-page.html).

	Scans the soup for elements carrying the "htmljournal" attribute, splits
	their children into entries using JournalHeadings as separators, and wraps
	the result with a dedicated title, header, and optional redirect/feed link.

	NOTE(review): the markup inside the f-string templates below appears to
	have been stripped by text extraction — restore from version control.
	Also note `t` is referenced after the find_all loop; the original
	indentation (lost here) may have placed that tail inside the loop — confirm.
	"""
	Soup, Journal, Entries = MkSoup(HTML), '', []
	for t in Soup.find_all(attrs={"htmljournal":True}):
		#JournalStyle = JournalStyles[t.attrs["journalstyle"]] if 'journalstyle' in t.attrs and t.attrs["journalstyle"] in JournalStyles else JournalStyles['Default']
		for c in t.children: # Entries, some might be entirely grouped in their own element but others could not, use headings as separators
			for ct in MkSoup(str(c)).find_all():
				# Transform (almost, for now I reserve some) any heading into h2 and remove any attributes
				if ct.name in JournalHeadings:
					Title = ct.text.strip().removeprefix('»').strip()
					Chr0 = Title[0] # Remove leading symbols before date
					if Chr0 in JournalTitleDecorators.keys():
						Idx = Title.find(JournalTitleDecorators[Chr0])
						Title = Title[1:Idx] + ' - ' + Title[Idx+2:]
					# Close the previous entry before opening a new one (markup stripped in extraction).
					if Journal:
						Journal += '\n

	\n'
					Journal += f'\n
	\n

	{Title}

	\n'
				elif ct.name == 'p': # We should handle any type to preserve line breaks and things (tag name lost in extraction)
					Journal += str(ct)
	FileName = FilePath.split('/')[-1]
	URL = f'{Flags["SiteDomain"]}/{StripExt(FilePath)}.Journal.html'
	# Optional redirect snippet; template content appears stripped in extraction.
	Redirect = f"""""" if Flags["JournalRedirect"] else ''
	# Instead of copying stuff from the full page, for now we use dedicated title, header, footer, and pagination
	Title = t.attrs["journaltitle"] if 'journaltitle' in t.attrs else f'"{StripExt(FileName)}" Journal - {Flags["SiteName"]}' if Flags["SiteName"] else f'"{StripExt(FileName)}" Journal'
	# Atom feed link, only when a site domain is configured (markup appears stripped).
	FeedLink = f"""Journal Atom Feed""" if Flags["SiteDomain"] else ''
	Header = t.attrs["journalheader"] if 'journalheader' in t.attrs else f"""\

	{Locale["StrippedDownNotice"].format(Link="./"+FileName)} Valid HTML Journal {FeedLink}

	"""
	# Final page assembly; wrapper markup appears stripped in extraction.
	Journal = f"""\

	{Title}


	{Journal}

	"""
	return Journal