mirror of https://gitlab.com/octtspacc/staticoso

Minor changes; Mastodon posting message with better description + tags

parent ad0a8be875
commit 784d8e4732
@@ -11,6 +11,7 @@ import time
 from Libs.dateutil.parser import parse as date_parse
 from Libs.mastodon import Mastodon
 from Modules.HTML import *
+from Modules.Logging import *
 from Modules.Utils import *
 
 def MastodonGetSession(InstanceURL, Token):
@@ -18,22 +19,19 @@ def MastodonGetSession(InstanceURL, Token):
 		api_base_url=InstanceURL,
 		access_token=Token)
 
-def MastodonGetMyID(Session):
-	return Session.me()['id']
-
 def MastodonGetPostsFromUserID(Session, UserID):
 	return Session.account_statuses(
 		UserID,
 		exclude_replies=True)
 
-def MastodonDoPost(Session, Text, Lang=None, Visibility='public'):
+def MastodonDoPost(Session, Text:str, Lang:str=None, Visibility:str='public'):
 	if Text:
 		return Session.status_post(
 			Text,
 			language=Lang,
 			visibility=Visibility)
 
-def MastodonGetLinkFromPost(Post, Domain=None):
+def MastodonGetLinkFromPost(Post, Domain:str=None):
 	Parse = MkSoup(Post['content'])
 	if Parse.a:
 		Link = Parse.find_all('a')[-1]['href']
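
These helpers are thin wrappers over the bundled Mastodon.py client. A minimal usage sketch (the instance URL and token are placeholders, not values from this repo):

    from Libs.mastodon import Mastodon

    Session = Mastodon(api_base_url='https://mastodon.example', access_token='XXXX')  # placeholder credentials
    Post = Session.status_post('Hello from staticoso', language='en', visibility='public')
    print(Post['url'])  # link to the newly created status
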
@@ -43,55 +41,65 @@ def MastodonGetLinkFromPost(Post, Domain=None):
 			'Link': Link}
 	return None
 
-def MastodonGetAllLinkPosts(Session, Domain=None):
+def MastodonGetAllLinkPosts(Session, Domain:str=None):
 	Posts = []
-	for p in MastodonGetPostsFromUserID(Session, MastodonGetMyID(Session)):
+	for p in MastodonGetPostsFromUserID(Session, Session.me()['id']):
 		Post = MastodonGetLinkFromPost(p, Domain)
 		if Post:
 			Posts += [Post]
 	return Posts
 
-def MastodonShare(Flags, Pages, Locale):
+# TODO:
+# - Get post lang from page lang instead of site
+# - Fix message including some messed up paragraphs with the new methods
+def MastodonShare(Flags:dict, Pages:list, Locale:dict):
+	f = NameSpace(Flags)
 	SaidPosting = False
-	SiteDomain, SiteLang = Flags['SiteDomain'], Flags['SiteLang']
-	InstanceURL, Token = Flags['MastodonURL'], Flags['MastodonToken']
-	TypeFilter, HoursLimit, CategoryFilter = Flags['ActivityPubTypeFilter'], Flags['ActivityPubHoursLimit'], Flags['FeedCategoryFilter']
-	Session = MastodonGetSession(InstanceURL, Token)
-	Posts = MastodonGetAllLinkPosts(Session, SiteDomain)
-	Pages.sort()
+	NoteLimit, UrlLen = 500, 24
+	Token = f.MastodonToken
+	Check = ';Debug=True'
+	if Token.endswith(Check):
+		Debug = True
+		Token = Token[:-len(Check)]
+	else:
+		Debug = False
+	TypeFilter, HoursLimit, CategoryFilter = f.ActivityPubTypeFilter, f.ActivityPubHoursLimit, f.FeedCategoryFilter
+	Session = MastodonGetSession(f.MastodonURL, Token)
+	Posts = MastodonGetAllLinkPosts(Session, f.SiteDomain)
+	Pages.sort() # Ensure new posts are sent in order from oldest to newest
 	for File, Content, Titles, Meta, ContentHTML, SlimHTML, Description, Image in Pages:
 		if (not TypeFilter or (TypeFilter and (Meta['Type'] == TypeFilter or TypeFilter == '*'))) and (not CategoryFilter or (CategoryFilter and (CategoryFilter in Meta['Categories'] or CategoryFilter == '*'))):
-			URL = f"{SiteDomain}/{StripExt(File)}.html"
+			URL = f"{f.SiteDomain}/{StripExt(File)}.html"
 			DoPost = True
 			for p in Posts:
-				if p['Link'] in [URL]+Meta['URLs']:
+				# If a post linking to this page already exists on the net, don't repost
+				if p['Link'] in [URL]+Meta['URLs'] and not Debug:
 					DoPost = False
 					break
 
 			if DoPost and Meta['Feed'] == 'True' and (not HoursLimit or (Meta['CreatedOn'] and time.time() - time.mktime(date_parse(Meta['CreatedOn']).timetuple()) < 60*60*HoursLimit)):
-				Desc = ''
-				Paragraphs = MkSoup(ContentHTML).p.get_text().split('\n')
-				Read = '...' + Locale['ReadFullPost'] + ':\n'
-				for p in Paragraphs:
-					if p and len(Read+Desc+p)+25 < 500:
-						Desc += p + '\n\n'
-					else:
-						if Desc:
-							break
-						else:
-							Desc = p[:500-25-5-len(Read)] + '...'
+				Read = f'\n\n...{Locale["ReadFullPost"]}:\n'
+				Hashtags = ''
+				for Cat in Meta['Categories']:
+					Hashtags += f' #{Cat.replace("-", "")}'
+				Hashtags = '\n\n' + Hashtags.strip()
+				Desc = LimitText(HtmlParagraphsToText(ContentHTML, '\n'), NoteLimit - len(Read) - UrlLen - len(Hashtags))
 
 				if not SaidPosting:
-					print("[I] Posting to Mastodon")
+					logging.info("Posting to Mastodon")
 					SaidPosting = True
 
-				time.sleep(5) # Prevent flooding
-				Post = MastodonGetLinkFromPost(
-					Post=MastodonDoPost(
-						Session,
-						Text=Desc+Read+URL,
-						Lang=SiteLang),
-					Domain=SiteDomain)
-				if Post:
-					Posts += [Post]
+				if Debug:
+					Text = Desc + Read + URL + Hashtags
+					print(f'{len(Desc+Read+Hashtags)+UrlLen}:\n{Text}\n\n\n\n')
+				else:
+					time.sleep(5) # Prevent flooding
+					Post = MastodonGetLinkFromPost(
+						Post=MastodonDoPost(
+							Session,
+							Text=Desc+Read+URL+Hashtags,
+							Lang=f.SiteLang),
+						Domain=f.SiteDomain)
+					if Post:
+						Posts += [Post]
 
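
Two things changed in MastodonShare: a Debug dry-run mode, toggled by appending ';Debug=True' to the configured token, and an explicit character budget for the note text. Mastodon's default limit is 500 characters, and it counts any URL at a fixed width (23 characters; UrlLen = 24 here presumably allows one separator). A rough sketch of the budget arithmetic, with invented values:

    NoteLimit, UrlLen = 500, 24
    Read = '\n\n...Read the full post:\n'  # assumed localized string
    Hashtags = '\n\n#Example #Staticoso'   # built from the page's categories
    Budget = NoteLimit - len(Read) - UrlLen - len(Hashtags)
    Desc = LimitText(HtmlParagraphsToText(ContentHTML, '\n'), Budget)  # helpers added by this commit
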
@@ -17,8 +17,11 @@ from Modules.Utils import *
 warnings.filterwarnings('ignore', message='The input looks more like a filename than markup.')
 warnings.filterwarnings('ignore', message='The soupsieve package is not installed.')
 
-def MkSoup(Html:str):
-	return BeautifulSoup(Html, 'html.parser')
+def MkSoup(Html):
+	if type(Html) == str:
+		return BeautifulSoup(Html, 'html.parser')
+	elif type(Html) == BeautifulSoup:
+		return Html
 
 def StripAttrs(Html:str):
 	Soup = MkSoup(Html)
@@ -88,6 +91,14 @@ def SquareFnrefs(Html:str): # Different combinations of formatting for Soup .pre
 			s.replace_with(f'[{t}]')
 	return str(Soup.prettify(formatter=None))
 
+def HtmlParagraphsToText(Html:str, Sep:str='\n\n'):
+	Soup, Text = MkSoup(Html), ''
+	for Par in Soup.find_all('p'):
+		Par = Par.get_text().strip()
+		if Par:
+			Text += Par + Sep
+	return Text
+
 def DoMinifyHTML(Html:str, KeepComments:bool):
 	return htmlmin.minify(
 		input=Html,
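
MkSoup now accepts either an HTML string or an already-built BeautifulSoup object, so the new HtmlParagraphsToText can be fed both. A quick behavior sketch (the helper keeps only <p> text and appends Sep after every paragraph):

    Html = '<p>First.</p><div>ignored</div><p>Second.</p>'
    HtmlParagraphsToText(Html)                # 'First.\n\nSecond.\n\n'
    HtmlParagraphsToText(MkSoup(Html), '\n')  # 'First.\nSecond.\n'
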
@@ -9,6 +9,7 @@
 
 import logging
 import sys
+from traceback import format_exc as TracebackText
 from Modules.Config import *
 
 LoggingFormat = '[%(levelname)s] %(message)s'
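
TracebackText is an alias for traceback.format_exc, imported here so any module doing `from Modules.Logging import *` can print a full traceback after catching an exception, as SocialIntegrations now does. For example:

    try:
        1 / 0
    except:
        print(TracebackText())  # formatted traceback of the active exception
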
@@ -77,12 +77,8 @@ def PatchHtml(Flags:dict, Pages:list, Page:dict, Context:dict, Snippets:dict, Lo
 	BodyDescription, BodyImage = '', ''
 	if not File.lower().endswith('.txt'):
 		Soup = MkSoup(Content)
-		if not BodyDescription:# and Soup.p:
-			#BodyDescription = Soup.p.get_text()[:150].replace('\n', ' ').replace('"', "'") + '...'
-			for t in Soup.find_all('p'):
-				if t.get_text():
-					BodyDescription = t.get_text()[:150].replace('\n', ' ').replace('"', "'") + '...'
-					break
+		if not BodyDescription:
+			BodyDescription = html.escape(LimitText(HtmlParagraphsToText(Soup, Sep='\n'), 150).replace('\n', ' '))
 		if not BodyImage and Soup.img and Soup.img['src']:
 			BodyImage = Soup.img['src']
 
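
The hand-rolled paragraph loop is replaced by the new helpers: collect the text of all <p> tags, cut it to 150 characters, flatten newlines, and HTML-escape the result for the meta description. A sketch with sample input:

    Content = '<p>A "quoted" intro.</p><p>' + 'x' * 200 + '</p>'
    Text = HtmlParagraphsToText(MkSoup(Content), Sep='\n')
    BodyDescription = html.escape(LimitText(Text, 150).replace('\n', ' '))
    # -> 'A &quot;quoted&quot; intro.' (the overlong paragraph is dropped, quotes are escaped)
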
@@ -338,12 +334,6 @@ def HandlePage(Flags:dict, Page:list, Pages:list, Categories, LimitFiles, Snippe
 
 	return {"File": File, "Content": Content, "Titles": Titles, "Meta": Meta, "ContentHtml": ContentHTML, "SlimHtml": SlimHTML, "Description": Description, "Image": Image}
 
-def MultiprocPagePreprocessor(d:dict):
-	return PagePreprocessor(d['Flags'], d['Page'], d['GlobalMacros'], d['LightRun'])
-
-def MultiprocHandlePage(d:dict):
-	return HandlePage(d['Flags'], d['Page'], d['Pages'], d['Categories'], d['LimitFiles'], d['Snippets'], d['ConfMenu'], d['Locale'])
-
 def FindPagesPaths():
 	Paths = {"Pages":[], "Posts":[]}
 	for Ext in FileExtensions['Pages']:
@@ -400,12 +390,25 @@ def PreprocessSourcePages(Flags:dict, PagesPaths:dict, LimitFiles, GlobalMacros:
 			MultiprocPages += [{'Flags': Flags, 'Page': [f"{Type}s/{File}", TempPath, Type, None], 'GlobalMacros': GlobalMacros, 'LightRun': LightRun}]
 	return DoMultiProc(MultiprocPagePreprocessor, MultiprocPages, PoolSize, True)
 
+def MultiprocPagePreprocessor(d:dict):
+	return PagePreprocessor(d['Flags'], d['Page'], d['GlobalMacros'], d['LightRun'])
+
 def WriteProcessedPages(Flags:dict, Pages:list, Categories, ConfMenu, Snippets, LimitFiles, PoolSize:int, Locale:dict):
 	MultiprocPages = []
 	for i, Page in enumerate(Pages):
 		MultiprocPages += [{'Flags': Flags, 'Page': Page, 'Pages': Pages, 'Categories': Categories, 'LimitFiles': LimitFiles, 'Snippets': Snippets, 'ConfMenu': ConfMenu, 'Locale': Locale}]
 	return DoMultiProc(MultiprocHandlePage, MultiprocPages, PoolSize, True)
 
+def MultiprocHandlePage(d:dict):
+	return HandlePage(d['Flags'], d['Page'], d['Pages'], d['Categories'], d['LimitFiles'], d['Snippets'], d['ConfMenu'], d['Locale'])
+
+def HandleTransclusionsCaller(Base:str, Caller:str, Pages:list):
+	MultiPages = []
+	return DoMultiProc(MultiprocHandleTransclusions, MultiPages, PoolSize, True)
+
+def MultiprocHandleTransclusions(d:dict):
+	return
+
 def MakeSite(Flags:dict, LimitFiles, Snippets, ConfMenu, GlobalMacros:dict, Locale:dict, Threads:int):
 	Pages, MadePages, Categories = [], [], {}
 	PoolSize = cpu_count() if Threads <= 0 else Threads
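
The Multiproc* wrappers now sit right after the functions that dispatch them. They exist because DoMultiProc (which, judging by its call sites, maps a callable over a job list with a multiprocessing pool) needs a single picklable argument per task, so every parameter travels in one dict. The pattern, roughly:

    from multiprocessing import Pool

    def Worker(d:dict):  # one dict argument per job
        return d['x'] + d['y']

    if __name__ == '__main__':  # required on spawn-based platforms
        with Pool(2) as p:
            print(p.map(Worker, [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}]))  # [3, 7]
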
@@ -442,4 +445,8 @@ def MakeSite(Flags:dict, LimitFiles, Snippets, ConfMenu, GlobalMacros:dict, Loca
 	logging.info("Writing Pages")
 	MadePages = WriteProcessedPages(Flags, Pages, Categories, ConfMenu, Snippets, LimitFiles, PoolSize, Locale)
 
+	# TODO: Finish this and remove the transclusion feature from above
+	#logging.info("Resolving Page Transclusions")
+	#HandleTransclusionsCaller(Pages)
+
 	return MadePages
@@ -7,6 +7,7 @@
 | Copyright (C) 2022-2023, OctoSpacc |
 | ================================== """
 
+from Modules.Elements import *
 from Modules.Logging import *
 from Modules.Utils import *
 
@@ -19,13 +20,14 @@ except:
 
 def ApplySocialIntegrations(Flags, Pages, LimitFiles, Locale):
 	f = NameSpace(Flags)
-	FinalPaths = []
+	FinalPaths, MastodonPosts = [], []
 
 	if ActivityPub and f.MastodonURL and f.MastodonToken and f.SiteDomain:
 		logging.info("Mastodon Operations")
-		MastodonPosts = MastodonShare(Flags, Pages, Locale)
-	else:
-		MastodonPosts = []
+		try:
+			MastodonPosts = MastodonShare(Flags, Pages, Locale)
+		except:
+			print(TracebackText())
 
 	for File, Content, Titles, Meta, ContentHTML, SlimHTML, Description, Image in Pages:
 		if IsLightRun(File, LimitFiles):
@@ -34,7 +36,7 @@ def ApplySocialIntegrations(Flags, Pages, LimitFiles, Locale):
 		Content = ReadFile(File)
 		Post = ''
 		for p in MastodonPosts:
-			if p['Link'] == SiteDomain + '/' + File[len(f'{f.OutDir}/'):]:
+			if p['Link'] == f.SiteDomain + '/' + File[len(f'{f.OutDir}/'):]:
 				Post = HTMLCommentsBlock.format(
 					StrComments=Locale['Comments'],
 					StrOpen=Locale['OpenInNewTab'],
@@ -42,6 +44,7 @@
 				break
 		#Content = ReplWithEsc(Content, '[staticoso:Comments]', Post)
 		Content = ReplWithEsc(Content, '<staticoso:Comments>', Post)
+		Content = ReplWithEsc(Content, '<staticoso:comments>', Post)
 		WriteFile(File, Content)
 		FinalPaths += [File]
 
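
With the lowercase variant added, both spellings of the comments placeholder are substituted in the built page (Post being the rendered comments block, or an empty string when no matching Mastodon post exists):

    Content = 'before <staticoso:comments> after'
    Content = ReplWithEsc(Content, '<staticoso:Comments>', Post)  # original spelling
    Content = ReplWithEsc(Content, '<staticoso:comments>', Post)  # lowercase now handled too
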
@@ -90,7 +90,7 @@ def FindAllIndex(Str:str, Sub:str):
 		i = Str.find(Sub, i+1)
 
 # Replace substrings in a string, except when an escape char is prepended
-def ReplWithEsc(Str:str, Find:str, Repl:str, Esc:str='\\'):
+def ReplWithEsc(Str:str, Find:str, Repl:str, Html:bool=True, Esc:str='\\'):
 	New = ''
 	Sects = Str.split(Find)
 	# Every time a substring is found
@@ -100,16 +100,17 @@ def ReplWithEsc(Str:str, Find:str, Repl:str, Esc:str='\\'):
 			New += e
 		# Wrapping parts of the escaped substrings in HTML tags is done to avoid multiple calls of this function nullifying escaping
 		elif i > 0:
-			# If prev. split ends with 2 times the escape (= escaping of the escape)
+			# If prev. split ends with 2 times the escape (escaping of the escape)
 			if Sects[i-1].endswith(Esc*2):
-				Wrap = f'<span>{New[-1]}</span>'
-				New = New[:-2] + Wrap
+				Wrap1 = f'<span>{New[-1]}</span>' if Html else New[-1]
+				Wrap2 = f'<span>{New[-2]}</span>' if Html else New[-2]
+				New = New[:-3] + Wrap2 + Wrap1
 				New += Repl + e
 			# If prev. split ends with 1 time the escape (escaping of the substring)
 			elif Sects[i-1].endswith(Esc):
 				New = New[:-1]
-				Wrap = f'<span>{Find[0]}</span>'
-				New += Wrap + Find[1:] + e
+				Wrap1 = f'<span>{Find[0]}</span>' if Html else Find[0]
+				New += Wrap1 + Find[1:] + e
 			# If no escape char
 			else:
 				New += Repl + e
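
The new Html flag decides whether the pieces of an escaped match get wrapped in <span> tags, which keeps repeated passes over HTML content from undoing the escape, or are emitted as plain text. Expected behavior, sketched (assuming the function is imported from Modules.Utils):

    s = 'replace <tag> but keep \\<tag>'
    ReplWithEsc(s, '<tag>', 'X')              # escaped copy survives, its first char wrapped in <span>
    ReplWithEsc(s, '<tag>', 'X', Html=False)  # same logic, but no <span> wrapping in the output
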
@@ -216,3 +217,37 @@ def WhileFuncResultChanges(Func, Args:dict, ResultKey:str):
 		Result = Func(**Args)
 		if ResultOld == Result:
 			return Result
+
+# Ellipsize text if it isn't already, optionally writing over the last chars instead of appending
+def TryEllipsizeText(Text:str, Overwrite:bool=False, Ellipses:str='...'):
+	if not Text.endswith(Ellipses):
+		if Overwrite:
+			Text = Text[:-len(Ellipses)] + Ellipses
+		# Append normally
+		else:
+			Text += Ellipses
+	return Text
+
+# Limit the length of a text, and account for if paragraphs should be sliced or entirely deleted to fit the limit
+def LimitText(Text:str, MaxChars:int, SliceParagraphs:bool=False, ParagraphSep:str='\n'):
+	New = ''
+	Paras = Text.split(ParagraphSep)
+	if not Paras:
+		return ''
+	# The first paragraph; must always be present, ellipsized if needed
+	New = Paras[0]
+	if len(New) > MaxChars:
+		New = TryEllipsizeText(New[:MaxChars], Overwrite=True)
+	# Add a newline to the first paragraph if it wasn't ellipsized
+	else:
+		New += ParagraphSep
+	# All other paragraphs
+	for Par in Paras[1:]:
+		# If adding this paragraph to the new text would go over the limit, and we are allowed to slice, append and ellipsize it
+		if len(New + Par) > MaxChars:
+			if SliceParagraphs:
+				New = TryEllipsizeText(New[:MaxChars], Overwrite=True)
+		# If we still are within the limit, just append the paragraph
+		else:
+			New += Par + ParagraphSep
+	return New.strip()
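
A quick sketch of how the two new helpers behave (values invented):

    TryEllipsizeText('cut here')                  # 'cut here...'
    TryEllipsizeText('cut here', Overwrite=True)  # 'cut h...' (the last 3 chars are overwritten)

    Text = 'short intro\n' + 'a' * 80 + '\nfinal line'
    LimitText(Text, 40)  # 'short intro\nfinal line' (the 80-char paragraph would overflow and is dropped)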