Status prints

octospacc 2022-07-01 16:39:54 +02:00
parent 879e0766b4
commit e44b4a0d84
4 changed files with 36 additions and 33 deletions

View File

@@ -457,6 +457,7 @@ def Main(Args, FeedEntries):
 	if Args.GemtextOut:
 		shutil.copytree('Posts', 'public.gmi/Posts', ignore=IgnoreFiles)

+	print("[I] Generating HTML")
 	Pages = MakeSite(
 		TemplatesText=LoadFromDir('Templates', '*.html'),
 		PartsText=LoadFromDir('Parts', '*.html'),
@@ -474,6 +475,7 @@ def Main(Args, FeedEntries):
 		MarkdownExts=literal_eval(Args.MarkdownExts) if Args.MarkdownExts else ['attr_list'])

 	if FeedEntries != 0:
+		print("[I] Generating Feeds")
 		MakeFeed(
 			Pages=Pages,
 			SiteName=SiteName,
@@ -484,6 +486,7 @@ def Main(Args, FeedEntries):
 			Minify=True if Args.Minify and Args.Minify not in ('False', 'None') else False)

 	if MastodonURL and MastodonToken and SiteDomain:
+		print("[I] Mastodon Stuff")
 		MastodonPosts = MastodonShare(
 			MastodonURL,
 			MastodonToken,
@@ -509,10 +512,12 @@ def Main(Args, FeedEntries):
 				WriteFile(File, Content)

 	if Args.GemtextOut:
+		print("[I] Generating Gemtext")
 		GemtextCompileList(Pages)

 	DelTmp()
 	os.system("cp -R Assets/* public/")
+	print("[I] Done!")

 if __name__ == '__main__':
 	Parser = argparse.ArgumentParser()
@@ -538,8 +543,8 @@ if __name__ == '__main__':
 		from Modules.Feed import *
 		FeedEntries = Args.FeedEntries if Args.FeedEntries else 10
 	except:
-		print("[E] Can't load the Atom/RSS feed libraries. Their generation is disabled.")
 		FeedEntries = 0
+		print("[W] Warning: Can't load the Atom/RSS feed libraries. Their generation is disabled.")

 	Main(
 		Args=Args,

View File

@@ -19,7 +19,7 @@ except ModuleNotFoundError:
 import requests
 from requests.models import urlencode
 from .. import dateutil
-from ..dateutil import parser as dateutil_parser #import dateutil.parser
+from ..dateutil import parser as dateutil_parser
 import re
 import copy
 import threading

View File

@@ -26,27 +26,33 @@ def MastodonGetPostsFromUserID(Session, UserID):

 def MastodonDoPost(Session, Text, Lang=None, Visibility='public'):
 	if Text:
-		Session.status_post(
+		return Session.status_post(
 			Text,
 			language=Lang,
 			visibility=Visibility)

-def MastodonGetLinkPosts(Session, Domain=None):
+def MastodonGetLinkFromPost(Post, Domain=None):
+	Parse = BeautifulSoup(Post['content'], 'html.parser')
+	if Parse.a:
+		Link = Parse.find_all('a')[-1]['href']
+		if not Domain or (Domain and Link.startswith(Domain)):
+			return {
+				'Post': Post['uri'],
+				'Link': Link}
+	return None
+
+def MastodonGetAllLinkPosts(Session, Domain=None):
 	Posts = []
-	for i,e in enumerate(MastodonGetPostsFromUserID(Session, MastodonGetMyID(Session))):
-		Parse = BeautifulSoup(e['content'], 'html.parser')
-		if Parse.a:
-			Link = Parse.find_all('a')[-1]['href']
-			if not Domain or (Domain and Link.startswith(Domain)):
-				Posts += [{
-					'Post': e['uri'],
-					'Link': Link}]
+	for p in MastodonGetPostsFromUserID(Session, MastodonGetMyID(Session)):
+		Post = MastodonGetLinkFromPost(p, Domain)
+		if Post:
+			Posts += [Post]
 	return Posts

 # TODO: Set a limit/cooldown on how many new posts at a time can be posted, or ignore posts older than date X.. otherwise if someone starts using this after having written 100 blog posts, bad things will happen
 def MastodonShare(MastodonURL, MastodonToken, Pages, SiteDomain, SiteLang, Locale):
 	Session = MastodonGetSession(MastodonURL, MastodonToken)
-	Posts = MastodonGetLinkPosts(Session, SiteDomain)
+	Posts = MastodonGetAllLinkPosts(Session, SiteDomain)
 	Pages.sort()
 	for File, Content, Titles, Meta, HTMLContent, Description, Image in Pages:
 		if Meta['Type'] == 'Post':
@@ -69,8 +75,13 @@ def MastodonShare(MastodonURL, MastodonToken, Pages, SiteDomain, SiteLang, Locale):
 					DoPost = False
 					break
 			if DoPost:
-				MastodonDoPost(
-					Session,
-					Desc + Read + URL,
-					SiteLang)
+				sleep(3)
+				Post = MastodonGetLinkFromPost(
+					Post=MastodonDoPost(
+						Session,
+						Text=Desc+Read+URL,
+						Lang=SiteLang),
+					Domain=SiteDomain)
+				if Post:
+					Posts += [Post]
 	return Posts
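The TODO above is left open by this commit: on a first run against a site with many existing posts, every page still gets published in a single pass, with only the new sleep(3) between statuses. Below is a minimal sketch of the per-run cap the TODO asks for, reusing MastodonDoPost and MastodonGetLinkFromPost from this diff; the MastodonSharePage helper and its MaxNewPosts parameter are illustrative assumptions, not code from the repository.

from time import sleep

def MastodonSharePage(Session, Text, SiteDomain, SiteLang, Posts, NewPosts, MaxNewPosts=5):
	# Hypothetical helper, not part of this commit: stop publishing once
	# MaxNewPosts statuses have been created in this run, so a fresh setup
	# with a large backlog of pages does not flood the account.
	if NewPosts >= MaxNewPosts:
		return Posts, NewPosts
	sleep(3)  # same pause this commit adds between consecutive statuses
	Post = MastodonGetLinkFromPost(
		Post=MastodonDoPost(Session, Text=Text, Lang=SiteLang),
		Domain=SiteDomain)
	if Post:
		Posts += [Post]
	return Posts, NewPosts + 1

Inside the existing if DoPost: branch this would replace the direct posting chain, with NewPosts initialised to 0 before the page loop.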

View File

@@ -12,6 +12,7 @@
 from Libs.bs4 import BeautifulSoup
 from Modules.Utils import *

+"""
 ClosedTags = (
 	'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
 	'p', 'span', 'pre', 'code',
@@ -20,6 +21,7 @@ ClosedTags = (
 	'ol', 'ul', 'li', 'dl', 'dt', 'dd')
 OpenTags = (
 	'img')
+"""

 def GemtextCompileList(Pages):
 	Cmd = ''
@@ -41,6 +43,7 @@ def ParseTag(Content):
 	Parse = BeautifulSoup(str(Content), 'html.parser')
 	Tag = Parse.find()

+"""
 def HTML2Gemtext(Pages, SiteName, SiteTagline):
 	#os.mkdir('public.gmi')
 	for File, Content, Titles, Meta, HTMLContent, Description, Image in Pages:
@@ -78,20 +81,4 @@ def HTML2Gemtext(Pages, SiteName, SiteTagline):
 		PagePath = 'public.gmi/{}.gmi'.format(StripExt(File))
 		WriteFile(PagePath, Gemtext)
 	#exit()
-
-""" Gemtext:
-# h1
-## h2
-### h3
-* li
-* li
-=> [protocol://]URL Link Description
-> Quote
-```
-Preformatted
-```
 """