Transition to monorepo on a new Dev branch

2023-02-22 10:29:46 +01:00
parent 8436f03ec7
commit 3a483dc0ef
3712 changed files with 751 additions and 11 deletions

App/Source/Build.py (new file, +376 lines)

@@ -0,0 +1,376 @@
#!/usr/bin/env python3
""" ================================= |
| This file is part of |
| staticoso |
| Just a simple Static Site Generator |
| |
| Licensed under the AGPLv3 license |
| Copyright (C) 2022, OctoSpacc |
| ================================= """
import argparse
import os
import shutil
import time
from ast import literal_eval
from datetime import datetime
from pathlib import Path
from Modules.Config import *
from Modules.Gemini import *
from Modules.Globals import *
from Modules.Logging import *
from Modules.Markdown import *
from Modules.Site import *
from Modules.Sitemap import *
from Modules.Utils import *
try:
from Modules.ActivityPub import *
ActivityPub = True
except ImportError:
	logging.warning("⚠ Can't load the ActivityPub module. Its use is disabled. Make sure the 'requests' library is installed.")
	ActivityPub = False
from Libs import rcssmin
cssmin = rcssmin._make_cssmin(python_only=True)
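# Remove any previous build output: both the HTML tree (OutDir) and the parallel Gemtext tree (OutDir.gmi).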
def ResetOutDir(OutDir):
for e in (OutDir, f"{OutDir}.gmi"):
try:
shutil.rmtree(e)
except FileNotFoundError:
pass
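# Delete leftover intermediate files: anything with an extension in FileExtensions['Tmp'] inside OutDir, plus *.tmp files in both output trees.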
def DelTmp(OutDir):
for Ext in FileExtensions['Tmp']:
for File in Path(OutDir).rglob(f"*.{Ext}"):
os.remove(File)
for Dir in (OutDir, f"{OutDir}.gmi"):
for File in Path(Dir).rglob('*.tmp'):
os.remove(File)
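# Fill in the default sort order for any section the config leaves unset, e.g. {} -> {'Pages': 'Standard', 'Posts': 'Inverse'}.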
def SetSorting(Sorting):
Default = {
'Pages':'Standard',
'Posts':'Inverse'}
for i in Default:
if i not in Sorting:
Sorting.update({i:Default[i]})
return Sorting
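# Build the navigation menu from the [Menu] config section, where keys are positions and values are entries.
# Entries already wrapped in HTML/Markdown markup are kept verbatim; plain paths get a '.html' suffix.
# Illustrative: {'0': 'Home', '2': '<a href="/About">About</a>'} -> ['Home.html', None, '<a href="/About">About</a>'].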
def GetConfMenu(Entries, MarkdownExts):
if Entries:
Menu, Max = [], 0
for i in Entries:
if int(i) > Max:
Max = int(i)
Menu = [None] * (Max+1)
for i in Entries:
e = Entries[i]
if not ((e.startswith('<') or e.startswith('[') or e.startswith('- ')) and (e.endswith('>') or e.endswith(')') or e.endswith('}'))):
if not e.lower().endswith(FileExtensions['HTML']):
e += '.html'
Menu[int(i)] = e
return Menu
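# Refuse output locations that would clobber the input: the input directory itself, or one of its reserved subdirectories.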
def CheckSafeOutDir(OutDir):
InDir = os.path.realpath(os.getcwd())
OutDir = os.path.realpath(OutDir)
OutFolder = OutDir.split('/')[-1]
if InDir == OutDir:
logging.error(f"⛔ Output and Input directories ({OutDir}) can't be the same. Exiting.")
exit(1)
elif OutFolder in ReservedPaths and f"{InDir}/{OutFolder}" == OutDir:
logging.error(f"⛔ Output directory {OutDir} can't be a reserved subdirectory of the Input. Exiting.")
exit(1)
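# Differential builds: compare each source file's mtime in Pages/ and Posts/ against its built HTML, returning the stale sources.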
def GetModifiedFiles(OutDir):
All, Mod = [], []
	for ContentDir in ('Pages', 'Posts'):
		for Root, Dirs, Files in os.walk(ContentDir):
			for File in Files:
				Src = os.path.join(Root, File)
				SrcTime = int(os.path.getmtime(Src))
				if ContentDir == 'Pages':
					Tmp = '/'.join(Src.split('/')[1:])
				elif ContentDir == 'Posts':
					Tmp = Src
Obj = f"{OutDir}/{StripExt(Tmp)}.html"
try:
ObjTime = int(os.path.getmtime(Obj))
except FileNotFoundError:
ObjTime = 0
All += [{'Tmp':Tmp, 'SrcTime':SrcTime, 'ObjTime':ObjTime}]
for File in All:
if File['SrcTime'] > File['ObjTime']:
Mod += [File['Tmp']]
return Mod
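# For every alternative URL declared in a page's metadata, emit a small redirect page pointing at the real document.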
def WriteRedirects(Flags, Pages, FinalPaths, Locale):
OutDir, SiteName, SiteDomain = Flags['OutDir'], Flags['SiteName'], Flags['SiteDomain']
for File, Content, Titles, Meta, ContentHTML, SlimHTML, Description, Image in Pages:
for URL in Meta['URLs']:
DestFile = f"{OutDir}/{URL}"
if DestFile not in FinalPaths:
DestURL = f"{GetPathLevels(URL)}{StripExt(File)}.html"
mkdirps(os.path.dirname(DestFile))
WriteFile(DestFile, RedirectPageTemplate.format(
SiteDomain=SiteDomain,
DestURL=DestURL,
TitlePrefix=f"{SiteName} - " if SiteName else '',
StrClick=Locale['ClickHere'],
StrRedirect=Locale['IfNotRedirected']))
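# Full site build: load config, stage Pages/ and Posts/ into the output, render HTML, then handle feeds, comments, Gemtext, sitemap, and assets.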
def Main(Args, FeedEntries):
Flags, Snippets, FinalPaths = {}, {}, []
HavePages, HavePosts = False, False
SiteConf = LoadConfFile('Site.ini')
#ConfigLogging(DefConfOptChoose('Logging', Args.Logging, ReadConf(SiteConf, 'staticoso', 'Logging')))
#if Args.InputDir:
# os.chdir(Args.InputDir)
# print(f"[I] Current directory: {Args.InputDir}")
SiteName = Flags['SiteName'] = OptChoose('', Args.SiteName, ReadConf(SiteConf, 'Site', 'Name'))
if SiteName:
logging.info(f"Compiling: {SiteName}")
OutDir = Flags['OutDir'] = DefConfOptChoose('OutDir', Args.OutputDir, ReadConf(SiteConf, 'Site', 'OutputDir'))
OutDir = Flags['OutDir'] = OutDir.removesuffix('/')
CheckSafeOutDir(OutDir)
logging.info(f"Outputting to: {OutDir}/")
Threads = Args.Threads if Args.Threads else DefConf['Threads']
DiffBuild = Args.DiffBuild if Args.DiffBuild else DefConf['DiffBuild']
BlogName = Flags['BlogName'] = OptChoose('', Args.BlogName, ReadConf(SiteConf, 'Site', 'BlogName'))
SiteTagline = Flags['SiteTagline'] = OptChoose('', Args.SiteTagline, ReadConf(SiteConf, 'Site', 'Tagline'))
SiteTemplate = Flags['SiteTemplate'] = DefConfOptChoose('SiteTemplate', Args.SiteTemplate, ReadConf(SiteConf, 'Site', 'Template'))
SiteDomain = Flags['SiteDomain'] = OptChoose('', Args.SiteDomain, ReadConf(SiteConf, 'Site', 'Domain'))
SiteRoot = Flags['SiteRoot'] = OptChoose('/', Args.SiteRoot, ReadConf(SiteConf, 'Site', 'Root'))
SiteLang = Flags['SiteLang'] = DefConfOptChoose('SiteLang', Args.SiteLanguage, ReadConf(SiteConf, 'Site', 'Language'))
Sorting = Flags['Sorting'] = literal_eval(OptChoose('{}', Args.Sorting, ReadConf(SiteConf, 'Site', 'Sorting')))
Sorting = Flags['Sorting'] = SetSorting(Sorting)
NoScripts = Flags['NoScripts'] = StrBoolChoose(False, Args.NoScripts, ReadConf(SiteConf, 'Site', 'NoScripts'))
FolderRoots = Flags['FolderRoots'] = literal_eval(Args.FolderRoots) if Args.FolderRoots else {}
ActivityPubTypeFilter = Flags['ActivityPubTypeFilter'] = DefConfOptChoose('ActivityPubTypeFilter', Args.ActivityPubTypeFilter, ReadConf(SiteConf, 'ActivityPub', 'TypeFilter'))
ActivityPubHoursLimit = Flags['ActivityPubHoursLimit'] = DefConfOptChoose('ActivityPubHoursLimit', Args.ActivityPubHoursLimit, ReadConf(SiteConf, 'ActivityPub', 'HoursLimit'))
MastodonURL = Flags['MastodonURL'] = OptChoose('', Args.MastodonURL, ReadConf(SiteConf, 'Mastodon', 'URL'))
MastodonToken = Flags['MastodonToken'] = OptChoose('', Args.MastodonToken, ReadConf(SiteConf, 'Mastodon', 'Token'))
MarkdownExts = Flags['MarkdownExts'] = literal_eval(OptionChoose(str(MarkdownExtsDefault), Args.MarkdownExts, ReadConf(SiteConf, 'Markdown', 'Exts')))
SitemapOutput = Flags['SitemapOutput'] = StrBoolChoose(True, Args.SitemapOutput, ReadConf(SiteConf, 'Sitemap', 'Output'))
MinifyOutput = Flags['MinifyOutput'] = StrBoolChoose(False, Args.MinifyOutput, ReadConf(SiteConf, 'Minify', 'Output'))
MinifyAssets = Flags['MinifyAssets'] = StrBoolChoose(False, Args.MinifyAssets, ReadConf(SiteConf, 'Minify', 'Assets'))
MinifyKeepComments = Flags['MinifyKeepComments'] = StrBoolChoose(False, Args.MinifyKeepComments, ReadConf(SiteConf, 'Minify', 'KeepComments'))
ImgAltToTitle = Flags['ImgAltToTitle'] = StrBoolChoose(True, Args.ImgAltToTitle, ReadConf(SiteConf, 'Site', 'ImgAltToTitle'))
ImgTitleToAlt = Flags['ImgTitleToAlt'] = StrBoolChoose(False, Args.ImgTitleToAlt, ReadConf(SiteConf, 'Site', 'ImgTitleToAlt'))
HTMLFixPre = Flags['HTMLFixPre'] = StrBoolChoose(False, Args.HTMLFixPre, ReadConf(SiteConf, 'Site', 'HTMLFixPre'))
CategoriesAutomatic = Flags['CategoriesAutomatic'] = StrBoolChoose(False, Args.CategoriesAutomatic, ReadConf(SiteConf, 'Categories', 'Automatic'))
CategoriesUncategorized = Flags['CategoriesUncategorized'] = DefConfOptChoose('CategoriesUncategorized', Args.CategoriesUncategorized, ReadConf(SiteConf, 'Categories', 'Uncategorized'))
GemtextOutput = Flags['GemtextOutput'] = StrBoolChoose(False, Args.GemtextOutput, ReadConf(SiteConf, 'Gemtext', 'Output'))
GemtextHeader = Flags['GemtextHeader'] = Args.GemtextHeader if Args.GemtextHeader else ReadConf(SiteConf, 'Gemtext', 'Header') if ReadConf(SiteConf, 'Gemtext', 'Header') else f"# {SiteName}\n\n" if SiteName else ''
FeedCategoryFilter = Flags['FeedCategoryFilter'] = DefConfOptChoose('FeedCategoryFilter', Args.FeedCategoryFilter, ReadConf(SiteConf, 'Feed', 'CategoryFilter'))
FeedEntries = Flags['FeedEntries'] = int(FeedEntries) if (FeedEntries or FeedEntries == 0) and FeedEntries != 'Default' else int(ReadConf(SiteConf, 'Feed', 'Entries')) if ReadConf(SiteConf, 'Feed', 'Entries') else DefConf['FeedEntries']
JournalRedirect = Flags["JournalRedirect"] = StrBoolChoose(DefConf["JournalRedirect"], Args.JournalRedirect, ReadConf(SiteConf, 'Journal', 'Redirect'))
DynamicParts = Flags['DynamicParts'] = literal_eval(OptionChoose('{}', Args.DynamicParts, ReadConf(SiteConf, 'Site', 'DynamicParts')))
DynamicPartsText = Snippets['DynamicParts'] = LoadFromDir('DynamicParts', ['*.htm', '*.html'])
StaticPartsText = Snippets['StaticParts'] = LoadFromDir('StaticParts', ['*.htm', '*.html'])
TemplatesText = Snippets['Templates'] = LoadFromDir('Templates', ['*.htm', '*.html'])
MenuEntries = ReadConf(SiteConf, 'Menu')
if MenuEntries:
ConfMenu = GetConfMenu(MenuEntries, MarkdownExts)
else:
ConfMenu = []
SiteDomain = Flags['SiteDomain'] = SiteDomain.removesuffix('/')
Locale = LoadLocale(SiteLang)
if DiffBuild:
logging.info("Build mode: Differential")
LimitFiles = GetModifiedFiles(OutDir)
else:
logging.info("Build mode: Clean")
ResetOutDir(OutDir)
LimitFiles = False
if os.path.isdir('Pages'):
HavePages = True
shutil.copytree('Pages', OutDir, dirs_exist_ok=True)
if Flags['GemtextOutput']:
shutil.copytree('Pages', f"{OutDir}.gmi", ignore=IgnoreFiles, dirs_exist_ok=True)
if os.path.isdir('Posts'):
HavePosts = True
shutil.copytree('Posts', f"{OutDir}/Posts", dirs_exist_ok=True)
if Flags['GemtextOutput']:
shutil.copytree('Posts', f"{OutDir}.gmi/Posts", ignore=IgnoreFiles, dirs_exist_ok=True)
if not (HavePages or HavePosts):
logging.error("⛔ No Pages or posts found. Nothing to do, exiting!")
exit(1)
logging.info("Generating HTML")
Pages = MakeSite(
Flags=Flags,
LimitFiles=LimitFiles,
Snippets=Snippets,
ConfMenu=ConfMenu,
GlobalMacros=ReadConf(SiteConf, 'Macros'),
Locale=Locale,
Threads=Threads)
if FeedEntries != 0:
logging.info("Generating Feeds")
for FeedType in (True, False):
MakeFeed(Flags, Pages, FeedType)
if ActivityPub and MastodonURL and MastodonToken and SiteDomain:
logging.info("Mastodon Stuff")
MastodonPosts = MastodonShare(Flags, Pages, Locale)
else:
MastodonPosts = []
for File, Content, Titles, Meta, ContentHTML, SlimHTML, Description, Image in Pages:
if IsLightRun(File, LimitFiles):
continue
File = f"{OutDir}/{StripExt(File)}.html"
Content = ReadFile(File)
Post = ''
for p in MastodonPosts:
if p['Link'] == SiteDomain + '/' + File[len(f"{OutDir}/"):]:
Post = HTMLCommentsBlock.format(
StrComments=Locale['Comments'],
StrOpen=Locale['OpenInNewTab'],
URL=p['Post'])
break
Content = ReplWithEsc(Content, '[staticoso:Comments]', Post)
Content = ReplWithEsc(Content, '<staticoso:Comments>', Post)
WriteFile(File, Content)
FinalPaths += [File]
logging.debug("Creating Redirects")
WriteRedirects(Flags, Pages, FinalPaths, Locale)
if Flags['GemtextOutput']:
logging.info("Generating Gemtext")
GemtextCompileList(Flags, Pages, LimitFiles)
logging.info("Cleaning Temporary Files")
DelTmp(OutDir)
if Flags['SitemapOutput']:
logging.info("Generating Sitemap")
MakeSitemap(Flags, Pages)
logging.info("Preparing Assets")
#os.system(f"cp -R Assets/* {OutDir}/")
if Flags['MinifyAssets']:
shutil.copytree('Assets', OutDir, ignore=IgnoreFiles, dirs_exist_ok=True)
for File in Path('Assets').rglob('*'):
if os.path.isfile(File):
Dest = f"{OutDir}/{str(File)[len('Assets')+1:]}"
if str(File).lower().endswith(FileExtensions['HTML']):
WriteFile(Dest, DoMinifyHTML(ReadFile(File), MinifyKeepComments))
elif str(File).lower().endswith('.css'):
WriteFile(Dest, cssmin(ReadFile(File), MinifyKeepComments))
else:
shutil.copy2(File, Dest)
else:
shutil.copytree('Assets', OutDir, dirs_exist_ok=True)
#def DoSiteBuild(Arg=None):
# #try:
# # SiteEditObserver.stop()
# # SiteEditObserver.join()
# #except:
# # pass
# Main(Args=Args, FeedEntries=FeedEntries)
# logging.info(f"✅ Done! ({round(time.time()-StartTime, 3)}s)")
# #SiteEditObserver.start()
if __name__ == '__main__':
StartTime = time.time()
Parser = argparse.ArgumentParser()
Parser.add_argument('--Logging', type=str) # Levels: Debug, Info, Warning, Error.
Parser.add_argument('--Threads', type=str) # Set 0 to use all CPU cores
Parser.add_argument('--DiffBuild', type=str)
Parser.add_argument('--OutputDir', type=str)
#Parser.add_argument('--InputDir', type=str)
Parser.add_argument('--Sorting', type=str)
Parser.add_argument('--SiteLang', type=str) # DEPRECATED
Parser.add_argument('--SiteLanguage', type=str)
Parser.add_argument('--SiteRoot', type=str)
Parser.add_argument('--SiteName', type=str)
Parser.add_argument('--BlogName', type=str)
Parser.add_argument('--SiteTemplate', type=str)
Parser.add_argument('--SiteDomain', type=str)
Parser.add_argument('--MinifyOutput', type=str)
Parser.add_argument('--MinifyAssets', type=str)
Parser.add_argument('--MinifyKeepComments', type=str)
Parser.add_argument('--NoScripts', type=str)
Parser.add_argument('--ImgAltToTitle', type=str)
Parser.add_argument('--ImgTitleToAlt', type=str)
Parser.add_argument('--HTMLFixPre', type=str)
Parser.add_argument('--GemtextOutput', type=str)
Parser.add_argument('--GemtextHeader', type=str)
Parser.add_argument('--SiteTagline', type=str)
Parser.add_argument('--SitemapOutput', type=str)
Parser.add_argument('--JournalRedirect', type=str)
Parser.add_argument('--FeedEntries', type=str)
Parser.add_argument('--FolderRoots', type=str)
Parser.add_argument('--DynamicParts', type=str)
Parser.add_argument('--MarkdownExts', type=str)
Parser.add_argument('--MastodonURL', type=str)
Parser.add_argument('--MastodonToken', type=str)
Parser.add_argument('--FeedCategoryFilter', type=str)
Parser.add_argument('--ActivityPubTypeFilter', type=str, help=argparse.SUPPRESS)
Parser.add_argument('--ActivityPubHoursLimit', type=int)
Parser.add_argument('--CategoriesAutomatic', type=str)
Parser.add_argument('--CategoriesUncategorized', type=str)
Args = Parser.parse_args()
ConfigLogging(Args.Logging)
try:
import lxml
from Modules.Feed import *
FeedEntries = Args.FeedEntries if Args.FeedEntries else 'Default'
except ImportError:
	logging.warning("⚠ Can't load the XML libraries. XML Feeds Generation is Disabled. Make sure the 'lxml' library is installed.")
	FeedEntries = 0
#from watchdog.observers import Observer
#from watchdog.events import LoggingEventHandler
#SiteEditEvent = LoggingEventHandler()
#SiteEditEvent.on_created = DoSiteBuild
#SiteEditEvent.on_deleted = DoSiteBuild
#SiteEditEvent.on_modified = DoSiteBuild
#SiteEditEvent.on_moved = DoSiteBuild
#SiteEditObserver = Observer()
#SiteEditObserver.schedule(SiteEditEvent, ".", recursive=True)
#SiteEditObserver.start()
Main(Args=Args, FeedEntries=FeedEntries)
logging.info(f"✅ Done! ({round(time.time()-StartTime, 3)}s)")
#DoSiteBuild()
#try:
# while True:
# pass
#except KeyboardInterrupt:
# logging.info("Stopped.")
#finally:
# SiteEditObserver.stop()
# SiteEditObserver.join()
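# Illustrative invocation (the flag values here are assumptions, not project defaults):
#   python3 Build.py --SiteName "My Site" --OutputDir public --GemtextOutput True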


@@ -0,0 +1,30 @@
Beautiful Soup is made available under the MIT license:
Copyright (c) 2004-2022 Leonard Richardson
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Beautiful Soup incorporates code from the html5lib library, which is
also made available under the MIT license. Copyright (c) 2006-2013
James Graham and other contributors
Beautiful Soup depends on the soupsieve library, which is also made
available under the MIT license. Copyright (c) 2018 Isaac Muse


@@ -0,0 +1,812 @@
"""Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend".
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to navigate,
search, and modify the parse tree.
Beautiful Soup works with Python 3.5 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.11.1"
__copyright__ = "Copyright (c) 2004-2022 Leonard Richardson"
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
__all__ = ['BeautifulSoup']
from collections import Counter
import os
import re
import sys
import traceback
import warnings
# The very first thing we do is give a useful error if someone is
# running this code under Python 2.
if sys.version_info.major < 3:
raise ImportError('You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3.')
from .builder import (
builder_registry,
ParserRejectedMarkup,
XMLParsedAsHTMLWarning,
)
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
PYTHON_SPECIFIC_ENCODINGS,
ResultSet,
Script,
Stylesheet,
SoupStrainer,
Tag,
TemplateString,
)
# Define some custom warnings.
class GuessedAtParserWarning(UserWarning):
"""The warning issued when BeautifulSoup has to guess what parser to
use -- probably because no parser was specified in the constructor.
"""
class MarkupResemblesLocatorWarning(UserWarning):
"""The warning issued when BeautifulSoup is given 'markup' that
actually looks like a resource locator -- a URL or a path to a file
on disk.
"""
class BeautifulSoup(Tag):
"""A data structure representing a parsed HTML or XML document.
Most of the methods you'll call on a BeautifulSoup object are inherited from
PageElement or Tag.
Internally, this class defines the basic interface called by the
tree builders when converting an HTML/XML document into a data
structure. The interface abstracts away the differences between
parsers. To write a new tree builder, you'll need to understand
these methods as a whole.
These methods will be called by the BeautifulSoup constructor:
* reset()
* feed(markup)
The tree builder may call these methods from its feed() implementation:
* handle_starttag(name, attrs) # See note about return value
* handle_endtag(name)
* handle_data(data) # Appends to the current data node
* endData(containerClass) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
# Since BeautifulSoup subclasses Tag, it's possible to treat it as
# a Tag with a .name. This name makes it clear the BeautifulSoup
# object isn't a real markup tag.
ROOT_TAG_NAME = '[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
# A string containing all ASCII whitespace characters, used in
# endData() to detect data chunks that seem 'empty'.
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n"
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, exclude_encodings=None,
element_classes=None, **kwargs):
"""Constructor.
:param markup: A string or a file-like object representing
markup to be parsed.
:param features: Desirable features of the parser to be
used. This may be the name of a specific parser ("lxml",
"lxml-xml", "html.parser", or "html5lib") or it may be the
type of markup to be used ("html", "html5", "xml"). It's
recommended that you name a specific parser, so that
Beautiful Soup gives you the same results across platforms
and virtual environments.
:param builder: A TreeBuilder subclass to instantiate (or
instance to use) instead of looking one up based on
`features`. You only need to use this if you've implemented a
custom TreeBuilder.
:param parse_only: A SoupStrainer. Only parts of the document
matching the SoupStrainer will be considered. This is useful
when parsing part of a document that would otherwise be too
large to fit into memory.
:param from_encoding: A string indicating the encoding of the
document to be parsed. Pass this in if Beautiful Soup is
guessing wrongly about the document's encoding.
:param exclude_encodings: A list of strings indicating
encodings known to be wrong. Pass this in if you don't know
the document's encoding but you know Beautiful Soup's guess is
wrong.
:param element_classes: A dictionary mapping BeautifulSoup
classes like Tag and NavigableString, to other classes you'd
like to be instantiated instead as the parse tree is
built. This is useful for subclassing Tag or NavigableString
to modify default behavior.
:param kwargs: For backwards compatibility purposes, the
constructor accepts certain keyword arguments used in
Beautiful Soup 3. None of these arguments do anything in
Beautiful Soup 4; they will result in a warning and then be
ignored.
Apart from this, any keyword arguments passed into the
BeautifulSoup constructor are propagated to the TreeBuilder
constructor. This makes it possible to configure a
TreeBuilder by passing in arguments, not just by saying which
one to use.
"""
if 'convertEntities' in kwargs:
del kwargs['convertEntities']
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. Suggest you use "
"features='lxml' for HTML and features='lxml-xml' for "
"XML.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name),
DeprecationWarning
)
return kwargs.pop(old_name)
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if from_encoding and isinstance(markup, str):
warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
from_encoding = None
self.element_classes = element_classes or dict()
# We need this information to track whether or not the builder
# was specified well enough that we can omit the 'you need to
# specify a parser' warning.
original_builder = builder
original_features = features
if isinstance(builder, type):
# A builder class was passed in; it needs to be instantiated.
builder_class = builder
builder = None
elif builder is None:
if isinstance(features, str):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
# At this point either we have a TreeBuilder instance in
# builder, or we have a builder_class that we can instantiate
# with the remaining **kwargs.
if builder is None:
builder = builder_class(**kwargs)
if not original_builder and not (
original_features == builder.NAME or
original_features in builder.ALTERNATE_NAMES
) and markup:
# The user did not tell us which TreeBuilder to use,
# and we had to guess. Issue a warning.
if builder.is_xml:
markup_type = "XML"
else:
markup_type = "HTML"
# This code adapted from warnings.py so that we get the same line
# of code as our warnings.warn() call gets, even if the answer is wrong
# (as it may be in a multithreading situation).
caller = None
try:
caller = sys._getframe(1)
except ValueError:
pass
if caller:
globals = caller.f_globals
line_number = caller.f_lineno
else:
globals = sys.__dict__
                line_number = 1
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
if filename:
# If there is no filename at all, the user is most likely in a REPL,
# and the warning is not necessary.
values = dict(
filename=filename,
line_number=line_number,
parser=builder.NAME,
markup_type=markup_type
)
warnings.warn(
self.NO_PARSER_SPECIFIED_WARNING % values,
GuessedAtParserWarning, stacklevel=2
)
else:
if kwargs:
warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.")
self.builder = builder
self.is_xml = builder.is_xml
self.known_xml = self.is_xml
self._namespaces = dict()
self.parse_only = parse_only
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256 and (
(isinstance(markup, bytes) and not b'<' in markup)
or (isinstance(markup, str) and not '<' in markup)
):
# Issue warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# since that is sometimes the intended behavior.
if not self._markup_is_url(markup):
self._markup_resembles_filename(markup)
rejections = []
success = False
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(
markup, from_encoding, exclude_encodings=exclude_encodings)):
self.reset()
self.builder.initialize_soup(self)
try:
self._feed()
success = True
break
except ParserRejectedMarkup as e:
rejections.append(e)
pass
if not success:
other_exceptions = [str(e) for e in rejections]
raise ParserRejectedMarkup(
"The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions)
)
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def __copy__(self):
"""Copy a BeautifulSoup object by converting the document to a string and parsing it again."""
copy = type(self)(
self.encode('utf-8'), builder=self.builder, from_encoding='utf-8'
)
# Although we encoded the tree to UTF-8, that may not have
# been the encoding of the original markup. Set the copy's
# .original_encoding to reflect the original object's
# .original_encoding.
copy.original_encoding = self.original_encoding
return copy
def __getstate__(self):
# Frequently a tree builder can't be pickled.
d = dict(self.__dict__)
if 'builder' in d and d['builder'] is not None and not self.builder.picklable:
d['builder'] = None
return d
@classmethod
def _decode_markup(cls, markup):
"""Ensure `markup` is bytes so it's safe to send into warnings.warn.
TODO: warnings.warn had this problem back in 2010 but it might not
anymore.
"""
if isinstance(markup, bytes):
decoded = markup.decode('utf-8', 'replace')
else:
decoded = markup
return decoded
@classmethod
def _markup_is_url(cls, markup):
"""Error-handling method to raise a warning if incoming markup looks
like a URL.
:param markup: A string.
:return: Whether or not the markup resembles a URL
closely enough to justify a warning.
"""
if isinstance(markup, bytes):
space = b' '
cant_start_with = (b"http:", b"https:")
elif isinstance(markup, str):
space = ' '
cant_start_with = ("http:", "https:")
else:
return False
if any(markup.startswith(prefix) for prefix in cant_start_with):
if not space in markup:
warnings.warn(
'The input looks more like a URL than markup. You may want to use'
' an HTTP client like requests to get the document behind'
' the URL, and feed that document to Beautiful Soup.',
MarkupResemblesLocatorWarning
)
return True
return False
@classmethod
def _markup_resembles_filename(cls, markup):
"""Error-handling method to raise a warning if incoming markup
resembles a filename.
:param markup: A bytestring or string.
:return: Whether or not the markup resembles a filename
closely enough to justify a warning.
"""
path_characters = '/\\'
extensions = ['.html', '.htm', '.xml', '.xhtml', '.txt']
if isinstance(markup, bytes):
path_characters = path_characters.encode("utf8")
extensions = [x.encode('utf8') for x in extensions]
filelike = False
if any(x in markup for x in path_characters):
filelike = True
else:
lower = markup.lower()
if any(lower.endswith(ext) for ext in extensions):
filelike = True
if filelike:
warnings.warn(
'The input looks more like a filename than markup. You may'
' want to open this file and pass the filehandle into'
' Beautiful Soup.',
MarkupResemblesLocatorWarning
)
return True
return False
def _feed(self):
"""Internal method that parses previously set markup, creating a large
number of Tag and NavigableString objects.
"""
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
"""Reset this object to a state as though it had never parsed any
markup.
"""
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.open_tag_counter = Counter()
self.preserve_whitespace_tag_stack = []
self.string_container_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, attrs={},
sourceline=None, sourcepos=None, **kwattrs):
"""Create a new Tag associated with this BeautifulSoup object.
:param name: The name of the new Tag.
:param namespace: The URI of the new Tag's XML namespace, if any.
:param prefix: The prefix for the new Tag's XML namespace, if any.
:param attrs: A dictionary of this Tag's attribute values; can
be used instead of `kwattrs` for attributes like 'class'
that are reserved words in Python.
:param sourceline: The line number where this tag was
(purportedly) found in its source document.
:param sourcepos: The character position within `sourceline` where this
tag was (purportedly) found.
:param kwattrs: Keyword arguments for the new Tag's attribute values.
"""
kwattrs.update(attrs)
return self.element_classes.get(Tag, Tag)(
None, self.builder, name, namespace, nsprefix, kwattrs,
sourceline=sourceline, sourcepos=sourcepos
)
def string_container(self, base_class=None):
container = base_class or NavigableString
# There may be a general override of NavigableString.
container = self.element_classes.get(
container, container
)
# On top of that, we may be inside a tag that needs a special
# container class.
if self.string_container_stack and container is NavigableString:
container = self.builder.string_containers.get(
self.string_container_stack[-1].name, container
)
return container
def new_string(self, s, subclass=None):
"""Create a new NavigableString associated with this BeautifulSoup
object.
"""
container = self.string_container(subclass)
return container(s)
def insert_before(self, *args):
"""This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
it because there is nothing before or after it in the parse tree.
"""
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, *args):
"""This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
it because there is nothing before or after it in the parse tree.
"""
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
"""Internal method called by _popToTag when a tag is closed."""
tag = self.tagStack.pop()
if tag.name in self.open_tag_counter:
self.open_tag_counter[tag.name] -= 1
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
if self.string_container_stack and tag == self.string_container_stack[-1]:
self.string_container_stack.pop()
#print("Pop", tag.name)
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
"""Internal method called by handle_starttag when a tag is opened."""
#print("Push", tag.name)
if self.currentTag is not None:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name != self.ROOT_TAG_NAME:
self.open_tag_counter[tag.name] += 1
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
if tag.name in self.builder.string_containers:
self.string_container_stack.append(tag)
def endData(self, containerClass=None):
"""Method called by the TreeBuilder when the end of a data segment
occurs.
"""
if self.current_data:
current_data = ''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
containerClass = self.string_container(containerClass)
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Method called by the TreeBuilder to integrate an object into the parse tree."""
if parent is None:
parent = self.currentTag
if most_recent_element is not None:
previous_element = most_recent_element
else:
previous_element = self._most_recent_element
next_element = previous_sibling = next_sibling = None
if isinstance(o, Tag):
next_element = o.next_element
next_sibling = o.next_sibling
previous_sibling = o.previous_sibling
if previous_element is None:
previous_element = o.previous_element
fix = parent.next_element is not None
o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)
self._most_recent_element = o
parent.contents.append(o)
# Check if we are inserting into an already parsed node.
if fix:
self._linkage_fixer(parent)
def _linkage_fixer(self, el):
"""Make sure linkage of this fragment is sound."""
first = el.contents[0]
child = el.contents[-1]
descendant = child
if child is first and el.parent is not None:
# Parent should be linked to first child
el.next_element = child
# We are no longer linked to whatever this element is
prev_el = child.previous_element
if prev_el is not None and prev_el is not el:
prev_el.next_element = None
# First child should be linked to the parent, and no previous siblings.
child.previous_element = el
child.previous_sibling = None
# We have no sibling as we've been appended as the last.
child.next_sibling = None
# This index is a tag, dig deeper for a "last descendant"
if isinstance(child, Tag) and child.contents:
descendant = child._last_descendant(False)
# As the final step, link last descendant. It should be linked
# to the parent's next sibling (if found), else walk up the chain
# and find a parent with a sibling. It should have no next sibling.
descendant.next_element = None
descendant.next_sibling = None
target = el
while True:
if target is None:
break
elif target.next_sibling is not None:
descendant.next_element = target.next_sibling
target.next_sibling.previous_element = child
break
target = target.parent
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag.
If there are no open tags with the given name, nothing will be
popped.
:param name: Pop up to the most recent tag with this name.
:param nsprefix: The namespace prefix that goes with `name`.
        :param inclusivePop: If this is false, pops the tag stack up
          to but *not* including the most recent instance of the
          given tag.
"""
#print("Popping to %s" % name)
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
if not self.open_tag_counter.get(name):
break
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None,
sourcepos=None, namespaces=None):
"""Called by the tree builder when a new tag is encountered.
:param name: Name of the tag.
:param nsprefix: Namespace prefix for the tag.
:param attrs: A dictionary of attribute values.
:param sourceline: The line number where this tag was found in its
source document.
:param sourcepos: The character position within `sourceline` where this
tag was found.
:param namespaces: A dictionary of all namespace prefix mappings
currently in scope in the document.
If this method returns None, the tag was rejected by an active
SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print("Start tag %s: %s" % (name, attrs))
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = self.element_classes.get(Tag, Tag)(
self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element,
sourceline=sourceline, sourcepos=sourcepos,
namespaces=namespaces
)
if tag is None:
return tag
if self._most_recent_element is not None:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
"""Called by the tree builder when an ending tag is encountered.
:param name: Name of the tag.
:param nsprefix: Namespace prefix for the tag.
"""
#print("End tag: " + name)
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
"""Called by the tree builder when a chunk of textual data is encountered."""
self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of the parse tree
as an HTML or XML document.
:param pretty_print: If this is True, indentation will be used to
make the document more readable.
:param eventual_encoding: The encoding of the final document.
If this is None, the document will be a Unicode string.
"""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
# This is a special Python encoding; it can't actually
# go into an XML document because it means nothing
# outside of Python.
eventual_encoding = None
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = '<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = ''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
# Aliases to make it easier to get started quickly, e.g. 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.',
DeprecationWarning
)
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
"""Exception raised by a TreeBuilder if it's unable to continue parsing."""
pass
class FeatureNotFound(ValueError):
"""Exception raised by the BeautifulSoup constructor if no parser with the
requested features is found.
"""
pass
#If this file is run as a script, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print((soup.prettify()))


@@ -0,0 +1,631 @@
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
from collections import defaultdict
import itertools
import re
import warnings
import sys
from ..element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
RubyParenthesisString,
RubyTextString,
Stylesheet,
Script,
TemplateString,
nonwhitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class XMLParsedAsHTMLWarning(UserWarning):
"""The warning issued when an HTML parser is used to parse
XML that is not XHTML.
"""
MESSAGE = """It looks like you're parsing an XML document using an HTML parser. If this really is an HTML document (maybe it's XHTML?), you can ignore or filter this warning. If it's XML, you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the lxml package installed, and pass the keyword argument `features="xml"` into the BeautifulSoup constructor."""
class TreeBuilderRegistry(object):
"""A way of looking up TreeBuilder subclasses by their name or by desired
features.
"""
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features.
        :param treebuilder_class: A subclass of TreeBuilder. Its .features
           attribute should list its features.
"""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
"""Look up a TreeBuilder subclass with the desired features.
:param features: A list of features to look for. If none are
provided, the most recently registered TreeBuilder subclass
will be used.
:return: A TreeBuilder subclass, or None if there's no
registered subclass with all the requested features.
"""
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
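# Illustrative lookup (assuming the stock builders have been registered; not part of the original source):
#   builder_registry.lookup('html', 'fast')  # -> a matching TreeBuilder subclass, or None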
class TreeBuilder(object):
"""Turn a textual document into a Beautiful Soup object tree."""
NAME = "[Unknown tree builder]"
ALTERNATE_NAMES = []
features = []
is_xml = False
picklable = False
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
DEFAULT_CDATA_LIST_ATTRIBUTES = {}
# Whitespace should be preserved inside these tags.
DEFAULT_PRESERVE_WHITESPACE_TAGS = set()
# The textual contents of tags with these names should be
# instantiated with some class other than NavigableString.
DEFAULT_STRING_CONTAINERS = {}
USE_DEFAULT = object()
# Most parsers don't keep track of line numbers.
TRACKS_LINE_NUMBERS = False
def __init__(self, multi_valued_attributes=USE_DEFAULT,
preserve_whitespace_tags=USE_DEFAULT,
store_line_numbers=USE_DEFAULT,
string_containers=USE_DEFAULT,
):
"""Constructor.
:param multi_valued_attributes: If this is set to None, the
TreeBuilder will not turn any values for attributes like
'class' into lists. Setting this to a dictionary will
customize this behavior; look at DEFAULT_CDATA_LIST_ATTRIBUTES
for an example.
Internally, these are called "CDATA list attributes", but that
probably doesn't make sense to an end-user, so the argument name
is `multi_valued_attributes`.
:param preserve_whitespace_tags: A list of tags to treat
the way <pre> tags are treated in HTML. Tags in this list
are immune from pretty-printing; their contents will always be
output as-is.
:param string_containers: A dictionary mapping tag names to
the classes that should be instantiated to contain the textual
contents of those tags. The default is to use NavigableString
for every tag, no matter what the name. You can override the
default by changing DEFAULT_STRING_CONTAINERS.
:param store_line_numbers: If the parser keeps track of the
line numbers and positions of the original markup, that
information will, by default, be stored in each corresponding
`Tag` object. You can turn this off by passing
store_line_numbers=False. If the parser you're using doesn't
keep track of this information, then setting store_line_numbers=True
will do nothing.
"""
self.soup = None
if multi_valued_attributes is self.USE_DEFAULT:
multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
self.cdata_list_attributes = multi_valued_attributes
if preserve_whitespace_tags is self.USE_DEFAULT:
preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
self.preserve_whitespace_tags = preserve_whitespace_tags
if store_line_numbers == self.USE_DEFAULT:
store_line_numbers = self.TRACKS_LINE_NUMBERS
self.store_line_numbers = store_line_numbers
if string_containers == self.USE_DEFAULT:
string_containers = self.DEFAULT_STRING_CONTAINERS
self.string_containers = string_containers
def initialize_soup(self, soup):
"""The BeautifulSoup object has been initialized and is now
being associated with the TreeBuilder.
:param soup: A BeautifulSoup object.
"""
self.soup = soup
def reset(self):
"""Do any work necessary to reset the underlying parser
for a new document.
By default, this does nothing.
"""
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p/>" or "<p>".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no children.
"<foo></foo>" will become "<foo/>", and "<foo>bar</foo>" will
be left alone.
:param tag_name: The name of a markup tag.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
"""Run some incoming markup through some parsing process,
populating the `BeautifulSoup` object in self.soup.
This method is not implemented in TreeBuilder; it must be
implemented in subclasses.
:return: None.
"""
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None, exclude_encodings=None):
"""Run any preliminary steps necessary to make incoming markup
acceptable to the parser.
:param markup: Some markup -- probably a bytestring.
:param user_specified_encoding: The user asked to try this encoding.
:param document_declared_encoding: The markup itself claims to be
in this encoding. NOTE: This argument is not used by the
calling code and can probably be removed.
:param exclude_encodings: The user asked _not_ to try any of
these encodings.
:yield: A series of 4-tuples:
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for converting the
document to Unicode and parsing it. Each strategy will be tried
in turn.
By default, the only strategy is to parse the markup
as-is. See `LXMLTreeBuilderForXML` and
`HTMLParserTreeBuilder` for implementations that take into
account the quirks of particular parsers.
"""
yield markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
:param fragment: A string -- fragment of HTML.
:return: A string -- a full HTML document.
"""
return fragment
def set_up_substitutions(self, tag):
"""Set up any substitutions that will need to be performed on
a `Tag` when it's output as a string.
By default, this does nothing. See `HTMLTreeBuilder` for a
case where this is used.
:param tag: A `Tag`
:return: Whether or not a substitution was performed.
"""
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""When an attribute value is associated with a tag that can
have multiple values for that attribute, convert the string
value to a list of strings.
Basically, replaces class="foo bar" with class=["foo", "bar"]
NOTE: This method modifies its input in place.
:param tag_name: The name of a tag.
:param attrs: A dictionary containing the tag's attributes.
Any appropriate attribute values will be modified in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in list(attrs.keys()):
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, str):
values = nonwhitespace_re.findall(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events.
This is not currently used for anything, but it demonstrates
how a simple TreeBuilder would work.
"""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print("Start %s, %r" % (name, attrs))
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print("End %s" % name)
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
empty_element_tags = set([
# These are from HTML5.
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',
# These are from earlier versions of HTML and are removed in HTML5.
'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex', 'nextid', 'spacer'
])
# The HTML standard defines these as block-level elements. Beautiful
# Soup does not treat these elements differently from other elements,
# but it may do so eventually, and this information is available if
# you need to use it.
block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"])
# These HTML tags need special treatment so they can be
# represented by a string class other than NavigableString.
#
# For some of these tags, it's because the HTML standard defines
# an unusual content model for them. I made this list by going
# through the HTML spec
# (https://html.spec.whatwg.org/#metadata-content) and looking for
# "metadata content" elements that can contain strings.
#
# The Ruby tags (<rt> and <rp>) are here despite being normal
# "phrasing content" tags, because the content they contain is
# qualitatively different from other text in the document, and it
# can be useful to be able to distinguish it.
#
# TODO: Arguably <noscript> could go here but it seems
# qualitatively different from the other tags.
DEFAULT_STRING_CONTAINERS = {
'rt' : RubyTextString,
'rp' : RubyParenthesisString,
'style': Stylesheet,
'script': Script,
'template': TemplateString,
}
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
DEFAULT_CDATA_LIST_ATTRIBUTES = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
DEFAULT_PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
def set_up_substitutions(self, tag):
"""Replace the declared encoding in a <meta> tag with a placeholder,
to be substituted when the tag is output to a string.
An HTML document may come in to Beautiful Soup as one
encoding, but exit in a different encoding, and the <meta> tag
needs to be changed to reflect this.
:param tag: A `Tag`
:return: Whether or not a substitution was performed.
"""
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
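# A minimal illustrative sketch of the substitution above (assuming bs4
# is importable; the helper name is ours, not part of the module): the
# declared encoding follows whatever encoding is used on output.
def _demo_meta_charset_substitution():
    from bs4 import BeautifulSoup
    markup = b'<html><head><meta charset="ISO-8859-1"></head>caf\xe9</html>'
    soup = BeautifulSoup(markup, 'html.parser')
    # On output, the placeholder renders as the encoding actually used,
    # so the result contains <meta charset="utf-8">.
    return soup.encode('utf-8')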
class DetectsXMLParsedAsHTML(object):
"""A mixin class for any class (a TreeBuilder, or some class used by a
TreeBuilder) that's in a position to detect whether an XML
document is being incorrectly parsed as HTML, and issue an
appropriate warning.
This requires being able to observe an incoming processing
instruction that might be an XML declaration, and also able to
observe tags as they're opened. If you can't do that for a given
TreeBuilder, there's a less reliable implementation based on
examining the raw markup.
"""
# Regular expression for seeing if markup has an <html> tag.
LOOKS_LIKE_HTML = re.compile("<[^ +]html", re.I)
LOOKS_LIKE_HTML_B = re.compile(b"<[^ +]html", re.I)
XML_PREFIX = '<?xml'
XML_PREFIX_B = b'<?xml'
@classmethod
def warn_if_markup_looks_like_xml(cls, markup):
"""Perform a check on some markup to see if it looks like XML
that's not XHTML. If so, issue a warning.
This is much less reliable than doing the check while parsing,
but some of the tree builders can't do that.
:return: True if the markup looks like non-XHTML XML, False
otherwise.
"""
if isinstance(markup, bytes):
prefix = cls.XML_PREFIX_B
looks_like_html = cls.LOOKS_LIKE_HTML_B
else:
prefix = cls.XML_PREFIX
looks_like_html = cls.LOOKS_LIKE_HTML
if (markup is not None
and markup.startswith(prefix)
and not looks_like_html.search(markup[:500])
):
cls._warn()
return True
return False
@classmethod
def _warn(cls):
"""Issue a warning about XML being parsed as HTML."""
warnings.warn(
XMLParsedAsHTMLWarning.MESSAGE, XMLParsedAsHTMLWarning
)
def _initialize_xml_detector(self):
"""Call this method before parsing a document."""
self._first_processing_instruction = None
self._root_tag = None
def _document_might_be_xml(self, processing_instruction):
"""Call this method when encountering an XML declaration, or a
"processing instruction" that might be an XML declaration.
"""
if (self._first_processing_instruction is not None
or self._root_tag is not None):
# The document has already started. Don't bother checking
# anymore.
return
self._first_processing_instruction = processing_instruction
# We won't know until we encounter the first tag whether or
# not this is actually a problem.
def _root_tag_encountered(self, name):
"""Call this when you encounter the document's root tag.
This is where we actually check whether an XML document is
being incorrectly parsed as HTML, and issue the warning.
"""
if self._root_tag is not None:
# This method was incorrectly called multiple times. Do
# nothing.
return
self._root_tag = name
if (name != 'html' and self._first_processing_instruction is not None
and self._first_processing_instruction.lower().startswith('xml ')):
# We encountered an XML declaration and then a tag other
# than 'html'. This is a reliable indicator that a
# non-XHTML XML document is being parsed as HTML.
self._warn()
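# A short sketch of the warning in practice (assuming a bs4 version
# recent enough to define XMLParsedAsHTMLWarning; the helper name is
# ours): an XML declaration followed by a non-'html' root tag triggers it.
def _demo_xml_parsed_as_html_warning():
    import warnings
    from bs4 import BeautifulSoup
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        BeautifulSoup('<?xml version="1.0"?><rss></rss>', 'html.parser')
    # Expect ['XMLParsedAsHTMLWarning'] here.
    return [w.category.__name__ for w in caught]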
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
this_module = sys.modules[__name__]
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
"""An Exception to be raised when the underlying parser simply
refuses to parse the given markup.
"""
def __init__(self, message_or_exception):
"""Explain why the parser rejected the given markup, either
with a textual explanation or another exception.
"""
if isinstance(message_or_exception, Exception):
e = message_or_exception
message_or_exception = "%s: %s" % (e.__class__.__name__, str(e))
super(ParserRejectedMarkup, self).__init__(message_or_exception)
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
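# The practical effect of this ordering can be checked through the
# registry (an illustrative sketch; which name comes back depends on
# what is installed on the system):
#
#   from bs4.builder import builder_registry
#   builder_registry.lookup('html').NAME   # 'lxml' if available, else
#                                          # 'html5lib' or 'html.parser'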

View File

@@ -0,0 +1,473 @@
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
__all__ = [
'HTML5TreeBuilder',
]
import warnings
import re
from bs4.builder import (
DetectsXMLParsedAsHTML,
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import (
NamespacedAttribute,
nonwhitespace_re,
)
import html5lib
from html5lib.constants import (
namespaces,
prefixes,
)
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
try:
# Pre-0.99999999
from html5lib.treebuilders import _base as treebuilder_base
new_html5lib = False
except ImportError as e:
# 0.99999999 and up
from html5lib.treebuilders import base as treebuilder_base
new_html5lib = True
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree.
Note that this TreeBuilder does not support some features common
to HTML TreeBuilders. Some of these features could theoretically
be implemented, but at the very least it's quite difficult,
because html5lib moves the parse tree around as it's being built.
* This TreeBuilder doesn't use different subclasses of NavigableString
based on the name of the tag in which the string was found.
* You can't use a SoupStrainer to parse only part of a document.
"""
NAME = "html5lib"
features = [NAME, PERMISSIVE, HTML_5, HTML]
# html5lib can tell us which line number and position in the
# original file is the source of an element.
TRACKS_LINE_NUMBERS = True
def prepare_markup(self, markup, user_specified_encoding,
document_declared_encoding=None, exclude_encodings=None):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
# document_declared_encoding and exclude_encodings aren't used
# ATM because the html5lib TreeBuilder doesn't use
# UnicodeDammit.
if exclude_encodings:
warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.")
# html5lib only parses HTML, so if it's given XML that's worth
# noting.
DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup)
yield (markup, None, None, False)
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
self.underlying_builder.parser = parser
extra_kwargs = dict()
if not isinstance(markup, str):
if new_html5lib:
extra_kwargs['override_encoding'] = self.user_specified_encoding
else:
extra_kwargs['encoding'] = self.user_specified_encoding
doc = parser.parse(markup, **extra_kwargs)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, str):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
original_encoding = parser.tokenizer.stream.charEncoding[0]
if not isinstance(original_encoding, str):
# In 0.99999999 and up, the encoding is an html5lib
# Encoding object. We want to use a string for compatibility
# with other tree builders.
original_encoding = original_encoding.name
doc.original_encoding = original_encoding
self.underlying_builder.parser = None
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
namespaceHTMLElements, self.soup,
store_line_numbers=self.store_line_numbers
)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return '<html><head></head><body>%s</body></html>' % fragment
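# A brief usage sketch (assuming html5lib is installed; the helper name
# is ours): this builder repairs malformed markup the way a browser's
# HTML5 parser would.
def _demo_html5lib_repair():
    from bs4 import BeautifulSoup
    soup = BeautifulSoup('<p>one<p>two', 'html5lib')
    # html5lib closes the first <p> and wraps everything in
    # <html><head></head><body>...</body></html>.
    return soup.body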
class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
def __init__(self, namespaceHTMLElements, soup=None,
store_line_numbers=True, **kwargs):
if soup:
self.soup = soup
else:
from bs4 import BeautifulSoup
# TODO: Why is the parser 'html.parser' here? To avoid an
# infinite loop?
self.soup = BeautifulSoup(
"", "html.parser", store_line_numbers=store_line_numbers,
**kwargs
)
# TODO: What are **kwargs exactly? Should they be passed in
# here in addition to/instead of being passed to the BeautifulSoup
# constructor?
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
# This will be set later to an html5lib.html5parser.HTMLParser
# object, which we can use to track the current line number.
self.parser = None
self.store_line_numbers = store_line_numbers
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
kwargs = {}
if self.parser and self.store_line_numbers:
# This represents the point immediately after the end of the
# tag. We don't know when the tag started, but we do know
# where it ended -- the character just before this one.
sourceline, sourcepos = self.parser.tokenizer.stream.position()
kwargs['sourceline'] = sourceline
kwargs['sourcepos'] = sourcepos-1
tag = self.soup.new_tag(name, namespace, **kwargs)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
def fragmentClass(self):
from bs4 import BeautifulSoup
# TODO: Why is the parser 'html.parser' here? To avoid an
# infinite loop?
self.soup = BeautifulSoup("", "html.parser")
self.soup.name = "[document_fragment]"
return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return treebuilder_base.TreeBuilder.getFragment(self).element
def testSerializer(self, element):
from bs4 import BeautifulSoup
rv = []
doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$')
def serializeElement(element, indent=0):
if isinstance(element, BeautifulSoup):
pass
if isinstance(element, Doctype):
m = doctype_re.match(element)
if m:
name = m.group(1)
if m.lastindex > 1:
publicId = m.group(2) or ""
systemId = m.group(3) or m.group(4) or ""
rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
(' ' * indent, name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, name))
else:
rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
elif isinstance(element, Comment):
rv.append("|%s<!-- %s -->" % (' ' * indent, element))
elif isinstance(element, NavigableString):
rv.append("|%s\"%s\"" % (' ' * indent, element))
else:
if element.namespace:
name = "%s %s" % (prefixes[element.namespace],
element.name)
else:
name = element.name
rv.append("|%s<%s>" % (' ' * indent, name))
if element.attrs:
attributes = []
for name, value in list(element.attrs.items()):
if isinstance(name, NamespacedAttribute):
name = "%s %s" % (prefixes[name.namespace], name.name)
if isinstance(value, list):
value = " ".join(value)
attributes.append((name, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
indent += 2
for child in element.children:
serializeElement(child, indent)
serializeElement(element, 0)
return "\n".join(rv)
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
def __setitem__(self, name, value):
# If this attribute is a multi-valued attribute for this element,
# turn its value into a list.
list_attr = self.element.cdata_list_attributes or {}
if (name in list_attr.get('*', [])
or (self.element.name in list_attr
and name in list_attr[self.element.name])):
# A node that is being cloned may have already undergone
# this procedure.
if not isinstance(value, list):
value = nonwhitespace_re.findall(value)
self.element[name] = value
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
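# An illustrative sketch of the multi-valued attribute handling above
# (assuming html5lib is installed; the helper name is ours): 'class'
# comes back as a list of values.
def _demo_cdata_list_attribute():
    from bs4 import BeautifulSoup
    soup = BeautifulSoup('<p class="a b"></p>', 'html5lib')
    return soup.p['class']  # ['a', 'b']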
class Element(treebuilder_base.Node):
def __init__(self, element, soup, namespace):
treebuilder_base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
string_child = child = None
if isinstance(node, str):
# Some other piece of code decided to pass in a string
# instead of creating a TextElement object to contain the
# string.
string_child = child = node
elif isinstance(node, Tag):
# Some other piece of code decided to pass in a Tag
# instead of creating an Element object to contain the
# Tag.
child = node
elif node.element.__class__ == NavigableString:
string_child = child = node.element
node.parent = self
else:
child = node.element
node.parent = self
if not isinstance(child, str) and child.parent is not None:
node.element.extract()
if (string_child is not None and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# We are appending a string onto another string.
# TODO This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + string_child)
old_element.replace_with(new_element)
self.soup._most_recent_element = new_element
else:
if isinstance(node, str):
# Create a brand new NavigableString from this string.
child = self.soup.new_string(node)
# Tell Beautiful Soup to act as if it parsed this element
# immediately after the parent's last descendant. (Or
# immediately after the parent, if it has no children.)
if self.element.contents:
most_recent_element = self.element._last_descendant(False)
elif self.element.next_element is not None:
# Something from further ahead in the parse tree is
# being inserted into this earlier element. This is
# very annoying because it means an expensive search
# for the last element in the tree.
most_recent_element = self.soup._last_descendant()
else:
most_recent_element = self.element
self.soup.object_was_parsed(
child, parent=self.element,
most_recent_element=most_recent_element)
def getAttributes(self):
if isinstance(self.element, Comment):
return {}
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and len(attributes) > 0:
converted_attributes = []
for name, value in list(attributes.items()):
if isinstance(name, tuple):
new_name = NamespacedAttribute(*name)
del attributes[name]
attributes[new_name] = value
self.soup.builder._replace_cdata_list_attribute_values(
self.name, attributes)
for name, value in list(attributes.items()):
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.soup.builder.set_up_substitutions(self.element)
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
text = TextNode(self.soup.new_string(data), self.soup)
if insertBefore:
self.insertBefore(text, insertBefore)
else:
self.appendChild(text)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
node.element.extract()
def reparentChildren(self, new_parent):
"""Move all of this tag's children into another tag."""
# print("MOVE", self.element.contents)
# print("FROM", self.element)
# print("TO", new_parent.element)
element = self.element
new_parent_element = new_parent.element
# Determine what this tag's next_element will be once all the children
# are removed.
final_next_element = element.next_sibling
new_parents_last_descendant = new_parent_element._last_descendant(False, False)
if len(new_parent_element.contents) > 0:
# The new parent already contains children. We will be
# appending this tag's children to the end.
new_parents_last_child = new_parent_element.contents[-1]
new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
else:
# The new parent contains no children.
new_parents_last_child = None
new_parents_last_descendant_next_element = new_parent_element.next_element
to_append = element.contents
if len(to_append) > 0:
# Set the first child's previous_element and previous_sibling
# to elements within the new parent
first_child = to_append[0]
if new_parents_last_descendant is not None:
first_child.previous_element = new_parents_last_descendant
else:
first_child.previous_element = new_parent_element
first_child.previous_sibling = new_parents_last_child
if new_parents_last_descendant is not None:
new_parents_last_descendant.next_element = first_child
else:
new_parent_element.next_element = first_child
if new_parents_last_child is not None:
new_parents_last_child.next_sibling = first_child
# Find the very last element being moved. It is now the
# parent's last descendant. It has no .next_sibling and
# its .next_element is whatever the previous last
# descendant had.
last_childs_last_descendant = to_append[-1]._last_descendant(False, True)
last_childs_last_descendant.next_element = new_parents_last_descendant_next_element
if new_parents_last_descendant_next_element is not None:
# TODO: This code has no test coverage and I'm not sure
# how to get html5lib to go through this path, but it's
# just the other side of the previous line.
new_parents_last_descendant_next_element.previous_element = last_childs_last_descendant
last_childs_last_descendant.next_sibling = None
for child in to_append:
child.parent = new_parent_element
new_parent_element.contents.append(child)
# Now that this element has no children, change its .next_element.
element.contents = []
element.next_element = final_next_element
# print("DONE WITH MOVE")
# print("FROM", self.element)
# print("TO", new_parent_element)
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespace)
for key,value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
if self.namespace is None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
treebuilder_base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError

View File

@@ -0,0 +1,499 @@
# encoding: utf-8
"""Use the HTMLParser library to parse HTML files that aren't too bad."""
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
__all__ = [
'HTMLParserTreeBuilder',
]
from html.parser import HTMLParser
try:
from html.parser import HTMLParseError
except ImportError as e:
# HTMLParseError is removed in Python 3.5. Since it can never be
# thrown in 3.5, we can just define our own class as a placeholder.
class HTMLParseError(Exception):
pass
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4
from ..element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from ..dammit import EntitySubstitution, UnicodeDammit
from ..builder import (
DetectsXMLParsedAsHTML,
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
"""A subclass of the Python standard library's HTMLParser class, which
listens for HTMLParser events and translates them into calls
to Beautiful Soup's tree construction API.
"""
# Strategies for handling duplicate attributes
IGNORE = 'ignore'
REPLACE = 'replace'
def __init__(self, *args, **kwargs):
"""Constructor.
:param on_duplicate_attribute: A strategy for what to do if a
tag includes the same attribute more than once. Accepted
values are: REPLACE (replace earlier values with later
ones, the default), IGNORE (keep the earliest value
encountered), or a callable. A callable must take three
arguments: the dictionary of attributes already processed,
the name of the duplicate attribute, and the most recent value
encountered.
"""
self.on_duplicate_attribute = kwargs.pop(
'on_duplicate_attribute', self.REPLACE
)
HTMLParser.__init__(self, *args, **kwargs)
# Keep a list of empty-element tags that were encountered
# without an explicit closing tag. If we encounter a closing tag
# of this type, we'll associate it with one of those entries.
#
# This isn't a stack because we don't care about the
# order. It's a list of closing tags we've already handled and
# will ignore, assuming they ever show up.
self.already_closed_empty_element = []
self._initialize_xml_detector()
def error(self, msg):
"""In Python 3, HTMLParser subclasses must implement error(), although
this requirement doesn't appear to be documented.
In Python 2, HTMLParser implements error() by raising an exception,
which we don't want to do.
In any event, this method is called only on very strange
markup and our best strategy is to pretend it didn't happen
and keep going.
"""
warnings.warn(msg)
def handle_startendtag(self, name, attrs):
"""Handle an incoming empty-element tag.
This is only called when the markup looks like <tag/>.
:param name: Name of the tag.
:param attrs: Dictionary of the tag's attributes.
"""
# Passing handle_empty_element=False tells handle_starttag not to
# close the tag just because its name matches a known empty-element
# tag. We know that this is an empty-element tag, and we want to
# call handle_endtag ourselves.
tag = self.handle_starttag(name, attrs, handle_empty_element=False)
self.handle_endtag(name)
def handle_starttag(self, name, attrs, handle_empty_element=True):
"""Handle an opening tag, e.g. '<tag>'
:param name: Name of the tag.
:param attrs: Dictionary of the tag's attributes.
:param handle_empty_element: True if this tag is known to be
an empty-element tag (i.e. there is not expected to be any
closing tag).
"""
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
if key in attr_dict:
# A single attribute shows up multiple times in this
# tag. How to handle it depends on the
# on_duplicate_attribute setting.
on_dupe = self.on_duplicate_attribute
if on_dupe == self.IGNORE:
pass
elif on_dupe in (None, self.REPLACE):
attr_dict[key] = value
else:
on_dupe(attr_dict, key, value)
else:
attr_dict[key] = value
attrvalue = '""'
#print("START", name)
sourceline, sourcepos = self.getpos()
tag = self.soup.handle_starttag(
name, None, None, attr_dict, sourceline=sourceline,
sourcepos=sourcepos
)
if tag and tag.is_empty_element and handle_empty_element:
# Unlike other parsers, html.parser doesn't send separate end tag
# events for empty-element tags. (It's handled in
# handle_startendtag, but only if the original markup looked like
# <tag/>.)
#
# So we need to call handle_endtag() ourselves. Since we
# know the start event is identical to the end event, we
# don't want handle_endtag() to cross off any previous end
# events for tags of this name.
self.handle_endtag(name, check_already_closed=False)
# But we might encounter an explicit closing tag for this tag
# later on. If so, we want to ignore it.
self.already_closed_empty_element.append(name)
if self._root_tag is None:
self._root_tag_encountered(name)
def handle_endtag(self, name, check_already_closed=True):
"""Handle a closing tag, e.g. '</tag>'
:param name: A tag name.
:param check_already_closed: True if this tag is expected to
be the closing portion of an empty-element tag,
e.g. '<tag></tag>'.
"""
#print("END", name)
if check_already_closed and name in self.already_closed_empty_element:
# This is a redundant end tag for an empty-element tag.
# We've already called handle_endtag() for it, so just
# check it off the list.
#print("ALREADY CLOSED", name)
self.already_closed_empty_element.remove(name)
else:
self.soup.handle_endtag(name)
def handle_data(self, data):
"""Handle some textual data that shows up between tags."""
self.soup.handle_data(data)
def handle_charref(self, name):
"""Handle a numeric character reference by converting it to the
corresponding Unicode character and treating it as textual
data.
:param name: Character number, possibly in hexadecimal.
"""
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed in all supported versions.
# http://bugs.python.org/issue13633
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
data = None
if real_name < 256:
# HTML numeric entities are supposed to reference Unicode
# code points, but sometimes they reference code points in
# some other encoding (ahem, Windows-1252). E.g. &#147;
# instead of &#201; for LEFT DOUBLE QUOTATION MARK. This
# code tries to detect this situation and compensate.
for encoding in (self.soup.original_encoding, 'windows-1252'):
if not encoding:
continue
try:
data = bytearray([real_name]).decode(encoding)
except UnicodeDecodeError as e:
pass
if not data:
try:
data = chr(real_name)
except (ValueError, OverflowError) as e:
pass
data = data or "\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
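# An illustrative case for the fallback above: '&#147;' is the
# Windows-1252 left curly quote, but as a Unicode code point it is an
# unprintable control character, so the windows-1252 pass recovers it:
#
#   BeautifulSoup('<p>&#147;hi&#148;</p>', 'html.parser').p.string
#   # '\u201chi\u201d'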
def handle_entityref(self, name):
"""Handle a named entity reference by converting it to the
corresponding Unicode character(s) and treating it as textual
data.
:param name: Name of the entity reference.
"""
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
# If this were XML, it would be ambiguous whether "&foo"
# was a character entity reference with a missing
# semicolon or the literal string "&foo". Since this is
# HTML, we have a complete list of all character entity references,
# and this one wasn't found, so assume it's the literal string "&foo".
data = "&%s" % name
self.handle_data(data)
def handle_comment(self, data):
"""Handle an HTML comment.
:param data: The text of the comment.
"""
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
"""Handle a DOCTYPE declaration.
:param data: The text of the declaration.
"""
self.soup.endData()
data = data[len("DOCTYPE "):]
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
"""Handle a declaration of unknown type -- probably a CDATA block.
:param data: The text of the declaration.
"""
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
"""Handle a processing instruction.
:param data: The text of the instruction.
"""
self.soup.endData()
self.soup.handle_data(data)
self._document_might_be_xml(data)
self.soup.endData(ProcessingInstruction)
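# A sketch of the duplicate-attribute strategies described in the
# constructor above (assuming bs4 is importable; the helper name is
# ours; 'class' values come back as lists because 'class' is a
# cdata-list attribute).
def _demo_on_duplicate_attribute():
    from bs4 import BeautifulSoup
    markup = '<a class="x" class="y"></a>'
    last = BeautifulSoup(markup, 'html.parser').a['class']   # ['y']
    first = BeautifulSoup(
        markup, 'html.parser', on_duplicate_attribute='ignore'
    ).a['class']                                             # ['x']
    return last, first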
class HTMLParserTreeBuilder(HTMLTreeBuilder):
"""A Beautiful soup `TreeBuilder` that uses the `HTMLParser` parser,
found in the Python standard library.
"""
is_xml = False
picklable = True
NAME = HTMLPARSER
features = [NAME, HTML, STRICT]
# The html.parser knows which line number and position in the
# original file is the source of an element.
TRACKS_LINE_NUMBERS = True
def __init__(self, parser_args=None, parser_kwargs=None, **kwargs):
"""Constructor.
:param parser_args: Positional arguments to pass into
the BeautifulSoupHTMLParser constructor, once it's
invoked.
:param parser_kwargs: Keyword arguments to pass into
the BeautifulSoupHTMLParser constructor, once it's
invoked.
:param kwargs: Keyword arguments for the superclass constructor.
"""
# Some keyword arguments will be pulled out of kwargs and placed
# into parser_kwargs.
extra_parser_kwargs = dict()
for arg in ('on_duplicate_attribute',):
if arg in kwargs:
value = kwargs.pop(arg)
extra_parser_kwargs[arg] = value
super(HTMLParserTreeBuilder, self).__init__(**kwargs)
parser_args = parser_args or []
parser_kwargs = parser_kwargs or {}
parser_kwargs.update(extra_parser_kwargs)
if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
parser_kwargs['strict'] = False
if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
parser_kwargs['convert_charrefs'] = False
self.parser_args = (parser_args, parser_kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None, exclude_encodings=None):
"""Run any preliminary steps necessary to make incoming markup
acceptable to the parser.
:param markup: Some markup -- probably a bytestring.
:param user_specified_encoding: The user asked to try this encoding.
:param document_declared_encoding: The markup itself claims to be
in this encoding.
:param exclude_encodings: The user asked _not_ to try any of
these encodings.
:yield: A series of 4-tuples:
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for converting the
document to Unicode and parsing it. Each strategy will be tried
in turn.
"""
if isinstance(markup, str):
# Parse Unicode as-is.
yield (markup, None, None, False)
return
# Ask UnicodeDammit to sniff the most likely encoding.
# This was provided by the end-user; treat it as a known
# definite encoding per the algorithm laid out in the HTML5
# spec. (See the EncodingDetector class for details.)
known_definite_encodings = [user_specified_encoding]
# This was found in the document; treat it as a slightly lower-priority
# user encoding.
user_encodings = [document_declared_encoding]
dammit = UnicodeDammit(
markup,
known_definite_encodings=known_definite_encodings,
user_encodings=user_encodings,
is_html=True,
exclude_encodings=exclude_encodings
)
yield (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
"""Run some incoming markup through some parsing process,
populating the `BeautifulSoup` object in self.soup.
"""
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
parser.close()
except HTMLParseError as e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
parser.already_closed_empty_element = []
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
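# A minimal sketch of the encoding negotiation used by prepare_markup
# above (assuming a bs4 version whose UnicodeDammit accepts the
# known_definite_encodings argument; the helper name is ours).
def _demo_unicode_dammit():
    from bs4.dammit import UnicodeDammit
    dammit = UnicodeDammit(
        b'<p>caf\xe9</p>', known_definite_encodings=['latin-1']
    )
    # Expect ('<p>café</p>', 'latin-1').
    return dammit.unicode_markup, dammit.original_encoding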

View File

@@ -0,0 +1,386 @@
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
try:
from collections.abc import Callable # Python 3.6
except ImportError as e:
from collections import Callable
from io import BytesIO
from io import StringIO
from lxml import etree
from bs4.element import (
Comment,
Doctype,
NamespacedAttribute,
ProcessingInstruction,
XMLProcessingInstruction,
)
from bs4.builder import (
DetectsXMLParsedAsHTML,
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
LXML = 'lxml'
def _invert(d):
"Invert a dictionary."
return dict((v,k) for k, v in list(d.items()))
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
processing_instruction_class = XMLProcessingInstruction
NAME = "lxml-xml"
ALTERNATE_NAMES = ["xml"]
# Well, it's permissive by XML parser standards.
features = [NAME, LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = dict(xml='http://www.w3.org/XML/1998/namespace')
DEFAULT_NSMAPS_INVERTED = _invert(DEFAULT_NSMAPS)
# NOTE: If we parsed Element objects and looked at .sourceline,
# we'd be able to see the line numbers from the original document.
# But instead we build an XMLParser or HTMLParser object to serve
# as the target of parse messages, and those messages don't include
# line numbers.
# See: https://bugs.launchpad.net/lxml/+bug/1846906
def initialize_soup(self, soup):
"""Let the BeautifulSoup object know about the standard namespace
mapping.
:param soup: A `BeautifulSoup`.
"""
super(LXMLTreeBuilderForXML, self).initialize_soup(soup)
self._register_namespaces(self.DEFAULT_NSMAPS)
def _register_namespaces(self, mapping):
"""Let the BeautifulSoup object know about namespaces encountered
while parsing the document.
This might be useful later on when creating CSS selectors.
This will track (almost) all namespaces, even ones that were
only in scope for part of the document. If two namespaces have
the same prefix, only the first one encountered will be
tracked. Un-prefixed namespaces are not tracked.
:param mapping: A dictionary mapping namespace prefixes to URIs.
"""
for key, value in list(mapping.items()):
# This is 'if key' and not 'if key is not None' because we
# don't track un-prefixed namespaces. Soupselect will
# treat an un-prefixed namespace as the default, which
# causes confusion in some cases.
if key and key not in self.soup._namespaces:
# Let the BeautifulSoup object know about a new namespace.
# If there are multiple namespaces defined with the same
# prefix, the first one in the document takes precedence.
self.soup._namespaces[key] = value
def default_parser(self, encoding):
"""Find the default parser for the given encoding.
:param encoding: A string.
:return: Either a parser object or a class, which
will be instantiated with default arguments.
"""
if self._default_parser is not None:
return self._default_parser
return etree.XMLParser(
target=self, strip_cdata=False, recover=True, encoding=encoding)
def parser_for(self, encoding):
"""Instantiate an appropriate parser for the given encoding.
:param encoding: A string.
:return: A parser object such as an `etree.XMLParser`.
"""
# Use the default parser.
parser = self.default_parser(encoding)
if isinstance(parser, Callable):
# Instantiate the parser with default arguments
parser = parser(
target=self, strip_cdata=False, recover=True, encoding=encoding
)
return parser
def __init__(self, parser=None, empty_element_tags=None, **kwargs):
# TODO: Issue a warning if parser is present but not a
# callable, since that means there's no way to create new
# parsers for different encodings.
self._default_parser = parser
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
self.active_namespace_prefixes = [dict(self.DEFAULT_NSMAPS)]
super(LXMLTreeBuilderForXML, self).__init__(**kwargs)
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
def prepare_markup(self, markup, user_specified_encoding=None,
exclude_encodings=None,
document_declared_encoding=None):
"""Run any preliminary steps necessary to make incoming markup
acceptable to the parser.
lxml really wants to get a bytestring and convert it to
Unicode itself. So instead of using UnicodeDammit to convert
the bytestring to Unicode using different encodings, this
implementation uses EncodingDetector to iterate over the
encodings, and tell lxml to try to parse the document as each
one in turn.
:param markup: Some markup -- hopefully a bytestring.
:param user_specified_encoding: The user asked to try this encoding.
:param document_declared_encoding: The markup itself claims to be
in this encoding.
:param exclude_encodings: The user asked _not_ to try any of
these encodings.
:yield: A series of 4-tuples:
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for converting the
document to Unicode and parsing it. Each strategy will be tried
in turn.
"""
is_html = not self.is_xml
if is_html:
self.processing_instruction_class = ProcessingInstruction
# We're in HTML mode, so if we're given XML, that's worth
# noting.
DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup)
else:
self.processing_instruction_class = XMLProcessingInstruction
if isinstance(markup, str):
# We were given Unicode. Maybe lxml can parse Unicode on
# this system?
# TODO: This is a workaround for
# https://bugs.launchpad.net/lxml/+bug/1948551.
# We can remove it once the upstream issue is fixed.
if len(markup) > 0 and markup[0] == u'\N{BYTE ORDER MARK}':
markup = markup[1:]
yield markup, None, document_declared_encoding, False
if isinstance(markup, str):
# No, apparently not. Convert the Unicode to UTF-8 and
# tell lxml to parse it as UTF-8.
yield (markup.encode("utf8"), "utf8",
document_declared_encoding, False)
# This was provided by the end-user; treat it as a known
# definite encoding per the algorithm laid out in the HTML5
# spec. (See the EncodingDetector class for details.)
known_definite_encodings = [user_specified_encoding]
# This was found in the document; treat it as a slightly lower-priority
# user encoding.
user_encodings = [document_declared_encoding]
detector = EncodingDetector(
markup, known_definite_encodings=known_definite_encodings,
user_encodings=user_encodings, is_html=is_html,
exclude_encodings=exclude_encodings
)
for encoding in detector.encodings:
yield (detector.markup, encoding, document_declared_encoding, False)
def feed(self, markup):
if isinstance(markup, bytes):
markup = BytesIO(markup)
elif isinstance(markup, str):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
try:
self.parser = self.parser_for(self.soup.original_encoding)
self.parser.feed(data)
while len(data) != 0:
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if len(data) != 0:
self.parser.feed(data)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
raise ParserRejectedMarkup(e)
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(nsmap) == 0 and len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
# First, let the BeautifulSoup object know about it.
self._register_namespaces(nsmap)
# Then, add it to our running list of inverted namespace
# mappings.
self.nsmaps.append(_invert(nsmap))
# The currently active namespace prefixes have
# changed. Calculate the new mapping so it can be stored
# with all Tag objects created while these prefixes are in
# scope.
current_mapping = dict(self.active_namespace_prefixes[-1])
current_mapping.update(nsmap)
# We should not track un-prefixed namespaces as we can only hold one
# and it will be recognized as the default namespace by soupsieve,
# which may be confusing in some situations.
if '' in current_mapping:
del current_mapping['']
self.active_namespace_prefixes.append(current_mapping)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in list(nsmap.items()):
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn them into NamespacedAttribute objects.
new_attrs = {}
for attr, value in list(attrs.items()):
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_namespace(namespace)
self.soup.handle_starttag(
name, namespace, nsprefix, attrs,
namespaces=self.active_namespace_prefixes[-1]
)
def _prefix_for_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = None
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
out_of_scope_nsmap = self.nsmaps.pop()
if out_of_scope_nsmap is not None:
# This tag introduced a namespace mapping which is no
# longer in scope. Recalculate the currently active
# namespace prefixes.
self.active_namespace_prefixes.pop()
def pi(self, target, data):
self.soup.endData()
data = target + ' ' + data
self.soup.handle_data(data)
self.soup.endData(self.processing_instruction_class)
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
self.soup.endData()
doctype = Doctype.for_name_and_ids(name, pubid, system)
self.soup.object_was_parsed(doctype)
def comment(self, content):
"Handle comments as Comment objects."
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
NAME = LXML
ALTERNATE_NAMES = ["lxml-html"]
features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE]
is_xml = False
processing_instruction_class = ProcessingInstruction
def default_parser(self, encoding):
return etree.HTMLParser
def feed(self, markup):
encoding = self.soup.original_encoding
try:
self.parser = self.parser_for(encoding)
self.parser.feed(markup)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
raise ParserRejectedMarkup(e)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return '<html><body>%s</body></html>' % fragment

File diff suppressed because it is too large

View File

@@ -0,0 +1,248 @@
"""Diagnostic functions, mainly for use when doing tech support."""
# Use of this source code is governed by the MIT license.
__license__ = "MIT"
import cProfile
from io import BytesIO
from html.parser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry
import os
import pstats
import random
import tempfile
import time
import traceback
import sys
def diagnose(data):
"""Diagnostic suite for isolating common problems.
:param data: A string containing markup that needs to be explained.
:return: None; diagnostics are printed to standard output.
"""
print(("Diagnostic running on Beautiful Soup %s" % __version__))
print(("Python version %s" % sys.version))
basic_parsers = ["html.parser", "html5lib", "lxml"]
# Iterate over a copy, since missing parsers are removed from the list.
for name in list(basic_parsers):
for builder in builder_registry.builders:
if name in builder.features:
break
else:
basic_parsers.remove(name)
print((
"I noticed that %s is not installed. Installing it may help." %
name))
if 'lxml' in basic_parsers:
basic_parsers.append("lxml-xml")
try:
from lxml import etree
print(("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))))
except ImportError as e:
print(
"lxml is not installed or couldn't be imported.")
if 'html5lib' in basic_parsers:
try:
import html5lib
print(("Found html5lib version %s" % html5lib.__version__))
except ImportError as e:
print(
"html5lib is not installed or couldn't be imported.")
if hasattr(data, 'read'):
data = data.read()
elif data.startswith("http:") or data.startswith("https:"):
print(('"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data))
print("You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup.")
return
else:
try:
if os.path.exists(data):
print(('"%s" looks like a filename. Reading data from the file.' % data))
with open(data) as fp:
data = fp.read()
except ValueError:
# This can happen on some platforms when the 'filename' is
# too long. Assume it's data and not a filename.
pass
print("")
for parser in basic_parsers:
print(("Trying to parse your markup with %s" % parser))
success = False
try:
soup = BeautifulSoup(data, features=parser)
success = True
except Exception as e:
print(("%s could not parse the markup." % parser))
traceback.print_exc()
if success:
print(("Here's what %s did with the markup:" % parser))
print((soup.prettify()))
print(("-" * 80))
def lxml_trace(data, html=True, **kwargs):
"""Print out the lxml events that occur during parsing.
This lets you see how lxml parses a document when no Beautiful
Soup code is running. You can use this to determine whether
an lxml-specific problem is in Beautiful Soup's lxml tree builders
or in lxml itself.
:param data: Some markup.
:param html: If True, markup will be parsed with lxml's HTML parser.
If False, lxml's XML parser will be used.
"""
from lxml import etree
recover = kwargs.pop('recover', True)
if isinstance(data, str):
data = data.encode("utf8")
reader = BytesIO(data)
for event, element in etree.iterparse(
reader, html=html, recover=recover, **kwargs
):
print(("%s, %4s, %s" % (event, element.tag, element.text)))
class AnnouncingParser(HTMLParser):
"""Subclass of HTMLParser that announces parse events, without doing
anything else.
You can use this to get a picture of how html.parser sees a given
document. The easiest way to do this is to call `htmlparser_trace`.
"""
def _p(self, s):
print(s)
def handle_starttag(self, name, attrs):
self._p("%s START" % name)
def handle_endtag(self, name):
self._p("%s END" % name)
def handle_data(self, data):
self._p("%s DATA" % data)
def handle_charref(self, name):
self._p("%s CHARREF" % name)
def handle_entityref(self, name):
self._p("%s ENTITYREF" % name)
def handle_comment(self, data):
self._p("%s COMMENT" % data)
def handle_decl(self, data):
self._p("%s DECL" % data)
def unknown_decl(self, data):
self._p("%s UNKNOWN-DECL" % data)
def handle_pi(self, data):
self._p("%s PI" % data)
def htmlparser_trace(data):
"""Print out the HTMLParser events that occur during parsing.
This lets you see how HTMLParser parses a document when no
Beautiful Soup code is running.
:param data: Some markup.
"""
parser = AnnouncingParser()
parser.feed(data)
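# For example (illustrative):
#
#   htmlparser_trace('<p>one<p>two</p>')
#   # p START / one DATA / p START / two DATA / p END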
_vowels = "aeiou"
_consonants = "bcdfghjklmnpqrstvwxyz"
def rword(length=5):
"Generate a random word-like string."
s = ''
for i in range(length):
if i % 2 == 0:
t = _consonants
else:
t = _vowels
s += random.choice(t)
return s
def rsentence(length=4):
"Generate a random sentence-like string."
return " ".join(rword(random.randint(4,9)) for i in range(length))
def rdoc(num_elements=1000):
"""Randomly generate an invalid HTML document."""
tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
elements = []
for i in range(num_elements):
choice = random.randint(0,3)
if choice == 0:
# New tag.
tag_name = random.choice(tag_names)
elements.append("<%s>" % tag_name)
elif choice == 1:
elements.append(rsentence(random.randint(1,4)))
elif choice == 2:
# Close a tag.
tag_name = random.choice(tag_names)
elements.append("</%s>" % tag_name)
return "<html>" + "\n".join(elements) + "</html>"
def benchmark_parsers(num_elements=100000):
"""Very basic head-to-head performance benchmark."""
print(("Comparative parser benchmark on Beautiful Soup %s" % __version__))
data = rdoc(num_elements)
print(("Generated a large invalid HTML document (%d bytes)." % len(data)))
for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
success = False
try:
a = time.time()
soup = BeautifulSoup(data, parser)
b = time.time()
success = True
except Exception as e:
print(("%s could not parse the markup." % parser))
traceback.print_exc()
if success:
print(("BS4+%s parsed the markup in %.2fs." % (parser, b-a)))
from lxml import etree
a = time.time()
etree.HTML(data)
b = time.time()
print(("Raw lxml parsed the markup in %.2fs." % (b-a)))
import html5lib
parser = html5lib.HTMLParser()
a = time.time()
parser.parse(data)
b = time.time()
print(("Raw html5lib parsed the markup in %.2fs." % (b-a)))
def profile(num_elements=100000, parser="lxml"):
"""Use Python's profiler on a randomly generated document."""
filehandle = tempfile.NamedTemporaryFile()
filename = filehandle.name
data = rdoc(num_elements)
vars = dict(bs4=bs4, data=data, parser=parser)
cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename)
stats = pstats.Stats(filename)
# stats.strip_dirs()
stats.sort_stats("cumulative")
stats.print_stats('_html5lib|bs4', 50)
# If this file is run as a script, standard input is diagnosed.
if __name__ == '__main__':
diagnose(sys.stdin.read())

File diff suppressed because it is too large

View File

@@ -0,0 +1,185 @@
from .dammit import EntitySubstitution
class Formatter(EntitySubstitution):
"""Describes a strategy to use when outputting a parse tree to a string.
Some parts of this strategy come from the distinction between
HTML4, HTML5, and XML. Others are configurable by the user.
Formatters are passed in as the `formatter` argument to methods
like `PageElement.encode`. Most people won't need to think about
formatters, and most people who need to think about them can pass
in one of these predefined strings as `formatter` rather than
making a new Formatter object:
For HTML documents:
* 'html' - HTML entity substitution for generic HTML documents. (default)
* 'html5' - HTML entity substitution for HTML5 documents, as
well as some optimizations in the way tags are rendered.
* 'minimal' - Only make the substitutions necessary to guarantee
valid HTML.
* None - Do not perform any substitution. This will be faster
but may result in invalid markup.
For XML documents:
* 'html' - Entity substitution for XHTML documents.
* 'minimal' - Only make the substitutions necessary to guarantee
valid XML. (default)
* None - Do not perform any substitution. This will be faster
but may result in invalid markup.
"""
# Registries of XML and HTML formatters.
XML_FORMATTERS = {}
HTML_FORMATTERS = {}
HTML = 'html'
XML = 'xml'
HTML_DEFAULTS = dict(
cdata_containing_tags=set(["script", "style"]),
)
def _default(self, language, value, kwarg):
if value is not None:
return value
if language == self.XML:
return set()
return self.HTML_DEFAULTS[kwarg]
def __init__(
self, language=None, entity_substitution=None,
void_element_close_prefix='/', cdata_containing_tags=None,
empty_attributes_are_booleans=False, indent=1,
):
"""Constructor.
:param language: This should be Formatter.XML if you are formatting
XML markup and Formatter.HTML if you are formatting HTML markup.
:param entity_substitution: A function to call to replace special
characters with XML/HTML entities. For examples, see
bs4.dammit.EntitySubstitution.substitute_html and substitute_xml.
:param void_element_close_prefix: By default, void elements
are represented as <tag/> (XML rules) rather than <tag>
(HTML rules). To get <tag>, pass in the empty string.
:param cdata_containing_tags: The list of tags that are defined
as containing CDATA in this dialect. For example, in HTML,
<script> and <style> tags are defined as containing CDATA,
and their contents should not be formatted.
:param empty_attributes_are_booleans: Render attributes whose value
is the empty string as HTML-style boolean attributes.
(Attributes whose value is None are always rendered this way.)
:param indent: If indent is a non-negative integer or string,
then the contents of elements will be indented
appropriately when pretty-printing. An indent level of 0,
negative, or "" will only insert newlines. Using a
positive integer indent indents that many spaces per
level. If indent is a string (such as "\t"), that string
is used to indent each level. The default behavior is to
indent one space per level.
"""
self.language = language
self.entity_substitution = entity_substitution
self.void_element_close_prefix = void_element_close_prefix
self.cdata_containing_tags = self._default(
language, cdata_containing_tags, 'cdata_containing_tags'
)
self.empty_attributes_are_booleans = empty_attributes_are_booleans
if indent is None:
indent = 0
if isinstance(indent, int):
if indent < 0:
indent = 0
indent = ' ' * indent
elif not isinstance(indent, str):
indent = ' '
self.indent = indent
def substitute(self, ns):
"""Process a string that needs to undergo entity substitution.
This may be a string encountered in an attribute value or as
text.
:param ns: A string.
:return: A string with certain characters replaced by named
or numeric entities.
"""
if not self.entity_substitution:
return ns
from .element import NavigableString
if (isinstance(ns, NavigableString)
and ns.parent is not None
and ns.parent.name in self.cdata_containing_tags):
# Do nothing.
return ns
# Substitute.
return self.entity_substitution(ns)
def attribute_value(self, value):
"""Process the value of an attribute.
:param value: A string.
:return: A string with certain characters replaced by named
or numeric entities.
"""
return self.substitute(value)
def attributes(self, tag):
"""Reorder a tag's attributes however you want.
By default, attributes are sorted alphabetically. This makes
behavior consistent between Python 2 and Python 3, and preserves
backwards compatibility with older versions of Beautiful Soup.
If `empty_boolean_attributes` is True, then attributes whose
values are set to the empty string will be treated as boolean
attributes.
"""
if tag.attrs is None:
return []
return sorted(
(k, (None if self.empty_attributes_are_booleans and v == '' else v))
for k, v in list(tag.attrs.items())
)
class HTMLFormatter(Formatter):
"""A generic Formatter for HTML."""
REGISTRY = {}
def __init__(self, *args, **kwargs):
return super(HTMLFormatter, self).__init__(self.HTML, *args, **kwargs)
class XMLFormatter(Formatter):
"""A generic Formatter for XML."""
REGISTRY = {}
def __init__(self, *args, **kwargs):
return super(XMLFormatter, self).__init__(self.XML, *args, **kwargs)
# Set up aliases for the default formatters.
HTMLFormatter.REGISTRY['html'] = HTMLFormatter(
entity_substitution=EntitySubstitution.substitute_html
)
HTMLFormatter.REGISTRY["html5"] = HTMLFormatter(
entity_substitution=EntitySubstitution.substitute_html,
void_element_close_prefix=None,
empty_attributes_are_booleans=True,
)
HTMLFormatter.REGISTRY["minimal"] = HTMLFormatter(
entity_substitution=EntitySubstitution.substitute_xml
)
HTMLFormatter.REGISTRY[None] = HTMLFormatter(
entity_substitution=None
)
XMLFormatter.REGISTRY["html"] = XMLFormatter(
entity_substitution=EntitySubstitution.substitute_html
)
XMLFormatter.REGISTRY["minimal"] = XMLFormatter(
entity_substitution=EntitySubstitution.substitute_xml
)
XMLFormatter.REGISTRY[None] = Formatter(
Formatter.XML, entity_substitution=None
)
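# A minimal usage sketch (illustrative addition, not part of the upstream
# module): the registry names defined above are what BeautifulSoup resolves
# when a string is passed as `formatter` to encode() or decode().
def _formatter_registry_example():
    from bs4 import BeautifulSoup
    soup = BeautifulSoup('<option selected=""></option>', 'html.parser')
    # 'html' keeps the empty attribute value explicit...
    assert soup.option.encode(formatter='html') == b'<option selected=""></option>'
    # ...while 'html5' renders it as a boolean attribute.
    assert soup.option.encode(formatter='html5') == b'<option selected></option>'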

File diff suppressed because it is too large.


@@ -0,0 +1,29 @@
import pytest
from unittest.mock import patch
from bs4.builder import DetectsXMLParsedAsHTML
class TestDetectsXMLParsedAsHTML(object):
@pytest.mark.parametrize(
"markup,looks_like_xml",
[("No xml declaration", False),
("<html>obviously HTML</html", False),
("<?xml ><html>Actually XHTML</html>", False),
("<?xml> < html>Tricky XHTML</html>", False),
("<?xml ><no-html-tag>", True),
]
)
def test_warn_if_markup_looks_like_xml(self, markup, looks_like_xml):
# Test of our ability to guess at whether markup looks XML-ish
# _and_ not HTML-ish.
with patch('bs4.builder.DetectsXMLParsedAsHTML._warn') as mock:
for data in markup, markup.encode('utf8'):
result = DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
data
)
assert result == looks_like_xml
if looks_like_xml:
assert mock.called
else:
assert not mock.called
mock.reset_mock()
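# A minimal usage sketch (illustrative addition, not part of the upstream
# suite): called directly, the helper returns the guess, and for XML-looking
# markup it also emits the warning the test above mocks out.
def _warn_if_xml_example():
    # True: an XML declaration with no <html> tag in sight.
    assert DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
        b"<?xml ><no-html-tag>"
    )
    # False: plain HTML is left alone.
    assert not DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
        b"<html>obviously HTML</html>"
    )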


@@ -0,0 +1,136 @@
"""Tests of the builder registry."""
import pytest
import warnings
from bs4 import BeautifulSoup
from bs4.builder import (
builder_registry as registry,
HTMLParserTreeBuilder,
TreeBuilderRegistry,
)
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError:
HTML5LIB_PRESENT = False
try:
from bs4.builder import (
LXMLTreeBuilderForXML,
LXMLTreeBuilder,
)
LXML_PRESENT = True
except ImportError:
LXML_PRESENT = False
class TestBuiltInRegistry(object):
"""Test the built-in registry with the default builders registered."""
def test_combination(self):
assert registry.lookup('strict', 'html') == HTMLParserTreeBuilder
if LXML_PRESENT:
assert registry.lookup('fast', 'html') == LXMLTreeBuilder
assert registry.lookup('permissive', 'xml') == LXMLTreeBuilderForXML
if HTML5LIB_PRESENT:
assert registry.lookup('html5lib', 'html') == HTML5TreeBuilder
def test_lookup_by_markup_type(self):
if LXML_PRESENT:
assert registry.lookup('html') == LXMLTreeBuilder
assert registry.lookup('xml') == LXMLTreeBuilderForXML
else:
assert registry.lookup('xml') == None
if HTML5LIB_PRESENT:
assert registry.lookup('html') == HTML5TreeBuilder
else:
assert registry.lookup('html') == HTMLParserTreeBuilder
def test_named_library(self):
if LXML_PRESENT:
assert registry.lookup('lxml', 'xml') == LXMLTreeBuilderForXML
assert registry.lookup('lxml', 'html') == LXMLTreeBuilder
if HTML5LIB_PRESENT:
assert registry.lookup('html5lib') == HTML5TreeBuilder
assert registry.lookup('html.parser') == HTMLParserTreeBuilder
def test_beautifulsoup_constructor_does_lookup(self):
with warnings.catch_warnings(record=True) as w:
# This will create a warning about not explicitly
# specifying a parser, but we'll ignore it.
# You can pass in a string.
BeautifulSoup("", features="html")
# Or a list of strings.
BeautifulSoup("", features=["html", "fast"])
# You'll get an exception if BS can't find an appropriate
# builder.
with pytest.raises(ValueError):
BeautifulSoup("", features="no-such-feature")
class TestRegistry(object):
"""Test the TreeBuilderRegistry class in general."""
def setup_method(self):
self.registry = TreeBuilderRegistry()
def builder_for_features(self, *feature_list):
cls = type('Builder_' + '_'.join(feature_list),
(object,), {'features' : feature_list})
self.registry.register(cls)
return cls
def test_register_with_no_features(self):
builder = self.builder_for_features()
# Since the builder advertises no features, you can't find it
# by looking up features.
assert self.registry.lookup('foo') is None
# But you can find it by doing a lookup with no features, if
# this happens to be the only registered builder.
assert self.registry.lookup() == builder
def test_register_with_features_makes_lookup_succeed(self):
builder = self.builder_for_features('foo', 'bar')
assert self.registry.lookup('foo') is builder
assert self.registry.lookup('bar') is builder
def test_lookup_fails_when_no_builder_implements_feature(self):
builder = self.builder_for_features('foo', 'bar')
assert self.registry.lookup('baz') is None
def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
builder1 = self.builder_for_features('foo')
builder2 = self.builder_for_features('bar')
assert self.registry.lookup() == builder2
def test_lookup_fails_when_no_tree_builders_registered(self):
assert self.registry.lookup() is None
def test_lookup_gets_most_recent_builder_supporting_all_features(self):
has_one = self.builder_for_features('foo')
has_the_other = self.builder_for_features('bar')
has_both_early = self.builder_for_features('foo', 'bar', 'baz')
has_both_late = self.builder_for_features('foo', 'bar', 'quux')
lacks_one = self.builder_for_features('bar')
has_the_other = self.builder_for_features('foo')
# There are two builders featuring 'foo' and 'bar', but
# the one that also features 'quux' was registered later.
assert self.registry.lookup('foo', 'bar') == has_both_late
# There is only one builder featuring 'foo', 'bar', and 'baz'.
assert self.registry.lookup('foo', 'bar', 'baz') == has_both_early
def test_lookup_fails_when_cannot_reconcile_requested_features(self):
builder1 = self.builder_for_features('foo', 'bar')
builder2 = self.builder_for_features('foo', 'baz')
assert self.registry.lookup('bar', 'baz') is None
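# A minimal usage sketch (illustrative addition, not part of the upstream
# suite): lookup() returns the most recently registered builder advertising
# every requested feature, as the tests above establish.
def _registry_example():
    registry = TreeBuilderRegistry()
    first = type('First', (object,), {'features': ['html', 'fast']})
    second = type('Second', (object,), {'features': ['html']})
    registry.register(first)
    registry.register(second)
    assert registry.lookup('html') is second          # most recent wins
    assert registry.lookup('html', 'fast') is first   # only one has both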


@@ -0,0 +1,371 @@
# encoding: utf-8
import pytest
import logging
import bs4
from bs4 import BeautifulSoup
from bs4.dammit import (
EntitySubstitution,
EncodingDetector,
UnicodeDammit,
)
class TestUnicodeDammit(object):
"""Standalone tests of UnicodeDammit."""
def test_unicode_input(self):
markup = "I'm already Unicode! \N{SNOWMAN}"
dammit = UnicodeDammit(markup)
assert dammit.unicode_markup == markup
def test_smart_quotes_to_unicode(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup)
assert dammit.unicode_markup == "<foo>\u2018\u2019\u201c\u201d</foo>"
def test_smart_quotes_to_xml_entities(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="xml")
assert dammit.unicode_markup == "<foo>&#x2018;&#x2019;&#x201C;&#x201D;</foo>"
def test_smart_quotes_to_html_entities(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="html")
assert dammit.unicode_markup == "<foo>&lsquo;&rsquo;&ldquo;&rdquo;</foo>"
def test_smart_quotes_to_ascii(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
assert dammit.unicode_markup == """<foo>''""</foo>"""
def test_detect_utf8(self):
utf8 = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
dammit = UnicodeDammit(utf8)
assert dammit.original_encoding.lower() == 'utf-8'
assert dammit.unicode_markup == 'Sacr\xe9 bleu! \N{SNOWMAN}'
def test_convert_hebrew(self):
hebrew = b"\xed\xe5\xec\xf9"
dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
assert dammit.original_encoding.lower() == 'iso-8859-8'
assert dammit.unicode_markup == '\u05dd\u05d5\u05dc\u05e9'
def test_dont_see_smart_quotes_where_there_are_none(self):
utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
dammit = UnicodeDammit(utf_8)
assert dammit.original_encoding.lower() == 'utf-8'
assert dammit.unicode_markup.encode("utf-8") == utf_8
def test_ignore_inappropriate_codecs(self):
utf8_data = "Räksmörgås".encode("utf-8")
dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
assert dammit.original_encoding.lower() == 'utf-8'
def test_ignore_invalid_codecs(self):
utf8_data = "Räksmörgås".encode("utf-8")
for bad_encoding in ['.utf8', '...', 'utF---16.!']:
dammit = UnicodeDammit(utf8_data, [bad_encoding])
assert dammit.original_encoding.lower() == 'utf-8'
def test_exclude_encodings(self):
# This is UTF-8.
utf8_data = "Räksmörgås".encode("utf-8")
# But if we exclude UTF-8 from consideration, the guess is
# Windows-1252.
dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8"])
assert dammit.original_encoding.lower() == 'windows-1252'
# And if we exclude that, there is no valid guess at all.
dammit = UnicodeDammit(
utf8_data, exclude_encodings=["utf-8", "windows-1252"])
assert dammit.original_encoding == None
class TestEncodingDetector(object):
def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character(self):
detected = EncodingDetector(
b'<?xml version="1.0" encoding="UTF-\xdb" ?>')
encodings = list(detected.encodings)
assert 'utf-\N{REPLACEMENT CHARACTER}' in encodings
def test_detect_html5_style_meta_tag(self):
for data in (
b'<html><meta charset="euc-jp" /></html>',
b"<html><meta charset='euc-jp' /></html>",
b"<html><meta charset=euc-jp /></html>",
b"<html><meta charset=euc-jp/></html>"):
dammit = UnicodeDammit(data, is_html=True)
assert "euc-jp" == dammit.original_encoding
def test_last_ditch_entity_replacement(self):
# This is a UTF-8 document that contains bytestrings
# completely incompatible with UTF-8 (ie. encoded with some other
# encoding).
#
# Since there is no consistent encoding for the document,
# Unicode, Dammit will eventually encode the document as UTF-8
# and encode the incompatible characters as REPLACEMENT
# CHARACTER.
#
# If chardet is installed, it will detect that the document
# can be converted into ISO-8859-1 without errors. This happens
# to be the wrong encoding, but it is a consistent encoding, so the
# code we're testing here won't run.
#
# So we temporarily disable chardet if it's present.
doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
chardet = bs4.dammit.chardet_dammit
logging.disable(logging.WARNING)
try:
def noop(str):
return None
bs4.dammit.chardet_dammit = noop
dammit = UnicodeDammit(doc)
assert True == dammit.contains_replacement_characters
assert "\ufffd" in dammit.unicode_markup
soup = BeautifulSoup(doc, "html.parser")
assert soup.contains_replacement_characters
finally:
logging.disable(logging.NOTSET)
bs4.dammit.chardet_dammit = chardet
def test_byte_order_mark_removed(self):
# A document written in UTF-16LE will have its byte order marker stripped.
data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
dammit = UnicodeDammit(data)
assert "<a>áé</a>" == dammit.unicode_markup
assert "utf-16le" == dammit.original_encoding
def test_known_definite_versus_user_encodings(self):
# The known_definite_encodings are used before sniffing the
# byte-order mark; the user_encodings are used afterwards.
# Here's a document in UTF-16LE.
data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
dammit = UnicodeDammit(data)
# We can process it as UTF-16 by passing it in as a known
# definite encoding.
before = UnicodeDammit(data, known_definite_encodings=["utf-16"])
assert "utf-16" == before.original_encoding
# If we pass UTF-8 as a user encoding, it's not even
# tried--the encoding sniffed from the byte-order mark takes
# precedence.
after = UnicodeDammit(data, user_encodings=["utf-8"])
assert "utf-16le" == after.original_encoding
assert ["utf-16le"] == [x[0] for x in dammit.tried_encodings]
# Here's a document in ISO-8859-8.
hebrew = b"\xed\xe5\xec\xf9"
dammit = UnicodeDammit(hebrew, known_definite_encodings=["utf-8"],
user_encodings=["iso-8859-8"])
# The known_definite_encodings don't work, BOM sniffing does
# nothing (it only works for a few UTF encodings), but one of
# the user_encodings does work.
assert "iso-8859-8" == dammit.original_encoding
assert ["utf-8", "iso-8859-8"] == [x[0] for x in dammit.tried_encodings]
def test_deprecated_override_encodings(self):
# override_encodings is a deprecated alias for
# known_definite_encodings.
hebrew = b"\xed\xe5\xec\xf9"
dammit = UnicodeDammit(
hebrew,
known_definite_encodings=["shift-jis"],
override_encodings=["utf-8"],
user_encodings=["iso-8859-8"],
)
assert "iso-8859-8" == dammit.original_encoding
# known_definite_encodings and override_encodings were tried
# before user_encodings.
assert ["shift-jis", "utf-8", "iso-8859-8"] == (
[x[0] for x in dammit.tried_encodings]
)
def test_detwingle(self):
# Here's a UTF8 document.
utf8 = ("\N{SNOWMAN}" * 3).encode("utf8")
# Here's a Windows-1252 document.
windows_1252 = (
"\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
"\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")
# Through some unholy alchemy, they've been stuck together.
doc = utf8 + windows_1252 + utf8
# The document can't be turned into UTF-8:
with pytest.raises(UnicodeDecodeError):
doc.decode("utf8")
# Unicode, Dammit thinks the whole document is Windows-1252,
# and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃"
# But if we run it through UnicodeDammit.detwingle, it's fixed:
fixed = UnicodeDammit.detwingle(doc)
assert "☃☃☃“Hi, I like Windows!”☃☃☃" == fixed.decode("utf8")
def test_detwingle_ignores_multibyte_characters(self):
# Each of these characters has a UTF-8 representation ending
# in \x93. \x93 is a smart quote if interpreted as
# Windows-1252. But our code knows to skip over multibyte
# UTF-8 characters, so they'll survive the process unscathed.
for tricky_unicode_char in (
"\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
"\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
"\xf0\x90\x90\x93", # This is a CJK character, not sure which one.
):
input = tricky_unicode_char.encode("utf8")
assert input.endswith(b'\x93')
output = UnicodeDammit.detwingle(input)
assert output == input
def test_find_declared_encoding(self):
# Test our ability to find a declared encoding inside an
# XML or HTML document.
#
# Even if the document comes in as Unicode, it may be
# interesting to know what encoding was claimed
# originally.
html_unicode = '<html><head><meta charset="utf-8"></head></html>'
html_bytes = html_unicode.encode("ascii")
xml_unicode = '<?xml version="1.0" encoding="ISO-8859-1" ?>'
xml_bytes = xml_unicode.encode("ascii")
m = EncodingDetector.find_declared_encoding
assert m(html_unicode, is_html=False) is None
assert "utf-8" == m(html_unicode, is_html=True)
assert "utf-8" == m(html_bytes, is_html=True)
assert "iso-8859-1" == m(xml_unicode)
assert "iso-8859-1" == m(xml_bytes)
# Normally, only the first few kilobytes of a document are checked for
# an encoding.
spacer = b' ' * 5000
assert m(spacer + html_bytes) is None
assert m(spacer + xml_bytes) is None
# But you can tell find_declared_encoding to search an entire
# HTML document.
assert (
m(spacer + html_bytes, is_html=True, search_entire_document=True)
== "utf-8"
)
# The XML encoding declaration has to be the very first thing
# in the document. We'll allow whitespace before the document
# starts, but nothing else.
assert m(xml_bytes, search_entire_document=True) == "iso-8859-1"
assert m(b' ' + xml_bytes, search_entire_document=True) == "iso-8859-1"
assert m(b'a' + xml_bytes, search_entire_document=True) is None
class TestEntitySubstitution(object):
"""Standalone tests of the EntitySubstitution class."""
def setup_method(self):
self.sub = EntitySubstitution
def test_simple_html_substitution(self):
# Unicode characters corresponding to named HTML entities
# are substituted, and no others.
s = "foo\u2200\N{SNOWMAN}\u00f5bar"
assert self.sub.substitute_html(s) == "foo&forall;\N{SNOWMAN}&otilde;bar"
def test_smart_quote_substitution(self):
# MS smart quotes are a common source of frustration, so we
# give them a special test.
quotes = b"\x91\x92foo\x93\x94"
dammit = UnicodeDammit(quotes)
assert self.sub.substitute_html(dammit.markup) == "&lsquo;&rsquo;foo&ldquo;&rdquo;"
def test_html5_entity(self):
# Some HTML5 entities correspond to single- or multi-character
# Unicode sequences.
for entity, u in (
# A few spot checks of our ability to recognize
# special character sequences and convert them
# to named entities.
('&models;', '\u22a7'),
('&Nfr;', '\U0001d511'),
('&ngeqq;', '\u2267\u0338'),
('&not;', '\xac'),
('&Not;', '\u2aec'),
# We _could_ convert | to &verbarr;, but we don't, because
# | is an ASCII character.
('|', '|'),
# Similarly for the fj ligature, which we could convert to
# &fjlig;, but we don't.
("fj", "fj"),
# We do convert _these_ ASCII characters to HTML entities,
# because that's required to generate valid HTML.
('&gt;', '>'),
('&lt;', '<'),
('&amp;', '&'),
):
template = '3 %s 4'
raw = template % u
with_entities = template % entity
assert self.sub.substitute_html(raw) == with_entities
def test_html5_entity_with_variation_selector(self):
# Some HTML5 entities correspond either to a single-character
# Unicode sequence _or_ to the same character plus U+FE00,
# VARIATION SELECTOR 1. We can handle this.
data = "fjords \u2294 penguins"
markup = "fjords &sqcup; penguins"
assert self.sub.substitute_html(data) == markup
data = "fjords \u2294\ufe00 penguins"
markup = "fjords &sqcups; penguins"
assert self.sub.substitute_html(data) == markup
def test_xml_conversion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
s = 'Welcome to "my bar"'
assert self.sub.substitute_xml(s, False) == s
def test_xml_attribute_quoting_normally_uses_double_quotes(self):
assert self.sub.substitute_xml("Welcome", True) == '"Welcome"'
assert self.sub.substitute_xml("Bob's Bar", True) == '"Bob\'s Bar"'
def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
s = 'Welcome to "my bar"'
assert self.sub.substitute_xml(s, True) == "'Welcome to \"my bar\"'"
def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
s = 'Welcome to "Bob\'s Bar"'
assert self.sub.substitute_xml(s, True) == '"Welcome to &quot;Bob\'s Bar&quot;"'
def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
quoted = 'Welcome to "Bob\'s Bar"'
assert self.sub.substitute_xml(quoted) == quoted
def test_xml_quoting_handles_angle_brackets(self):
assert self.sub.substitute_xml("foo<bar>") == "foo&lt;bar&gt;"
def test_xml_quoting_handles_ampersands(self):
assert self.sub.substitute_xml("AT&T") == "AT&amp;T"
def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
assert self.sub.substitute_xml("&Aacute;T&T") == "&amp;Aacute;T&amp;T"
def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
assert self.sub.substitute_xml_containing_entities("&Aacute;T&T") == "&Aacute;T&amp;T"
def test_quotes_not_html_substituted(self):
"""There's no need to do this except inside attribute values."""
text = 'Bob\'s "bar"'
assert self.sub.substitute_html(text) == text
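# A minimal usage sketch (illustrative addition, not part of the upstream
# suite), mirroring test_detect_utf8 above: UnicodeDammit guesses the
# encoding and exposes both the decoded text and the winning codec.
def _unicode_dammit_example():
    data = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
    dammit = UnicodeDammit(data)
    assert dammit.original_encoding.lower() == "utf-8"
    assert dammit.unicode_markup == "Sacr\xe9 bleu! \N{SNOWMAN}"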


@@ -0,0 +1,38 @@
"Test harness for doctests."
# TODO: Pretty sure this isn't used and should be deleted.
# pylint: disable-msg=E0611,W0142
__metaclass__ = type
__all__ = [
'additional_tests',
]
import atexit
import doctest
import os
#from pkg_resources import (
# resource_filename, resource_exists, resource_listdir, cleanup_resources)
import unittest
DOCTEST_FLAGS = (
doctest.ELLIPSIS |
doctest.NORMALIZE_WHITESPACE |
doctest.REPORT_NDIFF)
# def additional_tests():
# "Run the doc tests (README.txt and docs/*, if any exist)"
# doctest_files = [
# os.path.abspath(resource_filename('bs4', 'README.txt'))]
# if resource_exists('bs4', 'docs'):
# for name in resource_listdir('bs4', 'docs'):
# if name.endswith('.txt'):
# doctest_files.append(
# os.path.abspath(
# resource_filename('bs4', 'docs/%s' % name)))
# kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS)
# atexit.register(cleanup_resources)
# return unittest.TestSuite((
# doctest.DocFileSuite(*doctest_files, **kwargs)))


@@ -0,0 +1,74 @@
"""Tests of classes in element.py.
The really big classes -- Tag, PageElement, and NavigableString --
are tested in separate files.
"""
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
NamespacedAttribute,
)
from . import SoupTest
class TestNamespacedAttribute(object):
def test_name_may_be_none_or_missing(self):
a = NamespacedAttribute("xmlns", None)
assert a == "xmlns"
a = NamespacedAttribute("xmlns", "")
assert a == "xmlns"
a = NamespacedAttribute("xmlns")
assert a == "xmlns"
def test_namespace_may_be_none_or_missing(self):
a = NamespacedAttribute(None, "tag")
assert a == "tag"
a = NamespacedAttribute("", "tag")
assert a == "tag"
def test_attribute_is_equivalent_to_colon_separated_string(self):
a = NamespacedAttribute("a", "b")
assert "a:b" == a
def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
a = NamespacedAttribute("a", "b", "c")
b = NamespacedAttribute("a", "b", "c")
assert a == b
# The actual namespace is not considered.
c = NamespacedAttribute("a", "b", None)
assert a == c
# But name and prefix are important.
d = NamespacedAttribute("a", "z", "c")
assert a != d
e = NamespacedAttribute("z", "b", "c")
assert a != e
class TestAttributeValueWithCharsetSubstitution(object):
"""Certain attributes are designed to have the charset of the
final document substituted into their value.
"""
def test_charset_meta_attribute_value(self):
# The value of a CharsetMetaAttributeValue is whatever
# encoding the string is in.
value = CharsetMetaAttributeValue("euc-jp")
assert "euc-jp" == value
assert "euc-jp" == value.original_value
assert "utf8" == value.encode("utf8")
assert "ascii" == value.encode("ascii")
def test_content_meta_attribute_value(self):
value = ContentMetaAttributeValue("text/html; charset=euc-jp")
assert "text/html; charset=euc-jp" == value
assert "text/html; charset=euc-jp" == value.original_value
assert "text/html; charset=utf8" == value.encode("utf8")
assert "text/html; charset=ascii" == value.encode("ascii")


@@ -0,0 +1,113 @@
import pytest
from bs4.element import Tag
from bs4.formatter import (
Formatter,
HTMLFormatter,
XMLFormatter,
)
from . import SoupTest
class TestFormatter(SoupTest):
def test_default_attributes(self):
# Test the default behavior of Formatter.attributes().
formatter = Formatter()
tag = Tag(name="tag")
tag['b'] = 1
tag['a'] = 2
# Attributes come out sorted by name. In Python 3, attributes
# normally come out of a dictionary in the order they were
# added.
assert [('a', 2), ('b', 1)] == formatter.attributes(tag)
# This works even if Tag.attrs is None, though this shouldn't
# normally happen.
tag.attrs = None
assert [] == formatter.attributes(tag)
assert ' ' == formatter.indent
def test_sort_attributes(self):
# Test the ability to override Formatter.attributes() to,
# e.g., disable the normal sorting of attributes.
class UnsortedFormatter(Formatter):
def attributes(self, tag):
self.called_with = tag
for k, v in sorted(tag.attrs.items()):
if k == 'ignore':
continue
yield k,v
soup = self.soup('<p cval="1" aval="2" ignore="ignored"></p>')
formatter = UnsortedFormatter()
decoded = soup.decode(formatter=formatter)
# attributes() was called on the <p> tag. It filtered out one
# attribute and sorted the other two.
assert formatter.called_with == soup.p
assert '<p aval="2" cval="1"></p>' == decoded
def test_empty_attributes_are_booleans(self):
# Test the behavior of empty_attributes_are_booleans as well
# as which Formatters have it enabled.
for name in ('html', 'minimal', None):
formatter = HTMLFormatter.REGISTRY[name]
assert False == formatter.empty_attributes_are_booleans
formatter = XMLFormatter.REGISTRY[None]
assert False == formatter.empty_attributes_are_booleans
formatter = HTMLFormatter.REGISTRY['html5']
assert True == formatter.empty_attributes_are_booleans
# Verify that the constructor sets the value.
formatter = Formatter(empty_attributes_are_booleans=True)
assert True == formatter.empty_attributes_are_booleans
# Now demonstrate what it does to markup.
for markup in (
"<option selected></option>",
'<option selected=""></option>'
):
soup = self.soup(markup)
for formatter in ('html', 'minimal', None):
assert b'<option selected=""></option>' == soup.option.encode(formatter=formatter)
assert b'<option selected></option>' == soup.option.encode(formatter='html5')
@pytest.mark.parametrize(
"indent,expect",
[
(None, '<a>\n<b>\ntext\n</b>\n</a>'),
(-1, '<a>\n<b>\ntext\n</b>\n</a>'),
(0, '<a>\n<b>\ntext\n</b>\n</a>'),
("", '<a>\n<b>\ntext\n</b>\n</a>'),
(1, '<a>\n <b>\n  text\n </b>\n</a>'),
(2, '<a>\n  <b>\n    text\n  </b>\n</a>'),
("\t", '<a>\n\t<b>\n\t\ttext\n\t</b>\n</a>'),
('abc', '<a>\nabc<b>\nabcabctext\nabc</b>\n</a>'),
# Some invalid inputs -- the default behavior is used.
(object(), '<a>\n <b>\n  text\n </b>\n</a>'),
(b'bytes', '<a>\n <b>\n  text\n </b>\n</a>'),
]
)
def test_indent(self, indent, expect):
# Pretty-print a tree with a Formatter set to
# indent in a certain way and verify the results.
soup = self.soup("<a><b>text</b></a>")
formatter = Formatter(indent=indent)
assert soup.prettify(formatter=formatter) == expect
# Pretty-printing only happens with prettify(), not
# encode().
assert soup.encode(formatter=formatter) != expect
def test_default_indent_value(self):
formatter = Formatter()
assert formatter.indent == ' '
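# A minimal usage sketch (illustrative addition, not part of the upstream
# suite), mirroring the indent cases above: pass a Formatter carrying a
# custom indent string to prettify().
def _indent_example():
    from bs4 import BeautifulSoup
    soup = BeautifulSoup("<a><b>text</b></a>", "html.parser")
    formatter = Formatter(indent="\t")
    assert soup.prettify(formatter=formatter) == "<a>\n\t<b>\n\t\ttext\n\t</b>\n</a>"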


@@ -0,0 +1,223 @@
"""Tests to ensure that the html5lib tree builder generates good trees."""
import warnings
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError as e:
HTML5LIB_PRESENT = False
from bs4.element import SoupStrainer
from . import (
HTML5TreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
not HTML5LIB_PRESENT,
"html5lib seems not to be present, not testing its tree builder.")
class TestHTML5LibBuilder(SoupTest, HTML5TreeBuilderSmokeTest):
"""See ``HTML5TreeBuilderSmokeTest``."""
@property
def default_builder(self):
return HTML5TreeBuilder
def test_soupstrainer(self):
# The html5lib tree builder does not support SoupStrainers.
strainer = SoupStrainer("b")
markup = "<p>A <b>bold</b> statement.</p>"
with warnings.catch_warnings(record=True) as w:
soup = self.soup(markup, parse_only=strainer)
assert soup.decode() == self.document_for(markup)
assert "the html5lib tree builder doesn't support parse_only" in str(w[0].message)
def test_correctly_nested_tables(self):
"""html5lib inserts <tbody> tags where other parsers don't."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assert_soup(
markup,
'<table id="1"><tbody><tr><td>Here\'s another table:'
'<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
'</td></tr></tbody></table>')
self.assert_soup(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_xml_declaration_followed_by_doctype(self):
markup = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<p>foo</p>
</body>
</html>'''
soup = self.soup(markup)
# Verify that we can reach the <p> tag; this means the tree is connected.
assert b"<p>foo</p>" == soup.p.encode()
def test_reparented_markup(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
soup = self.soup(markup)
assert "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>" == soup.body.decode()
assert 2 == len(soup.find_all('p'))
def test_reparented_markup_ends_with_whitespace(self):
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
soup = self.soup(markup)
assert "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>" == soup.body.decode()
assert 2 == len(soup.find_all('p'))
def test_reparented_markup_containing_identical_whitespace_nodes(self):
"""Verify that we keep the two whitespace nodes in this
document distinct when reparenting the adjacent <tbody> tags.
"""
markup = '<table> <tbody><tbody><ims></tbody> </table>'
soup = self.soup(markup)
space1, space2 = soup.find_all(string=' ')
tbody1, tbody2 = soup.find_all('tbody')
assert space1.next_element is tbody1
assert tbody2.next_element is space2
def test_reparented_markup_containing_children(self):
markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>'
soup = self.soup(markup)
noscript = soup.noscript
assert "target" == noscript.next_element
target = soup.find(string='target')
# The 'aftermath' string was duplicated; we want the second one.
final_aftermath = soup.find_all(string='aftermath')[-1]
# The <noscript> tag was moved beneath a copy of the <a> tag,
# but the 'target' string within is still connected to the
# (second) 'aftermath' string.
assert final_aftermath == target.next_element
assert target == final_aftermath.previous_element
def test_processing_instruction(self):
"""Processing instructions become comments."""
markup = b"""<?PITarget PIContent?>"""
soup = self.soup(markup)
assert str(soup).startswith("<!--?PITarget PIContent?-->")
def test_cloned_multivalue_node(self):
markup = b"""<a class="my_class"><p></a>"""
soup = self.soup(markup)
a1, a2 = soup.find_all('a')
assert a1 == a2
assert a1 is not a2
def test_foster_parenting(self):
markup = b"""<table><td></tbody>A"""
soup = self.soup(markup)
assert "<body>A<table><tbody><tr><td></td></tr></tbody></table></body>" == soup.body.decode()
def test_extraction(self):
"""
Test that extraction does not destroy the tree.
https://bugs.launchpad.net/beautifulsoup/+bug/1782928
"""
markup = """
<html><head></head>
<style>
</style><script></script><body><p>hello</p></body></html>
"""
soup = self.soup(markup)
[s.extract() for s in soup('script')]
[s.extract() for s in soup('style')]
assert len(soup.find_all("p")) == 1
def test_empty_comment(self):
"""
Test that empty comment does not break structure.
https://bugs.launchpad.net/beautifulsoup/+bug/1806598
"""
markup = """
<html>
<body>
<form>
<!----><input type="text">
</form>
</body>
</html>
"""
soup = self.soup(markup)
inputs = []
for form in soup.find_all('form'):
inputs.extend(form.find_all('input'))
assert len(inputs) == 1
def test_tracking_line_numbers(self):
# The html5lib TreeBuilder keeps track of the line number and
# position of each element.
markup = "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
soup = self.soup(markup)
assert 2 == soup.p.sourceline
assert 5 == soup.p.sourcepos
assert "sourceline" == soup.p.find('sourceline').name
# You can deactivate this behavior.
soup = self.soup(markup, store_line_numbers=False)
assert "sourceline" == soup.p.sourceline.name
assert "sourcepos" == soup.p.sourcepos.name
def test_special_string_containers(self):
# The html5lib tree builder doesn't support this standard feature,
# because there's no way of knowing, when a string is created,
# where in the tree it will eventually end up.
pass
def test_html5_attributes(self):
# The html5lib TreeBuilder can convert any entity named in
# the HTML5 spec to a sequence of Unicode characters, and
# convert those Unicode characters to a (potentially
# different) named entity on the way out.
#
# This is a copy of the same test from
# HTMLParserTreeBuilderSmokeTest. It's not in the superclass
# because the lxml HTML TreeBuilder _doesn't_ work this way.
for input_element, output_unicode, output_element in (
("&RightArrowLeftArrow;", '\u21c4', b'&rlarr;'),
('&models;', '\u22a7', b'&models;'),
('&Nfr;', '\U0001d511', b'&Nfr;'),
('&ngeqq;', '\u2267\u0338', b'&ngeqq;'),
('&not;', '\xac', b'&not;'),
('&Not;', '\u2aec', b'&Not;'),
('&quot;', '"', b'"'),
('&there4;', '\u2234', b'&there4;'),
('&Therefore;', '\u2234', b'&there4;'),
('&therefore;', '\u2234', b'&there4;'),
("&fjlig;", 'fj', b'fj'),
("&sqcup;", '\u2294', b'&sqcup;'),
("&sqcups;", '\u2294\ufe00', b'&sqcups;'),
("&apos;", "'", b"'"),
("&verbar;", "|", b"|"),
):
markup = '<div>%s</div>' % input_element
div = self.soup(markup).div
without_element = div.encode()
expect = b"<div>%s</div>" % output_unicode.encode("utf8")
assert without_element == expect
with_element = div.encode(formatter="html")
expect = b"<div>%s</div>" % output_element
assert with_element == expect


@@ -0,0 +1,136 @@
"""Tests to ensure that the html.parser tree builder generates good
trees."""
from pdb import set_trace
import pickle
import warnings
from bs4.builder import (
HTMLParserTreeBuilder,
XMLParsedAsHTMLWarning,
)
from bs4.builder._htmlparser import BeautifulSoupHTMLParser
from . import SoupTest, HTMLTreeBuilderSmokeTest
class TestHTMLParserTreeBuilder(SoupTest, HTMLTreeBuilderSmokeTest):
default_builder = HTMLParserTreeBuilder
def test_namespaced_system_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_namespaced_public_doctype(self):
# html.parser can't handle namespaced doctypes, so skip this one.
pass
def test_builder_is_pickled(self):
"""Unlike most tree builders, HTMLParserTreeBuilder and will
be restored after pickling.
"""
tree = self.soup("<a><b>foo</a>")
dumped = pickle.dumps(tree, 2)
loaded = pickle.loads(dumped)
assert isinstance(loaded.builder, type(tree.builder))
def test_redundant_empty_element_closing_tags(self):
self.assert_soup('<br></br><br></br><br></br>', "<br/><br/><br/>")
self.assert_soup('</br></br></br>', "")
def test_empty_element(self):
# This verifies that any buffered data present when the parser
# finishes working is handled.
self.assert_soup("foo &# bar", "foo &amp;# bar")
def test_tracking_line_numbers(self):
# The html.parser TreeBuilder keeps track of line number and
# position of each element.
markup = "\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
soup = self.soup(markup)
assert 2 == soup.p.sourceline
assert 3 == soup.p.sourcepos
assert "sourceline" == soup.p.find('sourceline').name
# You can deactivate this behavior.
soup = self.soup(markup, store_line_numbers=False)
assert "sourceline" == soup.p.sourceline.name
assert "sourcepos" == soup.p.sourcepos.name
def test_on_duplicate_attribute(self):
# The html.parser tree builder has a variety of ways of
# handling a tag that contains the same attribute multiple times.
markup = '<a class="cls" href="url1" href="url2" href="url3" id="id">'
# If you don't provide any particular value for
# on_duplicate_attribute, later values replace earlier values.
soup = self.soup(markup)
assert "url3" == soup.a['href']
assert ["cls"] == soup.a['class']
assert "id" == soup.a['id']
# You can also get this behavior explicitly.
def assert_attribute(on_duplicate_attribute, expected):
soup = self.soup(
markup, on_duplicate_attribute=on_duplicate_attribute
)
assert expected == soup.a['href']
# Verify that non-duplicate attributes are treated normally.
assert ["cls"] == soup.a['class']
assert "id" == soup.a['id']
assert_attribute(None, "url3")
assert_attribute(BeautifulSoupHTMLParser.REPLACE, "url3")
# You can ignore subsequent values in favor of the first.
assert_attribute(BeautifulSoupHTMLParser.IGNORE, "url1")
# And you can pass in a callable that does whatever you want.
def accumulate(attrs, key, value):
if not isinstance(attrs[key], list):
attrs[key] = [attrs[key]]
attrs[key].append(value)
assert_attribute(accumulate, ["url1", "url2", "url3"])
def test_html5_attributes(self):
# The html.parser TreeBuilder can convert any entity named in
# the HTML5 spec to a sequence of Unicode characters, and
# convert those Unicode characters to a (potentially
# different) named entity on the way out.
for input_element, output_unicode, output_element in (
("&RightArrowLeftArrow;", '\u21c4', b'&rlarr;'),
('&models;', '\u22a7', b'&models;'),
('&Nfr;', '\U0001d511', b'&Nfr;'),
('&ngeqq;', '\u2267\u0338', b'&ngeqq;'),
('&not;', '\xac', b'&not;'),
('&Not;', '\u2aec', b'&Not;'),
('&quot;', '"', b'"'),
('&there4;', '\u2234', b'&there4;'),
('&Therefore;', '\u2234', b'&there4;'),
('&therefore;', '\u2234', b'&there4;'),
("&fjlig;", 'fj', b'fj'),
("&sqcup;", '\u2294', b'&sqcup;'),
("&sqcups;", '\u2294\ufe00', b'&sqcups;'),
("&apos;", "'", b"'"),
("&verbar;", "|", b"|"),
):
markup = '<div>%s</div>' % input_element
div = self.soup(markup).div
without_element = div.encode()
expect = b"<div>%s</div>" % output_unicode.encode("utf8")
assert without_element == expect
with_element = div.encode(formatter="html")
expect = b"<div>%s</div>" % output_element
assert with_element == expect
class TestHTMLParserSubclass(SoupTest):
def test_error(self):
"""Verify that our HTMLParser subclass implements error() in a way
that doesn't cause a crash.
"""
parser = BeautifulSoupHTMLParser()
with warnings.catch_warnings(record=True) as warns:
parser.error("don't crash")
[warning] = warns
assert "don't crash" == str(warning.message)


@@ -0,0 +1,199 @@
"""Tests to ensure that the lxml tree builder generates good trees."""
import pickle
import re
import warnings
try:
import lxml.etree
LXML_PRESENT = True
LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError as e:
LXML_PRESENT = False
LXML_VERSION = (0,)
if LXML_PRESENT:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from . import (
HTMLTreeBuilderSmokeTest,
XMLTreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
not LXML_PRESENT,
"lxml seems not to be present, not testing its tree builder.")
class TestLXMLTreeBuilder(SoupTest, HTMLTreeBuilderSmokeTest):
"""See ``HTMLTreeBuilderSmokeTest``."""
@property
def default_builder(self):
return LXMLTreeBuilder
def test_out_of_range_entity(self):
self.assert_soup(
"<p>foo&#10000000000000;bar</p>", "<p>foobar</p>")
self.assert_soup(
"<p>foo&#x10000000000000;bar</p>", "<p>foobar</p>")
self.assert_soup(
"<p>foo&#1000000000;bar</p>", "<p>foobar</p>")
def test_entities_in_foreign_document_encoding(self):
# We can't implement this case correctly because by the time we
# hear about markup like "&#147;", it's been (incorrectly) converted into
# a string like u'\x93'
pass
# In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
# test if an old version of lxml is installed.
@skipIf(
not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
"Skipping doctype test for old version of lxml to avoid segfault.")
def test_empty_doctype(self):
soup = self.soup("<!DOCTYPE>")
doctype = soup.contents[0]
assert "" == doctype.strip()
def test_beautifulstonesoup_is_xml_parser(self):
# Make sure that the deprecated BSS class uses an xml builder
# if one is installed.
with warnings.catch_warnings(record=True) as w:
soup = BeautifulStoneSoup("<b />")
assert "<b/>" == str(soup.b)
assert "BeautifulStoneSoup class is deprecated" in str(w[0].message)
def test_tracking_line_numbers(self):
# The lxml TreeBuilder cannot keep track of line numbers from
# the original markup. Even if you ask for line numbers, we
# don't have 'em.
#
# This means that if you have a tag like <sourceline> or
# <sourcepos>, attribute access will find it rather than
# giving you a numeric answer.
soup = self.soup(
"\n <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>",
store_line_numbers=True
)
assert "sourceline" == soup.p.sourceline.name
assert "sourcepos" == soup.p.sourcepos.name
@skipIf(
not LXML_PRESENT,
"lxml seems not to be present, not testing its XML tree builder.")
class TestLXMLXMLTreeBuilder(SoupTest, XMLTreeBuilderSmokeTest):
"""See ``HTMLTreeBuilderSmokeTest``."""
@property
def default_builder(self):
return LXMLTreeBuilderForXML
def test_namespace_indexing(self):
soup = self.soup(
'<?xml version="1.1"?>\n'
'<root>'
'<tag xmlns="http://unprefixed-namespace.com">content</tag>'
'<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</prefix:tag2>'
'<prefix2:tag3 xmlns:prefix2="http://another-namespace.com">'
'<subtag xmlns="http://another-unprefixed-namespace.com">'
'<subsubtag xmlns="http://yet-another-unprefixed-namespace.com">'
'</prefix2:tag3>'
'</root>'
)
# The BeautifulSoup object includes every namespace prefix
# defined in the entire document. This is the default set of
# namespaces used by soupsieve.
#
# Un-prefixed namespaces are not included, and if a given
# prefix is defined twice, only the first prefix encountered
# in the document shows up here.
assert soup._namespaces == {
'xml': 'http://www.w3.org/XML/1998/namespace',
'prefix': 'http://prefixed-namespace.com',
'prefix2': 'http://another-namespace.com'
}
# A Tag object includes only the namespace prefixes
# that were in scope when it was parsed.
# We do not track un-prefixed namespaces as we can only hold
# one (the first one), and it will be recognized as the
# default namespace by soupsieve, even when operating from a
# tag with a different un-prefixed namespace.
assert soup.tag._namespaces == {
'xml': 'http://www.w3.org/XML/1998/namespace',
}
assert soup.tag2._namespaces == {
'prefix': 'http://prefixed-namespace.com',
'xml': 'http://www.w3.org/XML/1998/namespace',
}
assert soup.subtag._namespaces == {
'prefix2': 'http://another-namespace.com',
'xml': 'http://www.w3.org/XML/1998/namespace',
}
assert soup.subsubtag._namespaces == {
'prefix2': 'http://another-namespace.com',
'xml': 'http://www.w3.org/XML/1998/namespace',
}
def test_namespace_interaction_with_select_and_find(self):
# Demonstrate how namespaces interact with select* and
# find* methods.
soup = self.soup(
'<?xml version="1.1"?>\n'
'<root>'
'<tag xmlns="http://unprefixed-namespace.com">content</tag>'
'<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</tag>'
'<subtag xmlns:prefix="http://another-namespace-same-prefix.com">'
'<prefix:tag3>'
'</subtag>'
'</root>'
)
# soupselect uses namespace URIs.
assert soup.select_one('tag').name == 'tag'
assert soup.select_one('prefix|tag2').name == 'tag2'
# If a prefix is declared more than once, only the first usage
# is registered with the BeautifulSoup object.
assert soup.select_one('prefix|tag3') is None
# But you can always explicitly specify a namespace dictionary.
assert soup.select_one(
'prefix|tag3', namespaces=soup.subtag._namespaces
).name == 'tag3'
# And a Tag (as opposed to the BeautifulSoup object) will
# have a set of default namespaces scoped to that Tag.
assert soup.subtag.select_one('prefix|tag3').name == 'tag3'
# The find() methods aren't fully namespace-aware; they just
# look at prefixes.
assert soup.find('tag').name == 'tag'
assert soup.find('prefix:tag2').name == 'tag2'
assert soup.find('prefix:tag3').name == 'tag3'
assert soup.subtag.find('prefix:tag3').name == 'tag3'
def test_pickle_removes_builder(self):
# The lxml TreeBuilder is not picklable, so it won't be
# preserved in a pickle/unpickle operation.
soup = self.soup("<a>some markup</a>")
assert isinstance(soup.builder, self.default_builder)
pickled = pickle.dumps(soup)
unpickled = pickle.loads(pickled)
assert "some markup" == unpickled.a.string
assert unpickled.builder is None


@@ -0,0 +1,144 @@
import pytest
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
NavigableString,
RubyParenthesisString,
RubyTextString,
Script,
Stylesheet,
TemplateString,
)
from . import SoupTest
class TestNavigableString(SoupTest):
def test_text_acquisition_methods(self):
# These methods are intended for use against Tag, but they
# work on NavigableString as well,
s = NavigableString("fee ")
cdata = CData("fie ")
comment = Comment("foe ")
assert "fee " == s.get_text()
assert "fee" == s.get_text(strip=True)
assert ["fee "] == list(s.strings)
assert ["fee"] == list(s.stripped_strings)
assert ["fee "] == list(s._all_strings())
assert "fie " == cdata.get_text()
assert "fie" == cdata.get_text(strip=True)
assert ["fie "] == list(cdata.strings)
assert ["fie"] == list(cdata.stripped_strings)
assert ["fie "] == list(cdata._all_strings())
# Since a Comment isn't normally considered 'text',
# these methods generally do nothing.
assert "" == comment.get_text()
assert [] == list(comment.strings)
assert [] == list(comment.stripped_strings)
assert [] == list(comment._all_strings())
# Unless you specifically say that comments are okay.
assert "foe" == comment.get_text(strip=True, types=Comment)
assert "foe " == comment.get_text(types=(Comment, NavigableString))
def test_string_has_immutable_name_property(self):
# string.name is defined as None and can't be modified
string = self.soup("s").string
assert None == string.name
with pytest.raises(AttributeError):
string.name = 'foo'
class TestNavigableStringSubclasses(SoupTest):
def test_cdata(self):
# None of the current builders turn CDATA sections into CData
# objects, but you can create them manually.
soup = self.soup("")
cdata = CData("foo")
soup.insert(1, cdata)
assert str(soup) == "<![CDATA[foo]]>"
assert soup.find(string="foo") == "foo"
assert soup.contents[0] == "foo"
def test_cdata_is_never_formatted(self):
"""Text inside a CData object is passed into the formatter.
But the return value is ignored.
"""
self.count = 0
def increment(*args):
self.count += 1
return "BITTER FAILURE"
soup = self.soup("")
cdata = CData("<><><>")
soup.insert(1, cdata)
assert b"<![CDATA[<><><>]]>" == soup.encode(formatter=increment)
assert 1 == self.count
def test_doctype_ends_in_newline(self):
# Unlike other NavigableString subclasses, a DOCTYPE always ends
# in a newline.
doctype = Doctype("foo")
soup = self.soup("")
soup.insert(1, doctype)
assert soup.encode() == b"<!DOCTYPE foo>\n"
def test_declaration(self):
d = Declaration("foo")
assert "<?foo?>" == d.output_ready()
def test_default_string_containers(self):
# In some cases, we use different NavigableString subclasses for
# the same text in different tags.
soup = self.soup(
"<div>text</div><script>text</script><style>text</style>"
)
assert [NavigableString, Script, Stylesheet] == [
x.__class__ for x in soup.find_all(string=True)
]
# The TemplateString is a little unusual because it's generally found
# _inside_ children of a <template> element, not a direct child of the
# <template> element.
soup = self.soup(
"<template>Some text<p>In a tag</p></template>Some text outside"
)
assert all(
isinstance(x, TemplateString)
for x in soup.template._all_strings(types=None)
)
# Once the <template> tag closed, we went back to using
# NavigableString.
outside = soup.template.next_sibling
assert isinstance(outside, NavigableString)
assert not isinstance(outside, TemplateString)
# The TemplateString is also unusual because it can contain
# NavigableString subclasses of _other_ types, such as
# Comment.
markup = b"<template>Some text<p>In a tag</p><!--with a comment--></template>"
soup = self.soup(markup)
assert markup == soup.template.encode("utf8")
def test_ruby_strings(self):
markup = "<ruby>漢 <rp>(</rp><rt>kan</rt><rp>)</rp> 字 <rp>(</rp><rt>ji</rt><rp>)</rp></ruby>"
soup = self.soup(markup)
assert isinstance(soup.rp.string, RubyParenthesisString)
assert isinstance(soup.rt.string, RubyTextString)
# Just as a demo, here's what this means for get_text usage.
assert "漢字" == soup.get_text(strip=True)
assert "漢(kan)字(ji)" == soup.get_text(
strip=True,
types=(NavigableString, RubyTextString, RubyParenthesisString)
)
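# A minimal usage sketch (illustrative addition, not part of the upstream
# suite): comments are excluded from text extraction unless their type is
# explicitly requested, as the tests above verify.
def _comment_text_example():
    comment = Comment("hidden ")
    assert comment.get_text() == ""
    assert comment.get_text(types=Comment) == "hidden "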


@@ -0,0 +1,751 @@
"""Tests of the bs4.element.PageElement class"""
import copy
import pickle
import pytest
from soupsieve import SelectorSyntaxError
from bs4 import BeautifulSoup
from bs4.element import (
Comment,
SoupStrainer,
)
from . import SoupTest
class TestEncoding(SoupTest):
"""Test the ability to encode objects into strings."""
def test_unicode_string_can_be_encoded(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
assert soup.b.string.encode("utf-8") == "\N{SNOWMAN}".encode("utf-8")
def test_tag_containing_unicode_string_can_be_encoded(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
assert soup.b.encode("utf-8") == html.encode("utf-8")
def test_encoding_substitutes_unrecognized_characters_by_default(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
assert soup.b.encode("ascii") == b"<b>&#9731;</b>"
def test_encoding_can_be_made_strict(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
with pytest.raises(UnicodeEncodeError):
soup.encode("ascii", errors="strict")
def test_decode_contents(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
assert "\N{SNOWMAN}" == soup.b.decode_contents()
def test_encode_contents(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
assert "\N{SNOWMAN}".encode("utf8") == soup.b.encode_contents(
encoding="utf8"
)
def test_deprecated_renderContents(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
assert "\N{SNOWMAN}".encode("utf8") == soup.b.renderContents()
def test_repr(self):
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
assert html == repr(soup)
class TestFormatters(SoupTest):
"""Test the formatting feature, used by methods like decode() and
prettify(), and the formatters themselves.
"""
def test_default_formatter_is_minimal(self):
markup = "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
# The < is converted back into &lt; but the e-with-acute is left alone.
assert decoded == self.document_for(
"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
)
def test_formatter_html(self):
markup = "<br><b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="html")
assert decoded == self.document_for(
"<br/><b>&lt;&lt;Sacr&eacute; bleu!&gt;&gt;</b>"
)
def test_formatter_html5(self):
markup = "<br><b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="html5")
assert decoded == self.document_for(
"<br><b>&lt;&lt;Sacr&eacute; bleu!&gt;&gt;</b>"
)
def test_formatter_minimal(self):
markup = "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
soup = self.soup(markup)
decoded = soup.decode(formatter="minimal")
# The < is converted back into &lt; but the e-with-acute is left alone.
assert decoded == self.document_for(
"<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
)
def test_formatter_null(self):
markup = "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
soup = self.soup(markup)
decoded = soup.decode(formatter=None)
# Neither the angle brackets nor the e-with-acute are converted.
# This is not valid HTML, but it's what the user wanted.
assert decoded == self.document_for(
"<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
)
def test_formatter_custom(self):
markup = "<b>&lt;foo&gt;</b><b>bar</b><br/>"
soup = self.soup(markup)
decoded = soup.decode(formatter = lambda x: x.upper())
# Instead of normal entity conversion code, the custom
# callable is called on every string.
assert decoded == self.document_for("<b><FOO></b><b>BAR</b><br/>")
def test_formatter_is_run_on_attribute_values(self):
markup = '<a href="http://a.com?a=b&c=é">e</a>'
soup = self.soup(markup)
a = soup.a
expect_minimal = '<a href="http://a.com?a=b&amp;c=é">e</a>'
assert expect_minimal == a.decode()
assert expect_minimal == a.decode(formatter="minimal")
expect_html = '<a href="http://a.com?a=b&amp;c=&eacute;">e</a>'
assert expect_html == a.decode(formatter="html")
assert markup == a.decode(formatter=None)
expect_upper = '<a href="HTTP://A.COM?A=B&C=É">E</a>'
assert expect_upper == a.decode(formatter=lambda x: x.upper())
def test_formatter_skips_script_tag_for_html_documents(self):
doc = """
<script type="text/javascript">
console.log("< < hey > > ");
</script>
"""
encoded = BeautifulSoup(doc, 'html.parser').encode()
assert b"< < hey > >" in encoded
def test_formatter_skips_style_tag_for_html_documents(self):
doc = """
<style type="text/css">
console.log("< < hey > > ");
</style>
"""
encoded = BeautifulSoup(doc, 'html.parser').encode()
assert b"< < hey > >" in encoded
def test_prettify_leaves_preformatted_text_alone(self):
soup = self.soup("<div> foo <pre> \tbar\n \n </pre> baz <textarea> eee\nfff\t</textarea></div>")
# Everything outside the <pre> tag is reformatted, but everything
# inside is left alone.
assert '<div>\n foo\n <pre> \tbar\n \n </pre>\n baz\n <textarea> eee\nfff\t</textarea>\n</div>' == soup.div.prettify()
def test_prettify_accepts_formatter_function(self):
soup = BeautifulSoup("<html><body>foo</body></html>", 'html.parser')
pretty = soup.prettify(formatter = lambda x: x.upper())
assert "FOO" in pretty
def test_prettify_outputs_unicode_by_default(self):
soup = self.soup("<a></a>")
assert str == type(soup.prettify())
def test_prettify_can_encode_data(self):
soup = self.soup("<a></a>")
assert bytes == type(soup.prettify("utf-8"))
def test_html_entity_substitution_off_by_default(self):
markup = "<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
soup = self.soup(markup)
encoded = soup.b.encode("utf-8")
assert encoded == markup.encode('utf-8')
def test_encoding_substitution(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/>')
soup = self.soup(meta_tag)
# Parse the document, and the charset appears unchanged.
assert soup.meta['content'] == 'text/html; charset=x-sjis'
# Encode the document into some encoding, and the encoding is
# substituted into the meta tag.
utf_8 = soup.encode("utf-8")
assert b"charset=utf-8" in utf_8
euc_jp = soup.encode("euc_jp")
assert b"charset=euc_jp" in euc_jp
shift_jis = soup.encode("shift-jis")
assert b"charset=shift-jis" in shift_jis
utf_16_u = soup.encode("utf-16").decode("utf-16")
assert "charset=utf-16" in utf_16_u
def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self):
markup = ('<head><meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/></head><pre>foo</pre>')
# Beautiful Soup used to try to rewrite the meta tag even if the
# meta tag got filtered out by the strainer. This test makes
# sure that doesn't happen.
strainer = SoupStrainer('pre')
soup = self.soup(markup, parse_only=strainer)
assert soup.contents[0].name == 'pre'
class TestCSSSelectors(SoupTest):
"""Test basic CSS selector functionality.
This functionality is implemented in soupsieve, which has a much
more comprehensive test suite, so this is basically an extra check
that soupsieve works as expected.
"""
HTML = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>The title</title>
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
</head>
<body>
<custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag>
<div id="main" class="fancy">
<div id="inner">
<h1 id="header1">An H1</h1>
<p>Some text</p>
<p class="onep" id="p1">Some more text</p>
<h2 id="header2">An H2</h2>
<p class="class1 class2 class3" id="pmulti">Another</p>
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
<h2 id="header3">Another H2</h2>
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
<span class="s1">
<a href="#" id="s1a1">span1a1</a>
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
<span class="span2">
<a href="#" id="s2a1">span2a1</a>
</span>
<span class="span3"></span>
<custom-dashed-tag class="dashed" id="dash2"/>
<div data-tag="dashedvalue" id="data1"/>
</span>
</div>
<x id="xid">
<z id="zida"/>
<z id="zidab"/>
<z id="zidac"/>
</x>
<y id="yid">
<z id="zidb"/>
</y>
<p lang="en" id="lang-en">English</p>
<p lang="en-gb" id="lang-en-gb">English UK</p>
<p lang="en-us" id="lang-en-us">English US</p>
<p lang="fr" id="lang-fr">French</p>
</div>
<div id="footer">
</div>
"""
def setup_method(self):
self.soup = BeautifulSoup(self.HTML, 'html.parser')
def assert_selects(self, selector, expected_ids, **kwargs):
el_ids = [el['id'] for el in self.soup.select(selector, **kwargs)]
el_ids.sort()
expected_ids.sort()
assert expected_ids == el_ids, "Selector %s, expected [%s], got [%s]" % (
selector, ', '.join(expected_ids), ', '.join(el_ids)
)
assertSelect = assert_selects
def assert_select_multiple(self, *tests):
for selector, expected_ids in tests:
self.assert_selects(selector, expected_ids)
def test_one_tag_one(self):
els = self.soup.select('title')
assert len(els) == 1
assert els[0].name == 'title'
assert els[0].contents == ['The title']
def test_one_tag_many(self):
els = self.soup.select('div')
assert len(els) == 4
for div in els:
assert div.name == 'div'
el = self.soup.select_one('div')
assert 'main' == el['id']
def test_select_one_returns_none_if_no_match(self):
match = self.soup.select_one('nonexistenttag')
assert None == match
def test_tag_in_tag_one(self):
els = self.soup.select('div div')
self.assert_selects('div div', ['inner', 'data1'])
def test_tag_in_tag_many(self):
for selector in ('html div', 'html body div', 'body div'):
self.assert_selects(selector, ['data1', 'main', 'inner', 'footer'])
def test_limit(self):
self.assert_selects('html div', ['main'], limit=1)
self.assert_selects('html body div', ['inner', 'main'], limit=2)
self.assert_selects('body div', ['data1', 'main', 'inner', 'footer'],
limit=10)
def test_tag_no_match(self):
assert len(self.soup.select('del')) == 0
def test_invalid_tag(self):
with pytest.raises(SelectorSyntaxError):
self.soup.select('tag%t')
def test_select_dashed_tag_ids(self):
self.assert_selects('custom-dashed-tag', ['dash1', 'dash2'])
def test_select_dashed_by_id(self):
dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]')
assert dashed[0].name == 'custom-dashed-tag'
assert dashed[0]['id'] == 'dash2'
def test_dashed_tag_text(self):
assert self.soup.select('body > custom-dashed-tag')[0].text == 'Hello there.'
def test_select_dashed_matches_find_all(self):
assert self.soup.select('custom-dashed-tag') == self.soup.find_all('custom-dashed-tag')
def test_header_tags(self):
self.assert_select_multiple(
('h1', ['header1']),
('h2', ['header2', 'header3']),
)
def test_class_one(self):
for selector in ('.onep', 'p.onep', 'html p.onep'):
els = self.soup.select(selector)
assert len(els) == 1
assert els[0].name == 'p'
assert els[0]['class'] == ['onep']
def test_class_mismatched_tag(self):
els = self.soup.select('div.onep')
assert len(els) == 0
def test_one_id(self):
for selector in ('div#inner', '#inner', 'div div#inner'):
self.assert_selects(selector, ['inner'])
def test_bad_id(self):
els = self.soup.select('#doesnotexist')
assert len(els) == 0
def test_items_in_id(self):
els = self.soup.select('div#inner p')
assert len(els) == 3
for el in els:
assert el.name == 'p'
assert els[1]['class'] == ['onep']
assert not els[0].has_attr('class')
def test_a_bunch_of_emptys(self):
for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
assert len(self.soup.select(selector)) == 0
def test_multi_class_support(self):
for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
'.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
self.assert_selects(selector, ['pmulti'])
def test_multi_class_selection(self):
for selector in ('.class1.class3', '.class3.class2',
'.class1.class2.class3'):
self.assert_selects(selector, ['pmulti'])
def test_child_selector(self):
self.assert_selects('.s1 > a', ['s1a1', 's1a2'])
self.assert_selects('.s1 > a span', ['s1a2s1'])
def test_child_selector_id(self):
self.assert_selects('.s1 > a#s1a2 span', ['s1a2s1'])
def test_attribute_equals(self):
self.assert_select_multiple(
('p[class="onep"]', ['p1']),
('p[id="p1"]', ['p1']),
('[class="onep"]', ['p1']),
('[id="p1"]', ['p1']),
('link[rel="stylesheet"]', ['l1']),
('link[type="text/css"]', ['l1']),
('link[href="blah.css"]', ['l1']),
('link[href="no-blah.css"]', []),
('[rel="stylesheet"]', ['l1']),
('[type="text/css"]', ['l1']),
('[href="blah.css"]', ['l1']),
('[href="no-blah.css"]', []),
('p[href="no-blah.css"]', []),
('[href="no-blah.css"]', []),
)
def test_attribute_tilde(self):
self.assert_select_multiple(
('p[class~="class1"]', ['pmulti']),
('p[class~="class2"]', ['pmulti']),
('p[class~="class3"]', ['pmulti']),
('[class~="class1"]', ['pmulti']),
('[class~="class2"]', ['pmulti']),
('[class~="class3"]', ['pmulti']),
('a[rel~="friend"]', ['bob']),
('a[rel~="met"]', ['bob']),
('[rel~="friend"]', ['bob']),
('[rel~="met"]', ['bob']),
)
def test_attribute_startswith(self):
self.assert_select_multiple(
('[rel^="style"]', ['l1']),
('link[rel^="style"]', ['l1']),
('notlink[rel^="notstyle"]', []),
('[rel^="notstyle"]', []),
('link[rel^="notstyle"]', []),
('link[href^="bla"]', ['l1']),
('a[href^="http://"]', ['bob', 'me']),
('[href^="http://"]', ['bob', 'me']),
('[id^="p"]', ['pmulti', 'p1']),
('[id^="m"]', ['me', 'main']),
('div[id^="m"]', ['main']),
('a[id^="m"]', ['me']),
('div[data-tag^="dashed"]', ['data1'])
)
def test_attribute_endswith(self):
self.assert_select_multiple(
('[href$=".css"]', ['l1']),
('link[href$=".css"]', ['l1']),
('link[id$="1"]', ['l1']),
('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']),
('div[id$="1"]', ['data1']),
('[id$="noending"]', []),
)
def test_attribute_contains(self):
self.assert_select_multiple(
# From test_attribute_startswith
('[rel*="style"]', ['l1']),
('link[rel*="style"]', ['l1']),
('notlink[rel*="notstyle"]', []),
('[rel*="notstyle"]', []),
('link[rel*="notstyle"]', []),
('link[href*="bla"]', ['l1']),
('[href*="http://"]', ['bob', 'me']),
('[id*="p"]', ['pmulti', 'p1']),
('div[id*="m"]', ['main']),
('a[id*="m"]', ['me']),
# From test_attribute_endswith
('[href*=".css"]', ['l1']),
('link[href*=".css"]', ['l1']),
('link[id*="1"]', ['l1']),
('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']),
('div[id*="1"]', ['data1']),
('[id*="noending"]', []),
# New for this test
('[href*="."]', ['bob', 'me', 'l1']),
('a[href*="."]', ['bob', 'me']),
('link[href*="."]', ['l1']),
('div[id*="n"]', ['main', 'inner']),
('div[id*="nn"]', ['inner']),
('div[data-tag*="edval"]', ['data1'])
)
def test_attribute_exact_or_hypen(self):
self.assert_select_multiple(
('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
('p[lang|="fr"]', ['lang-fr']),
('p[lang|="gb"]', []),
)
def test_attribute_exists(self):
self.assert_select_multiple(
('[rel]', ['l1', 'bob', 'me']),
('link[rel]', ['l1']),
('a[rel]', ['bob', 'me']),
('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
('p[class]', ['p1', 'pmulti']),
('[blah]', []),
('p[blah]', []),
('div[data-tag]', ['data1'])
)
def test_quoted_space_in_selector_name(self):
html = """<div style="display: wrong">nope</div>
<div style="display: right">yes</div>
"""
soup = BeautifulSoup(html, 'html.parser')
[chosen] = soup.select('div[style="display: right"]')
assert "yes" == chosen.string
def test_unsupported_pseudoclass(self):
with pytest.raises(NotImplementedError):
self.soup.select("a:no-such-pseudoclass")
with pytest.raises(SelectorSyntaxError):
self.soup.select("a:nth-of-type(a)")
def test_nth_of_type(self):
# Try to select first paragraph
els = self.soup.select('div#inner p:nth-of-type(1)')
assert len(els) == 1
assert els[0].string == 'Some text'
# Try to select third paragraph
els = self.soup.select('div#inner p:nth-of-type(3)')
assert len(els) == 1
assert els[0].string == 'Another'
# Try to select (non-existent!) fourth paragraph
els = self.soup.select('div#inner p:nth-of-type(4)')
assert len(els) == 0
# Zero will select no tags.
els = self.soup.select('div p:nth-of-type(0)')
assert len(els) == 0
def test_nth_of_type_direct_descendant(self):
els = self.soup.select('div#inner > p:nth-of-type(1)')
assert len(els) == 1
assert els[0].string == 'Some text'
def test_id_child_selector_nth_of_type(self):
self.assert_selects('#inner > p:nth-of-type(2)', ['p1'])
def test_select_on_element(self):
# Other tests operate on the tree; this operates on an element
# within the tree.
inner = self.soup.find("div", id="main")
selected = inner.select("div")
# The <div id="inner"> tag was selected. The <div id="footer">
# tag was not.
self.assert_selects_ids(selected, ['inner', 'data1'])
def test_overspecified_child_id(self):
self.assert_selects(".fancy #inner", ['inner'])
self.assert_selects(".normal #inner", [])
def test_adjacent_sibling_selector(self):
self.assert_selects('#p1 + h2', ['header2'])
self.assert_selects('#p1 + h2 + p', ['pmulti'])
self.assert_selects('#p1 + #header2 + .class1', ['pmulti'])
assert [] == self.soup.select('#p1 + p')
def test_general_sibling_selector(self):
self.assert_selects('#p1 ~ h2', ['header2', 'header3'])
self.assert_selects('#p1 ~ #header2', ['header2'])
self.assert_selects('#p1 ~ h2 + a', ['me'])
self.assert_selects('#p1 ~ h2 + [rel="me"]', ['me'])
assert [] == self.soup.select('#inner ~ h2')
def test_dangling_combinator(self):
with pytest.raises(SelectorSyntaxError):
self.soup.select('h1 >')
def test_sibling_combinator_wont_select_same_tag_twice(self):
self.assert_selects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr'])
# Test the selector grouping operator (the comma)
def test_multiple_select(self):
self.assert_selects('x, y', ['xid', 'yid'])
def test_multiple_select_with_no_space(self):
self.assert_selects('x,y', ['xid', 'yid'])
def test_multiple_select_with_more_space(self):
self.assert_selects('x,    y', ['xid', 'yid'])
def test_multiple_select_duplicated(self):
self.assert_selects('x, x', ['xid'])
def test_multiple_select_sibling(self):
self.assert_selects('x, y ~ p[lang=fr]', ['xid', 'lang-fr'])
def test_multiple_select_tag_and_direct_descendant(self):
self.assert_selects('x, y > z', ['xid', 'zidb'])
def test_multiple_select_direct_descendant_and_tags(self):
self.assert_selects('div > x, y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])
def test_multiple_select_indirect_descendant(self):
self.assert_selects('div x,y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])
def test_invalid_multiple_select(self):
with pytest.raises(SelectorSyntaxError):
self.soup.select(',x, y')
with pytest.raises(SelectorSyntaxError):
self.soup.select('x,,y')
def test_multiple_select_attrs(self):
self.assert_selects('p[lang=en], p[lang=en-gb]', ['lang-en', 'lang-en-gb'])
def test_multiple_select_ids(self):
self.assert_selects('x, y > z[id=zida], z[id=zidab], z[id=zidb]', ['xid', 'zidb', 'zidab'])
def test_multiple_select_nested(self):
self.assert_selects('body > div > x, y > z', ['xid', 'zidb'])
def test_select_duplicate_elements(self):
# When markup contains duplicate elements, a multiple select
# will find all of them.
markup = '<div class="c1"/><div class="c2"/><div class="c1"/>'
soup = BeautifulSoup(markup, 'html.parser')
selected = soup.select(".c1, .c2")
assert 3 == len(selected)
# Verify that find_all finds the same elements, though because
# of an implementation detail it finds them in a different
# order.
for element in soup.find_all(class_=['c1', 'c2']):
assert element in selected
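# Editor's usage sketch (hypothetical helper, not part of the upstream
# suite): select() returns a list of Tags, select_one() a Tag or None.
def _select_usage_sketch():
    from bs4 import BeautifulSoup
    soup = BeautifulSoup('<div id="main"><p class="x">hi</p></div>', 'html.parser')
    assert len(soup.select('div p.x')) == 1        # list of matching Tags
    assert soup.select_one('p.x').string == 'hi'   # first match only
    assert soup.select_one('p#nope') is None       # no match -> None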
class TestPersistence(SoupTest):
"Testing features like pickle and deepcopy."
def setup_method(self):
self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:leonardr@segfault.org">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
self.tree = self.soup(self.page)
def test_pickle_and_unpickle_identity(self):
# Pickling a tree, then unpickling it, yields a tree identical
# to the original.
dumped = pickle.dumps(self.tree, 2)
loaded = pickle.loads(dumped)
assert loaded.__class__ == BeautifulSoup
assert loaded.decode() == self.tree.decode()
def test_deepcopy_identity(self):
# Making a deepcopy of a tree yields an identical tree.
copied = copy.deepcopy(self.tree)
assert copied.decode() == self.tree.decode()
def test_copy_preserves_encoding(self):
soup = BeautifulSoup(b'<p>&nbsp;</p>', 'html.parser')
encoding = soup.original_encoding
copy = soup.__copy__()
assert "<p> </p>" == str(copy)
assert encoding == copy.original_encoding
def test_copy_preserves_builder_information(self):
tag = self.soup('<p></p>').p
# Simulate a tag obtained from a source file.
tag.sourceline = 10
tag.sourcepos = 33
copied = tag.__copy__()
# The TreeBuilder object is no longer available, but information
# obtained from it gets copied over to the new Tag object.
assert tag.sourceline == copied.sourceline
assert tag.sourcepos == copied.sourcepos
assert tag.can_be_empty_element == copied.can_be_empty_element
assert tag.cdata_list_attributes == copied.cdata_list_attributes
assert tag.preserve_whitespace_tags == copied.preserve_whitespace_tags
def test_unicode_pickle(self):
# A tree containing Unicode characters can be pickled.
html = "<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
loaded = pickle.loads(dumped)
assert loaded.decode() == soup.decode()
def test_copy_navigablestring_is_not_attached_to_tree(self):
html = "<b>Foo<a></a></b><b>Bar</b>"
soup = self.soup(html)
s1 = soup.find(string="Foo")
s2 = copy.copy(s1)
assert s1 == s2
assert None == s2.parent
assert None == s2.next_element
assert None != s1.next_sibling
assert None == s2.next_sibling
assert None == s2.previous_element
def test_copy_navigablestring_subclass_has_same_type(self):
html = "<b><!--Foo--></b>"
soup = self.soup(html)
s1 = soup.string
s2 = copy.copy(s1)
assert s1 == s2
assert isinstance(s2, Comment)
def test_copy_entire_soup(self):
html = "<div><b>Foo<a></a></b><b>Bar</b></div>end"
soup = self.soup(html)
soup_copy = copy.copy(soup)
assert soup == soup_copy
def test_copy_tag_copies_contents(self):
html = "<div><b>Foo<a></a></b><b>Bar</b></div>end"
soup = self.soup(html)
div = soup.div
div_copy = copy.copy(div)
# The two tags look the same, and evaluate to equal.
assert str(div) == str(div_copy)
assert div == div_copy
# But they're not the same object.
assert div is not div_copy
# And they don't have the same relation to the parse tree. The
# copy is not associated with a parse tree at all.
assert None == div_copy.parent
assert None == div_copy.previous_element
assert None == div_copy.find(string='Bar').next_element
assert None != div.find(string='Bar').next_element
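# Editor's sketch (hypothetical helper, not from the upstream suite): the
# persistence rules above in brief - copies compare equal but are detached,
# and pickling round-trips the whole tree.
def _persistence_sketch():
    import copy, pickle
    from bs4 import BeautifulSoup
    soup = BeautifulSoup("<div><b>Foo</b></div>", "html.parser")
    clone = copy.copy(soup.div)
    assert clone == soup.div and clone is not soup.div  # equal, but distinct
    assert clone.parent is None                         # detached from the tree
    assert pickle.loads(pickle.dumps(soup)).decode() == soup.decode()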

View File

@@ -0,0 +1,462 @@
# -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""
from pdb import set_trace
import logging
import os
import pickle
import pytest
import sys
import tempfile
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
GuessedAtParserWarning,
MarkupResemblesLocatorWarning,
dammit,
)
from bs4.builder import (
builder_registry,
TreeBuilder,
ParserRejectedMarkup,
)
from bs4.element import (
Comment,
SoupStrainer,
Tag,
NavigableString,
)
from . import (
default_builder,
SoupTest,
skipIf,
)
import warnings
try:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
LXML_PRESENT = True
except ImportError as e:
LXML_PRESENT = False
PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3,2))
class TestConstructor(SoupTest):
def test_short_unicode_input(self):
data = "<h1>éé</h1>"
soup = self.soup(data)
assert "éé" == soup.h1.string
def test_embedded_null(self):
data = "<h1>foo\0bar</h1>"
soup = self.soup(data)
assert "foo\0bar" == soup.h1.string
def test_exclude_encodings(self):
utf8_data = "Räksmörgås".encode("utf-8")
soup = self.soup(utf8_data, exclude_encodings=["utf-8"])
assert "windows-1252" == soup.original_encoding
def test_custom_builder_class(self):
# Verify that you can pass in a custom Builder class and
# it'll be instantiated with the appropriate keyword arguments.
class Mock(object):
def __init__(self, **kwargs):
self.called_with = kwargs
self.is_xml = True
self.store_line_numbers = False
self.cdata_list_attributes = []
self.preserve_whitespace_tags = []
self.string_containers = {}
def initialize_soup(self, soup):
pass
def feed(self, markup):
self.fed = markup
def reset(self):
pass
def ignore(self, ignore):
pass
set_up_substitutions = can_be_empty_element = ignore
def prepare_markup(self, *args, **kwargs):
yield "prepared markup", "original encoding", "declared encoding", "contains replacement characters"
kwargs = dict(
var="value",
# This is a deprecated BS3-era keyword argument, which
# will be stripped out.
convertEntities=True,
)
with warnings.catch_warnings(record=True):
soup = BeautifulSoup('', builder=Mock, **kwargs)
assert isinstance(soup.builder, Mock)
assert dict(var="value") == soup.builder.called_with
assert "prepared markup" == soup.builder.fed
# You can also instantiate the TreeBuilder yourself. In this
# case, that specific object is used and any keyword arguments
# to the BeautifulSoup constructor are ignored.
builder = Mock(**kwargs)
with warnings.catch_warnings(record=True) as w:
soup = BeautifulSoup(
'', builder=builder, ignored_value=True,
)
msg = str(w[0].message)
assert msg.startswith("Keyword arguments to the BeautifulSoup constructor will be ignored.")
assert builder == soup.builder
assert kwargs == builder.called_with
def test_parser_markup_rejection(self):
# If markup is completely rejected by the parser, an
# explanatory ParserRejectedMarkup exception is raised.
class Mock(TreeBuilder):
def feed(self, *args, **kwargs):
raise ParserRejectedMarkup("Nope.")
def prepare_markup(self, markup, *args, **kwargs):
# We're going to try two different ways of preparing this markup,
# but feed() will reject both of them.
yield markup, None, None, False
yield markup, None, None, False
import re
with pytest.raises(ParserRejectedMarkup) as exc_info:
BeautifulSoup('', builder=Mock)
assert "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help." in str(exc_info.value)
def test_cdata_list_attributes(self):
# Most attribute values are represented as scalars, but the
# HTML standard says that some attributes, like 'class' have
# space-separated lists as values.
markup = '<a id=" an id " class=" a class "></a>'
soup = self.soup(markup)
# Note that the spaces are stripped for 'class' but not for 'id'.
a = soup.a
assert " an id " == a['id']
assert ["a", "class"] == a['class']
# TreeBuilder takes an argument called 'multi_valued_attributes' which lets
# you customize or disable this. As always, you can customize the TreeBuilder
# by passing in a keyword argument to the BeautifulSoup constructor.
soup = self.soup(markup, builder=default_builder, multi_valued_attributes=None)
assert " a class " == soup.a['class']
# Here are two ways of saying that `id` is a multi-valued
# attribute in this context, but 'class' is not.
for switcheroo in ({'*': 'id'}, {'a': 'id'}):
with warnings.catch_warnings(record=True) as w:
# This will create a warning about not explicitly
# specifying a parser, but we'll ignore it.
soup = self.soup(markup, builder=None, multi_valued_attributes=switcheroo)
a = soup.a
assert ["an", "id"] == a['id']
assert " a class " == a['class']
def test_replacement_classes(self):
# Test the ability to pass in replacements for element classes
# which will be used when building the tree.
class TagPlus(Tag):
pass
class StringPlus(NavigableString):
pass
class CommentPlus(Comment):
pass
soup = self.soup(
"<a><b>foo</b>bar</a><!--whee-->",
element_classes = {
Tag: TagPlus,
NavigableString: StringPlus,
Comment: CommentPlus,
}
)
# The tree was built with TagPlus, StringPlus, and CommentPlus objects,
# rather than Tag, String, and Comment objects.
assert all(
isinstance(x, (TagPlus, StringPlus, CommentPlus))
for x in soup.recursiveChildGenerator()
)
def test_alternate_string_containers(self):
# Test the ability to customize the string containers for
# different types of tags.
class PString(NavigableString):
pass
class BString(NavigableString):
pass
soup = self.soup(
"<div>Hello.<p>Here is <b>some <i>bolded</i></b> text",
string_containers = {
'b': BString,
'p': PString,
}
)
# The string before the <p> tag is a regular NavigableString.
assert isinstance(soup.div.contents[0], NavigableString)
# The string inside the <p> tag, but not inside the <i> tag,
# is a PString.
assert isinstance(soup.p.contents[0], PString)
# Every string inside the <b> tag is a BString, even the one that
# was also inside an <i> tag.
for s in soup.b.strings:
assert isinstance(s, BString)
# Now that parsing was complete, the string_container_stack
# (where this information was kept) has been cleared out.
assert [] == soup.string_container_stack
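# Editor's sketch (illustrative; ScriptText is a hypothetical subclass):
# the same mechanism can mark up scraped <script> text for later filtering:
#
#   class ScriptText(NavigableString):
#       pass
#   soup = BeautifulSoup('<script>x</script>', 'html.parser',
#                        string_containers={'script': ScriptText})
#   isinstance(soup.script.string, ScriptText)   # True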
class TestWarnings(SoupTest):
def _assert_warning(self, warnings, cls):
for w in warnings:
if isinstance(w.message, cls):
return w
raise Exception("%s warning not found in %r" % (cls, warnings))
def _assert_no_parser_specified(self, w):
warning = self._assert_warning(w, GuessedAtParserWarning)
message = str(warning.message)
assert message.startswith(BeautifulSoup.NO_PARSER_SPECIFIED_WARNING[:60])
def test_warning_if_no_parser_specified(self):
with warnings.catch_warnings(record=True) as w:
soup = BeautifulSoup("<a><b></b></a>")
self._assert_no_parser_specified(w)
def test_warning_if_parser_specified_too_vague(self):
with warnings.catch_warnings(record=True) as w:
soup = BeautifulSoup("<a><b></b></a>", "html")
self._assert_no_parser_specified(w)
def test_no_warning_if_explicit_parser_specified(self):
with warnings.catch_warnings(record=True) as w:
soup = BeautifulSoup("<a><b></b></a>", "html.parser")
assert [] == w
def test_parseOnlyThese_renamed_to_parse_only(self):
with warnings.catch_warnings(record=True) as w:
soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b"))
msg = str(w[0].message)
assert "parseOnlyThese" in msg
assert "parse_only" in msg
assert b"<b></b>" == soup.encode()
def test_fromEncoding_renamed_to_from_encoding(self):
with warnings.catch_warnings(record=True) as w:
utf8 = b"\xc3\xa9"
soup = self.soup(utf8, fromEncoding="utf8")
msg = str(w[0].message)
assert "fromEncoding" in msg
assert "from_encoding" in msg
assert "utf8" == soup.original_encoding
def test_unrecognized_keyword_argument(self):
with pytest.raises(TypeError):
self.soup("<a>", no_such_argument=True)
@pytest.mark.parametrize(
"extension",
['markup.html', 'markup.htm', 'markup.HTML', 'markup.txt',
'markup.xhtml', 'markup.xml', "/home/user/file", "c:\\user\file"]
)
def test_resembles_filename_warning(self, extension):
# A warning is issued if the "markup" looks like the name of
# an HTML or text file, or a full path to a file on disk.
with warnings.catch_warnings(record=True) as w:
soup = self.soup("markup" + extension)
warning = self._assert_warning(w, MarkupResemblesLocatorWarning)
assert "looks more like a filename" in str(warning.message)
@pytest.mark.parametrize(
"extension",
['markuphtml', 'markup.com', '', 'markup.js']
)
def test_resembles_filename_no_warning(self, extension):
# The 'looks more like a filename' warning is not issued if
# the markup looks like a bare string, a domain name, or a
# file that's not an HTML file.
with warnings.catch_warnings(record=True) as w:
soup = self.soup("markup" + extension)
assert [] == w
def test_url_warning_with_bytes_url(self):
url = b"http://www.crummybytes.com/"
with warnings.catch_warnings(record=True) as warning_list:
soup = self.soup(url)
warning = self._assert_warning(
warning_list, MarkupResemblesLocatorWarning
)
assert "looks more like a URL" in str(warning.message)
assert url not in str(warning.message).encode("utf8")
def test_url_warning_with_unicode_url(self):
url = "http://www.crummyunicode.com/"
with warnings.catch_warnings(record=True) as warning_list:
# note - this url must differ from the bytes one otherwise
# python's warnings system swallows the second warning
soup = self.soup(url)
warning = self._assert_warning(
warning_list, MarkupResemblesLocatorWarning
)
assert "looks more like a URL" in str(warning.message)
assert url not in str(warning.message)
def test_url_warning_with_bytes_and_space(self):
# Here the markup contains something besides a URL, so no warning
# is issued.
with warnings.catch_warnings(record=True) as warning_list:
soup = self.soup(b"http://www.crummybytes.com/ is great")
assert not any("looks more like a URL" in str(w.message)
for w in warning_list)
def test_url_warning_with_unicode_and_space(self):
with warnings.catch_warnings(record=True) as warning_list:
soup = self.soup("http://www.crummyunicode.com/ is great")
assert not any("looks more like a URL" in str(w.message)
for w in warning_list)
class TestSelectiveParsing(SoupTest):
def test_parse_with_soupstrainer(self):
markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
strainer = SoupStrainer("b")
soup = self.soup(markup, parse_only=strainer)
assert soup.encode() == b"<b>Yes</b><b>Yes <c>Yes</c></b>"
class TestNewTag(SoupTest):
"""Test the BeautifulSoup.new_tag() method."""
def test_new_tag(self):
soup = self.soup("")
new_tag = soup.new_tag("foo", bar="baz", attrs={"name": "a name"})
assert isinstance(new_tag, Tag)
assert "foo" == new_tag.name
assert dict(bar="baz", name="a name") == new_tag.attrs
assert None == new_tag.parent
def test_tag_inherits_self_closing_rules_from_builder(self):
if LXML_PRESENT:
xml_soup = BeautifulSoup("", "lxml-xml")
xml_br = xml_soup.new_tag("br")
xml_p = xml_soup.new_tag("p")
# Both the <br> and <p> tag are empty-element, just because
# they have no contents.
assert b"<br/>" == xml_br.encode()
assert b"<p/>" == xml_p.encode()
html_soup = BeautifulSoup("", "html.parser")
html_br = html_soup.new_tag("br")
html_p = html_soup.new_tag("p")
# The HTML builder uses HTML's rules about which tags are
# empty-element tags, and the new tags reflect these rules.
assert b"<br/>" == html_br.encode()
assert b"<p></p>" == html_p.encode()
class TestNewString(SoupTest):
"""Test the BeautifulSoup.new_string() method."""
def test_new_string_creates_navigablestring(self):
soup = self.soup("")
s = soup.new_string("foo")
assert "foo" == s
assert isinstance(s, NavigableString)
def test_new_string_can_create_navigablestring_subclass(self):
soup = self.soup("")
s = soup.new_string("foo", Comment)
assert "foo" == s
assert isinstance(s, Comment)
class TestPickle(SoupTest):
# Test our ability to pickle the BeautifulSoup object itself.
def test_normal_pickle(self):
soup = self.soup("<a>some markup</a>")
pickled = pickle.dumps(soup)
unpickled = pickle.loads(pickled)
assert "some markup" == unpickled.a.string
def test_pickle_with_no_builder(self):
# We had a bug that prevented pickling from working if
# the builder wasn't set.
soup = self.soup("some markup")
soup.builder = None
pickled = pickle.dumps(soup)
unpickled = pickle.loads(pickled)
assert "some markup" == unpickled.string
class TestEncodingConversion(SoupTest):
# Test Beautiful Soup's ability to decode and encode from various
# encodings.
def setup_method(self):
self.unicode_data = '<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>'
self.utf8_data = self.unicode_data.encode("utf-8")
# Just so you know what it looks like.
assert self.utf8_data == b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>'
def test_ascii_in_unicode_out(self):
# ASCII input is converted to Unicode. The original_encoding
# attribute is set to 'utf-8', a superset of ASCII.
chardet = dammit.chardet_dammit
logging.disable(logging.WARNING)
try:
def noop(str):
return None
# Disable chardet, which will realize that the ASCII is ASCII.
dammit.chardet_dammit = noop
ascii = b"<foo>a</foo>"
soup_from_ascii = self.soup(ascii)
unicode_output = soup_from_ascii.decode()
assert isinstance(unicode_output, str)
assert unicode_output == self.document_for(ascii.decode())
assert soup_from_ascii.original_encoding.lower() == "utf-8"
finally:
logging.disable(logging.NOTSET)
dammit.chardet_dammit = chardet
def test_unicode_in_unicode_out(self):
# Unicode input is left alone. The original_encoding attribute
# is not set.
soup_from_unicode = self.soup(self.unicode_data)
assert soup_from_unicode.decode() == self.unicode_data
assert soup_from_unicode.foo.string == 'Sacr\xe9 bleu!'
assert soup_from_unicode.original_encoding == None
def test_utf8_in_unicode_out(self):
# UTF-8 input is converted to Unicode. The original_encoding
# attribute is set.
soup_from_utf8 = self.soup(self.utf8_data)
assert soup_from_utf8.decode() == self.unicode_data
assert soup_from_utf8.foo.string == 'Sacr\xe9 bleu!'
def test_utf8_out(self):
# The internal data structures can be encoded as UTF-8.
soup_from_unicode = self.soup(self.unicode_data)
assert soup_from_unicode.encode('utf-8') == self.utf8_data
@skipIf(
PYTHON_3_PRE_3_2,
"Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
def test_attribute_name_containing_unicode_characters(self):
markup = '<div><a \N{SNOWMAN}="snowman"></a></div>'
assert self.soup(markup).div.encode("utf8") == markup.encode("utf8")

View File

@@ -0,0 +1,221 @@
import warnings
from bs4.element import (
Comment,
NavigableString,
)
from . import SoupTest
class TestTag(SoupTest):
"""Test various methods of Tag which aren't so complicated they
need their own classes.
"""
def test__should_pretty_print(self):
# Test the rules about when a tag should be pretty-printed.
tag = self.soup("").new_tag("a_tag")
# No list of whitespace-preserving tags -> pretty-print
tag._preserve_whitespace_tags = None
assert True == tag._should_pretty_print(0)
# List exists but tag is not on the list -> pretty-print
tag.preserve_whitespace_tags = ["some_other_tag"]
assert True == tag._should_pretty_print(1)
# Indent level is None -> don't pretty-print
assert False == tag._should_pretty_print(None)
# Tag is on the whitespace-preserving list -> don't pretty-print
tag.preserve_whitespace_tags = ["some_other_tag", "a_tag"]
assert False == tag._should_pretty_print(1)
def test_len(self):
"""The length of a Tag is its number of children."""
soup = self.soup("<top>1<b>2</b>3</top>")
# The BeautifulSoup object itself contains one element: the
# <top> tag.
assert len(soup.contents) == 1
assert len(soup) == 1
# The <top> tag contains three elements: the text node "1", the
# <b> tag, and the text node "3".
assert len(soup.top) == 3
assert len(soup.top.contents) == 3
def test_member_access_invokes_find(self):
"""Accessing a Python member .foo invokes find('foo')"""
soup = self.soup('<b><i></i></b>')
assert soup.b == soup.find('b')
assert soup.b.i == soup.find('b').find('i')
assert soup.a == None
def test_deprecated_member_access(self):
soup = self.soup('<b><i></i></b>')
with warnings.catch_warnings(record=True) as w:
tag = soup.bTag
assert soup.b == tag
assert '.bTag is deprecated, use .find("b") instead. If you really were looking for a tag called bTag, use .find("bTag")' == str(w[0].message)
def test_has_attr(self):
"""has_attr() checks for the presence of an attribute.
Please note: has_attr() is different from
__in__. has_attr() checks the tag's attributes and __in__
checks the tag's children.
"""
soup = self.soup("<foo attr='bar'>")
assert soup.foo.has_attr('attr')
assert not soup.foo.has_attr('attr2')
def test_attributes_come_out_in_alphabetical_order(self):
markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')
def test_string(self):
# A Tag that contains only a text node makes that node
# available as .string.
soup = self.soup("<b>foo</b>")
assert soup.b.string == 'foo'
def test_empty_tag_has_no_string(self):
# A Tag with no children has no .string.
soup = self.soup("<b></b>")
assert soup.b.string == None
def test_tag_with_multiple_children_has_no_string(self):
# A Tag with multiple children has no .string.
soup = self.soup("<a>foo<b></b><b></b></b>")
assert soup.b.string == None
soup = self.soup("<a>foo<b></b>bar</b>")
assert soup.b.string == None
# Even if all the children are strings, due to trickery,
# it won't work--but this would be a good optimization.
soup = self.soup("<a>foo</b>")
soup.a.insert(1, "bar")
assert soup.a.string == None
def test_tag_with_recursive_string_has_string(self):
# A Tag with a single child which has a .string inherits that
# .string.
soup = self.soup("<a><b>foo</b></a>")
assert soup.a.string == "foo"
assert soup.string == "foo"
def test_lack_of_string(self):
"""Only a Tag containing a single text node has a .string."""
soup = self.soup("<b>f<i>e</i>o</b>")
assert soup.b.string is None
soup = self.soup("<b></b>")
assert soup.b.string is None
def test_all_text(self):
"""Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
assert soup.a.text == "ar t "
assert soup.a.get_text(strip=True) == "art"
assert soup.a.get_text(",") == "a,r, , t "
assert soup.a.get_text(",", strip=True) == "a,r,t"
def test_get_text_ignores_special_string_containers(self):
soup = self.soup("foo<!--IGNORE-->bar")
assert soup.get_text() == "foobar"
assert soup.get_text(types=(NavigableString, Comment)) == "fooIGNOREbar"
assert soup.get_text(types=None) == "fooIGNOREbar"
soup = self.soup("foo<style>CSS</style><script>Javascript</script>bar")
assert soup.get_text() == "foobar"
def test_all_strings_ignores_special_string_containers(self):
soup = self.soup("foo<!--IGNORE-->bar")
assert ['foo', 'bar'] == list(soup.strings)
soup = self.soup("foo<style>CSS</style><script>Javascript</script>bar")
assert ['foo', 'bar'] == list(soup.strings)
def test_string_methods_inside_special_string_container_tags(self):
# Strings inside tags like <script> are generally ignored by
# methods like get_text, because they're not what humans
# consider 'text'. But if you call get_text on the <script>
# tag itself, those strings _are_ considered to be 'text',
# because there's nothing else you might be looking for.
style = self.soup("<div>a<style>Some CSS</style></div>")
template = self.soup("<div>a<template><p>Templated <b>text</b>.</p><!--With a comment.--></template></div>")
script = self.soup("<div>a<script><!--a comment-->Some text</script></div>")
assert style.div.get_text() == "a"
assert list(style.div.strings) == ["a"]
assert style.div.style.get_text() == "Some CSS"
assert list(style.div.style.strings) == ['Some CSS']
# The comment is not picked up here. That's because it was
# parsed into a Comment object, which is not considered
# interesting by template.strings.
assert template.div.get_text() == "a"
assert list(template.div.strings) == ["a"]
assert template.div.template.get_text() == "Templated text."
assert list(template.div.template.strings) == ["Templated ", "text", "."]
# The comment is included here, because it didn't get parsed
# into a Comment object--it's part of the Script string.
assert script.div.get_text() == "a"
assert list(script.div.strings) == ["a"]
assert script.div.script.get_text() == "<!--a comment-->Some text"
assert list(script.div.script.strings) == ['<!--a comment-->Some text']
class TestMultiValuedAttributes(SoupTest):
"""Test the behavior of multi-valued attributes like 'class'.
The values of such attributes are always presented as lists.
"""
def test_single_value_becomes_list(self):
soup = self.soup("<a class='foo'>")
assert ["foo"] ==soup.a['class']
def test_multiple_values_becomes_list(self):
soup = self.soup("<a class='foo bar'>")
assert ["foo", "bar"] == soup.a['class']
def test_multiple_values_separated_by_weird_whitespace(self):
soup = self.soup("<a class='foo\tbar\nbaz'>")
assert ["foo", "bar", "baz"] ==soup.a['class']
def test_attributes_joined_into_string_on_output(self):
soup = self.soup("<a class='foo\tbar'>")
assert b'<a class="foo bar"></a>' == soup.a.encode()
def test_get_attribute_list(self):
soup = self.soup("<a id='abc def'>")
assert ['abc def'] == soup.a.get_attribute_list('id')
def test_accept_charset(self):
soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
assert ['ISO-8859-1', 'UTF-8'] == soup.form['accept-charset']
def test_cdata_attribute_applying_only_to_one_tag(self):
data = '<a accept-charset="ISO-8859-1 UTF-8"></a>'
soup = self.soup(data)
# We saw in another test that accept-charset is a cdata-list
# attribute for the <form> tag. But it's not a cdata-list
# attribute for any other tag.
assert 'ISO-8859-1 UTF-8' == soup.a['accept-charset']
def test_customization(self):
# It's possible to change which attributes of which tags
# are treated as multi-valued attributes.
#
# Here, 'id' is a multi-valued attribute and 'class' is not.
#
# TODO: This code is in the builder and should be tested there.
soup = self.soup(
'<a class="foo" id="bar">', multi_valued_attributes={ '*' : 'id' }
)
assert soup.a['class'] == 'foo'
assert soup.a['id'] == ['bar']

File diff suppressed because it is too large

View File

@@ -0,0 +1,54 @@
Copyright 2017- Paul Ganssle <paul@ganssle.io>
Copyright 2017- dateutil contributors (see AUTHORS file)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The above license applies to all contributions after 2017-12-01, as well as
all contributions that have been re-licensed (see AUTHORS file for the list of
contributors who have re-licensed their code).
--------------------------------------------------------------------------------
dateutil - Extensions to the standard Python datetime module.
Copyright (c) 2003-2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
Copyright (c) 2012-2014 - Tomi Pieviläinen <tomi.pievilainen@iki.fi>
Copyright (c) 2014-2016 - Yaron de Leeuw <me@jarondl.net>
Copyright (c) 2015- - Paul Ganssle <paul@ganssle.io>
Copyright (c) 2015- - dateutil contributors (see AUTHORS file)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The above BSD License Applies to all code, even that also covered by Apache 2.0.

View File

@@ -0,0 +1,8 @@
# -*- coding: utf-8 -*-
try:
from ._version import version as __version__
except ImportError:
__version__ = 'unknown'
__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz',
'utils', 'zoneinfo']

View File

@@ -0,0 +1,43 @@
"""
Common code used in multiple modules.
"""
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __hash__(self):
return hash((
self.weekday,
self.n,
))
def __ne__(self, other):
return not (self == other)
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
# vim:ts=4:sw=4:et

View File

@@ -0,0 +1,5 @@
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = '2.8.2'
version_tuple = (2, 8, 2)

View File

@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
"""
This module offers a generic Easter computing method for any given year, using
Western, Orthodox or Julian algorithms.
"""
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
"""
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
based in part on the algorithm of Oudin (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different Easter
calculation methods:
1. Original calculation in Julian calendar, valid in
dates after 326 AD
2. Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3. Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
* ``EASTER_JULIAN = 1``
* ``EASTER_ORTHODOX = 2``
* ``EASTER_WESTERN = 3``
The default method is method 3.
More about the algorithm may be found at:
`GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_
and
`The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_
"""
if not (1 <= method <= 3):
raise ValueError("invalid method")
# g - Golden year - 1
# c - Century
# h - (23 - Epact) mod 30
# i - Number of days from March 21 to Paschal Full Moon
# j - Weekday for PFM (0=Sunday, etc)
# p - Number of days from March 21 to Sunday on or before PFM
# (-6 to 28 methods 1 & 3, to 56 for method 2)
# e - Extra days to add for method 2 (converting Julian
# date to Gregorian date)
y = year
g = y % 19
e = 0
if method < 3:
# Old method
i = (19*g + 15) % 30
j = (y + y//4 + i) % 7
if method == 2:
# Extra dates to convert Julian to Gregorian date
e = 10
if y > 1600:
e = e + y//100 - 16 - (y//100 - 16)//4
else:
# New method
c = y//100
h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
j = (y + y//4 + i + 2 - c + c//4) % 7
# p can be from -6 to 56 corresponding to dates 22 March to 23 May
# (later dates apply to method 2, although 23 May never actually occurs)
p = i - j + e
d = 1 + (p + 27 + (p + 6)//40) % 31
m = 3 + (p + 26)//30
return datetime.date(int(y), int(m), int(d))
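# Editor's usage sketch (illustrative; guarded so importing stays
# side-effect free): the three methods can disagree for the same year.
if __name__ == "__main__":
    for name, method in (("julian", EASTER_JULIAN),
                         ("orthodox", EASTER_ORTHODOX),
                         ("western", EASTER_WESTERN)):
        print(name, easter(2023, method))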

View File

@@ -0,0 +1,61 @@
# -*- coding: utf-8 -*-
from ._parser import parse, parser, parserinfo, ParserError
from ._parser import DEFAULTPARSER, DEFAULTTZPARSER
from ._parser import UnknownTimezoneWarning
from ._parser import __doc__
from .isoparser import isoparser, isoparse
__all__ = ['parse', 'parser', 'parserinfo',
'isoparse', 'isoparser',
'ParserError',
'UnknownTimezoneWarning']
###
# Deprecate portions of the private interface so that downstream code that
# is improperly relying on it is given *some* notice.
def __deprecated_private_func(f):
from functools import wraps
import warnings
msg = ('{name} is a private function and may break without warning, '
'it will be moved and or renamed in future versions.')
msg = msg.format(name=f.__name__)
@wraps(f)
def deprecated_func(*args, **kwargs):
warnings.warn(msg, DeprecationWarning)
return f(*args, **kwargs)
return deprecated_func
def __deprecate_private_class(c):
import warnings
msg = ('{name} is a private class and may break without warning, '
'it will be moved and or renamed in future versions.')
msg = msg.format(name=c.__name__)
class private_class(c):
__doc__ = c.__doc__
def __init__(self, *args, **kwargs):
warnings.warn(msg, DeprecationWarning)
super(private_class, self).__init__(*args, **kwargs)
private_class.__name__ = c.__name__
return private_class
from ._parser import _timelex, _resultbase
from ._parser import _tzparser, _parsetz
_timelex = __deprecate_private_class(_timelex)
_tzparser = __deprecate_private_class(_tzparser)
_resultbase = __deprecate_private_class(_resultbase)
_parsetz = __deprecated_private_func(_parsetz)
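# Editor's note (illustrative, hypothetical names): the two wrappers above
# form a generic deprecation pattern for any private callable or class:
#
#   def _legacy_helper():
#       return 42
#   _legacy_helper = __deprecated_private_func(_legacy_helper)
#   _legacy_helper()   # emits DeprecationWarning, then returns 42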

File diff suppressed because it is too large

View File

@@ -0,0 +1,416 @@
# -*- coding: utf-8 -*-
"""
This module offers a parser for ISO-8601 strings
It is intended to support all valid date, time and datetime formats per the
ISO-8601 specification.
.. versionadded:: 2.7.0
"""
from datetime import datetime, timedelta, time, date
import calendar
from ...dateutil import tz
from functools import wraps
import re
from ... import six
__all__ = ["isoparse", "isoparser"]
def _takes_ascii(f):
@wraps(f)
def func(self, str_in, *args, **kwargs):
# If it's a stream, read the whole thing
str_in = getattr(str_in, 'read', lambda: str_in)()
# If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII
if isinstance(str_in, six.text_type):
# ASCII is the same in UTF-8
try:
str_in = str_in.encode('ascii')
except UnicodeEncodeError as e:
msg = 'ISO-8601 strings should contain only ASCII characters'
six.raise_from(ValueError(msg), e)
return f(self, str_in, *args, **kwargs)
return func
class isoparser(object):
def __init__(self, sep=None):
"""
:param sep:
A single character that separates date and time portions. If
``None``, the parser will accept any single character.
For strict ISO-8601 adherence, pass ``'T'``.
"""
if sep is not None:
if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
raise ValueError('Separator must be a single, non-numeric ' +
'ASCII character')
sep = sep.encode('ascii')
self._sep = sep
@_takes_ascii
def isoparse(self, dt_str):
"""
Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.
An ISO-8601 datetime string consists of a date portion, followed
optionally by a time portion - the date and time portions are separated
by a single character separator, which is ``T`` in the official
standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be
combined with a time portion.
Supported date formats are:
Common:
- ``YYYY``
- ``YYYY-MM`` or ``YYYYMM``
- ``YYYY-MM-DD`` or ``YYYYMMDD``
Uncommon:
- ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 1, i.e. Monday)
- ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day
The ISO week and day numbering follows the same logic as
:func:`datetime.date.isocalendar`.
Supported time formats are:
- ``hh``
- ``hh:mm`` or ``hhmm``
- ``hh:mm:ss`` or ``hhmmss``
- ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits)
Midnight is a special case for `hh`, as the standard supports both
00:00 and 24:00 as a representation. The decimal separator can be
either a dot or a comma.
.. caution::
Support for fractional components other than seconds is part of the
ISO-8601 standard, but is not currently implemented in this parser.
Supported time zone offset formats are:
- `Z` (UTC)
- `±HH:MM`
- `±HHMM`
- `±HH`
Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
with the exception of UTC, which will be represented as
:class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`.
:param dt_str:
A string or stream containing only an ISO-8601 datetime string
:return:
Returns a :class:`datetime.datetime` representing the string.
Unspecified components default to their lowest value.
.. warning::
As of version 2.7.0, the strictness of the parser should not be
considered a stable part of the contract. Any valid ISO-8601 string
that parses correctly with the default settings will continue to
parse correctly in future versions, but invalid strings that
currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not
guaranteed to continue failing in future versions if they encode
a valid date.
.. versionadded:: 2.7.0
"""
components, pos = self._parse_isodate(dt_str)
if len(dt_str) > pos:
if self._sep is None or dt_str[pos:pos + 1] == self._sep:
components += self._parse_isotime(dt_str[pos + 1:])
else:
raise ValueError('String contains unknown ISO components')
if len(components) > 3 and components[3] == 24:
components[3] = 0
return datetime(*components) + timedelta(days=1)
return datetime(*components)
@_takes_ascii
def parse_isodate(self, datestr):
"""
Parse the date portion of an ISO string.
:param datestr:
The string portion of an ISO string, without a separator
:return:
Returns a :class:`datetime.date` object
"""
components, pos = self._parse_isodate(datestr)
if pos < len(datestr):
raise ValueError('String contains unknown ISO ' +
'components: {!r}'.format(datestr.decode('ascii')))
return date(*components)
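    # Editor's sketch (illustrative): unlike isoparse(), this rejects any
    # trailing time portion:
    #
    #   isoparser().parse_isodate('2014-04-11')      # datetime.date(2014, 4, 11)
    #   isoparser().parse_isodate('2014-04-11T00')   # ValueError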
@_takes_ascii
def parse_isotime(self, timestr):
"""
Parse the time portion of an ISO string.
:param timestr:
The time portion of an ISO string, without a separator
:return:
Returns a :class:`datetime.time` object
"""
components = self._parse_isotime(timestr)
if components[0] == 24:
components[0] = 0
return time(*components)
@_takes_ascii
def parse_tzstr(self, tzstr, zero_as_utc=True):
"""
Parse a valid ISO time zone string.
See :func:`isoparser.isoparse` for details on supported formats.
:param tzstr:
A string representing an ISO time zone offset
:param zero_as_utc:
Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones
:return:
Returns :class:`dateutil.tz.tzoffset` for offsets and
:class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
specified) offsets equivalent to UTC.
"""
return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
# Constants
_DATE_SEP = b'-'
_TIME_SEP = b':'
_FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)')
def _parse_isodate(self, dt_str):
try:
return self._parse_isodate_common(dt_str)
except ValueError:
return self._parse_isodate_uncommon(dt_str)
def _parse_isodate_common(self, dt_str):
len_str = len(dt_str)
components = [1, 1, 1]
if len_str < 4:
raise ValueError('ISO string too short')
# Year
components[0] = int(dt_str[0:4])
pos = 4
if pos >= len_str:
return components, pos
has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
if has_sep:
pos += 1
# Month
if len_str - pos < 2:
raise ValueError('Invalid common month')
components[1] = int(dt_str[pos:pos + 2])
pos += 2
if pos >= len_str:
if has_sep:
return components, pos
else:
raise ValueError('Invalid ISO format')
if has_sep:
if dt_str[pos:pos + 1] != self._DATE_SEP:
raise ValueError('Invalid separator in ISO string')
pos += 1
# Day
if len_str - pos < 2:
raise ValueError('Invalid common day')
components[2] = int(dt_str[pos:pos + 2])
return components, pos + 2
def _parse_isodate_uncommon(self, dt_str):
if len(dt_str) < 4:
raise ValueError('ISO string too short')
# All ISO formats start with the year
year = int(dt_str[0:4])
has_sep = dt_str[4:5] == self._DATE_SEP
pos = 4 + has_sep # Skip '-' if it's there
if dt_str[pos:pos + 1] == b'W':
# YYYY-?Www-?D?
pos += 1
weekno = int(dt_str[pos:pos + 2])
pos += 2
dayno = 1
if len(dt_str) > pos:
if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
raise ValueError('Inconsistent use of dash separator')
pos += has_sep
dayno = int(dt_str[pos:pos + 1])
pos += 1
base_date = self._calculate_weekdate(year, weekno, dayno)
else:
# YYYYDDD or YYYY-DDD
if len(dt_str) - pos < 3:
raise ValueError('Invalid ordinal day')
ordinal_day = int(dt_str[pos:pos + 3])
pos += 3
if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
raise ValueError('Invalid ordinal day' +
' {} for year {}'.format(ordinal_day, year))
base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)
components = [base_date.year, base_date.month, base_date.day]
return components, pos
def _calculate_weekdate(self, year, week, day):
"""
Calculate the date corresponding to the ISO year-week-day calendar.
This function is effectively the inverse of
:func:`datetime.date.isocalendar`.
:param year:
The year in the ISO calendar
:param week:
The week in the ISO calendar - range is [1, 53]
:param day:
The day in the ISO calendar - range is [1 (MON), 7 (SUN)]
:return:
Returns a :class:`datetime.date`
"""
if not 0 < week < 54:
raise ValueError('Invalid week: {}'.format(week))
if not 0 < day < 8: # Range is 1-7
raise ValueError('Invalid weekday: {}'.format(day))
# Get week 1 for the specific year:
jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it
week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)
# Now add the specific number of weeks and days to get what we want
week_offset = (week - 1) * 7 + (day - 1)
return week_1 + timedelta(days=week_offset)
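    # Editor's sketch (illustrative): this round-trips with
    # date.isocalendar() - ISO year 2004, week 1, day 1 falls in calendar
    # year 2003:
    #
    #   isoparser()._calculate_weekdate(2004, 1, 1)   # datetime.date(2003, 12, 29)
    #   date(2003, 12, 29).isocalendar()              # (2004, 1, 1)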
def _parse_isotime(self, timestr):
len_str = len(timestr)
components = [0, 0, 0, 0, None]
pos = 0
comp = -1
if len_str < 2:
raise ValueError('ISO time too short')
has_sep = False
while pos < len_str and comp < 5:
comp += 1
if timestr[pos:pos + 1] in b'-+Zz':
# Detect time zone boundary
components[-1] = self._parse_tzstr(timestr[pos:])
pos = len_str
break
if comp == 1 and timestr[pos:pos+1] == self._TIME_SEP:
has_sep = True
pos += 1
elif comp == 2 and has_sep:
if timestr[pos:pos+1] != self._TIME_SEP:
raise ValueError('Inconsistent use of colon separator')
pos += 1
if comp < 3:
# Hour, minute, second
components[comp] = int(timestr[pos:pos + 2])
pos += 2
if comp == 3:
# Fraction of a second
frac = self._FRACTION_REGEX.match(timestr[pos:])
if not frac:
continue
us_str = frac.group(1)[:6] # Truncate to microseconds
components[comp] = int(us_str) * 10**(6 - len(us_str))
pos += len(frac.group())
if pos < len_str:
raise ValueError('Unused components in ISO string')
if components[0] == 24:
# Standard supports 00:00 and 24:00 as representations of midnight
if any(component != 0 for component in components[1:4]):
raise ValueError('Hour may only be 24 at 24:00:00.000')
return components
def _parse_tzstr(self, tzstr, zero_as_utc=True):
if tzstr == b'Z' or tzstr == b'z':
return tz.UTC
if len(tzstr) not in {3, 5, 6}:
raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters')
if tzstr[0:1] == b'-':
mult = -1
elif tzstr[0:1] == b'+':
mult = 1
else:
raise ValueError('Time zone offset requires sign')
hours = int(tzstr[1:3])
if len(tzstr) == 3:
minutes = 0
else:
minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):])
if zero_as_utc and hours == 0 and minutes == 0:
return tz.UTC
else:
if minutes > 59:
raise ValueError('Invalid minutes in time zone offset')
if hours > 23:
raise ValueError('Invalid hours in time zone offset')
return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60)
DEFAULT_ISOPARSER = isoparser()
isoparse = DEFAULT_ISOPARSER.isoparse
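# Editor's usage sketch (illustrative; guarded so importing stays
# side-effect free):
if __name__ == "__main__":
    print(isoparse("2018-04-29T17:45:25Z"))              # aware datetime, UTC
    print(isoparse("2014-W18"))                          # ISO week -> that week's Monday
    print(DEFAULT_ISOPARSER.parse_isodate("2014-04-11")) # datetime.date(2014, 4, 11)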

View File

@@ -0,0 +1,599 @@
# -*- coding: utf-8 -*-
import datetime
import calendar
import operator
from math import copysign
from ..six import integer_types
from warnings import warn
from ._common import weekday
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class relativedelta(object):
"""
The relativedelta type is designed to be applied to an existing datetime and
can replace specific components of that datetime, or represents an interval
of time.
It is based on the specification of the excellent work done by M.-A. Lemburg
in his
`mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
However, notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
There are two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes::
relativedelta(datetime1, datetime2)
The second one is passing it any number of the following keyword arguments::
relativedelta(arg1=x,arg2=y,arg3=z...)
year, month, day, hour, minute, second, microsecond:
Absolute information (argument is singular); adding or subtracting a
relativedelta with absolute information does not perform an arithmetic
operation, but rather REPLACES the corresponding value in the
original datetime with the value(s) in relativedelta.
years, months, weeks, days, hours, minutes, seconds, microseconds:
Relative information, may be negative (argument is plural); adding
or subtracting a relativedelta with relative information performs
the corresponding arithmetic operation on the original datetime value
with the information in the relativedelta.
weekday:
One of the weekday instances (MO, TU, etc) available in the
relativedelta module. These instances may receive a parameter N,
specifying the Nth weekday, which could be positive or negative
(like MO(+1) or MO(-2)). Not specifying it is the same as specifying
+1. You can also use an integer, where 0=MO. This argument is always
relative, e.g. if the calculated date is already Monday, using MO(1)
or MO(-1) won't change the day. To effectively make it absolute, use
it in combination with the day argument (e.g. day=1, MO(1) for first
Monday of the month).
leapdays:
Will add given days to the date found, if the year is a leap
year and the date found is after February 28th.
yearday, nlyearday:
Set the yearday or the non-leap year day (jump leap days).
These are converted to day/month/leapdays information.
There are relative and absolute forms of the keyword
arguments. The plural is relative, and the singular is
absolute. For each argument in the order below, the absolute form
is applied first (by setting each attribute to that value) and
then the relative form (by adding the value to the attribute).
The order of attributes considered when this relativedelta is
added to a datetime is:
1. Year
2. Month
3. Day
4. Hours
5. Minutes
6. Seconds
7. Microseconds
Finally, weekday is applied, using the rule described above.
For example
>>> from datetime import datetime
>>> from dateutil.relativedelta import relativedelta, MO
>>> dt = datetime(2018, 4, 9, 13, 37, 0)
>>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
>>> dt + delta
datetime.datetime(2018, 4, 2, 14, 37)
First, the day is set to 1 (the first of the month), then 25 hours
are added, to get to the 2nd day and 14th hour, finally the
weekday is applied, but since the 2nd is already a Monday there is
no effect.
"""
def __init__(self, dt1=None, dt2=None,
years=0, months=0, days=0, leapdays=0, weeks=0,
hours=0, minutes=0, seconds=0, microseconds=0,
year=None, month=None, day=None, weekday=None,
yearday=None, nlyearday=None,
hour=None, minute=None, second=None, microsecond=None):
if dt1 and dt2:
# datetime is a subclass of date. So both must be date
if not (isinstance(dt1, datetime.date) and
isinstance(dt2, datetime.date)):
raise TypeError("relativedelta only diffs datetime/date")
# We allow two dates, or two datetimes, so we coerce them to be
# of the same type
if (isinstance(dt1, datetime.datetime) !=
isinstance(dt2, datetime.datetime)):
if not isinstance(dt1, datetime.datetime):
dt1 = datetime.datetime.fromordinal(dt1.toordinal())
elif not isinstance(dt2, datetime.datetime):
dt2 = datetime.datetime.fromordinal(dt2.toordinal())
self.years = 0
self.months = 0
self.days = 0
self.leapdays = 0
self.hours = 0
self.minutes = 0
self.seconds = 0
self.microseconds = 0
self.year = None
self.month = None
self.day = None
self.weekday = None
self.hour = None
self.minute = None
self.second = None
self.microsecond = None
self._has_time = 0
# Get year / month delta between the two
months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
self._set_months(months)
# Remove the year/month delta so the timedelta is just well-defined
# time units (seconds, days and microseconds)
dtm = self.__radd__(dt2)
# If we've overshot our target, make an adjustment
if dt1 < dt2:
compare = operator.gt
increment = 1
else:
compare = operator.lt
increment = -1
while compare(dt1, dtm):
months += increment
self._set_months(months)
dtm = self.__radd__(dt2)
# Get the timedelta between the "months-adjusted" date and dt1
delta = dt1 - dtm
self.seconds = delta.seconds + delta.days * 86400
self.microseconds = delta.microseconds
else:
# Check for non-integer values in integer-only quantities
if any(x is not None and x != int(x) for x in (years, months)):
raise ValueError("Non-integer years and months are "
"ambiguous and not currently supported.")
# Relative information
self.years = int(years)
self.months = int(months)
self.days = days + weeks * 7
self.leapdays = leapdays
self.hours = hours
self.minutes = minutes
self.seconds = seconds
self.microseconds = microseconds
# Absolute information
self.year = year
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
if any(x is not None and int(x) != x
for x in (year, month, day, hour,
minute, second, microsecond)):
# For now we'll deprecate floats - later it'll be an error.
warn("Non-integer value passed as absolute information. " +
"This is not a well-defined condition and will raise " +
"errors in future versions.", DeprecationWarning)
if isinstance(weekday, integer_types):
self.weekday = weekdays[weekday]
else:
self.weekday = weekday
yday = 0
if nlyearday:
yday = nlyearday
elif yearday:
yday = yearday
if yearday > 59:
self.leapdays = -1
if yday:
ydayidx = [31, 59, 90, 120, 151, 181, 212,
243, 273, 304, 334, 366]
for idx, ydays in enumerate(ydayidx):
if yday <= ydays:
self.month = idx+1
if idx == 0:
self.day = yday
else:
self.day = yday-ydayidx[idx-1]
break
else:
raise ValueError("invalid year day (%d)" % yday)
self._fix()
def _fix(self):
if abs(self.microseconds) > 999999:
s = _sign(self.microseconds)
div, mod = divmod(self.microseconds * s, 1000000)
self.microseconds = mod * s
self.seconds += div * s
if abs(self.seconds) > 59:
s = _sign(self.seconds)
div, mod = divmod(self.seconds * s, 60)
self.seconds = mod * s
self.minutes += div * s
if abs(self.minutes) > 59:
s = _sign(self.minutes)
div, mod = divmod(self.minutes * s, 60)
self.minutes = mod * s
self.hours += div * s
if abs(self.hours) > 23:
s = _sign(self.hours)
div, mod = divmod(self.hours * s, 24)
self.hours = mod * s
self.days += div * s
if abs(self.months) > 11:
s = _sign(self.months)
div, mod = divmod(self.months * s, 12)
self.months = mod * s
self.years += div * s
if (self.hours or self.minutes or self.seconds or self.microseconds
or self.hour is not None or self.minute is not None or
self.second is not None or self.microsecond is not None):
self._has_time = 1
else:
self._has_time = 0
@property
def weeks(self):
return int(self.days / 7.0)
@weeks.setter
def weeks(self, value):
self.days = self.days - (self.weeks * 7) + value * 7
def _set_months(self, months):
self.months = months
if abs(self.months) > 11:
s = _sign(self.months)
div, mod = divmod(self.months * s, 12)
self.months = mod * s
self.years = div * s
else:
self.years = 0
def normalized(self):
"""
Return a version of this object represented entirely using integer
values for the relative attributes.
>>> relativedelta(days=1.5, hours=2).normalized()
relativedelta(days=+1, hours=+14)
:return:
Returns a :class:`dateutil.relativedelta.relativedelta` object.
"""
# Cascade remainders down (rounding each to roughly nearest microsecond)
days = int(self.days)
hours_f = round(self.hours + 24 * (self.days - days), 11)
hours = int(hours_f)
minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
minutes = int(minutes_f)
seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
seconds = int(seconds_f)
microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
# Constructor carries overflow back up with call to _fix()
return self.__class__(years=self.years, months=self.months,
days=days, hours=hours, minutes=minutes,
seconds=seconds, microseconds=microseconds,
leapdays=self.leapdays, year=self.year,
month=self.month, day=self.day,
weekday=self.weekday, hour=self.hour,
minute=self.minute, second=self.second,
microsecond=self.microsecond)
def __add__(self, other):
if isinstance(other, relativedelta):
return self.__class__(years=other.years + self.years,
months=other.months + self.months,
days=other.days + self.days,
hours=other.hours + self.hours,
minutes=other.minutes + self.minutes,
seconds=other.seconds + self.seconds,
microseconds=(other.microseconds +
self.microseconds),
leapdays=other.leapdays or self.leapdays,
year=(other.year if other.year is not None
else self.year),
month=(other.month if other.month is not None
else self.month),
day=(other.day if other.day is not None
else self.day),
weekday=(other.weekday if other.weekday is not None
else self.weekday),
hour=(other.hour if other.hour is not None
else self.hour),
minute=(other.minute if other.minute is not None
else self.minute),
second=(other.second if other.second is not None
else self.second),
microsecond=(other.microsecond if other.microsecond
is not None else
self.microsecond))
if isinstance(other, datetime.timedelta):
return self.__class__(years=self.years,
months=self.months,
days=self.days + other.days,
hours=self.hours,
minutes=self.minutes,
seconds=self.seconds + other.seconds,
microseconds=self.microseconds + other.microseconds,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
if not isinstance(other, datetime.date):
return NotImplemented
elif self._has_time and not isinstance(other, datetime.datetime):
other = datetime.datetime.fromordinal(other.toordinal())
year = (self.year or other.year)+self.years
month = self.month or other.month
if self.months:
assert 1 <= abs(self.months) <= 12
month += self.months
if month > 12:
year += 1
month -= 12
elif month < 1:
year -= 1
month += 12
day = min(calendar.monthrange(year, month)[1],
self.day or other.day)
repl = {"year": year, "month": month, "day": day}
for attr in ["hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
repl[attr] = value
days = self.days
if self.leapdays and month > 2 and calendar.isleap(year):
days += self.leapdays
ret = (other.replace(**repl)
+ datetime.timedelta(days=days,
hours=self.hours,
minutes=self.minutes,
seconds=self.seconds,
microseconds=self.microseconds))
if self.weekday:
weekday, nth = self.weekday.weekday, self.weekday.n or 1
jumpdays = (abs(nth) - 1) * 7
if nth > 0:
jumpdays += (7 - ret.weekday() + weekday) % 7
else:
jumpdays += (ret.weekday() - weekday) % 7
jumpdays *= -1
ret += datetime.timedelta(days=jumpdays)
return ret
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
return self.__neg__().__radd__(other)
def __sub__(self, other):
if not isinstance(other, relativedelta):
return NotImplemented # In case the other object defines __rsub__
return self.__class__(years=self.years - other.years,
months=self.months - other.months,
days=self.days - other.days,
hours=self.hours - other.hours,
minutes=self.minutes - other.minutes,
seconds=self.seconds - other.seconds,
microseconds=self.microseconds - other.microseconds,
leapdays=self.leapdays or other.leapdays,
year=(self.year if self.year is not None
else other.year),
month=(self.month if self.month is not None else
other.month),
day=(self.day if self.day is not None else
other.day),
weekday=(self.weekday if self.weekday is not None else
other.weekday),
hour=(self.hour if self.hour is not None else
other.hour),
minute=(self.minute if self.minute is not None else
other.minute),
second=(self.second if self.second is not None else
other.second),
microsecond=(self.microsecond if self.microsecond
is not None else
other.microsecond))
def __abs__(self):
return self.__class__(years=abs(self.years),
months=abs(self.months),
days=abs(self.days),
hours=abs(self.hours),
minutes=abs(self.minutes),
seconds=abs(self.seconds),
microseconds=abs(self.microseconds),
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __neg__(self):
return self.__class__(years=-self.years,
months=-self.months,
days=-self.days,
hours=-self.hours,
minutes=-self.minutes,
seconds=-self.seconds,
microseconds=-self.microseconds,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __bool__(self):
return not (not self.years and
not self.months and
not self.days and
not self.hours and
not self.minutes and
not self.seconds and
not self.microseconds and
not self.leapdays and
self.year is None and
self.month is None and
self.day is None and
self.weekday is None and
self.hour is None and
self.minute is None and
self.second is None and
self.microsecond is None)
# Compatibility with Python 2.x
__nonzero__ = __bool__
def __mul__(self, other):
try:
f = float(other)
except TypeError:
return NotImplemented
return self.__class__(years=int(self.years * f),
months=int(self.months * f),
days=int(self.days * f),
hours=int(self.hours * f),
minutes=int(self.minutes * f),
seconds=int(self.seconds * f),
microseconds=int(self.microseconds * f),
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
__rmul__ = __mul__
def __eq__(self, other):
if not isinstance(other, relativedelta):
return NotImplemented
if self.weekday or other.weekday:
if not self.weekday or not other.weekday:
return False
if self.weekday.weekday != other.weekday.weekday:
return False
n1, n2 = self.weekday.n, other.weekday.n
if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
return False
return (self.years == other.years and
self.months == other.months and
self.days == other.days and
self.hours == other.hours and
self.minutes == other.minutes and
self.seconds == other.seconds and
self.microseconds == other.microseconds and
self.leapdays == other.leapdays and
self.year == other.year and
self.month == other.month and
self.day == other.day and
self.hour == other.hour and
self.minute == other.minute and
self.second == other.second and
self.microsecond == other.microsecond)
def __hash__(self):
return hash((
self.weekday,
self.years,
self.months,
self.days,
self.hours,
self.minutes,
self.seconds,
self.microseconds,
self.leapdays,
self.year,
self.month,
self.day,
self.hour,
self.minute,
self.second,
self.microsecond,
))
def __ne__(self, other):
return not self.__eq__(other)
def __div__(self, other):
try:
reciprocal = 1 / float(other)
except TypeError:
return NotImplemented
return self.__mul__(reciprocal)
__truediv__ = __div__
def __repr__(self):
l = []
for attr in ["years", "months", "days", "leapdays",
"hours", "minutes", "seconds", "microseconds"]:
value = getattr(self, attr)
if value:
l.append("{attr}={value:+g}".format(attr=attr, value=value))
for attr in ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
l.append("{attr}={value}".format(attr=attr, value=repr(value)))
return "{classname}({attrs})".format(classname=self.__class__.__name__,
attrs=", ".join(l))
def _sign(x):
return int(copysign(1, x))
# vim:ts=4:sw=4:et
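# --- Usage sketch (editor's addition, not part of the vendored file) ---
# Contrast of the absolute (singular) and relative (plural) keyword forms
# documented in the class docstring above.
# >>> from datetime import datetime
# >>> from dateutil.relativedelta import relativedelta, FR
# >>> dt = datetime(2003, 9, 17)               # a Wednesday
# >>> dt + relativedelta(months=1)             # relative: arithmetic
# datetime.datetime(2003, 10, 17, 0, 0)
# >>> dt + relativedelta(month=1)              # absolute: replacement
# datetime.datetime(2003, 1, 17, 0, 0)
# >>> dt + relativedelta(weekday=FR)           # next Friday (or same day)
# datetime.datetime(2003, 9, 19, 0, 0)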

File diff suppressed because it is too large

View File

@@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-
from .tz import *
from .tz import __doc__
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz",
"enfold", "datetime_ambiguous", "datetime_exists",
"resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"]
class DeprecatedTzFormatWarning(Warning):
"""Warning raised when time zones are parsed from deprecated formats."""

View File

@@ -0,0 +1,419 @@
from ...six import PY2
from functools import wraps
from datetime import datetime, timedelta, tzinfo
ZERO = timedelta(0)
__all__ = ['tzname_in_python2', 'enfold']
def tzname_in_python2(namefunc):
"""Change unicode output into bytestrings in Python 2
tzname() API changed in Python 3. It used to return bytes, but was changed
to unicode strings
"""
if PY2:
@wraps(namefunc)
def adjust_encoding(*args, **kwargs):
name = namefunc(*args, **kwargs)
if name is not None:
name = name.encode()
return name
return adjust_encoding
else:
return namefunc
# The following is adapted from Alexander Belopolsky's tz library
# https://github.com/abalkin/tz
if hasattr(datetime, 'fold'):
# Python 3.6+ has the ``fold`` attribute natively
def enfold(dt, fold=1):
"""
Provides a unified interface for assigning the ``fold`` attribute to
datetimes both before and after the implementation of PEP-495.
:param fold:
The value for the ``fold`` attribute in the returned datetime. This
should be either 0 or 1.
:return:
Returns an object for which ``getattr(dt, 'fold', 0)`` returns
``fold`` for all versions of Python. In versions prior to
Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
subclass of :py:class:`datetime.datetime` with the ``fold``
attribute added, if ``fold`` is 1.
.. versionadded:: 2.6.0
"""
return dt.replace(fold=fold)
else:
class _DatetimeWithFold(datetime):
"""
This is a class designed to provide a PEP 495-compliant interface for
Python versions before 3.6. It is used only for dates in a fold, so
the ``fold`` attribute is fixed at ``1``.
.. versionadded:: 2.6.0
"""
__slots__ = ()
def replace(self, *args, **kwargs):
"""
Return a datetime with the same attributes, except for those
attributes given new values by whichever keyword arguments are
specified. Note that tzinfo=None can be specified to create a naive
datetime from an aware datetime with no conversion of date and time
data.
This is reimplemented in ``_DatetimeWithFold`` because pypy3 will
return a ``datetime.datetime`` even if ``fold`` is unchanged.
"""
argnames = (
'year', 'month', 'day', 'hour', 'minute', 'second',
'microsecond', 'tzinfo'
)
for arg, argname in zip(args, argnames):
if argname in kwargs:
raise TypeError('Duplicate argument: {}'.format(argname))
kwargs[argname] = arg
for argname in argnames:
if argname not in kwargs:
kwargs[argname] = getattr(self, argname)
dt_class = self.__class__ if kwargs.get('fold', 1) else datetime
return dt_class(**kwargs)
@property
def fold(self):
return 1
def enfold(dt, fold=1):
"""
Provides a unified interface for assigning the ``fold`` attribute to
datetimes both before and after the implementation of PEP-495.
:param fold:
The value for the ``fold`` attribute in the returned datetime. This
should be either 0 or 1.
:return:
Returns an object for which ``getattr(dt, 'fold', 0)`` returns
``fold`` for all versions of Python. In versions prior to
Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
subclass of :py:class:`datetime.datetime` with the ``fold``
attribute added, if ``fold`` is 1.
.. versionadded:: 2.6.0
"""
if getattr(dt, 'fold', 0) == fold:
return dt
args = dt.timetuple()[:6]
args += (dt.microsecond, dt.tzinfo)
if fold:
return _DatetimeWithFold(*args)
else:
return datetime(*args)
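# --- Sketch (editor's note, not part of the vendored file) ---
# enfold() marks the second occurrence of an ambiguous wall time, e.g. the
# repeated hour of a DST "fall back". On Python 3.6+ with system tzdata:
# >>> from datetime import datetime
# >>> from dateutil import tz
# >>> NYC = tz.gettz('America/New_York')
# >>> dt = datetime(2020, 11, 1, 1, 30, tzinfo=NYC)
# >>> enfold(dt, fold=0).utcoffset()  # first occurrence, still EDT (UTC-4)
# datetime.timedelta(days=-1, seconds=72000)
# >>> enfold(dt, fold=1).utcoffset()  # second occurrence, EST (UTC-5)
# datetime.timedelta(days=-1, seconds=68400)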
def _validate_fromutc_inputs(f):
"""
The CPython version of ``fromutc`` checks that the input is a ``datetime``
object and that ``self`` is attached as its ``tzinfo``.
"""
@wraps(f)
def fromutc(self, dt):
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
return f(self, dt)
return fromutc
class _tzinfo(tzinfo):
"""
Base class for all ``dateutil`` ``tzinfo`` objects.
"""
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
dt = dt.replace(tzinfo=self)
wall_0 = enfold(dt, fold=0)
wall_1 = enfold(dt, fold=1)
same_offset = wall_0.utcoffset() == wall_1.utcoffset()
same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
return same_dt and not same_offset
def _fold_status(self, dt_utc, dt_wall):
"""
Determine the fold status of a "wall" datetime, given a representation
of the same datetime as a (naive) UTC datetime. This is calculated based
on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
datetimes, and that this offset is the actual number of hours separating
``dt_utc`` and ``dt_wall``.
:param dt_utc:
Representation of the datetime as UTC
:param dt_wall:
Representation of the datetime as "wall time". This parameter must
either have a `fold` attribute or have a fold-naive
:class:`datetime.tzinfo` attached, otherwise the calculation may
fail.
"""
if self.is_ambiguous(dt_wall):
delta_wall = dt_wall - dt_utc
_fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
else:
_fold = 0
return _fold
def _fold(self, dt):
return getattr(dt, 'fold', 0)
def _fromutc(self, dt):
"""
Given a timezone-aware datetime in a given timezone, calculates a
timezone-aware datetime in a new timezone.
Since this is the one time that we *know* we have an unambiguous
datetime object, we take this opportunity to determine whether the
datetime is ambiguous and in a "fold" state (e.g. if it's the first
occurrence, chronologically, of the ambiguous datetime).
:param dt:
A timezone-aware :class:`datetime.datetime` object.
"""
# Re-implement the algorithm from Python's datetime.py
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# The original datetime.py code assumes that `dst()` defaults to
# zero during ambiguous times. PEP 495 inverts this presumption, so
# for pre-PEP 495 versions of python, we need to tweak the algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
dt += delta
# Set fold=1 so we can default to being in the fold for
# ambiguous dates.
dtdst = enfold(dt, fold=1).dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst
@_validate_fromutc_inputs
def fromutc(self, dt):
"""
Given a timezone-aware datetime in a given timezone, calculates a
timezone-aware datetime in a new timezone.
Since this is the one time that we *know* we have an unambiguous
datetime object, we take this opportunity to determine whether the
datetime is ambiguous and in a "fold" state (e.g. if it's the first
occurrence, chronologically, of the ambiguous datetime).
:param dt:
A timezone-aware :class:`datetime.datetime` object.
"""
dt_wall = self._fromutc(dt)
# Calculate the fold status given the two datetimes.
_fold = self._fold_status(dt, dt_wall)
# Set the default fold value for ambiguous dates
return enfold(dt_wall, fold=_fold)
class tzrangebase(_tzinfo):
"""
This is an abstract base class for time zones represented by an annual
transition into and out of DST. Child classes should implement the following
methods:
* ``__init__(self, *args, **kwargs)``
* ``transitions(self, year)`` - this is expected to return a tuple of
datetimes representing the DST on and off transitions in standard
time.
A fully initialized ``tzrangebase`` subclass should also provide the
following attributes:
* ``hasdst``: Boolean whether or not the zone uses DST.
* ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
representing the respective UTC offsets.
* ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
abbreviations in DST and STD, respectively.
* ``_hasdst``: Whether or not the zone has DST.
.. versionadded:: 2.6.0
"""
def __init__(self):
raise NotImplementedError('tzrangebase is an abstract base class')
def utcoffset(self, dt):
isdst = self._isdst(dt)
if isdst is None:
return None
elif isdst:
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
isdst = self._isdst(dt)
if isdst is None:
return None
elif isdst:
return self._dst_base_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
else:
return self._std_abbr
def fromutc(self, dt):
""" Given a datetime in UTC, return local time """
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
# Get transitions - if there are none, fixed offset
transitions = self.transitions(dt.year)
if transitions is None:
return dt + self.utcoffset(dt)
# Get the transition times in UTC
dston, dstoff = transitions
dston -= self._std_offset
dstoff -= self._std_offset
utc_transitions = (dston, dstoff)
dt_utc = dt.replace(tzinfo=None)
isdst = self._naive_isdst(dt_utc, utc_transitions)
if isdst:
dt_wall = dt + self._dst_offset
else:
dt_wall = dt + self._std_offset
_fold = int(not isdst and self.is_ambiguous(dt_wall))
return enfold(dt_wall, fold=_fold)
def is_ambiguous(self, dt):
"""
Whether or not the "wall time" of a given datetime is ambiguous in this
zone.
:param dt:
A :py:class:`datetime.datetime`, naive or time zone aware.
:return:
Returns ``True`` if ambiguous, ``False`` otherwise.
.. versionadded:: 2.6.0
"""
if not self.hasdst:
return False
start, end = self.transitions(dt.year)
dt = dt.replace(tzinfo=None)
return (end <= dt < end + self._dst_base_offset)
def _isdst(self, dt):
if not self.hasdst:
return False
elif dt is None:
return None
transitions = self.transitions(dt.year)
if transitions is None:
return False
dt = dt.replace(tzinfo=None)
isdst = self._naive_isdst(dt, transitions)
# Handle ambiguous dates
if not isdst and self.is_ambiguous(dt):
return not self._fold(dt)
else:
return isdst
def _naive_isdst(self, dt, transitions):
dston, dstoff = transitions
dt = dt.replace(tzinfo=None)
if dston < dstoff:
isdst = dston <= dt < dstoff
else:
isdst = not dstoff <= dt < dston
return isdst
@property
def _dst_base_offset(self):
return self._dst_offset - self._std_offset
__hash__ = None
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "%s(...)" % self.__class__.__name__
__reduce__ = object.__reduce__
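# --- Usage sketch (editor's addition, not part of the vendored file) ---
# dateutil.tz.tzrange is a concrete tzrangebase subclass; its transitions()
# feeds the utcoffset()/dst()/fromutc() machinery defined above. With only
# abbreviations given, it applies the default US DST transition rules.
# >>> from datetime import datetime
# >>> from dateutil import tz
# >>> EASTERN = tz.tzrange('EST', -18000, 'EDT')
# >>> datetime(2020, 7, 1, tzinfo=EASTERN).tzname()
# 'EDT'
# >>> datetime(2020, 1, 1, tzinfo=EASTERN).tzname()
# 'EST'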

View File

@@ -0,0 +1,80 @@
from datetime import timedelta
import weakref
from collections import OrderedDict
from ...six.moves import _thread
class _TzSingleton(type):
def __init__(cls, *args, **kwargs):
cls.__instance = None
super(_TzSingleton, cls).__init__(*args, **kwargs)
def __call__(cls):
if cls.__instance is None:
cls.__instance = super(_TzSingleton, cls).__call__()
return cls.__instance
class _TzFactory(type):
def instance(cls, *args, **kwargs):
"""Alternate constructor that returns a fresh instance"""
return type.__call__(cls, *args, **kwargs)
class _TzOffsetFactory(_TzFactory):
def __init__(cls, *args, **kwargs):
cls.__instances = weakref.WeakValueDictionary()
cls.__strong_cache = OrderedDict()
cls.__strong_cache_size = 8
cls._cache_lock = _thread.allocate_lock()
def __call__(cls, name, offset):
if isinstance(offset, timedelta):
key = (name, offset.total_seconds())
else:
key = (name, offset)
instance = cls.__instances.get(key, None)
if instance is None:
instance = cls.__instances.setdefault(key,
cls.instance(name, offset))
# This lock may not be necessary in Python 3. See GH issue #901
with cls._cache_lock:
cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance)
# Remove an item if the strong cache is overpopulated
if len(cls.__strong_cache) > cls.__strong_cache_size:
cls.__strong_cache.popitem(last=False)
return instance
class _TzStrFactory(_TzFactory):
def __init__(cls, *args, **kwargs):
cls.__instances = weakref.WeakValueDictionary()
cls.__strong_cache = OrderedDict()
cls.__strong_cache_size = 8
cls.__cache_lock = _thread.allocate_lock()
def __call__(cls, s, posix_offset=False):
key = (s, posix_offset)
instance = cls.__instances.get(key, None)
if instance is None:
instance = cls.__instances.setdefault(key,
cls.instance(s, posix_offset))
# This lock may not be necessary in Python 3. See GH issue #901
with cls.__cache_lock:
cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance)
# Remove an item if the strong cache is overpopulated
if len(cls.__strong_cache) > cls.__strong_cache_size:
cls.__strong_cache.popitem(last=False)
return instance
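# --- Sketch (editor's note, not part of the vendored file) ---
# These factory metaclasses make equal-argument constructions share one
# cached instance:
# >>> from dateutil import tz
# >>> tz.tzoffset('BRST', -10800) is tz.tzoffset('BRST', -10800)
# True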

File diff suppressed because it is too large

View File

@@ -0,0 +1,370 @@
# -*- coding: utf-8 -*-
"""
This module provides an interface to the native time zone data on Windows,
including :py:class:`datetime.tzinfo` implementations.
Attempting to import this module on a non-Windows platform will raise an
:py:obj:`ImportError`.
"""
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
from six.moves import winreg
from six import text_type
try:
import ctypes
from ctypes import wintypes
except ValueError:
# ValueError is raised on non-Windows systems for some horrible reason.
raise ImportError("Running tzwin on non-Windows system")
from ._common import tzrangebase
__all__ = ["tzwin", "tzwinlocal", "tzres"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
winreg.OpenKey(handle, TZKEYNAMENT).Close()
TZKEYNAME = TZKEYNAMENT
except WindowsError:
TZKEYNAME = TZKEYNAME9X
handle.Close()
return TZKEYNAME
TZKEYNAME = _settzkeyname()
class tzres(object):
"""
Class for accessing ``tzres.dll``, which contains timezone name related
resources.
.. versionadded:: 2.5.0
"""
p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char
def __init__(self, tzres_loc='tzres.dll'):
# Load the user32 DLL so we can load strings from tzres
user32 = ctypes.WinDLL('user32')
# Specify the LoadStringW function
user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
wintypes.UINT,
wintypes.LPWSTR,
ctypes.c_int)
self.LoadStringW = user32.LoadStringW
self._tzres = ctypes.WinDLL(tzres_loc)
self.tzres_loc = tzres_loc
def load_name(self, offset):
"""
Load a timezone name from a DLL offset (integer).
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.load_name(112))
Eastern Standard Time
:param offset:
A positive integer value referring to a string from the tzres dll.
.. note::
Offsets found in the registry are generally of the form
``@tzres.dll,-114``. The offset in this case is 114, not -114.
"""
resource = self.p_wchar()
lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
return resource[:nchar]
def name_from_string(self, tzname_str):
"""
Parse strings as returned from the Windows registry into the time zone
name as defined in the registry.
>>> from dateutil.tzwin import tzres
>>> tzr = tzres()
>>> print(tzr.name_from_string('@tzres.dll,-251'))
Dateline Daylight Time
>>> print(tzr.name_from_string('Eastern Standard Time'))
Eastern Standard Time
:param tzname_str:
A timezone name string as returned from a Windows registry key.
:return:
Returns the localized timezone string from tzres.dll if the string
is of the form `@tzres.dll,-offset`, else returns the input string.
"""
if not tzname_str.startswith('@'):
return tzname_str
name_splt = tzname_str.split(',-')
try:
offset = int(name_splt[1])
except (IndexError, ValueError):
raise ValueError("Malformed timezone string.")
return self.load_name(offset)
class tzwinbase(tzrangebase):
"""tzinfo class based on win32's timezones available in the registry."""
def __init__(self):
raise NotImplementedError('tzwinbase is an abstract base class')
def __eq__(self, other):
# Compare on all relevant dimensions, including name.
if not isinstance(other, tzwinbase):
return NotImplemented
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._stddayofweek == other._stddayofweek and
self._dstdayofweek == other._dstdayofweek and
self._stdweeknumber == other._stdweeknumber and
self._dstweeknumber == other._dstweeknumber and
self._stdhour == other._stdhour and
self._dsthour == other._dsthour and
self._stdminute == other._stdminute and
self._dstminute == other._dstminute and
self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr)
@staticmethod
def list():
"""Return a list of all time zones known to the system."""
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
result = [winreg.EnumKey(tzkey, i)
for i in range(winreg.QueryInfoKey(tzkey)[0])]
return result
def display(self):
"""
Return the display name of the time zone.
"""
return self._display
def transitions(self, year):
"""
For a given year, get the DST on and off transition times, expressed
always on the standard time side. For zones with no transitions, this
function returns ``None``.
:param year:
The year whose transitions you would like to query.
:return:
Returns a :class:`tuple` of :class:`datetime.datetime` objects,
``(dston, dstoff)`` for zones with an annual DST transition, or
``None`` for fixed offset zones.
"""
if not self.hasdst:
return None
dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
self._dsthour, self._dstminute,
self._dstweeknumber)
dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
self._stdhour, self._stdminute,
self._stdweeknumber)
# Ambiguous dates default to the STD side
dstoff -= self._dst_base_offset
return dston, dstoff
def _get_hasdst(self):
return self._dstmonth != 0
@property
def _dst_base_offset(self):
return self._dst_base_offset_
class tzwin(tzwinbase):
"""
Time zone object created from the zone info in the Windows registry
These are similar to :py:class:`dateutil.tz.tzrange` objects in that
the time zone data is provided in the format of a single offset rule
for either 0 or 2 time zone transitions per year.
:param name:
The name of a Windows time zone key, e.g. "Eastern Standard Time".
The full list of keys can be retrieved with :func:`tzwin.list`.
"""
def __init__(self, name):
self._name = name
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
with winreg.OpenKey(handle, tzkeyname) as tzkey:
keydict = valuestodict(tzkey)
self._std_abbr = keydict["Std"]
self._dst_abbr = keydict["Dlt"]
self._display = keydict["Display"]
# See http://ww1.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=3l16h", keydict["TZI"])
stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
dstoffset = stdoffset-tup[2] # + DaylightBias * -1
self._std_offset = datetime.timedelta(minutes=stdoffset)
self._dst_offset = datetime.timedelta(minutes=dstoffset)
# for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[4:9]
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[12:17]
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = self._get_hasdst()
def __repr__(self):
return "tzwin(%s)" % repr(self._name)
def __reduce__(self):
return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
"""
Class representing the local time zone information in the Windows registry
While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time`
module) to retrieve time zone information, ``tzwinlocal`` retrieves the
rules directly from the Windows registry and creates an object like
:class:`dateutil.tz.tzwin`.
Because Windows does not have an equivalent of :func:`time.tzset`, on
Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the
time zone settings *at the time that the process was started*, meaning
changes to the machine's time zone settings during the run of a program
on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`.
Because ``tzwinlocal`` reads the registry directly, it is unaffected by
this issue.
"""
def __init__(self):
with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
keydict = valuestodict(tzlocalkey)
self._std_abbr = keydict["StandardName"]
self._dst_abbr = keydict["DaylightName"]
try:
tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
sn=self._std_abbr)
with winreg.OpenKey(handle, tzkeyname) as tzkey:
_keydict = valuestodict(tzkey)
self._display = _keydict["Display"]
except OSError:
self._display = None
stdoffset = -keydict["Bias"]-keydict["StandardBias"]
dstoffset = stdoffset-keydict["DaylightBias"]
self._std_offset = datetime.timedelta(minutes=stdoffset)
self._dst_offset = datetime.timedelta(minutes=dstoffset)
# For reasons unclear, in this particular key, the day of week has been
# moved to the END of the SYSTEMTIME structure.
tup = struct.unpack("=8h", keydict["StandardStart"])
(self._stdmonth,
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[1:5]
self._stddayofweek = tup[7]
tup = struct.unpack("=8h", keydict["DaylightStart"])
(self._dstmonth,
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[1:5]
self._dstdayofweek = tup[7]
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = self._get_hasdst()
def __repr__(self):
return "tzwinlocal()"
def __str__(self):
# str will return the standard name, not the daylight name.
return "tzwinlocal(%s)" % repr(self._std_abbr)
def __reduce__(self):
return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
""" dayofweek == 0 means Sunday, whichweek 5 means last instance """
first = datetime.datetime(year, month, 1, hour, minute)
# This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6),
# because 7 % 7 = 0
weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)
wd = weekdayone + ((whichweek - 1) * ONEWEEK)
if (wd.month != month):
wd -= ONEWEEK
return wd
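# --- Sketch (editor's note, not part of the vendored file) ---
# Example: the second Sunday of March 2021 at 02:00, i.e. the US DST-on
# transition (dayofweek=0 is Sunday here, per the docstring above):
# >>> picknthweekday(2021, 3, 0, 2, 0, 2)
# datetime.datetime(2021, 3, 14, 2, 0)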
def valuestodict(key):
"""Convert a registry key's values to a dictionary."""
dout = {}
size = winreg.QueryInfoKey(key)[1]
tz_res = None
for i in range(size):
key_name, value, dtype = winreg.EnumValue(key, i)
if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN:
# If it's a DWORD (32-bit integer), it's stored as unsigned - convert
# that to a proper signed integer
if value & (1 << 31):
value = value - (1 << 32)
elif dtype == winreg.REG_SZ:
# If it's a reference to the tzres DLL, load the actual string
if value.startswith('@tzres'):
tz_res = tz_res or tzres()
value = tz_res.name_from_string(value)
value = value.rstrip('\x00') # Remove trailing nulls
dout[key_name] = value
return dout
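# --- Usage sketch (editor's addition, not part of the vendored file;
# Windows-only, and the output depends on the local registry) ---
# >>> from dateutil.tz import win
# >>> win.tzwin.list()[:3]   # registry key names usable as tzwin(...) input
# >>> win.tzwin('Eastern Standard Time').display()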

View File

@@ -0,0 +1,2 @@
# tzwin has moved to dateutil.tz.win
from .tz.win import *

View File

@@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
"""
This module offers general convenience and utility functions for dealing with
datetimes.
.. versionadded:: 2.7.0
"""
from __future__ import unicode_literals
from datetime import datetime, time
def today(tzinfo=None):
"""
Returns a :py:class:`datetime` representing the current day at midnight
:param tzinfo:
The time zone to attach (also used to determine the current day).
:return:
A :py:class:`datetime.datetime` object representing the current day
at midnight.
"""
dt = datetime.now(tzinfo)
return datetime.combine(dt.date(), time(0, tzinfo=tzinfo))
def default_tzinfo(dt, tzinfo):
"""
Sets the ``tzinfo`` parameter on naive datetimes only
This is useful for example when you are provided a datetime that may have
either an implicit or explicit time zone, such as when parsing a time zone
string.
.. doctest::
>>> from dateutil.tz import tzoffset
>>> from dateutil.parser import parse
>>> from dateutil.utils import default_tzinfo
>>> dflt_tz = tzoffset("EST", -18000)
>>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz))
2014-01-01 12:30:00+00:00
>>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz))
2014-01-01 12:30:00-05:00
:param dt:
The datetime on which to replace the time zone
:param tzinfo:
The :py:class:`datetime.tzinfo` subclass instance to assign to
``dt`` if (and only if) it is naive.
:return:
Returns an aware :py:class:`datetime.datetime`.
"""
if dt.tzinfo is not None:
return dt
else:
return dt.replace(tzinfo=tzinfo)
def within_delta(dt1, dt2, delta):
"""
Useful for comparing two datetimes that may have a negligible difference
to be considered equal.
"""
delta = abs(delta)
difference = dt1 - dt2
return -delta <= difference <= delta
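# --- Sketch (editor's note, not part of the vendored file) ---
# within_delta() treats two datetimes as equal when their difference is at
# most |delta| in either direction:
# >>> from datetime import datetime, timedelta
# >>> within_delta(datetime(2020, 1, 1, 12, 0, 0),
# ...              datetime(2020, 1, 1, 12, 0, 1), timedelta(seconds=1))
# True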

View File

@@ -0,0 +1,167 @@
# -*- coding: utf-8 -*-
import warnings
import json
from tarfile import TarFile
from pkgutil import get_data
from io import BytesIO
from dateutil.tz import tzfile as _tzfile
__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]
ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
METADATA_FN = 'METADATA'
class tzfile(_tzfile):
def __reduce__(self):
return (gettz, (self._filename,))
def getzoneinfofile_stream():
try:
return BytesIO(get_data(__name__, ZONEFILENAME))
except IOError as e: # TODO switch to FileNotFoundError?
warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
return None
class ZoneInfoFile(object):
def __init__(self, zonefile_stream=None):
if zonefile_stream is not None:
with TarFile.open(fileobj=zonefile_stream) as tf:
self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name)
for zf in tf.getmembers()
if zf.isfile() and zf.name != METADATA_FN}
# deal with links: They'll point to their parent object. Less
# waste of memory
links = {zl.name: self.zones[zl.linkname]
for zl in tf.getmembers() if
zl.islnk() or zl.issym()}
self.zones.update(links)
try:
metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
metadata_str = metadata_json.read().decode('UTF-8')
self.metadata = json.loads(metadata_str)
except KeyError:
# no metadata in tar file
self.metadata = None
else:
self.zones = {}
self.metadata = None
def get(self, name, default=None):
"""
Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
for retrieving zones from the zone dictionary.
:param name:
The name of the zone to retrieve. (Generally IANA zone names)
:param default:
The value to return in the event of a missing key.
.. versionadded:: 2.6.0
"""
return self.zones.get(name, default)
# The current API has gettz as a module function, although in fact it taps into
# a stateful class. So as a workaround for now, without changing the API, we
# will create a new "global" class instance the first time a user requests a
# timezone. Ugly, but adheres to the API.
#
# TODO: Remove after deprecation period.
_CLASS_ZONE_INSTANCE = []
def get_zonefile_instance(new_instance=False):
"""
This is a convenience function which provides a :class:`ZoneInfoFile`
instance using the data provided by the ``dateutil`` package. By default, it
caches a single instance of the ZoneInfoFile object and returns that.
:param new_instance:
If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and
used as the cached instance for the next call. Otherwise, new instances
are created only as necessary.
:return:
Returns a :class:`ZoneInfoFile` object.
.. versionadded:: 2.6
"""
if new_instance:
zif = None
else:
zif = getattr(get_zonefile_instance, '_cached_instance', None)
if zif is None:
zif = ZoneInfoFile(getzoneinfofile_stream())
get_zonefile_instance._cached_instance = zif
return zif
def gettz(name):
"""
This retrieves a time zone from the local zoneinfo tarball that is packaged
with dateutil.
:param name:
An IANA-style time zone name, as found in the zoneinfo file.
:return:
Returns a :class:`dateutil.tz.tzfile` time zone object.
.. warning::
It is generally inadvisable to use this function, and it is only
provided for API compatibility with earlier versions. This is *not*
equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
time zone based on the inputs, favoring system zoneinfo. This is ONLY
for accessing the dateutil-specific zoneinfo (which may be out of
date compared to the system zoneinfo).
.. deprecated:: 2.6
If you need to use a specific zoneinfofile over the system zoneinfo,
instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
:func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.
Use :func:`get_zonefile_instance` to retrieve an instance of the
dateutil-provided zoneinfo.
"""
warnings.warn("zoneinfo.gettz() will be removed in future versions, "
"to use the dateutil-provided zoneinfo files, instantiate a "
"ZoneInfoFile object and use ZoneInfoFile.zones.get() "
"instead. See the documentation for details.",
DeprecationWarning)
if len(_CLASS_ZONE_INSTANCE) == 0:
_CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
return _CLASS_ZONE_INSTANCE[0].zones.get(name)
def gettz_db_metadata():
""" Get the zonefile metadata
See `zonefile_metadata`_
:returns:
A dictionary with the database metadata
.. deprecated:: 2.6
See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
"""
warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
"versions, to use the dateutil-provided zoneinfo files, "
"ZoneInfoFile object and query the 'metadata' attribute "
"instead. See the documentation for details.",
DeprecationWarning)
if len(_CLASS_ZONE_INSTANCE) == 0:
_CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
return _CLASS_ZONE_INSTANCE[0].metadata
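# --- Usage sketch (editor's addition, not part of the vendored file) ---
# The non-deprecated path to the bundled zoneinfo data goes through
# get_zonefile_instance():
# >>> from dateutil.zoneinfo import get_zonefile_instance
# >>> zif = get_zonefile_instance()
# >>> nyc = zif.get('America/New_York')   # a tzfile instance, or None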

View File

@@ -0,0 +1,75 @@
import logging
import os
import tempfile
import shutil
import json
from subprocess import check_call, check_output
from tarfile import TarFile
from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME
def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
"""Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
filename is the timezone tarball from ``ftp.iana.org/tz``.
"""
tmpdir = tempfile.mkdtemp()
zonedir = os.path.join(tmpdir, "zoneinfo")
moduledir = os.path.dirname(__file__)
try:
with TarFile.open(filename) as tf:
for name in zonegroups:
tf.extract(name, tmpdir)
filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
_run_zic(zonedir, filepaths)
# write metadata file
with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
json.dump(metadata, f, indent=4, sort_keys=True)
target = os.path.join(moduledir, ZONEFILENAME)
with TarFile.open(target, "w:%s" % format) as tf:
for entry in os.listdir(zonedir):
entrypath = os.path.join(zonedir, entry)
tf.add(entrypath, entry)
finally:
shutil.rmtree(tmpdir)
def _run_zic(zonedir, filepaths):
"""Calls the ``zic`` compiler in a compatible way to get a "fat" binary.
Recent versions of ``zic`` default to ``-b slim``, while older versions
don't even have the ``-b`` option (but default to "fat" binaries). The
current version of dateutil does not support Version 2+ TZif files, which
causes problems when used in conjunction with "slim" binaries, so this
function is used to ensure that we always get a "fat" binary.
"""
try:
help_text = check_output(["zic", "--help"])
except OSError as e:
_print_on_nosuchfile(e)
raise
if b"-b " in help_text:
bloat_args = ["-b", "fat"]
else:
bloat_args = []
check_call(["zic"] + bloat_args + ["-d", zonedir] + filepaths)
def _print_on_nosuchfile(e):
"""Print helpful troubleshooting message
e is an exception raised by subprocess.check_call()
"""
if e.errno == 2:
logging.error(
"Could not find zic. Perhaps you need to install "
"libc-bin or some other package that provides it, "
"or it's not in your PATH?")

View File

@@ -0,0 +1,451 @@
# ######################### LICENSE ############################ #
# Copyright (c) 2005-2021, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see
https://github.com/micheles/decorator/blob/master/docs/documentation.md
for the documentation.
"""
import re
import sys
import inspect
import operator
import itertools
from contextlib import _GeneratorContextManager
from inspect import getfullargspec, iscoroutinefunction, isgeneratorfunction
__version__ = '5.1.1'
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
POS = inspect.Parameter.POSITIONAL_OR_KEYWORD
EMPTY = inspect.Parameter.empty
# this is not used anymore in the core, but kept for backward compatibility
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
# Atomic get-and-increment provided by the GIL
_compile_count = itertools.count()
# make pylint happy
args = varargs = varkw = defaults = kwonlyargs = kwonlydefaults = ()
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isroutine(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
# check existence of required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"""
Update the signature of func with the data in self
"""
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.__defaults__ = self.defaults
func.__kwdefaults__ = self.kwonlydefaults or None
func.__annotations__ = getattr(self, 'annotations', None)
try:
frame = sys._getframe(3)
except AttributeError: # for IronPython and similar implementations
callermodule = '?'
else:
callermodule = frame.f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"""
Make a new function from a given template and update the signature
"""
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.search(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline for old Pythons
src += '\n'
# Ensure each generated function has a unique filename for profilers
# (such as cProfile) that depend on the tuple of (<filename>,
# <definition line>, <function name>) being unique.
filename = '<decorator-gen-%d>' % next(self._compile_count)
try:
code = compile(src, filename, 'single')
exec(code, evaldict)
except Exception:
print('Error in generated code:', file=sys.stderr)
print(src, file=sys.stderr)
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an
attribute __source__ is added to the result. The attributes attrs
are added, if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
signature = rest[:-1] # strip a right parens
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
caller = evaldict.get('_call_') # when called from `decorate`
if caller and iscoroutinefunction(caller):
body = ('async def %(name)s(%(signature)s):\n' + ibody).replace(
'return', 'return await')
else:
body = 'def %(name)s(%(signature)s):\n' + ibody
return self.make(body, evaldict, addsource, **attrs)
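# --- Sketch (editor's note, not part of the vendored file) ---
# FunctionMaker.create builds a real function from a signature string and a
# body string:
# >>> f = FunctionMaker.create('f(x, y)', 'return x + y', {})
# >>> f(1, 2)
# 3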
def fix(args, kwargs, sig):
"""
Fix args and kwargs to be consistent with the signature
"""
ba = sig.bind(*args, **kwargs)
ba.apply_defaults() # needed for test_dan_schult
return ba.args, ba.kwargs
def decorate(func, caller, extras=(), kwsyntax=False):
"""
Decorates a function/generator/coroutine using a caller.
If kwsyntax is True calling the decorated functions with keyword
syntax will pass the named arguments inside the ``kw`` dictionary,
even if such arguments are positional, similarly to what functools.wraps
does. By default kwsyntax is False and the arguments are untouched.
"""
sig = inspect.signature(func)
if iscoroutinefunction(caller):
async def fun(*args, **kw):
if not kwsyntax:
args, kw = fix(args, kw, sig)
return await caller(func, *(extras + args), **kw)
elif isgeneratorfunction(caller):
def fun(*args, **kw):
if not kwsyntax:
args, kw = fix(args, kw, sig)
for res in caller(func, *(extras + args), **kw):
yield res
else:
def fun(*args, **kw):
if not kwsyntax:
args, kw = fix(args, kw, sig)
return caller(func, *(extras + args), **kw)
fun.__name__ = func.__name__
fun.__doc__ = func.__doc__
fun.__wrapped__ = func
fun.__signature__ = sig
fun.__qualname__ = func.__qualname__
# builtin functions like defaultdict.__setitem__ lack many attributes
try:
fun.__defaults__ = func.__defaults__
except AttributeError:
pass
try:
fun.__kwdefaults__ = func.__kwdefaults__
except AttributeError:
pass
try:
fun.__annotations__ = func.__annotations__
except AttributeError:
pass
try:
fun.__module__ = func.__module__
except AttributeError:
pass
try:
fun.__dict__.update(func.__dict__)
except AttributeError:
pass
return fun
def decoratorx(caller):
"""
A version of "decorator" implemented via "exec" and not via the
Signature object. Use this if you want to preserve the `.__code__`
object properties (https://github.com/micheles/decorator/issues/129).
"""
def dec(func):
return FunctionMaker.create(
func,
"return _call_(_func_, %(shortsignature)s)",
dict(_call_=caller, _func_=func),
__wrapped__=func, __qualname__=func.__qualname__)
return dec
def decorator(caller, _func=None, kwsyntax=False):
"""
decorator(caller) converts a caller function into a decorator
"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller, (), kwsyntax)
# else return a decorator function
sig = inspect.signature(caller)
dec_params = [p for p in sig.parameters.values() if p.kind is POS]
def dec(func=None, *args, **kw):
na = len(args) + 1
extras = args + tuple(kw.get(p.name, p.default)
for p in dec_params[na:]
if p.default is not EMPTY)
if func is None:
return lambda func: decorate(func, caller, extras, kwsyntax)
else:
return decorate(func, caller, extras, kwsyntax)
dec.__signature__ = sig.replace(parameters=dec_params)
dec.__name__ = caller.__name__
dec.__doc__ = caller.__doc__
dec.__wrapped__ = caller
dec.__qualname__ = caller.__qualname__
dec.__kwdefaults__ = getattr(caller, '__kwdefaults__', None)
dec.__dict__.update(caller.__dict__)
return dec
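# --- Illustrative usage sketch, not part of the vendored source. ---
# The docstring above says decorator(caller) converts a caller function
# into a decorator. Assuming this module is importable as `decorator`,
# a minimal tracing decorator could be built like this:
#
#   from decorator import decorator
#
#   @decorator
#   def trace(func, *args, **kw):
#       print('calling %s with args %s, %s' % (func.__name__, args, kw))
#       return func(*args, **kw)
#
#   @trace
#   def add(x, y):
#       return x + y
#
#   add(1, 2)  # prints the call details, then returns 3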
# ####################### contextmanager ####################### #
class ContextManager(_GeneratorContextManager):
def __init__(self, g, *a, **k):
_GeneratorContextManager.__init__(self, g, a, k)
def __call__(self, func):
def caller(f, *a, **k):
with self.__class__(self.func, *self.args, **self.kwds):
return f(*a, **k)
return decorate(func, caller)
_contextmanager = decorator(ContextManager)
def contextmanager(func):
# Enable Pylint config: contextmanager-decorators=decorator.contextmanager
return _contextmanager(func)
# ############################ dispatch_on ############################ #
def append(a, vancestors):
"""
Append ``a`` to the list of the virtual ancestors, unless it is already
included.
"""
add = True
for j, va in enumerate(vancestors):
if issubclass(va, a):
add = False
break
if issubclass(a, va):
vancestors[j] = a
add = False
if add:
vancestors.append(a)
# inspired from simplegeneric by P.J. Eby and functools.singledispatch
def dispatch_on(*dispatch_args):
"""
Factory of decorators turning a function into a generic function
dispatching on the given arguments.
"""
assert dispatch_args, 'No dispatch args passed'
dispatch_str = '(%s,)' % ', '.join(dispatch_args)
def check(arguments, wrong=operator.ne, msg=''):
"""Make sure one passes the expected number of arguments"""
if wrong(len(arguments), len(dispatch_args)):
raise TypeError('Expected %d arguments, got %d%s' %
(len(dispatch_args), len(arguments), msg))
def gen_func_dec(func):
"""Decorator turning a function into a generic function"""
# first check the dispatch arguments
argset = set(getfullargspec(func).args)
if not set(dispatch_args) <= argset:
raise NameError('Unknown dispatch arguments %s' % dispatch_str)
typemap = {}
def vancestors(*types):
"""
Get a list of sets of virtual ancestors for the given types
"""
check(types)
ras = [[] for _ in range(len(dispatch_args))]
for types_ in typemap:
for t, type_, ra in zip(types, types_, ras):
if issubclass(t, type_) and type_ not in t.mro():
append(type_, ra)
return [set(ra) for ra in ras]
def ancestors(*types):
"""
Get a list of virtual MROs, one for each type
"""
check(types)
lists = []
for t, vas in zip(types, vancestors(*types)):
n_vas = len(vas)
if n_vas > 1:
raise RuntimeError(
'Ambiguous dispatch for %s: %s' % (t, vas))
elif n_vas == 1:
va, = vas
mro = type('t', (t, va), {}).mro()[1:]
else:
mro = t.mro()
lists.append(mro[:-1]) # discard t and object
return lists
def register(*types):
"""
Decorator to register an implementation for the given types
"""
check(types)
def dec(f):
check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
typemap[types] = f
return f
return dec
def dispatch_info(*types):
"""
A utility to introspect the dispatch algorithm
"""
check(types)
lst = []
for anc in itertools.product(*ancestors(*types)):
lst.append(tuple(a.__name__ for a in anc))
return lst
def _dispatch(dispatch_args, *args, **kw):
types = tuple(type(arg) for arg in dispatch_args)
try: # fast path
f = typemap[types]
except KeyError:
pass
else:
return f(*args, **kw)
combinations = itertools.product(*ancestors(*types))
next(combinations) # the first one has been already tried
for types_ in combinations:
f = typemap.get(types_)
if f is not None:
return f(*args, **kw)
# else call the default implementation
return func(*args, **kw)
return FunctionMaker.create(
func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
dict(_f_=_dispatch), register=register, default=func,
typemap=typemap, vancestors=vancestors, ancestors=ancestors,
dispatch_info=dispatch_info, __wrapped__=func)
gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
return gen_func_dec
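# --- Illustrative usage sketch, not part of the vendored source. ---
# dispatch_on(...) returns a decorator that attaches a `register`
# attribute to the generic function, so per-type implementations can be
# added later. A minimal single-argument example, assuming the module is
# importable as `decorator`:
#
#   from decorator import dispatch_on
#
#   @dispatch_on('obj')
#   def describe(obj):
#       return 'unknown object'  # default implementation
#
#   @describe.register(int)
#   def describe_int(obj):
#       return 'an integer'
#
#   describe(1)    # -> 'an integer' (found in typemap via type(obj))
#   describe('x')  # -> 'unknown object' (falls back to the default)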


@@ -0,0 +1,137 @@
# -*- coding: utf-8 -*-
"""
=======
feedgen
=======
This module can be used to generate web feeds in both ATOM and RSS format.
It has support for extensions; for example, an extension to produce
podcasts is included.
:copyright: 2013 by Lars Kiesow
:license: FreeBSD and LGPL, see license.* for more details.
-------------
Create a Feed
-------------
To create a feed simply instantiate the FeedGenerator class and insert some
data::
>>> from feedgen.feed import FeedGenerator
>>> fg = FeedGenerator()
>>> fg.id('http://lernfunk.de/media/654321')
>>> fg.title('Some Testfeed')
>>> fg.author( {'name':'John Doe','email':'john@example.de'} )
>>> fg.link( href='http://example.com', rel='alternate' )
>>> fg.logo('http://ex.com/logo.jpg')
>>> fg.subtitle('This is a cool feed!')
>>> fg.link( href='http://larskiesow.de/test.atom', rel='self' )
>>> fg.language('en')
Note that for the methods which set fields that can occur more than once in
a feed you can use all of the following ways to provide data:
- Provide the data for that element as keyword arguments
- Provide the data for that element as dictionary
- Provide a list of dictionaries with the data for several elements
Example::
>>> fg.contributor(name='John Doe', email='jdoe@example.com' )
>>> fg.contributor({'name':'John Doe', 'email':'jdoe@example.com'})
>>> fg.contributor([{'name':'John', 'email':'jdoe@example.com'}, …])
-----------------
Generate the Feed
-----------------
After that you can generate both RSS or ATOM by calling the respective
method::
>>> atomfeed = fg.atom_str(pretty=True) # Get the ATOM feed as string
>>> rssfeed = fg.rss_str(pretty=True) # Get the RSS feed as string
>>> fg.atom_file('atom.xml') # Write the ATOM feed to a file
>>> fg.rss_file('rss.xml') # Write the RSS feed to a file
----------------
Add Feed Entries
----------------
To add entries (items) to a feed you need to create new FeedEntry objects
and append them to the list of entries in the FeedGenerator. The most
convenient way to go is to use the FeedGenerator itself for the
instantiation of the FeedEntry object::
>>> fe = fg.add_entry()
>>> fe.id('http://lernfunk.de/media/654321/1')
>>> fe.title('The First Episode')
The FeedGenerator's method add_entry(...), called without arguments,
will automatically generate a new FeedEntry object, append it to the
feed's internal list of entries and return it, so that additional data
can be added.
----------
Extensions
----------
The FeedGenerator supports extension to include additional data into the
XML structure of the feeds. Extensions can be loaded like this::
>>> fg.load_extension('someext', atom=True, rss=True)
This will try to load the extension “someext” from the file
`ext/someext.py`. It is required that `someext.py` contains a class named
“SomextExtension” which is required to have at least the two methods
`extend_rss(...)` and `extend_atom(...)`. Although not required, it is
strongly suggested to use BaseExtension from `ext/base.py` as superclass.
`load_extension('someext', ...)` will also try to load a class named
“SomextEntryExtension” for every entry of the feed. This class can be
located either in the same file as SomextExtension or in
`ext/someext_entry.py` which is suggested especially for large extensions.
The parameters `atom` and `rss` tell the FeedGenerator if the extensions
should only be used for either ATOM or RSS feeds. The default value for
both parameters is true which means that the extension would be used for
both kinds of feeds.
**Example: Producing a Podcast**
One extension already provided is the podcast extension. A podcast is an
RSS feed with some additional elements for iTunes.
To produce a podcast simply load the `podcast` extension::
>>> from feedgen.feed import FeedGenerator
>>> fg = FeedGenerator()
>>> fg.load_extension('podcast')
...
>>> fg.podcast.itunes_category('Technology', 'Podcasting')
...
>>> fg.rss_str(pretty=True)
>>> fg.rss_file('podcast.xml')
Of course the extension has to be loaded for the FeedEntry objects as
well, but this is done automatically by the FeedGenerator for every feed
entry if the extension is loaded for the whole feed. You can, however,
load an extension for a specific FeedEntry by calling
`load_extension(...)` on that entry. But this is a rather uncommon use.
Of course you can still produce a normal ATOM or RSS feed, even if you
have loaded some plugins, by temporarily disabling them during the feed
generation.
This can be done by calling the generating method with the keyword argument
`extensions` set to `False`.
---------------------
Testing the Generator
---------------------
You can test the module by simply executing::
$ python -m feedgen
"""


@@ -0,0 +1,145 @@
# -*- coding: utf-8 -*-
'''
feedgen
~~~~~~~
:copyright: 2013-2016, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
import sys
from feedgen.feed import FeedGenerator
USAGE = '''
Usage: python -m feedgen [OPTION]
Use one of the following options:
File options:
<file>.atom -- Generate ATOM test feed
<file>.rss -- Generate RSS test feed
Stdout options:
atom -- Generate ATOM test output
rss -- Generate RSS test output
podcast -- Generate Podcast test output
dc.atom -- Generate DC extension test output (atom format)
dc.rss -- Generate DC extension test output (rss format)
syndication.atom -- Generate syndication extension test output (atom format)
syndication.rss -- Generate syndication extension test output (rss format)
torrent -- Generate Torrent test output
'''
def print_enc(s):
'''Print function compatible with both Python 2 and Python 3, accepting
strings and byte arrays.
'''
if sys.version_info[0] >= 3:
print(s.decode('utf-8') if isinstance(s, bytes) else s)
else:
print(s)
def main():
if len(sys.argv) != 2 or not (
sys.argv[1].endswith('rss') or
sys.argv[1].endswith('atom') or
sys.argv[1] == 'torrent' or
sys.argv[1] == 'podcast'):
print(USAGE)
exit()
arg = sys.argv[1]
fg = FeedGenerator()
fg.id('http://lernfunk.de/_MEDIAID_123')
fg.title('Testfeed')
fg.author({'name': 'Lars Kiesow', 'email': 'lkiesow@uos.de'})
fg.link(href='http://example.com', rel='alternate')
fg.category(term='test')
fg.contributor(name='Lars Kiesow', email='lkiesow@uos.de')
fg.contributor(name='John Doe', email='jdoe@example.com')
fg.icon('http://ex.com/icon.jpg')
fg.logo('http://ex.com/logo.jpg')
fg.rights('cc-by')
fg.subtitle('This is a cool feed!')
fg.link(href='http://larskiesow.de/test.atom', rel='self')
fg.language('de')
fe = fg.add_entry()
fe.id('http://lernfunk.de/_MEDIAID_123#1')
fe.title('First Element')
fe.content('''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Tamen
aberramus a proposito, et, ne longius, prorsus, inquam, Piso, si
ista mala sunt, placet. Aut etiam, ut vestitum, sic sententiam
habeas aliam domesticam, aliam forensem, ut in fronte ostentatio
sit, intus veritas occultetur? Cum id fugiunt, re eadem defendunt,
quae Peripatetici, verba.''')
fe.summary(u'Lorem ipsum dolor sit amet, consectetur adipiscing elit…')
fe.link(href='http://example.com', rel='alternate')
fe.author(name='Lars Kiesow', email='lkiesow@uos.de')
if arg == 'atom':
print_enc(fg.atom_str(pretty=True))
elif arg == 'rss':
print_enc(fg.rss_str(pretty=True))
elif arg == 'podcast':
# Load the podcast extension. It will automatically be loaded for all
# entries in the feed, too. Thus also for our “fe”.
fg.load_extension('podcast')
fg.podcast.itunes_author('Lars Kiesow')
fg.podcast.itunes_category('Technology', 'Podcasting')
fg.podcast.itunes_explicit('no')
fg.podcast.itunes_complete('no')
fg.podcast.itunes_new_feed_url('http://example.com/new-feed.rss')
fg.podcast.itunes_owner('John Doe', 'john@example.com')
fg.podcast.itunes_summary('Lorem ipsum dolor sit amet, consectetur ' +
'adipiscing elit. Verba tu fingas et ea ' +
'dicas, quae non sentias?')
fe.podcast.itunes_author('Lars Kiesow')
print_enc(fg.rss_str(pretty=True))
elif arg == 'torrent':
fg.load_extension('torrent')
fe.link(href='http://example.com/torrent/debian-8-netint.iso.torrent',
rel='alternate',
type='application/x-bittorrent', length='1000')
fe.torrent.filename('debian-8.4.0-i386-netint.iso.torrent')
fe.torrent.infohash('7661229811ef32014879ceedcdf4a48f256c88ba')
fe.torrent.contentlength('331350016')
fe.torrent.seeds('789')
fe.torrent.peers('456')
fe.torrent.verified('123')
print_enc(fg.rss_str(pretty=True))
elif arg.startswith('dc.'):
fg.load_extension('dc')
fg.dc.dc_contributor('Lars Kiesow')
if arg.endswith('.atom'):
print_enc(fg.atom_str(pretty=True))
else:
print_enc(fg.rss_str(pretty=True))
elif arg.startswith('syndication'):
fg.load_extension('syndication')
fg.syndication.update_period('daily')
fg.syndication.update_frequency(2)
fg.syndication.update_base('2000-01-01T12:00+00:00')
if arg.endswith('.rss'):
print_enc(fg.rss_str(pretty=True))
else:
print_enc(fg.atom_str(pretty=True))
elif arg.endswith('atom'):
fg.atom_file(arg)
elif arg.endswith('rss'):
fg.rss_file(arg)
if __name__ == '__main__':
main()


@@ -0,0 +1,8 @@
# -*- coding: utf-8 -*-
import sys
if sys.version_info[0] >= 3:
string_types = str
else:
string_types = basestring # noqa: F821


@@ -0,0 +1,738 @@
# -*- coding: utf-8 -*-
'''
feedgen.entry
~~~~~~~~~~~~~
:copyright: 2013-2020, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
from datetime import datetime
from ..dateutil import parser as dateutil_parser
from ..dateutil import tz as dateutil_tz
import warnings
from lxml.etree import CDATA # nosec - adding CDATA entry is safe
from .compat import string_types
from .util import ensure_format, formatRFC2822, xml_fromstring, xml_elem
def _add_text_elm(entry, data, name):
"""Add a text subelement to an entry"""
if not data:
return
elm = xml_elem(name, entry)
type_ = data.get('type')
if data.get('src'):
if name != 'content':
raise ValueError("Only the 'content' element of an entry can "
"contain a 'src' attribute")
elm.attrib['src'] = data['src']
elif data.get(name):
# Surround xhtml with a div tag, parse it and embed it
if type_ == 'xhtml':
xhtml = '<div xmlns="http://www.w3.org/1999/xhtml">' \
+ data.get(name) + '</div>'
elm.append(xml_fromstring(xhtml))
elif type_ == 'CDATA':
elm.text = CDATA(data.get(name))
# Parse XML and embed it
elif type_ and (type_.endswith('/xml') or type_.endswith('+xml')):
elm.append(xml_fromstring(data[name]))
# Embed the text in escaped form
elif not type_ or type_.startswith('text') or type_ == 'html':
elm.text = data.get(name)
# Everything else should be included base64 encoded
else:
raise NotImplementedError(
'base64 encoded {} is not supported at the moment. '
'Pull requests adding support are welcome.'.format(name)
)
# Add type description of the content
if type_:
elm.attrib['type'] = type_
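# --- Illustrative note, not part of the vendored source. ---
# _add_text_elm dispatches on the 'type' key of the data dict; roughly:
#
#   {'content': '<p>hi</p>', 'type': 'html'}  -> escaped text content
#   {'content': 'hi', 'type': 'xhtml'}        -> wrapped in an xhtml div
#   {'content': 'hi', 'type': 'CDATA'}        -> CDATA section
#   {'src': 'http://example.com/x', 'type': 'text/html'}
#                              -> src attribute ('content' element only)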
class FeedEntry(object):
'''FeedEntry class representing an ATOM feed's entry node or an RSS
feed's item node.
'''
def __init__(self):
# ATOM
# required
self.__atom_id = None
self.__atom_title = None
self.__atom_updated = datetime.now(dateutil_tz.tzutc())
# recommended
self.__atom_author = None
self.__atom_content = None
self.__atom_link = None
self.__atom_summary = None
# optional
self.__atom_category = None
self.__atom_contributor = None
self.__atom_published = None
self.__atom_source = None
self.__atom_rights = None
# RSS
self.__rss_author = None
self.__rss_category = None
self.__rss_comments = None
self.__rss_description = None
self.__rss_content = None
self.__rss_enclosure = None
self.__rss_guid = {}
self.__rss_link = None
self.__rss_pubDate = None
self.__rss_source = None
self.__rss_title = None
# Extension list:
self.__extensions = {}
self.__extensions_register = {}
def atom_entry(self, extensions=True):
'''Create an ATOM entry and return it.'''
entry = xml_elem('entry')
if not (self.__atom_id and self.__atom_title and self.__atom_updated):
raise ValueError('Required fields not set')
id = xml_elem('id', entry)
id.text = self.__atom_id
title = xml_elem('title', entry)
title.text = self.__atom_title
updated = xml_elem('updated', entry)
updated.text = self.__atom_updated.isoformat()
# An entry must contain an alternate link if there is no content
# element.
if not self.__atom_content:
links = self.__atom_link or []
if not [l for l in links if l.get('rel') == 'alternate']:
raise ValueError('Entry must contain an alternate link or ' +
'a content element.')
# Add author elements
for a in self.__atom_author or []:
# Atom requires a name. Skip elements without.
if not a.get('name'):
continue
author = xml_elem('author', entry)
name = xml_elem('name', author)
name.text = a.get('name')
if a.get('email'):
email = xml_elem('email', author)
email.text = a.get('email')
if a.get('uri'):
uri = xml_elem('uri', author)
uri.text = a.get('uri')
_add_text_elm(entry, self.__atom_content, 'content')
for l in self.__atom_link or []:
link = xml_elem('link', entry, href=l['href'])
if l.get('rel'):
link.attrib['rel'] = l['rel']
if l.get('type'):
link.attrib['type'] = l['type']
if l.get('hreflang'):
link.attrib['hreflang'] = l['hreflang']
if l.get('title'):
link.attrib['title'] = l['title']
if l.get('length'):
link.attrib['length'] = l['length']
_add_text_elm(entry, self.__atom_summary, 'summary')
for c in self.__atom_category or []:
cat = xml_elem('category', entry, term=c['term'])
if c.get('scheme'):
cat.attrib['scheme'] = c['scheme']
if c.get('label'):
cat.attrib['label'] = c['label']
# Add contributor elements
for c in self.__atom_contributor or []:
# Atom requires a name. Skip elements without.
if not c.get('name'):
continue
contrib = xml_elem('contributor', entry)
name = xml_elem('name', contrib)
name.text = c.get('name')
if c.get('email'):
email = xml_elem('email', contrib)
email.text = c.get('email')
if c.get('uri'):
uri = xml_elem('uri', contrib)
uri.text = c.get('uri')
if self.__atom_published:
published = xml_elem('published', entry)
published.text = self.__atom_published.isoformat()
if self.__atom_rights:
rights = xml_elem('rights', entry)
rights.text = self.__atom_rights
if self.__atom_source:
source = xml_elem('source', entry)
if self.__atom_source.get('title'):
source_title = xml_elem('title', source)
source_title.text = self.__atom_source['title']
if self.__atom_source.get('link'):
xml_elem('link', source, href=self.__atom_source['link'])
if extensions:
for ext in self.__extensions.values() or []:
if ext.get('atom'):
ext['inst'].extend_atom(entry)
return entry
def rss_entry(self, extensions=True):
'''Create an RSS item and return it.'''
entry = xml_elem('item')
if not (self.__rss_title or
self.__rss_description or
self.__rss_content):
raise ValueError('Required fields not set')
if self.__rss_title:
title = xml_elem('title', entry)
title.text = self.__rss_title
if self.__rss_link:
link = xml_elem('link', entry)
link.text = self.__rss_link
if self.__rss_description and self.__rss_content:
description = xml_elem('description', entry)
description.text = self.__rss_description
XMLNS_CONTENT = 'http://purl.org/rss/1.0/modules/content/'
content = xml_elem('{%s}encoded' % XMLNS_CONTENT, entry)
content.text = CDATA(self.__rss_content['content']) \
if self.__rss_content.get('type', '') == 'CDATA' \
else self.__rss_content['content']
elif self.__rss_description:
description = xml_elem('description', entry)
description.text = self.__rss_description
elif self.__rss_content:
description = xml_elem('description', entry)
description.text = CDATA(self.__rss_content['content']) \
if self.__rss_content.get('type', '') == 'CDATA' \
else self.__rss_content['content']
for a in self.__rss_author or []:
author = xml_elem('author', entry)
author.text = a
if self.__rss_guid.get('guid'):
guid = xml_elem('guid', entry)
guid.text = self.__rss_guid['guid']
permaLink = str(self.__rss_guid.get('permalink', False)).lower()
guid.attrib['isPermaLink'] = permaLink
for cat in self.__rss_category or []:
category = xml_elem('category', entry)
category.text = cat['value']
if cat.get('domain'):
category.attrib['domain'] = cat['domain']
if self.__rss_comments:
comments = xml_elem('comments', entry)
comments.text = self.__rss_comments
if self.__rss_enclosure:
enclosure = xml_elem('enclosure', entry)
enclosure.attrib['url'] = self.__rss_enclosure['url']
enclosure.attrib['length'] = self.__rss_enclosure['length']
enclosure.attrib['type'] = self.__rss_enclosure['type']
if self.__rss_pubDate:
pubDate = xml_elem('pubDate', entry)
pubDate.text = formatRFC2822(self.__rss_pubDate)
if self.__rss_source:
source = xml_elem('source', entry, url=self.__rss_source['url'])
source.text = self.__rss_source['title']
if extensions:
for ext in self.__extensions.values() or []:
if ext.get('rss'):
ext['inst'].extend_rss(entry)
return entry
def title(self, title=None):
'''Get or set the title value of the entry. It should contain a human
readable title for the entry. Title is mandatory for both ATOM and RSS
and should not be blank.
:param title: The new title of the entry.
:returns: The entry's title.
'''
if title is not None:
self.__atom_title = title
self.__rss_title = title
return self.__atom_title
def id(self, id=None):
'''Get or set the entry id which identifies the entry using a
universally unique and permanent URI. Two entries in a feed can have
the same value for id if they represent the same entry at different
points in time. This method will also set rss:guid with permalink set
to False. Id is mandatory for an ATOM entry.
:param id: New Id of the entry.
:returns: Id of the entry.
'''
if id is not None:
self.__atom_id = id
self.__rss_guid = {'guid': id, 'permalink': False}
return self.__atom_id
def guid(self, guid=None, permalink=False):
'''Get or set the entry's guid which is a string that uniquely
identifies the item. This will also set atom:id.
:param guid: Id of the entry.
:param permalink: If this is a permanent identifier for this item
:returns: Id and permalink setting of the entry.
'''
if guid is not None:
self.__atom_id = guid
self.__rss_guid = {'guid': guid, 'permalink': permalink}
return self.__rss_guid
def updated(self, updated=None):
'''Set or get the updated value which indicates the last time the entry
was modified in a significant way.
The value can either be a string which will automatically be parsed or
a datetime.datetime object. In any case it is necessary that the value
include timezone information.
:param updated: The modification date.
:returns: Modification date as datetime.datetime
'''
if updated is not None:
if isinstance(updated, string_types):
updated = dateutil_parser.parse(updated)
if not isinstance(updated, datetime):
raise ValueError('Invalid datetime format')
if updated.tzinfo is None:
raise ValueError('Datetime object has no timezone info')
self.__atom_updated = updated
self.__rss_lastBuildDate = updated
return self.__atom_updated
def author(self, author=None, replace=False, **kwargs):
'''Get or set author data. An author element is a dict containing a
name, an email address and a uri. Name is mandatory for ATOM, email is
mandatory for RSS.
This method can be called with:
- the fields of an author as keyword arguments
- the fields of an author as a dictionary
- a list of dictionaries containing the author fields
An author has the following fields:
- *name* conveys a human-readable name for the person.
- *uri* contains a home page for the person.
- *email* contains an email address for the person.
:param author: Dict or list of dicts with author data.
:param replace: Add or replace old data.
Example::
>>> author({'name':'John Doe', 'email':'jdoe@example.com'})
[{'name':'John Doe','email':'jdoe@example.com'}]
>>> author([{'name': 'Mr. X'}, {'name': 'Max'}])
[{'name':'John Doe','email':'jdoe@example.com'},
{'name':'Mr. X'}, {'name':'Max'}]
>>> author(name='John Doe', email='jdoe@example.com', replace=True)
[{'name':'John Doe','email':'jdoe@example.com'}]
'''
if author is None and kwargs:
author = kwargs
if author is not None:
if replace or self.__atom_author is None:
self.__atom_author = []
self.__atom_author += ensure_format(author,
set(['name', 'email', 'uri']),
set())
self.__rss_author = []
for a in self.__atom_author:
if a.get('email'):
if a.get('name'):
self.__rss_author.append('%(email)s (%(name)s)' % a)
else:
self.__rss_author.append('%(email)s' % a)
return self.__atom_author
def content(self, content=None, src=None, type=None):
'''Get or set the content of the entry which contains or links to the
complete content of the entry. Content must be provided for ATOM
entries if there is no alternate link, and should be provided if there
is no summary. If the content is set (not linked) it will also set
rss:description.
:param content: The content of the feed entry.
:param src: Link to the entries content.
:param type: If type is CDATA, the content will not be escaped.
:returns: Content element of the entry.
'''
if src is not None:
self.__atom_content = {'src': src}
elif content is not None:
self.__atom_content = {'content': content}
self.__rss_content = {'content': content}
if type is not None:
self.__atom_content['type'] = type
self.__rss_content['type'] = type
return self.__atom_content
def link(self, link=None, replace=False, **kwargs):
'''Get or set link data. A link element is a dict with the fields
href, rel, type, hreflang, title, and length. Href is mandatory for
ATOM.
This method can be called with:
- the fields of a link as keyword arguments
- the fields of a link as a dictionary
- a list of dictionaries containing the link fields
A link has the following fields:
- *href* is the URI of the referenced resource (typically a Web page)
- *rel* contains a single link relationship type. It can be a full URI,
or one of the following predefined values (default=alternate):
- *alternate* an alternate representation of the entry or feed, for
example a permalink to the html version of the entry, or the
front page of the weblog.
- *enclosure* a related resource which is potentially large in size
and might require special handling, for example an audio or video
recording.
- *related* a document related to the entry or feed.
- *self* the feed itself.
- *via* the source of the information provided in the entry.
- *type* indicates the media type of the resource.
- *hreflang* indicates the language of the referenced resource.
- *title* human readable information about the link, typically for
display purposes.
- *length* the length of the resource, in bytes.
RSS only supports one link with nothing but a URL. So for the RSS link
element the last link with rel=alternate is used.
RSS also supports one enclosure element per entry which is covered by
the link element in ATOM feed entries. So for the RSS enclosure element
the last link with rel=enclosure is used.
:param link: Dict or list of dicts with data.
:param replace: Add or replace old data.
:returns: List of link data.
'''
if link is None and kwargs:
link = kwargs
if link is not None:
if replace or self.__atom_link is None:
self.__atom_link = []
self.__atom_link += ensure_format(
link,
set(['href', 'rel', 'type', 'hreflang', 'title', 'length']),
set(['href']),
{'rel': ['alternate', 'enclosure', 'related', 'self', 'via']},
{'rel': 'alternate'})
# RSS only needs one URL. We use the last matching link for RSS:
for l in self.__atom_link:
if l.get('rel') == 'alternate':
self.__rss_link = l['href']
elif l.get('rel') == 'enclosure':
self.__rss_enclosure = {'url': l['href']}
self.__rss_enclosure['type'] = l.get('type')
self.__rss_enclosure['length'] = l.get('length') or '0'
# return the set with more information (atom)
return self.__atom_link
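# --- Illustrative note, not part of the vendored source. ---
# Because rel=enclosure links map onto the single RSS enclosure element,
# a podcast-style media attachment can be declared through link():
#
#   fe.link(href='http://example.com/episode.mp3', rel='enclosure',
#           type='audio/mpeg', length='123456')
#
# which yields <enclosure url="..." type="audio/mpeg" length="123456"/>
# in the RSS item, while the ATOM entry keeps the full link element.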
def summary(self, summary=None, type=None):
'''Get or set the summary element of an entry which conveys a short
summary, abstract, or excerpt of the entry. Summary is an ATOM only
element and should be provided if there either is no content provided
for the entry, or that content is not inline (i.e., contains a src
attribute), or if the content is encoded in base64. This method will
also set the rss:description field if it wasn't previously set or
contains the old value of summary.
:param summary: Summary of the entry's contents.
:returns: Summary of the entry's contents.
'''
if summary is not None:
# Replace the RSS description with the summary if it was the
# summary before. Not if it is the description.
if not self.__rss_description or (
self.__atom_summary and
self.__rss_description == self.__atom_summary.get("summary")
):
self.__rss_description = summary
self.__atom_summary = {'summary': summary}
if type is not None:
self.__atom_summary['type'] = type
return self.__atom_summary
def description(self, description=None, isSummary=False):
'''Get or set the description value which is the item synopsis.
Description is an RSS only element. For ATOM feeds it is split in
summary and content. The isSummary parameter can be used to control
which ATOM value is set when setting description.
:param description: Description of the entry.
:param isSummary: If the description should be used as content or
summary.
:returns: The entry's description.
'''
if description is not None:
self.__rss_description = description
if isSummary:
self.__atom_summary = description
else:
self.__atom_content = {'content': description}
return self.__rss_description
def category(self, category=None, replace=False, **kwargs):
'''Get or set categories that the entry belongs to.
This method can be called with:
- the fields of a category as keyword arguments
- the fields of a category as a dictionary
- a list of dictionaries containing the category fields
A category has the following fields:
- *term* identifies the category
- *scheme* identifies the categorization scheme via a URI.
- *label* provides a human-readable label for display
If a label is present it is used for the RSS feeds. Otherwise the term
is used. The scheme is used for the domain attribute in RSS.
:param category: Dict or list of dicts with data.
:param replace: Add or replace old data.
:returns: List of category data.
'''
if category is None and kwargs:
category = kwargs
if category is not None:
if replace or self.__atom_category is None:
self.__atom_category = []
self.__atom_category += ensure_format(
category,
set(['term', 'scheme', 'label']),
set(['term']))
# Map the ATOM categories to RSS categories. Use the atom:label as
# name or if not present the atom:term. The atom:scheme is the
# rss:domain.
self.__rss_category = []
for cat in self.__atom_category:
rss_cat = {}
rss_cat['value'] = cat.get('label', cat['term'])
if cat.get('scheme'):
rss_cat['domain'] = cat['scheme']
self.__rss_category.append(rss_cat)
return self.__atom_category
def contributor(self, contributor=None, replace=False, **kwargs):
'''Get or set the contributor data of the feed. This is an ATOM only
value.
This method can be called with:
- the fields of a contributor as keyword arguments
- the fields of a contributor as a dictionary
- a list of dictionaries containing the contributor fields
A contributor has the following fields:
- *name* conveys a human-readable name for the person.
- *uri* contains a home page for the person.
- *email* contains an email address for the person.
:param contributor: Dictionary or list of dictionaries with contributor
data.
:param replace: Add or replace old data.
:returns: List of contributors as dictionaries.
'''
if contributor is None and kwargs:
contributor = kwargs
if contributor is not None:
if replace or self.__atom_contributor is None:
self.__atom_contributor = []
self.__atom_contributor += ensure_format(
contributor, set(['name', 'email', 'uri']), set(['name']))
return self.__atom_contributor
def published(self, published=None):
'''Set or get the published value which contains the time of the initial
creation or first availability of the entry.
The value can either be a string which will automatically be parsed or
a datetime.datetime object. In any case it is necessary that the value
include timezone information.
:param published: The creation date.
:returns: Creation date as datetime.datetime
'''
if published is not None:
if isinstance(published, string_types):
published = dateutil_parser.parse(published)
if not isinstance(published, datetime):
raise ValueError('Invalid datetime format')
if published.tzinfo is None:
raise ValueError('Datetime object has no timezone info')
self.__atom_published = published
self.__rss_pubDate = published
return self.__atom_published
def pubDate(self, pubDate=None):
'''Get or set the pubDate of the entry which indicates when the entry
was published. This method is just another name for the published(...)
method.
'''
return self.published(pubDate)
def pubdate(self, pubDate=None):
'''Get or set the pubDate of the entry which indicates when the entry
was published. This method is just another name for the published(...)
method.
pubdate(…) is deprecated and may be removed in feedgen ≥ 0.8. Use
pubDate(…) instead.
'''
warnings.warn('pubdate(…) is deprecated and may be removed in feedgen '
'≥ 0.8. Use pubDate(…) instead.')
return self.published(pubDate)
def rights(self, rights=None):
'''Get or set the rights value of the entry which conveys information
about rights, e.g. copyrights, held in and over the entry. This ATOM
value will also set rss:copyright.
:param rights: Rights information of the feed.
:returns: Rights information of the feed.
'''
if rights is not None:
self.__atom_rights = rights
return self.__atom_rights
def comments(self, comments=None):
'''Get or set the value of comments which is the URL of the comments
page for the item. This is an RSS only value.
:param comments: URL to the comments page.
:returns: URL to the comments page.
'''
if comments is not None:
self.__rss_comments = comments
return self.__rss_comments
def source(self, url=None, title=None):
'''Get or set the source for the current feed entry.
Note that ATOM feeds support a lot more sub elements than title and URL
(which is what RSS supports) but these are currently not supported.
Patches are welcome.
:param url: Link to the source.
:param title: Title of the linked resource
:returns: Source element as dictionaries.
'''
if url is not None and title is not None:
self.__rss_source = {'url': url, 'title': title}
self.__atom_source = {'link': url, 'title': title}
return self.__rss_source
def enclosure(self, url=None, length=None, type=None):
'''Get or set the value of enclosure which describes a media object
that is attached to the item. This is an RSS only value which is
represented by link(rel=enclosure) in ATOM. ATOM feeds can furthermore
contain several enclosures while RSS may contain only one. That is why
this method, if repeatedly called, will add more than one enclosure to
the feed. However, only the last one is used for RSS.
:param url: URL of the media object.
:param length: Size of the media in bytes.
:param type: Mimetype of the linked media.
:returns: Data of the enclosure element.
'''
if url is not None:
self.link(href=url, rel='enclosure', type=type, length=length)
return self.__rss_enclosure
def ttl(self, ttl=None):
'''Get or set the ttl value. It is an RSS only element. ttl stands for
time to live. It's a number of minutes that indicates how long a
channel can be cached before refreshing from the source.
:param ttl: Integer value representing the time to live.
:returns: Time to live of the entry.
'''
if ttl is not None:
self.__rss_ttl = int(ttl)
return self.__rss_ttl
def load_extension(self, name, atom=True, rss=True):
'''Load a specific extension by name.
:param name: Name of the extension to load.
:param atom: If the extension should be used for ATOM feeds.
:param rss: If the extension should be used for RSS feeds.
'''
# Check loaded extensions
if not isinstance(self.__extensions, dict):
self.__extensions = {}
if name in self.__extensions.keys():
raise ImportError('Extension already loaded')
# Load extension
extname = name[0].upper() + name[1:] + 'EntryExtension'
try:
supmod = __import__('feedgen.ext.%s_entry' % name)
extmod = getattr(supmod.ext, name + '_entry')
except ImportError:
# Use FeedExtension module instead
supmod = __import__('feedgen.ext.%s' % name)
extmod = getattr(supmod.ext, name)
ext = getattr(extmod, extname)
self.register_extension(name, ext, atom, rss)
def register_extension(self, namespace, extension_class_entry=None,
atom=True, rss=True):
'''Register a specific extension by classes to a namespace.
:param namespace: namespace for the extension
:param extension_class_entry: Class of the entry extension to load.
:param atom: If the extension should be used for ATOM feeds.
:param rss: If the extension should be used for RSS feeds.
'''
# Check loaded extensions
# `load_extension` ignores the "Extension" suffix.
if not isinstance(self.__extensions, dict):
self.__extensions = {}
if namespace in self.__extensions.keys():
raise ImportError('Extension already loaded')
if not extension_class_entry:
raise ImportError('No extension class')
extinst = extension_class_entry()
setattr(self, namespace, extinst)
# `load_extension` registry
self.__extensions[namespace] = {
'inst': extinst,
'extension_class_entry': extension_class_entry,
'atom': atom,
'rss': rss
}
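# --- Illustrative note, not part of the vendored source. ---
# The getter/setter pairs above keep the ATOM and RSS views in sync.
# Setting both a description and a content value on the same entry makes
# rss_entry() emit a <description> plus a content:encoded element:
#
#   fe = fg.add_entry()
#   fe.title('Post')
#   fe.description('Short synopsis')               # -> rss <description>
#   fe.content('<p>Full body</p>', type='CDATA')   # -> content:encoded CDATA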


@@ -0,0 +1,6 @@
# -*- coding: utf-8 -*-
"""
===========
feedgen.ext
===========
"""


@@ -0,0 +1,43 @@
# -*- coding: utf-8 -*-
'''
feedgen.ext.base
~~~~~~~~~~~~~~~~
Basic FeedGenerator extension which does nothing but provides all necessary
methods.
:copyright: 2013, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
class BaseExtension(object):
'''Basic FeedGenerator extension.
'''
def extend_ns(self):
'''Returns a dict that will be used in the namespace map for the feed.
'''
return dict()
def extend_rss(self, feed):
'''Extend an RSS feed xml structure containing all previously set fields.
:param feed: The feed xml root element.
:returns: The feed root element.
'''
return feed
def extend_atom(self, feed):
'''Extend an ATOM feed xml structure containing all previously set
fields.
:param feed: The feed xml root element.
:returns: The feed root element.
'''
return feed
class BaseEntryExtension(BaseExtension):
'''Basic FeedEntry extension.
'''
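# --- Illustrative sketch, not part of the vendored source. ---
# A custom extension only needs to subclass BaseExtension and override
# the hooks it cares about; everything else falls back to these no-ops:
#
#   class MyExtension(BaseExtension):  # hypothetical example extension
#       def extend_ns(self):
#           return {'my': 'http://example.com/ns/'}  # made-up namespace
#       def extend_rss(self, feed):
#           # mutate the feed element here, then return it
#           return feed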


@@ -0,0 +1,407 @@
# -*- coding: utf-8 -*-
'''
feedgen.ext.dc
~~~~~~~~~~~~~~~~~~~
Extends the FeedGenerator to add Dublin Core Elements to the feeds.
Descriptions partly taken from
http://dublincore.org/documents/dcmi-terms/#elements-coverage
:copyright: 2013-2017, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
from feedgen.ext.base import BaseExtension
from feedgen.util import xml_elem
class DcBaseExtension(BaseExtension):
'''Dublin Core Elements extension for podcasts.
'''
def __init__(self):
# http://dublincore.org/documents/usageguide/elements.shtml
# http://dublincore.org/documents/dces/
# http://dublincore.org/documents/dcmi-terms/
self._dcelem_contributor = None
self._dcelem_coverage = None
self._dcelem_creator = None
self._dcelem_date = None
self._dcelem_description = None
self._dcelem_format = None
self._dcelem_identifier = None
self._dcelem_language = None
self._dcelem_publisher = None
self._dcelem_relation = None
self._dcelem_rights = None
self._dcelem_source = None
self._dcelem_subject = None
self._dcelem_title = None
self._dcelem_type = None
def extend_ns(self):
return {'dc': 'http://purl.org/dc/elements/1.1/'}
def _extend_xml(self, xml_element):
'''Extend xml_element with set DC fields.
:param xml_element: etree element
'''
DCELEMENTS_NS = 'http://purl.org/dc/elements/1.1/'
for elem in ['contributor', 'coverage', 'creator', 'date',
'description', 'language', 'publisher', 'relation',
'rights', 'source', 'subject', 'title', 'type', 'format',
'identifier']:
if hasattr(self, '_dcelem_%s' % elem):
for val in getattr(self, '_dcelem_%s' % elem) or []:
node = xml_elem('{%s}%s' % (DCELEMENTS_NS, elem),
xml_element)
node.text = val
def extend_atom(self, atom_feed):
'''Extend an Atom feed with the set DC fields.
:param atom_feed: The feed root element
:returns: The feed root element
'''
self._extend_xml(atom_feed)
return atom_feed
def extend_rss(self, rss_feed):
'''Extend a RSS feed with the set DC fields.
:param rss_feed: The feed root element
:returns: The feed root element.
'''
channel = rss_feed[0]
self._extend_xml(channel)
return rss_feed
def dc_contributor(self, contributor=None, replace=False):
'''Get or set the dc:contributor which is an entity responsible for
making contributions to the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-contributor
:param contributor: Contributor or list of contributors.
:param replace: Replace already set contributors (default: False).
:returns: List of contributors.
'''
if contributor is not None:
if not isinstance(contributor, list):
contributor = [contributor]
if replace or not self._dcelem_contributor:
self._dcelem_contributor = []
self._dcelem_contributor += contributor
return self._dcelem_contributor
def dc_coverage(self, coverage=None, replace=True):
'''Get or set the dc:coverage which indicates the spatial or temporal
topic of the resource, the spatial applicability of the resource, or
the jurisdiction under which the resource is relevant.
Spatial topic and spatial applicability may be a named place or a
location specified by its geographic coordinates. Temporal topic may be
a named period, date, or date range. A jurisdiction may be a named
administrative entity or a geographic place to which the resource
applies. Recommended best practice is to use a controlled vocabulary
such as the Thesaurus of Geographic Names [TGN]. Where appropriate,
named places or time periods can be used in preference to numeric
identifiers such as sets of coordinates or date ranges.
References:
[TGN] http://www.getty.edu/research/tools/vocabulary/tgn/index.html
:param coverage: Coverage of the feed.
:param replace: Replace already set coverage (default: True).
:returns: Coverage of the feed.
'''
if coverage is not None:
if not isinstance(coverage, list):
coverage = [coverage]
if replace or not self._dcelem_coverage:
self._dcelem_coverage = []
self._dcelem_coverage += coverage
return self._dcelem_coverage
def dc_creator(self, creator=None, replace=False):
'''Get or set the dc:creator which is an entity primarily responsible
for making the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-creator
:param creator: Creator or list of creators.
:param replace: Replace already set creators (default: False).
:returns: List of creators.
'''
if creator is not None:
if not isinstance(creator, list):
creator = [creator]
if replace or not self._dcelem_creator:
self._dcelem_creator = []
self._dcelem_creator += creator
return self._dcelem_creator
def dc_date(self, date=None, replace=True):
'''Get or set the dc:date which describes a point or period of time
associated with an event in the lifecycle of the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-date
:param date: Date or list of dates.
:param replace: Replace already set dates (default: True).
:returns: List of dates.
'''
if date is not None:
if not isinstance(date, list):
date = [date]
if replace or not self._dcelem_date:
self._dcelem_date = []
self._dcelem_date += date
return self._dcelem_date
def dc_description(self, description=None, replace=True):
'''Get or set the dc:description which is an account of the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-description
:param description: Description or list of descriptions.
:param replace: Replace already set descriptions (default: True).
:returns: List of descriptions.
'''
if description is not None:
if not isinstance(description, list):
description = [description]
if replace or not self._dcelem_description:
self._dcelem_description = []
self._dcelem_description += description
return self._dcelem_description
def dc_format(self, format=None, replace=True):
'''Get or set the dc:format which describes the file format, physical
medium, or dimensions of the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-format
:param format: Format of the resource or list of formats.
:param replace: Replace already set format (default: True).
:returns: Format of the resource.
'''
if format is not None:
if not isinstance(format, list):
format = [format]
if replace or not self._dcelem_format:
self._dcelem_format = []
self._dcelem_format += format
return self._dcelem_format
def dc_identifier(self, identifier=None, replace=True):
'''Get or set the dc:identifier which should be an unambiguous
reference to the resource within a given context.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-identifier
:param identifier: Identifier of the resource or list of identifiers.
:param replace: Replace already set identifier (default: True).
:returns: Identifiers of the resource.
'''
if identifier is not None:
if not isinstance(identifier, list):
identifier = [identifier]
if replace or not self._dcelem_identifier:
self._dcelem_identifier = []
self._dcelem_identifier += identifier
return self._dcelem_identifier
def dc_language(self, language=None, replace=True):
'''Get or set the dc:language which describes a language of the
resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-language
:param language: Language or list of languages.
:param replace: Replace already set languages (default: True).
:returns: List of languages.
'''
if language is not None:
if not isinstance(language, list):
language = [language]
if replace or not self._dcelem_language:
self._dcelem_language = []
self._dcelem_language += language
return self._dcelem_language
def dc_publisher(self, publisher=None, replace=False):
'''Get or set the dc:publisher which is an entity responsible for
making the resource available.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-publisher
:param publisher: Publisher or list of publishers.
:param replace: Replace already set publishers (default: False).
:returns: List of publishers.
'''
if publisher is not None:
if not isinstance(publisher, list):
publisher = [publisher]
if replace or not self._dcelem_publisher:
self._dcelem_publisher = []
self._dcelem_publisher += publisher
return self._dcelem_publisher
def dc_relation(self, relation=None, replace=False):
'''Get or set the dc:relation which describes a related resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-relation
:param relation: Relation or list of relations.
:param replace: Replace already set relations (default: False).
:returns: List of relations.
'''
if relation is not None:
if not isinstance(relation, list):
relation = [relation]
if replace or not self._dcelem_relation:
self._dcelem_relation = []
self._dcelem_relation += relation
return self._dcelem_relation
def dc_rights(self, rights=None, replace=False):
'''Get or set the dc:rights which may contain information about rights
held in and over the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-rights
:param rights: Rights information or list of rights information.
:param replace: Replace already set rights (default: False).
:returns: List of rights information.
'''
if rights is not None:
if not isinstance(rights, list):
rights = [rights]
if replace or not self._dcelem_rights:
self._dcelem_rights = []
self._dcelem_rights += rights
return self._dcelem_rights
def dc_source(self, source=None, replace=False):
'''Get or set the dc:source which is a related resource from which the
described resource is derived.
The described resource may be derived from the related resource in
whole or in part. Recommended best practice is to identify the related
resource by means of a string conforming to a formal identification
system.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-source
:param source: Source or list of sources.
:param replace: Replace already set sources (default: False).
:returns: List of sources.
'''
if source is not None:
if not isinstance(source, list):
source = [source]
if replace or not self._dcelem_source:
self._dcelem_source = []
self._dcelem_source += source
return self._dcelem_source
def dc_subject(self, subject=None, replace=False):
'''Get or set the dc:subject which describes the topic of the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-subject
:param subject: Subject or list of subjects.
:param replace: Replace already set subjects (default: False).
:returns: List of subjects.
'''
if subject is not None:
if not isinstance(subject, list):
subject = [subject]
if replace or not self._dcelem_subject:
self._dcelem_subject = []
self._dcelem_subject += subject
return self._dcelem_subject
def dc_title(self, title=None, replace=True):
'''Get or set the dc:title which is a name given to the resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-title
:param title: Title or list of titles.
:param replace: Replace already set titles (default: True).
:returns: List of titles.
'''
if title is not None:
if not isinstance(title, list):
title = [title]
if replace or not self._dcelem_title:
self._dcelem_title = []
self._dcelem_title += title
return self._dcelem_title
def dc_type(self, type=None, replace=False):
'''Get or set the dc:type which describes the nature or genre of the
resource.
For more information see:
http://dublincore.org/documents/dcmi-terms/#elements-type
:param type: Type or list of types.
:param replace: Replace already set types (default: False).
:returns: List of types.
'''
if type is not None:
if not isinstance(type, list):
type = [type]
if replace or not self._dcelem_type:
self._dcelem_type = []
self._dcelem_type += type
return self._dcelem_type
class DcExtension(DcBaseExtension):
'''Dublin Core Elements extension for podcasts.
'''
class DcEntryExtension(DcBaseExtension):
'''Dublin Core Elements extension for podcasts.
'''
def extend_atom(self, entry):
'''Add dc elements to an atom item. Alters the item itself.
:param entry: An atom entry element.
:returns: The entry element.
'''
self._extend_xml(entry)
return entry
def extend_rss(self, item):
'''Add dc elements to an RSS item. Alters the item itself.
:param item: An RSS item element.
:returns: The item element.
'''
self._extend_xml(item)
return item
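# --- Illustrative sketch, not part of the vendored source. ---
# As feedgen's __main__ above shows, the Dublin Core extension is loaded
# by name and then exposed as an attribute on the generator:
#
#   fg.load_extension('dc')
#   fg.dc.dc_contributor('Lars Kiesow')
#   fg.dc.dc_title('Example Title')  # replace defaults to True for titles
#   print(fg.rss_str(pretty=True))   # channel gains dc:* child elements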


@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
'''
feedgen.ext.geo
~~~~~~~~~~~~~~~~~~~
Extends the FeedGenerator to produce Simple GeoRSS feeds.
:copyright: 2017, Bob Breznak <bob.breznak@gmail.com>
:license: FreeBSD and LGPL, see license.* for more details.
'''
from feedgen.ext.base import BaseExtension
class GeoExtension(BaseExtension):
'''FeedGenerator extension for Simple GeoRSS.
'''
def extend_ns(self):
return {'georss': 'http://www.georss.org/georss'}


@@ -0,0 +1,329 @@
# -*- coding: utf-8 -*-
'''
feedgen.ext.geo_entry
~~~~~~~~~~~~~~~~~~~~~
Extends the FeedGenerator to produce Simple GeoRSS feeds.
:copyright: 2017, Bob Breznak <bob.breznak@gmail.com>
:license: FreeBSD and LGPL, see license.* for more details.
'''
import numbers
import warnings
from feedgen.ext.base import BaseEntryExtension
from feedgen.util import xml_elem
class GeoRSSPolygonInteriorWarning(Warning):
"""
Simple placeholder for warning about ignored polygon interiors.
Stores the original geom on a ``geom`` attribute (useful when warnings
are raised as errors).
"""
def __init__(self, geom, *args, **kwargs):
self.geom = geom
super(GeoRSSPolygonInteriorWarning, self).__init__(*args, **kwargs)
def __str__(self):
return '{:d} interiors of polygon ignored'.format(
len(self.geom.__geo_interface__['coordinates']) - 1
) # ignore exterior in count
class GeoRSSGeometryError(ValueError):
"""
Subclass of ValueError for a GeoRSS geometry error
Only some geometries are supported in Simple GeoRSS; an error is raised
for any other type. The offending geometry is stored on the ``geom``
attribute.
"""
def __init__(self, geom, *args, **kwargs):
self.geom = geom
super(GeoRSSGeometryError, self).__init__(*args, **kwargs)
def __str__(self):
msg = "Geometry of type '{}' not in Point, Linestring or Polygon"
return msg.format(
self.geom.__geo_interface__['type']
)
class GeoEntryExtension(BaseEntryExtension):
'''FeedEntry extension for Simple GeoRSS.
'''
def __init__(self):
'''Simple GeoRSS tag'''
# geometries
self.__point = None
self.__line = None
self.__polygon = None
self.__box = None
# additional properties
self.__featuretypetag = None
self.__relationshiptag = None
self.__featurename = None
# elevation
self.__elev = None
self.__floor = None
# radius
self.__radius = None
def extend_file(self, entry):
'''Add additional fields to an RSS item or ATOM entry.
:param entry: The RSS item or ATOM entry XML element to use.
'''
GEO_NS = 'http://www.georss.org/georss'
if self.__point:
point = xml_elem('{%s}point' % GEO_NS, entry)
point.text = self.__point
if self.__line:
line = xml_elem('{%s}line' % GEO_NS, entry)
line.text = self.__line
if self.__polygon:
polygon = xml_elem('{%s}polygon' % GEO_NS, entry)
polygon.text = self.__polygon
if self.__box:
box = xml_elem('{%s}box' % GEO_NS, entry)
box.text = self.__box
if self.__featuretypetag:
featuretypetag = xml_elem('{%s}featuretypetag' % GEO_NS, entry)
featuretypetag.text = self.__featuretypetag
if self.__relationshiptag:
relationshiptag = xml_elem('{%s}relationshiptag' % GEO_NS, entry)
relationshiptag.text = self.__relationshiptag
if self.__featurename:
featurename = xml_elem('{%s}featurename' % GEO_NS, entry)
featurename.text = self.__featurename
if self.__elev:
elevation = xml_elem('{%s}elev' % GEO_NS, entry)
elevation.text = str(self.__elev)
if self.__floor:
floor = xml_elem('{%s}floor' % GEO_NS, entry)
floor.text = str(self.__floor)
if self.__radius:
radius = xml_elem('{%s}radius' % GEO_NS, entry)
radius.text = str(self.__radius)
return entry
def extend_rss(self, entry):
return self.extend_file(entry)
def extend_atom(self, entry):
return self.extend_file(entry)
def point(self, point=None):
'''Get or set the georss:point of the entry.
:param point: The GeoRSS formatted point (i.e. "42.36 -71.05")
:returns: The current georss:point of the entry.
'''
if point is not None:
self.__point = point
return self.__point
def line(self, line=None):
'''Get or set the georss:line of the entry
:param line: The GeoRSS formatted line (i.e. "45.256 -110.45 46.46
-109.48 43.84 -109.86")
:return: The current georss:line of the entry
'''
if line is not None:
self.__line = line
return self.__line
def polygon(self, polygon=None):
'''Get or set the georss:polygon of the entry
:param polygon: The GeoRSS formatted polygon (i.e. "45.256 -110.45
46.46 -109.48 43.84 -109.86 45.256 -110.45")
:return: The current georss:polygon of the entry
'''
if polygon is not None:
self.__polygon = polygon
return self.__polygon
def box(self, box=None):
'''
Get or set the georss:box of the entry
:param box: The GeoRSS formatted box (i.e. "42.943 -71.032 43.039
-69.856")
:return: The current georss:box of the entry
'''
if box is not None:
self.__box = box
return self.__box
def featuretypetag(self, featuretypetag=None):
'''
Get or set the georss:featuretypetag of the entry
:param featuretypetag: The GeoRSS featuretypetag (e.g. "city")
:return: The current georss:featuretypetag
'''
if featuretypetag is not None:
self.__featuretypetag = featuretypetag
return self.__featuretypetag
def relationshiptag(self, relationshiptag=None):
'''
Get or set the georss:relationshiptag of the entry
:param relationshiptag: The GeoRSS relationshiptag (e.g.
"is-centred-at")
:return: the current georss:relationshiptag
'''
if relationshiptag is not None:
self.__relationshiptag = relationshiptag
return self.__relationshiptag
def featurename(self, featurename=None):
'''
Get or set the georss:featurename of the entry
:param featurename: The GeoRSS featurename (e.g. "Footscray")
:return: the current georss:featurename
'''
if featurename is not None:
self.__featurename = featurename
return self.__featurename
def elev(self, elev=None):
'''
Get or set the georss:elev of the entry
:param elev: The GeoRSS elevation (e.g. 100.3)
:type elev: numbers.Number
:return: the current georss:elev
'''
if elev is not None:
if not isinstance(elev, numbers.Number):
raise ValueError("elev tag must be numeric: {}".format(elev))
self.__elev = elev
return self.__elev
def floor(self, floor=None):
'''
Get or set the georss:floor of the entry
:param floor: The GeoRSS floor (e.g. 4)
:type floor: int
:return: the current georss:floor
'''
if floor is not None:
if not isinstance(floor, int):
raise ValueError("floor tag must be int: {}".format(floor))
self.__floor = floor
return self.__floor
def radius(self, radius=None):
'''
Get or set the georss:radius of the entry
:param radius: The GeoRSS radius (e.g. 100.3)
:type radius: numbers.Number
:return: the current georss:radius
'''
if radius is not None:
if not isinstance(radius, numbers.Number):
raise ValueError(
"radius tag must be numeric: {}".format(radius)
)
self.__radius = radius
return self.__radius
def geom_from_geo_interface(self, geom):
'''
Generate a georss geometry from some Python object with a
``__geo_interface__`` property (see the `geo_interface specification by
Sean Gillies`_geointerface )
Note only a subset of GeoJSON (see `geojson.org`_geojson ) can be
easily converted to GeoRSS:
- Point
- LineString
        - Polygon (if there are holes / donuts in the polygons a warning will
          be generated)
        Other GeoJSON types will raise a ``ValueError``.
.. note:: The geometry is assumed to be x, y as longitude, latitude in
the WGS84 projection.
.. _geointerface: https://gist.github.com/sgillies/2217756
.. _geojson: https://geojson.org/
:param geom: Geometry object with a __geo_interface__ property
:return: the formatted GeoRSS geometry
'''
geojson = geom.__geo_interface__
if geojson['type'] not in ('Point', 'LineString', 'Polygon'):
raise GeoRSSGeometryError(geom)
if geojson['type'] == 'Point':
coords = '{:f} {:f}'.format(
geojson['coordinates'][1], # latitude is y
geojson['coordinates'][0]
)
return self.point(coords)
elif geojson['type'] == 'LineString':
coords = ' '.join(
'{:f} {:f}'.format(vertex[1], vertex[0])
for vertex in
geojson['coordinates']
)
return self.line(coords)
elif geojson['type'] == 'Polygon':
if len(geojson['coordinates']) > 1:
warnings.warn(GeoRSSPolygonInteriorWarning(geom))
coords = ' '.join(
'{:f} {:f}'.format(vertex[1], vertex[0])
for vertex in
geojson['coordinates'][0]
)
return self.polygon(coords)
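
A minimal usage sketch for the GeoRSS entry extension above (illustrative, not part of the vendored source: the example.org identifiers are placeholders, and the import assumes the vendored package is importable as feedgen):

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('geo')  # registers the geo extension on the feed and its entries
fg.id('http://example.org/feed')
fg.title('Georeferenced feed')
fe = fg.add_entry()
fe.id('http://example.org/post/1')
fe.title('Georeferenced post')
fe.geo.point('42.36 -71.05')  # latitude first, as in the point() docstring

# Any object exposing __geo_interface__ works too (x, y as longitude, latitude):
class PointStub:
    __geo_interface__ = {'type': 'Point', 'coordinates': (-71.05, 42.36)}

fe.geo.geom_from_geo_interface(PointStub())  # stores '42.360000 -71.050000'
atom = fg.atom_str(pretty=True)  # serialized Atom feed, ready to write out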

View File

@@ -0,0 +1,183 @@
# -*- coding: utf-8 -*-
'''
feedgen.ext.media
~~~~~~~~~~~~~~~~~
Extends the feedgen to produce media tags.
:copyright: 2013-2017, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
from feedgen.ext.base import BaseEntryExtension, BaseExtension
from feedgen.util import ensure_format, xml_elem
MEDIA_NS = 'http://search.yahoo.com/mrss/'
class MediaExtension(BaseExtension):
    '''FeedGenerator extension for media tags.
'''
def extend_ns(self):
return {'media': MEDIA_NS}
class MediaEntryExtension(BaseEntryExtension):
'''FeedEntry extension for media tags.
'''
def __init__(self):
self.__media_content = []
self.__media_thumbnail = []
def extend_atom(self, entry):
        '''Add additional fields to an RSS item or Atom entry.
        :param entry: The item/entry XML element to extend.
'''
groups = {None: entry}
for media_content in self.__media_content:
# Define current media:group
group = groups.get(media_content.get('group'))
if group is None:
group = xml_elem('{%s}group' % MEDIA_NS, entry)
groups[media_content.get('group')] = group
# Add content
content = xml_elem('{%s}content' % MEDIA_NS, group)
for attr in ('url', 'fileSize', 'type', 'medium', 'isDefault',
'expression', 'bitrate', 'framerate', 'samplingrate',
'channels', 'duration', 'height', 'width', 'lang'):
if media_content.get(attr):
content.set(attr, media_content[attr])
for media_thumbnail in self.__media_thumbnail:
# Define current media:group
group = groups.get(media_thumbnail.get('group'))
if group is None:
group = xml_elem('{%s}group' % MEDIA_NS, entry)
groups[media_thumbnail.get('group')] = group
# Add thumbnails
thumbnail = xml_elem('{%s}thumbnail' % MEDIA_NS, group)
for attr in ('url', 'height', 'width', 'time'):
if media_thumbnail.get(attr):
thumbnail.set(attr, media_thumbnail[attr])
return entry
def extend_rss(self, item):
return self.extend_atom(item)
def content(self, content=None, replace=False, group='default', **kwargs):
'''Get or set media:content data.
This method can be called with:
- the fields of a media:content as keyword arguments
- the fields of a media:content as a dictionary
- a list of dictionaries containing the media:content fields
<media:content> is a sub-element of either <item> or <media:group>.
Media objects that are not the same content should not be included in
the same <media:group> element. The sequence of these items implies
the order of presentation. While many of the attributes appear to be
audio/video specific, this element can be used to publish any type
of media. It contains 14 attributes, most of which are optional.
media:content has the following fields:
- *url* should specify the direct URL to the media object.
- *fileSize* number of bytes of the media object.
- *type* standard MIME type of the object.
- *medium* type of object (image | audio | video | document |
executable).
- *isDefault* determines if this is the default object.
- *expression* determines if the object is a sample or the full version
of the object, or even if it is a continuous stream (sample | full |
nonstop).
- *bitrate* kilobits per second rate of media.
- *framerate* number of frames per second for the media object.
- *samplingrate* number of samples per second taken to create the media
object. It is expressed in thousands of samples per second (kHz).
- *channels* number of audio channels in the media object.
- *duration* number of seconds the media object plays.
- *height* height of the media object.
- *width* width of the media object.
- *lang* is the primary language encapsulated in the media object.
:param content: Dictionary or list of dictionaries with content data.
:param replace: Add or replace old data.
:param group: Media group to put this content in.
:returns: The media content tag.
'''
# Handle kwargs
if content is None and kwargs:
content = kwargs
# Handle new data
if content is not None:
# Reset data if we want to replace them
if replace or self.__media_content is None:
self.__media_content = []
# Ensure list
if not isinstance(content, list):
content = [content]
# define media group
for c in content:
c['group'] = c.get('group', group)
self.__media_content += ensure_format(
content,
set(['url', 'fileSize', 'type', 'medium', 'isDefault',
'expression', 'bitrate', 'framerate', 'samplingrate',
'channels', 'duration', 'height', 'width', 'lang',
'group']),
set(['url', 'group']))
return self.__media_content
def thumbnail(self, thumbnail=None, replace=False, group='default',
**kwargs):
'''Get or set media:thumbnail data.
This method can be called with:
- the fields of a media:content as keyword arguments
- the fields of a media:content as a dictionary
- a list of dictionaries containing the media:content fields
Allows particular images to be used as representative images for
the media object. If multiple thumbnails are included, and time
coding is not at play, it is assumed that the images are in order
of importance. It has one required attribute and three optional
attributes.
media:thumbnail has the following fields:
- *url* should specify the direct URL to the media object.
- *height* height of the media object.
- *width* width of the media object.
- *time* specifies the time offset in relation to the media object.
:param thumbnail: Dictionary or list of dictionaries with thumbnail
data.
:param replace: Add or replace old data.
:param group: Media group to put this content in.
:returns: The media thumbnail tag.
'''
# Handle kwargs
if thumbnail is None and kwargs:
thumbnail = kwargs
# Handle new data
if thumbnail is not None:
# Reset data if we want to replace them
if replace or self.__media_thumbnail is None:
self.__media_thumbnail = []
# Ensure list
if not isinstance(thumbnail, list):
thumbnail = [thumbnail]
# Define media group
for t in thumbnail:
t['group'] = t.get('group', group)
self.__media_thumbnail += ensure_format(
thumbnail,
set(['url', 'height', 'width', 'time', 'group']),
set(['url', 'group']))
return self.__media_thumbnail
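
A brief sketch of the media extension above (illustrative, not part of the vendored source; URLs are placeholders and the feedgen import path is assumed):

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('media')
fg.id('http://example.org/feed')
fg.title('Media feed')
fe = fg.add_entry()
fe.id('http://example.org/post/1')
fe.title('A clip')
# 'url' is the only required media:content key; the rest are optional
fe.media.content(url='http://example.org/clip.mp4', medium='video')
fe.media.thumbnail(url='http://example.org/thumb.png', width='128')
atom = fg.atom_str(pretty=True)  # serialized feed with media:content/thumbnail

Passing the same group= value to several content() calls collects them under a single <media:group>; the default group is 'default'.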

View File

@@ -0,0 +1,355 @@
# -*- coding: utf-8 -*-
'''
feedgen.ext.podcast
~~~~~~~~~~~~~~~~~~~
Extends the FeedGenerator to produce podcasts.
:copyright: 2013, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
from feedgen.compat import string_types
from feedgen.ext.base import BaseExtension
from feedgen.util import ensure_format, xml_elem
class PodcastExtension(BaseExtension):
'''FeedGenerator extension for podcasts.
'''
def __init__(self):
# ITunes tags
# http://www.apple.com/itunes/podcasts/specs.html#rss
self.__itunes_author = None
self.__itunes_block = None
self.__itunes_category = None
self.__itunes_image = None
self.__itunes_explicit = None
self.__itunes_complete = None
self.__itunes_new_feed_url = None
self.__itunes_owner = None
self.__itunes_subtitle = None
self.__itunes_summary = None
def extend_ns(self):
return {'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd'}
def extend_rss(self, rss_feed):
'''Extend an RSS feed root with set itunes fields.
:returns: The feed root element.
'''
ITUNES_NS = 'http://www.itunes.com/dtds/podcast-1.0.dtd'
channel = rss_feed[0]
if self.__itunes_author:
author = xml_elem('{%s}author' % ITUNES_NS, channel)
author.text = self.__itunes_author
if self.__itunes_block is not None:
block = xml_elem('{%s}block' % ITUNES_NS, channel)
block.text = 'yes' if self.__itunes_block else 'no'
for c in self.__itunes_category or []:
if not c.get('cat'):
continue
category = channel.find(
'{%s}category[@text="%s"]' % (ITUNES_NS, c.get('cat')))
if category is None:
category = xml_elem('{%s}category' % ITUNES_NS, channel)
category.attrib['text'] = c.get('cat')
if c.get('sub'):
subcategory = xml_elem('{%s}category' % ITUNES_NS, category)
subcategory.attrib['text'] = c.get('sub')
if self.__itunes_image:
image = xml_elem('{%s}image' % ITUNES_NS, channel)
image.attrib['href'] = self.__itunes_image
if self.__itunes_explicit in ('yes', 'no', 'clean'):
explicit = xml_elem('{%s}explicit' % ITUNES_NS, channel)
explicit.text = self.__itunes_explicit
if self.__itunes_complete in ('yes', 'no'):
complete = xml_elem('{%s}complete' % ITUNES_NS, channel)
complete.text = self.__itunes_complete
if self.__itunes_new_feed_url:
new_feed_url = xml_elem('{%s}new-feed-url' % ITUNES_NS, channel)
new_feed_url.text = self.__itunes_new_feed_url
if self.__itunes_owner:
owner = xml_elem('{%s}owner' % ITUNES_NS, channel)
owner_name = xml_elem('{%s}name' % ITUNES_NS, owner)
owner_name.text = self.__itunes_owner.get('name')
owner_email = xml_elem('{%s}email' % ITUNES_NS, owner)
owner_email.text = self.__itunes_owner.get('email')
if self.__itunes_subtitle:
subtitle = xml_elem('{%s}subtitle' % ITUNES_NS, channel)
subtitle.text = self.__itunes_subtitle
if self.__itunes_summary:
summary = xml_elem('{%s}summary' % ITUNES_NS, channel)
summary.text = self.__itunes_summary
return rss_feed
def itunes_author(self, itunes_author=None):
'''Get or set the itunes:author. The content of this tag is shown in
the Artist column in iTunes. If the tag is not present, iTunes uses the
contents of the <author> tag. If <itunes:author> is not present at the
feed level, iTunes will use the contents of <managingEditor>.
:param itunes_author: The author of the podcast.
:returns: The author of the podcast.
'''
if itunes_author is not None:
self.__itunes_author = itunes_author
return self.__itunes_author
def itunes_block(self, itunes_block=None):
'''Get or set the ITunes block attribute. Use this to prevent the
entire podcast from appearing in the iTunes podcast directory.
:param itunes_block: Block the podcast.
:returns: If the podcast is blocked.
'''
if itunes_block is not None:
self.__itunes_block = itunes_block
return self.__itunes_block
def itunes_category(self, itunes_category=None, replace=False, **kwargs):
'''Get or set the ITunes category which appears in the category column
and in iTunes Store Browser.
The (sub-)category has to be one from the values defined at
http://www.apple.com/itunes/podcasts/specs.html#categories
This method can be called with:
- the fields of an itunes_category as keyword arguments
- the fields of an itunes_category as a dictionary
- a list of dictionaries containing the itunes_category fields
An itunes_category has the following fields:
- *cat* name for a category.
- *sub* name for a subcategory, child of category
If a podcast has more than one subcategory from the same category, the
category is called more than once.
        Like the parameter::
[{"cat":"Arts","sub":"Design"},{"cat":"Arts","sub":"Food"}]
…would become::
<itunes:category text="Arts">
<itunes:category text="Design"/>
<itunes:category text="Food"/>
</itunes:category>
:param itunes_category: Dictionary or list of dictionaries with
itunes_category data.
:param replace: Add or replace old data.
:returns: List of itunes_categories as dictionaries.
---
        **Important note about deprecated parameter syntax:** Old versions of
        the feedgen did only support one category plus one subcategory which
        would be passed to this function as the first two parameters. For
        compatibility reasons, this still works but should not be used and may
        be removed at any time.
'''
# Ensure old API still works for now. Note that the API is deprecated
# and this fallback may be removed at any time.
if isinstance(itunes_category, string_types):
itunes_category = {'cat': itunes_category}
if replace:
itunes_category['sub'] = replace
replace = True
if itunes_category is None and kwargs:
itunes_category = kwargs
if itunes_category is not None:
if replace or self.__itunes_category is None:
self.__itunes_category = []
self.__itunes_category += ensure_format(itunes_category,
set(['cat', 'sub']),
set(['cat']))
return self.__itunes_category
def itunes_image(self, itunes_image=None):
'''Get or set the image for the podcast. This tag specifies the artwork
for your podcast. Put the URL to the image in the href attribute.
iTunes prefers square .jpg images that are at least 1400x1400 pixels,
which is different from what is specified for the standard RSS image
tag. In order for a podcast to be eligible for an iTunes Store feature,
the accompanying image must be at least 1400x1400 pixels.
iTunes supports images in JPEG and PNG formats with an RGB color space
(CMYK is not supported). The URL must end in ".jpg" or ".png". If the
<itunes:image> tag is not present, iTunes will use the contents of the
RSS image tag.
If you change your podcasts image, also change the files name. iTunes
may not change the image if it checks your feed and the image URL is
the same. The server hosting your cover art image must allow HTTP head
requests for iTS to be able to automatically update your cover art.
:param itunes_image: Image of the podcast.
:returns: Image of the podcast.
'''
if itunes_image is not None:
if itunes_image.endswith('.jpg') or itunes_image.endswith('.png'):
self.__itunes_image = itunes_image
else:
                raise ValueError('Image file must be png or jpg')
return self.__itunes_image
def itunes_explicit(self, itunes_explicit=None):
        '''Get or set the itunes:explicit value of the podcast. This tag should
be used to indicate whether your podcast contains explicit material.
The three values for this tag are "yes", "no", and "clean".
If you populate this tag with "yes", an "explicit" parental advisory
graphic will appear next to your podcast artwork on the iTunes Store
and in the Name column in iTunes. If the value is "clean", the parental
advisory type is considered Clean, meaning that no explicit language or
adult content is included anywhere in the episodes, and a "clean"
graphic will appear. If the explicit tag is present and has any other
value (e.g., "no"), you see no indicator — blank is the default
advisory type.
:param itunes_explicit: If the podcast contains explicit material.
:returns: If the podcast contains explicit material.
'''
if itunes_explicit is not None:
if itunes_explicit not in ('', 'yes', 'no', 'clean'):
raise ValueError('Invalid value for explicit tag')
self.__itunes_explicit = itunes_explicit
return self.__itunes_explicit
def itunes_complete(self, itunes_complete=None):
'''Get or set the itunes:complete value of the podcast. This tag can be
used to indicate the completion of a podcast.
If you populate this tag with "yes", you are indicating that no more
episodes will be added to the podcast. If the <itunes:complete> tag is
present and has any other value (e.g. “no”), it will have no effect on
the podcast.
:param itunes_complete: If the podcast is complete.
:returns: If the podcast is complete.
'''
if itunes_complete is not None:
if itunes_complete not in ('yes', 'no', '', True, False):
raise ValueError('Invalid value for complete tag')
if itunes_complete is True:
itunes_complete = 'yes'
if itunes_complete is False:
itunes_complete = 'no'
self.__itunes_complete = itunes_complete
return self.__itunes_complete
def itunes_new_feed_url(self, itunes_new_feed_url=None):
'''Get or set the new-feed-url property of the podcast. This tag allows
you to change the URL where the podcast feed is located
After adding the tag to your old feed, you should maintain the old feed
for 48 hours before retiring it. At that point, iTunes will have
updated the directory with the new feed URL.
:param itunes_new_feed_url: New feed URL.
:returns: New feed URL.
'''
if itunes_new_feed_url is not None:
self.__itunes_new_feed_url = itunes_new_feed_url
return self.__itunes_new_feed_url
def itunes_owner(self, name=None, email=None):
'''Get or set the itunes:owner of the podcast. This tag contains
information that will be used to contact the owner of the podcast for
communication specifically about the podcast. It will not be publicly
displayed.
        :param name: The name of the owner of the feed.
        :param email: The email address of the owner of the feed.
:returns: Data of the owner of the feed.
'''
if name is not None:
if name and email:
self.__itunes_owner = {'name': name, 'email': email}
elif not name and not email:
self.__itunes_owner = None
else:
raise ValueError('Both name and email have to be set.')
return self.__itunes_owner
def itunes_subtitle(self, itunes_subtitle=None):
'''Get or set the itunes:subtitle value for the podcast. The contents of
this tag are shown in the Description column in iTunes. The subtitle
displays best if it is only a few words long.
:param itunes_subtitle: Subtitle of the podcast.
:returns: Subtitle of the podcast.
'''
if itunes_subtitle is not None:
self.__itunes_subtitle = itunes_subtitle
return self.__itunes_subtitle
def itunes_summary(self, itunes_summary=None):
'''Get or set the itunes:summary value for the podcast. The contents of
this tag are shown in a separate window that appears when the "circled
i" in the Description column is clicked. It also appears on the iTunes
page for your podcast. This field can be up to 4000 characters. If
`<itunes:summary>` is not included, the contents of the <description>
tag are used.
:param itunes_summary: Summary of the podcast.
:returns: Summary of the podcast.
'''
if itunes_summary is not None:
self.__itunes_summary = itunes_summary
return self.__itunes_summary
_itunes_categories = {
'Arts': [
'Design', 'Fashion & Beauty', 'Food', 'Literature',
'Performing Arts', 'Visual Arts'],
'Business': [
'Business News', 'Careers', 'Investing',
'Management & Marketing', 'Shopping'],
'Comedy': [],
'Education': [
'Education', 'Education Technology', 'Higher Education',
'K-12', 'Language Courses', 'Training'],
'Games & Hobbies': [
'Automotive', 'Aviation', 'Hobbies', 'Other Games',
'Video Games'],
'Government & Organizations': [
'Local', 'National', 'Non-Profit', 'Regional'],
'Health': [
'Alternative Health', 'Fitness & Nutrition', 'Self-Help',
'Sexuality'],
'Kids & Family': [],
'Music': [],
'News & Politics': [],
'Religion & Spirituality': [
'Buddhism', 'Christianity', 'Hinduism', 'Islam', 'Judaism',
'Other', 'Spirituality'],
'Science & Medicine': [
'Medicine', 'Natural Sciences', 'Social Sciences'],
'Society & Culture': [
'History', 'Personal Journals', 'Philosophy',
'Places & Travel'],
'Sports & Recreation': [
'Amateur', 'College & High School', 'Outdoor', 'Professional'],
'Technology': [
'Gadgets', 'Tech News', 'Podcasting', 'Software How-To'],
'TV & Film': []}
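
A short usage sketch for the feed-level podcast extension above (illustrative, not part of the vendored source; names and URLs are placeholders):

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('podcast')
fg.title('Example podcast')
fg.link(href='http://example.org', rel='alternate')
fg.description('A show about examples')
fg.podcast.itunes_author('Jane Doe')
fg.podcast.itunes_category([{'cat': 'Arts', 'sub': 'Design'},
                            {'cat': 'Arts', 'sub': 'Food'}])
fg.podcast.itunes_explicit('clean')
fg.podcast.itunes_owner(name='Jane Doe', email='jane@example.org')
rss = fg.rss_str(pretty=True)  # serialized RSS with the itunes:* channel tags

The deprecated two-argument form itunes_category('Arts', 'Design') still works, as the docstring notes, but the dictionary form is the one to prefer.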

View File

@@ -0,0 +1,244 @@
# -*- coding: utf-8 -*-
'''
feedgen.ext.podcast_entry
~~~~~~~~~~~~~~~~~~~~~~~~~
Extends the feedgen to produce podcasts.
:copyright: 2013-2016, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
from feedgen.ext.base import BaseEntryExtension
from feedgen.util import xml_elem
class PodcastEntryExtension(BaseEntryExtension):
'''FeedEntry extension for podcasts.
'''
def __init__(self):
# ITunes tags
# http://www.apple.com/itunes/podcasts/specs.html#rss
self.__itunes_author = None
self.__itunes_block = None
self.__itunes_image = None
self.__itunes_duration = None
self.__itunes_explicit = None
self.__itunes_is_closed_captioned = None
self.__itunes_order = None
self.__itunes_subtitle = None
self.__itunes_summary = None
def extend_rss(self, entry):
'''Add additional fields to an RSS item.
        :param entry: The RSS item XML element to use.
'''
ITUNES_NS = 'http://www.itunes.com/dtds/podcast-1.0.dtd'
if self.__itunes_author:
author = xml_elem('{%s}author' % ITUNES_NS, entry)
author.text = self.__itunes_author
if self.__itunes_block is not None:
block = xml_elem('{%s}block' % ITUNES_NS, entry)
block.text = 'yes' if self.__itunes_block else 'no'
if self.__itunes_image:
image = xml_elem('{%s}image' % ITUNES_NS, entry)
image.attrib['href'] = self.__itunes_image
if self.__itunes_duration:
duration = xml_elem('{%s}duration' % ITUNES_NS, entry)
duration.text = self.__itunes_duration
if self.__itunes_explicit in ('yes', 'no', 'clean'):
explicit = xml_elem('{%s}explicit' % ITUNES_NS, entry)
explicit.text = self.__itunes_explicit
if self.__itunes_is_closed_captioned is not None:
is_closed_captioned = xml_elem(
'{%s}isClosedCaptioned' % ITUNES_NS, entry)
if self.__itunes_is_closed_captioned:
is_closed_captioned.text = 'yes'
else:
is_closed_captioned.text = 'no'
if self.__itunes_order is not None and self.__itunes_order >= 0:
order = xml_elem('{%s}order' % ITUNES_NS, entry)
order.text = str(self.__itunes_order)
if self.__itunes_subtitle:
subtitle = xml_elem('{%s}subtitle' % ITUNES_NS, entry)
subtitle.text = self.__itunes_subtitle
if self.__itunes_summary:
summary = xml_elem('{%s}summary' % ITUNES_NS, entry)
summary.text = self.__itunes_summary
return entry
def itunes_author(self, itunes_author=None):
'''Get or set the itunes:author of the podcast episode. The content of
this tag is shown in the Artist column in iTunes. If the tag is not
present, iTunes uses the contents of the <author> tag. If
<itunes:author> is not present at the feed level, iTunes will use the
contents of <managingEditor>.
:param itunes_author: The author of the podcast.
:returns: The author of the podcast.
'''
if itunes_author is not None:
self.__itunes_author = itunes_author
return self.__itunes_author
def itunes_block(self, itunes_block=None):
'''Get or set the ITunes block attribute. Use this to prevent episodes
from appearing in the iTunes podcast directory.
:param itunes_block: Block podcast episodes.
:returns: If the podcast episode is blocked.
'''
if itunes_block is not None:
self.__itunes_block = itunes_block
return self.__itunes_block
def itunes_image(self, itunes_image=None):
'''Get or set the image for the podcast episode. This tag specifies the
artwork for your podcast. Put the URL to the image in the href
attribute. iTunes prefers square .jpg images that are at least
1400x1400 pixels, which is different from what is specified for the
standard RSS image tag. In order for a podcast to be eligible for an
iTunes Store feature, the accompanying image must be at least 1400x1400
pixels.
iTunes supports images in JPEG and PNG formats with an RGB color space
(CMYK is not supported). The URL must end in ".jpg" or ".png". If the
<itunes:image> tag is not present, iTunes will use the contents of the
RSS image tag.
If you change your podcasts image, also change the files name. iTunes
may not change the image if it checks your feed and the image URL is
the same. The server hosting your cover art image must allow HTTP head
requests for iTS to be able to automatically update your cover art.
:param itunes_image: Image of the podcast.
:returns: Image of the podcast.
'''
if itunes_image is not None:
if itunes_image.endswith('.jpg') or itunes_image.endswith('.png'):
self.__itunes_image = itunes_image
else:
raise ValueError('Image file must be png or jpg')
return self.__itunes_image
def itunes_duration(self, itunes_duration=None):
'''Get or set the duration of the podcast episode. The content of this
tag is shown in the Time column in iTunes.
The tag can be formatted HH:MM:SS, H:MM:SS, MM:SS, or M:SS (H = hours,
M = minutes, S = seconds). If an integer is provided (no colon
present), the value is assumed to be in seconds. If one colon is
present, the number to the left is assumed to be minutes, and the
number to the right is assumed to be seconds. If more than two colons
are present, the numbers farthest to the right are ignored.
:param itunes_duration: Duration of the podcast episode.
:returns: Duration of the podcast episode.
'''
if itunes_duration is not None:
itunes_duration = str(itunes_duration)
if len(itunes_duration.split(':')) > 3 or \
itunes_duration.lstrip('0123456789:') != '':
raise ValueError('Invalid duration format')
self.__itunes_duration = itunes_duration
return self.__itunes_duration
def itunes_explicit(self, itunes_explicit=None):
        '''Get or set the itunes:explicit value of the podcast episode. This
tag should be used to indicate whether your podcast episode contains
explicit material. The three values for this tag are "yes", "no", and
"clean".
If you populate this tag with "yes", an "explicit" parental advisory
graphic will appear next to your podcast artwork on the iTunes Store
and in the Name column in iTunes. If the value is "clean", the parental
advisory type is considered Clean, meaning that no explicit language or
adult content is included anywhere in the episodes, and a "clean"
graphic will appear. If the explicit tag is present and has any other
value (e.g., "no"), you see no indicator — blank is the default
advisory type.
:param itunes_explicit: If the podcast episode contains explicit
material.
:returns: If the podcast episode contains explicit material.
'''
if itunes_explicit is not None:
if itunes_explicit not in ('', 'yes', 'no', 'clean'):
raise ValueError('Invalid value for explicit tag')
self.__itunes_explicit = itunes_explicit
return self.__itunes_explicit
def itunes_is_closed_captioned(self, itunes_is_closed_captioned=None):
'''Get or set the is_closed_captioned value of the podcast episode.
This tag should be used if your podcast includes a video episode with
embedded closed captioning support. The two values for this tag are
"yes" and "no”.
:param is_closed_captioned: If the episode has closed captioning
support.
:returns: If the episode has closed captioning support.
'''
if itunes_is_closed_captioned is not None:
self.__itunes_is_closed_captioned = \
itunes_is_closed_captioned in ('yes', True)
return self.__itunes_is_closed_captioned
def itunes_order(self, itunes_order=None):
'''Get or set the itunes:order value of the podcast episode. This tag
can be used to override the default ordering of episodes on the store.
This tag is used at an <item> level by populating with the number value
in which you would like the episode to appear on the store. For
example, if you would like an <item> to appear as the first episode in
the podcast, you would populate the <itunes:order> tag with “1”. If
conflicting order values are present in multiple episodes, the store
will use default ordering (pubDate).
To remove the order from the episode set the order to a value below
zero.
:param itunes_order: The order of the episode.
:returns: The order of the episode.
'''
if itunes_order is not None:
self.__itunes_order = int(itunes_order)
return self.__itunes_order
def itunes_subtitle(self, itunes_subtitle=None):
'''Get or set the itunes:subtitle value for the podcast episode. The
contents of this tag are shown in the Description column in iTunes. The
subtitle displays best if it is only a few words long.
:param itunes_subtitle: Subtitle of the podcast episode.
:returns: Subtitle of the podcast episode.
'''
if itunes_subtitle is not None:
self.__itunes_subtitle = itunes_subtitle
return self.__itunes_subtitle
def itunes_summary(self, itunes_summary=None):
'''Get or set the itunes:summary value for the podcast episode. The
contents of this tag are shown in a separate window that appears when
the "circled i" in the Description column is clicked. It also appears
on the iTunes page for your podcast. This field can be up to 4000
characters. If <itunes:summary> is not included, the contents of the
<description> tag are used.
:param itunes_summary: Summary of the podcast episode.
:returns: Summary of the podcast episode.
'''
if itunes_summary is not None:
self.__itunes_summary = itunes_summary
return self.__itunes_summary
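
Continuing the podcast sketch above, the episode-level tags hang off each entry (again illustrative; the enclosure values are placeholders):

fe = fg.add_entry()
fe.id('http://example.org/ep1')
fe.title('Episode 1')
fe.description('First episode')
fe.enclosure('http://example.org/ep1.mp3', '1234567', 'audio/mpeg')
fe.podcast.itunes_duration('10:05')  # M:SS, MM:SS, H:MM:SS or HH:MM:SS
fe.podcast.itunes_is_closed_captioned('no')
fe.podcast.itunes_order(1)  # a value below zero removes the override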

View File

@@ -0,0 +1,60 @@
# -*- coding: utf-8 -*-
#
# Copyright 2015 Kenichi Sato <ksato9700@gmail.com>
#
'''
Extends FeedGenerator to support Syndication module
See below for details
http://web.resource.org/rss/1.0/modules/syndication/
'''
from feedgen.ext.base import BaseExtension
from feedgen.util import xml_elem
SYNDICATION_NS = 'http://purl.org/rss/1.0/modules/syndication/'
PERIOD_TYPE = ('hourly', 'daily', 'weekly', 'monthly', 'yearly')
def _set_value(channel, name, value):
if value:
newelem = xml_elem('{%s}' % SYNDICATION_NS + name, channel)
newelem.text = value
class SyndicationExtension(BaseExtension):
def __init__(self):
self._update_period = None
self._update_freq = None
self._update_base = None
def extend_ns(self):
return {'sy': SYNDICATION_NS}
    def extend_rss(self, rss_feed):
        channel = rss_feed[0]
        # Element names are lowercase per the syndication spec (sy:updatePeriod
        # etc.); guard the frequency so an unset value is not written as 'None'.
        _set_value(channel, 'updatePeriod', self._update_period)
        if self._update_freq is not None:
            _set_value(channel, 'updateFrequency', str(self._update_freq))
        _set_value(channel, 'updateBase', self._update_base)
def update_period(self, value):
if value not in PERIOD_TYPE:
raise ValueError('Invalid update period value')
self._update_period = value
return self._update_period
def update_frequency(self, value):
if type(value) is not int or value <= 0:
raise ValueError('Invalid update frequency value')
self._update_freq = value
return self._update_freq
def update_base(self, value):
# the value should be in W3CDTF format
self._update_base = value
return self._update_base
class SyndicationEntryExtension(BaseExtension):
pass
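
A quick sketch for the syndication module above (illustrative; assumes a FeedGenerator set up as in the earlier examples):

fg.load_extension('syndication')
fg.syndication.update_period('hourly')  # must be one of PERIOD_TYPE
fg.syndication.update_frequency(2)      # positive int, else ValueError
fg.syndication.update_base('2000-01-01T12:00+00:00')  # W3CDTF timestamp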

View File

@@ -0,0 +1,126 @@
# -*- coding: utf-8 -*-
'''
feedgen.ext.torrent
~~~~~~~~~~~~~~~~~~~
Extends the FeedGenerator to produce torrent feeds.
:copyright: 2016, Raspbeguy <raspbeguy@hashtagueule.fr>
:license: FreeBSD and LGPL, see license.* for more details.
'''
from feedgen.ext.base import BaseEntryExtension, BaseExtension
from feedgen.util import xml_elem
TORRENT_NS = 'http://xmlns.ezrss.it/0.1/dtd/'
class TorrentExtension(BaseExtension):
'''FeedGenerator extension for torrent feeds.
'''
def extend_ns(self):
return {'torrent': TORRENT_NS}
class TorrentEntryExtension(BaseEntryExtension):
'''FeedEntry extension for torrent feeds
'''
def __init__(self):
self.__torrent_filename = None
self.__torrent_infohash = None
self.__torrent_contentlength = None
self.__torrent_seeds = None
self.__torrent_peers = None
self.__torrent_verified = None
def extend_rss(self, entry):
'''Add additional fields to an RSS item.
        :param entry: The RSS item XML element to use.
'''
if self.__torrent_filename:
filename = xml_elem('{%s}filename' % TORRENT_NS, entry)
filename.text = self.__torrent_filename
if self.__torrent_contentlength:
contentlength = xml_elem('{%s}contentlength' % TORRENT_NS, entry)
contentlength.text = self.__torrent_contentlength
if self.__torrent_infohash:
infohash = xml_elem('{%s}infohash' % TORRENT_NS, entry)
infohash.text = self.__torrent_infohash
magnet = xml_elem('{%s}magneturi' % TORRENT_NS, entry)
magnet.text = 'magnet:?xt=urn:btih:' + self.__torrent_infohash
if self.__torrent_seeds:
seeds = xml_elem('{%s}seed' % TORRENT_NS, entry)
seeds.text = self.__torrent_seeds
if self.__torrent_peers:
peers = xml_elem('{%s}peers' % TORRENT_NS, entry)
peers.text = self.__torrent_peers
if self.__torrent_verified:
verified = xml_elem('{%s}verified' % TORRENT_NS, entry)
verified.text = self.__torrent_verified
def filename(self, torrent_filename=None):
'''Get or set the name of the torrent file.
:param torrent_filename: The name of the torrent file.
:returns: The name of the torrent file.
'''
if torrent_filename is not None:
self.__torrent_filename = torrent_filename
return self.__torrent_filename
def infohash(self, torrent_infohash=None):
'''Get or set the hash of the target file.
:param torrent_infohash: The target file hash.
:returns: The target hash file.
'''
if torrent_infohash is not None:
self.__torrent_infohash = torrent_infohash
return self.__torrent_infohash
def contentlength(self, torrent_contentlength=None):
'''Get or set the size of the target file.
:param torrent_contentlength: The target file size.
:returns: The target file size.
'''
if torrent_contentlength is not None:
self.__torrent_contentlength = torrent_contentlength
return self.__torrent_contentlength
def seeds(self, torrent_seeds=None):
        '''Get or set the number of seeds.
        :param torrent_seeds: The number of seeds.
        :returns: The number of seeds.
'''
if torrent_seeds is not None:
self.__torrent_seeds = torrent_seeds
return self.__torrent_seeds
def peers(self, torrent_peers=None):
        '''Get or set the number of peers.
        :param torrent_peers: The number of peers.
        :returns: The number of peers.
'''
if torrent_peers is not None:
self.__torrent_peers = torrent_peers
return self.__torrent_peers
def verified(self, torrent_verified=None):
'''Get or set the number of verified peers.
        :param torrent_verified: The number of verified peers.
        :returns: The number of verified peers.
'''
if torrent_verified is not None:
self.__torrent_verified = torrent_verified
return self.__torrent_verified
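
A minimal sketch for the torrent extension above (illustrative, not part of the vendored source; the infohash and URLs are placeholders):

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('torrent')
fg.title('Example torrents')
fg.link(href='http://example.org', rel='alternate')
fg.description('Torrent feed example')
fe = fg.add_entry()
fe.id('http://example.org/example.iso')
fe.title('Example ISO')
fe.torrent.filename('example.iso.torrent')
fe.torrent.infohash('0123456789abcdef0123456789abcdef01234567')  # also emits torrent:magneturi
fe.torrent.seeds('42')  # stored as-is; element text must be a string
rss = fg.rss_str(pretty=True)  # serialized RSS with the torrent:* tags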

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,25 @@
BSD 2-Clause License
Copyright 2011, Lars Kiesow <lkiesow@uos.de>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,165 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

View File

@@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-
'''
feedgen.util
~~~~~~~~~~~~
This file contains helper functions for the feed generator module.
:copyright: 2013, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
import locale
import sys
import lxml.etree # nosec - we configure a safe parser below
# Configure a safe parser which does not allow XML entity expansion
parser = lxml.etree.XMLParser(
attribute_defaults=False,
dtd_validation=False,
load_dtd=False,
no_network=True,
recover=False,
remove_pis=True,
resolve_entities=False,
huge_tree=False)
def xml_fromstring(xmlstring):
return lxml.etree.fromstring(xmlstring, parser) # nosec - safe parser
def xml_elem(name, parent=None, **kwargs):
if parent is not None:
return lxml.etree.SubElement(parent, name, **kwargs)
return lxml.etree.Element(name, **kwargs)
def ensure_format(val, allowed, required, allowed_values=None, defaults=None):
    '''Takes a dictionary or a list of dictionaries and checks if all keys are in
the set of allowed keys, if all required keys are present and if the values
of a specific key are ok.
:param val: Dictionaries to check.
:param allowed: Set of allowed keys.
:param required: Set of required keys.
:param allowed_values: Dictionary with keys and sets of their allowed
values.
:param defaults: Dictionary with default values.
:returns: List of checked dictionaries.
'''
if not val:
return []
if allowed_values is None:
allowed_values = {}
if defaults is None:
defaults = {}
    # Make sure that we have a list of dicts. Even if there is only one.
if not isinstance(val, list):
val = [val]
for elem in val:
if not isinstance(elem, dict):
raise ValueError('Invalid data (value is no dictionary)')
# Set default values
version = sys.version_info[0]
if version == 2:
items = defaults.iteritems()
else:
items = defaults.items()
for k, v in items:
elem[k] = elem.get(k, v)
if not set(elem.keys()) <= allowed:
raise ValueError('Data contains invalid keys')
if not set(elem.keys()) >= required:
raise ValueError('Data contains not all required keys')
if version == 2:
values = allowed_values.iteritems()
else:
values = allowed_values.items()
for k, v in values:
if elem.get(k) and not elem[k] in v:
raise ValueError('Invalid value for %s' % k)
return val
def formatRFC2822(date):
    '''Make sure the locale settings do not interfere with the time format.
'''
old = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, 'C')
date = date.strftime('%a, %d %b %Y %H:%M:%S %z')
locale.setlocale(locale.LC_ALL, old)
return date
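
Two quick checks on the helpers above (illustrative, assuming the vendored package is importable as feedgen):

from datetime import datetime, timezone
from feedgen.util import ensure_format, formatRFC2822

# A single dict is wrapped in a list and validated against the key sets;
# unknown keys or missing required keys raise ValueError.
ensure_format({'url': 'http://example.org/a.mp3'},
              set(['url', 'type']), set(['url']))
# -> [{'url': 'http://example.org/a.mp3'}]

# The locale is pinned to 'C' while formatting, so day/month names stay English.
formatRFC2822(datetime(2023, 2, 22, 10, 29, 46, tzinfo=timezone.utc))
# -> 'Wed, 22 Feb 2023 10:29:46 +0000'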

View File

@@ -0,0 +1,25 @@
# -*- coding: utf-8 -*-
'''
feedgen.version
~~~~~~~~~~~~~~~
:copyright: 2013-2018, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
'Version of python-feedgen represented as tuple'
version = (0, 9, 0)
'Version of python-feedgen represented as string'
version_str = '.'.join([str(x) for x in version])
version_major = version[:1]
version_minor = version[:2]
version_full = version
version_major_str = '.'.join([str(x) for x in version_major])
version_minor_str = '.'.join([str(x) for x in version_minor])
version_full_str = '.'.join([str(x) for x in version_full])

View File

@@ -0,0 +1,24 @@
Copyright (c) 2013, Dave Mankoff
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dave Mankoff nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,30 @@
"""
Copyright (c) 2013, Dave Mankoff
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dave Mankoff nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from .main import minify, Minifier
__version__ = '0.1.12'
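
A one-line sketch of the public API above (illustrative; upstream installs as htmlmin, so adjust the import to wherever the vendored copy lives):

import htmlmin

html = '<html>  <head>  </head>  <body>  hello,   world  </body>  </html>'
out = htmlmin.minify(html, remove_comments=True, remove_empty_space=True)
# runs of whitespace collapse and comments are dropped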

View File

@@ -0,0 +1,175 @@
#!/usr/bin/env python
"""
Copyright (c) 2013, Dave Mankoff
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dave Mankoff nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import argparse
import codecs
import locale
import io
import sys
#import htmlmin
from . import Minifier
parser = argparse.ArgumentParser(
description='Minify HTML',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument('input_file',
nargs='?',
metavar='INPUT',
help='File path to html file to minify. Defaults to stdin.',
)
parser.add_argument('output_file',
nargs='?',
metavar='OUTPUT',
help="File path to output to. Defaults to stdout.",
)
parser.add_argument('-c', '--remove-comments',
help=(
'''When set, comments will be removed. They can be kept on an individual basis
by starting them with a '!': <!--! comment -->. The '!' will be removed from
the final output. If you want a '!' as the leading character of your comment,
put two of them: <!--!! comment -->.
'''),
action='store_true')
parser.add_argument('-s', '--remove-empty-space',
help=(
'''When set, this removes empty space between tags in certain cases.
Specifically, it will remove empty space if and only if a newline
character occurs within the space. Thus, code like
'<span>x</span> <span>y</span>' will be left alone, but code such as
' ...
</head>
<body>
...'
will become '...</head><body>...'. Note that this CAN break your
html if you spread two inline tags over two lines. Use with caution.
'''),
action='store_true')
parser.add_argument('--remove-all-empty-space',
help=(
'''When set, this removes ALL empty space between tags. WARNING: this can and
likely will cause unintended consequences. For instance, '<i>X</i> <i>Y</i>'
will become '<i>X</i><i>Y</i>'. Putting whitespace along with other text will
avoid this problem. Only use if you are confident in the result. Whitespace is
not removed from inside of tags, thus '<span> </span>' will be left alone.
'''),
action='store_true')
parser.add_argument('--keep-optional-attribute-quotes',
help=(
'''When set, this keeps all attribute quotes, even if they are optional.
'''),
action='store_true')
parser.add_argument('-H', '--in-head',
help=(
'''If you are parsing only a fragment of HTML, and the fragment occurs in the
head of the document, setting this will remove some extra whitespace.
'''),
action='store_true')
parser.add_argument('-k', '--keep-pre-attr',
help=(
'''HTMLMin supports the proprietary attribute 'pre' that can be added to elements
to prevent minification. This attribute is removed by default. Set this flag to
keep the 'pre' attributes in place.
'''),
action='store_true')
parser.add_argument('-a', '--pre-attr',
help=(
'''The attribute htmlmin looks for to find blocks of HTML that it should not
minify. This attribute will be removed from the HTML unless '-k' is
specified. Defaults to 'pre'.
'''),
default='pre')
parser.add_argument('-p', '--pre-tags',
metavar='TAG',
help=(
'''By default, the contents of 'pre', and 'textarea' tags are left unminified.
You can specify different tags using the --pre-tags option. 'script' and 'style'
tags are always left unminified.
'''),
nargs='*',
default=['pre', 'textarea'])
parser.add_argument('-e', '--encoding',
help=("Encoding to read and write with. Default 'utf-8'."
" When reading from stdin, attempts to use the system's"
" encoding before defaulting to utf-8.\n\n"),
default=None,
)
def main():
args = parser.parse_args()
minifier = Minifier(
remove_comments=args.remove_comments,
remove_empty_space=args.remove_empty_space,
remove_optional_attribute_quotes=not args.keep_optional_attribute_quotes,
pre_tags=args.pre_tags,
keep_pre=args.keep_pre_attr,
pre_attr=args.pre_attr,
)
default_encoding = args.encoding or 'utf-8'
if args.input_file:
inp = codecs.open(args.input_file, encoding=default_encoding)
else:
encoding = args.encoding or sys.stdin.encoding \
or locale.getpreferredencoding() or default_encoding
inp = io.open(sys.stdin.fileno(), encoding=encoding)
for line in inp.readlines():
minifier.input(line)
if args.output_file:
codecs.open(
args.output_file, 'w', encoding=default_encoding).write(minifier.output)
else:
encoding = args.encoding or sys.stdout.encoding \
or locale.getpreferredencoding() or default_encoding
io.open(sys.stdout.fileno(), 'w', encoding=encoding).write(minifier.output)
if __name__ == '__main__':
main()
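
The same Minifier can also be driven incrementally, exactly as main() above does, which is handy for large inputs (a sketch, not part of the source; adjust the import for the vendored layout):

from htmlmin import Minifier

m = Minifier(remove_comments=True)
for chunk in ('<html> <!-- dropped -->', ' <body>hi</body></html>'):
    m.input(chunk)  # feed arbitrary chunks; parser state carries across calls
print(m.output)  # the collected result, as main() writes out above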

View File

@@ -0,0 +1,64 @@
"""
Copyright (c) 2013, Dave Mankoff
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dave Mankoff nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from .main import Minifier
def htmlmin(*args, **kwargs):
"""Minifies HTML that is returned by a function.
A simple decorator that minifies the HTML output of any function that it
decorates. It supports all the same options that :class:`htmlmin.minify` has.
With no options, it uses ``minify``'s default settings::
@htmlmin
def foobar():
return ' minify me! '
or::
@htmlmin(remove_comments=True)
def foobar():
return ' minify me! <!-- and remove me! -->'
"""
def _decorator(fn):
minify = Minifier(**kwargs).minify
def wrapper(*a, **kw):
return minify(fn(*a, **kw))
return wrapper
if len(args) == 1:
if callable(args[0]) and not kwargs:
return _decorator(args[0])
else:
            raise RuntimeError(
                'htmlmin decorator does not accept positional arguments')
    elif len(args) > 1:
        raise RuntimeError(
            'htmlmin decorator does not accept positional arguments')
else:
return _decorator

View File

@@ -0,0 +1,204 @@
"""
Copyright (c) 2015, Dave Mankoff
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dave Mankoff nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import re
try:
from html import escape
except ImportError:
from cgi import escape
import re
NO_QUOTES = 0
SINGLE_QUOTE = 1
DOUBLE_QUOTE = 2
UPPER_A = ord('A')
UPPER_F = ord('F')
UPPER_Z = ord('Z')
LOWER_A = ord('a')
LOWER_F = ord('f')
LOWER_Z = ord('z')
ZERO = ord('0')
NINE = ord('9')
# https://www.w3.org/TR/html5/syntax.html#attributes-0
CHARS_TO_QUOTE_RE = re.compile(u'[\x20\x09\x0a\x0c\x0d=><`]')
def escape_tag(val):
return escape(val)
def escape_attr_name(val):
return escape(val)
def escape_attr_value(val, double_quote=False):
val = escape_ambiguous_ampersand(val)
has_html_tag = '<' in val or '>' in val
if double_quote:
return (val.replace('"', '&#34;'), DOUBLE_QUOTE)
double_quote_count = 0
single_quote_count = 0
for ch in val:
if ch == '"':
double_quote_count += 1
elif ch == "'":
single_quote_count += 1
if double_quote_count > single_quote_count:
return (val.replace("'", '&#39;'), SINGLE_QUOTE)
elif single_quote_count:
return (val.replace('"', '&#34;'), DOUBLE_QUOTE)
if not val or CHARS_TO_QUOTE_RE.search(val):
return (val, DOUBLE_QUOTE)
return (val, NO_QUOTES)
def escape_ambiguous_ampersand(val):
# TODO: this function could probably be made a lot faster.
if not '&' in val: # short circuit for speed
return val
state = 0
result = []
amp_buff = []
for c in val:
if state == 0: # beginning
if c == '&':
state = 1
else:
result.append(c)
elif state == 1: # ampersand
ord_c = ord(c)
if (UPPER_A <= ord_c <= UPPER_Z or
LOWER_A <= ord_c <= LOWER_Z or
ZERO <= ord_c <= NINE):
amp_buff.append(c) # TODO: use "name character references" section
# https://html.spec.whatwg.org/multipage/syntax.html#named-character-references
elif c == '#':
state = 2
elif c == ';':
if amp_buff:
result.append('&')
result.extend(amp_buff)
result.append(';')
else:
result.append('&;')
state = 0
amp_buff = []
elif c == '&':
if amp_buff:
result.append('&amp;')
result.extend(amp_buff)
else:
result.append('&')
amp_buff = []
else:
result.append('&')
result.extend(amp_buff)
result.append(c)
state = 0
amp_buff = []
elif state == 2: # numeric character reference
ord_c = ord(c)
if c == 'x' or c == 'X':
state = 3
elif ZERO <= ord_c <= NINE:
amp_buff.append(c)
elif c == ';':
if amp_buff:
result.append('&#')
result.extend(amp_buff)
result.append(';')
else:
result.append('&#;')
state = 0
amp_buff = []
elif c == '&':
if amp_buff:
result.append('&amp;#')
result.extend(amp_buff)
else:
result.append('&#')
state = 1
amp_buff = []
else:
if amp_buff:
result.append('&amp;#')
result.extend(amp_buff)
result.append(c)
else:
result.append('&#')
result.append(c)
state = 0
amp_buff = []
elif state == 3: # hex character reference
ord_c = ord(c)
if (UPPER_A <= ord_c <= UPPER_F or
LOWER_A <= ord_c <= LOWER_F or
ZERO <= ord_c <= NINE):
amp_buff.append(c)
elif c == ';':
if amp_buff:
result.append('&#x')
result.extend(amp_buff)
result.append(';')
else:
result.append('&#x;')
state = 0
amp_buff = []
elif c == '&':
if amp_buff:
result.append('&amp;#x')
result.extend(amp_buff)
else:
result.append('&#x')
state = 1
amp_buff = []
else:
if amp_buff:
result.append('&amp;#x')
result.extend(amp_buff)
result.append(c)
else:
result.append('&#x')
result.append(c)
state = 0
amp_buff = []
if state == 1:
result.append('&amp;')
result.extend(amp_buff)
elif state == 2:
result.append('&amp;#')
result.extend(amp_buff)
elif state == 3:
result.append('&amp;#x')
result.extend(amp_buff)
return ''.join(result)
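# Illustrative behavior of the helpers above, hedged against the state machine
# as written (the guard keeps the demo from running on import):
if __name__ == '__main__':
    print(escape_ambiguous_ampersand('tom&jerry'))    # 'tom&amp;jerry' (ends mid-reference)
    print(escape_ambiguous_ampersand('fish &chips;')) # unchanged (complete reference)
    print(escape_attr_value('simple'))                # ('simple', 0), i.e. NO_QUOTES
    print(escape_attr_value('hello world'))           # ('hello world', 2): space forces DOUBLE_QUOTE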

View File

@@ -0,0 +1,193 @@
"""
Copyright (c) 2013, Dave Mankoff
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dave Mankoff nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from . import parser
def minify(input,
remove_comments=False,
remove_empty_space=False,
remove_all_empty_space=False,
reduce_empty_attributes=True,
reduce_boolean_attributes=False,
remove_optional_attribute_quotes=True,
convert_charrefs=True,
keep_pre=False,
pre_tags=parser.PRE_TAGS,
pre_attr='pre',
cls=parser.HTMLMinParser):
"""Minifies HTML in one shot.
:param input: A string containing the HTML to be minified.
:param remove_comments: Remove comments found in HTML. Individual comments can
be maintained by putting a ``!`` as the first character inside the comment.
Thus::
<!-- FOO --> <!--! BAR -->
Will become simply::
<!-- BAR -->
The added exclamation is removed.
:param remove_empty_space: Remove whitespace found in HTML between an opening
and a closing tag when it contains a newline or carriage return. If the
whitespace consists only of spaces and/or tabs, it is turned into a single
space. Be careful, this can have unintended consequences.
:param remove_all_empty_space: A more extreme version of
``remove_empty_space``, this removes all empty whitespace found between
tags. This is almost guaranteed to break your HTML unless you are very
careful.
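:param reduce_empty_attributes: When True (the default), attributes with an
empty value are reduced to the bare attribute name, so ``attr=""`` becomes
simply ``attr``. (This parameter is present in the signature but was
undocumented here; the description is inferred from the parser code below.)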
:param reduce_boolean_attributes: Where allowed by the HTML5 specification,
attributes such as 'disabled' and 'readonly' will have their value removed,
so 'disabled="true"' will simply become 'disabled'. This is generally a
good option to turn on except when JavaScript relies on the values.
:param remove_optional_attribute_quotes: When True, optional quotes around
attributes are removed. When False, all attribute quotes are left intact.
Defaults to True.
:param convert_charrefs: Decode character references such as &amp; and &#46;
to their single character values where safe. This currently only applies to
attributes. Data content between tags will be left encoded.
:param keep_pre: By default, htmlmin uses the special attribute ``pre`` to
allow you to demarcate areas of HTML that should not be minified. It removes
this attribute as it finds it. Setting this value to ``True`` tells htmlmin
to leave the attribute in the output.
:param pre_tags: A list of tag names that should never be minified. You are
free to change this list as you see fit, but you will probably want to
include ``pre`` and ``textarea`` if you make any changes to the list. Note
that ``<script>`` and ``<style>`` tags are never minimized.
:param pre_attr: Specifies the attribute that, when found in an HTML tag,
indicates that the content of the tag should not be minified. Defaults to
``pre``. You can also prefix individual tag attributes with
``{pre_attr}-`` to prevent the contents of the individual attribute from
being changed.
:return: A string containing the minified HTML.
If you are going to be minifying multiple HTML documents, each with the same
settings, consider using :class:`.Minifier`.
"""
minifier = cls(
remove_comments=remove_comments,
remove_empty_space=remove_empty_space,
remove_all_empty_space=remove_all_empty_space,
reduce_empty_attributes=reduce_empty_attributes,
reduce_boolean_attributes=reduce_boolean_attributes,
remove_optional_attribute_quotes=remove_optional_attribute_quotes,
convert_charrefs=convert_charrefs,
keep_pre=keep_pre,
pre_tags=pre_tags,
pre_attr=pre_attr)
minifier.feed(input)
minifier.close()
return minifier.result
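# One-shot usage sketch (output shown is approximate):
#   >>> minify('<p>  hello  <!-- x -->  world  </p>', remove_comments=True)
#   '<p> hello world </p>'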
class Minifier(object):
"""An object that supports HTML Minification.
Options are passed into this class at initialization time and are then
persisted across each use of the instance. If you are going to be minifying
multiple pieces of HTML, this will be more efficient than using
:class:`htmlmin.minify`.
See :class:`htmlmin.minify` for an explanation of options.
"""
def __init__(self,
remove_comments=False,
remove_empty_space=False,
remove_all_empty_space=False,
reduce_empty_attributes=True,
reduce_boolean_attributes=False,
remove_optional_attribute_quotes=True,
convert_charrefs=True,
keep_pre=False,
pre_tags=parser.PRE_TAGS,
pre_attr='pre',
cls=parser.HTMLMinParser):
"""Initialize the Minifier.
See :class:`htmlmin.minify` for an explanation of options.
"""
self._parser = cls(
remove_comments=remove_comments,
remove_empty_space=remove_empty_space,
remove_all_empty_space=remove_all_empty_space,
reduce_empty_attributes=reduce_empty_attributes,
reduce_boolean_attributes=reduce_boolean_attributes,
remove_optional_attribute_quotes=remove_optional_attribute_quotes,
convert_charrefs=convert_charrefs,
keep_pre=keep_pre,
pre_tags=pre_tags,
pre_attr=pre_attr)
def minify(self, *input):
"""Runs HTML through the minifier in one pass.
:param input: HTML to be fed into the minimizer. Multiple chunks of HTML
can be provided, and they are fed in sequentially as if they were
concatenated.
:returns: A string containing the minified HTML.
This is the simplest way to use an existing ``Minifier`` instance. This
method takes in HTML and minifies it, returning the result. Note that this
method resets the internal state of the parser before it does any work. If
there is pending HTML in the buffers, it will be lost.
"""
self._parser.reset()
self.input(*input)
return self.finalize()
def input(self, *input):
"""Feed more HTML into the input stream
:param input: HTML to be fed into the minimizer. Multiple chunks of HTML
can be provided, and they are fed in sequentially as if they were
concatenated. You can also call this method multiple times to achieve
the same effect.
"""
for i in input:
self._parser.feed(i)
@property
def output(self):
"""Retrieve the minified output generated thus far.
"""
return self._parser.result
def finalize(self):
"""Finishes current input HTML and returns mininified result.
This method flushes any remaining input HTML and returns the minified
result. It resets the state of the internal parser in the process so that
new HTML can be minified. Be sure to call this method before you reuse
the ``Minifier`` instance on a new HTML document.
"""
self._parser.close()
result = self._parser.result
self._parser.reset()
return result
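# Streaming usage sketch for the class above. Guarded so it does not run on
# import; because of the relative import at the top, run it as a module
# (e.g. python -m, package path assumed). Output is approximate:
if __name__ == '__main__':
    m = Minifier(remove_comments=True)
    m.input('<p>  hello ')
    m.input(' world  <!-- bye --></p>')
    print(m.finalize())  # roughly: '<p> hello world </p>'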

View File

@@ -0,0 +1,92 @@
"""
Copyright (c) 2013, Dave Mankoff
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dave Mankoff nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from .main import Minifier
class HTMLMinMiddleware(object):
"""WSGI Middleware that minifies html on the way out.
:param by_default: Specifies if minification should be turned on or off by
default. Defaults to ``True``.
:param keep_header: The middleware recognizes one custom HTTP header that
can be used to turn minification on or off on a per-request basis:
``X-HTML-Min-Enable``. Setting the header to ``true`` will turn minification
on; anything else will turn minification off. If ``by_default`` is set to
``False``, this header is how you would turn minification back on. The
middleware, by default, removes the header from the output. Setting this
to ``True`` leaves the header intact.
:param debug: A quick setting to turn all minification off. The middleware
is effectively bypassed.
This simple middleware minifies any HTML content that passes through it. Any
additional keyword arguments beyond the three settings the middleware has are
passed on to the internal minifier. The documentation for the options can
be found under :class:`htmlmin.minify`.
"""
def __init__(self, app, by_default=True, keep_header=False,
debug=False, **kwargs):
self.app = app
self.by_default = by_default
self.debug = debug
self.keep_header = keep_header
self.minifier = Minifier(**kwargs)
def __call__(self, environ, start_response):
if self.debug:
return self.app(environ, start_response)
should_minify = [] # need to use a mutable object so we can change it
# in a different scope.
def minified_start_response(status, headers, exc_info=None):
should_minify.append(self.should_minify(headers))
if not self.keep_header:
headers = [(header, value) for header, value in
headers if header != 'X-HTML-Min-Enable']
start_response(status, headers, exc_info)
html = [i for i in self.app(environ, minified_start_response)]
if should_minify[0]:
return [self.minifier.minify(*html)]
return html
def should_minify(self, headers):
is_html = False
flag_header = None
for header, value in headers:
if not is_html and header == 'Content-Type' and value == 'text/html':
is_html = True
if flag_header is not None:
break
if flag_header is None and header == 'X-HTML-Min-Enable':
flag_header = (value.lower() == 'true')
if is_html:
break
return is_html and (
(self.by_default and flag_header != False) or
(not self.by_default and flag_header))
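# A minimal wiring sketch for the middleware above. Note that __call__ feeds
# the response chunks straight into the minifier, so the demo app yields text
# rather than the bytes a strict WSGI app would produce; that is an assumption
# of this sketch, not a claim about production use:
def _demo_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/html')])
    return ['<html>  <body>  hi  <!-- secret -->  </body>  </html>']

application = HTMLMinMiddleware(_demo_app, remove_comments=True)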

View File

@@ -0,0 +1,408 @@
"""
Copyright (c) 2013, Dave Mankoff
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Dave Mankoff nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import unicode_literals
import logging
import sys
import re
from .python3html.parser import HTMLParser
from . import escape
# https://www.w3.org/TR/html5/single-page.html#space-character
HTML_SPACE_RE = re.compile('[\x20\x09\x0a\x0c\x0d]+')
HTML_ALL_SPACE_RE = re.compile('^[\x20\x09\x0a\x0c\x0d]+$')
HTML_LEADING_SPACE_RE = re.compile(
'^[\x20\x09\x0a\x0c\x0d]+')
HTML_TRAILING_SPACE_RE = re.compile(
'[\x20\x09\x0a\x0c\x0d]+$')
HTML_LEADING_TRAILING_SPACE_RE = re.compile(
'(^[\x20\x09\x0a\x0c\x0d]+)|([\x20\x09\x0a\x0c\x0d]+$)')
PRE_TAGS = ('pre', 'textarea') # styles and scripts are never minified
# http://www.w3.org/TR/html51/syntax.html#elements-0
NO_CLOSE_TAGS = ('area', 'base', 'br', 'col', 'command', 'embed', 'hr', 'img',
'input', 'keygen', 'link', 'meta', 'param', 'source', 'track',
'wbr')
# http://www.w3.org/TR/html51/index.html#attributes-1
BOOLEAN_ATTRIBUTES = {
'audio': ('autoplay', 'controls', 'hidden', 'loop', 'muted',),
'button': ('autofocus', 'disabled', 'formnovalidate', 'hidden',),
'command': ('checked', 'disabled', 'hidden'),
'dialog': ('hidden', 'open',),
'fieldset': ('disabled', 'hidden',),
'form': ('hidden', 'novalidate',),
'iframe': ('hidden', 'seamless',),
'img': ('hidden', 'ismap',),
'input': ('autofocus', 'checked', 'disabled', 'formnovalidate', 'hidden',
'multiple', 'readonly', 'required',),
'keygen': ('autofocus', 'disabled', 'hidden',),
'object': ('hidden', 'typesmustmatch',),
'ol': ('hidden', 'reversed',),
'optgroup': ('disabled', 'hidden',),
'option': ('disabled', 'hidden', 'selected',),
'script': ('async', 'defer', 'hidden',),
'select': ('autofocus', 'disabled', 'hidden', 'multiple', 'required',),
'style': ('hidden', 'scoped',),
'textarea': ('autofocus', 'disabled', 'hidden', 'readonly', 'required',),
'track': ('default', 'hidden', ),
'video': ('autoplay', 'controls', 'hidden', 'loop', 'muted',),
'*': ('hidden',),
}
# a list of tags and tags that they are closed by
TAG_SETS = {
'li': ('li',),
'dd': ('dd', 'dt'),
'rp': ('rp', 'rt'),
'p': ('address', 'article', 'aside', 'blockquote', 'dir', 'div', 'dl',
'fieldset', 'footer', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hgroup', 'hr', 'menu', 'nav', 'ol', 'p', 'pre', 'section',
'table', 'ul'),
'optgroup': ('optgroup',),
'option': ('option', 'optgroup'),
'colgroup': '*',
'tbody': ('tbody', 'tfoot'),
'tfoot': ('tbody',),
'tr': ('tr',),
'td': ('td', 'th'),
}
TAG_SETS['dt'] = TAG_SETS['dd']
TAG_SETS['rt'] = TAG_SETS['rp']
TAG_SETS['thead'] = TAG_SETS['tbody']
TAG_SETS['th'] = TAG_SETS['td']
# Tag omission rules:
# http://www.w3.org/TR/html51/syntax.html#optional-tags
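# For example, an open <li> is implicitly closed by the next <li>, and an open
# <p> is implicitly closed by any of the block-level tags listed above, so
# markup like '<ul><li>one<li>two</ul>' parses with each <li> closed at the
# right point even though no </li> appears in the input.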
class HTMLMinError(Exception): pass
class ParseError(HTMLMinError): pass
class OpenTagNotFoundError(ParseError): pass
class HTMLMinParser(HTMLParser):
def __init__(self,
remove_comments=False,
remove_empty_space=False,
remove_all_empty_space=False,
reduce_empty_attributes=True,
reduce_boolean_attributes=False,
remove_optional_attribute_quotes=True,
convert_charrefs=True,
keep_pre=False,
pre_tags=PRE_TAGS,
pre_attr='pre'):
if sys.version_info[0] >= 3 and sys.version_info[1] >= 4:
# convert_charrefs is True by default in Python 3.5.0 and newer. It was
# introduced in 3.4.
HTMLParser.__init__(self, convert_charrefs=False)
else:
HTMLParser.__init__(self)
self.keep_pre = keep_pre
self.pre_tags = pre_tags
self.remove_comments = remove_comments
self.remove_empty_space = remove_empty_space
self.remove_all_empty_space = remove_all_empty_space
self.reduce_empty_attributes = reduce_empty_attributes
self.reduce_boolean_attributes = reduce_boolean_attributes
self.remove_optional_attribute_quotes = remove_optional_attribute_quotes
self.convert_charrefs = convert_charrefs
self.pre_attr = pre_attr
self.reset()
def _tag_lang(self):
return self._tag_stack[0][2] if self._tag_stack else None
def build_tag(self, tag, attrs, close_tag):
has_pre = False
if self.reduce_boolean_attributes:
bool_attrs = BOOLEAN_ATTRIBUTES.get(tag, BOOLEAN_ATTRIBUTES['*'])
else:
bool_attrs = False
lang = self._tag_lang()
attrs = list(attrs) # We're modifying it in place
last_quoted = last_no_slash = i = -1
for k, v in attrs:
pre_prefix = k.startswith("{}-".format(self.pre_attr))
if pre_prefix:
k = k[len(self.pre_attr)+1:]
if k == self.pre_attr:
has_pre = True
if not self.keep_pre and not pre_prefix:
continue
if v and self.convert_charrefs and not pre_prefix:
v = HTMLParser.unescape(self, v)
if k == 'lang':
lang = v
if v == self._tag_lang():
continue
i += 1
if not pre_prefix:
k = escape.escape_attr_name(k)
if (v is None or (not v and self.reduce_empty_attributes) or
(bool_attrs and k in bool_attrs)):
# For our use case, we treat boolean attributes as quoted because they
# don't require space between them and "/>" in closing tags.
attrs[i] = k
last_quoted = i
else:
if pre_prefix:
has_double_quotes = '"' in v
has_single_quotes = "'" in v
if not has_double_quotes:
if not has_single_quotes and self.remove_optional_attribute_quotes:
q = escape.NO_QUOTES
else:
q = escape.DOUBLE_QUOTE
elif not has_single_quotes:
q = escape.SINGLE_QUOTE
else:
logging.error('Unsafe content found in pre-attribute. Escaping.')
(v, q) = escape.escape_attr_value(
v, double_quote=not self.remove_optional_attribute_quotes)
else:
(v, q) = escape.escape_attr_value(
v, double_quote=not self.remove_optional_attribute_quotes)
if q == escape.NO_QUOTES:
attrs[i] = '%s=%s' % (k, v)
if v[-1] != '/':
last_no_slash = i
else:
q = '"' if q == escape.DOUBLE_QUOTE else "'"
attrs[i] = '%s=%s%s%s' % (k, q, v, q)
last_quoted = i
i += 1
if i != len(attrs):
del attrs[i:]
# 1. If there are no attributes, no additional space is necessary.
# 2. If last attribute is quoted, no additional space is necessary.
# 3. Two things are happening here:
# a) according to the standard, <foo bar=baz/> should be treated as <foo
# bar="baz/"> so space is necessary if this is self-closing tag,
# however
# b) reportedly (https://github.com/mankyd/htmlmin/pull/12), older
# versions of WebKit interpret <foo bar=baz/> as self-closing tag so
# we need the space if the last argument ends with a slash.
space_maybe = ''
if attrs:
needs_space = lambda last_attr: (last_attr[-1] not in '"\'' and
(close_tag or last_attr[-1] == '/'))
if needs_space(attrs[-1][-1]):
# If moving attributes around can help, do it. Otherwise bite the
# bullet and put the space in.
i = last_no_slash if last_quoted == -1 else last_quoted
if i == -1 or needs_space(attrs[i]):
space_maybe = ' '
else:
attrs.append(attrs[i])
del attrs[i]
return has_pre, '<%s%s%s%s%s>' % (escape.escape_tag(tag),
' ' if attrs else '',
' '.join(attrs),
space_maybe,
'/' if close_tag else ''), lang
def handle_decl(self, decl):
if (len(self._data_buffer) == 1 and
HTML_SPACE_RE.match(self._data_buffer[0][0])):
self._data_buffer = []
self._data_buffer.append('<!' + decl + '>')
self._after_doctype = True
def _close_tags_up_to(self, tag):
num_pres = 0
i = 0
for i, t in enumerate(self._tag_stack):
if t[1]:
num_pres += 1
if t[0] == tag:
break
# Only the html tag can close out everything. Put on the brakes if
# we encounter a closing tag that we didn't recognize.
if tag != 'html' and t[0] in ('body', 'html', 'head'):
raise OpenTagNotFoundError()
self._tag_stack = self._tag_stack[i+1:]
return num_pres
def handle_starttag(self, tag, attrs):
self._after_doctype = False
if tag == 'head':
self._in_head = True
elif self._in_head and tag == 'title':
self._in_title = True
self._title_newly_opened = True
for t in self._tag_stack:
closed_by_tags = TAG_SETS.get(t[0])
if closed_by_tags and (closed_by_tags == '*' or tag in closed_by_tags):
self._in_pre_tag -= self._close_tags_up_to(t[0])
break
has_pre, data, lang = self.build_tag(tag, attrs, False)
start_pre = False
if (has_pre or self._in_pre_tag > 0 or
tag == 'script' or tag == 'style' or tag in self.pre_tags):
self._in_pre_tag += 1
start_pre = True
self._tag_stack.insert(0, (tag, start_pre, lang))
self._data_buffer.append(data)
def handle_endtag(self, tag):
# According to the spec, <p> tags don't get closed when a parent <a>
# tag closes them. Here's some logic that addresses this.
if tag == 'a':
contains_p = False
for i, t in enumerate(self._tag_stack):
if t[0] == 'p':
contains_p = True
elif t[0] == 'a':
break
if contains_p: # the p tag, and all its children should be left open
a_tag = self._tag_stack.pop(i)
if a_tag[1]:
self._in_pre_tag -= 1
else:
if tag == 'head':
# TODO: Did we know that we were in a head tag?! If not, we need to
# reminify everything to remove extra spaces.
self._in_head = False
elif tag == 'title':
self._in_title = False
self._title_newly_opened = False
try:
self._in_pre_tag -= self._close_tags_up_to(tag)
except OpenTagNotFoundError:
# Some tags don't require a start tag. Most do. Either way, we leave
# closing tags along since they affect output. For instance, a '</p>'
# results in a '<p></p>' in Chrome.
pass
if tag not in NO_CLOSE_TAGS:
self._data_buffer.extend(['</', escape.escape_tag(tag), '>'])
def handle_startendtag(self, tag, attrs):
self._after_doctype = False
data = self.build_tag(tag, attrs, tag not in NO_CLOSE_TAGS)[1]
self._data_buffer.append(data)
def handle_comment(self, data):
if not self.remove_comments or re.match(r'^(?:!|\[if\s)', data):
self._data_buffer.append('<!--{}-->'.format(
data[1:] if len(data) and data[0] == '!' else data))
def handle_data(self, data):
if self._in_pre_tag > 0:
self._data_buffer.append(data)
else:
# remove_all_empty_space matches everything. remove_empty_space only
# matches if there's a newline involved.
if self.remove_all_empty_space or self._in_head or self._after_doctype:
if HTML_ALL_SPACE_RE.match(data):
return
elif (self.remove_empty_space and HTML_ALL_SPACE_RE.match(data) and
('\n' in data or '\r' in data)):
return
# if we're in the title, remove leading and trailing whitespace.
# note that the title may be parsed in chunks if entityref's or charrefs
# are encountered.
if self._in_title:
if self.__title_trailing_whitespace:
self._data_buffer.append(' ')
self.__title_trailing_whitespace = (
HTML_ALL_SPACE_RE.match(data[-1]) is not None)
if self._title_newly_opened:
self._title_newly_opened = False
data = HTML_LEADING_TRAILING_SPACE_RE.sub('', data)
else:
data = HTML_TRAILING_SPACE_RE.sub(
'', HTML_LEADING_TRAILING_SPACE_RE.sub(' ', data))
data = HTML_SPACE_RE.sub(' ', data)
if not data:
return
if self._in_pre_tag == 0 and self._data_buffer:
# If we're not in a pre block, it's possible that we append two spaces
# together, which we want to avoid. For instance, if we remove a comment
# from between two blocks of text: a <!-- B --> c => a c.
if data[0] == ' ' and self._data_buffer[-1][-1] == ' ':
data = data[1:]
if not data:
return
self._data_buffer.append(data)
def handle_entityref(self, data):
if self._in_title:
if not self._title_newly_opened and self.__title_trailing_whitespace:
self._data_buffer.append(' ')
self.__title_trailing_whitespace = False
self._title_newly_opened = False
self._data_buffer.append('&{};'.format(data))
def handle_charref(self, data):
if self._in_title:
if not self._title_newly_opened and self.__title_trailing_whitespace:
self._data_buffer.append(' ')
self.__title_trailing_whitespace = False
self._title_newly_opened = False
self._data_buffer.append('&#{};'.format(data))
def handle_pi(self, data):
self._data_buffer.append('<?' + data + '>')
def unknown_decl(self, data):
self._data_buffer.append('<![' + data + ']>')
def reset(self):
self._data_buffer = []
self._in_pre_tag = 0
self._in_head = False
self._in_title = False
self._after_doctype = False
self._tag_stack = []
self._title_newly_opened = False
self.__title_trailing_whitespace = False
HTMLParser.reset(self)
def unescape(self, val):
"""Override this method so that we can handle char ref conversion ourself.
"""
return val
@property
def result(self):
return ''.join(self._data_buffer)
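# Direct-use sketch of the parser above. Guarded demo; because of the relative
# imports at the top, run it as a module (e.g. python -m, package path
# assumed). Output is approximate and shows the omitted </li> tags staying
# omitted:
if __name__ == '__main__':
    p = HTMLMinParser(remove_empty_space=True)
    p.feed('<ul>\n  <li>one\n  <li>two\n</ul>')
    p.close()
    print(p.result)  # roughly: '<ul><li>one <li>two </ul>'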

View File

@@ -0,0 +1,139 @@
"""
General functions for HTML manipulation.
"""
import re as _re
try:
from html.entities import html5 as _html5
unichr = chr
except ImportError:
import htmlentitydefs
_html5 = {'apos;':u"'"}
for k, v in htmlentitydefs.name2codepoint.iteritems():
_html5[k + ';'] = unichr(v)
__all__ = ['escape', 'unescape']
def escape(s, quote=True):
"""
Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true (the default), the quotation mark
characters, both double quote (") and single quote (') characters are also
translated.
"""
s = s.replace("&", "&amp;") # Must be done first!
s = s.replace("<", "&lt;")
s = s.replace(">", "&gt;")
if quote:
s = s.replace('"', "&quot;")
s = s.replace('\'', "&#x27;")
return s
# see http://www.w3.org/TR/html5/syntax.html#tokenizing-character-references
_invalid_charrefs = {
0x00: '\ufffd', # REPLACEMENT CHARACTER
0x0d: '\r', # CARRIAGE RETURN
0x80: '\u20ac', # EURO SIGN
0x81: '\x81', # <control>
0x82: '\u201a', # SINGLE LOW-9 QUOTATION MARK
0x83: '\u0192', # LATIN SMALL LETTER F WITH HOOK
0x84: '\u201e', # DOUBLE LOW-9 QUOTATION MARK
0x85: '\u2026', # HORIZONTAL ELLIPSIS
0x86: '\u2020', # DAGGER
0x87: '\u2021', # DOUBLE DAGGER
0x88: '\u02c6', # MODIFIER LETTER CIRCUMFLEX ACCENT
0x89: '\u2030', # PER MILLE SIGN
0x8a: '\u0160', # LATIN CAPITAL LETTER S WITH CARON
0x8b: '\u2039', # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x8c: '\u0152', # LATIN CAPITAL LIGATURE OE
0x8d: '\x8d', # <control>
0x8e: '\u017d', # LATIN CAPITAL LETTER Z WITH CARON
0x8f: '\x8f', # <control>
0x90: '\x90', # <control>
0x91: '\u2018', # LEFT SINGLE QUOTATION MARK
0x92: '\u2019', # RIGHT SINGLE QUOTATION MARK
0x93: '\u201c', # LEFT DOUBLE QUOTATION MARK
0x94: '\u201d', # RIGHT DOUBLE QUOTATION MARK
0x95: '\u2022', # BULLET
0x96: '\u2013', # EN DASH
0x97: '\u2014', # EM DASH
0x98: '\u02dc', # SMALL TILDE
0x99: '\u2122', # TRADE MARK SIGN
0x9a: '\u0161', # LATIN SMALL LETTER S WITH CARON
0x9b: '\u203a', # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x9c: '\u0153', # LATIN SMALL LIGATURE OE
0x9d: '\x9d', # <control>
0x9e: '\u017e', # LATIN SMALL LETTER Z WITH CARON
0x9f: '\u0178', # LATIN CAPITAL LETTER Y WITH DIAERESIS
}
_invalid_codepoints = {
# 0x0001 to 0x0008
0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8,
# 0x000E to 0x001F
0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
# 0x007F to 0x009F
0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a,
0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
# 0xFDD0 to 0xFDEF
0xfdd0, 0xfdd1, 0xfdd2, 0xfdd3, 0xfdd4, 0xfdd5, 0xfdd6, 0xfdd7, 0xfdd8,
0xfdd9, 0xfdda, 0xfddb, 0xfddc, 0xfddd, 0xfdde, 0xfddf, 0xfde0, 0xfde1,
0xfde2, 0xfde3, 0xfde4, 0xfde5, 0xfde6, 0xfde7, 0xfde8, 0xfde9, 0xfdea,
0xfdeb, 0xfdec, 0xfded, 0xfdee, 0xfdef,
# others
0xb, 0xfffe, 0xffff, 0x1fffe, 0x1ffff, 0x2fffe, 0x2ffff, 0x3fffe, 0x3ffff,
0x4fffe, 0x4ffff, 0x5fffe, 0x5ffff, 0x6fffe, 0x6ffff, 0x7fffe, 0x7ffff,
0x8fffe, 0x8ffff, 0x9fffe, 0x9ffff, 0xafffe, 0xaffff, 0xbfffe, 0xbffff,
0xcfffe, 0xcffff, 0xdfffe, 0xdffff, 0xefffe, 0xeffff, 0xffffe, 0xfffff,
0x10fffe, 0x10ffff
}
def _replace_charref(s):
s = s.group(1)
if s[0] == '#':
# numeric charref
if s[1] in 'xX':
num = int(s[2:].rstrip(';'), 16)
else:
num = int(s[1:].rstrip(';'))
if num in _invalid_charrefs:
return _invalid_charrefs[num]
if 0xD800 <= num <= 0xDFFF or num > 0x10FFFF:
return '\uFFFD'
if num in _invalid_codepoints:
return ''
return unichr(num)
else:
# named charref
if s in _html5:
return _html5[s]
# find the longest matching name (as defined by the standard)
for x in range(len(s)-1, 1, -1):
if s[:x] in _html5:
return _html5[s[:x]] + s[x:]
else:
return '&' + s
_charref = _re.compile(r'&(#[0-9]+;?'
r'|#[xX][0-9a-fA-F]+;?'
r'|[^\t\n\f <&#;]{1,32};?)')
def unescape(s):
"""
Convert all named and numeric character references (e.g. &gt;, &#62;,
&#x3e;) in the string s to the corresponding unicode characters.
This function uses the rules defined by the HTML 5 standard
for both valid and invalid character references, and the list of
HTML 5 named character references defined in html.entities.html5.
"""
if '&' not in s:
return s
return _charref.sub(_replace_charref, s)
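# Round-trip sketch for the helpers above (guarded demo):
if __name__ == '__main__':
    print(escape('a < b & "c"'))          # a &lt; b &amp; &quot;c&quot;
    print(unescape('&lt;b&gt; &#38; x'))  # <b> & x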

View File

@@ -0,0 +1,481 @@
"""A parser for HTML and XHTML."""
########
# This is copied from Python 3 and then slightly modified to support needed
# features. The original file can be found at:
# https://github.com/python/cpython/blob/44b548dda872c0d4f30afd6b44fd74b053a55ad8/Lib/html/parser.py
#
# The largest difference is the reinstatement of the unescape method in
# HTMLParser, which is needed for features in htmlmin. Changes are also
# made to ensure Python 2.7 compatibility.
########
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import re
import warnings
try:
import _markupbase as markupbase
except ImportError:
import markupbase
from . import unescape
__all__ = ['HTMLParser']
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# Note:
# 1) if you change tagfind/attrfind remember to update locatestarttagend too;
# 2) if you change tagfind/attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
attrfind_tolerant = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParser(markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). If convert_charrefs is
True the character references are converted automatically to the
corresponding Unicode character (and self.handle_data() is no
longer split in chunks), otherwise they are passed by calling
self.handle_entityref() or self.handle_charref() with the string
containing respectively the named or numeric reference as the
argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self, convert_charrefs=True):
"""Initialize and reset this instance.
If convert_charrefs is True (the default), all character references
are automatically converted to the corresponding Unicode characters.
"""
self.convert_charrefs = convert_charrefs
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
self.cdata_elem = None
markupbase.ParserBase.reset(self)
def feed(self, data):
r"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
def clear_cdata_mode(self):
self.interesting = interesting_normal
self.cdata_elem = None
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.convert_charrefs and not self.cdata_elem:
j = rawdata.find('<', i)
if j < 0:
# if we can't find the next <, either we are at the end
# or there's more text incoming. If the latter is True,
# we can't pass the text to handle_data in case we have
# a charref cut in half at end. Try to determine if
# this is the case before proceeding by looking for an
# & near the end and see if it's followed by a space or ;.
amppos = rawdata.rfind('&', max(i, n-34))
if (amppos >= 0 and
not re.compile(r'[\s;]').search(rawdata, amppos)):
break # wait till we get all the text
j = n
else:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
if self.cdata_elem:
break
j = n
if i < j:
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(self.unescape(rawdata[i:j]))
else:
self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
k = self.parse_html_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if not end:
break
k = rawdata.find('>', i + 1)
if k < 0:
k = rawdata.find('<', i + 1)
if k < 0:
k = i + 1
else:
k += 1
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(self.unescape(rawdata[i:k]))
else:
self.handle_data(rawdata[i:k])
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
if ";" in rawdata[i:]: # bail by consuming &#
self.handle_data(rawdata[i:i+2])
i = self.updatepos(i, i+2)
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
k = match.end()
if k <= i:
k = n
i = self.updatepos(i, i + 1)
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n and not self.cdata_elem:
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(self.unescape(rawdata[i:n]))
else:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse html declarations, return length or -1 if not terminated
# See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
# See also parse_declaration in _markupbase
def parse_html_declaration(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<!', ('unexpected call to '
'parse_html_declaration()')
if rawdata[i:i+4] == '<!--':
# this case is actually already handled in goahead()
return self.parse_comment(i)
elif rawdata[i:i+3] == '<![':
return self.parse_marked_section(i)
elif rawdata[i:i+9].lower() == '<!doctype':
# find the closing >
gtpos = rawdata.find('>', i+9)
if gtpos == -1:
return -1
self.handle_decl(rawdata[i+2:gtpos])
return gtpos+1
else:
return self.parse_bogus_comment(i)
# Internal -- parse bogus comment, return length or -1 if not terminated
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
rawdata = self.rawdata
assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
'parse_comment()')
pos = rawdata.find('>', i+2)
if pos == -1:
return -1
if report:
self.handle_comment(rawdata[i+2:pos])
return pos + 1
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind_tolerant.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend_tolerant.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
if j > i:
return j
else:
return i + 1
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
if j > i:
return j
else:
return i + 1
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
gtpos = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_elem is not None:
self.handle_data(rawdata[i:gtpos])
return gtpos
# find the name: w3.org/TR/html5/tokenization.html#tag-name-state
namematch = tagfind_tolerant.match(rawdata, i+2)
if not namematch:
# w3.org/TR/html5/tokenization.html#end-tag-open-state
if rawdata[i:i+3] == '</>':
return i+3
else:
return self.parse_bogus_comment(i)
tagname = namematch.group(1).lower()
# consume and ignore other stuff between the name and the >
# Note: this is not 100% correct, since we might have things like
# </tag attr=">">, but looking for > after the name should cover
# most of the cases and is much simpler
gtpos = rawdata.find('>', namematch.end())
self.handle_endtag(tagname)
return gtpos+1
elem = match.group(1).lower() # script or style
if self.cdata_elem is not None:
if elem != self.cdata_elem:
self.handle_data(rawdata[i:gtpos])
return gtpos
self.handle_endtag(elem.lower())
self.clear_cdata_mode()
return gtpos
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
def unknown_decl(self, data):
pass
# Internal -- helper to remove special character quoting
def unescape(self, s):
return unescape(s)

View File

@@ -0,0 +1,13 @@
Copyright 2017-2019 Jason R. Coombs, Barry Warsaw
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large

View File

@@ -0,0 +1,68 @@
import re
import textwrap
import email.message
from ._text import FoldedCase
class Message(email.message.Message):
multiple_use_keys = set(
map(
FoldedCase,
[
'Classifier',
'Obsoletes-Dist',
'Platform',
'Project-URL',
'Provides-Dist',
'Provides-Extra',
'Requires-Dist',
'Requires-External',
'Supported-Platform',
'Dynamic',
],
)
)
"""
Keys that may be indicated multiple times per PEP 566.
"""
def __new__(cls, orig: email.message.Message):
res = super().__new__(cls)
vars(res).update(vars(orig))
return res
def __init__(self, *args, **kwargs):
self._headers = self._repair_headers()
# suppress spurious error from mypy
def __iter__(self):
return super().__iter__()
def _repair_headers(self):
def redent(value):
"Correct for RFC822 indentation"
if not value or '\n' not in value:
return value
return textwrap.dedent(' ' * 8 + value)
headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
if self._payload:
headers.append(('Description', self.get_payload()))
return headers
@property
def json(self):
"""
Convert PackageMetadata to a JSON-compatible format
per PEP 0566.
"""
def transform(key):
value = self.get_all(key) if key in self.multiple_use_keys else self[key]
if key == 'Keywords':
value = re.split(r'\s+', value)
tk = key.lower().replace('-', '_')
return tk, value
return dict(map(transform, map(FoldedCase, self)))
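# Hedged usage sketch: adapt an already-parsed metadata message and read it
# back as a PEP 566-style dict. Guarded demo; because of the relative import
# at the top, run it as a module (package path assumed), and the field set
# shown is illustrative:
if __name__ == '__main__':
    import email
    raw = email.message_from_string(
        'Metadata-Version: 2.1\nName: demo\nKeywords: a b\n')
    print(Message(raw).json)
    # roughly: {'metadata_version': '2.1', 'name': 'demo', 'keywords': ['a', 'b']}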

View File

@@ -0,0 +1,30 @@
import collections
# from jaraco.collections 3.3
class FreezableDefaultDict(collections.defaultdict):
"""
Often it is desirable to prevent the mutation of
a default dict after its initial construction, such
as to prevent mutation during iteration.
>>> dd = FreezableDefaultDict(list)
>>> dd[0].append('1')
>>> dd.freeze()
>>> dd[1]
[]
>>> len(dd)
1
"""
def __missing__(self, key):
return getattr(self, '_frozen', super().__missing__)(key)
def freeze(self):
self._frozen = lambda key: self.default_factory()
class Pair(collections.namedtuple('Pair', 'name value')):
@classmethod
def parse(cls, text):
return cls(*map(str.strip, text.split("=", 1)))
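# e.g. Pair.parse('key = value') -> Pair(name='key', value='value')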

View File

@@ -0,0 +1,71 @@
import sys
import platform
__all__ = ['install', 'NullFinder', 'Protocol']
try:
from typing import Protocol
except ImportError: # pragma: no cover
from typing_extensions import Protocol # type: ignore
def install(cls):
"""
Class decorator for installation on sys.meta_path.
Adds the backport DistributionFinder to sys.meta_path and
attempts to disable the finder functionality of the stdlib
DistributionFinder.
"""
sys.meta_path.append(cls())
disable_stdlib_finder()
return cls
def disable_stdlib_finder():
"""
Give the backport primacy for discovering path-based distributions
by monkey-patching the stdlib O_O.
See #91 for more background on the rationale for this sketchy
behavior.
"""
def matches(finder):
return getattr(
finder, '__module__', None
) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions')
for finder in filter(matches, sys.meta_path): # pragma: nocover
del finder.find_distributions
class NullFinder:
"""
A "Finder" (aka "MetaClassFinder") that never finds any modules,
but may find distributions.
"""
@staticmethod
def find_spec(*args, **kwargs):
return None
# In Python 2, the import system requires finders
# to have a find_module() method, but this usage
# is deprecated in Python 3 in favor of find_spec().
# For the purposes of this finder (i.e. being present
# on sys.meta_path but having no other import
# system functionality), the two methods are identical.
find_module = find_spec
def pypy_partial(val):
"""
Adjust for variable stacklevel on partial under PyPy.
Workaround for #327.
"""
is_pypy = platform.python_implementation() == 'PyPy'
return val + is_pypy
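# e.g. pypy_partial(2) evaluates to 2 on CPython and 3 on PyPy.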

View File

@@ -0,0 +1,104 @@
import types
import functools
# from jaraco.functools 3.3
def method_cache(method, cache_wrapper=None):
"""
Wrap lru_cache to support storing the cache data in the object instances.
Abstracts the common paradigm where the method explicitly saves an
underscore-prefixed protected property on first call and returns that
subsequently.
>>> class MyClass:
... calls = 0
...
... @method_cache
... def method(self, value):
... self.calls += 1
... return value
>>> a = MyClass()
>>> a.method(3)
3
>>> for x in range(75):
... res = a.method(x)
>>> a.calls
75
Note that the apparent behavior will be exactly like that of lru_cache
except that the cache is stored on each instance, so values in one
instance will not flush values from another, and when an instance is
deleted, so are the cached values for that instance.
>>> b = MyClass()
>>> for x in range(35):
... res = b.method(x)
>>> b.calls
35
>>> a.method(0)
0
>>> a.calls
75
Note that if method had been decorated with ``functools.lru_cache()``,
a.calls would have been 76 (due to the cached value of 0 having been
flushed by the 'b' instance).
Clear the cache with ``.cache_clear()``
>>> a.method.cache_clear()
Same for a method that hasn't yet been called.
>>> c = MyClass()
>>> c.method.cache_clear()
Another cache wrapper may be supplied:
>>> cache = functools.lru_cache(maxsize=2)
>>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
>>> a = MyClass()
>>> a.method2()
3
Caution - do not subsequently wrap the method with another decorator, such
as ``@property``, which changes the semantics of the function.
See also
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
for another implementation and additional justification.
"""
cache_wrapper = cache_wrapper or functools.lru_cache()
def wrapper(self, *args, **kwargs):
# it's the first call, replace the method with a cached, bound method
bound_method = types.MethodType(method, self)
cached_method = cache_wrapper(bound_method)
setattr(self, method.__name__, cached_method)
return cached_method(*args, **kwargs)
# Support cache clear even before cache has been created.
wrapper.cache_clear = lambda: None
return wrapper
# From jaraco.functools 3.3
def pass_none(func):
"""
Wrap func so it's not called if its first param is None
>>> print_text = pass_none(print)
>>> print_text('text')
text
>>> print_text(None)
"""
@functools.wraps(func)
def wrapper(param, *args, **kwargs):
if param is not None:
return func(param, *args, **kwargs)
return wrapper

View File

@@ -0,0 +1,73 @@
from itertools import filterfalse
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
# copied from more_itertools 8.8
def always_iterable(obj, base_type=(str, bytes)):
"""If *obj* is iterable, return an iterator over its items::
>>> obj = (1, 2, 3)
>>> list(always_iterable(obj))
[1, 2, 3]
If *obj* is not iterable, return a one-item iterable containing *obj*::
>>> obj = 1
>>> list(always_iterable(obj))
[1]
If *obj* is ``None``, return an empty iterable:
>>> obj = None
>>> list(always_iterable(None))
[]
By default, binary and text strings are not considered iterable::
>>> obj = 'foo'
>>> list(always_iterable(obj))
['foo']
If *base_type* is set, objects for which ``isinstance(obj, base_type)``
returns ``True`` won't be considered iterable.
>>> obj = {'a': 1}
>>> list(always_iterable(obj)) # Iterate over the dict's keys
['a']
>>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
[{'a': 1}]
Set *base_type* to ``None`` to avoid any special handling and treat objects
Python considers iterable as iterable:
>>> obj = 'foo'
>>> list(always_iterable(obj, base_type=None))
['f', 'o', 'o']
"""
if obj is None:
return iter(())
if (base_type is not None) and isinstance(obj, base_type):
return iter((obj,))
try:
return iter(obj)
except TypeError:
return iter((obj,))

View File

@@ -0,0 +1,48 @@
from ._compat import Protocol
from typing import Any, Dict, Iterator, List, TypeVar, Union
_T = TypeVar("_T")
class PackageMetadata(Protocol):
def __len__(self) -> int:
... # pragma: no cover
def __contains__(self, item: str) -> bool:
... # pragma: no cover
def __getitem__(self, key: str) -> str:
... # pragma: no cover
def __iter__(self) -> Iterator[str]:
... # pragma: no cover
def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
"""
Return all values associated with a possibly multi-valued key.
"""
@property
def json(self) -> Dict[str, Union[str, List[str]]]:
"""
A JSON-compatible form of the metadata.
"""
class SimplePath(Protocol):
"""
A minimal subset of pathlib.Path required by PathDistribution.
"""
def joinpath(self) -> 'SimplePath':
... # pragma: no cover
def __truediv__(self) -> 'SimplePath':
... # pragma: no cover
def parent(self) -> 'SimplePath':
... # pragma: no cover
def read_text(self) -> str:
... # pragma: no cover

View File

@@ -0,0 +1,99 @@
import re
from ._functools import method_cache
# from jaraco.text 3.5
class FoldedCase(str):
"""
A case insensitive string class; behaves just like str
except compares equal when the only variation is case.
>>> s = FoldedCase('hello world')
>>> s == 'Hello World'
True
>>> 'Hello World' == s
True
>>> s != 'Hello World'
False
>>> s.index('O')
4
>>> s.split('O')
['hell', ' w', 'rld']
>>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
['alpha', 'Beta', 'GAMMA']
Sequence membership is straightforward.
>>> "Hello World" in [s]
True
>>> s in ["Hello World"]
True
You may test for set inclusion, but candidate and elements
must both be folded.
>>> FoldedCase("Hello World") in {s}
True
>>> s in {FoldedCase("Hello World")}
True
String inclusion works as long as the FoldedCase object
is on the right.
>>> "hello" in FoldedCase("Hello World")
True
But not if the FoldedCase object is on the left:
>>> FoldedCase('hello') in 'Hello World'
False
In that case, use in_:
>>> FoldedCase('hello').in_('Hello World')
True
>>> FoldedCase('hello') > FoldedCase('Hello')
False
"""
def __lt__(self, other):
return self.lower() < other.lower()
def __gt__(self, other):
return self.lower() > other.lower()
def __eq__(self, other):
return self.lower() == other.lower()
def __ne__(self, other):
return self.lower() != other.lower()
def __hash__(self):
return hash(self.lower())
def __contains__(self, other):
return super().lower().__contains__(other.lower())
def in_(self, other):
"Does self appear in other?"
return self in FoldedCase(other)
# cache lower since it's likely to be called frequently.
@method_cache
def lower(self):
return super().lower()
def index(self, sub):
return self.lower().index(sub.lower())
def split(self, splitter=' ', maxsplit=0):
pattern = re.compile(re.escape(splitter), re.I)
return pattern.split(self, maxsplit)


@@ -0,0 +1,29 @@
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Python Markdown Project nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.


@@ -0,0 +1,61 @@
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
import sys
# TODO: Remove this check at some point in the future.
# (also remove flake8's 'ignore E402' comments below)
if sys.version_info[0] < 3: # pragma: no cover
raise ImportError('A recent version of Python 3 is required.')
from .core import Markdown, markdown, markdownFromFile # noqa: E402
from .util import PY37 # noqa: E402
from .pep562 import Pep562 # noqa: E402
from .__meta__ import __version__, __version_info__ # noqa: E402
import warnings # noqa: E402
# For backward compatibility as some extensions expect it...
from .extensions import Extension # noqa
__all__ = ['Markdown', 'markdown', 'markdownFromFile']
__deprecated__ = {
"version": ("__version__", __version__),
"version_info": ("__version_info__", __version_info__)
}
def __getattr__(name):
"""Get attribute."""
deprecated = __deprecated__.get(name)
if deprecated:
warnings.warn(
"'{}' is deprecated. Use '{}' instead.".format(name, deprecated[0]),
category=DeprecationWarning,
stacklevel=(3 if PY37 else 4)
)
return deprecated[1]
raise AttributeError("module '{}' has no attribute '{}'".format(__name__, name))
if not PY37:
Pep562(__name__)
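# Deprecation sketch (assumes this package is importable as `markdown`):
# import markdown, warnings
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter('always')
#     markdown.version  # routed through __getattr__, emits DeprecationWarning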


@@ -0,0 +1,151 @@
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
import sys
import optparse
import codecs
import warnings
import markdown
try:
# We use `unsafe_load` because users may need to pass in actual Python
# objects. As this is only available from the CLI, the user has much
# worse problems if an attacker can use this as an attack vector.
from yaml import unsafe_load as yaml_load
except ImportError: # pragma: no cover
try:
# Fall back to PyYAML <5.1
from yaml import load as yaml_load
except ImportError:
# Fall back to JSON
from json import load as yaml_load
import logging
from logging import DEBUG, WARNING, CRITICAL
logger = logging.getLogger('MARKDOWN')
def parse_options(args=None, values=None):
"""
Define and parse `optparse` options for command-line usage.
"""
usage = """%prog [options] [INPUTFILE]
(STDIN is assumed if no INPUTFILE is given)"""
desc = "A Python implementation of John Gruber's Markdown. " \
"https://Python-Markdown.github.io/"
ver = "%%prog %s" % markdown.__version__
parser = optparse.OptionParser(usage=usage, description=desc, version=ver)
parser.add_option("-f", "--file", dest="filename", default=None,
help="Write output to OUTPUT_FILE. Defaults to STDOUT.",
metavar="OUTPUT_FILE")
parser.add_option("-e", "--encoding", dest="encoding",
help="Encoding for input and output files.",)
parser.add_option("-o", "--output_format", dest="output_format",
default='xhtml', metavar="OUTPUT_FORMAT",
help="Use output format 'xhtml' (default) or 'html'.")
parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol",
action='store_false', default=True,
help="Observe number of first item of ordered lists.")
parser.add_option("-x", "--extension", action="append", dest="extensions",
help="Load extension EXTENSION.", metavar="EXTENSION")
parser.add_option("-c", "--extension_configs",
dest="configfile", default=None,
help="Read extension configurations from CONFIG_FILE. "
"CONFIG_FILE must be of JSON or YAML format. YAML "
"format requires that a python YAML library be "
"installed. The parsed JSON or YAML must result in a "
"python dictionary which would be accepted by the "
"'extension_configs' keyword on the markdown.Markdown "
"class. The extensions must also be loaded with the "
"`--extension` option.",
metavar="CONFIG_FILE")
parser.add_option("-q", "--quiet", default=CRITICAL,
action="store_const", const=CRITICAL+10, dest="verbose",
help="Suppress all warnings.")
parser.add_option("-v", "--verbose",
action="store_const", const=WARNING, dest="verbose",
help="Print all warnings.")
parser.add_option("--noisy",
action="store_const", const=DEBUG, dest="verbose",
help="Print debug messages.")
(options, args) = parser.parse_args(args, values)
if len(args) == 0:
input_file = None
else:
input_file = args[0]
if not options.extensions:
options.extensions = []
extension_configs = {}
if options.configfile:
with codecs.open(
options.configfile, mode="r", encoding=options.encoding
) as fp:
try:
extension_configs = yaml_load(fp)
except Exception as e:
message = "Failed parsing extension config file: %s" % \
options.configfile
e.args = (message,) + e.args[1:]
raise
opts = {
'input': input_file,
'output': options.filename,
'extensions': options.extensions,
'extension_configs': extension_configs,
'encoding': options.encoding,
'output_format': options.output_format,
'lazy_ol': options.lazy_ol
}
return opts, options.verbose
def run(): # pragma: no cover
"""Run Markdown from the command line."""
# Parse options and adjust logging level if necessary
options, logging_level = parse_options()
if not options:
sys.exit(2)
logger.setLevel(logging_level)
console_handler = logging.StreamHandler()
logger.addHandler(console_handler)
if logging_level <= WARNING:
# Ensure deprecation warnings get displayed
warnings.filterwarnings('default')
logging.captureWarnings(True)
warn_logger = logging.getLogger('py.warnings')
warn_logger.addHandler(console_handler)
# Run
markdown.markdownFromFile(**options)
if __name__ == '__main__': # pragma: no cover
# Support running module as a commandline command.
# `python -m markdown [options] [args]`.
run()


@@ -0,0 +1,49 @@
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
# __version_info__ format:
# (major, minor, patch, dev/alpha/beta/rc/final, #)
# (1, 1, 2, 'dev', 0) => "1.1.2.dev0"
# (1, 1, 2, 'alpha', 1) => "1.1.2a1"
# (1, 2, 0, 'beta', 2) => "1.2b2"
# (1, 2, 0, 'rc', 4) => "1.2rc4"
# (1, 2, 0, 'final', 0) => "1.2"
__version_info__ = (3, 3, 7, 'final', 0)
def _get_version(version_info):
" Returns a PEP 440-compliant version number from version_info. "
assert len(version_info) == 5
assert version_info[3] in ('dev', 'alpha', 'beta', 'rc', 'final')
parts = 2 if version_info[2] == 0 else 3
v = '.'.join(map(str, version_info[:parts]))
if version_info[3] == 'dev':
v += '.dev' + str(version_info[4])
elif version_info[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
v += mapping[version_info[3]] + str(version_info[4])
return v
__version__ = _get_version(__version_info__)
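# Sanity checks (a sketch) matching the mappings documented above:
# assert _get_version((1, 1, 2, 'dev', 0)) == '1.1.2.dev0'
# assert _get_version((1, 2, 0, 'beta', 2)) == '1.2b2'
# assert _get_version((1, 2, 0, 'final', 0)) == '1.2'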


@@ -0,0 +1,125 @@
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
import xml.etree.ElementTree as etree
from . import util
class State(list):
""" Track the current and nested state of the parser.
This utility class is used to track the state of the BlockParser and
support multiple levels of nesting. It's just a simple API wrapped around
a list. Each time a state is set, that state is appended to the end of the
list. Each time a state is reset, that state is removed from the end of
the list.
Therefore, each time a state is set for a nested block, that state must be
reset when we back out of that level of nesting or the state could be
corrupted.
While all the methods of a list object are available, only the three
defined below need be used.
"""
def set(self, state):
""" Set a new state. """
self.append(state)
def reset(self):
""" Step back one step in nested state. """
self.pop()
def isstate(self, state):
""" Test that top (current) level is of given state. """
if len(self):
return self[-1] == state
else:
return False
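# Usage sketch (not part of the vendored file): states form a LIFO stack.
# s = State(); s.set('list'); s.isstate('list')    # True
# s.set('blockquote'); s.isstate('list')           # False
# s.reset(); s.isstate('list')                     # True again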
class BlockParser:
""" Parse Markdown blocks into an ElementTree object.
A wrapper class that stitches the various BlockProcessors together,
looping through them and creating an ElementTree object.
"""
def __init__(self, md):
self.blockprocessors = util.Registry()
self.state = State()
self.md = md
@property
@util.deprecated("Use 'md' instead.")
def markdown(self):
# TODO: remove this later
return self.md
def parseDocument(self, lines):
""" Parse a markdown document into an ElementTree.
Given a list of lines, an ElementTree object (not just a parent
Element) is created and the root element is passed to the parser
as the parent. The ElementTree object is returned.
This should only be called on an entire document, not pieces.
"""
# Create an ElementTree from the lines
self.root = etree.Element(self.md.doc_tag)
self.parseChunk(self.root, '\n'.join(lines))
return etree.ElementTree(self.root)
def parseChunk(self, parent, text):
""" Parse a chunk of markdown text and attach to given etree node.
While the ``text`` argument is generally assumed to contain multiple
blocks which will be split on blank lines, it could contain only one
block. Generally, this method would be called by extensions when
block parsing is required.
The ``parent`` etree Element passed in is altered in place.
Nothing is returned.
"""
self.parseBlocks(parent, text.split('\n\n'))
def parseBlocks(self, parent, blocks):
""" Process blocks of markdown text and attach to given etree node.
Given a list of ``blocks``, each blockprocessor is stepped through
until there are no blocks left. While an extension could potentially
call this method directly, it's generally expected to be used
internally.
This is a public method as an extension may need to add/alter
additional BlockProcessors which call this method to recursively
parse a nested block.
"""
while blocks:
for processor in self.blockprocessors:
if processor.test(parent, blocks[0]):
if processor.run(parent, blocks) is not False:
# run returns True or None
break
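# Driving sketch (assumes a configured `Markdown()` instance named `md`):
# tree = md.parser.parseDocument(['Hello', '', 'World'])
# # `tree` is an ElementTree whose root holds two <p> elements.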


@@ -0,0 +1,623 @@
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
CORE MARKDOWN BLOCKPARSER
===========================================================================
This parser handles basic parsing of Markdown blocks. It doesn't concern
itself with inline elements such as **bold** or *italics*, but rather just
catches blocks, lists, quotes, etc.
The BlockParser is made up of a bunch of BlockProcessors, each handling a
different type of block. Extensions may add/replace/remove BlockProcessors
as they need to alter how markdown blocks are parsed.
"""
import logging
import re
import xml.etree.ElementTree as etree
from . import util
from .blockparser import BlockParser
logger = logging.getLogger('MARKDOWN')
def build_block_parser(md, **kwargs):
""" Build the default block parser used by Markdown. """
parser = BlockParser(md)
parser.blockprocessors.register(EmptyBlockProcessor(parser), 'empty', 100)
parser.blockprocessors.register(ListIndentProcessor(parser), 'indent', 90)
parser.blockprocessors.register(CodeBlockProcessor(parser), 'code', 80)
parser.blockprocessors.register(HashHeaderProcessor(parser), 'hashheader', 70)
parser.blockprocessors.register(SetextHeaderProcessor(parser), 'setextheader', 60)
parser.blockprocessors.register(HRProcessor(parser), 'hr', 50)
parser.blockprocessors.register(OListProcessor(parser), 'olist', 40)
parser.blockprocessors.register(UListProcessor(parser), 'ulist', 30)
parser.blockprocessors.register(BlockQuoteProcessor(parser), 'quote', 20)
parser.blockprocessors.register(ReferenceProcessor(parser), 'reference', 15)
parser.blockprocessors.register(ParagraphProcessor(parser), 'paragraph', 10)
return parser
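# Priority sketch: in this Registry, higher numbers run earlier, so 'empty'
# (100) is tried before 'paragraph' (10); an extension could slot in between:
# parser.blockprocessors.register(MyProcessor(parser), 'mine', 75)  # hypothetical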
class BlockProcessor:
""" Base class for block processors.
Each subclass will provide the methods below to work with the source and
tree. Each processor will need to define its own ``test`` and ``run``
methods. The ``test`` method should return True or False, to indicate
whether the current block should be processed by this processor. If the
test passes, the parser will call the processor's ``run`` method.
"""
def __init__(self, parser):
self.parser = parser
self.tab_length = parser.md.tab_length
def lastChild(self, parent):
""" Return the last child of an etree element. """
if len(parent):
return parent[-1]
else:
return None
def detab(self, text, length=None):
""" Remove a tab from the front of each line of the given text. """
if length is None:
length = self.tab_length
newtext = []
lines = text.split('\n')
for line in lines:
if line.startswith(' ' * length):
newtext.append(line[length:])
elif not line.strip():
newtext.append('')
else:
break
return '\n'.join(newtext), '\n'.join(lines[len(newtext):])
def looseDetab(self, text, level=1):
""" Remove a tab from front of lines but allowing dedented lines. """
lines = text.split('\n')
for i in range(len(lines)):
if lines[i].startswith(' '*self.tab_length*level):
lines[i] = lines[i][self.tab_length*level:]
return '\n'.join(lines)
def test(self, parent, block):
""" Test for block type. Must be overridden by subclasses.
As the parser loops through processors, it will call the ``test``
method on each to determine if the given block of text is of that
type. This method must return a boolean ``True`` or ``False``. The
actual method of testing is left to the needs of that particular
block type. It could be as simple as ``block.startswith(some_string)``
or a complex regular expression. As the block type may be different
depending on the parent of the block (i.e. inside a list), the parent
etree element is also provided and may be used as part of the test.
Keywords:
* ``parent``: An etree element which will be the parent of the block.
* ``block``: A block of text from the source which has been split at
blank lines.
"""
pass # pragma: no cover
def run(self, parent, blocks):
""" Run processor. Must be overridden by subclasses.
When the parser determines the appropriate type of a block, the parser
will call the corresponding processor's ``run`` method. This method
should parse the individual lines of the block and append them to
the etree.
Note that both the ``parent`` and ``etree`` keywords are pointers
to instances of the objects which should be edited in place. Each
processor must make changes to the existing objects as there is no
mechanism to return new/different objects to replace them.
This means that this method should be adding SubElements or adding text
to the parent, and should remove (``pop``) or add (``insert``) items to
the list of blocks.
Keywords:
* ``parent``: An etree element which is the parent of the current block.
* ``blocks``: A list of all remaining blocks of the document.
"""
pass # pragma: no cover
class ListIndentProcessor(BlockProcessor):
""" Process children of list items.
Example:
* a list item
process this part
or this part
"""
ITEM_TYPES = ['li']
LIST_TYPES = ['ul', 'ol']
def __init__(self, *args):
super().__init__(*args)
self.INDENT_RE = re.compile(r'^(([ ]{%s})+)' % self.tab_length)
def test(self, parent, block):
return block.startswith(' '*self.tab_length) and \
not self.parser.state.isstate('detabbed') and \
(parent.tag in self.ITEM_TYPES or
(len(parent) and parent[-1] is not None and
(parent[-1].tag in self.LIST_TYPES)))
def run(self, parent, blocks):
block = blocks.pop(0)
level, sibling = self.get_level(parent, block)
block = self.looseDetab(block, level)
self.parser.state.set('detabbed')
if parent.tag in self.ITEM_TYPES:
# It's possible that this parent has a 'ul' or 'ol' child list
# with a member. If that is the case, then that should be the
# parent. This is intended to catch the edge case of an indented
# list whose first member was parsed previous to this point
# see OListProcessor
if len(parent) and parent[-1].tag in self.LIST_TYPES:
self.parser.parseBlocks(parent[-1], [block])
else:
# The parent is already a li. Just parse the child block.
self.parser.parseBlocks(parent, [block])
elif sibling.tag in self.ITEM_TYPES:
# The sibling is a li. Use it as parent.
self.parser.parseBlocks(sibling, [block])
elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
# The parent is a list (``ol`` or ``ul``) which has children.
# Assume the last child li is the parent of this block.
if sibling[-1].text:
# If the parent li has text, that text needs to be moved to a p
# The p must be 'inserted' at beginning of list in the event
# that other children already exist, i.e. a nested sublist.
p = etree.Element('p')
p.text = sibling[-1].text
sibling[-1].text = ''
sibling[-1].insert(0, p)
self.parser.parseChunk(sibling[-1], block)
else:
self.create_item(sibling, block)
self.parser.state.reset()
def create_item(self, parent, block):
""" Create a new li and parse the block with it as the parent. """
li = etree.SubElement(parent, 'li')
self.parser.parseBlocks(li, [block])
def get_level(self, parent, block):
""" Get level of indent based on list level. """
# Get indent level
m = self.INDENT_RE.match(block)
if m:
indent_level = len(m.group(1))/self.tab_length
else:
indent_level = 0
if self.parser.state.isstate('list'):
# We're in a tightlist - so we already are at correct parent.
level = 1
else:
# We're in a looselist - so we need to find parent.
level = 0
# Step through children of tree to find matching indent level.
while indent_level > level:
child = self.lastChild(parent)
if (child is not None and
(child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES)):
if child.tag in self.LIST_TYPES:
level += 1
parent = child
else:
# No more child levels. If we're short of indent_level,
# we have a code block. So we stop here.
break
return level, parent
class CodeBlockProcessor(BlockProcessor):
""" Process code blocks. """
def test(self, parent, block):
return block.startswith(' '*self.tab_length)
def run(self, parent, blocks):
sibling = self.lastChild(parent)
block = blocks.pop(0)
theRest = ''
if (sibling is not None and sibling.tag == "pre" and
len(sibling) and sibling[0].tag == "code"):
# The previous block was a code block. As blank lines do not start
# new code blocks, append this block to the previous, adding back
# linebreaks removed from the split into a list.
code = sibling[0]
block, theRest = self.detab(block)
code.text = util.AtomicString(
'{}\n{}\n'.format(code.text, util.code_escape(block.rstrip()))
)
else:
# This is a new codeblock. Create the elements and insert text.
pre = etree.SubElement(parent, 'pre')
code = etree.SubElement(pre, 'code')
block, theRest = self.detab(block)
code.text = util.AtomicString('%s\n' % util.code_escape(block.rstrip()))
if theRest:
# This block contained unindented line(s) after the first indented
# line. Insert these lines as the first block of the master blocks
# list for future processing.
blocks.insert(0, theRest)
class BlockQuoteProcessor(BlockProcessor):
RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
def test(self, parent, block):
return bool(self.RE.search(block)) and not util.nearing_recursion_limit()
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()] # Lines before blockquote
# Pass lines before blockquote in recursively for parsing first.
self.parser.parseBlocks(parent, [before])
# Remove ``> `` from beginning of each line.
block = '\n'.join(
[self.clean(line) for line in block[m.start():].split('\n')]
)
sibling = self.lastChild(parent)
if sibling is not None and sibling.tag == "blockquote":
# Previous block was a blockquote so set that as this block's parent
quote = sibling
else:
# This is a new blockquote. Create a new parent element.
quote = etree.SubElement(parent, 'blockquote')
# Recursively parse block with blockquote as parent.
# change parser state so blockquotes embedded in lists use p tags
self.parser.state.set('blockquote')
self.parser.parseChunk(quote, block)
self.parser.state.reset()
def clean(self, line):
""" Remove ``>`` from beginning of a line. """
m = self.RE.match(line)
if line.strip() == ">":
return ""
elif m:
return m.group(2)
else:
return line
class OListProcessor(BlockProcessor):
""" Process ordered list blocks. """
TAG = 'ol'
# The integer (as a python string) with which the list starts (default=1)
# Eg: If list is initialized as:
# 3. Item
# The ol tag will get starts="3" attribute
STARTSWITH = '1'
# Lazy ol - ignore startswith
LAZY_OL = True
# List of allowed sibling tags.
SIBLING_TAGS = ['ol', 'ul']
def __init__(self, parser):
super().__init__(parser)
# Detect an item (``1. item``). ``group(1)`` contains contents of item.
self.RE = re.compile(r'^[ ]{0,%d}\d+\.[ ]+(.*)' % (self.tab_length - 1))
# Detect items on secondary lines. They can be of either list type.
self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.)|[*+-])[ ]+(.*)' %
(self.tab_length - 1))
# Detect indented (nested) items of either type
self.INDENT_RE = re.compile(r'^[ ]{%d,%d}((\d+\.)|[*+-])[ ]+.*' %
(self.tab_length, self.tab_length * 2 - 1))
def test(self, parent, block):
return bool(self.RE.match(block))
def run(self, parent, blocks):
# Check for multiple items in one block.
items = self.get_items(blocks.pop(0))
sibling = self.lastChild(parent)
if sibling is not None and sibling.tag in self.SIBLING_TAGS:
# Previous block was a list item, so set that as parent
lst = sibling
# make sure previous item is in a p - if the item has text,
# then it isn't in a p
if lst[-1].text:
# since it's possible there are other children for this
# sibling, we can't just SubElement the p, we need to
# insert it as the first item.
p = etree.Element('p')
p.text = lst[-1].text
lst[-1].text = ''
lst[-1].insert(0, p)
# if the last item has a tail, then the tail needs to be put in a p
# likely only when a header is not followed by a blank line
lch = self.lastChild(lst[-1])
if lch is not None and lch.tail:
p = etree.SubElement(lst[-1], 'p')
p.text = lch.tail.lstrip()
lch.tail = ''
# parse first block differently as it gets wrapped in a p.
li = etree.SubElement(lst, 'li')
self.parser.state.set('looselist')
firstitem = items.pop(0)
self.parser.parseBlocks(li, [firstitem])
self.parser.state.reset()
elif parent.tag in ['ol', 'ul']:
# this catches the edge case of a multi-item indented list whose
# first item is in a blank parent-list item:
# * * subitem1
# * subitem2
# see also ListIndentProcessor
lst = parent
else:
# This is a new list so create parent with appropriate tag.
lst = etree.SubElement(parent, self.TAG)
# Check if a custom start integer is set
if not self.LAZY_OL and self.STARTSWITH != '1':
lst.attrib['start'] = self.STARTSWITH
self.parser.state.set('list')
# Loop through items in block, recursively parsing each with the
# appropriate parent.
for item in items:
if item.startswith(' '*self.tab_length):
# Item is indented. Parse with last item as parent
self.parser.parseBlocks(lst[-1], [item])
else:
# New item. Create li and parse with it as parent
li = etree.SubElement(lst, 'li')
self.parser.parseBlocks(li, [item])
self.parser.state.reset()
def get_items(self, block):
""" Break a block into list items. """
items = []
for line in block.split('\n'):
m = self.CHILD_RE.match(line)
if m:
# This is a new list item
# Check first item for the start index
if not items and self.TAG == 'ol':
# Detect the integer value of first list item
INTEGER_RE = re.compile(r'(\d+)')
self.STARTSWITH = INTEGER_RE.match(m.group(1)).group()
# Append to the list
items.append(m.group(3))
elif self.INDENT_RE.match(line):
# This is an indented (possibly nested) item.
if items[-1].startswith(' '*self.tab_length):
# Previous item was indented. Append to that item.
items[-1] = '{}\n{}'.format(items[-1], line)
else:
items.append(line)
else:
# This is another line of previous item. Append to that item.
items[-1] = '{}\n{}'.format(items[-1], line)
return items
class UListProcessor(OListProcessor):
""" Process unordered list blocks. """
TAG = 'ul'
def __init__(self, parser):
super().__init__(parser)
# Detect an item (``* item``, ``+ item`` or ``- item``). ``group(1)`` contains contents of item.
self.RE = re.compile(r'^[ ]{0,%d}[*+-][ ]+(.*)' % (self.tab_length - 1))
class HashHeaderProcessor(BlockProcessor):
""" Process Hash Headers. """
# Detect a header at start of any line in block
RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
before = block[:m.start()] # All lines before header
after = block[m.end():] # All lines after header
if before:
# As the header was not the first line of the block and the
# lines before the header must be parsed first,
# recursively parse these lines as a block.
self.parser.parseBlocks(parent, [before])
# Create header using named groups from RE
h = etree.SubElement(parent, 'h%d' % len(m.group('level')))
h.text = m.group('header').strip()
if after:
# Insert remaining lines as first block for future parsing.
blocks.insert(0, after)
else: # pragma: no cover
# This should never happen, but just in case...
logger.warning("We've got a problem header: %r" % block)
class SetextHeaderProcessor(BlockProcessor):
""" Process Setext-style Headers. """
# Detect Setext-style header. Must be first 2 lines of block.
RE = re.compile(r'^.*?\n[=-]+[ ]*(\n|$)', re.MULTILINE)
def test(self, parent, block):
return bool(self.RE.match(block))
def run(self, parent, blocks):
lines = blocks.pop(0).split('\n')
# Determine level. ``=`` is 1 and ``-`` is 2.
if lines[1].startswith('='):
level = 1
else:
level = 2
h = etree.SubElement(parent, 'h%d' % level)
h.text = lines[0].strip()
if len(lines) > 2:
# Block contains additional lines. Add to master blocks for later.
blocks.insert(0, '\n'.join(lines[2:]))
class HRProcessor(BlockProcessor):
""" Process Horizontal Rules. """
# Python's re module doesn't officially support atomic grouping. However you can fake it.
# See https://stackoverflow.com/a/13577411/866026
RE = r'^[ ]{0,3}(?=(?P<atomicgroup>(-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,}))(?P=atomicgroup)[ ]*$'
# Detect hr on any line of a block.
SEARCH_RE = re.compile(RE, re.MULTILINE)
def test(self, parent, block):
m = self.SEARCH_RE.search(block)
if m:
# Save match object on class instance so we can use it later.
self.match = m
return True
return False
def run(self, parent, blocks):
block = blocks.pop(0)
match = self.match
# Check for lines in block before hr.
prelines = block[:match.start()].rstrip('\n')
if prelines:
# Recursively parse lines before hr so they get parsed first.
self.parser.parseBlocks(parent, [prelines])
# create hr
etree.SubElement(parent, 'hr')
# check for lines in block after hr.
postlines = block[match.end():].lstrip('\n')
if postlines:
# Add lines after hr to master blocks for later parsing.
blocks.insert(0, postlines)
class EmptyBlockProcessor(BlockProcessor):
""" Process blocks that are empty or start with an empty line. """
def test(self, parent, block):
return not block or block.startswith('\n')
def run(self, parent, blocks):
block = blocks.pop(0)
filler = '\n\n'
if block:
# Starts with empty line
# Only replace a single line.
filler = '\n'
# Save the rest for later.
theRest = block[1:]
if theRest:
# Add remaining lines to master blocks for later.
blocks.insert(0, theRest)
sibling = self.lastChild(parent)
if (sibling is not None and sibling.tag == 'pre' and
len(sibling) and sibling[0].tag == 'code'):
# Last block is a codeblock. Append to preserve whitespace.
sibling[0].text = util.AtomicString(
'{}{}'.format(sibling[0].text, filler)
)
class ReferenceProcessor(BlockProcessor):
""" Process link references. """
RE = re.compile(
r'^[ ]{0,3}\[([^\[\]]*)\]:[ ]*\n?[ ]*([^\s]+)[ ]*(?:\n[ ]*)?((["\'])(.*)\4[ ]*|\((.*)\)[ ]*)?$', re.MULTILINE
)
def test(self, parent, block):
return True
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
id = m.group(1).strip().lower()
link = m.group(2).lstrip('<').rstrip('>')
title = m.group(5) or m.group(6)
self.parser.md.references[id] = (link, title)
if block[m.end():].strip():
# Add any content after match back to blocks as separate block
blocks.insert(0, block[m.end():].lstrip('\n'))
if block[:m.start()].strip():
# Add any content before match back to blocks as separate block
blocks.insert(0, block[:m.start()].rstrip('\n'))
return True
# No match. Restore block.
blocks.insert(0, block)
return False
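# Syntax sketch handled by the RE above:
#   [example]: <https://example.com/> "Optional Title"
# stores md.references['example'] = ('https://example.com/', 'Optional Title').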
class ParagraphProcessor(BlockProcessor):
""" Process Paragraph blocks. """
def test(self, parent, block):
return True
def run(self, parent, blocks):
block = blocks.pop(0)
if block.strip():
# Not a blank block. Add to parent, otherwise throw it away.
if self.parser.state.isstate('list'):
# The parent is a tight-list.
#
# Check for any children. This will likely only happen in a
# tight-list when a header isn't followed by a blank line.
# For example:
#
# * # Header
# Line 2 of list item - not part of header.
sibling = self.lastChild(parent)
if sibling is not None:
# Insert after sibling.
if sibling.tail:
sibling.tail = '{}\n{}'.format(sibling.tail, block)
else:
sibling.tail = '\n%s' % block
else:
# Append to parent.text
if parent.text:
parent.text = '{}\n{}'.format(parent.text, block)
else:
parent.text = block.lstrip()
else:
# Create a regular paragraph
p = etree.SubElement(parent, 'p')
p.text = block.lstrip()


@@ -0,0 +1,394 @@
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
import codecs
import sys
import logging
import importlib
from . import util
from .preprocessors import build_preprocessors
from .blockprocessors import build_block_parser
from .treeprocessors import build_treeprocessors
from .inlinepatterns import build_inlinepatterns
from .postprocessors import build_postprocessors
from .extensions import Extension
from .serializers import to_html_string, to_xhtml_string
__all__ = ['Markdown', 'markdown', 'markdownFromFile']
logger = logging.getLogger('MARKDOWN')
class Markdown:
"""Convert Markdown to HTML."""
doc_tag = "div" # Element used to wrap document - later removed
output_formats = {
'html': to_html_string,
'xhtml': to_xhtml_string,
}
def __init__(self, **kwargs):
"""
Creates a new Markdown instance.
Keyword arguments:
* extensions: A list of extensions.
If an item is an instance of a subclass of `markdown.extension.Extension`, the instance will be used
as-is. If an item is of type string, first an entry point will be loaded. If that fails, the string is
assumed to use Python dot notation (`path.to.module:ClassName`) to load a markdown.Extension subclass. If
no class is specified, then a `makeExtension` function is called within the specified module.
* extension_configs: Configuration settings for extensions.
* output_format: Format of output. Supported formats are:
* "xhtml": Outputs XHTML style tags. Default.
* "html": Outputs HTML style tags.
* tab_length: Length of tabs in the source. Default: 4
"""
self.tab_length = kwargs.get('tab_length', 4)
self.ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
'(', ')', '>', '#', '+', '-', '.', '!']
self.block_level_elements = [
# Elements which are invalid to wrap in a `<p>` tag.
# See https://w3c.github.io/html/grouping-content.html#the-p-element
'address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl',
'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3',
'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'main', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul',
# Other elements which Markdown should not be mucking up the contents of.
'canvas', 'colgroup', 'dd', 'body', 'dt', 'group', 'iframe', 'li', 'legend',
'math', 'map', 'noscript', 'output', 'object', 'option', 'progress', 'script',
'style', 'summary', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'video'
]
self.registeredExtensions = []
self.docType = ""
self.stripTopLevelTags = True
self.build_parser()
self.references = {}
self.htmlStash = util.HtmlStash()
self.registerExtensions(extensions=kwargs.get('extensions', []),
configs=kwargs.get('extension_configs', {}))
self.set_output_format(kwargs.get('output_format', 'xhtml'))
self.reset()
def build_parser(self):
""" Build the parser from the various parts. """
self.preprocessors = build_preprocessors(self)
self.parser = build_block_parser(self)
self.inlinePatterns = build_inlinepatterns(self)
self.treeprocessors = build_treeprocessors(self)
self.postprocessors = build_postprocessors(self)
return self
def registerExtensions(self, extensions, configs):
"""
Register extensions with this instance of Markdown.
Keyword arguments:
* extensions: A list of extensions, which can either
be strings or objects.
* configs: A dictionary mapping extension names to config options.
"""
for ext in extensions:
if isinstance(ext, str):
ext = self.build_extension(ext, configs.get(ext, {}))
if isinstance(ext, Extension):
ext._extendMarkdown(self)
logger.debug(
'Successfully loaded extension "%s.%s".'
% (ext.__class__.__module__, ext.__class__.__name__)
)
return self
def build_extension(self, ext_name, configs):
"""
Build extension from a string name, then return an instance.
Don't attempt to load any entrypoint,
as it conflicts with an already installed Python-Markdown.
Assume dot notation (`path.to.module:ClassName`).
Load the specified class and return an instance. If no class is specified,
import the module and call a `makeExtension` function
and return the Extension instance returned by that function.
"""
configs = dict(configs)
# Get class name (if provided): `path.to.module:ClassName`
ext_name, class_name = ext_name.split(':', 1) if ':' in ext_name else (ext_name, '')
try:
module = importlib.import_module('Libs.markdown.extensions.'+ext_name)
logger.debug(
'Successfully imported extension module "%s".' % ext_name
)
except ImportError as e:
message = 'Failed loading extension "%s".' % ext_name
e.args = (message,) + e.args[1:]
raise
if class_name:
# Load given class name from module.
return getattr(module, class_name)(**configs)
else:
# Expect makeExtension() function to return a class.
try:
return module.makeExtension(**configs)
except AttributeError as e:
message = e.args[0]
message = "Failed to initiate extension " \
"'%s': %s" % (ext_name, message)
e.args = (message,) + e.args[1:]
raise
def registerExtension(self, extension):
""" This gets called by the extension """
self.registeredExtensions.append(extension)
return self
def reset(self):
"""
Resets all state variables so that we can start with a new text.
"""
self.htmlStash.reset()
self.references.clear()
for extension in self.registeredExtensions:
if hasattr(extension, 'reset'):
extension.reset()
return self
def set_output_format(self, format):
""" Set the output format for the class instance. """
self.output_format = format.lower().rstrip('145') # ignore num
try:
self.serializer = self.output_formats[self.output_format]
except KeyError as e:
valid_formats = list(self.output_formats.keys())
valid_formats.sort()
message = 'Invalid Output Format: "%s". Use one of %s.' \
% (self.output_format,
'"' + '", "'.join(valid_formats) + '"')
e.args = (message,) + e.args[1:]
raise
return self
def is_block_level(self, tag):
"""Check if the tag is a block level HTML tag."""
if isinstance(tag, str):
return tag.lower().rstrip('/') in self.block_level_elements
# Some ElementTree tags are not strings, so return False.
return False
def convert(self, source):
"""
Convert markdown to serialized XHTML or HTML.
Keyword arguments:
* source: Source text as a Unicode string.
Markdown processing takes place in five steps:
1. A bunch of "preprocessors" munge the input text.
2. BlockParser() parses the high-level structural elements of the
pre-processed text into an ElementTree.
3. A bunch of "treeprocessors" are run against the ElementTree. One
such treeprocessor runs InlinePatterns against the ElementTree,
detecting inline markup.
4. Some post-processors are run against the text after the ElementTree
has been serialized into text.
5. The output is written to a string.
"""
# Fixup the source text
if not source.strip():
return '' # a blank unicode string
try:
source = str(source)
except UnicodeDecodeError as e: # pragma: no cover
# Customise error message while maintaining original traceback
e.reason += '. -- Note: Markdown only accepts unicode input!'
raise
# Split into lines and run the line preprocessors.
self.lines = source.split("\n")
for prep in self.preprocessors:
self.lines = prep.run(self.lines)
# Parse the high-level elements.
root = self.parser.parseDocument(self.lines).getroot()
# Run the tree-processors
for treeprocessor in self.treeprocessors:
newRoot = treeprocessor.run(root)
if newRoot is not None:
root = newRoot
# Serialize _properly_. Strip top-level tags.
output = self.serializer(root)
if self.stripTopLevelTags:
try:
start = output.index(
'<%s>' % self.doc_tag) + len(self.doc_tag) + 2
end = output.rindex('</%s>' % self.doc_tag)
output = output[start:end].strip()
except ValueError as e: # pragma: no cover
if output.strip().endswith('<%s />' % self.doc_tag):
# We have an empty document
output = ''
else:
# We have a serious problem
raise ValueError('Markdown failed to strip top-level '
'tags. Document=%r' % output.strip()) from e
# Run the text post-processors
for pp in self.postprocessors:
output = pp.run(output)
return output.strip()
def convertFile(self, input=None, output=None, encoding=None):
"""Converts a markdown file and returns the HTML as a unicode string.
Decodes the file using the provided encoding (defaults to utf-8),
passes the file content to markdown, and outputs the html to either
the provided stream or the file with provided name, using the same
encoding as the source file. The 'xmlcharrefreplace' error handler is
used when encoding the output.
**Note:** This is the only place that decoding and encoding of unicode
takes place in Python-Markdown. (All other code is unicode-in /
unicode-out.)
Keyword arguments:
* input: File object or path. Reads from stdin if `None`.
* output: File object or path. Writes to stdout if `None`.
* encoding: Encoding of input and output files. Defaults to utf-8.
"""
encoding = encoding or "utf-8"
# Read the source
if input:
if isinstance(input, str):
input_file = codecs.open(input, mode="r", encoding=encoding)
else:
input_file = codecs.getreader(encoding)(input)
text = input_file.read()
input_file.close()
else:
text = sys.stdin.read()
if not isinstance(text, str): # pragma: no cover
text = text.decode(encoding)
text = text.lstrip('\ufeff') # remove the byte-order mark
# Convert
html = self.convert(text)
# Write to file or stdout
if output:
if isinstance(output, str):
output_file = codecs.open(output, "w",
encoding=encoding,
errors="xmlcharrefreplace")
output_file.write(html)
output_file.close()
else:
writer = codecs.getwriter(encoding)
output_file = writer(output, errors="xmlcharrefreplace")
output_file.write(html)
# Don't close here. User may want to write more.
else:
# Encode manually and write bytes to stdout.
html = html.encode(encoding, "xmlcharrefreplace")
try:
# Write bytes directly to buffer (Python 3).
sys.stdout.buffer.write(html)
except AttributeError: # pragma: no cover
# Probably Python 2, which works with bytes by default.
sys.stdout.write(html)
return self
"""
EXPORTED FUNCTIONS
=============================================================================
These are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text, **kwargs):
"""Convert a markdown string to HTML and return HTML as a unicode string.
This is a shortcut function for `Markdown` class to cover the most
basic use case. It initializes an instance of Markdown, loads the
necessary extensions and runs the parser on the given text.
Keyword arguments:
* text: Markdown formatted text as Unicode or ASCII string.
* Any arguments accepted by the Markdown class.
Returns: An HTML document as a string.
"""
md = Markdown(**kwargs)
return md.convert(text)
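# Usage sketch:
# >>> markdown('# Hello *world*')
# '<h1>Hello <em>world</em></h1>'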
def markdownFromFile(**kwargs):
"""Read markdown code from a file and write it to a file or a stream.
This is a shortcut function which initializes an instance of Markdown,
and calls the convertFile method rather than convert.
Keyword arguments:
* input: a file name or readable object.
* output: a file name or writable object.
* encoding: Encoding of input and output.
* Any arguments accepted by the Markdown class.
"""
md = Markdown(**kwargs)
md.convertFile(kwargs.get('input', None),
kwargs.get('output', None),
kwargs.get('encoding', None))
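# Usage sketch (hypothetical file names):
# markdownFromFile(input='README.md', output='README.html', encoding='utf-8')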


@@ -0,0 +1,107 @@
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
import warnings
from ..util import parseBoolValue
class Extension:
""" Base class for extensions to subclass. """
# Default config -- to be overridden by a subclass
# Must be of the following format:
# {
# 'key': ['value', 'description']
# }
# Note that Extension.setConfig will raise a KeyError
# if a default is not set here.
config = {}
def __init__(self, **kwargs):
""" Initiate Extension and set up configs. """
self.setConfigs(kwargs)
def getConfig(self, key, default=''):
""" Return a setting for the given key or an empty string. """
if key in self.config:
return self.config[key][0]
else:
return default
def getConfigs(self):
""" Return all configs settings as a dict. """
return {key: self.getConfig(key) for key in self.config.keys()}
def getConfigInfo(self):
""" Return all config descriptions as a list of tuples. """
return [(key, self.config[key][1]) for key in self.config.keys()]
def setConfig(self, key, value):
""" Set a config setting for `key` with the given `value`. """
if isinstance(self.config[key][0], bool):
value = parseBoolValue(value)
if self.config[key][0] is None:
value = parseBoolValue(value, preserve_none=True)
self.config[key][0] = value
def setConfigs(self, items):
""" Set multiple config settings given a dict or list of tuples. """
if hasattr(items, 'items'):
# it's a dict
items = items.items()
for key, value in items:
self.setConfig(key, value)
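# Configuration sketch (hypothetical subclass): boolean defaults are coerced.
# class MyExt(Extension):
#     config = {'flag': [False, 'Enable the flag']}
# MyExt(flag='yes').getConfig('flag')  # -> True, via parseBoolValue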
def _extendMarkdown(self, *args):
""" Private wrapper around extendMarkdown. """
md = args[0]
try:
self.extendMarkdown(md)
except TypeError as e:
if "missing 1 required positional argument" in str(e):
# Must be a 2.x extension. Pass in a dummy md_globals.
self.extendMarkdown(md, {})
warnings.warn(
"The 'md_globals' parameter of '{}.{}.extendMarkdown' is "
"deprecated.".format(self.__class__.__module__, self.__class__.__name__),
category=DeprecationWarning,
stacklevel=2
)
else:
raise
def extendMarkdown(self, md):
"""
Add the various processors and patterns to the Markdown Instance.
This method must be overridden by every extension.
Keyword arguments:
* md: The Markdown instance.
"""
raise NotImplementedError(
'Extension "%s.%s" must define an "extendMarkdown"'
'method.' % (self.__class__.__module__, self.__class__.__name__)
)


@@ -0,0 +1,99 @@
'''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
See <https://Python-Markdown.github.io/extensions/abbreviations>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/) and
[Seemant Kulleen](http://www.kulleen.org/)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
'''
from . import Extension
from ..blockprocessors import BlockProcessor
from ..inlinepatterns import InlineProcessor
from ..util import AtomicString
import re
import xml.etree.ElementTree as etree
class AbbrExtension(Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.parser.blockprocessors.register(AbbrPreprocessor(md.parser), 'abbr', 16)
class AbbrPreprocessor(BlockProcessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
RE = re.compile(r'^[*]\[(?P<abbr>[^\]]*)\][ ]?:[ ]*\n?[ ]*(?P<title>.*)$', re.MULTILINE)
def test(self, parent, block):
return True
def run(self, parent, blocks):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
block = blocks.pop(0)
m = self.RE.search(block)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
self.parser.md.inlinePatterns.register(
AbbrInlineProcessor(self._generate_pattern(abbr), title), 'abbr-%s' % abbr, 2
)
if block[m.end():].strip():
# Add any content after match back to blocks as separate block
blocks.insert(0, block[m.end():].lstrip('\n'))
if block[:m.start()].strip():
# Add any content before match back to blocks as separate block
blocks.insert(0, block[:m.start()].rstrip('\n'))
return True
# No match. Restore block.
blocks.insert(0, block)
return False
def _generate_pattern(self, text):
'''
Given a string, returns a regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrInlineProcessor(InlineProcessor):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
super().__init__(pattern)
self.title = title
def handleMatch(self, m, data):
abbr = etree.Element('abbr')
abbr.text = AtomicString(m.group('abbr'))
abbr.set('title', self.title)
return abbr, m.start(0), m.end(0)
def makeExtension(**kwargs): # pragma: no cover
return AbbrExtension(**kwargs)
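# Usage sketch (the rendered output shown is an assumption):
# markdown.markdown('*[HTML]: Hyper Text Markup Language\n\nHTML is fun.',
#                   extensions=['abbr'])
# # -> '<p><abbr title="Hyper Text Markup Language">HTML</abbr> is fun.</p>'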


@@ -0,0 +1,170 @@
"""
Admonition extension for Python-Markdown
========================================
Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
[rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions # noqa
See <https://Python-Markdown.github.io/extensions/admonition>
for documentation.
Original code Copyright [Tiago Serafim](https://www.tiagoserafim.com/).
All changes Copyright The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..blockprocessors import BlockProcessor
import xml.etree.ElementTree as etree
import re
class AdmonitionExtension(Extension):
""" Admonition extension for Python-Markdown. """
def extendMarkdown(self, md):
""" Add Admonition to Markdown instance. """
md.registerExtension(self)
md.parser.blockprocessors.register(AdmonitionProcessor(md.parser), 'admonition', 105)
class AdmonitionProcessor(BlockProcessor):
CLASSNAME = 'admonition'
CLASSNAME_TITLE = 'admonition-title'
RE = re.compile(r'(?:^|\n)!!! ?([\w\-]+(?: +[\w\-]+)*)(?: +"(.*?)")? *(?:\n|$)')
RE_SPACES = re.compile(' +')
def __init__(self, parser):
"""Initialization."""
super().__init__(parser)
self.current_sibling = None
self.content_indent = 0
def parse_content(self, parent, block):
"""Get sibling admonition.
Retrieve the appropriate sibling element. This can get tricky when
dealing with lists.
"""
old_block = block
the_rest = ''
# We already acquired the block via test
if self.current_sibling is not None:
sibling = self.current_sibling
block, the_rest = self.detab(block, self.content_indent)
self.current_sibling = None
self.content_indent = 0
return sibling, block, the_rest
sibling = self.lastChild(parent)
if sibling is None or sibling.get('class', '').find(self.CLASSNAME) == -1:
sibling = None
else:
# If the last child is a list and the content is sufficiently indented
# to be under it, then the content's sibling is in the list.
last_child = self.lastChild(sibling)
indent = 0
while last_child:
if (
sibling and block.startswith(' ' * self.tab_length * 2) and
last_child and last_child.tag in ('ul', 'ol', 'dl')
):
# The expectation is that we'll find an <li> or <dt>.
# We should get its last child as well.
sibling = self.lastChild(last_child)
last_child = self.lastChild(sibling) if sibling else None
# Context has been lost at this point, so we must adjust the
# text's indentation level so it will be evaluated correctly
# under the list.
block = block[self.tab_length:]
indent += self.tab_length
else:
last_child = None
if not block.startswith(' ' * self.tab_length):
sibling = None
if sibling is not None:
indent += self.tab_length
block, the_rest = self.detab(old_block, indent)
self.current_sibling = sibling
self.content_indent = indent
return sibling, block, the_rest
def test(self, parent, block):
if self.RE.search(block):
return True
else:
return self.parse_content(parent, block)[0] is not None
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
if m.start() > 0:
self.parser.parseBlocks(parent, [block[:m.start()]])
block = block[m.end():] # removes the first line
block, theRest = self.detab(block)
else:
sibling, block, theRest = self.parse_content(parent, block)
if m:
klass, title = self.get_class_and_title(m)
div = etree.SubElement(parent, 'div')
div.set('class', '{} {}'.format(self.CLASSNAME, klass))
if title:
p = etree.SubElement(div, 'p')
p.text = title
p.set('class', self.CLASSNAME_TITLE)
else:
# Sibling is a list item; its content should be wrapped in a <p>
if sibling.tag in ('li', 'dd') and sibling.text:
text = sibling.text
sibling.text = ''
p = etree.SubElement(sibling, 'p')
p.text = text
div = sibling
self.parser.parseChunk(div, block)
if theRest:
# This block contained unindented line(s) after the first indented
# line. Insert these lines as the first block of the master blocks
# list for future processing.
blocks.insert(0, theRest)
def get_class_and_title(self, match):
klass, title = match.group(1).lower(), match.group(2)
klass = self.RE_SPACES.sub(' ', klass)
if title is None:
# no title was provided, use the capitalized classname as title
# e.g.: `!!! note` will render
# `<p class="admonition-title">Note</p>`
title = klass.split(' ', 1)[0].capitalize()
elif title == '':
# an explicit blank title should not be rendered
# e.g.: `!!! warning ""` will *not* render `p` with a title
title = None
return klass, title
def makeExtension(**kwargs): # pragma: no cover
return AdmonitionExtension(**kwargs)
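# Usage sketch (the rendered shape is an assumption):
# markdown.markdown('!!! note "Heads up"\n    Indented body.',
#                   extensions=['admonition'])
# # -> a <div class="admonition note"> containing a <p class="admonition-title">.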


@@ -0,0 +1,166 @@
"""
Attribute List Extension for Python-Markdown
============================================
Adds attribute list syntax. Inspired by
[maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
feature of the same name.
See <https://Python-Markdown.github.io/extensions/attr_list>
for documentation.
Original code Copyright 2011 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2011-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..treeprocessors import Treeprocessor
import re
def _handle_double_quote(s, t):
k, v = t.split('=', 1)
return k, v.strip('"')
def _handle_single_quote(s, t):
k, v = t.split('=', 1)
return k, v.strip("'")
def _handle_key_value(s, t):
return t.split('=', 1)
def _handle_word(s, t):
if t.startswith('.'):
return '.', t[1:]
if t.startswith('#'):
return 'id', t[1:]
return t, t
_scanner = re.Scanner([
(r'[^ =]+=".*?"', _handle_double_quote),
(r"[^ =]+='.*?'", _handle_single_quote),
(r'[^ =]+=[^ =]+', _handle_key_value),
(r'[^ =]+', _handle_word),
(r' ', None)
])
def get_attrs(str):
""" Parse attribute list and return a list of attribute tuples. """
return _scanner.scan(str)[0]
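# Example (added for clarity; results follow from the scanner above):
# get_attrs('.lead #intro key="a b"')
# -> [('.', 'lead'), ('id', 'intro'), ('key', 'a b')]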
def isheader(elem):
return elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
class AttrListTreeprocessor(Treeprocessor):
BASE_RE = r'\{\:?[ ]*([^\}\n ][^\}\n]*)[ ]*\}'
HEADER_RE = re.compile(r'[ ]+{}[ ]*$'.format(BASE_RE))
BLOCK_RE = re.compile(r'\n[ ]*{}[ ]*$'.format(BASE_RE))
INLINE_RE = re.compile(r'^{}'.format(BASE_RE))
NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff'
r'\u0370-\u037d\u037f-\u1fff\u200c-\u200d'
r'\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff'
r'\uf900-\ufdcf\ufdf0-\ufffd'
r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+')
def run(self, doc):
for elem in doc.iter():
if self.md.is_block_level(elem.tag):
# Block level: check for attrs on last line of text
RE = self.BLOCK_RE
if isheader(elem) or elem.tag in ['dt', 'td', 'th']:
# header, def-term, or table cell: check for attrs at end of element
RE = self.HEADER_RE
if len(elem) and elem.tag == 'li':
# special case list items. children may include a ul or ol.
pos = None
# find the ul or ol position
for i, child in enumerate(elem):
if child.tag in ['ul', 'ol']:
pos = i
break
if pos is None and elem[-1].tail:
# use tail of last child. no ul or ol.
m = RE.search(elem[-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[-1].tail = elem[-1].tail[:m.start()]
elif pos is not None and pos > 0 and elem[pos-1].tail:
# use tail of last child before ul or ol
m = RE.search(elem[pos-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[pos-1].tail = elem[pos-1].tail[:m.start()]
elif elem.text:
# use text. ul is first child.
m = RE.search(elem.text)
if m:
self.assign_attrs(elem, m.group(1))
elem.text = elem.text[:m.start()]
elif len(elem) and elem[-1].tail:
# has children. Get from tail of last child
m = RE.search(elem[-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[-1].tail = elem[-1].tail[:m.start()]
if isheader(elem):
# clean up trailing #s
elem[-1].tail = elem[-1].tail.rstrip('#').rstrip()
elif elem.text:
# no children. Get from text.
m = RE.search(elem.text)
if m:
self.assign_attrs(elem, m.group(1))
elem.text = elem.text[:m.start()]
if isheader(elem):
# clean up trailing #s
elem.text = elem.text.rstrip('#').rstrip()
else:
# inline: check for attrs at start of tail
if elem.tail:
m = self.INLINE_RE.match(elem.tail)
if m:
self.assign_attrs(elem, m.group(1))
elem.tail = elem.tail[m.end():]
def assign_attrs(self, elem, attrs):
""" Assign attrs to element. """
for k, v in get_attrs(attrs):
if k == '.':
# add to class
cls = elem.get('class')
if cls:
elem.set('class', '{} {}'.format(cls, v))
else:
elem.set('class', v)
else:
# assign attr k with v
elem.set(self.sanitize_name(k), v)
def sanitize_name(self, name):
"""
Sanitize name as 'an XML Name, minus the ":"'.
See https://www.w3.org/TR/REC-xml-names/#NT-NCName
"""
return self.NAME_RE.sub('_', name)
class AttrListExtension(Extension):
def extendMarkdown(self, md):
md.treeprocessors.register(AttrListTreeprocessor(md), 'attr_list', 8)
md.registerExtension(self)
def makeExtension(**kwargs): # pragma: no cover
return AttrListExtension(**kwargs)
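A minimal sketch of the syntax this file implements, assuming the stock `markdown` entry point (attribute order in the output may vary):
import markdown
html = markdown.markdown('# A heading {: #intro .lead key="value" }', extensions=['attr_list'])
# -> <h1 id="intro" class="lead" key="value">A heading</h1>
print(html)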


@@ -0,0 +1,173 @@
"""
Attribute List Extension for Python-Markdown
============================================
Adds attribute list syntax. Inspired by
[maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
feature of the same name.
See <https://Python-Markdown.github.io/extensions/attr_list>
for documentation.
Original code Copyright 2011 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2011-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..treeprocessors import Treeprocessor
from xml.etree import ElementTree
import re
def _handle_double_quote(s, t):
k, v = t.split('=', 1)
return k, v.strip('"')
def _handle_single_quote(s, t):
k, v = t.split('=', 1)
return k, v.strip("'")
def _handle_key_value(s, t):
return t.split('=', 1)
def _handle_word(s, t):
if t.startswith('.'):
return '.', t[1:]
if t.startswith('#'):
return 'id', t[1:]
return t, t
_scanner = re.Scanner([
(r'[^ =]+=".*?"', _handle_double_quote),
(r"[^ =]+='.*?'", _handle_single_quote),
(r'[^ =]+=[^ =]+', _handle_key_value),
(r'[^ =]+', _handle_word),
(r' ', None)
])
def get_attrs(str):
""" Parse attribute list and return a list of attribute tuples. """
return _scanner.scan(str)[0]
def isheader(elem):
return elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
class AttrListTreeprocessor(Treeprocessor):
BASE_RE = r'\{\:[ ]*([^\n ][^\n]*)[ ]*\:\}'
HEADER_RE = re.compile(r'[ ]+{}[ ]*$'.format(BASE_RE))
BLOCK_RE = re.compile(r'\n[ ]*{}[ ]*$'.format(BASE_RE))
INLINE_RE = re.compile(r'^{}'.format(BASE_RE))
NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff'
r'\u0370-\u037d\u037f-\u1fff\u200c-\u200d'
r'\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff'
r'\uf900-\ufdcf\ufdf0-\ufffd'
r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+')
def run(self, doc):
for elem in doc.iter():
if self.md.is_block_level(elem.tag):
# Block level: check for attrs on last line of text
RE = self.BLOCK_RE
if isheader(elem) or elem.tag in ['dt', 'td', 'th']:
# header, def-term, or table cell: check for attrs at end of element
RE = self.HEADER_RE
if len(elem) and elem.tag == 'li':
# special case list items. children may include a ul or ol.
pos = None
# find the ul or ol position
for i, child in enumerate(elem):
if child.tag in ['ul', 'ol']:
pos = i
break
if pos is None and elem[-1].tail:
# use tail of last child. no ul or ol.
m = RE.search(elem[-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[-1].tail = elem[-1].tail[:m.start()]
elif pos is not None and pos > 0 and elem[pos-1].tail:
# use tail of last child before ul or ol
m = RE.search(elem[pos-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[pos-1].tail = elem[pos-1].tail[:m.start()]
elif elem.text:
# use text. ul is first child.
m = RE.search(elem.text)
if m:
self.assign_attrs(elem, m.group(1))
elem.text = elem.text[:m.start()]
elif len(elem) and elem[-1].tail:
# has children. Get from tail of last child
m = RE.search(elem[-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[-1].tail = elem[-1].tail[:m.start()]
if isheader(elem):
# clean up trailing #s
elem[-1].tail = elem[-1].tail.rstrip('#').rstrip()
elif elem.text:
# no children. Get from text.
m = RE.search(elem.text)
if m:
self.assign_attrs(elem, m.group(1))
elem.text = elem.text[:m.start()]
if isheader(elem):
# clean up trailing #s
elem.text = elem.text.rstrip('#').rstrip()
else:
# inline: check for attrs at start of tail
if elem.tail:
m = self.INLINE_RE.match(elem.tail)
if m:
self.assign_attrs(elem, m.group(1))
elem.tail = elem.tail[m.end():]
def assign_attrs(self, elem, attrs):
""" Assign attrs to element. """
for k, v in get_attrs(attrs):
if k == '.':
# add to class
cls = elem.get('class')
if cls:
elem.set('class', '{} {}'.format(cls, v))
else:
elem.set('class', v)
elif k.startswith('{=') and k.endswith('=}'):
# Work in progress: `{=...=}` keys are detected here but not yet applied
# to the element; the commented lines are the experiments being tried.
#elem.set(self.sanitize_name(k), f'" {v} {self.sanitize_name(k)}1="')
#elem.set(elem, v)
pass
else:
# assign attr k with v
elem.set(self.sanitize_name(k), v)
def sanitize_name(self, name):
"""
Sanitize name as 'an XML Name, minus the ":"'.
See https://www.w3.org/TR/REC-xml-names/#NT-NCName
"""
return self.NAME_RE.sub('_', name)
class AttrListExtension(Extension):
def extendMarkdown(self, md):
md.treeprocessors.register(AttrListTreeprocessor(md), 'attr_list', 8)
md.registerExtension(self)
def makeExtension(**kwargs): # pragma: no cover
return AttrListExtension(**kwargs)
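The notable change in this copy is the stricter BASE_RE, which now requires a closing `:}`. A quick sketch of the effect, exercising only the pattern itself:
import re
BASE_RE = r'\{\:[ ]*([^\n ][^\n]*)[ ]*\:\}'
HEADER_RE = re.compile(r'[ ]+{}[ ]*$'.format(BASE_RE))
print(bool(HEADER_RE.search('A heading {: #intro .lead :}')))  # True
print(bool(HEADER_RE.search('A heading {: #intro .lead }')))   # False: no closing :}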


@@ -0,0 +1,308 @@
"""
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
See <https://Python-Markdown.github.io/extensions/code_hilite>
for documentation.
Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import parseBoolValue
try: # pragma: no cover
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import get_formatter_by_name
pygments = True
except ImportError: # pragma: no cover
pygments = False
def parse_hl_lines(expr):
"""Support our syntax for emphasizing certain lines of code.
expr should be like '1 2' to emphasize lines 1 and 2 of a code block.
Returns a list of ints, the line numbers to emphasize.
"""
if not expr:
return []
try:
return list(map(int, expr.split()))
except ValueError: # pragma: no cover
return []
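# Example (added for clarity): parse_hl_lines('1 3') -> [1, 3], while a
# malformed value such as parse_hl_lines('a b') falls back to [].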
# ------------------ The Main CodeHilite Class ----------------------
class CodeHilite:
"""
Determine language of source code, and pass it on to the Pygments highlighter.
Usage:
code = CodeHilite(src=some_code, lang='python')
html = code.hilite()
Arguments:
* src: Source string or any object with a .readline attribute.
* lang: String name of Pygments lexer to use for highlighting. Default: `None`.
* guess_lang: Auto-detect which lexer to use. Ignored if `lang` is set to a valid
value. Default: `True`.
* use_pygments: Pass code to pygments for code highlighting. If `False`, the code is
instead wrapped for highlighting by a JavaScript library. Default: `True`.
* linenums: An alias to Pygments `linenos` formatter option. Default: `None`.
* css_class: An alias to Pygments `cssclass` formatter option. Default: 'codehilite'.
* lang_prefix: Prefix prepended to the language when `use_pygments` is `False`.
Default: "language-".
Other Options:
Any other options are accepted and passed on to the lexer and formatter. Therefore,
valid options include any options which are accepted by the `html` formatter or
whichever lexer the code's language uses. Note that most lexers do not have any
options. However, a few have very useful options, such as PHP's `startinline` option.
Any invalid options are ignored without error.
Formatter options: https://pygments.org/docs/formatters/#HtmlFormatter
Lexer Options: https://pygments.org/docs/lexers/
Advanced Usage:
code = CodeHilite(
src = some_code,
lang = 'php',
startinline = True, # Lexer option. Snippet does not start with `<?php`.
linenostart = 42, # Formatter option. Snippet starts on line 42.
hl_lines = [45, 49, 50], # Formatter option. Highlight lines 45, 49, and 50.
linenos = 'inline' # Formatter option. Avoid alignment problems.
)
html = code.hilite()
"""
def __init__(self, src, **options):
self.src = src
self.lang = options.pop('lang', None)
self.guess_lang = options.pop('guess_lang', True)
self.use_pygments = options.pop('use_pygments', True)
self.lang_prefix = options.pop('lang_prefix', 'language-')
if 'linenos' not in options:
options['linenos'] = options.pop('linenums', None)
if 'cssclass' not in options:
options['cssclass'] = options.pop('css_class', 'codehilite')
if 'wrapcode' not in options:
# Override pygments default
options['wrapcode'] = True
# Disallow use of `full` option
options['full'] = False
self.options = options
def hilite(self, shebang=True):
"""
Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter with
optional line numbers. The output should then be styled with css to
your liking. No styles are applied by default - only styling hooks
(i.e.: <span class="k">).
returns : A string of html.
"""
self.src = self.src.strip('\n')
if self.lang is None and shebang:
self._parseHeader()
if pygments and self.use_pygments:
try:
lexer = get_lexer_by_name(self.lang, **self.options)
except ValueError:
try:
if self.guess_lang:
lexer = guess_lexer(self.src, **self.options)
else:
lexer = get_lexer_by_name('text', **self.options)
except ValueError: # pragma: no cover
lexer = get_lexer_by_name('text', **self.options)
formatter = get_formatter_by_name('html', **self.options)
return highlight(self.src, lexer, formatter)
else:
# just escape and build markup usable by JS highlighting libs
txt = self.src.replace('&', '&amp;')
txt = txt.replace('<', '&lt;')
txt = txt.replace('>', '&gt;')
txt = txt.replace('"', '&quot;')
classes = []
if self.lang:
classes.append('{}{}'.format(self.lang_prefix, self.lang))
if self.options['linenos']:
classes.append('linenums')
class_str = ''
if classes:
class_str = ' class="{}"'.format(' '.join(classes))
return '<pre class="{}"><code{}>{}\n</code></pre>\n'.format(
self.options['cssclass'],
class_str,
txt
)
def _parseHeader(self):
"""
Determines the language of a code block from its shebang line and whether
that line should be removed or left in place. If the shebang line
contains a path (even a single /) then it is assumed to be a real
shebang line and left alone. However, if no path is given
(e.g.: #!python or :::python) then it is assumed to be a mock shebang
for language identification of a code fragment and removed from the
code block prior to processing for code highlighting. When a mock
shebang (e.g.: #!python) is found, line numbering is turned on. When
colons are found in place of a shebang (e.g.: :::python), line
numbering is left in its current state - off by default.
Also parses optional list of highlight lines, like:
:::python hl_lines="1 3"
"""
import re
# split text into lines
lines = self.src.split("\n")
# pull first line to examine
fl = lines.pop(0)
c = re.compile(r'''
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w#.+-]*) # The language
\s* # Arbitrary whitespace
# Optional highlight lines, single- or double-quote-delimited
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
''', re.VERBOSE)
# search first line for shebang
m = c.search(fl)
if m:
# we have a match
try:
self.lang = m.group('lang').lower()
except IndexError: # pragma: no cover
self.lang = None
if m.group('path'):
# path exists - restore first line
lines.insert(0, fl)
if self.options['linenos'] is None and m.group('shebang'):
# Overridable and Shebang exists - use line numbers
self.options['linenos'] = True
self.options['hl_lines'] = parse_hl_lines(m.group('hl_lines'))
else:
# No match
lines.insert(0, fl)
self.src = "\n".join(lines).strip("\n")
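# Illustrating the header forms handled above (examples added for clarity):
#   '#!python'                 -> lang='python', line removed, linenos on
#   ':::python hl_lines="1 3"' -> lang='python', line removed, hl_lines=[1, 3]
#   '#!/usr/bin/python'        -> real shebang: lang='python', line kept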
# ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(Treeprocessor):
""" Highlight source code in code blocks. """
def code_unescape(self, text):
"""Unescape code."""
text = text.replace("&lt;", "<")
text = text.replace("&gt;", ">")
# Escaped '&' should be replaced at the end to avoid
# conflicting with < and >.
text = text.replace("&amp;", "&")
return text
def run(self, root):
""" Find code blocks and store in htmlStash. """
blocks = root.iter('pre')
for block in blocks:
if len(block) == 1 and block[0].tag == 'code':
local_config = self.config.copy()
code = CodeHilite(
self.code_unescape(block[0].text),
tab_length=self.md.tab_length,
style=local_config.pop('pygments_style', 'default'),
**local_config
)
placeholder = self.md.htmlStash.store(code.hilite())
# Clear codeblock in etree instance
block.clear()
# Change to p element which will later
# be removed when inserting raw html
block.tag = 'p'
block.text = placeholder
class CodeHiliteExtension(Extension):
""" Add source code highlighting to markdown codeblocks. """
def __init__(self, **kwargs):
# define default configs
self.config = {
'linenums': [None,
"Use lines numbers. True|table|inline=yes, False=no, None=auto"],
'guess_lang': [True,
"Automatic language detection - Default: True"],
'css_class': ["codehilite",
"Set class name for wrapper <div> - "
"Default: codehilite"],
'pygments_style': ['default',
'Pygments HTML Formatter Style '
'(Colorscheme) - Default: default'],
'noclasses': [False,
'Use inline styles instead of CSS classes - '
'Default false'],
'use_pygments': [True,
'Use Pygments to Highlight code blocks. '
'Disable if using a JavaScript library. '
'Default: True'],
'lang_prefix': [
'language-',
'Prefix prepended to the language when use_pygments is false. Default: "language-"'
]
}
for key, value in kwargs.items():
if key in self.config:
self.setConfig(key, value)
else:
# manually set unknown keywords.
if isinstance(value, str):
try:
# Attempt to parse str as a bool value
value = parseBoolValue(value, preserve_none=True)
except ValueError:
pass # Assume it's not a bool value. Use as-is.
self.config[key] = [value, '']
def extendMarkdown(self, md):
""" Add HilitePostprocessor to Markdown instance. """
hiliter = HiliteTreeprocessor(md)
hiliter.config = self.getConfigs()
md.treeprocessors.register(hiliter, 'hilite', 30)
md.registerExtension(self)
def makeExtension(**kwargs): # pragma: no cover
return CodeHiliteExtension(**kwargs)
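A concrete run of the non-Pygments path described in the class docstring, as a sketch; the import path assumes this file sits at markdown/extensions/codehilite.py as in the stock package:
from markdown.extensions.codehilite import CodeHilite
code = CodeHilite("print('hi')", lang='python', use_pygments=False)
print(code.hilite())
# -> <pre class="codehilite"><code class="language-python">print('hi')
# </code></pre>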


@@ -0,0 +1,111 @@
"""
Definition List Extension for Python-Markdown
=============================================
Adds parsing of Definition Lists to Python-Markdown.
See <https://Python-Markdown.github.io/extensions/definition_lists>
for documentation.
Original code Copyright 2008 [Waylan Limberg](http://achinghead.com)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..blockprocessors import BlockProcessor, ListIndentProcessor
import xml.etree.ElementTree as etree
import re
class DefListProcessor(BlockProcessor):
""" Process Definition Lists. """
RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
NO_INDENT_RE = re.compile(r'^[ ]{0,3}[^ :]')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
raw_block = blocks.pop(0)
m = self.RE.search(raw_block)
terms = [term.strip() for term in
raw_block[:m.start()].split('\n') if term.strip()]
block = raw_block[m.end():]
no_indent = self.NO_INDENT_RE.match(block)
if no_indent:
d, theRest = (block, None)
else:
d, theRest = self.detab(block)
if d:
d = '{}\n{}'.format(m.group(2), d)
else:
d = m.group(2)
sibling = self.lastChild(parent)
if not terms and sibling is None:
# This is not a definition item. Most likely a paragraph that
# starts with a colon at the beginning of a document or list.
blocks.insert(0, raw_block)
return False
if not terms and sibling.tag == 'p':
# The previous paragraph contains the terms
state = 'looselist'
terms = sibling.text.split('\n')
parent.remove(sibling)
# Acquire new sibling
sibling = self.lastChild(parent)
else:
state = 'list'
if sibling is not None and sibling.tag == 'dl':
# This is another item on an existing list
dl = sibling
if not terms and len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
state = 'looselist'
else:
# This is a new list
dl = etree.SubElement(parent, 'dl')
# Add terms
for term in terms:
dt = etree.SubElement(dl, 'dt')
dt.text = term
# Add definition
self.parser.state.set(state)
dd = etree.SubElement(dl, 'dd')
self.parser.parseBlocks(dd, [d])
self.parser.state.reset()
if theRest:
blocks.insert(0, theRest)
class DefListIndentProcessor(ListIndentProcessor):
""" Process indented children of definition list items. """
# Definition lists need to be aware of all list types
ITEM_TYPES = ['dd', 'li']
LIST_TYPES = ['dl', 'ol', 'ul']
def create_item(self, parent, block):
""" Create a new dd or li (depending on parent) and parse the block with it as the parent. """
dd = etree.SubElement(parent, 'dd')
self.parser.parseBlocks(dd, [block])
class DefListExtension(Extension):
""" Add definition lists to Markdown. """
def extendMarkdown(self, md):
""" Add an instance of DefListProcessor to BlockParser. """
md.parser.blockprocessors.register(DefListIndentProcessor(md.parser), 'defindent', 85)
md.parser.blockprocessors.register(DefListProcessor(md.parser), 'deflist', 25)
def makeExtension(**kwargs): # pragma: no cover
return DefListExtension(**kwargs)
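A minimal sketch of the syntax this file parses, assuming the stock `markdown` entry point:
import markdown
src = 'Apple\n:   A fruit.\n:   A company.'
print(markdown.markdown(src, extensions=['def_list']))
# -> <dl>
# <dt>Apple</dt>
# <dd>A fruit.</dd>
# <dd>A company.</dd>
# </dl>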

Some files were not shown because too many files have changed in this diff.