Transition to monorepo on a new Dev branch

2023-02-22 10:29:46 +01:00
parent 8436f03ec7
commit 3a483dc0ef
3712 changed files with 751 additions and 11 deletions

View File

@@ -0,0 +1,107 @@
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
"""
import warnings
from ..util import parseBoolValue
class Extension:
""" Base class for extensions to subclass. """
# Default config -- to be overridden by a subclass
# Must be of the following format:
# {
# 'key': ['value', 'description']
# }
# Note that Extension.setConfig will raise a KeyError
# if a default is not set here.
config = {}
def __init__(self, **kwargs):
""" Initiate Extension and set up configs. """
self.setConfigs(kwargs)
def getConfig(self, key, default=''):
""" Return a setting for the given key or an empty string. """
if key in self.config:
return self.config[key][0]
else:
return default
def getConfigs(self):
""" Return all configs settings as a dict. """
return {key: self.getConfig(key) for key in self.config.keys()}
def getConfigInfo(self):
""" Return all config descriptions as a list of tuples. """
return [(key, self.config[key][1]) for key in self.config.keys()]
def setConfig(self, key, value):
""" Set a config setting for `key` with the given `value`. """
if isinstance(self.config[key][0], bool):
value = parseBoolValue(value)
if self.config[key][0] is None:
value = parseBoolValue(value, preserve_none=True)
self.config[key][0] = value
def setConfigs(self, items):
""" Set multiple config settings given a dict or list of tuples. """
if hasattr(items, 'items'):
# it's a dict
items = items.items()
for key, value in items:
self.setConfig(key, value)
def _extendMarkdown(self, *args):
""" Private wrapper around extendMarkdown. """
md = args[0]
try:
self.extendMarkdown(md)
except TypeError as e:
if "missing 1 required positional argument" in str(e):
# Must be a 2.x extension. Pass in a dummy md_globals.
self.extendMarkdown(md, {})
warnings.warn(
"The 'md_globals' parameter of '{}.{}.extendMarkdown' is "
"deprecated.".format(self.__class__.__module__, self.__class__.__name__),
category=DeprecationWarning,
stacklevel=2
)
else:
raise
def extendMarkdown(self, md):
"""
Add the various processors and patterns to the Markdown Instance.
This method must be overridden by every extension.
Arguments:
* md: The Markdown instance.
Note: the legacy `md_globals` argument is deprecated and no longer passed.
"""
raise NotImplementedError(
'Extension "%s.%s" must define an "extendMarkdown"'
'method.' % (self.__class__.__module__, self.__class__.__name__)
)
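For reference, a minimal sketch of the subclassing pattern the comments above describe (the `MyExtension` class and its `strip_comments` option are illustrative, not part of this commit):

from markdown.extensions import Extension

class MyExtension(Extension):
    # Each key needs a [default, description] pair; setConfig() raises a
    # KeyError for any key that has no default defined here.
    config = {
        'strip_comments': [True, 'Illustrative boolean option'],
    }

    def extendMarkdown(self, md):
        # Processors would be registered on `md` here; the base class raises
        # NotImplementedError if this method is not overridden.
        md.registerExtension(self)

# Because the default is a bool, setConfig() coerces string values:
ext = MyExtension(strip_comments='false')
assert ext.getConfig('strip_comments') is False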

View File

@@ -0,0 +1,99 @@
'''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
See <https://Python-Markdown.github.io/extensions/abbreviations>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/) and
[Seemant Kulleen](http://www.kulleen.org/)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
'''
from . import Extension
from ..blockprocessors import BlockProcessor
from ..inlinepatterns import InlineProcessor
from ..util import AtomicString
import re
import xml.etree.ElementTree as etree
class AbbrExtension(Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.parser.blockprocessors.register(AbbrPreprocessor(md.parser), 'abbr', 16)
class AbbrPreprocessor(BlockProcessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
RE = re.compile(r'^[*]\[(?P<abbr>[^\]]*)\][ ]?:[ ]*\n?[ ]*(?P<title>.*)$', re.MULTILINE)
def test(self, parent, block):
return True
def run(self, parent, blocks):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
block = blocks.pop(0)
m = self.RE.search(block)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
self.parser.md.inlinePatterns.register(
AbbrInlineProcessor(self._generate_pattern(abbr), title), 'abbr-%s' % abbr, 2
)
if block[m.end():].strip():
# Add any content after match back to blocks as separate block
blocks.insert(0, block[m.end():].lstrip('\n'))
if block[:m.start()].strip():
# Add any content before match back to blocks as separate block
blocks.insert(0, block[:m.start()].rstrip('\n'))
return True
# No match. Restore block.
blocks.insert(0, block)
return False
def _generate_pattern(self, text):
'''
Given a string, returns a regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrInlineProcessor(InlineProcessor):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
super().__init__(pattern)
self.title = title
def handleMatch(self, m, data):
abbr = etree.Element('abbr')
abbr.text = AtomicString(m.group('abbr'))
abbr.set('title', self.title)
return abbr, m.start(0), m.end(0)
def makeExtension(**kwargs): # pragma: no cover
return AbbrExtension(**kwargs)
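A short usage sketch (not part of this commit) showing the reference syntax the block processor above consumes:

import markdown

text = """The HTML and CSS specs.

*[HTML]: Hyper Text Markup Language
*[CSS]: Cascading Style Sheets
"""
html = markdown.markdown(text, extensions=['abbr'])
# Expected (roughly): <p>The <abbr title="Hyper Text Markup Language">HTML</abbr>
# and <abbr title="Cascading Style Sheets">CSS</abbr> specs.</p>
print(html)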

View File

@@ -0,0 +1,170 @@
"""
Admonition extension for Python-Markdown
========================================
Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
[rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions # noqa
See <https://Python-Markdown.github.io/extensions/admonition>
for documentation.
Original code Copyright [Tiago Serafim](https://www.tiagoserafim.com/).
All changes Copyright The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..blockprocessors import BlockProcessor
import xml.etree.ElementTree as etree
import re
class AdmonitionExtension(Extension):
""" Admonition extension for Python-Markdown. """
def extendMarkdown(self, md):
""" Add Admonition to Markdown instance. """
md.registerExtension(self)
md.parser.blockprocessors.register(AdmonitionProcessor(md.parser), 'admonition', 105)
class AdmonitionProcessor(BlockProcessor):
CLASSNAME = 'admonition'
CLASSNAME_TITLE = 'admonition-title'
RE = re.compile(r'(?:^|\n)!!! ?([\w\-]+(?: +[\w\-]+)*)(?: +"(.*?)")? *(?:\n|$)')
RE_SPACES = re.compile(' +')
def __init__(self, parser):
"""Initialization."""
super().__init__(parser)
self.current_sibling = None
self.content_indent = 0
def parse_content(self, parent, block):
"""Get sibling admonition.
Retrieve the appropriate sibling element. This can get tricky when
dealing with lists.
"""
old_block = block
the_rest = ''
# We already acquired the block via test
if self.current_sibling is not None:
sibling = self.current_sibling
block, the_rest = self.detab(block, self.content_indent)
self.current_sibling = None
self.content_indent = 0
return sibling, block, the_rest
sibling = self.lastChild(parent)
if sibling is None or sibling.get('class', '').find(self.CLASSNAME) == -1:
sibling = None
else:
# If the last child is a list and the content is sufficiently indented
# to be under it, then the content's sibling is in the list.
last_child = self.lastChild(sibling)
indent = 0
while last_child:
if (
sibling and block.startswith(' ' * self.tab_length * 2) and
last_child and last_child.tag in ('ul', 'ol', 'dl')
):
# The expectation is that we'll find an <li> or <dt>.
# We should get its last child as well.
sibling = self.lastChild(last_child)
last_child = self.lastChild(sibling) if sibling else None
# Context has been lost at this point, so we must adjust the
# text's indentation level so it will be evaluated correctly
# under the list.
block = block[self.tab_length:]
indent += self.tab_length
else:
last_child = None
if not block.startswith(' ' * self.tab_length):
sibling = None
if sibling is not None:
indent += self.tab_length
block, the_rest = self.detab(old_block, indent)
self.current_sibling = sibling
self.content_indent = indent
return sibling, block, the_rest
def test(self, parent, block):
if self.RE.search(block):
return True
else:
return self.parse_content(parent, block)[0] is not None
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
if m:
if m.start() > 0:
self.parser.parseBlocks(parent, [block[:m.start()]])
block = block[m.end():] # removes the first line
block, theRest = self.detab(block)
else:
sibling, block, theRest = self.parse_content(parent, block)
if m:
klass, title = self.get_class_and_title(m)
div = etree.SubElement(parent, 'div')
div.set('class', '{} {}'.format(self.CLASSNAME, klass))
if title:
p = etree.SubElement(div, 'p')
p.text = title
p.set('class', self.CLASSNAME_TITLE)
else:
# Sibling is a list item; its content should be wrapped in <p>
if sibling.tag in ('li', 'dd') and sibling.text:
text = sibling.text
sibling.text = ''
p = etree.SubElement(sibling, 'p')
p.text = text
div = sibling
self.parser.parseChunk(div, block)
if theRest:
# This block contained unindented line(s) after the first indented
# line. Insert these lines as the first block of the master blocks
# list for future processing.
blocks.insert(0, theRest)
def get_class_and_title(self, match):
klass, title = match.group(1).lower(), match.group(2)
klass = self.RE_SPACES.sub(' ', klass)
if title is None:
# no title was provided, use the capitalized classname as title
# e.g.: `!!! note` will render
# `<p class="admonition-title">Note</p>`
title = klass.split(' ', 1)[0].capitalize()
elif title == '':
# an explicit blank title should not be rendered
# e.g.: `!!! warning ""` will *not* render `p` with a title
title = None
return klass, title
def makeExtension(**kwargs): # pragma: no cover
return AdmonitionExtension(**kwargs)
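A usage sketch (illustrative, not part of this commit): the `!!!` marker with an optional quoted title is what the RE above matches, and indented lines become the body:

import markdown

text = '''!!! note "A note title"
    Indented content becomes the body of the admonition.
'''
html = markdown.markdown(text, extensions=['admonition'])
# Expected (roughly):
# <div class="admonition note">
#   <p class="admonition-title">A note title</p>
#   <p>Indented content becomes the body of the admonition.</p>
# </div>
print(html)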

View File

@@ -0,0 +1,166 @@
"""
Attribute List Extension for Python-Markdown
============================================
Adds attribute list syntax. Inspired by
[maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
feature of the same name.
See <https://Python-Markdown.github.io/extensions/attr_list>
for documentation.
Original code Copyright 2011 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2011-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..treeprocessors import Treeprocessor
import re
def _handle_double_quote(s, t):
k, v = t.split('=', 1)
return k, v.strip('"')
def _handle_single_quote(s, t):
k, v = t.split('=', 1)
return k, v.strip("'")
def _handle_key_value(s, t):
return t.split('=', 1)
def _handle_word(s, t):
if t.startswith('.'):
return '.', t[1:]
if t.startswith('#'):
return 'id', t[1:]
return t, t
_scanner = re.Scanner([
(r'[^ =]+=".*?"', _handle_double_quote),
(r"[^ =]+='.*?'", _handle_single_quote),
(r'[^ =]+=[^ =]+', _handle_key_value),
(r'[^ =]+', _handle_word),
(r' ', None)
])
def get_attrs(str):
""" Parse attribute list and return a list of attribute tuples. """
return _scanner.scan(str)[0]
def isheader(elem):
return elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
class AttrListTreeprocessor(Treeprocessor):
BASE_RE = r'\{\:?[ ]*([^\}\n ][^\}\n]*)[ ]*\}'
HEADER_RE = re.compile(r'[ ]+{}[ ]*$'.format(BASE_RE))
BLOCK_RE = re.compile(r'\n[ ]*{}[ ]*$'.format(BASE_RE))
INLINE_RE = re.compile(r'^{}'.format(BASE_RE))
NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff'
r'\u0370-\u037d\u037f-\u1fff\u200c-\u200d'
r'\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff'
r'\uf900-\ufdcf\ufdf0-\ufffd'
r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+')
def run(self, doc):
for elem in doc.iter():
if self.md.is_block_level(elem.tag):
# Block level: check for attrs on last line of text
RE = self.BLOCK_RE
if isheader(elem) or elem.tag in ['dt', 'td', 'th']:
# header, def-term, or table cell: check for attrs at end of element
RE = self.HEADER_RE
if len(elem) and elem.tag == 'li':
# special case list items. children may include a ul or ol.
pos = None
# find the ul or ol position
for i, child in enumerate(elem):
if child.tag in ['ul', 'ol']:
pos = i
break
if pos is None and elem[-1].tail:
# use tail of last child. no ul or ol.
m = RE.search(elem[-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[-1].tail = elem[-1].tail[:m.start()]
elif pos is not None and pos > 0 and elem[pos-1].tail:
# use tail of last child before ul or ol
m = RE.search(elem[pos-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[pos-1].tail = elem[pos-1].tail[:m.start()]
elif elem.text:
# use text. ul is first child.
m = RE.search(elem.text)
if m:
self.assign_attrs(elem, m.group(1))
elem.text = elem.text[:m.start()]
elif len(elem) and elem[-1].tail:
# has children. Get from tail of last child
m = RE.search(elem[-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[-1].tail = elem[-1].tail[:m.start()]
if isheader(elem):
# clean up trailing #s
elem[-1].tail = elem[-1].tail.rstrip('#').rstrip()
elif elem.text:
# no children. Get from text.
m = RE.search(elem.text)
if m:
self.assign_attrs(elem, m.group(1))
elem.text = elem.text[:m.start()]
if isheader(elem):
# clean up trailing #s
elem.text = elem.text.rstrip('#').rstrip()
else:
# inline: check for attrs at start of tail
if elem.tail:
m = self.INLINE_RE.match(elem.tail)
if m:
self.assign_attrs(elem, m.group(1))
elem.tail = elem.tail[m.end():]
def assign_attrs(self, elem, attrs):
""" Assign attrs to element. """
for k, v in get_attrs(attrs):
if k == '.':
# add to class
cls = elem.get('class')
if cls:
elem.set('class', '{} {}'.format(cls, v))
else:
elem.set('class', v)
else:
# assign attr k with v
elem.set(self.sanitize_name(k), v)
def sanitize_name(self, name):
"""
Sanitize name as 'an XML Name, minus the ":"'.
See https://www.w3.org/TR/REC-xml-names/#NT-NCName
"""
return self.NAME_RE.sub('_', name)
class AttrListExtension(Extension):
def extendMarkdown(self, md):
md.treeprocessors.register(AttrListTreeprocessor(md), 'attr_list', 8)
md.registerExtension(self)
def makeExtension(**kwargs): # pragma: no cover
return AttrListExtension(**kwargs)
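A usage sketch (illustrative): BASE_RE above makes the leading colon optional, so an attribute list can be attached to a header like this:

import markdown

text = '# A header {: #intro .lead key="value" }'
html = markdown.markdown(text, extensions=['attr_list'])
# Expected (roughly): <h1 id="intro" class="lead" key="value">A header</h1>
print(html)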

View File

@@ -0,0 +1,173 @@
"""
Attribute List Extension for Python-Markdown
============================================
Adds attribute list syntax. Inspired by
[maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
feature of the same name.
See <https://Python-Markdown.github.io/extensions/attr_list>
for documentation.
Original code Copyright 2011 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2011-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..treeprocessors import Treeprocessor
from xml.etree import ElementTree
import re
def _handle_double_quote(s, t):
k, v = t.split('=', 1)
return k, v.strip('"')
def _handle_single_quote(s, t):
k, v = t.split('=', 1)
return k, v.strip("'")
def _handle_key_value(s, t):
return t.split('=', 1)
def _handle_word(s, t):
print(s, t)
if t.startswith('.'):
return '.', t[1:]
if t.startswith('#'):
return 'id', t[1:]
return t, t
_scanner = re.Scanner([
(r'[^ =]+=".*?"', _handle_double_quote),
(r"[^ =]+='.*?'", _handle_single_quote),
(r'[^ =]+=[^ =]+', _handle_key_value),
(r'[^ =]+', _handle_word),
(r' ', None)
])
def get_attrs(str):
""" Parse attribute list and return a list of attribute tuples. """
return _scanner.scan(str)[0]
def isheader(elem):
return elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
class AttrListTreeprocessor(Treeprocessor):
BASE_RE = r'\{\:[ ]*([^\n ][^\n]*)[ ]*\:\}'
HEADER_RE = re.compile(r'[ ]+{}[ ]*$'.format(BASE_RE))
BLOCK_RE = re.compile(r'\n[ ]*{}[ ]*$'.format(BASE_RE))
INLINE_RE = re.compile(r'^{}'.format(BASE_RE))
NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff'
r'\u0370-\u037d\u037f-\u1fff\u200c-\u200d'
r'\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff'
r'\uf900-\ufdcf\ufdf0-\ufffd'
r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+')
def run(self, doc):
for elem in doc.iter():
if self.md.is_block_level(elem.tag):
# Block level: check for attrs on last line of text
RE = self.BLOCK_RE
if isheader(elem) or elem.tag in ['dt', 'td', 'th']:
# header, def-term, or table cell: check for attrs at end of element
RE = self.HEADER_RE
if len(elem) and elem.tag == 'li':
# special case list items. children may include a ul or ol.
pos = None
# find the ul or ol position
for i, child in enumerate(elem):
if child.tag in ['ul', 'ol']:
pos = i
break
if pos is None and elem[-1].tail:
# use tail of last child. no ul or ol.
m = RE.search(elem[-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[-1].tail = elem[-1].tail[:m.start()]
elif pos is not None and pos > 0 and elem[pos-1].tail:
# use tail of last child before ul or ol
m = RE.search(elem[pos-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[pos-1].tail = elem[pos-1].tail[:m.start()]
elif elem.text:
# use text. ul is first child.
m = RE.search(elem.text)
if m:
self.assign_attrs(elem, m.group(1))
elem.text = elem.text[:m.start()]
elif len(elem) and elem[-1].tail:
# has children. Get from tail of last child
m = RE.search(elem[-1].tail)
if m:
self.assign_attrs(elem, m.group(1))
elem[-1].tail = elem[-1].tail[:m.start()]
if isheader(elem):
# clean up trailing #s
elem[-1].tail = elem[-1].tail.rstrip('#').rstrip()
elif elem.text:
# no children. Get from text.
m = RE.search(elem.text)
if m:
self.assign_attrs(elem, m.group(1))
elem.text = elem.text[:m.start()]
if isheader(elem):
# clean up trailing #s
elem.text = elem.text.rstrip('#').rstrip()
else:
# inline: check for attrs at start of tail
if elem.tail:
m = self.INLINE_RE.match(elem.tail)
if m:
self.assign_attrs(elem, m.group(1))
elem.tail = elem.tail[m.end():]
def assign_attrs(self, elem, attrs):
""" Assign attrs to element. """
for k, v in get_attrs(attrs):
print(0, k, v)
if k == '.':
# add to class
cls = elem.get('class')
if cls:
elem.set('class', '{} {}'.format(cls, v))
else:
elem.set('class', v)
elif k.startswith('{=') and k.endswith('=}'):
#elem.set(self.sanitize_name(k), f'" {v} {self.sanitize_name(k)}1="')
print(elem.keys())
#elem.set(elem, v)
else:
# assign attr k with v
elem.set(self.sanitize_name(k), v)
def sanitize_name(self, name):
"""
Sanitize name as 'an XML Name, minus the ":"'.
See https://www.w3.org/TR/REC-xml-names/#NT-NCName
"""
return self.NAME_RE.sub('_', name)
class AttrListExtension(Extension):
def extendMarkdown(self, md):
md.treeprocessors.register(AttrListTreeprocessor(md), 'attr_list', 8)
md.registerExtension(self)
def makeExtension(**kwargs): # pragma: no cover
return AttrListExtension(**kwargs)
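An observation sketch (the test code below is not part of this commit): unlike the copy above, this file's BASE_RE requires both an opening `{:` and a closing `:}`, so an attribute list would have to be written as `{: #intro .lead :}`:

import re

BASE_RE = r'\{\:[ ]*([^\n ][^\n]*)[ ]*\:\}'          # as defined above
HEADER_RE = re.compile(r'[ ]+{}[ ]*$'.format(BASE_RE))

assert HEADER_RE.search('A header {: #intro .lead :}')
assert not HEADER_RE.search('A header {: #intro .lead }')  # no closing `:}`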

View File

@@ -0,0 +1,308 @@
"""
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
See <https://Python-Markdown.github.io/extensions/code_hilite>
for documentation.
Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import parseBoolValue
try: # pragma: no cover
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import get_formatter_by_name
pygments = True
except ImportError: # pragma: no cover
pygments = False
def parse_hl_lines(expr):
"""Support our syntax for emphasizing certain lines of code.
expr should be like '1 2' to emphasize lines 1 and 2 of a code block.
Returns a list of ints, the line numbers to emphasize.
"""
if not expr:
return []
try:
return list(map(int, expr.split()))
except ValueError: # pragma: no cover
return []
# ------------------ The Main CodeHilite Class ----------------------
class CodeHilite:
"""
Determine language of source code, and pass it on to the Pygments highlighter.
Usage:
code = CodeHilite(src=some_code, lang='python')
html = code.hilite()
Arguments:
* src: Source string or any object with a .readline attribute.
* lang: String name of Pygments lexer to use for highlighting. Default: `None`.
* guess_lang: Auto-detect which lexer to use. Ignored if `lang` is set to a valid
value. Default: `True`.
* use_pygments: Pass code to pygments for code highlighting. If `False`, the code is
instead wrapped for highlighting by a JavaScript library. Default: `True`.
* linenums: An alias to Pygments `linenos` formatter option. Default: `None`.
* css_class: An alias to Pygments `cssclass` formatter option. Default: 'codehilite'.
* lang_prefix: Prefix prepended to the language when `use_pygments` is `False`.
Default: "language-".
Other Options:
Any other options are accepted and passed on to the lexer and formatter. Therefore,
valid options include any options which are accepted by the `html` formatter or
whichever lexer the code's language uses. Note that most lexers do not have any
options. However, a few have very useful options, such as PHP's `startinline` option.
Any invalid options are ignored without error.
Formatter options: https://pygments.org/docs/formatters/#HtmlFormatter
Lexer Options: https://pygments.org/docs/lexers/
Advanced Usage:
code = CodeHilite(
src = some_code,
lang = 'php',
startinline = True, # Lexer option. Snippet does not start with `<?php`.
linenostart = 42, # Formatter option. Snippet starts on line 42.
hl_lines = [45, 49, 50], # Formatter option. Highlight lines 45, 49, and 50.
linenos = 'inline' # Formatter option. Avoid alignment problems.
)
html = code.hilite()
"""
def __init__(self, src, **options):
self.src = src
self.lang = options.pop('lang', None)
self.guess_lang = options.pop('guess_lang', True)
self.use_pygments = options.pop('use_pygments', True)
self.lang_prefix = options.pop('lang_prefix', 'language-')
if 'linenos' not in options:
options['linenos'] = options.pop('linenums', None)
if 'cssclass' not in options:
options['cssclass'] = options.pop('css_class', 'codehilite')
if 'wrapcode' not in options:
# Override pygments default
options['wrapcode'] = True
# Disallow use of `full` option
options['full'] = False
self.options = options
def hilite(self, shebang=True):
"""
Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter with
optional line numbers. The output should then be styled with css to
your liking. No styles are applied by default - only styling hooks
(i.e.: <span class="k">).
Returns: A string of HTML.
"""
self.src = self.src.strip('\n')
if self.lang is None and shebang:
self._parseHeader()
if pygments and self.use_pygments:
try:
lexer = get_lexer_by_name(self.lang, **self.options)
except ValueError:
try:
if self.guess_lang:
lexer = guess_lexer(self.src, **self.options)
else:
lexer = get_lexer_by_name('text', **self.options)
except ValueError: # pragma: no cover
lexer = get_lexer_by_name('text', **self.options)
formatter = get_formatter_by_name('html', **self.options)
return highlight(self.src, lexer, formatter)
else:
# just escape and build markup usable by JS highlighting libs
txt = self.src.replace('&', '&amp;')
txt = txt.replace('<', '&lt;')
txt = txt.replace('>', '&gt;')
txt = txt.replace('"', '&quot;')
classes = []
if self.lang:
classes.append('{}{}'.format(self.lang_prefix, self.lang))
if self.options['linenos']:
classes.append('linenums')
class_str = ''
if classes:
class_str = ' class="{}"'.format(' '.join(classes))
return '<pre class="{}"><code{}>{}\n</code></pre>\n'.format(
self.options['cssclass'],
class_str,
txt
)
def _parseHeader(self):
"""
Determines the language of a code block from its shebang line and whether that
line should be removed or left in place. If the shebang line
contains a path (even a single /) then it is assumed to be a real
shebang line and left alone. However, if no path is given
(i.e.: #!python or :::python) then it is assumed to be a mock shebang
for language identification of a code fragment and removed from the
code block prior to processing for code highlighting. When a mock
shebang (i.e.: #!python) is found, line numbering is turned on. When
colons are found in place of a shebang (i.e.: :::python), line
numbering is left in the current state - off by default.
Also parses optional list of highlight lines, like:
:::python hl_lines="1 3"
"""
import re
# split text into lines
lines = self.src.split("\n")
# pull first line to examine
fl = lines.pop(0)
c = re.compile(r'''
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w#.+-]*) # The language
\s* # Arbitrary whitespace
# Optional highlight lines, single- or double-quote-delimited
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
''', re.VERBOSE)
# search first line for shebang
m = c.search(fl)
if m:
# we have a match
try:
self.lang = m.group('lang').lower()
except IndexError: # pragma: no cover
self.lang = None
if m.group('path'):
# path exists - restore first line
lines.insert(0, fl)
if self.options['linenos'] is None and m.group('shebang'):
# Overridable and Shebang exists - use line numbers
self.options['linenos'] = True
self.options['hl_lines'] = parse_hl_lines(m.group('hl_lines'))
else:
# No match
lines.insert(0, fl)
self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
class HiliteTreeprocessor(Treeprocessor):
""" Highlight source code in code blocks. """
def code_unescape(self, text):
"""Unescape code."""
text = text.replace("&lt;", "<")
text = text.replace("&gt;", ">")
# Escaped '&' should be replaced at the end to avoid
# conflicting with < and >.
text = text.replace("&amp;", "&")
return text
def run(self, root):
""" Find code blocks and store in htmlStash. """
blocks = root.iter('pre')
for block in blocks:
if len(block) == 1 and block[0].tag == 'code':
local_config = self.config.copy()
code = CodeHilite(
self.code_unescape(block[0].text),
tab_length=self.md.tab_length,
style=local_config.pop('pygments_style', 'default'),
**local_config
)
placeholder = self.md.htmlStash.store(code.hilite())
# Clear codeblock in etree instance
block.clear()
# Change to p element which will later
# be removed when inserting raw html
block.tag = 'p'
block.text = placeholder
class CodeHiliteExtension(Extension):
""" Add source code highlighting to markdown codeblocks. """
def __init__(self, **kwargs):
# define default configs
self.config = {
'linenums': [None,
"Use lines numbers. True|table|inline=yes, False=no, None=auto"],
'guess_lang': [True,
"Automatic language detection - Default: True"],
'css_class': ["codehilite",
"Set class name for wrapper <div> - "
"Default: codehilite"],
'pygments_style': ['default',
'Pygments HTML Formatter Style '
'(Colorscheme) - Default: default'],
'noclasses': [False,
'Use inline styles instead of CSS classes - '
'Default false'],
'use_pygments': [True,
'Use Pygments to Highlight code blocks. '
'Disable if using a JavaScript library. '
'Default: True'],
'lang_prefix': [
'language-',
'Prefix prepended to the language when use_pygments is false. Default: "language-"'
]
}
for key, value in kwargs.items():
if key in self.config:
self.setConfig(key, value)
else:
# manually set unknown keywords.
if isinstance(value, str):
try:
# Attempt to parse str as a bool value
value = parseBoolValue(value, preserve_none=True)
except ValueError:
pass # Assume it's not a bool value. Use as-is.
self.config[key] = [value, '']
def extendMarkdown(self, md):
""" Add HilitePostprocessor to Markdown instance. """
hiliter = HiliteTreeprocessor(md)
hiliter.config = self.getConfigs()
md.treeprocessors.register(hiliter, 'hilite', 30)
md.registerExtension(self)
def makeExtension(**kwargs): # pragma: no cover
return CodeHiliteExtension(**kwargs)
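A usage sketch (illustrative): with an indented code block, a mock shebang or `:::lang` line selects the lexer as described in `_parseHeader()` above:

import markdown

text = '''    :::python hl_lines="1"
    def hello():
        return "world"
'''
html = markdown.markdown(
    text,
    extensions=['codehilite'],
    extension_configs={'codehilite': {'linenums': False}},
)
# With Pygments installed this yields a <div class="codehilite"> wrapper full of
# <span> styling hooks; without Pygments it falls back to
# <pre class="codehilite"><code class="language-python">...</code></pre>.
print(html)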

View File

@@ -0,0 +1,111 @@
"""
Definition List Extension for Python-Markdown
=============================================
Adds parsing of Definition Lists to Python-Markdown.
See <https://Python-Markdown.github.io/extensions/definition_lists>
for documentation.
Original code Copyright 2008 [Waylan Limberg](http://achinghead.com)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..blockprocessors import BlockProcessor, ListIndentProcessor
import xml.etree.ElementTree as etree
import re
class DefListProcessor(BlockProcessor):
""" Process Definition Lists. """
RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
NO_INDENT_RE = re.compile(r'^[ ]{0,3}[^ :]')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
raw_block = blocks.pop(0)
m = self.RE.search(raw_block)
terms = [term.strip() for term in
raw_block[:m.start()].split('\n') if term.strip()]
block = raw_block[m.end():]
no_indent = self.NO_INDENT_RE.match(block)
if no_indent:
d, theRest = (block, None)
else:
d, theRest = self.detab(block)
if d:
d = '{}\n{}'.format(m.group(2), d)
else:
d = m.group(2)
sibling = self.lastChild(parent)
if not terms and sibling is None:
# This is not a definition item. Most likely a paragraph that
# starts with a colon at the beginning of a document or list.
blocks.insert(0, raw_block)
return False
if not terms and sibling.tag == 'p':
# The previous paragraph contains the terms
state = 'looselist'
terms = sibling.text.split('\n')
parent.remove(sibling)
# Acquire new sibling
sibling = self.lastChild(parent)
else:
state = 'list'
if sibling is not None and sibling.tag == 'dl':
# This is another item on an existing list
dl = sibling
if not terms and len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
state = 'looselist'
else:
# This is a new list
dl = etree.SubElement(parent, 'dl')
# Add terms
for term in terms:
dt = etree.SubElement(dl, 'dt')
dt.text = term
# Add definition
self.parser.state.set(state)
dd = etree.SubElement(dl, 'dd')
self.parser.parseBlocks(dd, [d])
self.parser.state.reset()
if theRest:
blocks.insert(0, theRest)
class DefListIndentProcessor(ListIndentProcessor):
""" Process indented children of definition list items. """
# Definition lists need to be aware of all list types
ITEM_TYPES = ['dd', 'li']
LIST_TYPES = ['dl', 'ol', 'ul']
def create_item(self, parent, block):
""" Create a new dd or li (depending on parent) and parse the block with it as the parent. """
dd = etree.SubElement(parent, 'dd')
self.parser.parseBlocks(dd, [block])
class DefListExtension(Extension):
""" Add definition lists to Markdown. """
def extendMarkdown(self, md):
""" Add an instance of DefListProcessor to BlockParser. """
md.parser.blockprocessors.register(DefListIndentProcessor(md.parser), 'defindent', 85)
md.parser.blockprocessors.register(DefListProcessor(md.parser), 'deflist', 25)
def makeExtension(**kwargs): # pragma: no cover
return DefListExtension(**kwargs)
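A usage sketch (illustrative): a term line followed by `:   definition` lines matches the RE above and becomes a `<dl>`/`<dt>`/`<dd>` structure:

import markdown

text = '''Apple
:   A fruit.
:   A company.
'''
html = markdown.markdown(text, extensions=['def_list'])
# Expected (roughly): <dl><dt>Apple</dt><dd>A fruit.</dd><dd>A company.</dd></dl>
print(html)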

View File

@@ -0,0 +1,58 @@
"""
Python-Markdown Extra Extension
===============================
A compilation of various Python-Markdown extensions that imitates
[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
Note that each of the individual extensions still need to be available
on your PYTHONPATH. This extension simply wraps them all up as a
convenience so that only one extension needs to be listed when
initiating Markdown. See the documentation for each individual
extension for specifics about that extension.
There may be additional extensions that are distributed with
Python-Markdown that are not included here in Extra. Those extensions
are not part of PHP Markdown Extra, and therefore, not part of
Python-Markdown Extra. If you really would like Extra to include
additional extensions, we suggest creating your own clone of Extra
under a different name. You could also edit the `extensions` global
variable defined below, but be aware that such changes may be lost
when you upgrade to any future version of Python-Markdown.
See <https://Python-Markdown.github.io/extensions/extra>
for documentation.
Copyright The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
extensions = [
'fenced_code',
'footnotes',
'attr_list',
'def_list',
'tables',
'abbr',
'md_in_html'
]
class ExtraExtension(Extension):
""" Add various extensions to Markdown class."""
def __init__(self, **kwargs):
""" config is a dumb holder which gets passed to actual ext later. """
self.config = kwargs
def extendMarkdown(self, md):
""" Register extension instances. """
md.registerExtensions(extensions, self.config)
def makeExtension(**kwargs): # pragma: no cover
return ExtraExtension(**kwargs)
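A usage sketch (illustrative): `extra` simply registers the extensions listed above, so a single name enables all of them:

import markdown

html = markdown.markdown('Term\n:   Definition', extensions=['extra'])
print(html)  # the definition list is handled by def_list, pulled in via extra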

View File

@@ -0,0 +1,174 @@
"""
Fenced Code Extension for Python Markdown
=========================================
This extension adds Fenced Code Blocks to Python-Markdown.
See <https://Python-Markdown.github.io/extensions/fenced_code_blocks>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from textwrap import dedent
from . import Extension
from ..preprocessors import Preprocessor
from .codehilite import CodeHilite, CodeHiliteExtension, parse_hl_lines
from .attr_list import get_attrs, AttrListExtension
from ..util import parseBoolValue
from ..serializers import _escape_attrib_html
import re
class FencedCodeExtension(Extension):
def __init__(self, **kwargs):
self.config = {
'lang_prefix': ['language-', 'Prefix prepended to the language. Default: "language-"']
}
super().__init__(**kwargs)
def extendMarkdown(self, md):
""" Add FencedBlockPreprocessor to the Markdown instance. """
md.registerExtension(self)
md.preprocessors.register(FencedBlockPreprocessor(md, self.getConfigs()), 'fenced_code_block', 25)
class FencedBlockPreprocessor(Preprocessor):
FENCED_BLOCK_RE = re.compile(
dedent(r'''
(?P<fence>^(?:~{3,}|`{3,}))[ ]* # opening fence
((\{(?P<attrs>[^\}\n]*)\})| # (optional {attrs} or
(\.?(?P<lang>[\w#.+-]*)[ ]*)? # optional (.)lang
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot)[ ]*)?) # optional hl_lines)
\n # newline (end of opening fence)
(?P<code>.*?)(?<=\n) # the code block
(?P=fence)[ ]*$ # closing fence
'''),
re.MULTILINE | re.DOTALL | re.VERBOSE
)
def __init__(self, md, config):
super().__init__(md)
self.config = config
self.checked_for_deps = False
self.codehilite_conf = {}
self.use_attr_list = False
# List of options to convert to bool values
self.bool_options = [
'linenums',
'guess_lang',
'noclasses',
'use_pygments'
]
def run(self, lines):
""" Match and store Fenced Code Blocks in the HtmlStash. """
# Check for dependent extensions
if not self.checked_for_deps:
for ext in self.md.registeredExtensions:
if isinstance(ext, CodeHiliteExtension):
self.codehilite_conf = ext.getConfigs()
if isinstance(ext, AttrListExtension):
self.use_attr_list = True
self.checked_for_deps = True
text = "\n".join(lines)
while 1:
m = self.FENCED_BLOCK_RE.search(text)
if m:
lang, id, classes, config = None, '', [], {}
if m.group('attrs'):
id, classes, config = self.handle_attrs(get_attrs(m.group('attrs')))
if len(classes):
lang = classes.pop(0)
else:
if m.group('lang'):
lang = m.group('lang')
if m.group('hl_lines'):
# Support hl_lines outside of attrs for backward-compatibility
config['hl_lines'] = parse_hl_lines(m.group('hl_lines'))
# If config is not empty, then the codehighlite extension
# is enabled, so we call it to highlight the code
if self.codehilite_conf and self.codehilite_conf['use_pygments'] and config.get('use_pygments', True):
local_config = self.codehilite_conf.copy()
local_config.update(config)
# Combine classes with cssclass. Ensure cssclass is at end
# as pygments appends a suffix under certain circumstances.
# Ignore ID as Pygments does not offer an option to set it.
if classes:
local_config['css_class'] = '{} {}'.format(
' '.join(classes),
local_config['css_class']
)
highliter = CodeHilite(
m.group('code'),
lang=lang,
style=local_config.pop('pygments_style', 'default'),
**local_config
)
code = highliter.hilite(shebang=False)
else:
id_attr = lang_attr = class_attr = kv_pairs = ''
if lang:
prefix = self.config.get('lang_prefix', 'language-')
lang_attr = f' class="{prefix}{_escape_attrib_html(lang)}"'
if classes:
class_attr = f' class="{_escape_attrib_html(" ".join(classes))}"'
if id:
id_attr = f' id="{_escape_attrib_html(id)}"'
if self.use_attr_list and config and not config.get('use_pygments', False):
# Only assign key/value pairs to code element if attr_list ext is enabled, key/value pairs
# were defined on the code block, and the `use_pygments` key was not set to True. The
# `use_pygments` key could be either set to False or not defined. It is omitted from output.
kv_pairs = ''.join(
f' {k}="{_escape_attrib_html(v)}"' for k, v in config.items() if k != 'use_pygments'
)
code = self._escape(m.group('code'))
code = f'<pre{id_attr}{class_attr}><code{lang_attr}{kv_pairs}>{code}</code></pre>'
placeholder = self.md.htmlStash.store(code)
text = f'{text[:m.start()]}\n{placeholder}\n{text[m.end():]}'
else:
break
return text.split("\n")
def handle_attrs(self, attrs):
""" Return tuple: (id, [list, of, classes], {configs}) """
id = ''
classes = []
configs = {}
for k, v in attrs:
if k == 'id':
id = v
elif k == '.':
classes.append(v)
elif k == 'hl_lines':
configs[k] = parse_hl_lines(v)
elif k in self.bool_options:
configs[k] = parseBoolValue(v, fail_on_errors=False, preserve_none=True)
else:
configs[k] = v
return id, classes, configs
def _escape(self, txt):
""" basic html escaping """
txt = txt.replace('&', '&amp;')
txt = txt.replace('<', '&lt;')
txt = txt.replace('>', '&gt;')
txt = txt.replace('"', '&quot;')
return txt
def makeExtension(**kwargs): # pragma: no cover
return FencedCodeExtension(**kwargs)
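A usage sketch (illustrative): a backtick fence with either a bare language word or a brace-delimited attribute list is what FENCED_BLOCK_RE above matches:

import markdown

text = '''```python
print("hello")
```'''
html = markdown.markdown(text, extensions=['fenced_code'])
# Without codehilite configured this produces (roughly):
# <pre><code class="language-python">print(&quot;hello&quot;)</code></pre>
print(html)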

View File

@@ -0,0 +1,404 @@
"""
Footnotes Extension for Python-Markdown
=======================================
Adds footnote handling to Python-Markdown.
See <https://Python-Markdown.github.io/extensions/footnotes>
for documentation.
Copyright The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..blockprocessors import BlockProcessor
from ..inlinepatterns import InlineProcessor
from ..treeprocessors import Treeprocessor
from ..postprocessors import Postprocessor
from .. import util
from collections import OrderedDict
import re
import copy
import xml.etree.ElementTree as etree
FN_BACKLINK_TEXT = util.STX + "zz1337820767766393qq" + util.ETX
NBSP_PLACEHOLDER = util.STX + "qq3936677670287331zz" + util.ETX
RE_REF_ID = re.compile(r'(fnref)(\d+)')
class FootnoteExtension(Extension):
""" Footnote Extension. """
def __init__(self, **kwargs):
""" Setup configs. """
self.config = {
'PLACE_MARKER':
["///Footnotes Go Here///",
"The text string that marks where the footnotes go"],
'UNIQUE_IDS':
[False,
"Avoid name collisions across "
"multiple calls to reset()."],
"BACKLINK_TEXT":
["&#8617;",
"The text string that links from the footnote "
"to the reader's place."],
"BACKLINK_TITLE":
["Jump back to footnote %d in the text",
"The text string used for the title HTML attribute "
"of the backlink. %d will be replaced by the "
"footnote number."],
"SEPARATOR":
[":",
"Footnote separator."]
}
super().__init__(**kwargs)
# In multiple invocations, emit links that don't get tangled.
self.unique_prefix = 0
self.found_refs = {}
self.used_refs = set()
self.reset()
def extendMarkdown(self, md):
""" Add pieces to Markdown. """
md.registerExtension(self)
self.parser = md.parser
self.md = md
# Insert a blockprocessor before ReferencePreprocessor
md.parser.blockprocessors.register(FootnoteBlockProcessor(self), 'footnote', 17)
# Insert an inline pattern before ImageReferencePattern
FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
md.inlinePatterns.register(FootnoteInlineProcessor(FOOTNOTE_RE, self), 'footnote', 175)
# Insert a tree-processor that would actually add the footnote div
# This must be before all other treeprocessors (i.e., inline and
# codehilite) so they can run on the contents of the div.
md.treeprocessors.register(FootnoteTreeprocessor(self), 'footnote', 50)
# Insert a tree-processor that will run after inline is done.
# In this tree-processor we want to check our duplicate footnote tracker
# And add additional backrefs to the footnote pointing back to the
# duplicated references.
md.treeprocessors.register(FootnotePostTreeprocessor(self), 'footnote-duplicate', 15)
# Insert a postprocessor after amp_substitute processor
md.postprocessors.register(FootnotePostprocessor(self), 'footnote', 25)
def reset(self):
""" Clear footnotes on reset, and prepare for distinct document. """
self.footnotes = OrderedDict()
self.unique_prefix += 1
self.found_refs = {}
self.used_refs = set()
def unique_ref(self, reference, found=False):
""" Get a unique reference if there are duplicates. """
if not found:
return reference
original_ref = reference
while reference in self.used_refs:
ref, rest = reference.split(self.get_separator(), 1)
m = RE_REF_ID.match(ref)
if m:
reference = '%s%d%s%s' % (m.group(1), int(m.group(2))+1, self.get_separator(), rest)
else:
reference = '%s%d%s%s' % (ref, 2, self.get_separator(), rest)
self.used_refs.add(reference)
if original_ref in self.found_refs:
self.found_refs[original_ref] += 1
else:
self.found_refs[original_ref] = 1
return reference
def findFootnotesPlaceholder(self, root):
""" Return ElementTree Element that contains Footnote placeholder. """
def finder(element):
for child in element:
if child.text:
if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, True
if child.tail:
if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
return child, element, False
child_res = finder(child)
if child_res is not None:
return child_res
return None
res = finder(root)
return res
def setFootnote(self, id, text):
""" Store a footnote for later retrieval. """
self.footnotes[id] = text
def get_separator(self):
""" Get the footnote separator. """
return self.getConfig("SEPARATOR")
def makeFootnoteId(self, id):
""" Return footnote link id. """
if self.getConfig("UNIQUE_IDS"):
return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
else:
return 'fn{}{}'.format(self.get_separator(), id)
def makeFootnoteRefId(self, id, found=False):
""" Return footnote back-link id. """
if self.getConfig("UNIQUE_IDS"):
return self.unique_ref('fnref%s%d-%s' % (self.get_separator(), self.unique_prefix, id), found)
else:
return self.unique_ref('fnref{}{}'.format(self.get_separator(), id), found)
def makeFootnotesDiv(self, root):
""" Return div of footnotes as et Element. """
if not list(self.footnotes.keys()):
return None
div = etree.Element("div")
div.set('class', 'footnote')
# etree.SubElement(div, "hr")
ol = etree.SubElement(div, "ol")
# ol = etree.Element("ol")
surrogate_parent = etree.Element("div")
for index, id in enumerate(self.footnotes.keys(), start=1):
li = etree.SubElement(ol, "li")
li.set("id", self.makeFootnoteId(id))
# Parse footnote with surrogate parent as li cannot be used.
# List block handlers have special logic to deal with li.
# When we are done parsing, we will copy everything over to li.
self.parser.parseChunk(surrogate_parent, self.footnotes[id])
for el in list(surrogate_parent):
li.append(el)
surrogate_parent.remove(el)
backlink = etree.Element("a")
backlink.set("href", "#" + self.makeFootnoteRefId(id))
backlink.set("class", "footnote-backref")
backlink.set(
"title",
self.getConfig("BACKLINK_TITLE") % (index)
)
backlink.text = FN_BACKLINK_TEXT
if len(li):
node = li[-1]
if node.tag == "p":
node.text = node.text + NBSP_PLACEHOLDER
node.append(backlink)
else:
p = etree.SubElement(li, "p")
p.append(backlink)
return div
# return ol
class FootnoteBlockProcessor(BlockProcessor):
""" Find all footnote references and store for later use. """
RE = re.compile(r'^[ ]{0,3}\[\^([^\]]*)\]:[ ]*(.*)$', re.MULTILINE)
def __init__(self, footnotes):
super().__init__(footnotes.parser)
self.footnotes = footnotes
def test(self, parent, block):
return True
def run(self, parent, blocks):
""" Find, set, and remove footnote definitions. """
block = blocks.pop(0)
m = self.RE.search(block)
if m:
id = m.group(1)
fn_blocks = [m.group(2)]
# Handle rest of block
therest = block[m.end():].lstrip('\n')
m2 = self.RE.search(therest)
if m2:
# Another footnote exists in the rest of this block.
# Any content before match is continuation of this footnote, which may be lazily indented.
before = therest[:m2.start()].rstrip('\n')
fn_blocks[0] = '\n'.join([fn_blocks[0], self.detab(before)]).lstrip('\n')
# Add back to blocks everything from beginning of match forward for next iteration.
blocks.insert(0, therest[m2.start():])
else:
# All remaining lines of block are continuation of this footnote, which may be lazily indented.
fn_blocks[0] = '\n'.join([fn_blocks[0], self.detab(therest)]).strip('\n')
# Check for child elements in remaining blocks.
fn_blocks.extend(self.detectTabbed(blocks))
footnote = "\n\n".join(fn_blocks)
self.footnotes.setFootnote(id, footnote.rstrip())
if block[:m.start()].strip():
# Add any content before match back to blocks as separate block
blocks.insert(0, block[:m.start()].rstrip('\n'))
return True
# No match. Restore block.
blocks.insert(0, block)
return False
def detectTabbed(self, blocks):
""" Find indented text and remove indent before further proccesing.
Returns: a list of blocks with indentation removed.
"""
fn_blocks = []
while blocks:
if blocks[0].startswith(' '*4):
block = blocks.pop(0)
# Check for new footnotes within this block and split at new footnote.
m = self.RE.search(block)
if m:
# Another footnote exists in this block.
# Any content before match is continuation of this footnote, which may be lazily indented.
before = block[:m.start()].rstrip('\n')
fn_blocks.append(self.detab(before))
# Add back to blocks everything from beginning of match forward for next iteration.
blocks.insert(0, block[m.start():])
# End of this footnote.
break
else:
# Entire block is part of this footnote.
fn_blocks.append(self.detab(block))
else:
# End of this footnote.
break
return fn_blocks
def detab(self, block):
""" Remove one level of indent from a block.
Preserve lazily indented blocks by only removing indent from indented lines.
"""
lines = block.split('\n')
for i, line in enumerate(lines):
if line.startswith(' '*4):
lines[i] = line[4:]
return '\n'.join(lines)
class FootnoteInlineProcessor(InlineProcessor):
""" InlinePattern for footnote markers in a document's body text. """
def __init__(self, pattern, footnotes):
super().__init__(pattern)
self.footnotes = footnotes
def handleMatch(self, m, data):
id = m.group(1)
if id in self.footnotes.footnotes.keys():
sup = etree.Element("sup")
a = etree.SubElement(sup, "a")
sup.set('id', self.footnotes.makeFootnoteRefId(id, found=True))
a.set('href', '#' + self.footnotes.makeFootnoteId(id))
a.set('class', 'footnote-ref')
a.text = str(list(self.footnotes.footnotes.keys()).index(id) + 1)
return sup, m.start(0), m.end(0)
else:
return None, None, None
class FootnotePostTreeprocessor(Treeprocessor):
""" Amend footnote div with duplicates. """
def __init__(self, footnotes):
self.footnotes = footnotes
def add_duplicates(self, li, duplicates):
""" Adjust current li and add the duplicates: fnref2, fnref3, etc. """
for link in li.iter('a'):
# Find the link that needs to be duplicated.
if link.attrib.get('class', '') == 'footnote-backref':
ref, rest = link.attrib['href'].split(self.footnotes.get_separator(), 1)
# Duplicate the link the number of times needed and
# point each copy to the appropriate reference.
links = []
for index in range(2, duplicates + 1):
sib_link = copy.deepcopy(link)
sib_link.attrib['href'] = '%s%d%s%s' % (ref, index, self.footnotes.get_separator(), rest)
links.append(sib_link)
self.offset += 1
# Add all the new duplicate links.
el = list(li)[-1]
for link in links:
el.append(link)
break
def get_num_duplicates(self, li):
""" Get the number of duplicate refs of the footnote. """
fn, rest = li.attrib.get('id', '').split(self.footnotes.get_separator(), 1)
link_id = '{}ref{}{}'.format(fn, self.footnotes.get_separator(), rest)
return self.footnotes.found_refs.get(link_id, 0)
def handle_duplicates(self, parent):
""" Find duplicate footnotes and format and add the duplicates. """
for li in list(parent):
# Check the number of duplicate footnotes and insert
# additional links if needed.
count = self.get_num_duplicates(li)
if count > 1:
self.add_duplicates(li, count)
def run(self, root):
""" Crawl the footnote div and add missing duplicate footnotes. """
self.offset = 0
for div in root.iter('div'):
if div.attrib.get('class', '') == 'footnote':
# Footnotes should be under the first ordered list under
# the footnote div. So once we find it, quit.
for ol in div.iter('ol'):
self.handle_duplicates(ol)
break
class FootnoteTreeprocessor(Treeprocessor):
""" Build and append footnote div to end of document. """
def __init__(self, footnotes):
self.footnotes = footnotes
def run(self, root):
footnotesDiv = self.footnotes.makeFootnotesDiv(root)
if footnotesDiv is not None:
result = self.footnotes.findFootnotesPlaceholder(root)
if result:
child, parent, isText = result
ind = list(parent).index(child)
if isText:
parent.remove(child)
parent.insert(ind, footnotesDiv)
else:
parent.insert(ind + 1, footnotesDiv)
child.tail = None
else:
root.append(footnotesDiv)
class FootnotePostprocessor(Postprocessor):
""" Replace placeholders with html entities. """
def __init__(self, footnotes):
self.footnotes = footnotes
def run(self, text):
text = text.replace(
FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT")
)
return text.replace(NBSP_PLACEHOLDER, "&#160;")
def makeExtension(**kwargs): # pragma: no cover
""" Return an instance of the FootnoteExtension """
return FootnoteExtension(**kwargs)
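A usage sketch (illustrative): `[^label]` in body text links to a `[^label]: ...` definition, which the tree-processor collects into a footnote div appended to the document:

import markdown

text = '''Some claim.[^1]

[^1]: The supporting footnote text.
'''
html = markdown.markdown(text, extensions=['footnotes'])
# Expected (roughly): a <sup><a class="footnote-ref" ...></a></sup> marker in the
# paragraph plus a <div class="footnote"><ol><li id="fn:1">...</li></ol></div>
# appended at the end.
print(html)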

View File

@@ -0,0 +1,67 @@
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
Legacy Attributes Extension
===========================
An extension to Python Markdown which implements legacy attributes.
Prior to Python-Markdown version 3.0, the Markdown class had an `enable_attributes`
keyword which was on by default and provided for attributes to be defined for elements
using the format `{@key=value}`. This extension is provided as a replacement for
backward compatibility. New documents should be authored using attr_lists. However,
numerous documents exist which have been using the old attribute format for many
years. This extension can be used to continue to render those documents correctly.
"""
import re
from markdown.treeprocessors import Treeprocessor, isString
from markdown.extensions import Extension
ATTR_RE = re.compile(r'\{@([^\}]*)=([^\}]*)}') # {@id=123}
class LegacyAttrs(Treeprocessor):
def run(self, doc):
"""Find and set values of attributes ({@key=value}). """
for el in doc.iter():
alt = el.get('alt', None)
if alt is not None:
el.set('alt', self.handleAttributes(el, alt))
if el.text and isString(el.text):
el.text = self.handleAttributes(el, el.text)
if el.tail and isString(el.tail):
el.tail = self.handleAttributes(el, el.tail)
def handleAttributes(self, el, txt):
""" Set attributes and return text without definitions. """
def attributeCallback(match):
el.set(match.group(1), match.group(2).replace('\n', ' '))
return ATTR_RE.sub(attributeCallback, txt)
class LegacyAttrExtension(Extension):
def extendMarkdown(self, md):
md.treeprocessors.register(LegacyAttrs(md), 'legacyattrs', 15)
def makeExtension(**kwargs): # pragma: no cover
return LegacyAttrExtension(**kwargs)
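A usage sketch (illustrative): the pre-3.0 `{@key=value}` form is matched by ATTR_RE above and turned into an element attribute:

import markdown

html = markdown.markdown('A paragraph. {@id=intro}', extensions=['legacy_attrs'])
# Expected (roughly): <p id="intro">A paragraph. </p>
print(html)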

View File

@@ -0,0 +1,49 @@
'''
Legacy Em Extension for Python-Markdown
=======================================
This extension provides legacy behavior for _connected_words_.
Copyright 2015-2018 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
'''
from . import Extension
from ..inlinepatterns import UnderscoreProcessor, EmStrongItem, EM_STRONG2_RE, STRONG_EM2_RE
import re
# _emphasis_
EMPHASIS_RE = r'(_)([^_]+)\1'
# __strong__
STRONG_RE = r'(_{2})(.+?)\1'
# __strong_em___
STRONG_EM_RE = r'(_)\1(?!\1)([^_]+?)\1(?!\1)(.+?)\1{3}'
class LegacyUnderscoreProcessor(UnderscoreProcessor):
"""Emphasis processor for handling strong and em matches inside underscores."""
PATTERNS = [
EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
]
class LegacyEmExtension(Extension):
""" Add legacy_em extension to Markdown class."""
def extendMarkdown(self, md):
""" Modify inline patterns. """
md.inlinePatterns.register(LegacyUnderscoreProcessor(r'_'), 'em_strong2', 50)
def makeExtension(**kwargs): # pragma: no cover
""" Return an instance of the LegacyEmExtension """
return LegacyEmExtension(**kwargs)
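A usage sketch (illustrative): enabling the extension swaps in the patterns above, restoring the pre-3.0 handling of underscores inside words:

import markdown

html = markdown.markdown('_connected_words_', extensions=['legacy_em'])
print(html)  # underscores inside words act as emphasis markers, per the patterns above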

View File

@@ -0,0 +1,50 @@
"""
Copyright 2011, 2012 The Active Archives contributors
Copyright 2011, 2012 Alexandre Leray
Copyright 2020 Honza Javorek <https://honzajavorek.cz>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the <organization> nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from . import Extension
from ..inlinepatterns import SimpleTagInlineProcessor
class DelInsExtension(Extension):
def extendMarkdown(self, md):
del_proc = SimpleTagInlineProcessor(r'(\~\~)(.+?)(\~\~)', 'del')
md.inlinePatterns.register(del_proc, 'del', 200)
ins_proc = SimpleTagInlineProcessor(r'(\+\+)(.+?)(\+\+)', 'ins')
md.inlinePatterns.register(ins_proc, 'ins', 200)
def makeExtension(**kwargs):
return DelInsExtension(**kwargs)
if __name__ == '__main__':
import doctest
doctest.testfile('README.md')
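A usage sketch (illustrative; the module's import path depends on where it lives in the monorepo, so the class is re-created inline here rather than imported):

import markdown
from markdown.extensions import Extension
from markdown.inlinepatterns import SimpleTagInlineProcessor

class DelIns(Extension):
    # Same registrations as the DelInsExtension class above.
    def extendMarkdown(self, md):
        md.inlinePatterns.register(
            SimpleTagInlineProcessor(r'(\~\~)(.+?)(\~\~)', 'del'), 'del', 200)
        md.inlinePatterns.register(
            SimpleTagInlineProcessor(r'(\+\+)(.+?)(\+\+)', 'ins'), 'ins', 200)

html = markdown.markdown('~~removed~~ and ++inserted++', extensions=[DelIns()])
# Expected (roughly): <p><del>removed</del> and <ins>inserted</ins></p>
print(html)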

View File

@@ -0,0 +1,364 @@
"""
Python-Markdown Markdown in HTML Extension
==========================================
An implementation of [PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/)'s
parsing of Markdown syntax in raw HTML.
See <https://Python-Markdown.github.io/extensions/raw_html>
for documentation.
Copyright The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..blockprocessors import BlockProcessor
from ..preprocessors import Preprocessor
from ..postprocessors import RawHtmlPostprocessor
from .. import util
from ..htmlparser import HTMLExtractor, blank_line_re
import xml.etree.ElementTree as etree
class HTMLExtractorExtra(HTMLExtractor):
"""
Override HTMLExtractor and create etree Elements for any elements which should have content parsed as Markdown.
"""
def __init__(self, md, *args, **kwargs):
# All block-level tags.
self.block_level_tags = set(md.block_level_elements.copy())
# Block-level tags in which the content only gets span level parsing
self.span_tags = set(
['address', 'dd', 'dt', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'legend', 'li', 'p', 'summary', 'td', 'th']
)
# Block-level tags which never get their content parsed.
self.raw_tags = set(['canvas', 'math', 'option', 'pre', 'script', 'style', 'textarea'])
super().__init__(md, *args, **kwargs)
# Block-level tags in which the content gets parsed as blocks
self.block_tags = set(self.block_level_tags) - (self.span_tags | self.raw_tags | self.empty_tags)
self.span_and_blocks_tags = self.block_tags | self.span_tags
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.mdstack = [] # When markdown=1, stack contains a list of tags
self.treebuilder = etree.TreeBuilder()
self.mdstate = [] # one of 'block', 'span', 'off', or None
super().reset()
def close(self):
"""Handle any buffered data."""
super().close()
# Handle any unclosed tags.
if self.mdstack:
# Close the outermost parent. handle_endtag will close all unclosed children.
self.handle_endtag(self.mdstack[0])
def get_element(self):
""" Return element from treebuilder and reset treebuilder for later use. """
element = self.treebuilder.close()
self.treebuilder = etree.TreeBuilder()
return element
def get_state(self, tag, attrs):
""" Return state from tag and `markdown` attr. One of 'block', 'span', or 'off'. """
md_attr = attrs.get('markdown', '0')
if md_attr == 'markdown':
# `<tag markdown>` is the same as `<tag markdown='1'>`.
md_attr = '1'
parent_state = self.mdstate[-1] if self.mdstate else None
if parent_state == 'off' or (parent_state == 'span' and md_attr != '0'):
# Only use the parent state if it is more restrictive than the markdown attribute.
md_attr = parent_state
if ((md_attr == '1' and tag in self.block_tags) or
(md_attr == 'block' and tag in self.span_and_blocks_tags)):
return 'block'
elif ((md_attr == '1' and tag in self.span_tags) or
(md_attr == 'span' and tag in self.span_and_blocks_tags)):
return 'span'
elif tag in self.block_level_tags:
return 'off'
else: # pragma: no cover
return None
def handle_starttag(self, tag, attrs):
# Handle tags that should always be empty and do not specify a closing tag
if tag in self.empty_tags and (self.at_line_start() or self.intail):
attrs = {key: value if value is not None else key for key, value in attrs}
if "markdown" in attrs:
attrs.pop('markdown')
element = etree.Element(tag, attrs)
data = etree.tostring(element, encoding='unicode', method='html')
else:
data = self.get_starttag_text()
self.handle_empty_tag(data, True)
return
if tag in self.block_level_tags and (self.at_line_start() or self.intail):
# Valueless attr (ex: `<tag checked>`) results in `[('checked', None)]`.
# Convert to `{'checked': 'checked'}`.
attrs = {key: value if value is not None else key for key, value in attrs}
state = self.get_state(tag, attrs)
if self.inraw or (state in [None, 'off'] and not self.mdstack):
# fall back to default behavior
attrs.pop('markdown', None)
super().handle_starttag(tag, attrs)
else:
if 'p' in self.mdstack and tag in self.block_level_tags:
# Close unclosed 'p' tag
self.handle_endtag('p')
self.mdstate.append(state)
self.mdstack.append(tag)
attrs['markdown'] = state
self.treebuilder.start(tag, attrs)
else:
# Span level tag
if self.inraw:
super().handle_starttag(tag, attrs)
else:
text = self.get_starttag_text()
if self.mdstate and self.mdstate[-1] == "off":
self.handle_data(self.md.htmlStash.store(text))
else:
self.handle_data(text)
if tag in self.CDATA_CONTENT_ELEMENTS:
# This is presumably a standalone tag in a code span (see #1036).
self.clear_cdata_mode()
def handle_endtag(self, tag):
if tag in self.block_level_tags:
if self.inraw:
super().handle_endtag(tag)
elif tag in self.mdstack:
# Close element and any unclosed children
while self.mdstack:
item = self.mdstack.pop()
self.mdstate.pop()
self.treebuilder.end(item)
if item == tag:
break
if not self.mdstack:
# Last item in stack is closed. Stash it
element = self.get_element()
# Get last entry to see if it ends in newlines
# If it is an element, assume there are no newlines
item = self.cleandoc[-1] if self.cleandoc else ''
# If we only have one newline before block element, add another
if not item.endswith('\n\n') and item.endswith('\n'):
self.cleandoc.append('\n')
self.cleandoc.append(self.md.htmlStash.store(element))
self.cleandoc.append('\n\n')
self.state = []
# Check if element has a tail
if not blank_line_re.match(
self.rawdata[self.line_offset + self.offset + len(self.get_endtag_text(tag)):]):
# More content exists after endtag.
self.intail = True
else:
# Treat orphan closing tag as a span level tag.
text = self.get_endtag_text(tag)
if self.mdstate and self.mdstate[-1] == "off":
self.handle_data(self.md.htmlStash.store(text))
else:
self.handle_data(text)
else:
# Span level tag
if self.inraw:
super().handle_endtag(tag)
else:
text = self.get_endtag_text(tag)
if self.mdstate and self.mdstate[-1] == "off":
self.handle_data(self.md.htmlStash.store(text))
else:
self.handle_data(text)
def handle_startendtag(self, tag, attrs):
if tag in self.empty_tags:
attrs = {key: value if value is not None else key for key, value in attrs}
if "markdown" in attrs:
attrs.pop('markdown')
element = etree.Element(tag, attrs)
data = etree.tostring(element, encoding='unicode', method='html')
else:
data = self.get_starttag_text()
else:
data = self.get_starttag_text()
self.handle_empty_tag(data, is_block=self.md.is_block_level(tag))
def handle_data(self, data):
if self.intail and '\n' in data:
self.intail = False
if self.inraw or not self.mdstack:
super().handle_data(data)
else:
self.treebuilder.data(data)
def handle_empty_tag(self, data, is_block):
if self.inraw or not self.mdstack:
super().handle_empty_tag(data, is_block)
else:
if self.at_line_start() and is_block:
self.handle_data('\n' + self.md.htmlStash.store(data) + '\n\n')
else:
self.handle_data(self.md.htmlStash.store(data))
def parse_pi(self, i):
if self.at_line_start() or self.intail or self.mdstack:
# The same override exists in HTMLExtractor without the check
# for mdstack. Therefore, use HTMLExtractor's parent instead.
return super(HTMLExtractor, self).parse_pi(i)
# This is not the beginning of a raw block so treat as plain data
# and avoid consuming any tags which may follow (see #1066).
self.handle_data('<?')
return i + 2
def parse_html_declaration(self, i):
if self.at_line_start() or self.intail or self.mdstack:
# The same override exists in HTMLExtractor without the check
# for mdstack. Therefore, use HTMLExtractor's parent instead.
return super(HTMLExtractor, self).parse_html_declaration(i)
# This is not the beginning of a raw block so treat as plain data
# and avoid consuming any tags which may follow (see #1066).
self.handle_data('<!')
return i + 2
class HtmlBlockPreprocessor(Preprocessor):
"""Remove html blocks from the text and store them for later retrieval."""
def run(self, lines):
source = '\n'.join(lines)
parser = HTMLExtractorExtra(self.md)
parser.feed(source)
parser.close()
return ''.join(parser.cleandoc).split('\n')
class MarkdownInHtmlProcessor(BlockProcessor):
"""Process Markdown Inside HTML Blocks which have been stored in the HtmlStash."""
def test(self, parent, block):
# Always return True. `run` will return `False` if not a valid match.
return True
def parse_element_content(self, element):
"""
Recursively parse the text content of an etree Element as Markdown.
Any block level elements generated from the Markdown will be inserted as children of the element in place
of the text content. All `markdown` attributes are removed. For any elements in which Markdown parsing has
been disabled, the text content of the element and its children is wrapped in an `AtomicString`.
"""
md_attr = element.attrib.pop('markdown', 'off')
if md_attr == 'block':
# Parse content as block level
# The order in which the different parts are parsed (text, children, tails) is important here as the
# order of elements needs to be preserved. We can't be inserting items at a later point in the current
# iteration as we don't want to do raw processing on elements created from parsing Markdown text (for
# example). Therefore, the order of operations is children, tails, text.
# Recursively parse existing children from raw HTML
for child in list(element):
self.parse_element_content(child)
# Parse Markdown text in the tail of children. Do this separately to avoid raw HTML parsing.
# Save the position of each item to be inserted later in reverse.
tails = []
for pos, child in enumerate(element):
if child.tail:
block = child.tail.rstrip('\n')
child.tail = ''
# Use a dummy placeholder element.
dummy = etree.Element('div')
self.parser.parseBlocks(dummy, block.split('\n\n'))
children = list(dummy)
children.reverse()
tails.append((pos + 1, children))
# Insert the elements created from the tails in reverse.
tails.reverse()
for pos, tail in tails:
for item in tail:
element.insert(pos, item)
# Parse Markdown text content. Do this last to avoid raw HTML parsing.
if element.text:
block = element.text.rstrip('\n')
element.text = ''
# Use a dummy placeholder element as the content needs to get inserted before existing children.
dummy = etree.Element('div')
self.parser.parseBlocks(dummy, block.split('\n\n'))
children = list(dummy)
children.reverse()
for child in children:
element.insert(0, child)
elif md_attr == 'span':
# Span level parsing will be handled by inlineprocessors.
# Walk children here to remove any `markdown` attributes.
for child in list(element):
self.parse_element_content(child)
else:
# Disable inline parsing for everything else
if element.text is None:
element.text = ''
element.text = util.AtomicString(element.text)
for child in list(element):
self.parse_element_content(child)
if child.tail:
child.tail = util.AtomicString(child.tail)
def run(self, parent, blocks):
m = util.HTML_PLACEHOLDER_RE.match(blocks[0])
if m:
index = int(m.group(1))
element = self.parser.md.htmlStash.rawHtmlBlocks[index]
if isinstance(element, etree.Element):
# We have a matched element. Process it.
blocks.pop(0)
self.parse_element_content(element)
parent.append(element)
# Cleanup stash. Replace element with empty string to avoid confusing postprocessor.
self.parser.md.htmlStash.rawHtmlBlocks.pop(index)
self.parser.md.htmlStash.rawHtmlBlocks.insert(index, '')
# Confirm the match to the blockparser.
return True
# No match found.
return False
class MarkdownInHTMLPostprocessor(RawHtmlPostprocessor):
def stash_to_string(self, text):
""" Override default to handle any etree elements still in the stash. """
if isinstance(text, etree.Element):
return self.md.serializer(text)
else:
return str(text)
class MarkdownInHtmlExtension(Extension):
"""Add Markdown parsing in HTML to Markdown class."""
def extendMarkdown(self, md):
""" Register extension instances. """
# Replace raw HTML preprocessor
md.preprocessors.register(HtmlBlockPreprocessor(md), 'html_block', 20)
# Add blockprocessor which handles the placeholders for etree elements
md.parser.blockprocessors.register(
MarkdownInHtmlProcessor(md.parser), 'markdown_block', 105
)
# Replace raw HTML postprocessor
md.postprocessors.register(MarkdownInHTMLPostprocessor(md), 'raw_html', 30)
def makeExtension(**kwargs): # pragma: no cover
return MarkdownInHtmlExtension(**kwargs)
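A minimal sketch of how the extension above is used, assuming the standard `md_in_html` entry point name; the commented output follows the documented behavior for `markdown="1"` blocks.
import markdown
source = """<div markdown="1">
This is *Markdown* content inside a raw HTML block.
</div>"""
print(markdown.markdown(source, extensions=['md_in_html']))
# Expected roughly:
# <div>
# <p>This is <em>Markdown</em> content inside a raw HTML block.</p>
# </div>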

View File

@@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
"""Markdown Subscript Extension
Extends the Python-Markdown library to support subscript text.
Given the text:
The molecular composition of water is H~2~O.
Will output:
<p>The molecular composition of water is H<sub>2</sub>O.</p>
:website: https://github.com/jambonrose/markdown_subscript_extension
:copyright: Copyright 2014-2018 Andrew Pinkham
:license: Simplified BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import SimpleTagPattern
# match ~, at least one character that is not ~, and ~ again
SUBSCRIPT_RE = r"(\~)([^\~]+)\2"
def makeExtension(*args, **kwargs): # noqa: N802
"""Inform Markdown of the existence of the extension."""
return SubscriptExtension(*args, **kwargs)
class SubscriptExtension(Extension):
"""Extension: text between ~ characters will be subscripted."""
def extendMarkdown(self, md, md_globals): # noqa: N802
"""Insert 'subscript' pattern before 'not_strong' pattern."""
md.inlinePatterns.add(
"subscript", SimpleTagPattern(SUBSCRIPT_RE, "sub"), "<not_strong"
)
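A usage sketch mirroring the docstring above. Because this is a vendored third-party module, the import path is an assumption, and the instance is passed directly rather than by name; it relies on the legacy 2.x compatibility shims in the base classes.
import markdown
from markdown.extensions.subscript import SubscriptExtension  # hypothetical import path
print(markdown.markdown(
    "The molecular composition of water is H~2~O.",
    extensions=[SubscriptExtension()],
))
# Expected roughly: <p>The molecular composition of water is H<sub>2</sub>O.</p>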

View File

@@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-
"""Markdown Superscript Extension
Extends the Python-Markdown library to support superscript text.
Given the text:
2^10^ is 1024.
Will output:
2<sup>10</sup> is 1024.
:website: https://github.com/jambonrose/markdown_superscript_extension
:copyright: Copyright 2014-2018 Andrew Pinkham
:license: Simplified BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import SimpleTagPattern
# match ^, at least one character that is not ^, and ^ again
SUPERSCRIPT_RE = r"(\^)([^\^]+)\2"
def makeExtension(*args, **kwargs): # noqa: N802
"""Inform Markdown of the existence of the extension."""
return SuperscriptExtension(*args, **kwargs)
class SuperscriptExtension(Extension):
"""Extension: text between ^ characters will be superscripted."""
def extendMarkdown(self, md, md_globals): # noqa: N802
"""Insert 'superscript' pattern before 'not_strong' pattern."""
md.inlinePatterns.add(
"superscript",
SimpleTagPattern(SUPERSCRIPT_RE, "sup"),
"<not_strong",
)
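The same kind of sketch for the superscript extension above, under the same assumptions (hypothetical import path, instance passed directly, legacy compatibility shims in place).
import markdown
from markdown.extensions.superscript import SuperscriptExtension  # hypothetical import path
print(markdown.markdown(
    "2^10^ is 1024.",
    extensions=[SuperscriptExtension()],
))
# Expected roughly: <p>2<sup>10</sup> is 1024.</p>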

View File

@@ -0,0 +1,79 @@
"""
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
See <https://Python-Markdown.github.io/extensions/meta_data>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..preprocessors import Preprocessor
import re
import logging
log = logging.getLogger('MARKDOWN')
# Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
class MetaExtension(Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md):
""" Add MetaPreprocessor to Markdown instance. """
md.registerExtension(self)
self.md = md
md.preprocessors.register(MetaPreprocessor(md), 'meta', 27)
def reset(self):
self.md.Meta = {}
class MetaPreprocessor(Preprocessor):
""" Get Meta-Data. """
def run(self, lines):
""" Parse Meta-Data and store in Markdown.Meta. """
meta = {}
key = None
if lines and BEGIN_RE.match(lines[0]):
lines.pop(0)
while lines:
line = lines.pop(0)
m1 = META_RE.match(line)
if line.strip() == '' or END_RE.match(line):
break # blank line or end of YAML header - done
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
# Add another line to existing key
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break # no meta data - done
self.md.Meta = meta
return lines
def makeExtension(**kwargs): # pragma: no cover
return MetaExtension(**kwargs)
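A minimal sketch of reading metadata with the extension above, assuming the standard `meta` entry point name; the `Meta` dict in the comments follows from the regular expressions defined at the top of the file.
import markdown
md = markdown.Markdown(extensions=['meta'])
source = """Title: Release notes
Author: Jane Doe
    John Smith

Body text starts after the first blank line."""
html = md.convert(source)
# Keys are lower-cased; continuation lines (4+ spaces) extend the previous key.
print(md.Meta)  # roughly: {'title': ['Release notes'], 'author': ['Jane Doe', 'John Smith']}
print(html)     # roughly: <p>Body text starts after the first blank line.</p>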

View File

@@ -0,0 +1,33 @@
"""
NL2BR Extension
===============
A Python-Markdown extension to treat newlines as hard breaks; like
GitHub-flavored Markdown does.
See <https://Python-Markdown.github.io/extensions/nl2br>
for documentation.
Original code Copyright 2011 [Brian Neal](https://deathofagremmie.com/)
All changes Copyright 2011-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..inlinepatterns import SubstituteTagInlineProcessor
BR_RE = r'\n'
class Nl2BrExtension(Extension):
def extendMarkdown(self, md):
br_tag = SubstituteTagInlineProcessor(BR_RE, 'br')
md.inlinePatterns.register(br_tag, 'nl', 5)
def makeExtension(**kwargs): # pragma: no cover
return Nl2BrExtension(**kwargs)
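A quick sketch contrasting the default behavior with the extension above, assuming the standard `nl2br` entry point name.
import markdown
text = "Line one\nLine two"
# Without the extension the newline stays plain whitespace inside the paragraph;
# with nl2br it is substituted with a <br /> element.
print(markdown.markdown(text))
print(markdown.markdown(text, extensions=['nl2br']))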

View File

@@ -0,0 +1,54 @@
"""
Sane List Extension for Python-Markdown
=======================================
Modify the behavior of Lists in Python-Markdown to act in a sane manner.
See <https://Python-Markdown.github.io/extensions/sane_lists>
for documentation.
Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
All changes Copyright 2011-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..blockprocessors import OListProcessor, UListProcessor
import re
class SaneOListProcessor(OListProcessor):
SIBLING_TAGS = ['ol']
LAZY_OL = False
def __init__(self, parser):
super().__init__(parser)
self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.))[ ]+(.*)' %
(self.tab_length - 1))
class SaneUListProcessor(UListProcessor):
SIBLING_TAGS = ['ul']
def __init__(self, parser):
super().__init__(parser)
self.CHILD_RE = re.compile(r'^[ ]{0,%d}(([*+-]))[ ]+(.*)' %
(self.tab_length - 1))
class SaneListExtension(Extension):
""" Add sane lists to Markdown. """
def extendMarkdown(self, md):
""" Override existing Processors. """
md.parser.blockprocessors.register(SaneOListProcessor(md.parser), 'olist', 40)
md.parser.blockprocessors.register(SaneUListProcessor(md.parser), 'ulist', 30)
def makeExtension(**kwargs): # pragma: no cover
return SaneListExtension(**kwargs)
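A minimal sketch, assuming the standard `sane_lists` entry point name. With the default processors an item using a different marker can be absorbed into the preceding list; the overridden `SIBLING_TAGS` above keep ordered and unordered lists separate.
import markdown
text = """1. Ordered item
* Unordered item"""
# Default parsing treats both lines as one list; with sane_lists each marker
# type starts its own list and lazy `1.` numbering is disabled.
print(markdown.markdown(text))
print(markdown.markdown(text, extensions=['sane_lists']))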

View File

@@ -0,0 +1,263 @@
'''
Smarty extension for Python-Markdown
====================================
Adds conversion of ASCII dashes, quotes and ellipses to their HTML
entity equivalents.
See <https://Python-Markdown.github.io/extensions/smarty>
for documentation.
Author: 2013, Dmitry Shachnev <mitya57@gmail.com>
All changes Copyright 2013-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
SmartyPants license:
Copyright (c) 2003 John Gruber <https://daringfireball.net/>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license:
smartypants.py is a derivative work of SmartyPants.
Copyright (c) 2004, 2007 Chad Miller <http://web.chad.org/>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
'''
from . import Extension
from ..inlinepatterns import HtmlInlineProcessor, HTML_RE
from ..treeprocessors import InlineProcessor
from ..util import Registry, deprecated
# Constants for quote education.
punctClass = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
endOfWordClass = r"[\s.,;:!?)]"
closeClass = r"[^\ \t\r\n\[\{\(\-\u0002\u0003]"
openingQuotesBase = (
r'(\s' # a whitespace char
r'|&nbsp;' # or a non-breaking space entity
r'|--' # or dashes
r'|–|—'              # or unicode
r'|&[mn]dash;' # or named dash entities
r'|&#8211;|&#8212;' # or decimal entities
r')'
)
substitutions = {
'mdash': '&mdash;',
'ndash': '&ndash;',
'ellipsis': '&hellip;',
'left-angle-quote': '&laquo;',
'right-angle-quote': '&raquo;',
'left-single-quote': '&lsquo;',
'right-single-quote': '&rsquo;',
'left-double-quote': '&ldquo;',
'right-double-quote': '&rdquo;',
}
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
singleQuoteStartRe = r"^'(?=%s\B)" % punctClass
doubleQuoteStartRe = r'^"(?=%s\B)' % punctClass
# Special case for double sets of quotes, e.g.:
# <p>He said, "'Quoted' words in a larger quote."</p>
doubleQuoteSetsRe = r""""'(?=\w)"""
singleQuoteSetsRe = r"""'"(?=\w)"""
# Special case for decade abbreviations (the '80s):
decadeAbbrRe = r"(?<!\w)'(?=\d{2}s)"
# Get most opening double quotes:
openingDoubleQuotesRegex = r'%s"(?=\w)' % openingQuotesBase
# Double closing quotes:
closingDoubleQuotesRegex = r'"(?=\s)'
closingDoubleQuotesRegex2 = '(?<=%s)"' % closeClass
# Get most opening single quotes:
openingSingleQuotesRegex = r"%s'(?=\w)" % openingQuotesBase
# Single closing quotes:
closingSingleQuotesRegex = r"(?<=%s)'(?!\s|s\b|\d)" % closeClass
closingSingleQuotesRegex2 = r"(?<=%s)'(\s|s\b)" % closeClass
# All remaining quotes should be opening ones
remainingSingleQuotesRegex = r"'"
remainingDoubleQuotesRegex = r'"'
HTML_STRICT_RE = HTML_RE + r'(?!\>)'
class SubstituteTextPattern(HtmlInlineProcessor):
def __init__(self, pattern, replace, md):
""" Replaces matches with some text. """
HtmlInlineProcessor.__init__(self, pattern)
self.replace = replace
self.md = md
@property
@deprecated("Use 'md' instead.")
def markdown(self):
# TODO: remove this later
return self.md
def handleMatch(self, m, data):
result = ''
for part in self.replace:
if isinstance(part, int):
result += m.group(part)
else:
result += self.md.htmlStash.store(part)
return result, m.start(0), m.end(0)
class SmartyExtension(Extension):
def __init__(self, **kwargs):
self.config = {
'smart_quotes': [True, 'Educate quotes'],
'smart_angled_quotes': [False, 'Educate angled quotes'],
'smart_dashes': [True, 'Educate dashes'],
'smart_ellipses': [True, 'Educate ellipses'],
'substitutions': [{}, 'Overwrite default substitutions'],
}
super().__init__(**kwargs)
self.substitutions = dict(substitutions)
self.substitutions.update(self.getConfig('substitutions', default={}))
def _addPatterns(self, md, patterns, serie, priority):
for ind, pattern in enumerate(patterns):
pattern += (md,)
pattern = SubstituteTextPattern(*pattern)
name = 'smarty-%s-%d' % (serie, ind)
self.inlinePatterns.register(pattern, name, priority-ind)
def educateDashes(self, md):
emDashesPattern = SubstituteTextPattern(
r'(?<!-)---(?!-)', (self.substitutions['mdash'],), md
)
enDashesPattern = SubstituteTextPattern(
r'(?<!-)--(?!-)', (self.substitutions['ndash'],), md
)
self.inlinePatterns.register(emDashesPattern, 'smarty-em-dashes', 50)
self.inlinePatterns.register(enDashesPattern, 'smarty-en-dashes', 45)
def educateEllipses(self, md):
ellipsesPattern = SubstituteTextPattern(
r'(?<!\.)\.{3}(?!\.)', (self.substitutions['ellipsis'],), md
)
self.inlinePatterns.register(ellipsesPattern, 'smarty-ellipses', 10)
def educateAngledQuotes(self, md):
leftAngledQuotePattern = SubstituteTextPattern(
r'\<\<', (self.substitutions['left-angle-quote'],), md
)
rightAngledQuotePattern = SubstituteTextPattern(
r'\>\>', (self.substitutions['right-angle-quote'],), md
)
self.inlinePatterns.register(leftAngledQuotePattern, 'smarty-left-angle-quotes', 40)
self.inlinePatterns.register(rightAngledQuotePattern, 'smarty-right-angle-quotes', 35)
def educateQuotes(self, md):
lsquo = self.substitutions['left-single-quote']
rsquo = self.substitutions['right-single-quote']
ldquo = self.substitutions['left-double-quote']
rdquo = self.substitutions['right-double-quote']
patterns = (
(singleQuoteStartRe, (rsquo,)),
(doubleQuoteStartRe, (rdquo,)),
(doubleQuoteSetsRe, (ldquo + lsquo,)),
(singleQuoteSetsRe, (lsquo + ldquo,)),
(decadeAbbrRe, (rsquo,)),
(openingSingleQuotesRegex, (1, lsquo)),
(closingSingleQuotesRegex, (rsquo,)),
(closingSingleQuotesRegex2, (rsquo, 1)),
(remainingSingleQuotesRegex, (lsquo,)),
(openingDoubleQuotesRegex, (1, ldquo)),
(closingDoubleQuotesRegex, (rdquo,)),
(closingDoubleQuotesRegex2, (rdquo,)),
(remainingDoubleQuotesRegex, (ldquo,))
)
self._addPatterns(md, patterns, 'quotes', 30)
def extendMarkdown(self, md):
configs = self.getConfigs()
self.inlinePatterns = Registry()
if configs['smart_ellipses']:
self.educateEllipses(md)
if configs['smart_quotes']:
self.educateQuotes(md)
if configs['smart_angled_quotes']:
self.educateAngledQuotes(md)
# Override HTML_RE from inlinepatterns.py so that it does not
# process tags with duplicate closing quotes.
md.inlinePatterns.register(HtmlInlineProcessor(HTML_STRICT_RE, md), 'html', 90)
if configs['smart_dashes']:
self.educateDashes(md)
inlineProcessor = InlineProcessor(md)
inlineProcessor.inlinePatterns = self.inlinePatterns
md.treeprocessors.register(inlineProcessor, 'smarty', 2)
md.ESCAPED_CHARS.extend(['"', "'"])
def makeExtension(**kwargs): # pragma: no cover
return SmartyExtension(**kwargs)
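A short usage sketch for the extension above, assuming the standard `smarty` entry point name; the comment summarizes the documented substitutions rather than exact output.
import markdown
text = '"SmartyPants" --- it\'s... educational -- really'
# Straight quotes become &ldquo;/&rdquo; and &lsquo;/&rsquo;, --- becomes &mdash;,
# -- becomes &ndash;, and ... becomes &hellip; in the rendered HTML.
print(markdown.markdown(text, extensions=['smarty']))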

View File

@@ -0,0 +1,223 @@
"""
Tables Extension for Python-Markdown
====================================
Added parsing of tables to Python-Markdown.
See <https://Python-Markdown.github.io/extensions/tables>
for documentation.
Original code Copyright 2009 [Waylan Limberg](http://achinghead.com)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..blockprocessors import BlockProcessor
import xml.etree.ElementTree as etree
import re
PIPE_NONE = 0
PIPE_LEFT = 1
PIPE_RIGHT = 2
class TableProcessor(BlockProcessor):
""" Process Tables. """
RE_CODE_PIPES = re.compile(r'(?:(\\\\)|(\\`+)|(`+)|(\\\|)|(\|))')
RE_END_BORDER = re.compile(r'(?<!\\)(?:\\\\)*\|$')
def __init__(self, parser):
self.border = False
self.separator = ''
super().__init__(parser)
def test(self, parent, block):
"""
Ensure first two rows (column header and separator row) are valid table rows.
Keep the border check and separator row to avoid repeating the work.
"""
is_table = False
rows = [row.strip(' ') for row in block.split('\n')]
if len(rows) > 1:
header0 = rows[0]
self.border = PIPE_NONE
if header0.startswith('|'):
self.border |= PIPE_LEFT
if self.RE_END_BORDER.search(header0) is not None:
self.border |= PIPE_RIGHT
row = self._split_row(header0)
row0_len = len(row)
is_table = row0_len > 1
# Each row in a single column table needs at least one pipe.
if not is_table and row0_len == 1 and self.border:
for index in range(1, len(rows)):
is_table = rows[index].startswith('|')
if not is_table:
is_table = self.RE_END_BORDER.search(rows[index]) is not None
if not is_table:
break
if is_table:
row = self._split_row(rows[1])
is_table = (len(row) == row0_len) and set(''.join(row)) <= set('|:- ')
if is_table:
self.separator = row
return is_table
def run(self, parent, blocks):
""" Parse a table block and build table. """
block = blocks.pop(0).split('\n')
header = block[0].strip(' ')
rows = [] if len(block) < 3 else block[2:]
# Get alignment of columns
align = []
for c in self.separator:
c = c.strip(' ')
if c.startswith(':') and c.endswith(':'):
align.append('center')
elif c.startswith(':'):
align.append('left')
elif c.endswith(':'):
align.append('right')
else:
align.append(None)
# Build table
table = etree.SubElement(parent, 'table')
thead = etree.SubElement(table, 'thead')
self._build_row(header, thead, align)
tbody = etree.SubElement(table, 'tbody')
if len(rows) == 0:
# Handle empty table
self._build_empty_row(tbody, align)
else:
for row in rows:
self._build_row(row.strip(' '), tbody, align)
def _build_empty_row(self, parent, align):
"""Build an empty row."""
tr = etree.SubElement(parent, 'tr')
count = len(align)
while count:
etree.SubElement(tr, 'td')
count -= 1
def _build_row(self, row, parent, align):
""" Given a row of text, build table cells. """
tr = etree.SubElement(parent, 'tr')
tag = 'td'
if parent.tag == 'thead':
tag = 'th'
cells = self._split_row(row)
# We use align here rather than cells to ensure every row
# contains the same number of columns.
for i, a in enumerate(align):
c = etree.SubElement(tr, tag)
try:
c.text = cells[i].strip(' ')
except IndexError: # pragma: no cover
c.text = ""
if a:
c.set('align', a)
def _split_row(self, row):
""" split a row of text into list of cells. """
if self.border:
if row.startswith('|'):
row = row[1:]
row = self.RE_END_BORDER.sub('', row)
return self._split(row)
def _split(self, row):
""" split a row of text with some code into a list of cells. """
elements = []
pipes = []
tics = []
tic_points = []
tic_region = []
good_pipes = []
# Parse row
# Throw out \\, and \|
for m in self.RE_CODE_PIPES.finditer(row):
# Store ` data (len, start_pos, end_pos)
if m.group(2):
# \`+
# Store length of each tic group: subtract \
tics.append(len(m.group(2)) - 1)
# Store start of group, end of group, and escape length
tic_points.append((m.start(2), m.end(2) - 1, 1))
elif m.group(3):
# `+
# Store length of each tic group
tics.append(len(m.group(3)))
# Store start of group, end of group, and escape length
tic_points.append((m.start(3), m.end(3) - 1, 0))
# Store pipe location
elif m.group(5):
pipes.append(m.start(5))
# Pair up tics according to size if possible
# Subtract the escape length *only* from the opening.
# Walk through tic list and see if tic has a close.
# Store the tic region (start of region, end of region).
pos = 0
tic_len = len(tics)
while pos < tic_len:
try:
tic_size = tics[pos] - tic_points[pos][2]
if tic_size == 0:
raise ValueError
index = tics[pos + 1:].index(tic_size) + 1
tic_region.append((tic_points[pos][0], tic_points[pos + index][1]))
pos += index + 1
except ValueError:
pos += 1
# Resolve pipes. Check if they are within a tic pair region.
# Walk through pipes comparing them to each region.
# - If the pipe position is less than a region's start, it isn't in a region
# - If it is within a region, we don't want it, so throw it out
# - If we didn't throw it out, it must be a table pipe
for pipe in pipes:
throw_out = False
for region in tic_region:
if pipe < region[0]:
# Pipe is not in a region
break
elif region[0] <= pipe <= region[1]:
# Pipe is within a code region. Throw it out.
throw_out = True
break
if not throw_out:
good_pipes.append(pipe)
# Split row according to table delimiters.
pos = 0
for pipe in good_pipes:
elements.append(row[pos:pipe])
pos = pipe + 1
elements.append(row[pos:])
return elements
class TableExtension(Extension):
""" Add tables to Markdown. """
def extendMarkdown(self, md):
""" Add an instance of TableProcessor to BlockParser. """
if '|' not in md.ESCAPED_CHARS:
md.ESCAPED_CHARS.append('|')
md.parser.blockprocessors.register(TableProcessor(md.parser), 'table', 75)
def makeExtension(**kwargs): # pragma: no cover
return TableExtension(**kwargs)
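A minimal sketch of the table syntax handled above, assuming the standard `tables` entry point name; the colons in the separator row drive the `align` attribute set in `_build_row`.
import markdown
table = """\
| Item    | Qty | Price |
| :------ | :-: | ----: |
| Apples  |  3  |  1.20 |
| Oranges |  5  |  2.50 |
"""
# Produces a <table> with a <thead> row of <th> cells and a <tbody>;
# the columns get align="left", align="center" and align="right" respectively.
print(markdown.markdown(table, extensions=['tables']))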

View File

@@ -0,0 +1,380 @@
"""
Table of Contents Extension for Python-Markdown
===============================================
See <https://Python-Markdown.github.io/extensions/toc>
for documentation.
Original code Copyright 2008 [Jack Miller](https://codezen.org/)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
"""
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import code_escape, parseBoolValue, AMP_SUBSTITUTE, HTML_PLACEHOLDER_RE, AtomicString
from ..postprocessors import UnescapePostprocessor
import re
import html
import unicodedata
import xml.etree.ElementTree as etree
def slugify(value, separator, unicode=False):
""" Slugify a string, to make it URL friendly. """
if not unicode:
# Replace Extended Latin characters with ASCII, i.e. žlutý → zluty
value = unicodedata.normalize('NFKD', value)
value = value.encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
return re.sub(r'[{}\s]+'.format(separator), separator, value)
def slugify_unicode(value, separator):
""" Slugify a string, to make it URL friendly while preserving Unicode characters. """
return slugify(value, separator, unicode=True)
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
def unique(id, ids):
""" Ensure id is unique in set of ids. Append '_1', '_2'... if not """
while id in ids or not id:
m = IDCOUNT_RE.match(id)
if m:
id = '%s_%d' % (m.group(1), int(m.group(2))+1)
else:
id = '%s_%d' % (id, 1)
ids.add(id)
return id
def get_name(el):
"""Get title name."""
text = []
for c in el.itertext():
if isinstance(c, AtomicString):
text.append(html.unescape(c))
else:
text.append(c)
return ''.join(text).strip()
def stashedHTML2text(text, md, strip_entities=True):
""" Extract raw HTML from stash, reduce to plain text and swap with placeholder. """
def _html_sub(m):
""" Substitute raw html with plain text. """
try:
raw = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
except (IndexError, TypeError): # pragma: no cover
return m.group(0)
# Strip out tags and/or entities - leaving text
res = re.sub(r'(<[^>]+>)', '', raw)
if strip_entities:
res = re.sub(r'(&[\#a-zA-Z0-9]+;)', '', res)
return res
return HTML_PLACEHOLDER_RE.sub(_html_sub, text)
def unescape(text):
""" Unescape escaped text. """
c = UnescapePostprocessor()
return c.run(text)
def nest_toc_tokens(toc_list):
"""Given an unsorted list with errors and skips, return a nested one.
[{'level': 1}, {'level': 2}]
=>
[{'level': 1, 'children': [{'level': 2, 'children': []}]}]
A wrong list is also converted:
[{'level': 2}, {'level': 1}]
=>
[{'level': 2, 'children': []}, {'level': 1, 'children': []}]
"""
ordered_list = []
if len(toc_list):
# Initialize everything by processing the first entry
last = toc_list.pop(0)
last['children'] = []
levels = [last['level']]
ordered_list.append(last)
parents = []
# Walk the rest nesting the entries properly
while toc_list:
t = toc_list.pop(0)
current_level = t['level']
t['children'] = []
# Reduce depth if current level < last item's level
if current_level < levels[-1]:
# Pop last level since we know we are less than it
levels.pop()
# Pop parents and levels we are less than or equal to
to_pop = 0
for p in reversed(parents):
if current_level <= p['level']:
to_pop += 1
else: # pragma: no cover
break
if to_pop:
levels = levels[:-to_pop]
parents = parents[:-to_pop]
# Note current level as last
levels.append(current_level)
# Level is the same, so append to
# the current parent (if available)
if current_level == levels[-1]:
(parents[-1]['children'] if parents
else ordered_list).append(t)
# Current level is > last item's level,
# So make last item a parent and append current as child
else:
last['children'].append(t)
parents.append(last)
levels.append(current_level)
last = t
return ordered_list
class TocTreeprocessor(Treeprocessor):
def __init__(self, md, config):
super().__init__(md)
self.marker = config["marker"]
self.title = config["title"]
self.base_level = int(config["baselevel"]) - 1
self.slugify = config["slugify"]
self.sep = config["separator"]
self.use_anchors = parseBoolValue(config["anchorlink"])
self.anchorlink_class = config["anchorlink_class"]
self.use_permalinks = parseBoolValue(config["permalink"], False)
if self.use_permalinks is None:
self.use_permalinks = config["permalink"]
self.permalink_class = config["permalink_class"]
self.permalink_title = config["permalink_title"]
self.header_rgx = re.compile("[Hh][123456]")
if isinstance(config["toc_depth"], str) and '-' in config["toc_depth"]:
self.toc_top, self.toc_bottom = [int(x) for x in config["toc_depth"].split('-')]
else:
self.toc_top = 1
self.toc_bottom = int(config["toc_depth"])
def iterparent(self, node):
''' Iterator wrapper to get allowed parent and child all at once. '''
# We do not allow the marker inside a header as that
# would cause an endless loop of placing a new TOC
# inside a previously generated TOC.
for child in node:
if not self.header_rgx.match(child.tag) and child.tag not in ['pre', 'code']:
yield node, child
yield from self.iterparent(child)
def replace_marker(self, root, elem):
''' Replace marker with elem. '''
for (p, c) in self.iterparent(root):
text = ''.join(c.itertext()).strip()
if not text:
continue
# To keep the output from screwing up the
# validation by putting a <div> inside of a <p>
# we actually replace the <p> in its entirety.
# The <p> element may contain more than a single text content
# (nl2br can introduce a <br>). In this situation, c.text returns
# the very first content, ignoring any child or tail content.
# len(c) == 0 is here to ensure there is only text in the <p>.
if c.text and c.text.strip() == self.marker and len(c) == 0:
for i in range(len(p)):
if p[i] == c:
p[i] = elem
break
def set_level(self, elem):
''' Adjust header level according to base level. '''
level = int(elem.tag[-1]) + self.base_level
if level > 6:
level = 6
elem.tag = 'h%d' % level
def add_anchor(self, c, elem_id): # @ReservedAssignment
anchor = etree.Element("a")
anchor.text = c.text
anchor.attrib["href"] = "#" + elem_id
anchor.attrib["class"] = self.anchorlink_class
c.text = ""
for elem in c:
anchor.append(elem)
while len(c):
c.remove(c[0])
c.append(anchor)
def add_permalink(self, c, elem_id):
permalink = etree.Element("a")
permalink.text = ("%spara;" % AMP_SUBSTITUTE
if self.use_permalinks is True
else self.use_permalinks)
permalink.attrib["href"] = "#" + elem_id
permalink.attrib["class"] = self.permalink_class
if self.permalink_title:
permalink.attrib["title"] = self.permalink_title
c.append(permalink)
def build_toc_div(self, toc_list):
""" Return a string div given a toc list. """
div = etree.Element("div")
div.attrib["class"] = "toc"
# Add title to the div
if self.title:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.title
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul")
for item in toc_list:
# List item link, to be inserted into the toc div
li = etree.SubElement(ul, "li")
link = etree.SubElement(li, "a")
link.text = item.get('name', '')
link.attrib["href"] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
build_etree_ul(toc_list, div)
if 'prettify' in self.md.treeprocessors:
self.md.treeprocessors['prettify'].run(div)
return div
def run(self, doc):
# Get a list of id attributes
used_ids = set()
for el in doc.iter():
if "id" in el.attrib:
used_ids.add(el.attrib["id"])
toc_tokens = []
for el in doc.iter():
if isinstance(el.tag, str) and self.header_rgx.match(el.tag):
self.set_level(el)
text = get_name(el)
# Do not override pre-existing ids
if "id" not in el.attrib:
innertext = unescape(stashedHTML2text(text, self.md))
el.attrib["id"] = unique(self.slugify(innertext, self.sep), used_ids)
if int(el.tag[-1]) >= self.toc_top and int(el.tag[-1]) <= self.toc_bottom:
toc_tokens.append({
'level': int(el.tag[-1]),
'id': el.attrib["id"],
'name': unescape(stashedHTML2text(
code_escape(el.attrib.get('data-toc-label', text)),
self.md, strip_entities=False
))
})
# Remove the data-toc-label attribute as it is no longer needed
if 'data-toc-label' in el.attrib:
del el.attrib['data-toc-label']
if self.use_anchors:
self.add_anchor(el, el.attrib["id"])
if self.use_permalinks not in [False, None]:
self.add_permalink(el, el.attrib["id"])
toc_tokens = nest_toc_tokens(toc_tokens)
div = self.build_toc_div(toc_tokens)
if self.marker:
self.replace_marker(doc, div)
# serialize and attach to markdown instance.
toc = self.md.serializer(div)
for pp in self.md.postprocessors:
toc = pp.run(toc)
self.md.toc_tokens = toc_tokens
self.md.toc = toc
class TocExtension(Extension):
TreeProcessorClass = TocTreeprocessor
def __init__(self, **kwargs):
self.config = {
"marker": ['[TOC]',
'Text to find and replace with Table of Contents - '
'Set to an empty string to disable. Defaults to "[TOC]"'],
"title": ["",
"Title to insert into TOC <div> - "
"Defaults to an empty string"],
"anchorlink": [False,
"True if header should be a self link - "
"Defaults to False"],
"anchorlink_class": ['toclink',
'CSS class(es) used for the link. '
'Defaults to "toclink"'],
"permalink": [0,
"True or link text if a Sphinx-style permalink should "
"be added - Defaults to False"],
"permalink_class": ['headerlink',
'CSS class(es) used for the link. '
'Defaults to "headerlink"'],
"permalink_title": ["Permanent link",
"Title attribute of the permalink - "
"Defaults to 'Permanent link'"],
"baselevel": ['1', 'Base level for headers.'],
"slugify": [slugify,
"Function to generate anchors based on header text - "
"Defaults to the headerid ext's slugify function."],
'separator': ['-', 'Word separator. Defaults to "-".'],
"toc_depth": [6,
'Define the range of section levels to include in '
'the Table of Contents. A single integer (b) defines '
'the bottom section level (<h1>..<hb>) only. '
'A string consisting of two digits separated by a hyphen '
'in between ("2-5") defines the top (t) and the '
'bottom (b) (<ht>..<hb>). Defaults to `6` (bottom).'],
}
super().__init__(**kwargs)
def extendMarkdown(self, md):
md.registerExtension(self)
self.md = md
self.reset()
tocext = self.TreeProcessorClass(md, self.getConfigs())
# Headerid ext is set to '>prettify'. With this set to '_end',
# it should always come after headerid ext (and honor ids assigned
# by the header id extension) if both are used. Same goes for
# attr_list extension. This must come last because we don't want
# to redefine ids after toc is created. But we do want toc prettified.
md.treeprocessors.register(tocext, 'toc', 5)
def reset(self):
self.md.toc = ''
self.md.toc_tokens = []
def makeExtension(**kwargs): # pragma: no cover
return TocExtension(**kwargs)
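A minimal usage sketch for the extension above, assuming the standard `toc` entry point name; `toc` and `toc_tokens` are the attributes set at the end of `TocTreeprocessor.run`.
import markdown
md = markdown.Markdown(extensions=['toc'])
html = md.convert("[TOC]\n\n# Section One\n\n## Subsection")
print(html)           # the [TOC] marker is replaced with a <div class="toc"> list
print(md.toc)         # the same rendered TOC, exposed as an attribute
print(md.toc_tokens)  # nested tokens, e.g. [{'level': 1, 'id': 'section-one', ...}]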

View File

@@ -0,0 +1,87 @@
'''
WikiLinks Extension for Python-Markdown
======================================
Converts [[WikiLinks]] to relative links.
See <https://Python-Markdown.github.io/extensions/wikilinks>
for documentation.
Original code Copyright [Waylan Limberg](http://achinghead.com/).
All changes Copyright The Python Markdown Project
License: [BSD](https://opensource.org/licenses/bsd-license.php)
'''
from . import Extension
from ..inlinepatterns import InlineProcessor
import xml.etree.ElementTree as etree
import re
def build_url(label, base, end):
""" Build a url from the label, a base, and an end. """
clean_label = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
return '{}{}{}'.format(base, clean_label, end)
class WikiLinkExtension(Extension):
def __init__(self, **kwargs):
self.config = {
'base_url': ['/', 'String to append to beginning of URL.'],
'end_url': ['/', 'String to append to end of URL.'],
'html_class': ['wikilink', 'CSS hook. Leave blank for none.'],
'build_url': [build_url, 'Callable formats URL from label.'],
}
super().__init__(**kwargs)
def extendMarkdown(self, md):
self.md = md
# append to end of inline patterns
WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
wikilinkPattern = WikiLinksInlineProcessor(WIKILINK_RE, self.getConfigs())
wikilinkPattern.md = md
md.inlinePatterns.register(wikilinkPattern, 'wikilink', 75)
class WikiLinksInlineProcessor(InlineProcessor):
def __init__(self, pattern, config):
super().__init__(pattern)
self.config = config
def handleMatch(self, m, data):
if m.group(1).strip():
base_url, end_url, html_class = self._getMeta()
label = m.group(1).strip()
url = self.config['build_url'](label, base_url, end_url)
a = etree.Element('a')
a.text = label
a.set('href', url)
if html_class:
a.set('class', html_class)
else:
a = ''
return a, m.start(0), m.end(0)
def _getMeta(self):
""" Return meta data or config data. """
base_url = self.config['base_url']
end_url = self.config['end_url']
html_class = self.config['html_class']
if hasattr(self.md, 'Meta'):
if 'wiki_base_url' in self.md.Meta:
base_url = self.md.Meta['wiki_base_url'][0]
if 'wiki_end_url' in self.md.Meta:
end_url = self.md.Meta['wiki_end_url'][0]
if 'wiki_html_class' in self.md.Meta:
html_class = self.md.Meta['wiki_html_class'][0]
return base_url, end_url, html_class
def makeExtension(**kwargs): # pragma: no cover
return WikiLinkExtension(**kwargs)
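A short sketch of the wikilinks syntax above, assuming the standard `wikilinks` entry point name; the commented output follows `build_url` and the default `html_class` config.
import markdown
html = markdown.markdown(
    "See [[Wiki Links]] for details.",
    extensions=['wikilinks'],
    extension_configs={'wikilinks': {'base_url': '/wiki/', 'end_url': '.html'}},
)
# Expected roughly:
# <p>See <a class="wikilink" href="/wiki/Wiki_Links.html">Wiki Links</a> for details.</p>
print(html)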