mirror of
				https://gitlab.com/octtspacc/staticoso
				synced 2025-06-05 22:09:23 +02:00 
			
		
		
		
	First support for md extensions; Package all needed dependencies in the repo
This commit is contained in:
		
							
								
								
									
										14
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										14
									
								
								README.md
									
									
									
									
									
								
							| @@ -10,10 +10,12 @@ Also, this software is needed for someone to edit and compile my personal sub-we | |||||||
| Feel free to experiment with all of this stuff! | Feel free to experiment with all of this stuff! | ||||||
|  |  | ||||||
| ## Dependencies | ## Dependencies | ||||||
| - [Python >= 3.10.4](https://python.org) | - [Python == 3.10.4](https://python.org) | ||||||
| - [Python Markdown >= 3.3.7](https://pypi.org/project/Markdown) | - (Included) [Python Markdown == 3.3.7](https://pypi.org/project/Markdown) | ||||||
| - (Included) [htmlmin >= 0.1.12](https://pypi.org/project/htmlmin) | - (Included) [Beautiful Soup == 4.11.1](https://pypi.org/project/beautifulsoup4) | ||||||
| - [pug-cli >= 1.0.0-alpha6](https://npmjs.com/package/pug-cli) | - (Included) [htmlmin == 0.1.12](https://pypi.org/project/htmlmin) | ||||||
|  | - [node == 12.22.5](https://nodejs.org) | [npm == 7.5.2](https://www.npmjs.com) | ||||||
|  | - (Included) [pug-cli == 1.0.0-alpha6](https://npmjs.com/package/pug-cli) | ||||||
|  |  | ||||||
| ## Features roadmap | ## Features roadmap | ||||||
| - [x] Autodetection of pages and posts | - [x] Autodetection of pages and posts | ||||||
| @@ -35,5 +37,5 @@ Feel free to experiment with all of this stuff! | |||||||
| - [x] Generation of titles in right sidebar with clickable links | - [x] Generation of titles in right sidebar with clickable links | ||||||
| - [x] Detections of titles in a page | - [x] Detections of titles in a page | ||||||
| - [x] Custom static page parts by template | - [x] Custom static page parts by template | ||||||
| - [x] Markdown + Pug support for pages | - [x] Extended Markdown + Pug support for pages | ||||||
| - [x] First working version | - [x] Ready for use | ||||||
|   | |||||||
| @@ -9,36 +9,18 @@ | |||||||
|  |  | ||||||
| import argparse | import argparse | ||||||
| import json | import json | ||||||
| from Libs import htmlmin |  | ||||||
| import os | import os | ||||||
| import shutil | import shutil | ||||||
| from ast import literal_eval | from ast import literal_eval | ||||||
|  | from Libs import htmlmin | ||||||
| from Libs.bs4 import BeautifulSoup | from Libs.bs4 import BeautifulSoup | ||||||
| #from html.parser import HTMLParser | from Libs.markdown import Markdown | ||||||
| from markdown import Markdown | from Libs.markdown import markdown | ||||||
| from pathlib import Path | from pathlib import Path | ||||||
|  |  | ||||||
| Extensions = { | Extensions = { | ||||||
| 	'Pages': ('md', 'pug')} | 	'Pages': ('md', 'pug')} | ||||||
|  |  | ||||||
| """ |  | ||||||
| class HTMLParser(HTMLParser): |  | ||||||
| 	Tags = [] |  | ||||||
| 	def handle_starttag(self, tag, attrs): |  | ||||||
| 		#print(tag, attrs) |  | ||||||
| 		#self.Tags += [tag, attrs] |  | ||||||
| 		self.Tags += [[tag,attrs]] |  | ||||||
| 	def handle_data(self, data): |  | ||||||
| 		#print(data) |  | ||||||
| 		if self.Tags: |  | ||||||
| 			#self.Tags += [data] |  | ||||||
| 			self.Tags[-1] += [data] |  | ||||||
| 	def Clean(self): |  | ||||||
| 		self.Tags = [] |  | ||||||
| 		self.reset() |  | ||||||
| 		self.close() |  | ||||||
| """ |  | ||||||
|  |  | ||||||
| def ReadFile(p): | def ReadFile(p): | ||||||
| 	try: | 	try: | ||||||
| 		with open(p, 'r') as f: | 		with open(p, 'r') as f: | ||||||
| @@ -256,27 +238,8 @@ def MakeCategoryLine(Meta, Reserved): | |||||||
| 	return Categories | 	return Categories | ||||||
|  |  | ||||||
| def PatchHTML(Template, PartsText, ContextParts, ContextPartsText, HTMLPagesList, PagePath, Content, Titles, Meta, SiteRoot, FolderRoots, Categories, Locale, Reserved): | def PatchHTML(Template, PartsText, ContextParts, ContextPartsText, HTMLPagesList, PagePath, Content, Titles, Meta, SiteRoot, FolderRoots, Categories, Locale, Reserved): | ||||||
| 	BodyDescription, BodyImage = '', '' |  | ||||||
| 	HTMLTitles = FormatTitles(Titles) | 	HTMLTitles = FormatTitles(Titles) | ||||||
| 	""" # This is broken and somehow always returns the same wrong values? Disabled for now | 	BodyDescription, BodyImage = '', '' | ||||||
| 	#print(Content) |  | ||||||
| 	Parser = HTMLParser() |  | ||||||
| 	Parser.feed(Content) |  | ||||||
| 	for e in Parser.Tags: |  | ||||||
| 		if not BodyDescription and e[0] == 'p': |  | ||||||
| 			BodyDescription = e[2][:150] + '...' |  | ||||||
| 		elif not BodyImage and e[0] == 'img': |  | ||||||
| 			for j,f in enumerate(e[1]): |  | ||||||
| 				if f == 'src': |  | ||||||
| 					BodyImage = e[1][j] |  | ||||||
| 	print(BodyDescription) |  | ||||||
| 	print(BodyImage) |  | ||||||
| 	print(len(Parser.Tags)) |  | ||||||
| 	#print(Parser.Tags) |  | ||||||
| 	#exit() |  | ||||||
| 	Parser.Clean() |  | ||||||
| 	""" |  | ||||||
| 	#Content.find("<p ") |  | ||||||
| 	Parse = BeautifulSoup(Content, 'html.parser') | 	Parse = BeautifulSoup(Content, 'html.parser') | ||||||
| 	if not BodyDescription and Parse.p: | 	if not BodyDescription and Parse.p: | ||||||
| 		BodyDescription = Parse.p.get_text()[:150].replace('\n', ' ').replace('"', "'") + '...' | 		BodyDescription = Parse.p.get_text()[:150].replace('\n', ' ').replace('"', "'") + '...' | ||||||
| @@ -448,7 +411,7 @@ def MakeSite(TemplatesText, PartsText, ContextParts, ContextPartsText, SiteRoot, | |||||||
| 			Type='Page') | 			Type='Page') | ||||||
| 		PagePath = 'public/{}.html'.format(StripExt(File)) | 		PagePath = 'public/{}.html'.format(StripExt(File)) | ||||||
| 		if File.endswith('.md'): | 		if File.endswith('.md'): | ||||||
| 			Content = Markdown().convert(Content) | 			Content = markdown(Content, extensions=['attr_list']) | ||||||
| 		elif File.endswith('.pug'): | 		elif File.endswith('.pug'): | ||||||
| 			Content = ReadFile(PagePath) | 			Content = ReadFile(PagePath) | ||||||
| 		HTML = PatchHTML( | 		HTML = PatchHTML( | ||||||
|   | |||||||
							
								
								
									
										24
									
								
								Source/Libs/htmlmin/LICENSE
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								Source/Libs/htmlmin/LICENSE
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,24 @@ | |||||||
|  | Copyright (c) 2013, Dave Mankoff | ||||||
|  | All rights reserved. | ||||||
|  |  | ||||||
|  | Redistribution and use in source and binary forms, with or without | ||||||
|  | modification, are permitted provided that the following conditions are met: | ||||||
|  |     * Redistributions of source code must retain the above copyright | ||||||
|  |       notice, this list of conditions and the following disclaimer. | ||||||
|  |     * Redistributions in binary form must reproduce the above copyright | ||||||
|  |       notice, this list of conditions and the following disclaimer in the | ||||||
|  |       documentation and/or other materials provided with the distribution. | ||||||
|  |     * Neither the name of Dave Mankoff nor the | ||||||
|  |       names of its contributors may be used to endorse or promote products | ||||||
|  |       derived from this software without specific prior written permission. | ||||||
|  |  | ||||||
|  | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND | ||||||
|  | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||||||
|  | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||||||
|  | DISCLAIMED. IN NO EVENT SHALL DAVE MANKOFF BE LIABLE FOR ANY | ||||||
|  | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||||||
|  | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||||||
|  | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||||||
|  | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||||
|  | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||||||
|  | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||||
							
								
								
									
										13
									
								
								Source/Libs/importlib_metadata/LICENSE
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										13
									
								
								Source/Libs/importlib_metadata/LICENSE
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,13 @@ | |||||||
|  | Copyright 2017-2019 Jason R. Coombs, Barry Warsaw | ||||||
|  |  | ||||||
|  | Licensed under the Apache License, Version 2.0 (the "License"); | ||||||
|  | you may not use this file except in compliance with the License. | ||||||
|  | You may obtain a copy of the License at | ||||||
|  |  | ||||||
|  | http://www.apache.org/licenses/LICENSE-2.0 | ||||||
|  |  | ||||||
|  | Unless required by applicable law or agreed to in writing, software | ||||||
|  | distributed under the License is distributed on an "AS IS" BASIS, | ||||||
|  | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||||
|  | See the License for the specific language governing permissions and | ||||||
|  | limitations under the License. | ||||||
							
								
								
									
										1095
									
								
								Source/Libs/importlib_metadata/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1095
									
								
								Source/Libs/importlib_metadata/__init__.py
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										68
									
								
								Source/Libs/importlib_metadata/_adapters.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										68
									
								
								Source/Libs/importlib_metadata/_adapters.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,68 @@ | |||||||
|  | import re | ||||||
|  | import textwrap | ||||||
|  | import email.message | ||||||
|  |  | ||||||
|  | from ._text import FoldedCase | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class Message(email.message.Message): | ||||||
|  |     multiple_use_keys = set( | ||||||
|  |         map( | ||||||
|  |             FoldedCase, | ||||||
|  |             [ | ||||||
|  |                 'Classifier', | ||||||
|  |                 'Obsoletes-Dist', | ||||||
|  |                 'Platform', | ||||||
|  |                 'Project-URL', | ||||||
|  |                 'Provides-Dist', | ||||||
|  |                 'Provides-Extra', | ||||||
|  |                 'Requires-Dist', | ||||||
|  |                 'Requires-External', | ||||||
|  |                 'Supported-Platform', | ||||||
|  |                 'Dynamic', | ||||||
|  |             ], | ||||||
|  |         ) | ||||||
|  |     ) | ||||||
|  |     """ | ||||||
|  |     Keys that may be indicated multiple times per PEP 566. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __new__(cls, orig: email.message.Message): | ||||||
|  |         res = super().__new__(cls) | ||||||
|  |         vars(res).update(vars(orig)) | ||||||
|  |         return res | ||||||
|  |  | ||||||
|  |     def __init__(self, *args, **kwargs): | ||||||
|  |         self._headers = self._repair_headers() | ||||||
|  |  | ||||||
|  |     # suppress spurious error from mypy | ||||||
|  |     def __iter__(self): | ||||||
|  |         return super().__iter__() | ||||||
|  |  | ||||||
|  |     def _repair_headers(self): | ||||||
|  |         def redent(value): | ||||||
|  |             "Correct for RFC822 indentation" | ||||||
|  |             if not value or '\n' not in value: | ||||||
|  |                 return value | ||||||
|  |             return textwrap.dedent(' ' * 8 + value) | ||||||
|  |  | ||||||
|  |         headers = [(key, redent(value)) for key, value in vars(self)['_headers']] | ||||||
|  |         if self._payload: | ||||||
|  |             headers.append(('Description', self.get_payload())) | ||||||
|  |         return headers | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def json(self): | ||||||
|  |         """ | ||||||
|  |         Convert PackageMetadata to a JSON-compatible format | ||||||
|  |         per PEP 0566. | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |         def transform(key): | ||||||
|  |             value = self.get_all(key) if key in self.multiple_use_keys else self[key] | ||||||
|  |             if key == 'Keywords': | ||||||
|  |                 value = re.split(r'\s+', value) | ||||||
|  |             tk = key.lower().replace('-', '_') | ||||||
|  |             return tk, value | ||||||
|  |  | ||||||
|  |         return dict(map(transform, map(FoldedCase, self))) | ||||||
							
								
								
									
										30
									
								
								Source/Libs/importlib_metadata/_collections.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										30
									
								
								Source/Libs/importlib_metadata/_collections.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,30 @@ | |||||||
|  | import collections | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # from jaraco.collections 3.3 | ||||||
|  | class FreezableDefaultDict(collections.defaultdict): | ||||||
|  |     """ | ||||||
|  |     Often it is desirable to prevent the mutation of | ||||||
|  |     a default dict after its initial construction, such | ||||||
|  |     as to prevent mutation during iteration. | ||||||
|  |  | ||||||
|  |     >>> dd = FreezableDefaultDict(list) | ||||||
|  |     >>> dd[0].append('1') | ||||||
|  |     >>> dd.freeze() | ||||||
|  |     >>> dd[1] | ||||||
|  |     [] | ||||||
|  |     >>> len(dd) | ||||||
|  |     1 | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __missing__(self, key): | ||||||
|  |         return getattr(self, '_frozen', super().__missing__)(key) | ||||||
|  |  | ||||||
|  |     def freeze(self): | ||||||
|  |         self._frozen = lambda key: self.default_factory() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class Pair(collections.namedtuple('Pair', 'name value')): | ||||||
|  |     @classmethod | ||||||
|  |     def parse(cls, text): | ||||||
|  |         return cls(*map(str.strip, text.split("=", 1))) | ||||||
							
								
								
									
										71
									
								
								Source/Libs/importlib_metadata/_compat.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										71
									
								
								Source/Libs/importlib_metadata/_compat.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,71 @@ | |||||||
|  | import sys | ||||||
|  | import platform | ||||||
|  |  | ||||||
|  |  | ||||||
|  | __all__ = ['install', 'NullFinder', 'Protocol'] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | try: | ||||||
|  |     from typing import Protocol | ||||||
|  | except ImportError:  # pragma: no cover | ||||||
|  |     from typing_extensions import Protocol  # type: ignore | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def install(cls): | ||||||
|  |     """ | ||||||
|  |     Class decorator for installation on sys.meta_path. | ||||||
|  |  | ||||||
|  |     Adds the backport DistributionFinder to sys.meta_path and | ||||||
|  |     attempts to disable the finder functionality of the stdlib | ||||||
|  |     DistributionFinder. | ||||||
|  |     """ | ||||||
|  |     sys.meta_path.append(cls()) | ||||||
|  |     disable_stdlib_finder() | ||||||
|  |     return cls | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def disable_stdlib_finder(): | ||||||
|  |     """ | ||||||
|  |     Give the backport primacy for discovering path-based distributions | ||||||
|  |     by monkey-patching the stdlib O_O. | ||||||
|  |  | ||||||
|  |     See #91 for more background for rationale on this sketchy | ||||||
|  |     behavior. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def matches(finder): | ||||||
|  |         return getattr( | ||||||
|  |             finder, '__module__', None | ||||||
|  |         ) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions') | ||||||
|  |  | ||||||
|  |     for finder in filter(matches, sys.meta_path):  # pragma: nocover | ||||||
|  |         del finder.find_distributions | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class NullFinder: | ||||||
|  |     """ | ||||||
|  |     A "Finder" (aka "MetaClassFinder") that never finds any modules, | ||||||
|  |     but may find distributions. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     @staticmethod | ||||||
|  |     def find_spec(*args, **kwargs): | ||||||
|  |         return None | ||||||
|  |  | ||||||
|  |     # In Python 2, the import system requires finders | ||||||
|  |     # to have a find_module() method, but this usage | ||||||
|  |     # is deprecated in Python 3 in favor of find_spec(). | ||||||
|  |     # For the purposes of this finder (i.e. being present | ||||||
|  |     # on sys.meta_path but having no other import | ||||||
|  |     # system functionality), the two methods are identical. | ||||||
|  |     find_module = find_spec | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def pypy_partial(val): | ||||||
|  |     """ | ||||||
|  |     Adjust for variable stacklevel on partial under PyPy. | ||||||
|  |  | ||||||
|  |     Workaround for #327. | ||||||
|  |     """ | ||||||
|  |     is_pypy = platform.python_implementation() == 'PyPy' | ||||||
|  |     return val + is_pypy | ||||||
							
								
								
									
										104
									
								
								Source/Libs/importlib_metadata/_functools.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										104
									
								
								Source/Libs/importlib_metadata/_functools.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,104 @@ | |||||||
|  | import types | ||||||
|  | import functools | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # from jaraco.functools 3.3 | ||||||
|  | def method_cache(method, cache_wrapper=None): | ||||||
|  |     """ | ||||||
|  |     Wrap lru_cache to support storing the cache data in the object instances. | ||||||
|  |  | ||||||
|  |     Abstracts the common paradigm where the method explicitly saves an | ||||||
|  |     underscore-prefixed protected property on first call and returns that | ||||||
|  |     subsequently. | ||||||
|  |  | ||||||
|  |     >>> class MyClass: | ||||||
|  |     ...     calls = 0 | ||||||
|  |     ... | ||||||
|  |     ...     @method_cache | ||||||
|  |     ...     def method(self, value): | ||||||
|  |     ...         self.calls += 1 | ||||||
|  |     ...         return value | ||||||
|  |  | ||||||
|  |     >>> a = MyClass() | ||||||
|  |     >>> a.method(3) | ||||||
|  |     3 | ||||||
|  |     >>> for x in range(75): | ||||||
|  |     ...     res = a.method(x) | ||||||
|  |     >>> a.calls | ||||||
|  |     75 | ||||||
|  |  | ||||||
|  |     Note that the apparent behavior will be exactly like that of lru_cache | ||||||
|  |     except that the cache is stored on each instance, so values in one | ||||||
|  |     instance will not flush values from another, and when an instance is | ||||||
|  |     deleted, so are the cached values for that instance. | ||||||
|  |  | ||||||
|  |     >>> b = MyClass() | ||||||
|  |     >>> for x in range(35): | ||||||
|  |     ...     res = b.method(x) | ||||||
|  |     >>> b.calls | ||||||
|  |     35 | ||||||
|  |     >>> a.method(0) | ||||||
|  |     0 | ||||||
|  |     >>> a.calls | ||||||
|  |     75 | ||||||
|  |  | ||||||
|  |     Note that if method had been decorated with ``functools.lru_cache()``, | ||||||
|  |     a.calls would have been 76 (due to the cached value of 0 having been | ||||||
|  |     flushed by the 'b' instance). | ||||||
|  |  | ||||||
|  |     Clear the cache with ``.cache_clear()`` | ||||||
|  |  | ||||||
|  |     >>> a.method.cache_clear() | ||||||
|  |  | ||||||
|  |     Same for a method that hasn't yet been called. | ||||||
|  |  | ||||||
|  |     >>> c = MyClass() | ||||||
|  |     >>> c.method.cache_clear() | ||||||
|  |  | ||||||
|  |     Another cache wrapper may be supplied: | ||||||
|  |  | ||||||
|  |     >>> cache = functools.lru_cache(maxsize=2) | ||||||
|  |     >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) | ||||||
|  |     >>> a = MyClass() | ||||||
|  |     >>> a.method2() | ||||||
|  |     3 | ||||||
|  |  | ||||||
|  |     Caution - do not subsequently wrap the method with another decorator, such | ||||||
|  |     as ``@property``, which changes the semantics of the function. | ||||||
|  |  | ||||||
|  |     See also | ||||||
|  |     http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ | ||||||
|  |     for another implementation and additional justification. | ||||||
|  |     """ | ||||||
|  |     cache_wrapper = cache_wrapper or functools.lru_cache() | ||||||
|  |  | ||||||
|  |     def wrapper(self, *args, **kwargs): | ||||||
|  |         # it's the first call, replace the method with a cached, bound method | ||||||
|  |         bound_method = types.MethodType(method, self) | ||||||
|  |         cached_method = cache_wrapper(bound_method) | ||||||
|  |         setattr(self, method.__name__, cached_method) | ||||||
|  |         return cached_method(*args, **kwargs) | ||||||
|  |  | ||||||
|  |     # Support cache clear even before cache has been created. | ||||||
|  |     wrapper.cache_clear = lambda: None | ||||||
|  |  | ||||||
|  |     return wrapper | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # From jaraco.functools 3.3 | ||||||
|  | def pass_none(func): | ||||||
|  |     """ | ||||||
|  |     Wrap func so it's not called if its first param is None | ||||||
|  |  | ||||||
|  |     >>> print_text = pass_none(print) | ||||||
|  |     >>> print_text('text') | ||||||
|  |     text | ||||||
|  |     >>> print_text(None) | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     @functools.wraps(func) | ||||||
|  |     def wrapper(param, *args, **kwargs): | ||||||
|  |         if param is not None: | ||||||
|  |             return func(param, *args, **kwargs) | ||||||
|  |  | ||||||
|  |     return wrapper | ||||||
							
								
								
									
										73
									
								
								Source/Libs/importlib_metadata/_itertools.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										73
									
								
								Source/Libs/importlib_metadata/_itertools.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,73 @@ | |||||||
|  | from itertools import filterfalse | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def unique_everseen(iterable, key=None): | ||||||
|  |     "List unique elements, preserving order. Remember all elements ever seen." | ||||||
|  |     # unique_everseen('AAAABBBCCDAABBB') --> A B C D | ||||||
|  |     # unique_everseen('ABBCcAD', str.lower) --> A B C D | ||||||
|  |     seen = set() | ||||||
|  |     seen_add = seen.add | ||||||
|  |     if key is None: | ||||||
|  |         for element in filterfalse(seen.__contains__, iterable): | ||||||
|  |             seen_add(element) | ||||||
|  |             yield element | ||||||
|  |     else: | ||||||
|  |         for element in iterable: | ||||||
|  |             k = key(element) | ||||||
|  |             if k not in seen: | ||||||
|  |                 seen_add(k) | ||||||
|  |                 yield element | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # copied from more_itertools 8.8 | ||||||
|  | def always_iterable(obj, base_type=(str, bytes)): | ||||||
|  |     """If *obj* is iterable, return an iterator over its items:: | ||||||
|  |  | ||||||
|  |         >>> obj = (1, 2, 3) | ||||||
|  |         >>> list(always_iterable(obj)) | ||||||
|  |         [1, 2, 3] | ||||||
|  |  | ||||||
|  |     If *obj* is not iterable, return a one-item iterable containing *obj*:: | ||||||
|  |  | ||||||
|  |         >>> obj = 1 | ||||||
|  |         >>> list(always_iterable(obj)) | ||||||
|  |         [1] | ||||||
|  |  | ||||||
|  |     If *obj* is ``None``, return an empty iterable: | ||||||
|  |  | ||||||
|  |         >>> obj = None | ||||||
|  |         >>> list(always_iterable(None)) | ||||||
|  |         [] | ||||||
|  |  | ||||||
|  |     By default, binary and text strings are not considered iterable:: | ||||||
|  |  | ||||||
|  |         >>> obj = 'foo' | ||||||
|  |         >>> list(always_iterable(obj)) | ||||||
|  |         ['foo'] | ||||||
|  |  | ||||||
|  |     If *base_type* is set, objects for which ``isinstance(obj, base_type)`` | ||||||
|  |     returns ``True`` won't be considered iterable. | ||||||
|  |  | ||||||
|  |         >>> obj = {'a': 1} | ||||||
|  |         >>> list(always_iterable(obj))  # Iterate over the dict's keys | ||||||
|  |         ['a'] | ||||||
|  |         >>> list(always_iterable(obj, base_type=dict))  # Treat dicts as a unit | ||||||
|  |         [{'a': 1}] | ||||||
|  |  | ||||||
|  |     Set *base_type* to ``None`` to avoid any special handling and treat objects | ||||||
|  |     Python considers iterable as iterable: | ||||||
|  |  | ||||||
|  |         >>> obj = 'foo' | ||||||
|  |         >>> list(always_iterable(obj, base_type=None)) | ||||||
|  |         ['f', 'o', 'o'] | ||||||
|  |     """ | ||||||
|  |     if obj is None: | ||||||
|  |         return iter(()) | ||||||
|  |  | ||||||
|  |     if (base_type is not None) and isinstance(obj, base_type): | ||||||
|  |         return iter((obj,)) | ||||||
|  |  | ||||||
|  |     try: | ||||||
|  |         return iter(obj) | ||||||
|  |     except TypeError: | ||||||
|  |         return iter((obj,)) | ||||||
							
								
								
									
										48
									
								
								Source/Libs/importlib_metadata/_meta.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										48
									
								
								Source/Libs/importlib_metadata/_meta.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,48 @@ | |||||||
|  | from ._compat import Protocol | ||||||
|  | from typing import Any, Dict, Iterator, List, TypeVar, Union | ||||||
|  |  | ||||||
|  |  | ||||||
|  | _T = TypeVar("_T") | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class PackageMetadata(Protocol): | ||||||
|  |     def __len__(self) -> int: | ||||||
|  |         ...  # pragma: no cover | ||||||
|  |  | ||||||
|  |     def __contains__(self, item: str) -> bool: | ||||||
|  |         ...  # pragma: no cover | ||||||
|  |  | ||||||
|  |     def __getitem__(self, key: str) -> str: | ||||||
|  |         ...  # pragma: no cover | ||||||
|  |  | ||||||
|  |     def __iter__(self) -> Iterator[str]: | ||||||
|  |         ...  # pragma: no cover | ||||||
|  |  | ||||||
|  |     def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]: | ||||||
|  |         """ | ||||||
|  |         Return all values associated with a possibly multi-valued key. | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def json(self) -> Dict[str, Union[str, List[str]]]: | ||||||
|  |         """ | ||||||
|  |         A JSON-compatible form of the metadata. | ||||||
|  |         """ | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class SimplePath(Protocol): | ||||||
|  |     """ | ||||||
|  |     A minimal subset of pathlib.Path required by PathDistribution. | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def joinpath(self) -> 'SimplePath': | ||||||
|  |         ...  # pragma: no cover | ||||||
|  |  | ||||||
|  |     def __truediv__(self) -> 'SimplePath': | ||||||
|  |         ...  # pragma: no cover | ||||||
|  |  | ||||||
|  |     def parent(self) -> 'SimplePath': | ||||||
|  |         ...  # pragma: no cover | ||||||
|  |  | ||||||
|  |     def read_text(self) -> str: | ||||||
|  |         ...  # pragma: no cover | ||||||
							
								
								
									
										99
									
								
								Source/Libs/importlib_metadata/_text.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										99
									
								
								Source/Libs/importlib_metadata/_text.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,99 @@ | |||||||
|  | import re | ||||||
|  |  | ||||||
|  | from ._functools import method_cache | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # from jaraco.text 3.5 | ||||||
|  | class FoldedCase(str): | ||||||
|  |     """ | ||||||
|  |     A case insensitive string class; behaves just like str | ||||||
|  |     except compares equal when the only variation is case. | ||||||
|  |  | ||||||
|  |     >>> s = FoldedCase('hello world') | ||||||
|  |  | ||||||
|  |     >>> s == 'Hello World' | ||||||
|  |     True | ||||||
|  |  | ||||||
|  |     >>> 'Hello World' == s | ||||||
|  |     True | ||||||
|  |  | ||||||
|  |     >>> s != 'Hello World' | ||||||
|  |     False | ||||||
|  |  | ||||||
|  |     >>> s.index('O') | ||||||
|  |     4 | ||||||
|  |  | ||||||
|  |     >>> s.split('O') | ||||||
|  |     ['hell', ' w', 'rld'] | ||||||
|  |  | ||||||
|  |     >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta'])) | ||||||
|  |     ['alpha', 'Beta', 'GAMMA'] | ||||||
|  |  | ||||||
|  |     Sequence membership is straightforward. | ||||||
|  |  | ||||||
|  |     >>> "Hello World" in [s] | ||||||
|  |     True | ||||||
|  |     >>> s in ["Hello World"] | ||||||
|  |     True | ||||||
|  |  | ||||||
|  |     You may test for set inclusion, but candidate and elements | ||||||
|  |     must both be folded. | ||||||
|  |  | ||||||
|  |     >>> FoldedCase("Hello World") in {s} | ||||||
|  |     True | ||||||
|  |     >>> s in {FoldedCase("Hello World")} | ||||||
|  |     True | ||||||
|  |  | ||||||
|  |     String inclusion works as long as the FoldedCase object | ||||||
|  |     is on the right. | ||||||
|  |  | ||||||
|  |     >>> "hello" in FoldedCase("Hello World") | ||||||
|  |     True | ||||||
|  |  | ||||||
|  |     But not if the FoldedCase object is on the left: | ||||||
|  |  | ||||||
|  |     >>> FoldedCase('hello') in 'Hello World' | ||||||
|  |     False | ||||||
|  |  | ||||||
|  |     In that case, use in_: | ||||||
|  |  | ||||||
|  |     >>> FoldedCase('hello').in_('Hello World') | ||||||
|  |     True | ||||||
|  |  | ||||||
|  |     >>> FoldedCase('hello') > FoldedCase('Hello') | ||||||
|  |     False | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __lt__(self, other): | ||||||
|  |         return self.lower() < other.lower() | ||||||
|  |  | ||||||
|  |     def __gt__(self, other): | ||||||
|  |         return self.lower() > other.lower() | ||||||
|  |  | ||||||
|  |     def __eq__(self, other): | ||||||
|  |         return self.lower() == other.lower() | ||||||
|  |  | ||||||
|  |     def __ne__(self, other): | ||||||
|  |         return self.lower() != other.lower() | ||||||
|  |  | ||||||
|  |     def __hash__(self): | ||||||
|  |         return hash(self.lower()) | ||||||
|  |  | ||||||
|  |     def __contains__(self, other): | ||||||
|  |         return super().lower().__contains__(other.lower()) | ||||||
|  |  | ||||||
|  |     def in_(self, other): | ||||||
|  |         "Does self appear in other?" | ||||||
|  |         return self in FoldedCase(other) | ||||||
|  |  | ||||||
|  |     # cache lower since it's likely to be called frequently. | ||||||
|  |     @method_cache | ||||||
|  |     def lower(self): | ||||||
|  |         return super().lower() | ||||||
|  |  | ||||||
|  |     def index(self, sub): | ||||||
|  |         return self.lower().index(sub.lower()) | ||||||
|  |  | ||||||
|  |     def split(self, splitter=' ', maxsplit=0): | ||||||
|  |         pattern = re.compile(re.escape(splitter), re.I) | ||||||
|  |         return pattern.split(self, maxsplit) | ||||||
							
								
								
									
										0
									
								
								Source/Libs/importlib_metadata/py.typed
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										0
									
								
								Source/Libs/importlib_metadata/py.typed
									
									
									
									
									
										Normal file
									
								
							
							
								
								
									
										29
									
								
								Source/Libs/markdown/LICENSE.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								Source/Libs/markdown/LICENSE.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | |||||||
|  | Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | All rights reserved. | ||||||
|  |  | ||||||
|  | Redistribution and use in source and binary forms, with or without | ||||||
|  | modification, are permitted provided that the following conditions are met: | ||||||
|  |  | ||||||
|  | * Redistributions of source code must retain the above copyright | ||||||
|  |   notice, this list of conditions and the following disclaimer. | ||||||
|  | * Redistributions in binary form must reproduce the above copyright | ||||||
|  |   notice, this list of conditions and the following disclaimer in the | ||||||
|  |   documentation and/or other materials provided with the distribution. | ||||||
|  | * Neither the name of the Python Markdown Project nor the | ||||||
|  |   names of its contributors may be used to endorse or promote products | ||||||
|  |   derived from this software without specific prior written permission. | ||||||
|  |  | ||||||
|  | THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY | ||||||
|  | EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||||||
|  | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||||||
|  | DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT | ||||||
|  | BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||||||
|  | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||||||
|  | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||||||
|  | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||||||
|  | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||||||
|  | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||||||
|  | POSSIBILITY OF SUCH DAMAGE. | ||||||
							
								
								
									
										61
									
								
								Source/Libs/markdown/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										61
									
								
								Source/Libs/markdown/__init__.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,61 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  |  | ||||||
|  | # TODO: Remove this check at some point in the future. | ||||||
|  | # (also remove flake8's 'ignore E402' comments below) | ||||||
|  | if sys.version_info[0] < 3:  # pragma: no cover | ||||||
|  |     raise ImportError('A recent version of Python 3 is required.') | ||||||
|  |  | ||||||
|  | from .core import Markdown, markdown, markdownFromFile  # noqa: E402 | ||||||
|  | from .util import PY37                                  # noqa: E402 | ||||||
|  | from .pep562 import Pep562                              # noqa: E402 | ||||||
|  | from .__meta__ import __version__, __version_info__     # noqa: E402 | ||||||
|  | import warnings                                         # noqa: E402 | ||||||
|  |  | ||||||
|  | # For backward compatibility as some extensions expect it... | ||||||
|  | from .extensions import Extension  # noqa | ||||||
|  |  | ||||||
|  | __all__ = ['Markdown', 'markdown', 'markdownFromFile'] | ||||||
|  |  | ||||||
|  | __deprecated__ = { | ||||||
|  |     "version": ("__version__", __version__), | ||||||
|  |     "version_info": ("__version_info__", __version_info__) | ||||||
|  | } | ||||||
|  |  | ||||||
|  |  | ||||||
def __getattr__(name):
    """Module-level attribute hook (PEP 562).

    Serves deprecated attribute names from ``__deprecated__``: emits a
    DeprecationWarning pointing at the replacement, then returns the
    replacement's value. Unknown names raise AttributeError as usual.
    """
    entry = __deprecated__.get(name)
    if entry is None:
        raise AttributeError("module '{}' has no attribute '{}'".format(__name__, name))
    replacement, value = entry
    warnings.warn(
        "'{}' is deprecated. Use '{}' instead.".format(name, replacement),
        category=DeprecationWarning,
        # Point the warning at the caller; pre-3.7 the Pep562 shim adds a frame.
        stacklevel=(3 if PY37 else 4)
    )
    return value
|  |  | ||||||
|  |  | ||||||
|  | if not PY37: | ||||||
|  |     Pep562(__name__) | ||||||
							
								
								
									
										151
									
								
								Source/Libs/markdown/__main__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										151
									
								
								Source/Libs/markdown/__main__.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,151 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  | import optparse | ||||||
|  | import codecs | ||||||
|  | import warnings | ||||||
|  | import markdown | ||||||
|  | try: | ||||||
|  |     # We use `unsafe_load` because users may need to pass in actual Python | ||||||
|  |     # objects. As this is only available from the CLI, the user has much | ||||||
|  |     # worse problems if an attacker can use this as an attack vector. | ||||||
|  |     from yaml import unsafe_load as yaml_load | ||||||
|  | except ImportError:  # pragma: no cover | ||||||
|  |     try: | ||||||
|  |         # Fall back to PyYAML <5.1 | ||||||
|  |         from yaml import load as yaml_load | ||||||
|  |     except ImportError: | ||||||
|  |         # Fall back to JSON | ||||||
|  |         from json import load as yaml_load | ||||||
|  |  | ||||||
|  | import logging | ||||||
|  | from logging import DEBUG, WARNING, CRITICAL | ||||||
|  |  | ||||||
|  | logger = logging.getLogger('MARKDOWN') | ||||||
|  |  | ||||||
|  |  | ||||||
def parse_options(args=None, values=None):
    """
    Define and parse `optparse` options for command-line usage.

    Keywords:

    * ``args``: List of argument strings to parse; ``None`` means
      ``sys.argv[1:]`` (the `optparse` default).
    * ``values``: Optional ``optparse.Values`` instance to store results in.

    Returns a two-tuple ``(opts, verbosity)`` where ``opts`` is a dict of
    keyword arguments accepted by ``markdown.markdownFromFile`` and
    ``verbosity`` is the logging level selected by -q/-v/--noisy
    (CRITICAL+10 by default, i.e. everything suppressed).
    """
    usage = """%prog [options] [INPUTFILE]
       (STDIN is assumed if no INPUTFILE is given)"""
    desc = "A Python implementation of John Gruber's Markdown. " \
           "https://Python-Markdown.github.io/"
    ver = "%%prog %s" % markdown.__version__

    parser = optparse.OptionParser(usage=usage, description=desc, version=ver)
    parser.add_option("-f", "--file", dest="filename", default=None,
                      help="Write output to OUTPUT_FILE. Defaults to STDOUT.",
                      metavar="OUTPUT_FILE")
    parser.add_option("-e", "--encoding", dest="encoding",
                      help="Encoding for input and output files.",)
    parser.add_option("-o", "--output_format", dest="output_format",
                      default='xhtml', metavar="OUTPUT_FORMAT",
                      help="Use output format 'xhtml' (default) or 'html'.")
    parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol",
                      action='store_false', default=True,
                      help="Observe number of first item of ordered lists.")
    parser.add_option("-x", "--extension", action="append", dest="extensions",
                      help="Load extension EXTENSION.", metavar="EXTENSION")
    parser.add_option("-c", "--extension_configs",
                      dest="configfile", default=None,
                      help="Read extension configurations from CONFIG_FILE. "
                      "CONFIG_FILE must be of JSON or YAML format. YAML "
                      "format requires that a python YAML library be "
                      "installed. The parsed JSON or YAML must result in a "
                      "python dictionary which would be accepted by the "
                      "'extension_configs' keyword on the markdown.Markdown "
                      "class. The extensions must also be loaded with the "
                      "`--extension` option.",
                      metavar="CONFIG_FILE")
    # -q/-v/--noisy all write the same dest; the last flag given wins.
    parser.add_option("-q", "--quiet", default=CRITICAL,
                      action="store_const", const=CRITICAL+10, dest="verbose",
                      help="Suppress all warnings.")
    parser.add_option("-v", "--verbose",
                      action="store_const", const=WARNING, dest="verbose",
                      help="Print all warnings.")
    parser.add_option("--noisy",
                      action="store_const", const=DEBUG, dest="verbose",
                      help="Print debug messages.")

    (options, args) = parser.parse_args(args, values)

    # No positional argument means "read from STDIN".
    if len(args) == 0:
        input_file = None
    else:
        input_file = args[0]

    if not options.extensions:
        options.extensions = []

    extension_configs = {}
    if options.configfile:
        # yaml_load is resolved at import time: PyYAML unsafe_load, then
        # PyYAML<5.1 load, then json.load as the last-resort parser.
        with codecs.open(
            options.configfile, mode="r", encoding=options.encoding
        ) as fp:
            try:
                extension_configs = yaml_load(fp)
            except Exception as e:
                # Prepend the offending filename to the parser's error,
                # keeping the original exception type and traceback.
                message = "Failed parsing extension config file: %s" % \
                          options.configfile
                e.args = (message,) + e.args[1:]
                raise

    # Keys mirror the keyword arguments of markdown.markdownFromFile.
    opts = {
        'input': input_file,
        'output': options.filename,
        'extensions': options.extensions,
        'extension_configs': extension_configs,
        'encoding': options.encoding,
        'output_format': options.output_format,
        'lazy_ol': options.lazy_ol
    }

    return opts, options.verbose
|  |  | ||||||
|  |  | ||||||
def run():  # pragma: no cover
    """Run Markdown from the command line.

    Parses CLI options, configures the 'MARKDOWN' logger to the requested
    verbosity, then hands the collected options to
    ``markdown.markdownFromFile``. Exits with status 2 on bad options.
    """

    # Parse options and adjust logging level if necessary
    options, logging_level = parse_options()
    if not options:
        sys.exit(2)
    logger.setLevel(logging_level)
    console_handler = logging.StreamHandler()
    logger.addHandler(console_handler)
    if logging_level <= WARNING:
        # Ensure deprecation warnings get displayed: route warnings through
        # the logging system and attach the same console handler.
        warnings.filterwarnings('default')
        logging.captureWarnings(True)
        warn_logger = logging.getLogger('py.warnings')
        warn_logger.addHandler(console_handler)

    # Run
    markdown.markdownFromFile(**options)
|  |  | ||||||
|  |  | ||||||
|  | if __name__ == '__main__':  # pragma: no cover | ||||||
|  |     # Support running module as a commandline command. | ||||||
|  |     # `python -m markdown [options] [args]`. | ||||||
|  |     run() | ||||||
							
								
								
									
										49
									
								
								Source/Libs/markdown/__meta__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										49
									
								
								Source/Libs/markdown/__meta__.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,49 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | # __version_info__ format: | ||||||
|  | # (major, minor, patch, dev/alpha/beta/rc/final, #) | ||||||
|  | # (1, 1, 2, 'dev', 0) => "1.1.2.dev0" | ||||||
|  | # (1, 1, 2, 'alpha', 1) => "1.1.2a1" | ||||||
|  | # (1, 2, 0, 'beta', 2) => "1.2b2" | ||||||
|  | # (1, 2, 0, 'rc', 4) => "1.2rc4" | ||||||
|  | # (1, 2, 0, 'final', 0) => "1.2" | ||||||
|  | __version_info__ = (3, 3, 7, 'final', 0) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _get_version(version_info): | ||||||
|  |     " Returns a PEP 440-compliant version number from version_info. " | ||||||
|  |     assert len(version_info) == 5 | ||||||
|  |     assert version_info[3] in ('dev', 'alpha', 'beta', 'rc', 'final') | ||||||
|  |  | ||||||
|  |     parts = 2 if version_info[2] == 0 else 3 | ||||||
|  |     v = '.'.join(map(str, version_info[:parts])) | ||||||
|  |  | ||||||
|  |     if version_info[3] == 'dev': | ||||||
|  |         v += '.dev' + str(version_info[4]) | ||||||
|  |     elif version_info[3] != 'final': | ||||||
|  |         mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'} | ||||||
|  |         v += mapping[version_info[3]] + str(version_info[4]) | ||||||
|  |  | ||||||
|  |     return v | ||||||
|  |  | ||||||
|  |  | ||||||
|  | __version__ = _get_version(__version_info__) | ||||||
							
								
								
									
										125
									
								
								Source/Libs/markdown/blockparser.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										125
									
								
								Source/Libs/markdown/blockparser.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,125 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  | from . import util | ||||||
|  |  | ||||||
|  |  | ||||||
class State(list):
    """ Track the current and nested state of the parser.

    A thin stack API over ``list``: setting a state pushes it, resetting
    pops it, and ``isstate`` inspects the top of the stack. Every state
    set while descending into a nested block must be reset on the way
    back out, or the stack becomes corrupted.

    All list methods remain available, but only ``set``, ``reset`` and
    ``isstate`` are needed in practice.
    """

    def set(self, state):
        """ Push a new state onto the stack. """
        self.append(state)

    def reset(self):
        """ Pop the most recently set state. """
        self.pop()

    def isstate(self, state):
        """ Return True if the top (current) state equals *state*. """
        return bool(self) and self[-1] == state
|  |  | ||||||
|  |  | ||||||
class BlockParser:
    """ Assemble registered BlockProcessors into a Markdown block parser.

    Walks a document's blocks, handing each one to the first registered
    processor whose ``test`` accepts it, and builds an ElementTree.
    """

    def __init__(self, md):
        self.blockprocessors = util.Registry()
        self.state = State()
        self.md = md

    @property
    @util.deprecated("Use 'md' instead.")
    def markdown(self):
        # TODO: remove this later
        return self.md

    def parseDocument(self, lines):
        """ Parse a full markdown document into an ElementTree.

        Given a list of lines, a fresh root element is created and used
        as the parent for parsing; the resulting ElementTree object (not
        just the root Element) is returned.

        Call this only on an entire document, never on fragments.
        """
        self.root = etree.Element(self.md.doc_tag)
        self.parseChunk(self.root, '\n'.join(lines))
        return etree.ElementTree(self.root)

    def parseChunk(self, parent, text):
        """ Parse a chunk of markdown text under the given etree node.

        ``text`` is split into blocks on blank lines (it may also be a
        single block). Extensions typically call this entry point when
        they need block-level parsing.

        ``parent`` is mutated in place; nothing is returned.
        """
        self.parseBlocks(parent, text.split('\n\n'))

    def parseBlocks(self, parent, blocks):
        """ Feed blocks of markdown text to the registered processors.

        Steps through the processors for each remaining block until the
        block list is exhausted. Public because BlockProcessors call it
        recursively to parse nested blocks, though it is otherwise an
        internal entry point.
        """
        while blocks:
            for proc in self.blockprocessors:
                if proc.test(parent, blocks[0]):
                    if proc.run(parent, blocks) is not False:
                        # A return of True or None means the processor
                        # consumed the block; restart processor scan.
                        break
							
								
								
									
										623
									
								
								Source/Libs/markdown/blockprocessors.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										623
									
								
								Source/Libs/markdown/blockprocessors.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,623 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  |  | ||||||
|  | CORE MARKDOWN BLOCKPARSER | ||||||
|  | =========================================================================== | ||||||
|  |  | ||||||
|  | This parser handles basic parsing of Markdown blocks.  It doesn't concern | ||||||
|  | itself with inline elements such as **bold** or *italics*, but rather just | ||||||
|  | catches blocks, lists, quotes, etc. | ||||||
|  |  | ||||||
|  | The BlockParser is made up of a bunch of BlockProcessors, each handling a | ||||||
|  | different type of block. Extensions may add/replace/remove BlockProcessors | ||||||
|  | as they need to alter how markdown blocks are parsed. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import logging | ||||||
|  | import re | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  | from . import util | ||||||
|  | from .blockparser import BlockParser | ||||||
|  |  | ||||||
|  | logger = logging.getLogger('MARKDOWN') | ||||||
|  |  | ||||||
|  |  | ||||||
def build_block_parser(md, **kwargs):
    """ Build the default block parser used by Markdown.

    Registers the stock processors under their conventional names and
    priorities (higher priority runs first; 'paragraph' is the lowest
    and acts as the catch-all).
    """
    parser = BlockParser(md)
    default_processors = (
        ('empty', EmptyBlockProcessor, 100),
        ('indent', ListIndentProcessor, 90),
        ('code', CodeBlockProcessor, 80),
        ('hashheader', HashHeaderProcessor, 70),
        ('setextheader', SetextHeaderProcessor, 60),
        ('hr', HRProcessor, 50),
        ('olist', OListProcessor, 40),
        ('ulist', UListProcessor, 30),
        ('quote', BlockQuoteProcessor, 20),
        ('reference', ReferenceProcessor, 15),
        ('paragraph', ParagraphProcessor, 10),
    )
    for name, processor_cls, priority in default_processors:
        parser.blockprocessors.register(processor_cls(parser), name, priority)
    return parser
|  |  | ||||||
|  |  | ||||||
|  | class BlockProcessor: | ||||||
|  |     """ Base class for block processors. | ||||||
|  |  | ||||||
|  |     Each subclass will provide the methods below to work with the source and | ||||||
|  |     tree. Each processor will need to define it's own ``test`` and ``run`` | ||||||
|  |     methods. The ``test`` method should return True or False, to indicate | ||||||
|  |     whether the current block should be processed by this processor. If the | ||||||
|  |     test passes, the parser will call the processors ``run`` method. | ||||||
|  |  | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     def __init__(self, parser): | ||||||
|  |         self.parser = parser | ||||||
|  |         self.tab_length = parser.md.tab_length | ||||||
|  |  | ||||||
|  |     def lastChild(self, parent): | ||||||
|  |         """ Return the last child of an etree element. """ | ||||||
|  |         if len(parent): | ||||||
|  |             return parent[-1] | ||||||
|  |         else: | ||||||
|  |             return None | ||||||
|  |  | ||||||
|  |     def detab(self, text, length=None): | ||||||
|  |         """ Remove a tab from the front of each line of the given text. """ | ||||||
|  |         if length is None: | ||||||
|  |             length = self.tab_length | ||||||
|  |         newtext = [] | ||||||
|  |         lines = text.split('\n') | ||||||
|  |         for line in lines: | ||||||
|  |             if line.startswith(' ' * length): | ||||||
|  |                 newtext.append(line[length:]) | ||||||
|  |             elif not line.strip(): | ||||||
|  |                 newtext.append('') | ||||||
|  |             else: | ||||||
|  |                 break | ||||||
|  |         return '\n'.join(newtext), '\n'.join(lines[len(newtext):]) | ||||||
|  |  | ||||||
|  |     def looseDetab(self, text, level=1): | ||||||
|  |         """ Remove a tab from front of lines but allowing dedented lines. """ | ||||||
|  |         lines = text.split('\n') | ||||||
|  |         for i in range(len(lines)): | ||||||
|  |             if lines[i].startswith(' '*self.tab_length*level): | ||||||
|  |                 lines[i] = lines[i][self.tab_length*level:] | ||||||
|  |         return '\n'.join(lines) | ||||||
|  |  | ||||||
    def test(self, parent, block):
        """ Test for block type. Must be overridden by subclasses.

        As the parser loops through processors, it will call the ``test``
        method on each to determine if the given block of text is of that
        type. This method must return a boolean ``True`` or ``False``. The
        actual method of testing is left to the needs of that particular
        block type. It could be as simple as ``block.startswith(some_string)``
        or a complex regular expression. As the block type may be different
        depending on the parent of the block (i.e. inside a list), the parent
        etree element is also provided and may be used as part of the test.

        Keywords:

        * ``parent``: An etree element which will be the parent of the block.
        * ``block``: A block of text from the source which has been split at
            blank lines.
        """
        pass  # pragma: no cover
|  |  | ||||||
    def run(self, parent, blocks):
        """ Run processor. Must be overridden by subclasses.

        When the parser determines the appropriate type of a block, the parser
        will call the corresponding processor's ``run`` method. This method
        should parse the individual lines of the block and append them to
        the etree.

        Note that both the ``parent`` and ``etree`` keywords are pointers
        to instances of the objects which should be edited in place. Each
        processor must make changes to the existing objects as there is no
        mechanism to return new/different objects to replace them.

        This means that this method should be adding SubElements or adding text
        to the parent, and should remove (``pop``) or add (``insert``) items to
        the list of blocks.

        Keywords:

        * ``parent``: An etree element which is the parent of the current block.
        * ``blocks``: A list of all remaining blocks of the document.
        """
        pass  # pragma: no cover
|  |  | ||||||
|  |  | ||||||
class ListIndentProcessor(BlockProcessor):
    """ Process children of list items.

    Example:
        * a list item
            process this part

            or this part

    """

    # Tags that represent a single list item.
    ITEM_TYPES = ['li']
    # Tags that represent a list container.
    LIST_TYPES = ['ul', 'ol']

    def __init__(self, *args):
        super().__init__(*args)
        # One or more runs of exactly ``tab_length`` spaces at line start.
        self.INDENT_RE = re.compile(r'^(([ ]{%s})+)' % self.tab_length)

    def test(self, parent, block):
        # Indented block, not already being re-processed after a detab, and
        # the parent is a list item (or the parent's last child is a list).
        return block.startswith(' '*self.tab_length) and \
            not self.parser.state.isstate('detabbed') and \
            (parent.tag in self.ITEM_TYPES or
                (len(parent) and parent[-1] is not None and
                    (parent[-1].tag in self.LIST_TYPES)))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        level, sibling = self.get_level(parent, block)
        block = self.looseDetab(block, level)

        # Guard against re-entering this processor while recursively
        # parsing the dedented content.
        self.parser.state.set('detabbed')
        if parent.tag in self.ITEM_TYPES:
            # It's possible that this parent has a 'ul' or 'ol' child list
            # with a member.  If that is the case, then that should be the
            # parent.  This is intended to catch the edge case of an indented
            # list whose first member was parsed previous to this point
            # see OListProcessor
            if len(parent) and parent[-1].tag in self.LIST_TYPES:
                self.parser.parseBlocks(parent[-1], [block])
            else:
                # The parent is already a li. Just parse the child block.
                self.parser.parseBlocks(parent, [block])
        elif sibling.tag in self.ITEM_TYPES:
            # The sibling is a li. Use it as parent.
            self.parser.parseBlocks(sibling, [block])
        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
            # The parent is a list (``ol`` or ``ul``) which has children.
            # Assume the last child li is the parent of this block.
            if sibling[-1].text:
                # If the parent li has text, that text needs to be moved to a p
                # The p must be 'inserted' at beginning of list in the event
                # that other children already exist i.e.; a nested sublist.
                p = etree.Element('p')
                p.text = sibling[-1].text
                sibling[-1].text = ''
                sibling[-1].insert(0, p)
            self.parser.parseChunk(sibling[-1], block)
        else:
            # No suitable li found; create a fresh one under the sibling.
            self.create_item(sibling, block)
        self.parser.state.reset()

    def create_item(self, parent, block):
        """ Create a new li and parse the block with it as the parent. """
        li = etree.SubElement(parent, 'li')
        self.parser.parseBlocks(li, [block])

    def get_level(self, parent, block):
        """ Get level of indent based on list level.

        Returns a ``(level, parent)`` tuple, where ``parent`` is the etree
        element found at that indent level.
        """
        # Get indent level
        m = self.INDENT_RE.match(block)
        if m:
            # True division yields a float; it is only compared numerically
            # against ``level`` below, so that is harmless.
            indent_level = len(m.group(1))/self.tab_length
        else:
            indent_level = 0
        if self.parser.state.isstate('list'):
            # We're in a tightlist - so we already are at correct parent.
            level = 1
        else:
            # We're in a looselist - so we need to find parent.
            level = 0
        # Step through children of tree to find matching indent level.
        while indent_level > level:
            child = self.lastChild(parent)
            if (child is not None and
               (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES)):
                if child.tag in self.LIST_TYPES:
                    level += 1
                parent = child
            else:
                # No more child levels. If we're short of indent_level,
                # we have a code block. So we stop here.
                break
        return level, parent
|  |  | ||||||
|  |  | ||||||
class CodeBlockProcessor(BlockProcessor):
    """ Process indented code blocks. """

    def test(self, parent, block):
        # A code block is any block indented by at least one tab-width.
        return block.startswith(' '*self.tab_length)

    def run(self, parent, blocks):
        block = blocks.pop(0)
        sibling = self.lastChild(parent)
        theRest = ''
        continues_code = (
            sibling is not None
            and sibling.tag == "pre"
            and len(sibling)
            and sibling[0].tag == "code"
        )
        if continues_code:
            # The previous block was a code block. As blank lines do not
            # start new code blocks, append this block to the previous one,
            # adding back the linebreaks removed by the split into a list.
            code = sibling[0]
            block, theRest = self.detab(block)
            code.text = util.AtomicString(
                '{}\n{}\n'.format(code.text, util.code_escape(block.rstrip()))
            )
        else:
            # A new code block: build the <pre><code> pair and insert the
            # escaped text.
            pre = etree.SubElement(parent, 'pre')
            code = etree.SubElement(pre, 'code')
            block, theRest = self.detab(block)
            code.text = util.AtomicString('%s\n' % util.code_escape(block.rstrip()))
        if theRest:
            # The block contained unindented line(s) after the first indented
            # line; queue them as the next block for future processing.
            blocks.insert(0, theRest)
|  |  | ||||||
|  |  | ||||||
class BlockQuoteProcessor(BlockProcessor):
    """ Process blockquotes (lines prefixed with ``>``). """

    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')

    def test(self, parent, block):
        return bool(self.RE.search(block)) and not util.nearing_recursion_limit()

    def run(self, parent, blocks):
        block = blocks.pop(0)
        match = self.RE.search(block)
        if match:
            # Lines before the quote marker are parsed first, recursively.
            self.parser.parseBlocks(parent, [block[:match.start()]])
            # Strip the leading ``> `` marker from every quoted line.
            cleaned = [self.clean(line) for line in block[match.start():].split('\n')]
            block = '\n'.join(cleaned)
        sibling = self.lastChild(parent)
        if sibling is not None and sibling.tag == "blockquote":
            # Continue the blockquote opened by the previous block.
            quote = sibling
        else:
            # Open a new blockquote element under the parent.
            quote = etree.SubElement(parent, 'blockquote')
        # Recursively parse the quoted content with the blockquote as parent.
        # The 'blockquote' state makes quotes embedded in lists use p tags.
        self.parser.state.set('blockquote')
        self.parser.parseChunk(quote, block)
        self.parser.state.reset()

    def clean(self, line):
        """ Remove the leading ``>`` marker from a single line. """
        if line.strip() == ">":
            return ""
        m = self.RE.match(line)
        if m:
            return m.group(2)
        return line
|  |  | ||||||
|  |  | ||||||
class OListProcessor(BlockProcessor):
    """ Process ordered list blocks. """

    # Element tag used for the list container.
    TAG = 'ol'
    # The integer (as a Python string) with which the list starts (default=1)
    # Eg: If list is initialized as)
    #   3. Item
    # The ol tag will get a start="3" attribute
    STARTSWITH = '1'
    # Lazy ol - when True, ignore STARTSWITH and always start at 1.
    LAZY_OL = True
    # List of allowed sibling tags.
    SIBLING_TAGS = ['ol', 'ul']

    def __init__(self, parser):
        super().__init__(parser)
        # Detect an item (``1. item``). ``group(1)`` contains contents of item.
        self.RE = re.compile(r'^[ ]{0,%d}\d+\.[ ]+(.*)' % (self.tab_length - 1))
        # Detect items on secondary lines. they can be of either list type.
        self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.)|[*+-])[ ]+(.*)' %
                                   (self.tab_length - 1))
        # Detect indented (nested) items of either type
        self.INDENT_RE = re.compile(r'^[ ]{%d,%d}((\d+\.)|[*+-])[ ]+.*' %
                                    (self.tab_length, self.tab_length * 2 - 1))

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        # Check for multiple items in one block.
        items = self.get_items(blocks.pop(0))
        sibling = self.lastChild(parent)

        if sibling is not None and sibling.tag in self.SIBLING_TAGS:
            # Previous block was a list item, so set that as parent
            lst = sibling
            # make sure previous item is in a p- if the item has text,
            # then it isn't in a p
            if lst[-1].text:
                # since it's possible there are other children for this
                # sibling, we can't just SubElement the p, we need to
                # insert it as the first item.
                p = etree.Element('p')
                p.text = lst[-1].text
                lst[-1].text = ''
                lst[-1].insert(0, p)
            # if the last item has a tail, then the tail needs to be put in a p
            # likely only when a header is not followed by a blank line
            lch = self.lastChild(lst[-1])
            if lch is not None and lch.tail:
                p = etree.SubElement(lst[-1], 'p')
                p.text = lch.tail.lstrip()
                lch.tail = ''

            # parse first block differently as it gets wrapped in a p.
            li = etree.SubElement(lst, 'li')
            self.parser.state.set('looselist')
            firstitem = items.pop(0)
            self.parser.parseBlocks(li, [firstitem])
            self.parser.state.reset()
        elif parent.tag in ['ol', 'ul']:
            # this catches the edge case of a multi-item indented list whose
            # first item is in a blank parent-list item:
            # * * subitem1
            #     * subitem2
            # see also ListIndentProcessor
            lst = parent
        else:
            # This is a new list so create parent with appropriate tag.
            lst = etree.SubElement(parent, self.TAG)
            # Check if a custom start integer is set
            if not self.LAZY_OL and self.STARTSWITH != '1':
                lst.attrib['start'] = self.STARTSWITH

        self.parser.state.set('list')
        # Loop through items in block, recursively parsing each with the
        # appropriate parent.
        for item in items:
            if item.startswith(' '*self.tab_length):
                # Item is indented. Parse with last item as parent
                self.parser.parseBlocks(lst[-1], [item])
            else:
                # New item. Create li and parse with it as parent
                li = etree.SubElement(lst, 'li')
                self.parser.parseBlocks(li, [item])
        self.parser.state.reset()

    def get_items(self, block):
        """ Break a block into list items.

        Returns a list of strings, one per item; continuation and nested
        lines are joined onto the item they belong to.
        """
        items = []
        for line in block.split('\n'):
            m = self.CHILD_RE.match(line)
            if m:
                # This is a new list item
                # Check first item for the start index
                if not items and self.TAG == 'ol':
                    # Detect the integer value of first list item
                    INTEGER_RE = re.compile(r'(\d+)')
                    self.STARTSWITH = INTEGER_RE.match(m.group(1)).group()
                # Append to the list
                items.append(m.group(3))
            elif self.INDENT_RE.match(line):
                # This is an indented (possibly nested) item.
                if items[-1].startswith(' '*self.tab_length):
                    # Previous item was indented. Append to that item.
                    items[-1] = '{}\n{}'.format(items[-1], line)
                else:
                    items.append(line)
            else:
                # This is another line of previous item. Append to that item.
                items[-1] = '{}\n{}'.format(items[-1], line)
        return items
|  |  | ||||||
|  |  | ||||||
class UListProcessor(OListProcessor):
    """ Process unordered list blocks. """

    TAG = 'ul'

    def __init__(self, parser):
        super().__init__(parser)
        # Match a bullet item (``* item``, ``+ item`` or ``- item``);
        # ``group(1)`` holds the item's content.
        max_indent = self.tab_length - 1
        self.RE = re.compile(r'^[ ]{0,%d}[*+-][ ]+(.*)' % max_indent)
|  |  | ||||||
|  |  | ||||||
class HashHeaderProcessor(BlockProcessor):
    """ Process Hash Headers (``# Header`` through ``###### Header``). """

    # Detect a header at start of any line in block. ``level`` captures the
    # run of 1-6 ``#`` chars; ``header`` captures the text (allowing escaped
    # characters); trailing ``#`` chars are consumed but discarded.
    RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()]  # All lines before header
            after = block[m.end():]     # All lines after header
            if before:
                # As the header was not the first line of the block and the
                # lines before the header must be parsed first,
                # recursively parse those lines as a block.
                self.parser.parseBlocks(parent, [before])
            # Create header using named groups from RE
            h = etree.SubElement(parent, 'h%d' % len(m.group('level')))
            h.text = m.group('header').strip()
            if after:
                # Insert remaining lines as first block for future parsing.
                blocks.insert(0, after)
        else:  # pragma: no cover
            # This should never happen, but just in case...
            # Fix: ``logger.warn`` is a deprecated alias (removed in Python
            # 3.13); use ``warning`` with lazy %-style args instead.
            logger.warning("We've got a problem header: %r", block)
|  |  | ||||||
|  |  | ||||||
class SetextHeaderProcessor(BlockProcessor):
    """ Process Setext-style Headers. """

    # A Setext header is a line of text underlined with ``=`` or ``-``;
    # both must be the first two lines of the block.
    RE = re.compile(r'^.*?\n[=-]+[ ]*(\n|$)', re.MULTILINE)

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        lines = blocks.pop(0).split('\n')
        # An ``=`` underline means level 1; a ``-`` underline means level 2.
        level = 1 if lines[1].startswith('=') else 2
        header = etree.SubElement(parent, 'h%d' % level)
        header.text = lines[0].strip()
        remainder = lines[2:]
        if remainder:
            # The block contains additional lines; queue them for later.
            blocks.insert(0, '\n'.join(remainder))
|  |  | ||||||
|  |  | ||||||
class HRProcessor(BlockProcessor):
    """ Process Horizontal Rules. """

    # Python's re module doesn't officially support atomic grouping, but a
    # lookahead capturing group plus a backreference fakes it.
    # See https://stackoverflow.com/a/13577411/866026
    RE = r'^[ ]{0,3}(?=(?P<atomicgroup>(-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,}))(?P=atomicgroup)[ ]*$'
    # Detect hr on any line of a block.
    SEARCH_RE = re.compile(RE, re.MULTILINE)

    def test(self, parent, block):
        m = self.SEARCH_RE.search(block)
        if m is None:
            return False
        # Stash the match on the instance so run() can reuse it without
        # searching again.
        self.match = m
        return True

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.match
        # Any lines before the rule are parsed first, recursively.
        leading = block[:m.start()].rstrip('\n')
        if leading:
            self.parser.parseBlocks(parent, [leading])
        # Create the hr element itself.
        etree.SubElement(parent, 'hr')
        # Any lines after the rule are queued for later parsing.
        trailing = block[m.end():].lstrip('\n')
        if trailing:
            blocks.insert(0, trailing)
|  |  | ||||||
|  |  | ||||||
class EmptyBlockProcessor(BlockProcessor):
    """ Process blocks that are empty or start with an empty line. """

    def test(self, parent, block):
        return not block or block.startswith('\n')

    def run(self, parent, blocks):
        block = blocks.pop(0)
        if not block:
            # An entirely empty block stands for two newlines.
            filler = '\n\n'
        else:
            # The block starts with an empty line: consume only that single
            # line and requeue the remainder for later processing.
            filler = '\n'
            remainder = block[1:]
            if remainder:
                blocks.insert(0, remainder)
        sibling = self.lastChild(parent)
        if (sibling is not None and sibling.tag == 'pre' and
           len(sibling) and sibling[0].tag == 'code'):
            # The last block is a code block; append the blank line(s) to it
            # so whitespace inside the code is preserved.
            sibling[0].text = util.AtomicString(
                '{}{}'.format(sibling[0].text, filler)
            )
|  |  | ||||||
|  |  | ||||||
class ReferenceProcessor(BlockProcessor):
    """ Process link reference definitions (``[id]: url "title"``). """

    RE = re.compile(
        r'^[ ]{0,3}\[([^\[\]]*)\]:[ ]*\n?[ ]*([^\s]+)[ ]*(?:\n[ ]*)?((["\'])(.*)\4[ ]*|\((.*)\)[ ]*)?$', re.MULTILINE
    )

    def test(self, parent, block):
        return True

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m is None:
            # Not a reference definition; restore the block untouched.
            blocks.insert(0, block)
            return False
        # Reference ids are matched case-insensitively.
        ref_id = m.group(1).strip().lower()
        link = m.group(2).lstrip('<').rstrip('>')
        # The title may be quoted (group 5) or parenthesized (group 6).
        title = m.group(5) or m.group(6)
        self.parser.md.references[ref_id] = (link, title)
        trailing = block[m.end():]
        if trailing.strip():
            # Requeue any content after the match as a separate block.
            blocks.insert(0, trailing.lstrip('\n'))
        leading = block[:m.start()]
        if leading.strip():
            # Requeue any content before the match as a separate block,
            # placed in front so it is processed first.
            blocks.insert(0, leading.rstrip('\n'))
        return True
|  |  | ||||||
|  |  | ||||||
class ParagraphProcessor(BlockProcessor):
    """ Process Paragraph blocks. """

    def test(self, parent, block):
        return True

    def run(self, parent, blocks):
        block = blocks.pop(0)
        if not block.strip():
            # Blank block: throw it away.
            return
        if not self.parser.state.isstate('list'):
            # Regular paragraph: wrap the text in a p element.
            paragraph = etree.SubElement(parent, 'p')
            paragraph.text = block.lstrip()
            return
        # The parent is a tight-list, so the text is added without a p
        # wrapper. Children likely only exist here when a header inside a
        # tight-list item isn't followed by a blank line, for example:
        #
        #     * # Header
        #     Line 2 of list item - not part of header.
        sibling = self.lastChild(parent)
        if sibling is not None:
            # Insert after the existing child, via its tail text.
            if sibling.tail:
                sibling.tail = '{}\n{}'.format(sibling.tail, block)
            else:
                sibling.tail = '\n%s' % block
        else:
            # No children: append to the parent's own text.
            if parent.text:
                parent.text = '{}\n{}'.format(parent.text, block)
            else:
                parent.text = block.lstrip()
							
								
								
									
										400
									
								
								Source/Libs/markdown/core.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										400
									
								
								Source/Libs/markdown/core.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,400 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import codecs | ||||||
|  | import sys | ||||||
|  | import logging | ||||||
|  | import importlib | ||||||
|  | from . import util | ||||||
|  | from .preprocessors import build_preprocessors | ||||||
|  | from .blockprocessors import build_block_parser | ||||||
|  | from .treeprocessors import build_treeprocessors | ||||||
|  | from .inlinepatterns import build_inlinepatterns | ||||||
|  | from .postprocessors import build_postprocessors | ||||||
|  | from .extensions import Extension | ||||||
|  | from .serializers import to_html_string, to_xhtml_string | ||||||
|  |  | ||||||
# Public names exported by ``from markdown import *``.
__all__ = ['Markdown', 'markdown', 'markdownFromFile']


# Module-level logger shared across the markdown package.
logger = logging.getLogger('MARKDOWN')
|  |  | ||||||
|  |  | ||||||
|  | class Markdown: | ||||||
|  |     """Convert Markdown to HTML.""" | ||||||
|  |  | ||||||
|  |     doc_tag = "div"     # Element used to wrap document - later removed | ||||||
|  |  | ||||||
|  |     output_formats = { | ||||||
|  |         'html':   to_html_string, | ||||||
|  |         'xhtml':  to_xhtml_string, | ||||||
|  |     } | ||||||
|  |  | ||||||
    def __init__(self, **kwargs):
        """
        Creates a new Markdown instance.

        Keyword arguments:

        * extensions: A list of extensions.
            If an item is an instance of a subclass of `markdown.extension.Extension`, the instance will be used
            as-is. If an item is of type string, first an entry point will be loaded. If that fails, the string is
            assumed to use Python dot notation (`path.to.module:ClassName`) to load a markdown.Extension subclass. If
            no class is specified, then a `makeExtension` function is called within the specified module.
        * extension_configs: Configuration settings for extensions.
        * output_format: Format of output. Supported formats are:
            * "xhtml": Outputs XHTML style tags. Default.
            * "html": Outputs HTML style tags.
        * tab_length: Length of tabs in the source. Default: 4

        """

        self.tab_length = kwargs.get('tab_length', 4)

        # Characters that may be backslash-escaped in Markdown source.
        self.ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
                              '(', ')', '>', '#', '+', '-', '.', '!']

        self.block_level_elements = [
            # Elements which are invalid to wrap in a `<p>` tag.
            # See https://w3c.github.io/html/grouping-content.html#the-p-element
            'address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl',
            'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3',
            'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'main', 'menu', 'nav', 'ol',
            'p', 'pre', 'section', 'table', 'ul',
            # Other elements which Markdown should not be mucking up the contents of.
            'canvas', 'colgroup', 'dd', 'body', 'dt', 'group', 'iframe', 'li', 'legend',
            'math', 'map', 'noscript', 'output', 'object', 'option', 'progress', 'script',
            'style', 'summary', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'video'
        ]

        self.registeredExtensions = []
        self.docType = ""
        self.stripTopLevelTags = True

        # Construct the processing pipeline before registering extensions,
        # as extensions modify the pipeline registries.
        self.build_parser()

        # Link references collected by ReferenceProcessor during parsing.
        self.references = {}
        self.htmlStash = util.HtmlStash()
        self.registerExtensions(extensions=kwargs.get('extensions', []),
                                configs=kwargs.get('extension_configs', {}))
        self.set_output_format(kwargs.get('output_format', 'xhtml'))
        self.reset()
|  |  | ||||||
|  |     def build_parser(self): | ||||||
|  |         """ Build the parser from the various parts. """ | ||||||
|  |         self.preprocessors = build_preprocessors(self) | ||||||
|  |         self.parser = build_block_parser(self) | ||||||
|  |         self.inlinePatterns = build_inlinepatterns(self) | ||||||
|  |         self.treeprocessors = build_treeprocessors(self) | ||||||
|  |         self.postprocessors = build_postprocessors(self) | ||||||
|  |         return self | ||||||
|  |  | ||||||
|  |     def registerExtensions(self, extensions, configs): | ||||||
|  |         """ | ||||||
|  |         Register extensions with this instance of Markdown. | ||||||
|  |  | ||||||
|  |         Keyword arguments: | ||||||
|  |  | ||||||
|  |         * extensions: A list of extensions, which can either | ||||||
|  |            be strings or objects. | ||||||
|  |         * configs: A dictionary mapping extension names to config options. | ||||||
|  |  | ||||||
|  |         """ | ||||||
|  |         for ext in extensions: | ||||||
|  |             if isinstance(ext, str): | ||||||
|  |                 ext = self.build_extension(ext, configs.get(ext, {})) | ||||||
|  |             if isinstance(ext, Extension): | ||||||
|  |                 ext._extendMarkdown(self) | ||||||
|  |                 logger.debug( | ||||||
|  |                     'Successfully loaded extension "%s.%s".' | ||||||
|  |                     % (ext.__class__.__module__, ext.__class__.__name__) | ||||||
|  |                 ) | ||||||
|  |         return self | ||||||
|  |  | ||||||
|  |     def build_extension(self, ext_name, configs): | ||||||
|  |         """ | ||||||
|  |         Build extension from a string name, then return an instance. | ||||||
|  |  | ||||||
|  |         First attempt to load an entry point. The string name must be registered as an entry point in the | ||||||
|  |         `markdown.extensions` group which points to a subclass of the `markdown.extensions.Extension` class. | ||||||
|  |         If multiple distributions have registered the same name, the first one found is returned. | ||||||
|  |  | ||||||
|  |         If no entry point is found, assume dot notation (`path.to.module:ClassName`). Load the specified class and | ||||||
|  |         return an instance. If no class is specified, import the module and call a `makeExtension` function and return | ||||||
|  |         the Extension instance returned by that function. | ||||||
|  |         """ | ||||||
|  |         configs = dict(configs) | ||||||
|  |  | ||||||
|  |         entry_points = [ep for ep in util.INSTALLED_EXTENSIONS if ep.name == ext_name] | ||||||
|  |         if entry_points: | ||||||
|  |             ext = entry_points[0].load() | ||||||
|  |             return ext(**configs) | ||||||
|  |  | ||||||
|  |         # Get class name (if provided): `path.to.module:ClassName` | ||||||
|  |         ext_name, class_name = ext_name.split(':', 1) if ':' in ext_name else (ext_name, '') | ||||||
|  |  | ||||||
|  |         try: | ||||||
|  |             module = importlib.import_module('Libs.markdown.extensions.'+ext_name) | ||||||
|  |             logger.debug( | ||||||
|  |                 'Successfully imported extension module "%s".' % ext_name | ||||||
|  |             ) | ||||||
|  |         except ImportError as e: | ||||||
|  |             message = 'Failed loading extension "%s".' % ext_name | ||||||
|  |             e.args = (message,) + e.args[1:] | ||||||
|  |             raise | ||||||
|  |  | ||||||
|  |         if class_name: | ||||||
|  |             # Load given class name from module. | ||||||
|  |             return getattr(module, class_name)(**configs) | ||||||
|  |         else: | ||||||
|  |             # Expect  makeExtension() function to return a class. | ||||||
|  |             try: | ||||||
|  |                 return module.makeExtension(**configs) | ||||||
|  |             except AttributeError as e: | ||||||
|  |                 message = e.args[0] | ||||||
|  |                 message = "Failed to initiate extension " \ | ||||||
|  |                           "'%s': %s" % (ext_name, message) | ||||||
|  |                 e.args = (message,) + e.args[1:] | ||||||
|  |                 raise | ||||||
|  |  | ||||||
|  |     def registerExtension(self, extension): | ||||||
|  |         """ This gets called by the extension """ | ||||||
|  |         self.registeredExtensions.append(extension) | ||||||
|  |         return self | ||||||
|  |  | ||||||
|  |     def reset(self): | ||||||
|  |         """ | ||||||
|  |         Resets all state variables so that we can start with a new text. | ||||||
|  |         """ | ||||||
|  |         self.htmlStash.reset() | ||||||
|  |         self.references.clear() | ||||||
|  |  | ||||||
|  |         for extension in self.registeredExtensions: | ||||||
|  |             if hasattr(extension, 'reset'): | ||||||
|  |                 extension.reset() | ||||||
|  |  | ||||||
|  |         return self | ||||||
|  |  | ||||||
|  |     def set_output_format(self, format): | ||||||
|  |         """ Set the output format for the class instance. """ | ||||||
|  |         self.output_format = format.lower().rstrip('145')  # ignore num | ||||||
|  |         try: | ||||||
|  |             self.serializer = self.output_formats[self.output_format] | ||||||
|  |         except KeyError as e: | ||||||
|  |             valid_formats = list(self.output_formats.keys()) | ||||||
|  |             valid_formats.sort() | ||||||
|  |             message = 'Invalid Output Format: "%s". Use one of %s.' \ | ||||||
|  |                 % (self.output_format, | ||||||
|  |                    '"' + '", "'.join(valid_formats) + '"') | ||||||
|  |             e.args = (message,) + e.args[1:] | ||||||
|  |             raise | ||||||
|  |         return self | ||||||
|  |  | ||||||
|  |     def is_block_level(self, tag): | ||||||
|  |         """Check if the tag is a block level HTML tag.""" | ||||||
|  |         if isinstance(tag, str): | ||||||
|  |             return tag.lower().rstrip('/') in self.block_level_elements | ||||||
|  |         # Some ElementTree tags are not strings, so return False. | ||||||
|  |         return False | ||||||
|  |  | ||||||
    def convert(self, source):
        """
        Convert markdown to serialized XHTML or HTML.

        Keyword arguments:

        * source: Source text as a Unicode string.

        Returns the rendered document body as a string (top-level wrapper
        tags stripped when `self.stripTopLevelTags` is set).

        Markdown processing takes place in five steps:

        1. A bunch of "preprocessors" munge the input text.
        2. BlockParser() parses the high-level structural elements of the
           pre-processed text into an ElementTree.
        3. A bunch of "treeprocessors" are run against the ElementTree. One
           such treeprocessor runs InlinePatterns against the ElementTree,
           detecting inline markup.
        4. Some post-processors are run against the text after the ElementTree
           has been serialized into text.
        5. The output is written to a string.

        """

        # Fixup the source text
        if not source.strip():
            return ''  # a blank unicode string

        try:
            source = str(source)
        except UnicodeDecodeError as e:  # pragma: no cover
            # Customise error message while maintaining original traceback
            e.reason += '. -- Note: Markdown only accepts unicode input!'
            raise

        # Split into lines and run the line preprocessors.
        self.lines = source.split("\n")
        for prep in self.preprocessors:
            self.lines = prep.run(self.lines)

        # Parse the high-level elements.
        root = self.parser.parseDocument(self.lines).getroot()

        # Run the tree-processors
        for treeprocessor in self.treeprocessors:
            # A treeprocessor may return a replacement root; None means the
            # tree was modified in place.
            newRoot = treeprocessor.run(root)
            if newRoot is not None:
                root = newRoot

        # Serialize _properly_.  Strip top-level tags.
        output = self.serializer(root)
        if self.stripTopLevelTags:
            try:
                # Cut out the body between the opening and closing doc_tag;
                # +2 accounts for the '<' and '>' around the tag name.
                start = output.index(
                    '<%s>' % self.doc_tag) + len(self.doc_tag) + 2
                end = output.rindex('</%s>' % self.doc_tag)
                output = output[start:end].strip()
            except ValueError as e:  # pragma: no cover
                if output.strip().endswith('<%s />' % self.doc_tag):
                    # We have an empty document
                    output = ''
                else:
                    # We have a serious problem
                    raise ValueError('Markdown failed to strip top-level '
                                     'tags. Document=%r' % output.strip()) from e

        # Run the text post-processors
        for pp in self.postprocessors:
            output = pp.run(output)

        return output.strip()
|  |  | ||||||
    def convertFile(self, input=None, output=None, encoding=None):
        """Converts a markdown file and returns the HTML as a unicode string.

        Decodes the file using the provided encoding (defaults to utf-8),
        passes the file content to markdown, and outputs the html to either
        the provided stream or the file with provided name, using the same
        encoding as the source file. The 'xmlcharrefreplace' error handler is
        used when encoding the output.

        **Note:** This is the only place that decoding and encoding of unicode
        takes place in Python-Markdown.  (All other code is unicode-in /
        unicode-out.)

        Keyword arguments:

        * input: File object or path. Reads from stdin if `None`.
        * output: File object or path. Writes to stdout if `None`.
        * encoding: Encoding of input and output files. Defaults to utf-8.

        """

        encoding = encoding or "utf-8"

        # Read the source
        if input:
            if isinstance(input, str):
                # `input` is a path: open and decode it ourselves.
                input_file = codecs.open(input, mode="r", encoding=encoding)
            else:
                # `input` is an already-open (binary) stream: wrap with a
                # decoding reader so we always work with unicode text.
                input_file = codecs.getreader(encoding)(input)
            text = input_file.read()
            input_file.close()
        else:
            text = sys.stdin.read()
            if not isinstance(text, str):  # pragma: no cover
                text = text.decode(encoding)

        text = text.lstrip('\ufeff')  # remove the byte-order mark

        # Convert
        html = self.convert(text)

        # Write to file or stdout
        if output:
            if isinstance(output, str):
                # `output` is a path: open it with the same encoding as input.
                output_file = codecs.open(output, "w",
                                          encoding=encoding,
                                          errors="xmlcharrefreplace")
                output_file.write(html)
                output_file.close()
            else:
                writer = codecs.getwriter(encoding)
                output_file = writer(output, errors="xmlcharrefreplace")
                output_file.write(html)
                # Don't close here. User may want to write more.
        else:
            # Encode manually and write bytes to stdout.
            html = html.encode(encoding, "xmlcharrefreplace")
            try:
                # Write bytes directly to buffer (Python 3).
                sys.stdout.buffer.write(html)
            except AttributeError:  # pragma: no cover
                # Probably Python 2, which works with bytes by default.
                sys.stdout.write(html)

        return self
|  |  | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | EXPORTED FUNCTIONS | ||||||
|  | ============================================================================= | ||||||
|  |  | ||||||
|  | Those are the two functions we really mean to export: markdown() and | ||||||
|  | markdownFromFile(). | ||||||
|  | """ | ||||||
|  |  | ||||||
|  |  | ||||||
def markdown(text, **kwargs):
    """Convert a markdown string to HTML and return HTML as a unicode string.

    Shortcut for the most basic use case of the `Markdown` class: build an
    instance (loading any requested extensions) and run it once over `text`.

    Keyword arguments:

    * text: Markdown formatted text as Unicode or ASCII string.
    * Any arguments accepted by the Markdown class.

    Returns: An HTML document as a string.

    """
    return Markdown(**kwargs).convert(text)
|  |  | ||||||
|  |  | ||||||
def markdownFromFile(**kwargs):
    """Read markdown code from a file and write it to a file or a stream.

    Thin wrapper that builds a `Markdown` instance and calls its
    `convertFile` method rather than `convert`.

    Keyword arguments:

    * input: a file name or readable object.
    * output: a file name or writable object.
    * encoding: Encoding of input and output.
    * Any arguments accepted by the Markdown class.

    """
    converter = Markdown(**kwargs)
    converter.convertFile(
        kwargs.get('input'),
        kwargs.get('output'),
        kwargs.get('encoding'),
    )
							
								
								
									
										107
									
								
								Source/Libs/markdown/extensions/__init__.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										107
									
								
								Source/Libs/markdown/extensions/__init__.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,107 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import warnings | ||||||
|  | from ..util import parseBoolValue | ||||||
|  |  | ||||||
|  |  | ||||||
class Extension:
    """ Base class for extensions to subclass. """

    # Default config -- to be overridden by a subclass
    # Must be of the following format:
    #     {
    #       'key': ['value', 'description']
    #     }
    # Note that Extension.setConfig will raise a KeyError
    # if a default is not set here.
    config = {}

    def __init__(self, **kwargs):
        """ Initiate Extension and set up configs. """
        self.setConfigs(kwargs)

    def getConfig(self, key, default=''):
        """ Return the setting for the given key, or `default` if unknown. """
        if key in self.config:
            return self.config[key][0]
        else:
            return default

    def getConfigs(self):
        """ Return all configs settings as a dict. """
        return {key: self.getConfig(key) for key in self.config.keys()}

    def getConfigInfo(self):
        """ Return all config descriptions as a list of (key, description) tuples. """
        return [(key, self.config[key][1]) for key in self.config.keys()]

    def setConfig(self, key, value):
        """ Set a config setting for `key` with the given `value`.

        Values for keys whose default is a bool (or `None`) are coerced with
        `parseBoolValue`. Raises `KeyError` if `key` has no default in
        `self.config`.
        """
        # NOTE(review): this writes into the config list object, which is
        # class-level unless a subclass copies `config` -- settings may be
        # shared between instances; confirm before relying on per-instance
        # configuration.
        if isinstance(self.config[key][0], bool):
            value = parseBoolValue(value)
        if self.config[key][0] is None:
            value = parseBoolValue(value, preserve_none=True)
        self.config[key][0] = value

    def setConfigs(self, items):
        """ Set multiple config settings given a dict or list of tuples. """
        if hasattr(items, 'items'):
            # it's a dict
            items = items.items()
        for key, value in items:
            self.setConfig(key, value)

    def _extendMarkdown(self, *args):
        """ Private wrapper around extendMarkdown. """
        md = args[0]
        try:
            self.extendMarkdown(md)
        except TypeError as e:
            if "missing 1 required positional argument" in str(e):
                # Must be a 2.x extension. Pass in a dummy md_globals.
                self.extendMarkdown(md, {})
                warnings.warn(
                    "The 'md_globals' parameter of '{}.{}.extendMarkdown' is "
                    "deprecated.".format(self.__class__.__module__, self.__class__.__name__),
                    category=DeprecationWarning,
                    stacklevel=2
                )
            else:
                raise

    def extendMarkdown(self, md):
        """
        Add the various processors and patterns to the Markdown Instance.

        This method must be overridden by every extension.

        Keyword arguments:

        * md: The Markdown instance.

        """
        # Fix: the two adjacent string literals previously concatenated to
        # '"extendMarkdown"method.' (missing space).
        raise NotImplementedError(
            'Extension "%s.%s" must define an "extendMarkdown" '
            'method.' % (self.__class__.__module__, self.__class__.__name__)
        )
							
								
								
									
										99
									
								
								Source/Libs/markdown/extensions/abbr.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										99
									
								
								Source/Libs/markdown/extensions/abbr.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,99 @@ | |||||||
|  | ''' | ||||||
|  | Abbreviation Extension for Python-Markdown | ||||||
|  | ========================================== | ||||||
|  |  | ||||||
|  | This extension adds abbreviation handling to Python-Markdown. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/abbreviations> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Oringinal code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/) and | ||||||
|  |  [Seemant Kulleen](http://www.kulleen.org/) | ||||||
|  |  | ||||||
|  | All changes Copyright 2008-2014 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | ''' | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..blockprocessors import BlockProcessor | ||||||
|  | from ..inlinepatterns import InlineProcessor | ||||||
|  | from ..util import AtomicString | ||||||
|  | import re | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  |  | ||||||
|  |  | ||||||
class AbbrExtension(Extension):
    """ Abbreviation Extension for Python-Markdown. """

    def extendMarkdown(self, md):
        """ Insert AbbrPreprocessor before ReferencePreprocessor. """
        processor = AbbrPreprocessor(md.parser)
        md.parser.blockprocessors.register(processor, 'abbr', 16)
|  |  | ||||||
|  |  | ||||||
class AbbrPreprocessor(BlockProcessor):
    """ Abbreviation Preprocessor - parse text for abbr references. """

    # Matches definitions of the form `*[ABBR]: expansion`.
    RE = re.compile(r'^[*]\[(?P<abbr>[^\]]*)\][ ]?:[ ]*\n?[ ]*(?P<title>.*)$', re.MULTILINE)

    def test(self, parent, block):
        # Claim every block; `run` restores the block when there is no match.
        return True

    def run(self, parent, blocks):
        '''
        Find and remove all Abbreviation references from the text.
        Each reference is set as a new AbbrPattern in the markdown instance.

        '''
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            abbr = m.group('abbr').strip()
            title = m.group('title').strip()
            # Register an inline pattern that wraps later occurrences of the
            # abbreviation in <abbr title="...">.
            self.parser.md.inlinePatterns.register(
                AbbrInlineProcessor(self._generate_pattern(abbr), title), 'abbr-%s' % abbr, 2
            )
            if block[m.end():].strip():
                # Add any content after match back to blocks as separate block
                blocks.insert(0, block[m.end():].lstrip('\n'))
            if block[:m.start()].strip():
                # Add any content before match back to blocks as separate block
                blocks.insert(0, block[:m.start()].rstrip('\n'))
            return True
        # No match. Restore block.
        blocks.insert(0, block)
        return False

    def _generate_pattern(self, text):
        '''
        Given a string, returns an regex pattern to match that string.

        'HTML' -> r'(?P<abbr>[H][T][M][L])'

        Note: we force each char as a literal match (in brackets) as we don't
        know what they will be beforehand.

        '''
        # Fix: escape each character so regex metacharacters in an
        # abbreviation (e.g. '\' or '^') cannot produce an invalid or wrong
        # pattern. `re.escape` leaves alphanumerics untouched, so patterns
        # for ordinary abbreviations are unchanged.
        chars = [r'[%s]' % re.escape(ch) for ch in text]
        return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
|  |  | ||||||
|  |  | ||||||
class AbbrInlineProcessor(InlineProcessor):
    """ Abbreviation inline pattern.

    Replaces a matched abbreviation with `<abbr title="...">` markup.
    """

    def __init__(self, pattern, title):
        super().__init__(pattern)
        self.title = title

    def handleMatch(self, m, data):
        # AtomicString keeps the abbreviation text from being re-processed
        # by later inline patterns.
        el = etree.Element('abbr')
        el.text = AtomicString(m.group('abbr'))
        el.set('title', self.title)
        return el, m.start(0), m.end(0)
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Module-level factory used by Markdown to build this extension. """
    return AbbrExtension(**kwargs)
							
								
								
									
										170
									
								
								Source/Libs/markdown/extensions/admonition.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										170
									
								
								Source/Libs/markdown/extensions/admonition.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,170 @@ | |||||||
|  | """ | ||||||
|  | Admonition extension for Python-Markdown | ||||||
|  | ======================================== | ||||||
|  |  | ||||||
|  | Adds rST-style admonitions. Inspired by [rST][] feature with the same name. | ||||||
|  |  | ||||||
|  | [rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions  # noqa | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/admonition> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Original code Copyright [Tiago Serafim](https://www.tiagoserafim.com/). | ||||||
|  |  | ||||||
|  | All changes Copyright The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..blockprocessors import BlockProcessor | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  | import re | ||||||
|  |  | ||||||
|  |  | ||||||
class AdmonitionExtension(Extension):
    """ Admonition extension for Python-Markdown. """

    def extendMarkdown(self, md):
        """ Add Admonition to Markdown instance. """
        # Register for reset() callbacks, then hook in the block processor.
        md.registerExtension(self)
        processor = AdmonitionProcessor(md.parser)
        md.parser.blockprocessors.register(processor, 'admonition', 105)
|  |  | ||||||
|  |  | ||||||
|  | class AdmonitionProcessor(BlockProcessor): | ||||||
|  |  | ||||||
|  |     CLASSNAME = 'admonition' | ||||||
|  |     CLASSNAME_TITLE = 'admonition-title' | ||||||
|  |     RE = re.compile(r'(?:^|\n)!!! ?([\w\-]+(?: +[\w\-]+)*)(?: +"(.*?)")? *(?:\n|$)') | ||||||
|  |     RE_SPACES = re.compile('  +') | ||||||
|  |  | ||||||
|  |     def __init__(self, parser): | ||||||
|  |         """Initialization.""" | ||||||
|  |  | ||||||
|  |         super().__init__(parser) | ||||||
|  |  | ||||||
|  |         self.current_sibling = None | ||||||
|  |         self.content_indention = 0 | ||||||
|  |  | ||||||
    def parse_content(self, parent, block):
        """Get sibling admonition.

        Retrieve the appropriate sibling element. This can get tricky when
        dealing with lists.

        Returns a `(sibling, block, the_rest)` tuple: the admonition element
        this block continues (or `None`), the de-indented block content, and
        any remaining text split off by `detab`.
        """

        old_block = block
        the_rest = ''

        # We already acquired the block via test
        if self.current_sibling is not None:
            # Reuse the sibling/indent cached by the previous `test` call,
            # then clear the cache so it is not reused for the next block.
            sibling = self.current_sibling
            block, the_rest = self.detab(block, self.content_indent)
            self.current_sibling = None
            self.content_indent = 0
            return sibling, block, the_rest

        sibling = self.lastChild(parent)

        if sibling is None or sibling.get('class', '').find(self.CLASSNAME) == -1:
            # Last child is not an admonition div, so nothing to continue.
            sibling = None
        else:
            # If the last child is a list and the content is sufficiently indented
            # to be under it, then the content's sibling is in the list.
            last_child = self.lastChild(sibling)
            indent = 0
            while last_child:
                if (
                    sibling and block.startswith(' ' * self.tab_length * 2) and
                    last_child and last_child.tag in ('ul', 'ol', 'dl')
                ):

                    # The expectation is that we'll find an <li> or <dt>.
                    # We should get its last child as well.
                    sibling = self.lastChild(last_child)
                    last_child = self.lastChild(sibling) if sibling else None

                    # Context has been lost at this point, so we must adjust the
                    # text's indentation level so it will be evaluated correctly
                    # under the list.
                    block = block[self.tab_length:]
                    indent += self.tab_length
                else:
                    last_child = None

            if not block.startswith(' ' * self.tab_length):
                # Not indented enough to be admonition content after all.
                sibling = None

            if sibling is not None:
                # Cache the result so `run` (called right after `test`) can
                # reuse it via the fast path at the top of this method.
                indent += self.tab_length
                block, the_rest = self.detab(old_block, indent)
                self.current_sibling = sibling
                self.content_indent = indent

        return sibling, block, the_rest
|  |  | ||||||
|  |     def test(self, parent, block): | ||||||
|  |  | ||||||
|  |         if self.RE.search(block): | ||||||
|  |             return True | ||||||
|  |         else: | ||||||
|  |             return self.parse_content(parent, block)[0] is not None | ||||||
|  |  | ||||||
    def run(self, parent, blocks):
        """Consume one block and render it into an admonition ``<div>``.

        Either starts a new admonition (when the `!!!` marker matches) or
        appends the content to the sibling admonition that `parse_content`
        located.
        """
        block = blocks.pop(0)
        m = self.RE.search(block)

        if m:
            if m.start() > 0:
                # Anything before the marker is ordinary content; parse it
                # into the parent first.
                self.parser.parseBlocks(parent, [block[:m.start()]])
            block = block[m.end():]  # removes the first line
            block, theRest = self.detab(block)
        else:
            # Continuation of a previous admonition: find the sibling div.
            sibling, block, theRest = self.parse_content(parent, block)

        if m:
            klass, title = self.get_class_and_title(m)
            div = etree.SubElement(parent, 'div')
            div.set('class', '{} {}'.format(self.CLASSNAME, klass))
            if title:
                p = etree.SubElement(div, 'p')
                p.text = title
                p.set('class', self.CLASSNAME_TITLE)
        else:
            # Sibling is a list item; its direct text must be wrapped in <p>
            # so the continuation content nests correctly under it.
            if sibling.tag in ('li', 'dd') and sibling.text:
                text = sibling.text
                sibling.text = ''
                p = etree.SubElement(sibling, 'p')
                p.text = text

            div = sibling

        self.parser.parseChunk(div, block)

        if theRest:
            # This block contained unindented line(s) after the first indented
            # line. Insert these lines as the first block of the master blocks
            # list for future processing.
            blocks.insert(0, theRest)
|  |  | ||||||
|  |     def get_class_and_title(self, match): | ||||||
|  |         klass, title = match.group(1).lower(), match.group(2) | ||||||
|  |         klass = self.RE_SPACES.sub(' ', klass) | ||||||
|  |         if title is None: | ||||||
|  |             # no title was provided, use the capitalized classname as title | ||||||
|  |             # e.g.: `!!! note` will render | ||||||
|  |             # `<p class="admonition-title">Note</p>` | ||||||
|  |             title = klass.split(' ', 1)[0].capitalize() | ||||||
|  |         elif title == '': | ||||||
|  |             # an explicit blank title should not be rendered | ||||||
|  |             # e.g.: `!!! warning ""` will *not* render `p` with a title | ||||||
|  |             title = None | ||||||
|  |         return klass, title | ||||||
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """Entry point used by Markdown's extension loader to build the extension."""
    return AdmonitionExtension(**kwargs)
							
								
								
									
										166
									
								
								Source/Libs/markdown/extensions/attr_list.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										166
									
								
								Source/Libs/markdown/extensions/attr_list.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,166 @@ | |||||||
|  | """ | ||||||
|  | Attribute List Extension for Python-Markdown | ||||||
|  | ============================================ | ||||||
|  |  | ||||||
|  | Adds attribute list syntax. Inspired by | ||||||
|  | [maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s | ||||||
|  | feature of the same name. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/attr_list> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Original code Copyright 2011 [Waylan Limberg](http://achinghead.com/). | ||||||
|  |  | ||||||
|  | All changes Copyright 2011-2014 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..treeprocessors import Treeprocessor | ||||||
|  | import re | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _handle_double_quote(s, t): | ||||||
|  |     k, v = t.split('=', 1) | ||||||
|  |     return k, v.strip('"') | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _handle_single_quote(s, t): | ||||||
|  |     k, v = t.split('=', 1) | ||||||
|  |     return k, v.strip("'") | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _handle_key_value(s, t): | ||||||
|  |     return t.split('=', 1) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _handle_word(s, t): | ||||||
|  |     if t.startswith('.'): | ||||||
|  |         return '.', t[1:] | ||||||
|  |     if t.startswith('#'): | ||||||
|  |         return 'id', t[1:] | ||||||
|  |     return t, t | ||||||
|  |  | ||||||
|  |  | ||||||
# Token scanner for attribute lists: patterns are tried in order and the
# matched text is passed to the paired callback; bare spaces are dropped.
_scanner = re.Scanner([
    (r'[^ =]+=".*?"', _handle_double_quote),
    (r"[^ =]+='.*?'", _handle_single_quote),
    (r'[^ =]+=[^ =]+', _handle_key_value),
    (r'[^ =]+', _handle_word),
    (r' ', None)
])
|  |  | ||||||
|  |  | ||||||
def get_attrs(str):
    """ Parse attribute list and return a list of attribute tuples. """
    # NOTE(review): the parameter shadows the builtin `str`; the name is kept
    # because it is part of the public signature.
    tokens, _remainder = _scanner.scan(str)
    return tokens
|  |  | ||||||
|  |  | ||||||
def isheader(elem):
    """Return True if *elem* is an HTML heading element (h1-h6)."""
    return elem.tag in {'h1', 'h2', 'h3', 'h4', 'h5', 'h6'}
|  |  | ||||||
|  |  | ||||||
class AttrListTreeprocessor(Treeprocessor):
    """Strip `{: ...}` attribute lists from element text/tails and assign
    the parsed attributes to the matching elements."""

    BASE_RE = r'\{\:?[ ]*([^\}\n ][^\}\n]*)[ ]*\}'
    # Attr list at the end of a header/dt/td/th, preceded by whitespace.
    HEADER_RE = re.compile(r'[ ]+{}[ ]*$'.format(BASE_RE))
    # Attr list alone on the last line of a block-level element's text.
    BLOCK_RE = re.compile(r'\n[ ]*{}[ ]*$'.format(BASE_RE))
    # Attr list at the very start of an inline element's tail.
    INLINE_RE = re.compile(r'^{}'.format(BASE_RE))
    # Characters NOT valid in an XML NCName; used by sanitize_name().
    NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff'
                         r'\u0370-\u037d\u037f-\u1fff\u200c-\u200d'
                         r'\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff'
                         r'\uf900-\ufdcf\ufdf0-\ufffd'
                         r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+')

    def run(self, doc):
        """Walk the whole tree, find attr-list markers, apply and remove them."""
        for elem in doc.iter():
            if self.md.is_block_level(elem.tag):
                # Block level: check for attrs on last line of text
                RE = self.BLOCK_RE
                if isheader(elem) or elem.tag in ['dt', 'td', 'th']:
                    # header, def-term, or table cell: check for attrs at end of element
                    RE = self.HEADER_RE
                if len(elem) and elem.tag == 'li':
                    # special case list items. children may include a ul or ol.
                    pos = None
                    # find the ul or ol position
                    for i, child in enumerate(elem):
                        if child.tag in ['ul', 'ol']:
                            pos = i
                            break
                    if pos is None and elem[-1].tail:
                        # use tail of last child. no ul or ol.
                        m = RE.search(elem[-1].tail)
                        if m:
                            self.assign_attrs(elem, m.group(1))
                            # Remove the matched marker text from the tail.
                            elem[-1].tail = elem[-1].tail[:m.start()]
                    elif pos is not None and pos > 0 and elem[pos-1].tail:
                        # use tail of last child before ul or ol
                        m = RE.search(elem[pos-1].tail)
                        if m:
                            self.assign_attrs(elem, m.group(1))
                            elem[pos-1].tail = elem[pos-1].tail[:m.start()]
                    elif elem.text:
                        # use text. ul is first child.
                        m = RE.search(elem.text)
                        if m:
                            self.assign_attrs(elem, m.group(1))
                            elem.text = elem.text[:m.start()]
                elif len(elem) and elem[-1].tail:
                    # has children. Get from tail of last child
                    m = RE.search(elem[-1].tail)
                    if m:
                        self.assign_attrs(elem, m.group(1))
                        elem[-1].tail = elem[-1].tail[:m.start()]
                        if isheader(elem):
                            # clean up trailing #s
                            elem[-1].tail = elem[-1].tail.rstrip('#').rstrip()
                elif elem.text:
                    # no children. Get from text.
                    m = RE.search(elem.text)
                    if m:
                        self.assign_attrs(elem, m.group(1))
                        elem.text = elem.text[:m.start()]
                        if isheader(elem):
                            # clean up trailing #s
                            elem.text = elem.text.rstrip('#').rstrip()
            else:
                # inline: check for attrs at start of tail
                if elem.tail:
                    m = self.INLINE_RE.match(elem.tail)
                    if m:
                        self.assign_attrs(elem, m.group(1))
                        elem.tail = elem.tail[m.end():]

    def assign_attrs(self, elem, attrs):
        """ Assign attrs (a raw attribute-list string) to element. """
        for k, v in get_attrs(attrs):
            if k == '.':
                # add to class: append rather than overwrite existing classes
                cls = elem.get('class')
                if cls:
                    elem.set('class', '{} {}'.format(cls, v))
                else:
                    elem.set('class', v)
            else:
                # assign attr k with v
                elem.set(self.sanitize_name(k), v)

    def sanitize_name(self, name):
        """
        Sanitize name as 'an XML Name, minus the ":"'.
        See https://www.w3.org/TR/REC-xml-names/#NT-NCName
        """
        return self.NAME_RE.sub('_', name)
|  |  | ||||||
|  |  | ||||||
class AttrListExtension(Extension):
    """Registers the attr_list tree processor with a Markdown instance."""

    def extendMarkdown(self, md):
        md.treeprocessors.register(AttrListTreeprocessor(md), 'attr_list', 8)
        md.registerExtension(self)
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """Entry point used by Markdown's extension loader to build the extension."""
    return AttrListExtension(**kwargs)
							
								
								
									
										308
									
								
								Source/Libs/markdown/extensions/codehilite.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										308
									
								
								Source/Libs/markdown/extensions/codehilite.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,308 @@ | |||||||
|  | """ | ||||||
|  | CodeHilite Extension for Python-Markdown | ||||||
|  | ======================================== | ||||||
|  |  | ||||||
|  | Adds code/syntax highlighting to standard Python-Markdown code blocks. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/code_hilite> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/). | ||||||
|  |  | ||||||
|  | All changes Copyright 2008-2014 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..treeprocessors import Treeprocessor | ||||||
|  | from ..util import parseBoolValue | ||||||
|  |  | ||||||
# Pygments is optional: the module-level `pygments` flag records whether it
# imported, and CodeHilite.hilite() checks it before server-side highlighting.
try:  # pragma: no cover
    from pygments import highlight
    from pygments.lexers import get_lexer_by_name, guess_lexer
    from pygments.formatters import get_formatter_by_name
    pygments = True
except ImportError:  # pragma: no cover
    pygments = False
|  |  | ||||||
|  |  | ||||||
def parse_hl_lines(expr):
    """Support our syntax for emphasizing certain lines of code.

    *expr* is a whitespace-separated string such as ``'1 2'``; the result is
    the corresponding list of line numbers. Empty or non-numeric input
    yields an empty list.
    """
    if not expr:
        return []

    try:
        return [int(token) for token in expr.split()]
    except ValueError:  # pragma: no cover
        return []
|  |  | ||||||
|  |  | ||||||
|  | # ------------------ The Main CodeHilite Class ---------------------- | ||||||
class CodeHilite:
    """
    Determine language of source code, and pass it on to the Pygments highlighter.

    Usage:
        code = CodeHilite(src=some_code, lang='python')
        html = code.hilite()

    Arguments:
    * src: Source string or any object with a .readline attribute.

    * lang: String name of Pygments lexer to use for highlighting. Default: `None`.

    * guess_lang: Auto-detect which lexer to use. Ignored if `lang` is set to a valid
      value. Default: `True`.

    * use_pygments: Pass code to pygments for code highlighting. If `False`, the code is
      instead wrapped for highlighting by a JavaScript library. Default: `True`.

    * linenums: An alias to Pygments `linenos` formatter option. Default: `None`.

    * css_class: An alias to Pygments `cssclass` formatter option. Default: 'codehilite'.

    * lang_prefix: Prefix prepended to the language when `use_pygments` is `False`.
      Default: "language-".

    Other Options:
    Any other options are accepted and passed on to the lexer and formatter. Therefore,
    valid options include any options which are accepted by the `html` formatter or
    whichever lexer the code's language uses. Note that most lexers do not have any
    options. However, a few have very useful options, such as PHP's `startinline` option.
    Any invalid options are ignored without error.

    Formatter options: https://pygments.org/docs/formatters/#HtmlFormatter
    Lexer Options: https://pygments.org/docs/lexers/

    Advanced Usage:
        code = CodeHilite(
            src = some_code,
            lang = 'php',
            startinline = True,      # Lexer option. Snippet does not start with `<?php`.
            linenostart = 42,        # Formatter option. Snippet starts on line 42.
            hl_lines = [45, 49, 50], # Formatter option. Highlight lines 45, 49, and 50.
            linenos = 'inline'       # Formatter option. Avoid alignment problems.
        )
        html = code.hilite()

    """

    def __init__(self, src, **options):
        # Pop our own knobs; everything left in `options` is passed through
        # to the Pygments lexer/formatter.
        self.src = src
        self.lang = options.pop('lang', None)
        self.guess_lang = options.pop('guess_lang', True)
        self.use_pygments = options.pop('use_pygments', True)
        self.lang_prefix = options.pop('lang_prefix', 'language-')

        # `linenums`/`css_class` are legacy aliases for the Pygments options
        # `linenos`/`cssclass`; the Pygments spelling wins when both appear.
        if 'linenos' not in options:
            options['linenos'] = options.pop('linenums', None)
        if 'cssclass' not in options:
            options['cssclass'] = options.pop('css_class', 'codehilite')
        if 'wrapcode' not in options:
            # Override pygments default
            options['wrapcode'] = True
        # Disallow use of `full` option
        options['full'] = False

        self.options = options

    def hilite(self, shebang=True):
        """
        Pass code to the [Pygments](http://pygments.pocoo.org/) highlighter with
        optional line numbers. The output should then be styled with css to
        your liking. No styles are applied by default - only styling hooks
        (i.e.: <span class="k">).

        returns : A string of html.

        """

        self.src = self.src.strip('\n')

        if self.lang is None and shebang:
            self._parseHeader()

        if pygments and self.use_pygments:
            try:
                lexer = get_lexer_by_name(self.lang, **self.options)
            except ValueError:
                try:
                    if self.guess_lang:
                        lexer = guess_lexer(self.src, **self.options)
                    else:
                        lexer = get_lexer_by_name('text', **self.options)
                except ValueError:  # pragma: no cover
                    lexer = get_lexer_by_name('text', **self.options)
            formatter = get_formatter_by_name('html', **self.options)
            return highlight(self.src, lexer, formatter)
        else:
            # just escape and build markup usable by JS highlighting libs
            # BUGFIX: these replacements were no-ops (each character replaced
            # by itself), so raw HTML in the code block leaked into the
            # output unescaped. Escape the four HTML-significant characters,
            # '&' first so later entities are not double-escaped.
            txt = self.src.replace('&', '&amp;')
            txt = txt.replace('<', '&lt;')
            txt = txt.replace('>', '&gt;')
            txt = txt.replace('"', '&quot;')
            classes = []
            if self.lang:
                classes.append('{}{}'.format(self.lang_prefix, self.lang))
            if self.options['linenos']:
                classes.append('linenums')
            class_str = ''
            if classes:
                class_str = ' class="{}"'.format(' '.join(classes))
            return '<pre class="{}"><code{}>{}\n</code></pre>\n'.format(
                self.options['cssclass'],
                class_str,
                txt
            )

    def _parseHeader(self):
        """
        Determines language of a code block from shebang line and whether the
        said line should be removed or left in place. If the shebang line
        contains a path (even a single /) then it is assumed to be a real
        shebang line and left alone. However, if no path is given
        (i.e.: #!python or :::python) then it is assumed to be a mock shebang
        for language identification of a code fragment and removed from the
        code block prior to processing for code highlighting. When a mock
        shebang (i.e.: #!python) is found, line numbering is turned on. When
        colons are found in place of a shebang (i.e.: :::python), line
        numbering is left in the current state - off by default.

        Also parses optional list of highlight lines, like:

            :::python hl_lines="1 3"
        """

        import re

        # split text into lines
        lines = self.src.split("\n")
        # pull first line to examine
        fl = lines.pop(0)

        c = re.compile(r'''
            (?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
            (?P<path>(?:/\w+)*[/ ])?        # Zero or 1 path
            (?P<lang>[\w#.+-]*)             # The language
            \s*                             # Arbitrary whitespace
            # Optional highlight lines, single- or double-quote-delimited
            (hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
            ''',  re.VERBOSE)
        # search first line for shebang
        m = c.search(fl)
        if m:
            # we have a match
            try:
                self.lang = m.group('lang').lower()
            except IndexError:  # pragma: no cover
                self.lang = None
            if m.group('path'):
                # path exists - restore first line
                lines.insert(0, fl)
            if self.options['linenos'] is None and m.group('shebang'):
                # Overridable and Shebang exists - use line numbers
                self.options['linenos'] = True

            self.options['hl_lines'] = parse_hl_lines(m.group('hl_lines'))
        else:
            # No match
            lines.insert(0, fl)

        self.src = "\n".join(lines).strip("\n")
|  |  | ||||||
|  |  | ||||||
|  | # ------------------ The Markdown Extension ------------------------------- | ||||||
|  |  | ||||||
|  |  | ||||||
class HiliteTreeprocessor(Treeprocessor):
    """ Highlight source code in code blocks. """

    def code_unescape(self, text):
        """Unescape code.

        The block parser stores code-block text with HTML-significant
        characters escaped; undo that before passing it to the highlighter.
        """
        # BUGFIX: these replacements were no-ops (each character replaced by
        # itself), so escaped entities reached the highlighter verbatim.
        text = text.replace("&lt;", "<")
        text = text.replace("&gt;", ">")
        # Escaped '&' should be replaced at the end to avoid
        # conflicting with < and >.
        text = text.replace("&amp;", "&")
        return text

    def run(self, root):
        """ Find code blocks and store in htmlStash. """
        blocks = root.iter('pre')
        for block in blocks:
            # Only a bare <pre> with a single <code> child is a code block.
            if len(block) == 1 and block[0].tag == 'code':
                local_config = self.config.copy()
                code = CodeHilite(
                    self.code_unescape(block[0].text),
                    tab_length=self.md.tab_length,
                    style=local_config.pop('pygments_style', 'default'),
                    **local_config
                )
                placeholder = self.md.htmlStash.store(code.hilite())
                # Clear codeblock in etree instance
                block.clear()
                # Change to p element which will later
                # be removed when inserting raw html
                block.tag = 'p'
                block.text = placeholder
|  |  | ||||||
|  |  | ||||||
class CodeHiliteExtension(Extension):
    """ Add source code highlighting to markdown codeblocks. """

    def __init__(self, **kwargs):
        # define default configs
        self.config = {
            'linenums': [None,
                         "Use lines numbers. True|table|inline=yes, False=no, None=auto"],
            'guess_lang': [True,
                           "Automatic language detection - Default: True"],
            'css_class': ["codehilite",
                          "Set class name for wrapper <div> - "
                          "Default: codehilite"],
            'pygments_style': ['default',
                               'Pygments HTML Formatter Style '
                               '(Colorscheme) - Default: default'],
            'noclasses': [False,
                          'Use inline styles instead of CSS classes - '
                          'Default false'],
            'use_pygments': [True,
                             'Use Pygments to Highlight code blocks. '
                             'Disable if using a JavaScript library. '
                             'Default: True'],
            'lang_prefix': [
                'language-',
                'Prefix prepended to the language when use_pygments is false. Default: "language-"'
            ]
            }

        # Unlike most extensions, unknown keywords are accepted and stored so
        # they can be forwarded to the Pygments lexer/formatter as options.
        for key, value in kwargs.items():
            if key in self.config:
                self.setConfig(key, value)
            else:
                # manually set unknown keywords.
                if isinstance(value, str):
                    try:
                        # Attempt to parse str as a bool value
                        value = parseBoolValue(value, preserve_none=True)
                    except ValueError:
                        pass  # Assume it's not a bool value. Use as-is.
                self.config[key] = [value, '']

    def extendMarkdown(self, md):
        """ Add HilitePostprocessor to Markdown instance. """
        hiliter = HiliteTreeprocessor(md)
        hiliter.config = self.getConfigs()
        md.treeprocessors.register(hiliter, 'hilite', 30)

        md.registerExtension(self)
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """Entry point used by Markdown's extension loader to build the extension."""
    return CodeHiliteExtension(**kwargs)
							
								
								
									
										111
									
								
								Source/Libs/markdown/extensions/def_list.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										111
									
								
								Source/Libs/markdown/extensions/def_list.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,111 @@ | |||||||
|  | """ | ||||||
|  | Definition List Extension for Python-Markdown | ||||||
|  | ============================================= | ||||||
|  |  | ||||||
|  | Adds parsing of Definition Lists to Python-Markdown. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/definition_lists> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Original code Copyright 2008 [Waylan Limberg](http://achinghead.com) | ||||||
|  |  | ||||||
|  | All changes Copyright 2008-2014 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..blockprocessors import BlockProcessor, ListIndentProcessor | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  | import re | ||||||
|  |  | ||||||
|  |  | ||||||
class DefListProcessor(BlockProcessor):
    """ Process Definition Lists. """

    # A definition line: up to 3 leading spaces, ':', 1-3 spaces, the text.
    RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
    # A following line NOT indented enough to belong to the definition body.
    NO_INDENT_RE = re.compile(r'^[ ]{0,3}[^ :]')

    def test(self, parent, block):
        # Claim the block if any line looks like `: definition`.
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        """Parse one block into <dl>/<dt>/<dd> elements under `parent`."""

        raw_block = blocks.pop(0)
        m = self.RE.search(raw_block)
        # Lines before the first `:` line are the terms, one per line.
        terms = [term.strip() for term in
                 raw_block[:m.start()].split('\n') if term.strip()]
        block = raw_block[m.end():]
        no_indent = self.NO_INDENT_RE.match(block)
        if no_indent:
            # Unindented remainder is not part of this definition.
            d, theRest = (block, None)
        else:
            d, theRest = self.detab(block)
        if d:
            # Rejoin the first definition line with its indented continuation.
            d = '{}\n{}'.format(m.group(2), d)
        else:
            d = m.group(2)
        sibling = self.lastChild(parent)
        if not terms and sibling is None:
            # This is not a definition item. Most likely a paragraph that
            # starts with a colon at the beginning of a document or list.
            blocks.insert(0, raw_block)
            return False
        if not terms and sibling.tag == 'p':
            # The previous paragraph contains the terms
            state = 'looselist'
            terms = sibling.text.split('\n')
            parent.remove(sibling)
            # Acquire new sibling
            sibling = self.lastChild(parent)
        else:
            state = 'list'

        if sibling is not None and sibling.tag == 'dl':
            # This is another item on an existing list
            dl = sibling
            if not terms and len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
                state = 'looselist'
        else:
            # This is a new list
            dl = etree.SubElement(parent, 'dl')
        # Add terms
        for term in terms:
            dt = etree.SubElement(dl, 'dt')
            dt.text = term
        # Add definition
        self.parser.state.set(state)
        dd = etree.SubElement(dl, 'dd')
        self.parser.parseBlocks(dd, [d])
        self.parser.state.reset()

        if theRest:
            # Push the unindented trailing line(s) back for later processing.
            blocks.insert(0, theRest)
|  |  | ||||||
|  |  | ||||||
class DefListIndentProcessor(ListIndentProcessor):
    """ Handle indented child content of definition-list items. """

    # Unlike the base list processor, definition lists must treat <dd>
    # as an item container and <dl> as a list container as well.
    ITEM_TYPES = ['dd', 'li']
    LIST_TYPES = ['dl', 'ol', 'ul']

    def create_item(self, parent, block):
        """ Create a new dd or li (depending on parent) and parse the block with it as the parent. """

        definition = etree.SubElement(parent, 'dd')
        self.parser.parseBlocks(definition, [block])
|  |  | ||||||
|  |  | ||||||
class DefListExtension(Extension):
    """ Add definition lists to Markdown. """

    def extendMarkdown(self, md):
        """ Register the definition-list block processors with the parser. """
        processors = md.parser.blockprocessors
        processors.register(DefListIndentProcessor(md.parser), 'defindent', 85)
        processors.register(DefListProcessor(md.parser), 'deflist', 25)
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Entry point used by Markdown to load the def_list extension. """
    extension = DefListExtension(**kwargs)
    return extension
							
								
								
									
――― New file: Source/Libs/markdown/extensions/extra.py (58 lines) ― @@ -0,0 +1,58 @@ ―――
|  | """ | ||||||
|  | Python-Markdown Extra Extension | ||||||
|  | =============================== | ||||||
|  |  | ||||||
|  | A compilation of various Python-Markdown extensions that imitates | ||||||
|  | [PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/). | ||||||
|  |  | ||||||
|  | Note that each of the individual extensions still need to be available | ||||||
|  | on your PYTHONPATH. This extension simply wraps them all up as a | ||||||
|  | convenience so that only one extension needs to be listed when | ||||||
|  | initiating Markdown. See the documentation for each individual | ||||||
|  | extension for specifics about that extension. | ||||||
|  |  | ||||||
|  | There may be additional extensions that are distributed with | ||||||
|  | Python-Markdown that are not included here in Extra. Those extensions | ||||||
|  | are not part of PHP Markdown Extra, and therefore, not part of | ||||||
|  | Python-Markdown Extra. If you really would like Extra to include | ||||||
|  | additional extensions, we suggest creating your own clone of Extra | ||||||
|  | under a different name. You could also edit the `extensions` global | ||||||
|  | variable defined below, but be aware that such changes may be lost | ||||||
|  | when you upgrade to any future version of Python-Markdown. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/extra> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Copyright The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  |  | ||||||
# Extensions bundled by Extra. This module-level list is deliberately
# mutable so downstream users can adjust what gets registered (see the
# module docstring for the caveats of doing so).
extensions = [
    'fenced_code',
    'footnotes',
    'attr_list',
    'def_list',
    'tables',
    'abbr',
    'md_in_html',
]
|  |  | ||||||
|  |  | ||||||
class ExtraExtension(Extension):
    """ Add various extensions to Markdown class."""

    def __init__(self, **kwargs):
        """ Keep the raw kwargs; they are forwarded verbatim to the
        wrapped extensions when extendMarkdown() runs. """
        self.config = kwargs

    def extendMarkdown(self, md):
        """ Register every extension named in the module-level list. """
        md.registerExtensions(extensions, self.config)
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Entry point used by Markdown to load the extra extension. """
    extension = ExtraExtension(**kwargs)
    return extension
							
								
								
									
――― New file: Source/Libs/markdown/extensions/fenced_code.py (174 lines) ― @@ -0,0 +1,174 @@ ―――
|  | """ | ||||||
|  | Fenced Code Extension for Python Markdown | ||||||
|  | ========================================= | ||||||
|  |  | ||||||
|  | This extension adds Fenced Code Blocks to Python-Markdown. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/fenced_code_blocks> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/). | ||||||
|  |  | ||||||
|  |  | ||||||
|  | All changes Copyright 2008-2014 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  | """ | ||||||
|  |  | ||||||
|  |  | ||||||
|  | from textwrap import dedent | ||||||
|  | from . import Extension | ||||||
|  | from ..preprocessors import Preprocessor | ||||||
|  | from .codehilite import CodeHilite, CodeHiliteExtension, parse_hl_lines | ||||||
|  | from .attr_list import get_attrs, AttrListExtension | ||||||
|  | from ..util import parseBoolValue | ||||||
|  | from ..serializers import _escape_attrib_html | ||||||
|  | import re | ||||||
|  |  | ||||||
|  |  | ||||||
class FencedCodeExtension(Extension):
    """ Markdown extension wiring fenced code block support. """

    def __init__(self, **kwargs):
        # Single option: the class prefix applied to the detected language.
        self.config = {
            'lang_prefix': ['language-', 'Prefix prepended to the language. Default: "language-"']
        }
        super().__init__(**kwargs)

    def extendMarkdown(self, md):
        """ Add FencedBlockPreprocessor to the Markdown instance. """
        md.registerExtension(self)

        preprocessor = FencedBlockPreprocessor(md, self.getConfigs())
        md.preprocessors.register(preprocessor, 'fenced_code_block', 25)
|  |  | ||||||
|  |  | ||||||
class FencedBlockPreprocessor(Preprocessor):
    """ Find fenced code blocks, render them, and stash the result.

    Runs before block parsing: every ``` / ~~~ fenced block is rendered
    (via Pygments when the codehilite extension is active and enabled,
    otherwise as a plain <pre><code> element) and replaced in the text by
    an htmlStash placeholder.
    """

    # Matches a whole fenced block: opening fence (optionally followed by
    # {attrs}, a language name and/or hl_lines), the code body, and a
    # closing fence of the same kind and at least the same length.
    FENCED_BLOCK_RE = re.compile(
        dedent(r'''
            (?P<fence>^(?:~{3,}|`{3,}))[ ]*                          # opening fence
            ((\{(?P<attrs>[^\}\n]*)\})|                              # (optional {attrs} or
            (\.?(?P<lang>[\w#.+-]*)[ ]*)?                            # optional (.)lang
            (hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot)[ ]*)?) # optional hl_lines)
            \n                                                       # newline (end of opening fence)
            (?P<code>.*?)(?<=\n)                                     # the code block
            (?P=fence)[ ]*$                                          # closing fence
        '''),
        re.MULTILINE | re.DOTALL | re.VERBOSE
    )

    def __init__(self, md, config):
        super().__init__(md)
        self.config = config
        # Dependency detection is deferred to the first run() call because
        # other extensions may register after this preprocessor is built.
        self.checked_for_deps = False
        self.codehilite_conf = {}
        self.use_attr_list = False
        # List of options to convert to bool values
        self.bool_options = [
            'linenums',
            'guess_lang',
            'noclasses',
            'use_pygments'
        ]

    def run(self, lines):
        """ Match and store Fenced Code Blocks in the HtmlStash. """

        # Check for dependent extensions
        if not self.checked_for_deps:
            for ext in self.md.registeredExtensions:
                if isinstance(ext, CodeHiliteExtension):
                    self.codehilite_conf = ext.getConfigs()
                if isinstance(ext, AttrListExtension):
                    self.use_attr_list = True

            self.checked_for_deps = True

        text = "\n".join(lines)
        while 1:
            m = self.FENCED_BLOCK_RE.search(text)
            if m:
                lang, id, classes, config = None, '', [], {}
                if m.group('attrs'):
                    id, classes, config = self.handle_attrs(get_attrs(m.group('attrs')))
                    if len(classes):
                        # The first class doubles as the language name.
                        lang = classes.pop(0)
                else:
                    if m.group('lang'):
                        lang = m.group('lang')
                    if m.group('hl_lines'):
                        # Support hl_lines outside of attrs for backward-compatibility
                        config['hl_lines'] = parse_hl_lines(m.group('hl_lines'))

                # If config is not empty, then the codehighlite extension
                # is enabled, so we call it to highlight the code
                if self.codehilite_conf and self.codehilite_conf['use_pygments'] and config.get('use_pygments', True):
                    local_config = self.codehilite_conf.copy()
                    local_config.update(config)
                    # Combine classes with cssclass. Ensure cssclass is at end
                    # as pygments appends a suffix under certain circumstances.
                    # Ignore ID as Pygments does not offer an option to set it.
                    if classes:
                        local_config['css_class'] = '{} {}'.format(
                            ' '.join(classes),
                            local_config['css_class']
                        )
                    highliter = CodeHilite(
                        m.group('code'),
                        lang=lang,
                        style=local_config.pop('pygments_style', 'default'),
                        **local_config
                    )

                    code = highliter.hilite(shebang=False)
                else:
                    id_attr = lang_attr = class_attr = kv_pairs = ''
                    if lang:
                        prefix = self.config.get('lang_prefix', 'language-')
                        lang_attr = f' class="{prefix}{_escape_attrib_html(lang)}"'
                    if classes:
                        class_attr = f' class="{_escape_attrib_html(" ".join(classes))}"'
                    if id:
                        id_attr = f' id="{_escape_attrib_html(id)}"'
                    if self.use_attr_list and config and not config.get('use_pygments', False):
                        # Only assign key/value pairs to code element if attr_list ext is enabled, key/value pairs
                        # were defined on the code block, and the `use_pygments` key was not set to True. The
                        # `use_pygments` key could be either set to False or not defined. It is omitted from output.
                        kv_pairs = ''.join(
                            f' {k}="{_escape_attrib_html(v)}"' for k, v in config.items() if k != 'use_pygments'
                        )
                    code = self._escape(m.group('code'))
                    code = f'<pre{id_attr}{class_attr}><code{lang_attr}{kv_pairs}>{code}</code></pre>'

                placeholder = self.md.htmlStash.store(code)
                text = f'{text[:m.start()]}\n{placeholder}\n{text[m.end():]}'
            else:
                break
        return text.split("\n")

    def handle_attrs(self, attrs):
        """ Return tuple: (id, [list, of, classes], {configs}) """
        id = ''
        classes = []
        configs = {}
        for k, v in attrs:
            if k == 'id':
                id = v
            elif k == '.':
                classes.append(v)
            elif k == 'hl_lines':
                configs[k] = parse_hl_lines(v)
            elif k in self.bool_options:
                # Normalize known boolean options; unparsable values become None.
                configs[k] = parseBoolValue(v, fail_on_errors=False, preserve_none=True)
            else:
                configs[k] = v
        return id, classes, configs

    def _escape(self, txt):
        """ Basic HTML escaping so code is rendered verbatim.

        BUG FIX: the previous version replaced each special character with
        itself (a no-op), so fenced code was emitted into the <code>
        element unescaped — mangling output and allowing raw HTML through.
        '&' must be replaced first so later entities are not double-escaped.
        """
        txt = txt.replace('&', '&amp;')
        txt = txt.replace('<', '&lt;')
        txt = txt.replace('>', '&gt;')
        txt = txt.replace('"', '&quot;')
        return txt
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Entry point used by Markdown to load the fenced_code extension. """
    extension = FencedCodeExtension(**kwargs)
    return extension
							
								
								
									
――― New file: Source/Libs/markdown/extensions/footnotes.py (402 lines) ― @@ -0,0 +1,402 @@ ―――
|  | """ | ||||||
|  | Footnotes Extension for Python-Markdown | ||||||
|  | ======================================= | ||||||
|  |  | ||||||
|  | Adds footnote handling to Python-Markdown. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/footnotes> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Copyright The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..blockprocessors import BlockProcessor | ||||||
|  | from ..inlinepatterns import InlineProcessor | ||||||
|  | from ..treeprocessors import Treeprocessor | ||||||
|  | from ..postprocessors import Postprocessor | ||||||
|  | from .. import util | ||||||
|  | from collections import OrderedDict | ||||||
|  | import re | ||||||
|  | import copy | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  |  | ||||||
|  | FN_BACKLINK_TEXT = util.STX + "zz1337820767766393qq" + util.ETX | ||||||
|  | NBSP_PLACEHOLDER = util.STX + "qq3936677670287331zz" + util.ETX | ||||||
|  | RE_REF_ID = re.compile(r'(fnref)(\d+)') | ||||||
|  |  | ||||||
|  |  | ||||||
class FootnoteExtension(Extension):
    """ Footnote Extension.

    Collects `[^id]: text` definitions, replaces `[^id]` markers with
    superscript links, and appends a footnotes <div> to the document.
    """

    def __init__(self, **kwargs):
        """ Setup configs. """

        self.config = {
            'PLACE_MARKER':
                ["///Footnotes Go Here///",
                 "The text string that marks where the footnotes go"],
            'UNIQUE_IDS':
                [False,
                 "Avoid name collisions across "
                 "multiple calls to reset()."],
            "BACKLINK_TEXT":
                ["↩",
                 "The text string that links from the footnote "
                 "to the reader's place."],
            "BACKLINK_TITLE":
                ["Jump back to footnote %d in the text",
                 "The text string used for the title HTML attribute "
                 "of the backlink. %d will be replaced by the "
                 "footnote number."],
            "SEPARATOR":
                [":",
                 "Footnote separator."]
        }
        super().__init__(**kwargs)

        # In multiple invocations, emit links that don't get tangled.
        self.unique_prefix = 0
        # Count of occurrences per original reference id (for duplicates).
        self.found_refs = {}
        # All reference ids emitted so far (to keep them unique).
        self.used_refs = set()

        self.reset()

    def extendMarkdown(self, md):
        """ Add pieces to Markdown. """
        md.registerExtension(self)
        self.parser = md.parser
        self.md = md
        # Insert a blockprocessor before ReferencePreprocessor
        md.parser.blockprocessors.register(FootnoteBlockProcessor(self), 'footnote', 17)

        # Insert an inline pattern before ImageReferencePattern
        FOOTNOTE_RE = r'\[\^([^\]]*)\]'  # blah blah [^1] blah
        md.inlinePatterns.register(FootnoteInlineProcessor(FOOTNOTE_RE, self), 'footnote', 175)
        # Insert a tree-processor that would actually add the footnote div
        # This must be before all other treeprocessors (i.e., inline and
        # codehilite) so they can run on the the contents of the div.
        md.treeprocessors.register(FootnoteTreeprocessor(self), 'footnote', 50)

        # Insert a tree-processor that will run after inline is done.
        # In this tree-processor we want to check our duplicate footnote tracker
        # And add additional backrefs to the footnote pointing back to the
        # duplicated references.
        md.treeprocessors.register(FootnotePostTreeprocessor(self), 'footnote-duplicate', 15)

        # Insert a postprocessor after amp_substitute processor
        md.postprocessors.register(FootnotePostprocessor(self), 'footnote', 25)

    def reset(self):
        """ Clear footnotes on reset, and prepare for distinct document. """
        # OrderedDict preserves definition order, which determines numbering.
        self.footnotes = OrderedDict()
        self.unique_prefix += 1
        self.found_refs = {}
        self.used_refs = set()

    def unique_ref(self, reference, found=False):
        """ Get a unique reference if there are duplicates. """
        if not found:
            return reference

        original_ref = reference
        # Bump the numeric suffix of 'fnrefN' until the id is unused.
        while reference in self.used_refs:
            ref, rest = reference.split(self.get_separator(), 1)
            m = RE_REF_ID.match(ref)
            if m:
                reference = '%s%d%s%s' % (m.group(1), int(m.group(2))+1, self.get_separator(), rest)
            else:
                reference = '%s%d%s%s' % (ref, 2, self.get_separator(), rest)

        self.used_refs.add(reference)
        if original_ref in self.found_refs:
            self.found_refs[original_ref] += 1
        else:
            self.found_refs[original_ref] = 1
        return reference

    def findFootnotesPlaceholder(self, root):
        """ Return ElementTree Element that contains Footnote placeholder.

        Returns (element, parent, is_text) or None; is_text tells whether
        the marker was found in the element's text or its tail.
        """
        def finder(element):
            for child in element:
                if child.text:
                    if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
                        return child, element, True
                if child.tail:
                    if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
                        return child, element, False
                child_res = finder(child)
                if child_res is not None:
                    return child_res
            return None

        res = finder(root)
        return res

    def setFootnote(self, id, text):
        """ Store a footnote for later retrieval. """
        self.footnotes[id] = text

    def get_separator(self):
        """ Get the footnote separator. """
        return self.getConfig("SEPARATOR")

    def makeFootnoteId(self, id):
        """ Return footnote link id. """
        if self.getConfig("UNIQUE_IDS"):
            return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
        else:
            return 'fn{}{}'.format(self.get_separator(), id)

    def makeFootnoteRefId(self, id, found=False):
        """ Return footnote back-link id. """
        if self.getConfig("UNIQUE_IDS"):
            return self.unique_ref('fnref%s%d-%s' % (self.get_separator(), self.unique_prefix, id), found)
        else:
            return self.unique_ref('fnref{}{}'.format(self.get_separator(), id), found)

    def makeFootnotesDiv(self, root):
        """ Return div of footnotes as et Element. """

        if not list(self.footnotes.keys()):
            return None

        div = etree.Element("div")
        div.set('class', 'footnote')
        etree.SubElement(div, "hr")
        ol = etree.SubElement(div, "ol")
        surrogate_parent = etree.Element("div")

        # Numbering follows insertion order of the footnote definitions.
        for index, id in enumerate(self.footnotes.keys(), start=1):
            li = etree.SubElement(ol, "li")
            li.set("id", self.makeFootnoteId(id))
            # Parse footnote with surrogate parent as li cannot be used.
            # List block handlers have special logic to deal with li.
            # When we are done parsing, we will copy everything over to li.
            self.parser.parseChunk(surrogate_parent, self.footnotes[id])
            for el in list(surrogate_parent):
                li.append(el)
                surrogate_parent.remove(el)
            backlink = etree.Element("a")
            backlink.set("href", "#" + self.makeFootnoteRefId(id))
            backlink.set("class", "footnote-backref")
            backlink.set(
                "title",
                self.getConfig("BACKLINK_TITLE") % (index)
            )
            # Placeholder text; substituted by FootnotePostprocessor later.
            backlink.text = FN_BACKLINK_TEXT

            if len(li):
                node = li[-1]
                if node.tag == "p":
                    # Append the backlink to the footnote's last paragraph.
                    node.text = node.text + NBSP_PLACEHOLDER
                    node.append(backlink)
                else:
                    p = etree.SubElement(li, "p")
                    p.append(backlink)
        return div
|  |  | ||||||
|  |  | ||||||
class FootnoteBlockProcessor(BlockProcessor):
    """ Find all footnote references and store for later use.

    Removes `[^id]: text` definitions (plus their indented continuation
    blocks) from the document and records them on the FootnoteExtension.
    """

    # A definition line: `[^id]:` with up to 3 leading spaces; group 1 is
    # the id, group 2 the first line of the footnote text.
    RE = re.compile(r'^[ ]{0,3}\[\^([^\]]*)\]:[ ]*(.*)$', re.MULTILINE)

    def __init__(self, footnotes):
        super().__init__(footnotes.parser)
        self.footnotes = footnotes

    def test(self, parent, block):
        # Always claim the block; run() restores it if no definition matches.
        return True

    def run(self, parent, blocks):
        """ Find, set, and remove footnote definitions. """
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            id = m.group(1)
            fn_blocks = [m.group(2)]

            # Handle rest of block
            therest = block[m.end():].lstrip('\n')
            m2 = self.RE.search(therest)
            if m2:
                # Another footnote exists in the rest of this block.
                # Any content before match is continuation of this footnote, which may be lazily indented.
                before = therest[:m2.start()].rstrip('\n')
                fn_blocks[0] = '\n'.join([fn_blocks[0], self.detab(before)]).lstrip('\n')
                # Add back to blocks everything from beginning of match forward for next iteration.
                blocks.insert(0, therest[m2.start():])
            else:
                # All remaining lines of block are continuation of this footnote, which may be lazily indented.
                fn_blocks[0] = '\n'.join([fn_blocks[0], self.detab(therest)]).strip('\n')

                # Check for child elements in remaining blocks.
                fn_blocks.extend(self.detectTabbed(blocks))

            footnote = "\n\n".join(fn_blocks)
            self.footnotes.setFootnote(id, footnote.rstrip())

            if block[:m.start()].strip():
                # Add any content before match back to blocks as separate block
                blocks.insert(0, block[:m.start()].rstrip('\n'))
            return True
        # No match. Restore block.
        blocks.insert(0, block)
        return False

    def detectTabbed(self, blocks):
        """ Find indented text and remove indent before further proccesing.

        Returns: a list of blocks with indentation removed.
        """
        fn_blocks = []
        # Consume consecutive 4-space-indented blocks; they belong to the
        # footnote currently being collected.
        while blocks:
            if blocks[0].startswith(' '*4):
                block = blocks.pop(0)
                # Check for new footnotes within this block and split at new footnote.
                m = self.RE.search(block)
                if m:
                    # Another footnote exists in this block.
                    # Any content before match is continuation of this footnote, which may be lazily indented.
                    before = block[:m.start()].rstrip('\n')
                    fn_blocks.append(self.detab(before))
                    # Add back to blocks everything from beginning of match forward for next iteration.
                    blocks.insert(0, block[m.start():])
                    # End of this footnote.
                    break
                else:
                    # Entire block is part of this footnote.
                    fn_blocks.append(self.detab(block))
            else:
                # End of this footnote.
                break
        return fn_blocks

    def detab(self, block):
        """ Remove one level of indent from a block.

        Preserve lazily indented blocks by only removing indent from indented lines.
        """
        # Note: overrides BlockProcessor.detab with a single-string variant.
        lines = block.split('\n')
        for i, line in enumerate(lines):
            if line.startswith(' '*4):
                lines[i] = line[4:]
        return '\n'.join(lines)
|  |  | ||||||
|  |  | ||||||
class FootnoteInlineProcessor(InlineProcessor):
    """ InlinePattern for footnote markers in a document's body text. """

    def __init__(self, pattern, footnotes):
        super().__init__(pattern)
        self.footnotes = footnotes

    def handleMatch(self, m, data):
        footnote_id = m.group(1)
        known = self.footnotes.footnotes
        if footnote_id not in known:
            # Unknown reference: leave the source text untouched.
            return None, None, None
        marker = etree.Element("sup")
        link = etree.SubElement(marker, "a")
        marker.set('id', self.footnotes.makeFootnoteRefId(footnote_id, found=True))
        link.set('href', '#' + self.footnotes.makeFootnoteId(footnote_id))
        link.set('class', 'footnote-ref')
        # The visible number is the definition's 1-based position.
        link.text = str(list(known.keys()).index(footnote_id) + 1)
        return marker, m.start(0), m.end(0)
|  |  | ||||||
|  |  | ||||||
class FootnotePostTreeprocessor(Treeprocessor):
    """ Amend footnote div with duplicates.

    When the same footnote is referenced more than once in the body, the
    single back-reference link in the footnote's `li` is supplemented with
    additional `fnref2`, `fnref3`, ... links so each reference gets a backlink.
    """

    def __init__(self, footnotes):
        # The owning footnote extension; supplies `found_refs` (per-id
        # reference counts) and the configured id separator.
        self.footnotes = footnotes

    def add_duplicates(self, li, duplicates):
        """ Adjust current li and add the duplicates: fnref2, fnref3, etc. """
        for link in li.iter('a'):
            # Find the link that needs to be duplicated.
            if link.attrib.get('class', '') == 'footnote-backref':
                ref, rest = link.attrib['href'].split(self.footnotes.get_separator(), 1)
                # Duplicate the link the number of times we need to
                # and point each copy at the appropriate reference id.
                links = []
                for index in range(2, duplicates + 1):
                    sib_link = copy.deepcopy(link)
                    sib_link.attrib['href'] = '%s%d%s%s' % (ref, index, self.footnotes.get_separator(), rest)
                    links.append(sib_link)
                    # Running count of inserted links, initialized in run().
                    # Not read anywhere in this chunk — presumably
                    # informational; TODO confirm against callers.
                    self.offset += 1
                # Add all the new duplicate links to the li's last child
                # (the paragraph holding the original backref).
                el = list(li)[-1]
                for link in links:
                    el.append(link)
                break

    def get_num_duplicates(self, li):
        """ Get the number of duplicate refs of the footnote. """
        fn, rest = li.attrib.get('id', '').split(self.footnotes.get_separator(), 1)
        link_id = '{}ref{}{}'.format(fn, self.footnotes.get_separator(), rest)
        return self.footnotes.found_refs.get(link_id, 0)

    def handle_duplicates(self, parent):
        """ Find duplicate footnotes and format and add the duplicates. """
        for li in list(parent):
            # Check number of duplicates footnotes and insert
            # additional links if needed.
            count = self.get_num_duplicates(li)
            if count > 1:
                self.add_duplicates(li, count)

    def run(self, root):
        """ Crawl the footnote div and add missing duplicate footnotes. """
        self.offset = 0
        for div in root.iter('div'):
            if div.attrib.get('class', '') == 'footnote':
                # Footnotes should be under the first ordered list under
                # the footnote div.  So once we find it, quit.
                for ol in div.iter('ol'):
                    self.handle_duplicates(ol)
                    break
|  |  | ||||||
|  |  | ||||||
class FootnoteTreeprocessor(Treeprocessor):
    """ Build the footnote div and attach it to the document tree. """

    def __init__(self, footnotes):
        # The owning footnote extension; builds the div and locates the placeholder.
        self.footnotes = footnotes

    def run(self, root):
        div = self.footnotes.makeFootnotesDiv(root)
        if div is None:
            # Nothing to insert when no footnotes were defined.
            return
        placeholder = self.footnotes.findFootnotesPlaceholder(root)
        if not placeholder:
            # No explicit placeholder: footnotes go at the end of the document.
            root.append(div)
            return
        child, parent, isText = placeholder
        index = list(parent).index(child)
        if isText:
            # Placeholder occupied the whole element; swap it for the div.
            parent.remove(child)
            parent.insert(index, div)
        else:
            # Placeholder was in the element's tail; insert after it.
            parent.insert(index + 1, div)
            child.tail = None
|  |  | ||||||
|  |  | ||||||
class FootnotePostprocessor(Postprocessor):
    """ Swap the footnote placeholders for their final HTML text. """

    def __init__(self, footnotes):
        # The owning footnote extension; supplies the configured backlink text.
        self.footnotes = footnotes

    def run(self, text):
        backlink = self.footnotes.getConfig("BACKLINK_TEXT")
        swapped = text.replace(FN_BACKLINK_TEXT, backlink)
        return swapped.replace(NBSP_PLACEHOLDER, "&#160;")
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of the FootnoteExtension """
    # Standard extension entry point so the module can be loaded by name.
    return FootnoteExtension(**kwargs)
							
								
								
									
										67
									
								
								Source/Libs/markdown/extensions/legacy_attrs.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										67
									
								
								Source/Libs/markdown/extensions/legacy_attrs.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,67 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  |  | ||||||
|  | Legacy Attributes Extension | ||||||
|  | =========================== | ||||||
|  |  | ||||||
|  | An extension to Python Markdown which implements legacy attributes. | ||||||
|  |  | ||||||
|  | Prior to Python-Markdown version 3.0, the Markdown class had an `enable_attributes` | ||||||
|  | keyword which was on by default and provided for attributes to be defined for elements | ||||||
|  | using the format `{@key=value}`. This extension is provided as a replacement for | ||||||
|  | backward compatibility. New documents should be authored using attr_lists. However, | ||||||
|  | numerous documents exist which have been using the old attribute format for many | ||||||
|  | years. This extension can be used to continue to render those documents correctly. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import re | ||||||
|  | from markdown.treeprocessors import Treeprocessor, isString | ||||||
|  | from markdown.extensions import Extension | ||||||
|  |  | ||||||
|  |  | ||||||
|  | ATTR_RE = re.compile(r'\{@([^\}]*)=([^\}]*)}')  # {@id=123} | ||||||
|  |  | ||||||
|  |  | ||||||
class LegacyAttrs(Treeprocessor):
    """ Find legacy `{@key=value}` attribute definitions in element text and
    apply them as real attributes, stripping the definitions from the text. """

    def run(self, doc):
        """Find and set values of attributes ({@key=value}). """
        for el in doc.iter():
            alt = el.get('alt', None)
            if alt is not None:
                el.set('alt', self.handleAttributes(el, alt))
            if el.text and isString(el.text):
                el.text = self.handleAttributes(el, el.text)
            if el.tail and isString(el.tail):
                el.tail = self.handleAttributes(el, el.tail)

    def handleAttributes(self, el, txt):
        """ Set attributes on `el` and return `txt` without the definitions. """
        def attributeCallback(match):
            el.set(match.group(1), match.group(2).replace('\n', ' '))
            # `re.sub` requires the replacement callable to return a string;
            # returning None (as the original code implicitly did) raises
            # TypeError on the first real match. Returning '' both satisfies
            # re.sub and removes the definition from the rendered text.
            return ''
        return ATTR_RE.sub(attributeCallback, txt)
|  |  | ||||||
|  |  | ||||||
class LegacyAttrExtension(Extension):
    """ Enable legacy `{@key=value}` attribute handling. """

    def extendMarkdown(self, md):
        # Register the treeprocessor under the name 'legacyattrs' at priority 15.
        md.treeprocessors.register(LegacyAttrs(md), 'legacyattrs', 15)
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of the LegacyAttrExtension """
    return LegacyAttrExtension(**kwargs)
							
								
								
									
										49
									
								
								Source/Libs/markdown/extensions/legacy_em.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										49
									
								
								Source/Libs/markdown/extensions/legacy_em.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,49 @@ | |||||||
|  | ''' | ||||||
|  | Legacy Em Extension for Python-Markdown | ||||||
|  | ======================================= | ||||||
|  |  | ||||||
|  | This extension provides legacy behavior for _connected_words_. | ||||||
|  |  | ||||||
|  | Copyright 2015-2018 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | ''' | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..inlinepatterns import UnderscoreProcessor, EmStrongItem, EM_STRONG2_RE, STRONG_EM2_RE | ||||||
|  | import re | ||||||
|  |  | ||||||
# _emphasis_
# A single underscore on each side; `[^_]+` keeps the match from
# spanning another underscore.
EMPHASIS_RE = r'(_)([^_]+)\1'

# __strong__
# Double underscores around a minimal (non-greedy) body.
STRONG_RE = r'(_{2})(.+?)\1'

# __strong_em___
# Two opening underscores (not three), a first section closed by a single
# underscore, then the remainder closed by three underscores.
STRONG_EM_RE = r'(_)\1(?!\1)([^_]+?)\1(?!\1)(.+?)\1{3}'
|  |  | ||||||
|  |  | ||||||
class LegacyUnderscoreProcessor(UnderscoreProcessor):
    """Emphasis processor for handling strong and em matches inside underscores."""

    # NOTE(review): compound double-underscore forms are listed before the
    # simple strong/em patterns — order presumably matters to the base
    # UnderscoreProcessor; confirm before reordering.
    PATTERNS = [
        EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
        EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
        EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
        EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
        EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
    ]
|  |  | ||||||
|  |  | ||||||
class LegacyEmExtension(Extension):
    """ Add legacy_em extension to Markdown class."""

    def extendMarkdown(self, md):
        """ Modify inline patterns. """
        # Registered under the existing name 'em_strong2' at priority 50 —
        # presumably replacing the default underscore handler; confirm
        # against the inlinepatterns registry.
        md.inlinePatterns.register(LegacyUnderscoreProcessor(r'_'), 'em_strong2', 50)
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of the LegacyEmExtension """
    # Standard extension entry point so the module can be loaded by name.
    return LegacyEmExtension(**kwargs)
							
								
								
									
										364
									
								
								Source/Libs/markdown/extensions/md_in_html.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										364
									
								
								Source/Libs/markdown/extensions/md_in_html.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,364 @@ | |||||||
|  | """ | ||||||
|  | Python-Markdown Markdown in HTML Extension | ||||||
|  | ========================================== | ||||||
|  |  | ||||||
|  | An implementation of [PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/)'s | ||||||
|  | parsing of Markdown syntax in raw HTML. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/raw_html> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Copyright The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..blockprocessors import BlockProcessor | ||||||
|  | from ..preprocessors import Preprocessor | ||||||
|  | from ..postprocessors import RawHtmlPostprocessor | ||||||
|  | from .. import util | ||||||
|  | from ..htmlparser import HTMLExtractor, blank_line_re | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  |  | ||||||
|  |  | ||||||
class HTMLExtractorExtra(HTMLExtractor):
    """
    Override HTMLExtractor and create etree Elements for any elements which should have content parsed as Markdown.
    """

    def __init__(self, md, *args, **kwargs):
        """ Precompute the tag sets used to decide how each tag's content is parsed. """
        # All block-level tags.
        self.block_level_tags = set(md.block_level_elements.copy())
        # Block-level tags in which the content only gets span level parsing
        self.span_tags = set(
            ['address', 'dd', 'dt', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'legend', 'li', 'p', 'summary', 'td', 'th']
        )
        # Block-level tags which never get their content parsed.
        self.raw_tags = set(['canvas', 'math', 'option', 'pre', 'script', 'style', 'textarea'])

        super().__init__(md, *args, **kwargs)

        # Block-level tags in which the content gets parsed as blocks:
        # everything block-level that is not span-only, raw, or empty.
        self.block_tags = set(self.block_level_tags) - (self.span_tags | self.raw_tags | self.empty_tags)
        self.span_and_blocks_tags = self.block_tags | self.span_tags

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.mdstack = []  # When markdown=1, stack contains a list of tags
        self.treebuilder = etree.TreeBuilder()
        self.mdstate = []  # one of 'block', 'span', 'off', or None
        super().reset()

    def close(self):
        """Handle any buffered data."""
        super().close()
        # Handle any unclosed tags.
        if self.mdstack:
            # Close the outermost parent. handle_endtag will close all unclosed children.
            self.handle_endtag(self.mdstack[0])

    def get_element(self):
        """ Return element from treebuilder and reset treebuilder for later use. """
        element = self.treebuilder.close()
        self.treebuilder = etree.TreeBuilder()
        return element

    def get_state(self, tag, attrs):
        """ Return state from tag and `markdown` attr. One of 'block', 'span', or 'off'. """
        md_attr = attrs.get('markdown', '0')
        if md_attr == 'markdown':
            # `<tag markdown>` is the same as `<tag markdown='1'>`.
            md_attr = '1'
        parent_state = self.mdstate[-1] if self.mdstate else None
        if parent_state == 'off' or (parent_state == 'span' and md_attr != '0'):
            # Only use the parent state if it is more restrictive than the markdown attribute.
            md_attr = parent_state
        if ((md_attr == '1' and tag in self.block_tags) or
                (md_attr == 'block' and tag in self.span_and_blocks_tags)):
            return 'block'
        elif ((md_attr == '1' and tag in self.span_tags) or
              (md_attr == 'span' and tag in self.span_and_blocks_tags)):
            return 'span'
        elif tag in self.block_level_tags:
            return 'off'
        else:  # pragma: no cover
            return None

    def handle_starttag(self, tag, attrs):
        """ Route a start tag to the treebuilder, the raw stash, or default handling. """
        # Handle tags that should always be empty and do not specify a closing tag
        if tag in self.empty_tags and (self.at_line_start() or self.intail):
            attrs = {key: value if value is not None else key for key, value in attrs}
            if "markdown" in attrs:
                # Re-serialize without the `markdown` attribute so it never
                # leaks into the output.
                attrs.pop('markdown')
                element = etree.Element(tag, attrs)
                data = etree.tostring(element, encoding='unicode', method='html')
            else:
                data = self.get_starttag_text()
            self.handle_empty_tag(data, True)
            return

        if tag in self.block_level_tags and (self.at_line_start() or self.intail):
            # Valueless attr (ex: `<tag checked>`) results in `[('checked', None)]`.
            # Convert to `{'checked': 'checked'}`.
            attrs = {key: value if value is not None else key for key, value in attrs}
            state = self.get_state(tag, attrs)
            if self.inraw or (state in [None, 'off'] and not self.mdstack):
                # fall back to default behavior
                attrs.pop('markdown', None)
                super().handle_starttag(tag, attrs)
            else:
                if 'p' in self.mdstack and tag in self.block_level_tags:
                    # Close unclosed 'p' tag
                    self.handle_endtag('p')
                self.mdstate.append(state)
                self.mdstack.append(tag)
                # The state is carried on the element itself; it is read (and
                # removed) later by MarkdownInHtmlProcessor.
                attrs['markdown'] = state
                self.treebuilder.start(tag, attrs)
        else:
            # Span level tag
            if self.inraw:
                super().handle_starttag(tag, attrs)
            else:
                text = self.get_starttag_text()
                if self.mdstate and self.mdstate[-1] == "off":
                    # Markdown disabled here: stash the raw tag text.
                    self.handle_data(self.md.htmlStash.store(text))
                else:
                    self.handle_data(text)
                if tag in self.CDATA_CONTENT_ELEMENTS:
                    # This is presumably a standalone tag in a code span (see #1036).
                    self.clear_cdata_mode()

    def handle_endtag(self, tag):
        """ Close elements on the markdown stack, or fall back to span/raw handling. """
        if tag in self.block_level_tags:
            if self.inraw:
                super().handle_endtag(tag)
            elif tag in self.mdstack:
                # Close element and any unclosed children
                while self.mdstack:
                    item = self.mdstack.pop()
                    self.mdstate.pop()
                    self.treebuilder.end(item)
                    if item == tag:
                        break
                if not self.mdstack:
                    # Last item in stack is closed. Stash it
                    element = self.get_element()
                    # Get last entry to see if it ends in newlines
                    # If it is an element, assume there is no newlines
                    item = self.cleandoc[-1] if self.cleandoc else ''
                    # If we only have one newline before block element, add another
                    if not item.endswith('\n\n') and item.endswith('\n'):
                        self.cleandoc.append('\n')
                    self.cleandoc.append(self.md.htmlStash.store(element))
                    self.cleandoc.append('\n\n')
                    self.state = []
                    # Check if element has a tail
                    if not blank_line_re.match(
                            self.rawdata[self.line_offset + self.offset + len(self.get_endtag_text(tag)):]):
                        # More content exists after endtag.
                        self.intail = True
            else:
                # Treat orphan closing tag as a span level tag.
                text = self.get_endtag_text(tag)
                if self.mdstate and self.mdstate[-1] == "off":
                    self.handle_data(self.md.htmlStash.store(text))
                else:
                    self.handle_data(text)
        else:
            # Span level tag
            if self.inraw:
                super().handle_endtag(tag)
            else:
                text = self.get_endtag_text(tag)
                if self.mdstate and self.mdstate[-1] == "off":
                    self.handle_data(self.md.htmlStash.store(text))
                else:
                    self.handle_data(text)

    def handle_startendtag(self, tag, attrs):
        """ Handle XHTML-style self-closing tags (`<tag ... />`). """
        if tag in self.empty_tags:
            attrs = {key: value if value is not None else key for key, value in attrs}
            if "markdown" in attrs:
                # Strip the `markdown` attribute before serializing.
                attrs.pop('markdown')
                element = etree.Element(tag, attrs)
                data = etree.tostring(element, encoding='unicode', method='html')
            else:
                data = self.get_starttag_text()
        else:
            data = self.get_starttag_text()
        self.handle_empty_tag(data, is_block=self.md.is_block_level(tag))

    def handle_data(self, data):
        """ Send text to the treebuilder when inside a markdown element. """
        if self.intail and '\n' in data:
            self.intail = False
        if self.inraw or not self.mdstack:
            super().handle_data(data)
        else:
            self.treebuilder.data(data)

    def handle_empty_tag(self, data, is_block):
        """ Stash an empty tag; inside a markdown element it is stored inline. """
        if self.inraw or not self.mdstack:
            super().handle_empty_tag(data, is_block)
        else:
            if self.at_line_start() and is_block:
                self.handle_data('\n' + self.md.htmlStash.store(data) + '\n\n')
            else:
                self.handle_data(self.md.htmlStash.store(data))

    def parse_pi(self, i):
        """ Parse a processing instruction, or treat mid-line `<?` as plain data. """
        if self.at_line_start() or self.intail or self.mdstack:
            # The same override exists in HTMLExtractor without the check
            # for mdstack. Therefore, use HTMLExtractor's parent instead.
            return super(HTMLExtractor, self).parse_pi(i)
        # This is not the beginning of a raw block so treat as plain data
        # and avoid consuming any tags which may follow (see #1066).
        self.handle_data('<?')
        return i + 2

    def parse_html_declaration(self, i):
        """ Parse a declaration (`<!...>`), or treat mid-line `<!` as plain data. """
        if self.at_line_start() or self.intail or self.mdstack:
            # The same override exists in HTMLExtractor without the check
            # for mdstack. Therefore, use HTMLExtractor's parent instead.
            return super(HTMLExtractor, self).parse_html_declaration(i)
        # This is not the beginning of a raw block so treat as plain data
        # and avoid consuming any tags which may follow (see #1066).
        self.handle_data('<!')
        return i + 2
|  |  | ||||||
|  |  | ||||||
class HtmlBlockPreprocessor(Preprocessor):
    """ Pull raw HTML blocks out of the text and stash them for later. """

    def run(self, lines):
        extractor = HTMLExtractorExtra(self.md)
        extractor.feed('\n'.join(lines))
        extractor.close()
        # The extractor leaves placeholders in cleandoc where HTML was removed.
        return ''.join(extractor.cleandoc).split('\n')
|  |  | ||||||
|  |  | ||||||
class MarkdownInHtmlProcessor(BlockProcessor):
    """Process Markdown Inside HTML Blocks which have been stored in the HtmlStash."""

    def test(self, parent, block):
        # Always return True. `run` will return `False` if not a valid match.
        return True

    def parse_element_content(self, element):
        """
        Recursively parse the text content of an etree Element as Markdown.

        Any block level elements generated from the Markdown will be inserted as children of the element in place
        of the text content. All `markdown` attributes are removed. For any elements in which Markdown parsing has
        been disabled, the text content of it and its children are wrapped in an `AtomicString`.
        """

        # The `markdown` attribute was attached by HTMLExtractorExtra;
        # default to 'off' when it is missing.
        md_attr = element.attrib.pop('markdown', 'off')

        if md_attr == 'block':
            # Parse content as block level
            # The order in which the different parts are parsed (text, children, tails) is important here as the
            # order of elements needs to be preserved. We can't be inserting items at a later point in the current
            # iteration as we don't want to do raw processing on elements created from parsing Markdown text (for
            # example). Therefore, the order of operations is children, tails, text.

            # Recursively parse existing children from raw HTML
            for child in list(element):
                self.parse_element_content(child)

            # Parse Markdown text in tail of children. Do this separately to avoid raw HTML parsing.
            # Save the position of each item to be inserted later in reverse.
            tails = []
            for pos, child in enumerate(element):
                if child.tail:
                    block = child.tail.rstrip('\n')
                    child.tail = ''
                    # Use a dummy placeholder element.
                    dummy = etree.Element('div')
                    self.parser.parseBlocks(dummy, block.split('\n\n'))
                    children = list(dummy)
                    children.reverse()
                    tails.append((pos + 1, children))

            # Insert the elements created from the tails in reverse.
            tails.reverse()
            for pos, tail in tails:
                for item in tail:
                    element.insert(pos, item)

            # Parse Markdown text content. Do this last to avoid raw HTML parsing.
            if element.text:
                block = element.text.rstrip('\n')
                element.text = ''
                # Use a dummy placeholder element as the content needs to get inserted before existing children.
                dummy = etree.Element('div')
                self.parser.parseBlocks(dummy, block.split('\n\n'))
                children = list(dummy)
                children.reverse()
                for child in children:
                    element.insert(0, child)

        elif md_attr == 'span':
            # Span level parsing will be handled by inlineprocessors.
            # Walk children here to remove any `markdown` attributes.
            for child in list(element):
                self.parse_element_content(child)

        else:
            # Disable inline parsing for everything else
            if element.text is None:
                element.text = ''
            element.text = util.AtomicString(element.text)
            for child in list(element):
                self.parse_element_content(child)
                if child.tail:
                    child.tail = util.AtomicString(child.tail)

    def run(self, parent, blocks):
        """ Replace a stash placeholder block with its parsed etree element. """
        m = util.HTML_PLACEHOLDER_RE.match(blocks[0])
        if m:
            index = int(m.group(1))
            element = self.parser.md.htmlStash.rawHtmlBlocks[index]
            if isinstance(element, etree.Element):
                # We have a matched element. Process it.
                blocks.pop(0)
                self.parse_element_content(element)
                parent.append(element)
                # Cleanup stash. Replace element with empty string to avoid confusing postprocessor.
                self.parser.md.htmlStash.rawHtmlBlocks.pop(index)
                self.parser.md.htmlStash.rawHtmlBlocks.insert(index, '')
                # Confirm the match to the blockparser.
                return True
        # No match found.
        return False
|  |  | ||||||
|  |  | ||||||
class MarkdownInHTMLPostprocessor(RawHtmlPostprocessor):
    def stash_to_string(self, text):
        """ Serialize stash entries, handling etree elements left in the stash. """
        if not isinstance(text, etree.Element):
            return str(text)
        return self.md.serializer(text)
|  |  | ||||||
|  |  | ||||||
class MarkdownInHtmlExtension(Extension):
    """Add Markdown parsing in HTML to Markdown class."""

    def extendMarkdown(self, md):
        """ Register extension instances. """
        # Swap in the markdown-aware raw-HTML preprocessor.
        md.preprocessors.register(HtmlBlockPreprocessor(md), 'html_block', 20)
        # Block processor that resolves placeholders for stashed etree elements.
        block_handler = MarkdownInHtmlProcessor(md.parser)
        md.parser.blockprocessors.register(block_handler, 'markdown_block', 105)
        # Swap in the matching raw-HTML postprocessor.
        md.postprocessors.register(MarkdownInHTMLPostprocessor(md), 'raw_html', 30)
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Return a configured extension instance. """
    return MarkdownInHtmlExtension(**kwargs)
							
								
								
									
										79
									
								
								Source/Libs/markdown/extensions/meta.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										79
									
								
								Source/Libs/markdown/extensions/meta.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,79 @@ | |||||||
|  | """ | ||||||
|  | Meta Data Extension for Python-Markdown | ||||||
|  | ======================================= | ||||||
|  |  | ||||||
|  | This extension adds Meta Data handling to markdown. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/meta_data> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com). | ||||||
|  |  | ||||||
|  | All changes Copyright 2008-2014 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..preprocessors import Preprocessor | ||||||
|  | import re | ||||||
|  | import logging | ||||||
|  |  | ||||||
|  | log = logging.getLogger('MARKDOWN') | ||||||
|  |  | ||||||
|  | # Global Vars | ||||||
|  | META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)') | ||||||
|  | META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)') | ||||||
|  | BEGIN_RE = re.compile(r'^-{3}(\s.*)?') | ||||||
|  | END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?') | ||||||
|  |  | ||||||
|  |  | ||||||
class MetaExtension (Extension):
    """ Meta-Data extension for Python-Markdown. """

    def extendMarkdown(self, md):
        """ Add MetaPreprocessor to Markdown instance. """
        self.md = md
        md.registerExtension(self)
        md.preprocessors.register(MetaPreprocessor(md), 'meta', 27)

    def reset(self):
        # Drop meta-data left over from a previous conversion.
        self.md.Meta = {}
|  |  | ||||||
|  |  | ||||||
class MetaPreprocessor(Preprocessor):
    """ Strip a leading meta-data header and store it on the instance. """

    def run(self, lines):
        """ Parse Meta-Data and store in Markdown.Meta. """
        meta = {}
        current_key = None
        # An optional opening '---' fence is simply discarded.
        if lines and BEGIN_RE.match(lines[0]):
            lines.pop(0)
        while lines:
            line = lines.pop(0)
            key_match = META_RE.match(line)
            if not line.strip() or END_RE.match(line):
                break  # blank line or end of YAML header - done
            if key_match:
                current_key = key_match.group('key').lower().strip()
                value = key_match.group('value').strip()
                # Every key maps to a list; repeated keys accumulate.
                meta.setdefault(current_key, []).append(value)
            else:
                more = META_MORE_RE.match(line)
                if more and current_key:
                    # Indented continuation of the previous key.
                    meta[current_key].append(more.group('value').strip())
                else:
                    # Not meta-data: put the line back and stop.
                    lines.insert(0, line)
                    break
        self.md.Meta = meta
        return lines
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Return a configured extension instance. """
    return MetaExtension(**kwargs)
							
								
								
									
										33
									
								
								Source/Libs/markdown/extensions/nl2br.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										33
									
								
								Source/Libs/markdown/extensions/nl2br.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,33 @@ | |||||||
|  | """ | ||||||
|  | NL2BR Extension | ||||||
|  | =============== | ||||||
|  |  | ||||||
|  | A Python-Markdown extension to treat newlines as hard breaks; like | ||||||
|  | GitHub-flavored Markdown does. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/nl2br> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
Original code Copyright 2011 [Brian Neal](https://deathofagremmie.com/)
|  |  | ||||||
|  | All changes Copyright 2011-2014 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..inlinepatterns import SubstituteTagInlineProcessor | ||||||
|  |  | ||||||
|  | BR_RE = r'\n' | ||||||
|  |  | ||||||
|  |  | ||||||
class Nl2BrExtension(Extension):
    """ Substitute a <br> tag wherever a bare newline appears. """

    def extendMarkdown(self, md):
        processor = SubstituteTagInlineProcessor(BR_RE, 'br')
        md.inlinePatterns.register(processor, 'nl', 5)
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Return a configured extension instance. """
    return Nl2BrExtension(**kwargs)
							
								
								
									
										54
									
								
								Source/Libs/markdown/extensions/sane_lists.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										54
									
								
								Source/Libs/markdown/extensions/sane_lists.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,54 @@ | |||||||
|  | """ | ||||||
|  | Sane List Extension for Python-Markdown | ||||||
|  | ======================================= | ||||||
|  |  | ||||||
Modify the behavior of Lists in Python-Markdown to act in a sane manner.
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/sane_lists> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Original code Copyright 2011 [Waylan Limberg](http://achinghead.com) | ||||||
|  |  | ||||||
|  | All changes Copyright 2011-2014 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..blockprocessors import OListProcessor, UListProcessor | ||||||
|  | import re | ||||||
|  |  | ||||||
|  |  | ||||||
class SaneOListProcessor(OListProcessor):
    """ Ordered-list processor whose only accepted sibling is another <ol>. """

    SIBLING_TAGS = ['ol']
    LAZY_OL = False

    def __init__(self, parser):
        super().__init__(parser)
        # Child items must carry an explicit "N." marker.
        self.CHILD_RE = re.compile(
            r'^[ ]{0,%d}((\d+\.))[ ]+(.*)' % (self.tab_length - 1))
|  |  | ||||||
|  |  | ||||||
class SaneUListProcessor(UListProcessor):
    """ Unordered-list processor whose only accepted sibling is another <ul>. """

    SIBLING_TAGS = ['ul']

    def __init__(self, parser):
        super().__init__(parser)
        # Child items must carry a bullet ("*", "+" or "-") marker.
        self.CHILD_RE = re.compile(
            r'^[ ]{0,%d}(([*+-]))[ ]+(.*)' % (self.tab_length - 1))
|  |  | ||||||
|  |  | ||||||
class SaneListExtension(Extension):
    """ Add sane lists to Markdown. """

    def extendMarkdown(self, md):
        """ Override existing Processors. """
        blockprocessors = md.parser.blockprocessors
        blockprocessors.register(SaneOListProcessor(md.parser), 'olist', 40)
        blockprocessors.register(SaneUListProcessor(md.parser), 'ulist', 30)
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Return a configured extension instance. """
    return SaneListExtension(**kwargs)
							
								
								
									
										263
									
								
								Source/Libs/markdown/extensions/smarty.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										263
									
								
								Source/Libs/markdown/extensions/smarty.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,263 @@ | |||||||
|  | ''' | ||||||
|  | Smarty extension for Python-Markdown | ||||||
|  | ==================================== | ||||||
|  |  | ||||||
|  | Adds conversion of ASCII dashes, quotes and ellipses to their HTML | ||||||
|  | entity equivalents. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/smarty> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Author: 2013, Dmitry Shachnev <mitya57@gmail.com> | ||||||
|  |  | ||||||
|  | All changes Copyright 2013-2014 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | SmartyPants license: | ||||||
|  |  | ||||||
|  |    Copyright (c) 2003 John Gruber <https://daringfireball.net/> | ||||||
|  |    All rights reserved. | ||||||
|  |  | ||||||
|  |    Redistribution and use in source and binary forms, with or without | ||||||
|  |    modification, are permitted provided that the following conditions are | ||||||
|  |    met: | ||||||
|  |  | ||||||
|  |    *  Redistributions of source code must retain the above copyright | ||||||
|  |       notice, this list of conditions and the following disclaimer. | ||||||
|  |  | ||||||
|  |    *  Redistributions in binary form must reproduce the above copyright | ||||||
|  |       notice, this list of conditions and the following disclaimer in | ||||||
|  |       the documentation and/or other materials provided with the | ||||||
|  |       distribution. | ||||||
|  |  | ||||||
|  |    *  Neither the name "SmartyPants" nor the names of its contributors | ||||||
|  |       may be used to endorse or promote products derived from this | ||||||
|  |       software without specific prior written permission. | ||||||
|  |  | ||||||
|  |    This software is provided by the copyright holders and contributors "as | ||||||
|  |    is" and any express or implied warranties, including, but not limited | ||||||
|  |    to, the implied warranties of merchantability and fitness for a | ||||||
|  |    particular purpose are disclaimed. In no event shall the copyright | ||||||
|  |    owner or contributors be liable for any direct, indirect, incidental, | ||||||
|  |    special, exemplary, or consequential damages (including, but not | ||||||
|  |    limited to, procurement of substitute goods or services; loss of use, | ||||||
|  |    data, or profits; or business interruption) however caused and on any | ||||||
|  |    theory of liability, whether in contract, strict liability, or tort | ||||||
|  |    (including negligence or otherwise) arising in any way out of the use | ||||||
|  |    of this software, even if advised of the possibility of such damage. | ||||||
|  |  | ||||||
|  |  | ||||||
|  | smartypants.py license: | ||||||
|  |  | ||||||
|  |    smartypants.py is a derivative work of SmartyPants. | ||||||
|  |    Copyright (c) 2004, 2007 Chad Miller <http://web.chad.org/> | ||||||
|  |  | ||||||
|  |    Redistribution and use in source and binary forms, with or without | ||||||
|  |    modification, are permitted provided that the following conditions are | ||||||
|  |    met: | ||||||
|  |  | ||||||
|  |    *  Redistributions of source code must retain the above copyright | ||||||
|  |       notice, this list of conditions and the following disclaimer. | ||||||
|  |  | ||||||
|  |    *  Redistributions in binary form must reproduce the above copyright | ||||||
|  |       notice, this list of conditions and the following disclaimer in | ||||||
|  |       the documentation and/or other materials provided with the | ||||||
|  |       distribution. | ||||||
|  |  | ||||||
|  |    This software is provided by the copyright holders and contributors "as | ||||||
|  |    is" and any express or implied warranties, including, but not limited | ||||||
|  |    to, the implied warranties of merchantability and fitness for a | ||||||
|  |    particular purpose are disclaimed. In no event shall the copyright | ||||||
|  |    owner or contributors be liable for any direct, indirect, incidental, | ||||||
|  |    special, exemplary, or consequential damages (including, but not | ||||||
|  |    limited to, procurement of substitute goods or services; loss of use, | ||||||
|  |    data, or profits; or business interruption) however caused and on any | ||||||
|  |    theory of liability, whether in contract, strict liability, or tort | ||||||
|  |    (including negligence or otherwise) arising in any way out of the use | ||||||
|  |    of this software, even if advised of the possibility of such damage. | ||||||
|  |  | ||||||
|  | ''' | ||||||
|  |  | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..inlinepatterns import HtmlInlineProcessor, HTML_RE | ||||||
|  | from ..treeprocessors import InlineProcessor | ||||||
|  | from ..util import Registry, deprecated | ||||||
|  |  | ||||||
|  |  | ||||||
# Constants for quote education.
punctClass = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
endOfWordClass = r"[\s.,;:!?)]"
closeClass = r"[^\ \t\r\n\[\{\(\-\u0002\u0003]"

# Any of these may legitimately precede an *opening* quote.
# NOTE: the entity alternatives below must stay literal HTML entities
# (&nbsp;, &#8211;, &#8212;) -- they match entity text in the document.
openingQuotesBase = (
    r'(\s'               # a  whitespace char
    r'|&nbsp;'           # or a non-breaking space entity
    r'|--'               # or dashes
    r'|–|—'              # or unicode
    r'|&[mn]dash;'       # or named dash entities
    r'|&#8211;|&#8212;'  # or decimal entities
    r')'
)

# HTML entities emitted for each smart character; overridable via the
# extension's 'substitutions' config option.
substitutions = {
    'mdash': '&mdash;',
    'ndash': '&ndash;',
    'ellipsis': '&hellip;',
    'left-angle-quote': '&laquo;',
    'right-angle-quote': '&raquo;',
    'left-single-quote': '&lsquo;',
    'right-single-quote': '&rsquo;',
    'left-double-quote': '&ldquo;',
    'right-double-quote': '&rdquo;',
}


# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
singleQuoteStartRe = r"^'(?=%s\B)" % punctClass
doubleQuoteStartRe = r'^"(?=%s\B)' % punctClass

# Special case for double sets of quotes, e.g.:
#   <p>He said, "'Quoted' words in a larger quote."</p>
doubleQuoteSetsRe = r""""'(?=\w)"""
singleQuoteSetsRe = r"""'"(?=\w)"""

# Special case for decade abbreviations (the '80s):
decadeAbbrRe = r"(?<!\w)'(?=\d{2}s)"

# Get most opening double quotes:
openingDoubleQuotesRegex = r'%s"(?=\w)' % openingQuotesBase

# Double closing quotes:
closingDoubleQuotesRegex = r'"(?=\s)'
closingDoubleQuotesRegex2 = '(?<=%s)"' % closeClass

# Get most opening single quotes:
openingSingleQuotesRegex = r"%s'(?=\w)" % openingQuotesBase

# Single closing quotes:
closingSingleQuotesRegex = r"(?<=%s)'(?!\s|s\b|\d)" % closeClass
closingSingleQuotesRegex2 = r"(?<=%s)'(\s|s\b)" % closeClass

# All remaining quotes should be opening ones
remainingSingleQuotesRegex = r"'"
remainingDoubleQuotesRegex = r'"'
|  |  | ||||||
# HTML_RE plus a negative lookahead: the tag match must not be immediately
# followed by another '>' (used to avoid processing tags with duplicate
# closing quotes -- see the note in SmartyExtension.extendMarkdown).
HTML_STRICT_RE = HTML_RE + r'(?!\>)'
|  |  | ||||||
|  |  | ||||||
class SubstituteTextPattern(HtmlInlineProcessor):
    def __init__(self, pattern, replace, md):
        """ Replaces matches with some text. """
        HtmlInlineProcessor.__init__(self, pattern)
        self.replace = replace
        self.md = md

    @property
    @deprecated("Use 'md' instead.")
    def markdown(self):
        # TODO: remove this later
        return self.md

    def handleMatch(self, m, data):
        # Build the replacement: ints refer to regex groups, strings are
        # stashed so later processing leaves them alone.
        pieces = []
        for part in self.replace:
            if isinstance(part, int):
                pieces.append(m.group(part))
            else:
                pieces.append(self.md.htmlStash.store(part))
        return ''.join(pieces), m.start(0), m.end(0)
|  |  | ||||||
|  |  | ||||||
class SmartyExtension(Extension):
    """ Educate ASCII quotes, dashes and ellipses into HTML entities. """

    def __init__(self, **kwargs):
        self.config = {
            'smart_quotes': [True, 'Educate quotes'],
            'smart_angled_quotes': [False, 'Educate angled quotes'],
            'smart_dashes': [True, 'Educate dashes'],
            'smart_ellipses': [True, 'Educate ellipses'],
            'substitutions': [{}, 'Overwrite default substitutions'],
        }
        super().__init__(**kwargs)
        # Start from the module-level defaults, then overlay any
        # user-supplied replacements.
        self.substitutions = dict(substitutions)
        self.substitutions.update(self.getConfig('substitutions', default={}))

    def _addPatterns(self, md, patterns, serie, priority):
        # Register each (regex, replacement) pair at a decreasing priority so
        # the patterns are tried in the order given.
        for ind, pattern in enumerate(patterns):
            pattern += (md,)
            pattern = SubstituteTextPattern(*pattern)
            name = 'smarty-%s-%d' % (serie, ind)
            self.inlinePatterns.register(pattern, name, priority-ind)

    def educateDashes(self, md):
        # Exactly three hyphens -> em dash, exactly two -> en dash.
        emDashesPattern = SubstituteTextPattern(
            r'(?<!-)---(?!-)', (self.substitutions['mdash'],), md
        )
        enDashesPattern = SubstituteTextPattern(
            r'(?<!-)--(?!-)', (self.substitutions['ndash'],), md
        )
        self.inlinePatterns.register(emDashesPattern, 'smarty-em-dashes', 50)
        self.inlinePatterns.register(enDashesPattern, 'smarty-en-dashes', 45)

    def educateEllipses(self, md):
        # Exactly three dots -> ellipsis.
        ellipsesPattern = SubstituteTextPattern(
            r'(?<!\.)\.{3}(?!\.)', (self.substitutions['ellipsis'],), md
        )
        self.inlinePatterns.register(ellipsesPattern, 'smarty-ellipses', 10)

    def educateAngledQuotes(self, md):
        # '<<' / '>>' -> left/right guillemets.
        leftAngledQuotePattern = SubstituteTextPattern(
            r'\<\<', (self.substitutions['left-angle-quote'],), md
        )
        rightAngledQuotePattern = SubstituteTextPattern(
            r'\>\>', (self.substitutions['right-angle-quote'],), md
        )
        self.inlinePatterns.register(leftAngledQuotePattern, 'smarty-left-angle-quotes', 40)
        self.inlinePatterns.register(rightAngledQuotePattern, 'smarty-right-angle-quotes', 35)

    def educateQuotes(self, md):
        lsquo = self.substitutions['left-single-quote']
        rsquo = self.substitutions['right-single-quote']
        ldquo = self.substitutions['left-double-quote']
        rdquo = self.substitutions['right-double-quote']
        # NOTE: order matters -- special cases run before the catch-all
        # "remaining" patterns.  In each replacement tuple an int refers to a
        # regex group; a string is inserted verbatim (via the stash).
        patterns = (
            (singleQuoteStartRe, (rsquo,)),
            (doubleQuoteStartRe, (rdquo,)),
            (doubleQuoteSetsRe, (ldquo + lsquo,)),
            (singleQuoteSetsRe, (lsquo + ldquo,)),
            (decadeAbbrRe, (rsquo,)),
            (openingSingleQuotesRegex, (1, lsquo)),
            (closingSingleQuotesRegex, (rsquo,)),
            (closingSingleQuotesRegex2, (rsquo, 1)),
            (remainingSingleQuotesRegex, (lsquo,)),
            (openingDoubleQuotesRegex, (1, ldquo)),
            (closingDoubleQuotesRegex, (rdquo,)),
            (closingDoubleQuotesRegex2, (rdquo,)),
            (remainingDoubleQuotesRegex, (ldquo,))
        )
        self._addPatterns(md, patterns, 'quotes', 30)

    def extendMarkdown(self, md):
        configs = self.getConfigs()
        # Smarty keeps its patterns in a private registry, applied by its own
        # InlineProcessor tree processor (registered below at priority 2).
        self.inlinePatterns = Registry()
        if configs['smart_ellipses']:
            self.educateEllipses(md)
        if configs['smart_quotes']:
            self.educateQuotes(md)
        if configs['smart_angled_quotes']:
            self.educateAngledQuotes(md)
            # Override HTML_RE from inlinepatterns.py so that it does not
            # process tags with duplicate closing quotes.
            md.inlinePatterns.register(HtmlInlineProcessor(HTML_STRICT_RE, md), 'html', 90)
        if configs['smart_dashes']:
            self.educateDashes(md)
        inlineProcessor = InlineProcessor(md)
        inlineProcessor.inlinePatterns = self.inlinePatterns
        md.treeprocessors.register(inlineProcessor, 'smarty', 2)
        # Make quotes escapable so authors can suppress education.
        md.ESCAPED_CHARS.extend(['"', "'"])
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Return a configured extension instance. """
    return SmartyExtension(**kwargs)
							
								
								
									
										223
									
								
								Source/Libs/markdown/extensions/tables.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										223
									
								
								Source/Libs/markdown/extensions/tables.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,223 @@ | |||||||
|  | """ | ||||||
|  | Tables Extension for Python-Markdown | ||||||
|  | ==================================== | ||||||
|  |  | ||||||
|  | Added parsing of tables to Python-Markdown. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/tables> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Original code Copyright 2009 [Waylan Limberg](http://achinghead.com) | ||||||
|  |  | ||||||
|  | All changes Copyright 2008-2014 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..blockprocessors import BlockProcessor | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  | import re | ||||||
|  | PIPE_NONE = 0 | ||||||
|  | PIPE_LEFT = 1 | ||||||
|  | PIPE_RIGHT = 2 | ||||||
|  |  | ||||||
|  |  | ||||||
class TableProcessor(BlockProcessor):
    """ Process Tables. """

    # Tokenizes a row into escaped backslashes, (escaped) backtick runs,
    # escaped pipes, and bare pipes -- used to tell cell borders from
    # pipes inside inline code.
    RE_CODE_PIPES = re.compile(r'(?:(\\\\)|(\\`+)|(`+)|(\\\|)|(\|))')
    # An unescaped '|' (possibly preceded by escaped backslashes) at
    # end-of-row: the right table border.
    RE_END_BORDER = re.compile(r'(?<!\\)(?:\\\\)*\|$')

    def __init__(self, parser):
        # border/separator are populated by test() and consumed by run().
        self.border = False
        self.separator = ''
        super().__init__(parser)
|  |  | ||||||
    def test(self, parent, block):
        """
        Ensure first two rows (column header and separator row) are valid table rows.

        Keep border check and separator row to avoid repeating the work.
        """
        is_table = False
        rows = [row.strip(' ') for row in block.split('\n')]
        if len(rows) > 1:
            header0 = rows[0]
            # Record which borders (leading/trailing pipe) the header declares.
            self.border = PIPE_NONE
            if header0.startswith('|'):
                self.border |= PIPE_LEFT
            if self.RE_END_BORDER.search(header0) is not None:
                self.border |= PIPE_RIGHT
            row = self._split_row(header0)
            row0_len = len(row)
            is_table = row0_len > 1

            # Each row in a single column table needs at least one pipe.
            if not is_table and row0_len == 1 and self.border:
                for index in range(1, len(rows)):
                    is_table = rows[index].startswith('|')
                    if not is_table:
                        is_table = self.RE_END_BORDER.search(rows[index]) is not None
                    if not is_table:
                        break

            if is_table:
                # The separator row must have the same column count as the
                # header and contain only pipes, colons, dashes and spaces.
                row = self._split_row(rows[1])
                is_table = (len(row) == row0_len) and set(''.join(row)) <= set('|:- ')
                if is_table:
                    # Cache for run() so the work isn't repeated.
                    self.separator = row

        return is_table
|  |  | ||||||
|  |     def run(self, parent, blocks): | ||||||
|  |         """ Parse a table block and build table. """ | ||||||
|  |         block = blocks.pop(0).split('\n') | ||||||
|  |         header = block[0].strip(' ') | ||||||
|  |         rows = [] if len(block) < 3 else block[2:] | ||||||
|  |  | ||||||
|  |         # Get alignment of columns | ||||||
|  |         align = [] | ||||||
|  |         for c in self.separator: | ||||||
|  |             c = c.strip(' ') | ||||||
|  |             if c.startswith(':') and c.endswith(':'): | ||||||
|  |                 align.append('center') | ||||||
|  |             elif c.startswith(':'): | ||||||
|  |                 align.append('left') | ||||||
|  |             elif c.endswith(':'): | ||||||
|  |                 align.append('right') | ||||||
|  |             else: | ||||||
|  |                 align.append(None) | ||||||
|  |  | ||||||
|  |         # Build table | ||||||
|  |         table = etree.SubElement(parent, 'table') | ||||||
|  |         thead = etree.SubElement(table, 'thead') | ||||||
|  |         self._build_row(header, thead, align) | ||||||
|  |         tbody = etree.SubElement(table, 'tbody') | ||||||
|  |         if len(rows) == 0: | ||||||
|  |             # Handle empty table | ||||||
|  |             self._build_empty_row(tbody, align) | ||||||
|  |         else: | ||||||
|  |             for row in rows: | ||||||
|  |                 self._build_row(row.strip(' '), tbody, align) | ||||||
|  |  | ||||||
|  |     def _build_empty_row(self, parent, align): | ||||||
|  |         """Build an empty row.""" | ||||||
|  |         tr = etree.SubElement(parent, 'tr') | ||||||
|  |         count = len(align) | ||||||
|  |         while count: | ||||||
|  |             etree.SubElement(tr, 'td') | ||||||
|  |             count -= 1 | ||||||
|  |  | ||||||
|  |     def _build_row(self, row, parent, align): | ||||||
|  |         """ Given a row of text, build table cells. """ | ||||||
|  |         tr = etree.SubElement(parent, 'tr') | ||||||
|  |         tag = 'td' | ||||||
|  |         if parent.tag == 'thead': | ||||||
|  |             tag = 'th' | ||||||
|  |         cells = self._split_row(row) | ||||||
|  |         # We use align here rather than cells to ensure every row | ||||||
|  |         # contains the same number of columns. | ||||||
|  |         for i, a in enumerate(align): | ||||||
|  |             c = etree.SubElement(tr, tag) | ||||||
|  |             try: | ||||||
|  |                 c.text = cells[i].strip(' ') | ||||||
|  |             except IndexError:  # pragma: no cover | ||||||
|  |                 c.text = "" | ||||||
|  |             if a: | ||||||
|  |                 c.set('align', a) | ||||||
|  |  | ||||||
|  |     def _split_row(self, row): | ||||||
|  |         """ split a row of text into list of cells. """ | ||||||
|  |         if self.border: | ||||||
|  |             if row.startswith('|'): | ||||||
|  |                 row = row[1:] | ||||||
|  |             row = self.RE_END_BORDER.sub('', row) | ||||||
|  |         return self._split(row) | ||||||
|  |  | ||||||
    def _split(self, row):
        """ split a row of text with some code into a list of cells.

        Pipes that fall inside balanced backtick (code span) regions are not
        cell delimiters; everything else splits the row. Escaped pipes are
        skipped entirely by the regex.
        """
        elements = []
        pipes = []
        tics = []
        tic_points = []
        tic_region = []
        good_pipes = []

        # Parse row
        # Throw out \\, and \|
        # NOTE(review): relies on RE_CODE_PIPES group layout defined on the
        # class: group 2 = escaped tic run, group 3 = plain tic run,
        # group 5 = bare pipe — confirm against the class-level pattern.
        for m in self.RE_CODE_PIPES.finditer(row):
            # Store ` data (len, start_pos, end_pos)
            if m.group(2):
                # \`+
                # Store length of each tic group: subtract \
                tics.append(len(m.group(2)) - 1)
                # Store start of group, end of group, and escape length
                tic_points.append((m.start(2), m.end(2) - 1, 1))
            elif m.group(3):
                # `+
                # Store length of each tic group
                tics.append(len(m.group(3)))
                # Store start of group, end of group, and escape length
                tic_points.append((m.start(3), m.end(3) - 1, 0))
            # Store pipe location
            elif m.group(5):
                pipes.append(m.start(5))

        # Pair up tics according to size if possible
        # Subtract the escape length *only* from the opening.
        # Walk through tic list and see if tic has a close.
        # Store the tic region (start of region, end of region).
        pos = 0
        tic_len = len(tics)
        while pos < tic_len:
            try:
                tic_size = tics[pos] - tic_points[pos][2]
                if tic_size == 0:
                    # A fully-escaped tic run cannot open a code span.
                    raise ValueError
                index = tics[pos + 1:].index(tic_size) + 1
                tic_region.append((tic_points[pos][0], tic_points[pos + index][1]))
                pos += index + 1
            except ValueError:
                # No matching closer for this run; move on to the next one.
                pos += 1

        # Resolve pipes.  Check if they are within a tic pair region.
        # Walk through pipes comparing them to each region.
        #     - If pipe position is less than a region, it isn't in a region
        #       (regions are discovered left-to-right, so we can stop early)
        #     - If it is within a region, we don't want it, so throw it out
        #     - If we didn't throw it out, it must be a table pipe
        for pipe in pipes:
            throw_out = False
            for region in tic_region:
                if pipe < region[0]:
                    # Pipe is not in a region
                    break
                elif region[0] <= pipe <= region[1]:
                    # Pipe is within a code region.  Throw it out.
                    throw_out = True
                    break
            if not throw_out:
                good_pipes.append(pipe)

        # Split row according to table delimiters.
        pos = 0
        for pipe in good_pipes:
            elements.append(row[pos:pipe])
            pos = pipe + 1
        elements.append(row[pos:])
        return elements
|  |  | ||||||
|  |  | ||||||
class TableExtension(Extension):
    """ Add tables to Markdown. """

    def extendMarkdown(self, md):
        """ Add an instance of TableProcessor to BlockParser. """
        # '|' must be escapable so literal pipes can appear inside cells.
        if '|' not in md.ESCAPED_CHARS:
            md.ESCAPED_CHARS.append('|')
        processor = TableProcessor(md.parser)
        md.parser.blockprocessors.register(processor, 'table', 75)
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Return a TableExtension instance (standard Markdown extension entry point). """
    return TableExtension(**kwargs)
							
								
								
									
										380
									
								
								Source/Libs/markdown/extensions/toc.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										380
									
								
								Source/Libs/markdown/extensions/toc.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,380 @@ | |||||||
|  | """ | ||||||
|  | Table of Contents Extension for Python-Markdown | ||||||
|  | =============================================== | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/toc> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Original code Copyright 2008 [Jack Miller](https://codezen.org/) | ||||||
|  |  | ||||||
|  | All changes Copyright 2008-2014 The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..treeprocessors import Treeprocessor | ||||||
|  | from ..util import code_escape, parseBoolValue, AMP_SUBSTITUTE, HTML_PLACEHOLDER_RE, AtomicString | ||||||
|  | from ..postprocessors import UnescapePostprocessor | ||||||
|  | import re | ||||||
|  | import html | ||||||
|  | import unicodedata | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  |  | ||||||
|  |  | ||||||
def slugify(value, separator, unicode=False):
    """ Slugify a string, to make it URL friendly. """
    if not unicode:
        # Approximate Extended Latin characters with ASCII, i.e. žlutý → zluty
        decomposed = unicodedata.normalize('NFKD', value)
        value = decomposed.encode('ascii', 'ignore').decode('ascii')
    # Strip punctuation, normalize case, then collapse runs of whitespace
    # (or existing separators) into a single separator.
    cleaned = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return re.sub(r'[{}\s]+'.format(separator), separator, cleaned)
|  |  | ||||||
|  |  | ||||||
def slugify_unicode(value, separator):
    """ Slugify a string, to make it URL friendly while preserving Unicode characters. """
    # Same as slugify() but with the ASCII transliteration step disabled.
    return slugify(value, separator, True)
|  |  | ||||||
|  |  | ||||||
# Matches a previously-suffixed id such as "title_2" → base "title", count 2.
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')


def unique(id, ids):
    """ Ensure id is unique in set of ids. Append '_1', '_2'... if not """
    while not id or id in ids:
        match = IDCOUNT_RE.match(id)
        if match:
            # Already carries a numeric suffix: bump it.
            id = '{}_{}'.format(match.group(1), int(match.group(2)) + 1)
        else:
            id = '{}_{}'.format(id, 1)
    ids.add(id)
    return id
|  |  | ||||||
|  |  | ||||||
def get_name(el):
    """Get title name.

    Concatenates all text inside *el*; AtomicString chunks are HTML-unescaped
    first, ordinary strings are taken verbatim.
    """
    parts = []
    for chunk in el.itertext():
        parts.append(html.unescape(chunk) if isinstance(chunk, AtomicString) else chunk)
    return ''.join(parts).strip()
|  |  | ||||||
|  |  | ||||||
def stashedHTML2text(text, md, strip_entities=True):
    """ Extract raw HTML from stash, reduce to plain text and swap with placeholder. """

    def _html_sub(m):
        """ Substitute raw html with plain text. """
        try:
            raw = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
        except (IndexError, TypeError):  # pragma: no cover
            # Unknown placeholder index: leave the placeholder untouched.
            return m.group(0)
        # Drop tags, keeping only the text between them.
        plain = re.sub(r'(<[^>]+>)', '', raw)
        if strip_entities:
            plain = re.sub(r'(&[\#a-zA-Z0-9]+;)', '', plain)
        return plain

    return HTML_PLACEHOLDER_RE.sub(_html_sub, text)
|  |  | ||||||
|  |  | ||||||
def unescape(text):
    """ Unescape escaped text. """
    # Delegate to the standard unescape postprocessor.
    return UnescapePostprocessor().run(text)
|  |  | ||||||
|  |  | ||||||
def nest_toc_tokens(toc_list):
    """Given an unsorted list with errors and skips, return a nested one.
    [{'level': 1}, {'level': 2}]
    =>
    [{'level': 1, 'children': [{'level': 2, 'children': []}]}]

    A wrong list is also converted:
    [{'level': 2}, {'level': 1}]
    =>
    [{'level': 2, 'children': []}, {'level': 1, 'children': []}]
    """

    nested = []
    if toc_list:
        # Seed the traversal state from the first entry.
        prev = toc_list.pop(0)
        prev['children'] = []
        level_stack = [prev['level']]
        nested.append(prev)
        parent_stack = []

        # Consume the remaining tokens, nesting each one in place.
        while toc_list:
            token = toc_list.pop(0)
            level = token['level']
            token['children'] = []

            # Shallower than the previous token: unwind the stacks.
            if level < level_stack[-1]:
                # The previous level is definitely deeper; discard it.
                level_stack.pop()

                # Discard every parent at or below the current level.
                pop_count = 0
                for parent in reversed(parent_stack):
                    if level <= parent['level']:
                        pop_count += 1
                    else:  # pragma: no cover
                        break
                if pop_count:
                    level_stack = level_stack[:-pop_count]
                    parent_stack = parent_stack[:-pop_count]

                # Record the current level as the new reference point.
                level_stack.append(level)

            # Same depth as the reference level: attach to the current
            # parent (or to the top level if there is none).
            if level == level_stack[-1]:
                target = parent_stack[-1]['children'] if parent_stack else nested
                target.append(token)

            # Deeper than the previous token: the previous token becomes
            # a parent and the current token its first child.
            else:
                prev['children'].append(token)
                parent_stack.append(prev)
                level_stack.append(level)
            prev = token

    return nested
|  |  | ||||||
|  |  | ||||||
class TocTreeprocessor(Treeprocessor):
    """ Assign ids to headers, optionally decorate them with anchors or
    permalinks, and build a table of contents from the document tree. """

    def __init__(self, md, config):
        super().__init__(md)

        self.marker = config["marker"]
        self.title = config["title"]
        # "baselevel" is 1-based in the config; store it as a 0-based offset.
        self.base_level = int(config["baselevel"]) - 1
        self.slugify = config["slugify"]
        self.sep = config["separator"]
        self.use_anchors = parseBoolValue(config["anchorlink"])
        self.anchorlink_class = config["anchorlink_class"]
        self.use_permalinks = parseBoolValue(config["permalink"], False)
        # A non-boolean "permalink" value is used verbatim as the link text.
        if self.use_permalinks is None:
            self.use_permalinks = config["permalink"]
        self.permalink_class = config["permalink_class"]
        self.permalink_title = config["permalink_title"]
        self.header_rgx = re.compile("[Hh][123456]")
        # "toc_depth" is either "top-bottom" (e.g. "2-5") or a single
        # integer giving just the bottom level (top defaults to 1).
        if isinstance(config["toc_depth"], str) and '-' in config["toc_depth"]:
            self.toc_top, self.toc_bottom = [int(x) for x in config["toc_depth"].split('-')]
        else:
            self.toc_top = 1
            self.toc_bottom = int(config["toc_depth"])

    def iterparent(self, node):
        ''' Iterator wrapper to get allowed parent and child all at once. '''

        # We do not allow the marker inside a header as that
        # would cause an endless loop of placing a new TOC
        # inside previously generated TOC.
        for child in node:
            if not self.header_rgx.match(child.tag) and child.tag not in ['pre', 'code']:
                yield node, child
                yield from self.iterparent(child)

    def replace_marker(self, root, elem):
        ''' Replace marker with elem. '''
        for (p, c) in self.iterparent(root):
            text = ''.join(c.itertext()).strip()
            if not text:
                continue

            # To keep the output from screwing up the
            # validation by putting a <div> inside of a <p>
            # we actually replace the <p> in its entirety.

            # The <p> element may contain more than a single text content
            # (nl2br can introduce a <br>). In this situation, c.text returns
            # the very first content, ignore children contents or tail content.
            # len(c) == 0 is here to ensure there is only text in the <p>.
            if c.text and c.text.strip() == self.marker and len(c) == 0:
                for i in range(len(p)):
                    if p[i] == c:
                        p[i] = elem
                        break

    def set_level(self, elem):
        ''' Adjust header level according to base level. '''
        # Clamp at 6: HTML defines no heading deeper than <h6>.
        level = int(elem.tag[-1]) + self.base_level
        if level > 6:
            level = 6
        elem.tag = 'h%d' % level

    def add_anchor(self, c, elem_id):  # @ReservedAssignment
        ''' Wrap the header's entire content in a self-referencing anchor. '''
        anchor = etree.Element("a")
        anchor.text = c.text
        anchor.attrib["href"] = "#" + elem_id
        anchor.attrib["class"] = self.anchorlink_class
        c.text = ""
        # Move every child of the header inside the anchor element.
        for elem in c:
            anchor.append(elem)
        while len(c):
            c.remove(c[0])
        c.append(anchor)

    def add_permalink(self, c, elem_id):
        ''' Append a Sphinx-style permalink (pilcrow or custom text) to the header. '''
        permalink = etree.Element("a")
        # use_permalinks is True → pilcrow entity; otherwise it is the link text.
        permalink.text = ("%spara;" % AMP_SUBSTITUTE
                          if self.use_permalinks is True
                          else self.use_permalinks)
        permalink.attrib["href"] = "#" + elem_id
        permalink.attrib["class"] = self.permalink_class
        if self.permalink_title:
            permalink.attrib["title"] = self.permalink_title
        c.append(permalink)

    def build_toc_div(self, toc_list):
        """ Return a string div given a toc list. """
        div = etree.Element("div")
        div.attrib["class"] = "toc"

        # Add title to the div
        if self.title:
            header = etree.SubElement(div, "span")
            header.attrib["class"] = "toctitle"
            header.text = self.title

        def build_etree_ul(toc_list, parent):
            # Recursively render the nested toc tokens as nested <ul> lists.
            ul = etree.SubElement(parent, "ul")
            for item in toc_list:
                # List item link, to be inserted into the toc div
                li = etree.SubElement(ul, "li")
                link = etree.SubElement(li, "a")
                link.text = item.get('name', '')
                link.attrib["href"] = '#' + item.get('id', '')
                if item['children']:
                    build_etree_ul(item['children'], li)
            return ul

        build_etree_ul(toc_list, div)

        if 'prettify' in self.md.treeprocessors:
            self.md.treeprocessors['prettify'].run(div)

        return div

    def run(self, doc):
        ''' Process the document: assign header ids, collect toc tokens,
        replace the marker, and attach `toc`/`toc_tokens` to the md instance. '''
        # Get a list of id attributes
        used_ids = set()
        for el in doc.iter():
            if "id" in el.attrib:
                used_ids.add(el.attrib["id"])

        toc_tokens = []
        for el in doc.iter():
            if isinstance(el.tag, str) and self.header_rgx.match(el.tag):
                self.set_level(el)
                text = get_name(el)

                # Do not override pre-existing ids
                if "id" not in el.attrib:
                    innertext = unescape(stashedHTML2text(text, self.md))
                    el.attrib["id"] = unique(self.slugify(innertext, self.sep), used_ids)

                # Only include headers within the configured depth range.
                if int(el.tag[-1]) >= self.toc_top and int(el.tag[-1]) <= self.toc_bottom:
                    toc_tokens.append({
                        'level': int(el.tag[-1]),
                        'id': el.attrib["id"],
                        'name': unescape(stashedHTML2text(
                            code_escape(el.attrib.get('data-toc-label', text)),
                            self.md, strip_entities=False
                        ))
                    })

                # Remove the data-toc-label attribute as it is no longer needed
                if 'data-toc-label' in el.attrib:
                    del el.attrib['data-toc-label']

                if self.use_anchors:
                    self.add_anchor(el, el.attrib["id"])
                if self.use_permalinks not in [False, None]:
                    self.add_permalink(el, el.attrib["id"])

        toc_tokens = nest_toc_tokens(toc_tokens)
        div = self.build_toc_div(toc_tokens)
        if self.marker:
            self.replace_marker(doc, div)

        # serialize and attach to markdown instance.
        toc = self.md.serializer(div)
        for pp in self.md.postprocessors:
            toc = pp.run(toc)
        self.md.toc_tokens = toc_tokens
        self.md.toc = toc
|  |  | ||||||
|  |  | ||||||
class TocExtension(Extension):
    """ Add table-of-contents support to Markdown. """

    # Subclasses may swap in a customized treeprocessor.
    TreeProcessorClass = TocTreeprocessor

    def __init__(self, **kwargs):
        self.config = {
            "marker": ['[TOC]',
                       'Text to find and replace with Table of Contents - '
                       'Set to an empty string to disable. Defaults to "[TOC]"'],
            "title": ["",
                      "Title to insert into TOC <div> - "
                      "Defaults to an empty string"],
            "anchorlink": [False,
                           "True if header should be a self link - "
                           "Defaults to False"],
            "anchorlink_class": ['toclink',
                                 'CSS class(es) used for the link. '
                                 'Defaults to "toclink"'],
            "permalink": [0,
                          "True or link text if a Sphinx-style permalink should "
                          "be added - Defaults to False"],
            "permalink_class": ['headerlink',
                                'CSS class(es) used for the link. '
                                'Defaults to "headerlink"'],
            "permalink_title": ["Permanent link",
                                "Title attribute of the permalink - "
                                "Defaults to 'Permanent link'"],
            "baselevel": ['1', 'Base level for headers.'],
            "slugify": [slugify,
                        "Function to generate anchors based on header text - "
                        "Defaults to the headerid ext's slugify function."],
            'separator': ['-', 'Word separator. Defaults to "-".'],
            "toc_depth": [6,
                          'Define the range of section levels to include in'
                          'the Table of Contents. A single integer (b) defines'
                          'the bottom section level (<h1>..<hb>) only.'
                          'A string consisting of two digits separated by a hyphen'
                          'in between ("2-5"), define the top (t) and the'
                          'bottom (b) (<ht>..<hb>). Defaults to `6` (bottom).'],
        }

        super().__init__(**kwargs)

    def extendMarkdown(self, md):
        """ Register the extension and its treeprocessor with md. """
        md.registerExtension(self)
        self.md = md
        self.reset()
        tocext = self.TreeProcessorClass(md, self.getConfigs())
        # Headerid ext is set to '>prettify'. With this set to '_end',
        # it should always come after headerid ext (and honor ids assigned
        # by the header id extension) if both are used. Same goes for
        # attr_list extension. This must come last because we don't want
        # to redefine ids after toc is created. But we do want toc prettified.
        md.treeprocessors.register(tocext, 'toc', 5)

    def reset(self):
        """ Clear the toc attributes on the Markdown instance between runs. """
        self.md.toc = ''
        self.md.toc_tokens = []
|  |         self.md.toc_tokens = [] | ||||||
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Return a TocExtension instance (standard Markdown extension entry point). """
    return TocExtension(**kwargs)
							
								
								
									
										87
									
								
								Source/Libs/markdown/extensions/wikilinks.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										87
									
								
								Source/Libs/markdown/extensions/wikilinks.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,87 @@ | |||||||
|  | ''' | ||||||
|  | WikiLinks Extension for Python-Markdown | ||||||
|  | ====================================== | ||||||
|  |  | ||||||
|  | Converts [[WikiLinks]] to relative links. | ||||||
|  |  | ||||||
|  | See <https://Python-Markdown.github.io/extensions/wikilinks> | ||||||
|  | for documentation. | ||||||
|  |  | ||||||
|  | Original code Copyright [Waylan Limberg](http://achinghead.com/). | ||||||
|  |  | ||||||
|  | All changes Copyright The Python Markdown Project | ||||||
|  |  | ||||||
|  | License: [BSD](https://opensource.org/licenses/bsd-license.php) | ||||||
|  |  | ||||||
|  | ''' | ||||||
|  |  | ||||||
|  | from . import Extension | ||||||
|  | from ..inlinepatterns import InlineProcessor | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  | import re | ||||||
|  |  | ||||||
|  |  | ||||||
def build_url(label, base, end):
    """ Build a url from the label, a base, and an end. """
    # Collapse spaces (and space-adjacent underscores) into single underscores.
    slug = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
    return base + slug + end
|  |  | ||||||
|  |  | ||||||
class WikiLinkExtension(Extension):
    """ Convert [[WikiLinks]] into relative links. """

    def __init__(self, **kwargs):
        self.config = {
            # Fixed help-text typo: "beginning or URL" -> "beginning of URL".
            'base_url': ['/', 'String to append to beginning of URL.'],
            'end_url': ['/', 'String to append to end of URL.'],
            'html_class': ['wikilink', 'CSS hook. Leave blank for none.'],
            'build_url': [build_url, 'Callable formats URL from label.'],
        }

        super().__init__(**kwargs)

    def extendMarkdown(self, md):
        """ Register the wikilink inline pattern with the Markdown instance. """
        self.md = md

        # append to end of inline patterns
        WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
        wikilinkPattern = WikiLinksInlineProcessor(WIKILINK_RE, self.getConfigs())
        wikilinkPattern.md = md
        md.inlinePatterns.register(wikilinkPattern, 'wikilink', 75)
|  |  | ||||||
|  |  | ||||||
class WikiLinksInlineProcessor(InlineProcessor):
    """ Build an <a> element for each [[WikiLink]] match. """

    def __init__(self, pattern, config):
        super().__init__(pattern)
        self.config = config

    def handleMatch(self, m, data):
        """ Return an anchor element (or '' for a blank label) and the match span. """
        label = m.group(1).strip()
        if label:
            base_url, end_url, html_class = self._getMeta()
            a = etree.Element('a')
            a.text = label
            a.set('href', self.config['build_url'](label, base_url, end_url))
            if html_class:
                a.set('class', html_class)
        else:
            # A whitespace-only label produces no markup at all.
            a = ''
        return a, m.start(0), m.end(0)

    def _getMeta(self):
        """ Return meta data or config data. """
        base_url = self.config['base_url']
        end_url = self.config['end_url']
        html_class = self.config['html_class']
        # Document metadata (from the meta extension) overrides the config.
        if hasattr(self.md, 'Meta'):
            meta = self.md.Meta
            if 'wiki_base_url' in meta:
                base_url = meta['wiki_base_url'][0]
            if 'wiki_end_url' in meta:
                end_url = meta['wiki_end_url'][0]
            if 'wiki_html_class' in meta:
                html_class = meta['wiki_html_class'][0]
        return base_url, end_url, html_class
|  |  | ||||||
|  |  | ||||||
def makeExtension(**kwargs):  # pragma: no cover
    """ Return a WikiLinkExtension instance (standard Markdown extension entry point). """
    return WikiLinkExtension(**kwargs)
							
								
								
									
										323
									
								
								Source/Libs/markdown/htmlparser.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										323
									
								
								Source/Libs/markdown/htmlparser.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,323 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2020 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  | """ | ||||||
|  |  | ||||||
import re
# Import the submodule explicitly: `importlib.util` is not guaranteed to be
# reachable as an attribute after a bare `import importlib` (it only worked
# when something else had already imported the submodule as a side effect).
import importlib.util
import sys


# Import a copy of the html.parser lib as `htmlparser` so we can monkeypatch it.
# Users can still do `from html import parser` and get the default behavior.
spec = importlib.util.find_spec('html.parser')
htmlparser = importlib.util.module_from_spec(spec)
spec.loader.exec_module(htmlparser)
sys.modules['htmlparser'] = htmlparser

# Monkeypatch HTMLParser to only accept `?>` to close Processing Instructions.
htmlparser.piclose = re.compile(r'\?>')
# Monkeypatch HTMLParser to only recognize entity references with a closing semicolon.
htmlparser.entityref = re.compile(r'&([a-zA-Z][-.a-zA-Z0-9]*);')
# Monkeypatch HTMLParser to no longer support partial entities. We are always feeding a complete block,
# so the 'incomplete' functionality is unnecessary. As the entityref regex is run right before incomplete,
# and the two regex are the same, then incomplete will simply never match and we avoid the logic within.
htmlparser.incomplete = htmlparser.entityref
# Monkeypatch HTMLParser to not accept a backtick in a tag name, attribute name, or bare value.
htmlparser.locatestarttagend_tolerant = re.compile(r"""
  <[a-zA-Z][^`\t\n\r\f />\x00]*       # tag name <= added backtick here
  (?:[\s/]*                           # optional whitespace before attribute name
    (?:(?<=['"\s/])[^`\s/>][^\s/=>]*  # attribute name <= added backtick here
      (?:\s*=+\s*                     # value indicator
        (?:'[^']*'                    # LITA-enclosed value
          |"[^"]*"                    # LIT-enclosed value
          |(?!['"])[^`>\s]*           # bare value <= added backtick here
         )
         (?:\s*,)*                    # possibly followed by a comma
       )?(?:\s|/(?!>))*
     )*
   )?
  \s*                                 # trailing whitespace
""", re.VERBOSE)

# Match a blank line at the start of a block of text (two newlines).
# The newlines may be preceded by additional whitespace.
blank_line_re = re.compile(r'^([ ]*\n){2}')
|  |  | ||||||
|  |  | ||||||
|  | class HTMLExtractor(htmlparser.HTMLParser): | ||||||
|  |     """ | ||||||
|  |     Extract raw HTML from text. | ||||||
|  |  | ||||||
|  |     The raw HTML is stored in the `htmlStash` of the Markdown instance passed | ||||||
|  |     to `md` and the remaining text is stored in `cleandoc` as a list of strings. | ||||||
|  |     """ | ||||||
|  |  | ||||||
    def __init__(self, md, *args, **kwargs):
        """ Store the Markdown instance and initialize the underlying parser.

        convert_charrefs defaults to False so character references pass
        through untouched for Markdown to handle itself.
        """
        if 'convert_charrefs' not in kwargs:
            kwargs['convert_charrefs'] = False

        # Block tags that should contain no content (self closing)
        self.empty_tags = set(['hr'])

        # This calls self.reset
        super().__init__(*args, **kwargs)
        self.md = md
|  |  | ||||||
|  |     def reset(self): | ||||||
|  |         """Reset this instance.  Loses all unprocessed data.""" | ||||||
|  |         self.inraw = False | ||||||
|  |         self.intail = False | ||||||
|  |         self.stack = []  # When inraw==True, stack contains a list of tags | ||||||
|  |         self._cache = [] | ||||||
|  |         self.cleandoc = [] | ||||||
|  |         super().reset() | ||||||
|  |  | ||||||
|  |     def close(self): | ||||||
|  |         """Handle any buffered data.""" | ||||||
|  |         super().close() | ||||||
|  |         if len(self.rawdata): | ||||||
|  |             # Temp fix for https://bugs.python.org/issue41989 | ||||||
|  |             # TODO: remove this when the bug is fixed in all supported Python versions. | ||||||
|  |             if self.convert_charrefs and not self.cdata_elem:  # pragma: no cover | ||||||
|  |                 self.handle_data(htmlparser.unescape(self.rawdata)) | ||||||
|  |             else: | ||||||
|  |                 self.handle_data(self.rawdata) | ||||||
|  |         # Handle any unclosed tags. | ||||||
|  |         if len(self._cache): | ||||||
|  |             self.cleandoc.append(self.md.htmlStash.store(''.join(self._cache))) | ||||||
|  |             self._cache = [] | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def line_offset(self): | ||||||
|  |         """Returns char index in self.rawdata for the start of the current line. """ | ||||||
|  |         if self.lineno > 1 and '\n' in self.rawdata: | ||||||
|  |             m = re.match(r'([^\n]*\n){{{}}}'.format(self.lineno-1), self.rawdata) | ||||||
|  |             if m: | ||||||
|  |                 return m.end() | ||||||
|  |             else:  # pragma: no cover | ||||||
|  |                 # Value of self.lineno must exceed total number of lines. | ||||||
|  |                 # Find index of beginning of last line. | ||||||
|  |                 return self.rawdata.rfind('\n') | ||||||
|  |         return 0 | ||||||
|  |  | ||||||
|  |     def at_line_start(self): | ||||||
|  |         """ | ||||||
|  |         Returns True if current position is at start of line. | ||||||
|  |  | ||||||
|  |         Allows for up to three blank spaces at start of line. | ||||||
|  |         """ | ||||||
|  |         if self.offset == 0: | ||||||
|  |             return True | ||||||
|  |         if self.offset > 3: | ||||||
|  |             return False | ||||||
|  |         # Confirm up to first 3 chars are whitespace | ||||||
|  |         return self.rawdata[self.line_offset:self.line_offset + self.offset].strip() == '' | ||||||
|  |  | ||||||
|  |     def get_endtag_text(self, tag): | ||||||
|  |         """ | ||||||
|  |         Returns the text of the end tag. | ||||||
|  |  | ||||||
|  |         If it fails to extract the actual text from the raw data, it builds a closing tag with `tag`. | ||||||
|  |         """ | ||||||
|  |         # Attempt to extract actual tag from raw source text | ||||||
|  |         start = self.line_offset + self.offset | ||||||
|  |         m = htmlparser.endendtag.search(self.rawdata, start) | ||||||
|  |         if m: | ||||||
|  |             return self.rawdata[start:m.end()] | ||||||
|  |         else:  # pragma: no cover | ||||||
|  |             # Failed to extract from raw data. Assume well formed and lowercase. | ||||||
|  |             return '</{}>'.format(tag) | ||||||
|  |  | ||||||
|  |     def handle_starttag(self, tag, attrs): | ||||||
|  |         # Handle tags that should always be empty and do not specify a closing tag | ||||||
|  |         if tag in self.empty_tags: | ||||||
|  |             self.handle_startendtag(tag, attrs) | ||||||
|  |             return | ||||||
|  |  | ||||||
|  |         if self.md.is_block_level(tag) and (self.intail or (self.at_line_start() and not self.inraw)): | ||||||
|  |             # Started a new raw block. Prepare stack. | ||||||
|  |             self.inraw = True | ||||||
|  |             self.cleandoc.append('\n') | ||||||
|  |  | ||||||
|  |         text = self.get_starttag_text() | ||||||
|  |         if self.inraw: | ||||||
|  |             self.stack.append(tag) | ||||||
|  |             self._cache.append(text) | ||||||
|  |         else: | ||||||
|  |             self.cleandoc.append(text) | ||||||
|  |             if tag in self.CDATA_CONTENT_ELEMENTS: | ||||||
|  |                 # This is presumably a standalone tag in a code span (see #1036). | ||||||
|  |                 self.clear_cdata_mode() | ||||||
|  |  | ||||||
|  |     def handle_endtag(self, tag): | ||||||
|  |         text = self.get_endtag_text(tag) | ||||||
|  |  | ||||||
|  |         if self.inraw: | ||||||
|  |             self._cache.append(text) | ||||||
|  |             if tag in self.stack: | ||||||
|  |                 # Remove tag from stack | ||||||
|  |                 while self.stack: | ||||||
|  |                     if self.stack.pop() == tag: | ||||||
|  |                         break | ||||||
|  |             if len(self.stack) == 0: | ||||||
|  |                 # End of raw block. | ||||||
|  |                 if blank_line_re.match(self.rawdata[self.line_offset + self.offset + len(text):]): | ||||||
|  |                     # Preserve blank line and end of raw block. | ||||||
|  |                     self._cache.append('\n') | ||||||
|  |                 else: | ||||||
|  |                     # More content exists after endtag. | ||||||
|  |                     self.intail = True | ||||||
|  |                 # Reset stack. | ||||||
|  |                 self.inraw = False | ||||||
|  |                 self.cleandoc.append(self.md.htmlStash.store(''.join(self._cache))) | ||||||
|  |                 # Insert blank line between this and next line. | ||||||
|  |                 self.cleandoc.append('\n\n') | ||||||
|  |                 self._cache = [] | ||||||
|  |         else: | ||||||
|  |             self.cleandoc.append(text) | ||||||
|  |  | ||||||
|  |     def handle_data(self, data): | ||||||
|  |         if self.intail and '\n' in data: | ||||||
|  |             self.intail = False | ||||||
|  |         if self.inraw: | ||||||
|  |             self._cache.append(data) | ||||||
|  |         else: | ||||||
|  |             self.cleandoc.append(data) | ||||||
|  |  | ||||||
|  |     def handle_empty_tag(self, data, is_block): | ||||||
|  |         """ Handle empty tags (`<data>`). """ | ||||||
|  |         if self.inraw or self.intail: | ||||||
|  |             # Append this to the existing raw block | ||||||
|  |             self._cache.append(data) | ||||||
|  |         elif self.at_line_start() and is_block: | ||||||
|  |             # Handle this as a standalone raw block | ||||||
|  |             if blank_line_re.match(self.rawdata[self.line_offset + self.offset + len(data):]): | ||||||
|  |                 # Preserve blank line after tag in raw block. | ||||||
|  |                 data += '\n' | ||||||
|  |             else: | ||||||
|  |                 # More content exists after tag. | ||||||
|  |                 self.intail = True | ||||||
|  |             item = self.cleandoc[-1] if self.cleandoc else '' | ||||||
|  |             # If we only have one newline before block element, add another | ||||||
|  |             if not item.endswith('\n\n') and item.endswith('\n'): | ||||||
|  |                 self.cleandoc.append('\n') | ||||||
|  |             self.cleandoc.append(self.md.htmlStash.store(data)) | ||||||
|  |             # Insert blank line between this and next line. | ||||||
|  |             self.cleandoc.append('\n\n') | ||||||
|  |         else: | ||||||
|  |             self.cleandoc.append(data) | ||||||
|  |  | ||||||
|  |     def handle_startendtag(self, tag, attrs): | ||||||
|  |         self.handle_empty_tag(self.get_starttag_text(), is_block=self.md.is_block_level(tag)) | ||||||
|  |  | ||||||
|  |     def handle_charref(self, name): | ||||||
|  |         self.handle_empty_tag('&#{};'.format(name), is_block=False) | ||||||
|  |  | ||||||
|  |     def handle_entityref(self, name): | ||||||
|  |         self.handle_empty_tag('&{};'.format(name), is_block=False) | ||||||
|  |  | ||||||
|  |     def handle_comment(self, data): | ||||||
|  |         self.handle_empty_tag('<!--{}-->'.format(data), is_block=True) | ||||||
|  |  | ||||||
|  |     def handle_decl(self, data): | ||||||
|  |         self.handle_empty_tag('<!{}>'.format(data), is_block=True) | ||||||
|  |  | ||||||
|  |     def handle_pi(self, data): | ||||||
|  |         self.handle_empty_tag('<?{}?>'.format(data), is_block=True) | ||||||
|  |  | ||||||
|  |     def unknown_decl(self, data): | ||||||
|  |         end = ']]>' if data.startswith('CDATA[') else ']>' | ||||||
|  |         self.handle_empty_tag('<![{}{}'.format(data, end), is_block=True) | ||||||
|  |  | ||||||
|  |     def parse_pi(self, i): | ||||||
|  |         if self.at_line_start() or self.intail: | ||||||
|  |             return super().parse_pi(i) | ||||||
|  |         # This is not the beginning of a raw block so treat as plain data | ||||||
|  |         # and avoid consuming any tags which may follow (see #1066). | ||||||
|  |         self.handle_data('<?') | ||||||
|  |         return i + 2 | ||||||
|  |  | ||||||
|  |     def parse_html_declaration(self, i): | ||||||
|  |         if self.at_line_start() or self.intail: | ||||||
|  |             return super().parse_html_declaration(i) | ||||||
|  |         # This is not the beginning of a raw block so treat as plain data | ||||||
|  |         # and avoid consuming any tags which may follow (see #1066). | ||||||
|  |         self.handle_data('<!') | ||||||
|  |         return i + 2 | ||||||
|  |  | ||||||
|  |     # The rest has been copied from base class in standard lib to address #1036. | ||||||
|  |     # As __startag_text is private, all references to it must be in this subclass. | ||||||
|  |     # The last few lines of parse_starttag are reversed so that handle_starttag | ||||||
|  |     # can override cdata_mode in certain situations (in a code span). | ||||||
|  |     __starttag_text = None | ||||||
|  |  | ||||||
|  |     def get_starttag_text(self): | ||||||
|  |         """Return full source of start tag: '<...>'.""" | ||||||
|  |         return self.__starttag_text | ||||||
|  |  | ||||||
|  |     def parse_starttag(self, i):  # pragma: no cover | ||||||
|  |         self.__starttag_text = None | ||||||
|  |         endpos = self.check_for_whole_start_tag(i) | ||||||
|  |         if endpos < 0: | ||||||
|  |             return endpos | ||||||
|  |         rawdata = self.rawdata | ||||||
|  |         self.__starttag_text = rawdata[i:endpos] | ||||||
|  |  | ||||||
|  |         # Now parse the data between i+1 and j into a tag and attrs | ||||||
|  |         attrs = [] | ||||||
|  |         match = htmlparser.tagfind_tolerant.match(rawdata, i+1) | ||||||
|  |         assert match, 'unexpected call to parse_starttag()' | ||||||
|  |         k = match.end() | ||||||
|  |         self.lasttag = tag = match.group(1).lower() | ||||||
|  |         while k < endpos: | ||||||
|  |             m = htmlparser.attrfind_tolerant.match(rawdata, k) | ||||||
|  |             if not m: | ||||||
|  |                 break | ||||||
|  |             attrname, rest, attrvalue = m.group(1, 2, 3) | ||||||
|  |             if not rest: | ||||||
|  |                 attrvalue = None | ||||||
|  |             elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ | ||||||
|  |                  attrvalue[:1] == '"' == attrvalue[-1:]:  # noqa: E127 | ||||||
|  |                 attrvalue = attrvalue[1:-1] | ||||||
|  |             if attrvalue: | ||||||
|  |                 attrvalue = htmlparser.unescape(attrvalue) | ||||||
|  |             attrs.append((attrname.lower(), attrvalue)) | ||||||
|  |             k = m.end() | ||||||
|  |  | ||||||
|  |         end = rawdata[k:endpos].strip() | ||||||
|  |         if end not in (">", "/>"): | ||||||
|  |             lineno, offset = self.getpos() | ||||||
|  |             if "\n" in self.__starttag_text: | ||||||
|  |                 lineno = lineno + self.__starttag_text.count("\n") | ||||||
|  |                 offset = len(self.__starttag_text) \ | ||||||
|  |                          - self.__starttag_text.rfind("\n")  # noqa: E127 | ||||||
|  |             else: | ||||||
|  |                 offset = offset + len(self.__starttag_text) | ||||||
|  |             self.handle_data(rawdata[i:endpos]) | ||||||
|  |             return endpos | ||||||
|  |         if end.endswith('/>'): | ||||||
|  |             # XHTML-style empty tag: <span attr="value" /> | ||||||
|  |             self.handle_startendtag(tag, attrs) | ||||||
|  |         else: | ||||||
|  |             # *** set cdata_mode first so we can override it in handle_starttag (see #1036) *** | ||||||
|  |             if tag in self.CDATA_CONTENT_ELEMENTS: | ||||||
|  |                 self.set_cdata_mode(tag) | ||||||
|  |             self.handle_starttag(tag, attrs) | ||||||
|  |         return endpos | ||||||
							
								
								
									
										892
									
								
								Source/Libs/markdown/inlinepatterns.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										892
									
								
								Source/Libs/markdown/inlinepatterns.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,892 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  |  | ||||||
|  | INLINE PATTERNS | ||||||
|  | ============================================================================= | ||||||
|  |  | ||||||
|  | Inline patterns such as *emphasis* are handled by means of auxiliary | ||||||
|  | objects, one per pattern.  Pattern objects must be instances of classes | ||||||
|  | that extend markdown.Pattern.  Each pattern object uses a single regular | ||||||
|  | expression and needs to support the following methods: | ||||||
|  |  | ||||||
|  |     pattern.getCompiledRegExp() # returns a regular expression | ||||||
|  |  | ||||||
|  |     pattern.handleMatch(m) # takes a match object and returns | ||||||
|  |                            # an ElementTree element or just plain text | ||||||
|  |  | ||||||
|  | All of python markdown's built-in patterns subclass from Pattern, | ||||||
|  | but you can add additional patterns that don't. | ||||||
|  |  | ||||||
|  | Also note that all the regular expressions used by inline must | ||||||
|  | capture the whole block.  For this reason, they all start with | ||||||
|  | '^(.*)' and end with '(.*)!'.  In the case of the built-in expressions, | ||||||
|  | Pattern takes care of adding the "^(.*)" and "(.*)!". | ||||||
|  |  | ||||||
|  | Finally, the order in which regular expressions are applied is very | ||||||
|  | important - e.g. if we first replace http://.../ links with <a> tags | ||||||
|  | and _then_ try to replace inline html, we would end up with a mess. | ||||||
|  | So, we apply the expressions in the following order: | ||||||
|  |  | ||||||
|  | * escape and backticks have to go before everything else, so | ||||||
|  |   that we can preempt any markdown patterns by escaping them. | ||||||
|  |  | ||||||
|  | * then we handle auto-links (must be done before inline html) | ||||||
|  |  | ||||||
|  | * then we handle inline HTML.  At this point we will simply | ||||||
|  |   replace all inline HTML strings with a placeholder and add | ||||||
|  |   the actual HTML to a hash. | ||||||
|  |  | ||||||
|  | * then inline images (must be done before links) | ||||||
|  |  | ||||||
|  | * then bracketed links, first regular then reference-style | ||||||
|  |  | ||||||
|  | * finally we apply strong and emphasis | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import util | ||||||
|  | from collections import namedtuple | ||||||
|  | import re | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  | try:  # pragma: no cover | ||||||
|  |     from html import entities | ||||||
|  | except ImportError:  # pragma: no cover | ||||||
|  |     import htmlentitydefs as entities | ||||||
|  |  | ||||||
|  |  | ||||||
def build_inlinepatterns(md, **kwargs):
    """ Build the default set of inline patterns for Markdown. """
    # The default processors as (processor, registry name, priority) triples,
    # registered in the same order and with the same priorities as before.
    defaults = (
        (BacktickInlineProcessor(BACKTICK_RE), 'backtick', 190),
        (EscapeInlineProcessor(ESCAPE_RE, md), 'escape', 180),
        (ReferenceInlineProcessor(REFERENCE_RE, md), 'reference', 170),
        (LinkInlineProcessor(LINK_RE, md), 'link', 160),
        (ImageInlineProcessor(IMAGE_LINK_RE, md), 'image_link', 150),
        (ImageReferenceInlineProcessor(IMAGE_REFERENCE_RE, md), 'image_reference', 140),
        (ShortReferenceInlineProcessor(REFERENCE_RE, md), 'short_reference', 130),
        (ShortImageReferenceInlineProcessor(IMAGE_REFERENCE_RE, md), 'short_image_ref', 125),
        (AutolinkInlineProcessor(AUTOLINK_RE, md), 'autolink', 120),
        (AutomailInlineProcessor(AUTOMAIL_RE, md), 'automail', 110),
        (SubstituteTagInlineProcessor(LINE_BREAK_RE, 'br'), 'linebreak', 100),
        (HtmlInlineProcessor(HTML_RE, md), 'html', 90),
        (HtmlInlineProcessor(ENTITY_RE, md), 'entity', 80),
        (SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 70),
        (AsteriskProcessor(r'\*'), 'em_strong', 60),
        (UnderscoreProcessor(r'_'), 'em_strong2', 50),
    )
    inlinePatterns = util.Registry()
    for processor, name, priority in defaults:
        inlinePatterns.register(processor, name, priority)
    return inlinePatterns
|  |  | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | The actual regular expressions for patterns | ||||||
|  | ----------------------------------------------------------------------------- | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | NOIMG = r'(?<!\!)' | ||||||
|  |  | ||||||
|  | # `e=f()` or ``e=f("`")`` | ||||||
|  | BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\2(?!`))' | ||||||
|  |  | ||||||
|  | # \< | ||||||
|  | ESCAPE_RE = r'\\(.)' | ||||||
|  |  | ||||||
|  | # *emphasis* | ||||||
|  | EMPHASIS_RE = r'(\*)([^\*]+)\1' | ||||||
|  |  | ||||||
|  | # **strong** | ||||||
|  | STRONG_RE = r'(\*{2})(.+?)\1' | ||||||
|  |  | ||||||
|  | # __smart__strong__ | ||||||
|  | SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\1(?!\w)' | ||||||
|  |  | ||||||
|  | # _smart_emphasis_ | ||||||
|  | SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\1(?!\w)' | ||||||
|  |  | ||||||
|  | # __strong _em__ | ||||||
|  | SMART_STRONG_EM_RE = r'(?<!\w)(\_)\1(?!\1)(.+?)(?<!\w)\1(?!\1)(.+?)\1{3}(?!\w)' | ||||||
|  |  | ||||||
|  | # ***strongem*** or ***em*strong** | ||||||
|  | EM_STRONG_RE = r'(\*)\1{2}(.+?)\1(.*?)\1{2}' | ||||||
|  |  | ||||||
|  | # ___strongem___ or ___em_strong__ | ||||||
|  | EM_STRONG2_RE = r'(_)\1{2}(.+?)\1(.*?)\1{2}' | ||||||
|  |  | ||||||
|  | # ***strong**em* | ||||||
|  | STRONG_EM_RE = r'(\*)\1{2}(.+?)\1{2}(.*?)\1' | ||||||
|  |  | ||||||
|  | # ___strong__em_ | ||||||
|  | STRONG_EM2_RE = r'(_)\1{2}(.+?)\1{2}(.*?)\1' | ||||||
|  |  | ||||||
|  | # **strong*em*** | ||||||
|  | STRONG_EM3_RE = r'(\*)\1(?!\1)([^*]+?)\1(?!\1)(.+?)\1{3}' | ||||||
|  |  | ||||||
|  | # [text](url) or [text](<url>) or [text](url "title") | ||||||
|  | LINK_RE = NOIMG + r'\[' | ||||||
|  |  | ||||||
|  | #  or  | ||||||
|  | IMAGE_LINK_RE = r'\!\[' | ||||||
|  |  | ||||||
|  | # [Google][3] | ||||||
|  | REFERENCE_RE = LINK_RE | ||||||
|  |  | ||||||
|  | # ![alt text][2] | ||||||
|  | IMAGE_REFERENCE_RE = IMAGE_LINK_RE | ||||||
|  |  | ||||||
|  | # stand-alone * or _ | ||||||
|  | NOT_STRONG_RE = r'((^|\s)(\*|_)(\s|$))' | ||||||
|  |  | ||||||
|  | # <http://www.123.com> | ||||||
|  | AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^<>]*)>' | ||||||
|  |  | ||||||
|  | # <me@example.com> | ||||||
|  | AUTOMAIL_RE = r'<([^<> !]+@[^@<> ]+)>' | ||||||
|  |  | ||||||
|  | # <...> | ||||||
|  | HTML_RE = r'(<(\/?[a-zA-Z][^<>@ ]*( [^<>]*)?|!--(?:(?!<!--|-->).)*--)>)' | ||||||
|  |  | ||||||
|  | # "&" (decimal) or "&" (hex) or "&" (named) | ||||||
|  | ENTITY_RE = r'(&(?:\#[0-9]+|\#x[0-9a-fA-F]+|[a-zA-Z0-9]+);)' | ||||||
|  |  | ||||||
|  | # two spaces at end of line | ||||||
|  | LINE_BREAK_RE = r'  \n' | ||||||
|  |  | ||||||
|  |  | ||||||
def dequote(string):
    """Strip one pair of matching surrounding quotes, if present."""
    quoted = (
        (string.startswith('"') and string.endswith('"')) or
        (string.startswith("'") and string.endswith("'"))
    )
    return string[1:-1] if quoted else string
|  |  | ||||||
|  |  | ||||||
class EmStrongItem(namedtuple('EmStrongItem', ['pattern', 'builder', 'tags'])):
    """Emphasis/strong pattern item: a (pattern, builder, tags) triple."""
|  |  | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | The pattern classes | ||||||
|  | ----------------------------------------------------------------------------- | ||||||
|  | """ | ||||||
|  |  | ||||||
|  |  | ||||||
class Pattern:  # pragma: no cover
    """Base class that inline patterns subclass. """

    ANCESTOR_EXCLUDES = tuple()

    def __init__(self, pattern, md=None):
        """
        Create an instance of an inline pattern.

        Keyword arguments:

        * pattern: A regular expression that matches a pattern

        """
        self.pattern = pattern
        # Wrap the pattern so that a match always spans the whole block:
        # the leading `(.*?)` and trailing `(.*)` capture the surrounding text.
        self.compiled_re = re.compile(r"^(.*?)%s(.*)$" % pattern,
                                      re.DOTALL | re.UNICODE)

        self.md = md

    @property
    @util.deprecated("Use 'md' instead.")
    def markdown(self):
        # TODO: remove this later
        return self.md

    def getCompiledRegExp(self):
        """ Return a compiled regular expression. """
        return self.compiled_re

    def handleMatch(self, m):
        """Return a ElementTree element from the given match.

        Subclasses should override this method.

        Keyword arguments:

        * m: A re match object containing a match of the pattern.

        """
        pass  # pragma: no cover

    def type(self):
        """ Return class name, to define pattern type """
        return self.__class__.__name__

    def unescape(self, text):
        """ Return unescaped text given text with an inline placeholder. """
        try:
            stash = self.md.treeprocessors['inline'].stashed_nodes
        except KeyError:  # pragma: no cover
            return text

        def get_stash(m):
            id = m.group(1)
            # NOTE(review): implicitly returns None when `id` is not stashed,
            # which would make re.sub raise — presumably placeholder ids always
            # originate from the stash. TODO confirm.
            if id in stash:
                value = stash.get(id)
                if isinstance(value, str):
                    return value
                else:
                    # An etree Element - return text content only
                    return ''.join(value.itertext())
        return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
|  |  | ||||||
|  |  | ||||||
class InlineProcessor(Pattern):
    """
    Base class for the newer style inline patterns.

    Unlike the legacy `Pattern` above, the supplied expression is compiled
    as-is (no implicit `^(.*?)` / `(.*)$` wrapping), which enables a more
    efficient and flexible search approach.
    """

    def __init__(self, pattern, md=None):
        """
        Create an instance of an inline pattern.

        Keyword arguments:

        * pattern: A regular expression that matches a pattern

        """
        self.md = md
        self.pattern = pattern
        self.compiled_re = re.compile(pattern, re.DOTALL | re.UNICODE)
        # Api for Markdown to pass safe_mode into instance
        self.safe_mode = False

    def handleMatch(self, m, data):
        """
        Return an ElementTree element (or text) for the given match, along
        with the start and end index of the matched region.

        Returning `None` for `start` and/or `end` signals that this
        processor found no valid region of text.

        Subclasses should override this method.

        Keyword arguments:

        * m: A re match object containing a match of the pattern.
        * data: The buffer currently under analysis.

        Returns:

        * el: The ElementTree element, text or None.
        * start: The start of the matched region, or None.
        * end: The end of the matched region, or None.

        """
        pass  # pragma: no cover
|  |  | ||||||
|  |  | ||||||
class SimpleTextPattern(Pattern):  # pragma: no cover
    """ Return a simple text of group(2) of a Pattern. """
    def handleMatch(self, m):
        # group(2) is the first user-defined group; group(1) is the text
        # captured by the wrapper's leading `(.*?)` (see Pattern.__init__).
        return m.group(2)
|  |  | ||||||
|  |  | ||||||
class SimpleTextInlineProcessor(InlineProcessor):
    """ Return a simple text of group(1) of a Pattern. """
    def handleMatch(self, m, data):
        # Replace the whole match with its first captured group.
        return m.group(1), m.start(0), m.end(0)
|  |  | ||||||
|  |  | ||||||
class EscapeInlineProcessor(InlineProcessor):
    """ Replace a backslash-escaped character with a placeholder sequence. """

    def handleMatch(self, m, data):
        """Return a STX-ord-ETX placeholder for escapable characters, else None."""
        char = m.group(1)
        if char not in self.md.ESCAPED_CHARS:
            # Not an escapable character: leave the text untouched.
            return None, m.start(0), m.end(0)
        return '{}{}{}'.format(util.STX, ord(char), util.ETX), m.start(0), m.end(0)
|  |  | ||||||
|  |  | ||||||
class SimpleTagPattern(Pattern):  # pragma: no cover
    """
    Return element of type `tag` with a text attribute of group(3)
    of a Pattern.

    """
    def __init__(self, pattern, tag):
        # `tag` is the element name to create on a match.
        Pattern.__init__(self, pattern)
        self.tag = tag

    def handleMatch(self, m):
        el = etree.Element(self.tag)
        el.text = m.group(3)
        return el
|  |  | ||||||
|  |  | ||||||
class SimpleTagInlineProcessor(InlineProcessor):
    """
    Return element of type `tag` with a text attribute of group(2)
    of a Pattern.

    """
    def __init__(self, pattern, tag):
        # `tag` names the element created on a match.
        super().__init__(pattern)
        self.tag = tag

    def handleMatch(self, m, data):  # pragma: no cover
        element = etree.Element(self.tag)
        element.text = m.group(2)
        return element, m.start(0), m.end(0)
|  |  | ||||||
|  |  | ||||||
class SubstituteTagPattern(SimpleTagPattern):  # pragma: no cover
    """ Return an element of type `tag` with no children. """
    def handleMatch(self, m):
        # The matched text is discarded entirely; only the bare tag remains.
        return etree.Element(self.tag)
|  |  | ||||||
|  |  | ||||||
class SubstituteTagInlineProcessor(SimpleTagInlineProcessor):
    """ Return an element of type `tag` with no children. """
    def handleMatch(self, m, data):
        # The matched text is discarded entirely; only the bare tag remains.
        return etree.Element(self.tag), m.start(0), m.end(0)
|  |  | ||||||
|  |  | ||||||
class BacktickInlineProcessor(InlineProcessor):
    """ Return a `<code>` element containing the matching text. """
    def __init__(self, pattern):
        super().__init__(pattern)
        # Placeholder emitted in place of an escaped backslash.
        self.ESCAPED_BSLASH = '{}{}{}'.format(util.STX, ord('\\'), util.ETX)
        self.tag = 'code'

    def handleMatch(self, m, data):
        code = m.group(3)
        if code:
            el = etree.Element(self.tag)
            el.text = util.AtomicString(util.code_escape(code.strip()))
            return el, m.start(0), m.end(0)
        # No backtick-delimited content: only escaped backslashes matched.
        return m.group(1).replace('\\\\', self.ESCAPED_BSLASH), m.start(0), m.end(0)
|  |  | ||||||
|  |  | ||||||
class DoubleTagPattern(SimpleTagPattern):  # pragma: no cover
    """Return a ElementTree element nested in tag2 nested in tag1.

    Useful for strong emphasis etc.

    """
    def handleMatch(self, m):
        # self.tag holds two comma-separated names, e.g. "strong,em".
        tag1, tag2 = self.tag.split(",")
        el1 = etree.Element(tag1)
        el2 = etree.SubElement(el1, tag2)
        el2.text = m.group(3)
        # An optional extra group becomes the inner element's tail text.
        if len(m.groups()) == 5:
            el2.tail = m.group(4)
        return el1
|  |  | ||||||
|  |  | ||||||
class DoubleTagInlineProcessor(SimpleTagInlineProcessor):
    """Produce `<tag1><tag2>text</tag2></tag1>` nesting.

    `self.tag` holds two comma-separated tag names; useful for
    strong emphasis etc.
    """
    def handleMatch(self, m, data):  # pragma: no cover
        outer_tag, inner_tag = self.tag.split(",")
        outer = etree.Element(outer_tag)
        inner = etree.SubElement(outer, inner_tag)
        inner.text = m.group(2)
        # An optional third capture group supplies trailing text placed
        # after the inner element.
        if len(m.groups()) == 3:
            inner.tail = m.group(3)
        return outer, m.start(0), m.end(0)
|  |  | ||||||
|  |  | ||||||
class HtmlInlineProcessor(InlineProcessor):
    """Stash raw inline HTML and substitute a placeholder for it."""
    def handleMatch(self, m, data):
        raw = self.unescape(m.group(1))
        placeholder = self.md.htmlStash.store(raw)
        return placeholder, m.start(0), m.end(0)

    def unescape(self, text):
        """Replace inline placeholders in `text` with their stashed markup."""
        try:
            stash = self.md.treeprocessors['inline'].stashed_nodes
        except KeyError:  # pragma: no cover
            return text

        def substitute(match):
            node_id = match.group(1)
            node = stash.get(node_id)
            if node is None:
                return None
            try:
                return self.md.serializer(node)
            except Exception:
                # Serialization failed: fall back to a backslash-escaped repr.
                return r'\%s' % node

        return util.INLINE_PLACEHOLDER_RE.sub(substitute, text)
|  |  | ||||||
|  |  | ||||||
class AsteriskProcessor(InlineProcessor):
    """Emphasis processor for handling strong and em matches inside asterisks."""

    # Ordered from most specific (***both***) to least specific (*em*).
    # `parse_sub_patterns` relies on this ordering: when recursing into a
    # matched span it only considers patterns listed *after* the one that
    # produced the parent element.
    PATTERNS = [
        EmStrongItem(re.compile(EM_STRONG_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
        EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
        EmStrongItem(re.compile(STRONG_EM3_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
        EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
        EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
    ]

    def build_single(self, m, tag, idx):
        """Return single tag.

        Builds `<tag>` from group 2 of the match and recursively parses the
        inner text for nested emphasis.
        """
        el1 = etree.Element(tag)
        text = m.group(2)
        self.parse_sub_patterns(text, el1, None, idx)
        return el1

    def build_double(self, m, tags, idx):
        """Return double tag.

        Builds `<tag1><tag2>…</tag2>…</tag1>` from groups 2 and (optionally) 3;
        `tags` is a comma-separated pair of tag names.
        """

        tag1, tag2 = tags.split(",")
        el1 = etree.Element(tag1)
        el2 = etree.Element(tag2)
        text = m.group(2)
        self.parse_sub_patterns(text, el2, None, idx)
        el1.append(el2)
        # A third group holds text following the inner tag but still inside
        # the outer one.
        if len(m.groups()) == 3:
            text = m.group(3)
            self.parse_sub_patterns(text, el1, el2, idx)
        return el1

    def build_double2(self, m, tags, idx):
        """Return double tags (variant 2): `<strong>text <em>text</em></strong>`."""

        tag1, tag2 = tags.split(",")
        el1 = etree.Element(tag1)
        el2 = etree.Element(tag2)
        # Group 2 is the outer text, group 3 the inner-tag text.
        text = m.group(2)
        self.parse_sub_patterns(text, el1, None, idx)
        text = m.group(3)
        el1.append(el2)
        self.parse_sub_patterns(text, el2, None, idx)
        return el1

    def parse_sub_patterns(self, data, parent, last, idx):
        """
        Parses sub patterns.

        `data` (`str`):
            text to evaluate.

        `parent` (`etree.Element`):
            Parent to attach text and sub elements to.

        `last` (`etree.Element`):
            Last appended child to parent. Can also be None if parent has no children.

        `idx` (`int`):
            Current pattern index that was used to evaluate the parent.

        """

        # `offset` marks the start of unconsumed plain text; `pos` is the scan
        # cursor. Both advance past each matched emphasis span together.
        offset = 0
        pos = 0

        length = len(data)
        while pos < length:
            # Find the start of potential emphasis or strong tokens
            if self.compiled_re.match(data, pos):
                matched = False
                # See if we can match an emphasis/strong pattern
                for index, item in enumerate(self.PATTERNS):
                    # Only evaluate patterns that are after what was used on the parent
                    if index <= idx:
                        continue
                    m = item.pattern.match(data, pos)
                    if m:
                        # Append child nodes to parent
                        # Text nodes should be appended to the last
                        # child if present, and if not, it should
                        # be added as the parent's text node.
                        text = data[offset:m.start(0)]
                        if text:
                            if last is not None:
                                last.tail = text
                            else:
                                parent.text = text
                        el = self.build_element(m, item.builder, item.tags, index)
                        parent.append(el)
                        last = el
                        # Move our position past the matched hunk
                        offset = pos = m.end(0)
                        matched = True
                if not matched:
                    # We matched nothing, move on to the next character
                    pos += 1
            else:
                # Increment position as no potential emphasis start was found.
                pos += 1

        # Append any leftover text as a text node.
        text = data[offset:]
        if text:
            if last is not None:
                last.tail = text
            else:
                parent.text = text

    def build_element(self, m, builder, tags, index):
        """Element builder.

        Dispatches to the right `build_*` method based on the pattern's
        declared builder kind.
        """

        if builder == 'double2':
            return self.build_double2(m, tags, index)
        elif builder == 'double':
            return self.build_double(m, tags, index)
        else:
            return self.build_single(m, tags, index)

    def handleMatch(self, m, data):
        """Parse patterns.

        Tries each pattern in `PATTERNS` at the match position and builds an
        element from the first one that matches; returns `(None, None, None)`
        equivalents when nothing matches.
        """

        el = None
        start = None
        end = None

        for index, item in enumerate(self.PATTERNS):
            m1 = item.pattern.match(data, m.start(0))
            if m1:
                start = m1.start(0)
                end = m1.end(0)
                el = self.build_element(m1, item.builder, item.tags, index)
                break
        return el, start, end
|  |  | ||||||
|  |  | ||||||
class UnderscoreProcessor(AsteriskProcessor):
    """Emphasis processor for handling strong and em matches inside underscores."""

    # Same builder scheme as `AsteriskProcessor`, but using the "smart"
    # underscore regex variants. Order matters: most specific patterns first,
    # and `parse_sub_patterns` only recurses into later entries.
    PATTERNS = [
        EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
        EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
        EmStrongItem(re.compile(SMART_STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
        EmStrongItem(re.compile(SMART_STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
        EmStrongItem(re.compile(SMART_EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
    ]
|  |  | ||||||
|  |  | ||||||
class LinkInlineProcessor(InlineProcessor):
    """ Return a link element from the given match. """
    # Fast path for `(<link> "title")` — angle-bracketed link with optional
    # quoted title. Anything else falls through to manual bracket tracking.
    RE_LINK = re.compile(r'''\(\s*(?:(<[^<>]*>)\s*(?:('[^']*'|"[^"]*")\s*)?\))?''', re.DOTALL | re.UNICODE)
    RE_TITLE_CLEAN = re.compile(r'\s')

    def handleMatch(self, m, data):
        # Parse `[text]` first, then the `(link "title")` part that follows.
        text, index, handled = self.getText(data, m.end(0))

        if not handled:
            return None, None, None

        href, title, index, handled = self.getLink(data, index)
        if not handled:
            return None, None, None

        el = etree.Element("a")
        el.text = text

        el.set("href", href)

        if title is not None:
            el.set("title", title)

        return el, m.start(0), index

    def getLink(self, data, index):
        """Parse data between `()` of `[Text]()` allowing recursive `()`.

        Returns `(href, title, index, handled)` where `index` is the position
        just past the closing parenthesis and `handled` is False when the
        parentheses never resolve.
        """

        href = ''
        title = None
        handled = False

        m = self.RE_LINK.match(data, pos=index)
        if m and m.group(1):
            # Matches [Text](<link> "title")
            href = m.group(1)[1:-1].strip()
            if m.group(2):
                title = m.group(2)[1:-1]
            index = m.end(0)
            handled = True
        elif m:
            # Track bracket nesting and index in string
            bracket_count = 1
            backtrack_count = 1
            start_index = m.end()
            index = start_index
            last_bracket = -1

            # Primary (first found) quote tracking.
            quote = None
            start_quote = -1
            exit_quote = -1
            ignore_matches = False

            # Secondary (second found) quote tracking.
            alt_quote = None
            start_alt_quote = -1
            exit_alt_quote = -1

            # Track last character
            last = ''

            # NOTE: inside the loop `index` always equals `pos` while the
            # current character is examined; it is bumped at the bottom so the
            # `index + 1` offsets below point just past the current character.
            for pos in range(index, len(data)):
                c = data[pos]
                if c == '(':
                    # Count nested (
                    # Don't increment the bracket count if we are sure we're in a title.
                    if not ignore_matches:
                        bracket_count += 1
                    elif backtrack_count > 0:
                        backtrack_count -= 1
                elif c == ')':
                    # Match nested ) to (
                    # Don't decrement if we are sure we are in a title that is unclosed.
                    if ((exit_quote != -1 and quote == last) or (exit_alt_quote != -1 and alt_quote == last)):
                        bracket_count = 0
                    elif not ignore_matches:
                        bracket_count -= 1
                    elif backtrack_count > 0:
                        backtrack_count -= 1
                        # We've found our backup end location if the title doesn't resolve.
                        if backtrack_count == 0:
                            last_bracket = index + 1

                elif c in ("'", '"'):
                    # Quote has started
                    if not quote:
                        # We'll assume we are now in a title.
                        # Brackets are quoted, so no need to match them (except for the final one).
                        ignore_matches = True
                        backtrack_count = bracket_count
                        bracket_count = 1
                        start_quote = index + 1
                        quote = c
                    # Secondary quote (in case the first doesn't resolve): [text](link'"title")
                    elif c != quote and not alt_quote:
                        start_alt_quote = index + 1
                        alt_quote = c
                    # Update primary quote match
                    elif c == quote:
                        exit_quote = index + 1
                    # Update secondary quote match
                    elif alt_quote and c == alt_quote:
                        exit_alt_quote = index + 1

                index += 1

                # Link is closed, so let's break out of the loop
                if bracket_count == 0:
                    # Get the title if we closed a title string right before link closed
                    if exit_quote >= 0 and quote == last:
                        href = data[start_index:start_quote - 1]
                        title = ''.join(data[start_quote:exit_quote - 1])
                    elif exit_alt_quote >= 0 and alt_quote == last:
                        href = data[start_index:start_alt_quote - 1]
                        title = ''.join(data[start_alt_quote:exit_alt_quote - 1])
                    else:
                        href = data[start_index:index - 1]
                    break

                # Remember the last non-space character: the `)` handler above
                # uses it to decide whether a quote closed immediately before.
                if c != ' ':
                    last = c

            # We have a scenario: [test](link"notitle)
            # When we enter a string, we stop tracking bracket resolution in the main counter,
            # but we do keep a backup counter up until we discover where we might resolve all brackets
            # if the title string fails to resolve.
            if bracket_count != 0 and backtrack_count == 0:
                href = data[start_index:last_bracket - 1]
                index = last_bracket
                bracket_count = 0

            handled = bracket_count == 0

        if title is not None:
            # Normalize whitespace in the title and strip surrounding quotes.
            title = self.RE_TITLE_CLEAN.sub(' ', dequote(self.unescape(title.strip())))

        href = self.unescape(href).strip()

        return href, title, index, handled

    def getText(self, data, index):
        """Parse the content between `[]` of the start of an image or link
        resolving nested square brackets.

        Returns `(text, index, handled)`; `handled` is False when the
        brackets never balance before the end of `data`.
        """
        bracket_count = 1
        text = []
        for pos in range(index, len(data)):
            c = data[pos]
            if c == ']':
                bracket_count -= 1
            elif c == '[':
                bracket_count += 1
            index += 1
            if bracket_count == 0:
                break
            text.append(c)
        return ''.join(text), index, bracket_count == 0
|  |  | ||||||
|  |  | ||||||
class ImageInlineProcessor(LinkInlineProcessor):
    """Build an `<img>` element from an image link match."""

    def handleMatch(self, m, data):
        # Alt text comes from `[...]`, src/title from the `(...)` that follows.
        alt_text, index, ok = self.getText(data, m.end(0))
        if not ok:
            return None, None, None

        src, title, index, ok = self.getLink(data, index)
        if not ok:
            return None, None, None

        img = etree.Element("img")
        img.set("src", src)
        if title is not None:
            img.set("title", title)
        img.set('alt', self.unescape(alt_text))
        return img, m.start(0), index
|  |  | ||||||
|  |  | ||||||
class ReferenceInlineProcessor(LinkInlineProcessor):
    """Resolve `[text][id]` against stored references and emit a link."""
    # Collapses any run of whitespace (including newlines) inside an id.
    NEWLINE_CLEANUP_RE = re.compile(r'\s+', re.MULTILINE)

    RE_LINK = re.compile(r'\s?\[([^\]]*)\]', re.DOTALL | re.UNICODE)

    def handleMatch(self, m, data):
        label, index, ok = self.getText(data, m.end(0))
        if not ok:
            return None, None, None

        ref_id, end, ok = self.evalId(data, index, label)
        if not ok:
            return None, None, None

        # Clean up linebreaks in the id before lookup.
        ref_id = self.NEWLINE_CLEANUP_RE.sub(' ', ref_id)
        if ref_id not in self.md.references:  # ignore undefined refs
            return None, m.start(0), end

        href, title = self.md.references[ref_id]
        return self.makeTag(href, title, label), m.start(0), end

    def evalId(self, data, index, text):
        """
        Evaluate the id portion of [ref][id].

        If [ref][] use [ref].
        """
        m = self.RE_LINK.match(data, pos=index)
        if m is None:
            return None, index, False
        found = m.group(1).lower()
        # An empty `[]` means the link text doubles as the id.
        return (found if found else text.lower()), m.end(0), True

    def makeTag(self, href, title, text):
        """Assemble the `<a>` element for a resolved reference."""
        link = etree.Element('a')
        link.set('href', href)
        if title:
            link.set('title', title)
        link.text = text
        return link
|  |  | ||||||
|  |  | ||||||
class ShortReferenceInlineProcessor(ReferenceInlineProcessor):
    """Short reference form: [google]."""

    def evalId(self, data, index, text):
        """Use the link text itself (lower-cased) as the reference id."""
        return text.lower(), index, True
|  |  | ||||||
|  |  | ||||||
class ImageReferenceInlineProcessor(ReferenceInlineProcessor):
    """Resolve a stored reference into an `<img>` element."""

    def makeTag(self, href, title, text):
        """Assemble the `<img>` element for a resolved image reference."""
        image = etree.Element("img")
        image.set("src", href)
        if title:
            image.set("title", title)
        image.set("alt", self.unescape(text))
        return image
|  |  | ||||||
|  |  | ||||||
class ShortImageReferenceInlineProcessor(ImageReferenceInlineProcessor):
    """ Short form of image reference: ![ref]. """
    def evalId(self, data, index, text):
        """Evaluate the id of [ref].

        The link text itself serves as the reference id; `data` and `index`
        are returned unconsumed.
        """

        return text.lower(), index, True
|  |  | ||||||
|  |  | ||||||
class AutolinkInlineProcessor(InlineProcessor):
    """Turn an autolink (`<http://example/com>`) into an `<a>` element."""
    def handleMatch(self, m, data):
        url = m.group(1)
        link = etree.Element("a")
        link.set('href', self.unescape(url))
        # AtomicString keeps the link text from any further inline parsing.
        link.text = util.AtomicString(url)
        return link, m.start(0), m.end(0)
|  |  | ||||||
|  |  | ||||||
class AutomailInlineProcessor(InlineProcessor):
    """
    Turn an automail link (`<foo@example.com>`) into an entity-obfuscated
    mailto anchor element.
    """
    def handleMatch(self, m, data):
        el = etree.Element('a')
        email = self.unescape(m.group(1))
        # A literal "mailto:" prefix in the source is stripped here and
        # re-added for the href below.
        prefix = "mailto:"
        if email.startswith(prefix):
            email = email[len(prefix):]

        def encode_char(code):
            """Entity-encode a codepoint, preferring a named entity."""
            name = entities.codepoint2name.get(code)
            if name:
                return "{}{};".format(util.AMP_SUBSTITUTE, name)
            return "%s#%d;" % (util.AMP_SUBSTITUTE, code)

        # Visible text: mixed named/numeric entities for every character.
        el.text = util.AtomicString(''.join(encode_char(ord(ch)) for ch in email))

        # href: strictly numeric entities for every character, prefix included.
        href = ''.join(util.AMP_SUBSTITUTE + '#%d;' % ord(ch) for ch in prefix + email)
        el.set('href', href)
        return el, m.start(0), m.end(0)
							
								
								
									
										245
									
								
								Source/Libs/markdown/pep562.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										245
									
								
								Source/Libs/markdown/pep562.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,245 @@ | |||||||
|  | """ | ||||||
|  | Backport of PEP 562. | ||||||
|  |  | ||||||
|  | https://pypi.org/search/?q=pep562 | ||||||
|  |  | ||||||
|  | Licensed under MIT | ||||||
|  | Copyright (c) 2018 Isaac Muse <isaacmuse@gmail.com> | ||||||
|  |  | ||||||
|  | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated | ||||||
|  | documentation files (the "Software"), to deal in the Software without restriction, including without limitation | ||||||
|  | the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||||||
|  | and to permit persons to whom the Software is furnished to do so, subject to the following conditions: | ||||||
|  |  | ||||||
|  | The above copyright notice and this permission notice shall be included in all copies or substantial portions | ||||||
|  | of the Software. | ||||||
|  |  | ||||||
|  | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED | ||||||
|  | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||||||
|  | THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF | ||||||
|  | CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||||
|  | IN THE SOFTWARE. | ||||||
|  | """ | ||||||
|  | import sys | ||||||
|  | from collections import namedtuple | ||||||
|  | import re | ||||||
|  |  | ||||||
__all__ = ('Pep562',)

# PEP 440-style version string parser:
# major[.minor[.micro]][{a|b|rc}N][.postN][.devN]
# (epochs and local version segments are intentionally unsupported).
RE_VER = re.compile(
    r'''(?x)
    (?P<major>\d+)(?:\.(?P<minor>\d+))?(?:\.(?P<micro>\d+))?
    (?:(?P<type>a|b|rc)(?P<pre>\d+))?
    (?:\.post(?P<post>\d+))?
    (?:\.dev(?P<dev>\d+))?
    '''
)

# Maps a release type to its PEP 440 pre-release letter(s); final and plain
# development releases carry no letter. Keys are ordered so that plain string
# comparison sorts ".dev*" before "alpha" (see `Version.__new__`).
REL_MAP = {
    ".dev": "",
    ".dev-alpha": "a",
    ".dev-beta": "b",
    ".dev-candidate": "rc",
    "alpha": "a",
    "beta": "b",
    "candidate": "rc",
    "final": ""
}

# Maps a release type to the corresponding Trove "Development Status"
# classifier string (for setup metadata).
DEV_STATUS = {
    ".dev": "2 - Pre-Alpha",
    ".dev-alpha": "2 - Pre-Alpha",
    ".dev-beta": "2 - Pre-Alpha",
    ".dev-candidate": "2 - Pre-Alpha",
    "alpha": "3 - Alpha",
    "beta": "4 - Beta",
    "candidate": "4 - Beta",
    "final": "5 - Production/Stable"
}

# Inverse of the pre-release letters back to their long release names.
PRE_REL_MAP = {"a": 'alpha', "b": 'beta', "rc": 'candidate'}
|  |  | ||||||
|  |  | ||||||
class Version(namedtuple("Version", ["major", "minor", "micro", "release", "pre", "post", "dev"])):
    """
    Get the version (PEP 440).

    A biased approach to the PEP 440 semantic version.

    Provides a tuple structure which is sorted for comparisons `v1 > v2` etc.
      (major, minor, micro, release type, pre-release build, post-release build, development release build)
    Release types are named in is such a way they are comparable with ease.
    Accessors to check if a development, pre-release, or post-release build. Also provides accessor to get
    development status for setup files.

    How it works (currently):

    - You must specify a release type as either `final`, `alpha`, `beta`, or `candidate`.
    - To define a development release, you can use either `.dev`, `.dev-alpha`, `.dev-beta`, or `.dev-candidate`.
      The dot is used to ensure all development specifiers are sorted before `alpha`.
      You can specify a `dev` number for development builds, but do not have to as implicit development releases
      are allowed.
    - You must specify a `pre` value greater than zero if using a prerelease as this project (not PEP 440) does not
      allow implicit prereleases.
    - You can optionally set `post` to a value greater than zero to make the build a post release. While post releases
      are technically allowed in prereleases, it is strongly discouraged, so we are rejecting them. It should be
      noted that we do not allow `post0` even though PEP 440 does not restrict this. This project specifically
      does not allow implicit post releases.
    - It should be noted that we do not support epochs `1!` or local versions `+some-custom.version-1`.

    Acceptable version releases:

    ```
    Version(1, 0, 0, "final")                    1.0
    Version(1, 2, 0, "final")                    1.2
    Version(1, 2, 3, "final")                    1.2.3
    Version(1, 2, 0, ".dev-alpha", pre=4)        1.2a4
    Version(1, 2, 0, ".dev-beta", pre=4)         1.2b4
    Version(1, 2, 0, ".dev-candidate", pre=4)    1.2rc4
    Version(1, 2, 0, "final", post=1)            1.2.post1
    Version(1, 2, 3, ".dev")                     1.2.3.dev0
    Version(1, 2, 3, ".dev", dev=1)              1.2.3.dev1
    ```

    """

    def __new__(cls, major, minor, micro, release="final", pre=0, post=0, dev=0):
        """Validate version info.

        Raises `ValueError` for non-integer/negative parts, unknown release
        types, or combinations forbidden by the rules in the class docstring.
        """

        # Ensure all parts are positive integers. `dev` is included here: the
        # error message promises every part except 'release' is an integer,
        # and an unvalidated dev build number would silently slip through.
        for value in (major, minor, micro, pre, post, dev):
            if not (isinstance(value, int) and value >= 0):
                raise ValueError("All version parts except 'release' should be integers.")

        if release not in REL_MAP:
            raise ValueError("'{}' is not a valid release type.".format(release))

        # Ensure valid pre-release (we do not allow implicit pre-releases).
        # String comparison is deliberate: REL_MAP keys are named so that
        # ".dev*" < "alpha" < "beta" < "candidate" < "final" lexicographically.
        if ".dev-candidate" < release < "final":
            if pre == 0:
                raise ValueError("Implicit pre-releases not allowed.")
            elif dev:
                raise ValueError("Version is not a development release.")
            elif post:
                raise ValueError("Post-releases are not allowed with pre-releases.")

        # Ensure valid development or development/pre release
        elif release < "alpha":
            if release > ".dev" and pre == 0:
                raise ValueError("Implicit pre-release not allowed.")
            elif post:
                raise ValueError("Post-releases are not allowed with pre-releases.")

        # Ensure a valid normal release
        else:
            if pre:
                raise ValueError("Version is not a pre-release.")
            elif dev:
                raise ValueError("Version is not a development release.")

        return super().__new__(cls, major, minor, micro, release, pre, post, dev)

    def _is_pre(self):
        """Is prerelease."""

        return self.pre > 0

    def _is_dev(self):
        """Is development."""

        # All development release names sort before "alpha" by construction.
        return bool(self.release < "alpha")

    def _is_post(self):
        """Is post."""

        return self.post > 0

    def _get_dev_status(self):  # pragma: no cover
        """Get development status string (Trove classifier)."""

        return DEV_STATUS[self.release]

    def _get_canonical(self):
        """Get the canonical output string."""

        # Assemble major, minor, micro version and append `pre`, `post`, or `dev` if needed..
        if self.micro == 0:
            ver = "{}.{}".format(self.major, self.minor)
        else:
            ver = "{}.{}.{}".format(self.major, self.minor, self.micro)
        if self._is_pre():
            ver += '{}{}'.format(REL_MAP[self.release], self.pre)
        if self._is_post():
            ver += ".post{}".format(self.post)
        if self._is_dev():
            ver += ".dev{}".format(self.dev)

        return ver
|  |  | ||||||
|  |  | ||||||
def parse_version(ver, pre=False):
    """
    Parse a version string into a comparable `Version` tuple.

    Arguments:
        ver: Version string; must match the module-level `RE_VER` pattern.
        pre: Unused. Retained for backward compatibility with existing
             callers; the pre-release number is always taken from `ver`.

    Returns a `Version` namedtuple.
    """

    m = RE_VER.match(ver)

    # Handle major, minor, micro; missing minor/micro default to 0.
    major = int(m.group('major'))
    minor = int(m.group('minor')) if m.group('minor') else 0
    micro = int(m.group('micro')) if m.group('micro') else 0

    # Handle pre releases
    if m.group('type'):
        release = PRE_REL_MAP[m.group('type')]
        pre = int(m.group('pre'))
    else:
        release = "final"
        pre = 0

    # Handle development releases.  A dev release of a pre-release gets a
    # combined ".dev-<type>" tag so it sorts before the plain pre-release.
    # (The original code also pre-assigned `dev` from the raw match group
    # here; that value was dead, immediately overwritten below.)
    if m.group('dev'):
        dev = int(m.group('dev'))
        release = '.dev-' + release if pre else '.dev'
    else:
        dev = 0

    # Handle post
    post = int(m.group('post')) if m.group('post') else 0

    return Version(major, minor, micro, release, pre, post, dev)
|  |  | ||||||
|  |  | ||||||
class Pep562:
    """
    Backport of PEP 562 <https://pypi.org/search/?q=pep562>.

    Wraps a module in an object that honours module-level `__getattr__` and
    `__dir__` overrides, replacing the module in `sys.modules` so attribute
    access goes through this wrapper.
    """

    def __init__(self, name):
        """Capture the module's `__getattr__`/`__dir__` hooks and take its place in `sys.modules`."""

        self._module = sys.modules[name]
        self._get_attr = getattr(self._module, '__getattr__', None)
        self._get_dir = getattr(self._module, '__dir__', None)
        sys.modules[name] = self

    def __dir__(self):
        """Delegate to the module's `__dir__` override when present, else `dir()` the module."""

        if self._get_dir:
            return self._get_dir()
        return dir(self._module)

    def __getattr__(self, name):
        """Look the attribute up on the module first, then fall back to its `__getattr__` hook."""

        try:
            return getattr(self._module, name)
        except AttributeError:
            if self._get_attr:
                return self._get_attr(name)
            raise
|  |  | ||||||
|  |  | ||||||
# Package version; `__version__` is its canonical string form.
__version_info__ = Version(1, 0, 0, "final")
__version__ = __version_info__._get_canonical()
							
								
								
									
										134
									
								
								Source/Libs/markdown/postprocessors.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										134
									
								
								Source/Libs/markdown/postprocessors.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,134 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  |  | ||||||
|  | POST-PROCESSORS | ||||||
|  | ============================================================================= | ||||||
|  |  | ||||||
|  | Markdown also allows post-processors, which are similar to preprocessors in | ||||||
|  | that they need to implement a "run" method. However, they are run after core | ||||||
|  | processing. | ||||||
|  |  | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from collections import OrderedDict | ||||||
|  | from . import util | ||||||
|  | import re | ||||||
|  |  | ||||||
|  |  | ||||||
def build_postprocessors(md, **kwargs):
    """ Build the default postprocessors for Markdown. """
    registry = util.Registry()
    defaults = (
        (RawHtmlPostprocessor(md), 'raw_html', 30),
        (AndSubstitutePostprocessor(), 'amp_substitute', 20),
        (UnescapePostprocessor(), 'unescape', 10),
    )
    for processor, name, priority in defaults:
        registry.register(processor, name, priority)
    return registry
|  |  | ||||||
|  |  | ||||||
class Postprocessor(util.Processor):
    """
    Base class for postprocessors, which run after the ElementTree is
    converted back into text.

    Each Postprocessor implements a "run" method that takes the whole
    document as a text string, modifies it as necessary, and returns a
    text string.

    Postprocessors must extend markdown.Postprocessor.
    """

    def run(self, text):
        """
        Subclasses must override this method: it receives the html document
        as a single text string and returns a (possibly modified) string.
        """
        pass  # pragma: no cover
|  |  | ||||||
|  |  | ||||||
class RawHtmlPostprocessor(Postprocessor):
    """ Restore raw html to the document. """

    # Captures the tag name of the first tag in a stashed fragment,
    # e.g. "div" from "</div>" or "!--" from a comment opener.
    BLOCK_LEVEL_REGEX = re.compile(r'^\<\/?([^ >]+)')

    def run(self, text):
        """ Iterate over html stash and restore html. """
        replacements = OrderedDict()
        for i in range(self.md.htmlStash.html_counter):
            html = self.stash_to_string(self.md.htmlStash.rawHtmlBlocks[i])
            # Block-level fragments may have been wrapped in <p> tags during
            # parsing; register the wrapped form in addition to (not instead
            # of) the bare placeholder.
            if self.isblocklevel(html):
                replacements["<p>{}</p>".format(
                    self.md.htmlStash.get_placeholder(i))] = html
            replacements[self.md.htmlStash.get_placeholder(i)] = html

        def substitute_match(m):
            key = m.group(0)

            if key not in replacements:
                # A <p>-wrapped match whose wrapped form was not registered
                # (inline-level html): restore the inner placeholder and
                # keep the surrounding <p> tags.
                if key[3:-4] in replacements:
                    return f'<p>{ replacements[key[3:-4]] }</p>'
                else:
                    # Placeholder-shaped text not in the stash: leave as-is.
                    return key

            return replacements[key]

        if replacements:
            # Match either a <p>-wrapped placeholder or a bare one.
            base_placeholder = util.HTML_PLACEHOLDER % r'([0-9]+)'
            pattern = re.compile(f'<p>{ base_placeholder }</p>|{ base_placeholder }')
            processed_text = pattern.sub(substitute_match, text)
        else:
            return text

        if processed_text == text:
            return processed_text
        else:
            # Restored html may itself contain placeholders; recurse until
            # the text stops changing.
            return self.run(processed_text)

    def isblocklevel(self, html):
        """Return True when the fragment's first tag is block-level (or a comment/declaration/php tag)."""
        m = self.BLOCK_LEVEL_REGEX.match(html)
        if m:
            if m.group(1)[0] in ('!', '?', '@', '%'):
                # Comment, php etc...
                return True
            return self.md.is_block_level(m.group(1))
        return False

    def stash_to_string(self, text):
        """ Convert a stashed object to a string. """
        return str(text)
|  |  | ||||||
|  |  | ||||||
class AndSubstitutePostprocessor(Postprocessor):
    """ Restore valid entities """

    def run(self, text):
        """Replace every ampersand placeholder in `text` with a literal "&"."""
        return text.replace(util.AMP_SUBSTITUTE, "&")
|  |  | ||||||
|  |  | ||||||
class UnescapePostprocessor(Postprocessor):
    """ Restore escaped chars """

    # An escaped character is stashed as STX + <ordinal> + ETX.
    RE = re.compile(r'{}(\d+){}'.format(util.STX, util.ETX))

    def unescape(self, m):
        """Turn one stashed ordinal back into its character."""
        code_point = int(m.group(1))
        return chr(code_point)

    def run(self, text):
        """Replace every stashed ordinal sequence in `text` with its character."""
        return self.RE.sub(self.unescape, text)
							
								
								
									
										82
									
								
								Source/Libs/markdown/preprocessors.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										82
									
								
								Source/Libs/markdown/preprocessors.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,82 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  |  | ||||||
|  | PRE-PROCESSORS | ||||||
|  | ============================================================================= | ||||||
|  |  | ||||||
|  | Preprocessors work on source text before we start doing anything too | ||||||
|  | complicated. | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | from . import util | ||||||
|  | from .htmlparser import HTMLExtractor | ||||||
|  | import re | ||||||
|  |  | ||||||
|  |  | ||||||
def build_preprocessors(md, **kwargs):
    """ Build the default set of preprocessors used by Markdown. """
    registry = util.Registry()
    defaults = (
        (NormalizeWhitespace(md), 'normalize_whitespace', 30),
        (HtmlBlockPreprocessor(md), 'html_block', 20),
    )
    for processor, name, priority in defaults:
        registry.register(processor, name, priority)
    return registry
|  |  | ||||||
|  |  | ||||||
class Preprocessor(util.Processor):
    """
    Base class for preprocessors, which run after the text is broken
    into lines.

    Each preprocessor implements a "run" method that takes the document
    as a list of lines, modifies it as necessary, and returns either the
    same list or a new one.

    Preprocessors must extend markdown.Preprocessor.
    """

    def run(self, lines):
        """
        Subclasses must override this method: it receives the document as a
        list of strings split by newlines and returns the (possibly
        modified) list of lines.
        """
        pass  # pragma: no cover
|  |  | ||||||
|  |  | ||||||
class NormalizeWhitespace(Preprocessor):
    """ Normalize whitespace for consistent parsing. """

    def run(self, lines):
        """Join the lines, normalize their whitespace, and re-split."""
        text = '\n'.join(lines)
        # Drop stray STX/ETX markers so they cannot collide with the stash.
        text = text.replace(util.STX, "").replace(util.ETX, "")
        # Normalize line endings to "\n" and guarantee a trailing blank line.
        text = text.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
        text = text.expandtabs(self.md.tab_length)
        # Blank out lines that contain only spaces.
        text = re.sub(r'(?<=\n) +\n', '\n', text)
        return text.split('\n')
|  |  | ||||||
|  |  | ||||||
class HtmlBlockPreprocessor(Preprocessor):
    """Remove html blocks from the text and store them for later retrieval."""

    def run(self, lines):
        """Feed the document through an `HTMLExtractor` and return the cleaned lines."""
        extractor = HTMLExtractor(self.md)
        extractor.feed('\n'.join(lines))
        extractor.close()
        return ''.join(extractor.cleandoc).split('\n')
							
								
								
									
										189
									
								
								Source/Libs/markdown/serializers.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										189
									
								
								Source/Libs/markdown/serializers.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,189 @@ | |||||||
|  | # markdown/serializers.py | ||||||
|  | # | ||||||
|  | # Add x/html serialization to ElementTree | ||||||
|  | # Taken from ElementTree 1.3 preview with slight modifications | ||||||
|  | # | ||||||
|  | # Copyright (c) 1999-2007 by Fredrik Lundh.  All rights reserved. | ||||||
|  | # | ||||||
|  | # fredrik@pythonware.com | ||||||
|  | # https://www.pythonware.com/ | ||||||
|  | # | ||||||
|  | # -------------------------------------------------------------------- | ||||||
|  | # The ElementTree toolkit is | ||||||
|  | # | ||||||
|  | # Copyright (c) 1999-2007 by Fredrik Lundh | ||||||
|  | # | ||||||
|  | # By obtaining, using, and/or copying this software and/or its | ||||||
|  | # associated documentation, you agree that you have read, understood, | ||||||
|  | # and will comply with the following terms and conditions: | ||||||
|  | # | ||||||
|  | # Permission to use, copy, modify, and distribute this software and | ||||||
|  | # its associated documentation for any purpose and without fee is | ||||||
|  | # hereby granted, provided that the above copyright notice appears in | ||||||
|  | # all copies, and that both that copyright notice and this permission | ||||||
|  | # notice appear in supporting documentation, and that the name of | ||||||
|  | # Secret Labs AB or the author not be used in advertising or publicity | ||||||
|  | # pertaining to distribution of the software without specific, written | ||||||
|  | # prior permission. | ||||||
|  | # | ||||||
|  | # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD | ||||||
|  | # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- | ||||||
|  | # ABILITY AND FITNESS.  IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR | ||||||
|  | # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY | ||||||
|  | # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, | ||||||
|  | # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS | ||||||
|  | # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||||||
|  | # OF THIS SOFTWARE. | ||||||
|  | # -------------------------------------------------------------------- | ||||||
|  |  | ||||||
|  |  | ||||||
|  | from xml.etree.ElementTree import ProcessingInstruction | ||||||
|  | from xml.etree.ElementTree import Comment, ElementTree, QName | ||||||
|  | import re | ||||||
|  |  | ||||||
__all__ = ['to_html_string', 'to_xhtml_string']

# Void elements: serialized without a closing tag (and as "<tag />" in xhtml).
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
              "img", "input", "isindex", "link", "meta", "param")
# Matches an "&" that does not already start a character or entity reference.
RE_AMP = re.compile(r'&(?!(?:\#[0-9]+|\#x[0-9a-f]+|[0-9a-z]+);)', re.I)

try:
    # Convert to a set for O(1) membership tests.
    HTML_EMPTY = set(HTML_EMPTY)
except NameError:  # pragma: no cover
    pass
|  |  | ||||||
|  |  | ||||||
|  | def _raise_serialization_error(text):  # pragma: no cover | ||||||
|  |     raise TypeError( | ||||||
|  |         "cannot serialize {!r} (type {})".format(text, type(text).__name__) | ||||||
|  |         ) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _escape_cdata(text): | ||||||
|  |     # escape character data | ||||||
|  |     try: | ||||||
|  |         # it's worth avoiding do-nothing calls for strings that are | ||||||
|  |         # shorter than 500 character, or so.  assume that's, by far, | ||||||
|  |         # the most common case in most applications. | ||||||
|  |         if "&" in text: | ||||||
|  |             # Only replace & when not part of an entity | ||||||
|  |             text = RE_AMP.sub('&', text) | ||||||
|  |         if "<" in text: | ||||||
|  |             text = text.replace("<", "<") | ||||||
|  |         if ">" in text: | ||||||
|  |             text = text.replace(">", ">") | ||||||
|  |         return text | ||||||
|  |     except (TypeError, AttributeError):  # pragma: no cover | ||||||
|  |         _raise_serialization_error(text) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _escape_attrib(text): | ||||||
|  |     # escape attribute value | ||||||
|  |     try: | ||||||
|  |         if "&" in text: | ||||||
|  |             # Only replace & when not part of an entity | ||||||
|  |             text = RE_AMP.sub('&', text) | ||||||
|  |         if "<" in text: | ||||||
|  |             text = text.replace("<", "<") | ||||||
|  |         if ">" in text: | ||||||
|  |             text = text.replace(">", ">") | ||||||
|  |         if "\"" in text: | ||||||
|  |             text = text.replace("\"", """) | ||||||
|  |         if "\n" in text: | ||||||
|  |             text = text.replace("\n", "
") | ||||||
|  |         return text | ||||||
|  |     except (TypeError, AttributeError):  # pragma: no cover | ||||||
|  |         _raise_serialization_error(text) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def _escape_attrib_html(text): | ||||||
|  |     # escape attribute value | ||||||
|  |     try: | ||||||
|  |         if "&" in text: | ||||||
|  |             # Only replace & when not part of an entity | ||||||
|  |             text = RE_AMP.sub('&', text) | ||||||
|  |         if "<" in text: | ||||||
|  |             text = text.replace("<", "<") | ||||||
|  |         if ">" in text: | ||||||
|  |             text = text.replace(">", ">") | ||||||
|  |         if "\"" in text: | ||||||
|  |             text = text.replace("\"", """) | ||||||
|  |         return text | ||||||
|  |     except (TypeError, AttributeError):  # pragma: no cover | ||||||
|  |         _raise_serialization_error(text) | ||||||
|  |  | ||||||
|  |  | ||||||
def _serialize_html(write, elem, format):
    """
    Recursively serialize `elem` (and its tail text) by calling `write`
    with string fragments.  `format` is "html" or "xhtml".
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text))
    elif tag is None:
        # Tag-less element: emit its text and children without a wrapper.
        if text:
            write(_escape_cdata(text))
        for e in elem:
            _serialize_html(write, e, format)
    else:
        namespace_uri = None
        if isinstance(tag, QName):
            # QNAME objects store their data as a string: `{uri}tag`
            if tag.text[:1] == "{":
                namespace_uri, tag = tag.text[1:].split("}", 1)
            else:
                raise ValueError('QName objects must define a tag.')
        write("<" + tag)
        items = elem.items()
        if items:
            items = sorted(items)  # lexical order
            for k, v in items:
                if isinstance(k, QName):
                    # Assume a text only QName
                    k = k.text
                if isinstance(v, QName):
                    # Assume a text only QName
                    v = v.text
                else:
                    v = _escape_attrib_html(v)
                if k == v and format == 'html':
                    # handle boolean attributes
                    write(" %s" % v)
                else:
                    write(' {}="{}"'.format(k, v))
        if namespace_uri:
            write(' xmlns="%s"' % (_escape_attrib(namespace_uri)))
        if format == "xhtml" and tag.lower() in HTML_EMPTY:
            # Void element in xhtml output: self-close.
            write(" />")
        else:
            write(">")
            if text:
                if tag.lower() in ["script", "style"]:
                    # script/style content must not be entity-escaped.
                    write(text)
                else:
                    write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, format)
            # Void elements get no closing tag in html output.
            if tag.lower() not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail))
|  |  | ||||||
|  |  | ||||||
def _write_html(root, format="html"):
    """Serialize `root` to a string in the given format ("html" or "xhtml")."""
    assert root is not None
    fragments = []
    _serialize_html(fragments.append, root, format)
    return "".join(fragments)
|  |  | ||||||
|  |  | ||||||
|  | # -------------------------------------------------------------------- | ||||||
|  | # public functions | ||||||
|  |  | ||||||
def to_html_string(element):
    """Serialize `element` as an HTML string."""
    root = ElementTree(element).getroot()
    return _write_html(root, format="html")
|  |  | ||||||
|  |  | ||||||
def to_xhtml_string(element):
    """Serialize `element` as an XHTML string."""
    root = ElementTree(element).getroot()
    return _write_html(root, format="xhtml")
							
								
								
									
										220
									
								
								Source/Libs/markdown/test_tools.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										220
									
								
								Source/Libs/markdown/test_tools.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,220 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import os | ||||||
|  | import sys | ||||||
|  | import unittest | ||||||
|  | import textwrap | ||||||
|  | from . import markdown, Markdown, util | ||||||
|  |  | ||||||
|  | try: | ||||||
|  |     import tidylib | ||||||
|  | except ImportError: | ||||||
|  |     tidylib = None | ||||||
|  |  | ||||||
|  | __all__ = ['TestCase', 'LegacyTestCase', 'Kwargs'] | ||||||
|  |  | ||||||
|  |  | ||||||
class TestCase(unittest.TestCase):
    """
    A unittest.TestCase subclass with helpers for testing Markdown output.

    Define `default_kwargs` as a dict of keywords to pass to Markdown for each
    test. The defaults can be overridden on individual tests.

    The `assertMarkdownRenders` method accepts the source text, the expected
    output, and any keywords to pass to Markdown. The `default_kwargs` are used
    except where overridden by `kwargs`. The output and expected output are passed
    to `TestCase.assertMultiLineEqual`. An AssertionError is raised with a diff
    if the actual output does not equal the expected output.

    The `dedent` method is available to dedent triple-quoted strings if
    necessary.

    In all other respects, behaves as unittest.TestCase.
    """

    # Per-class default keywords passed to Markdown on every test.
    default_kwargs = {}

    def assertMarkdownRenders(self, source, expected, expected_attrs=None, **kwargs):
        """
        Test that source Markdown text renders to expected output with given keywords.

        `expected_attrs` accepts a dict. Each key should be the name of an attribute
        on the `Markdown` instance and the value should be the expected value after
        the source text is parsed by Markdown. After the expected output is tested,
        the expected value for each attribute is compared against the actual
        attribute of the `Markdown` instance using `TestCase.assertEqual`.
        """

        attrs = expected_attrs or {}
        # Per-test kwargs override the class-level defaults.
        merged = {**self.default_kwargs, **kwargs}
        md = Markdown(**merged)
        rendered = md.convert(source)
        self.assertMultiLineEqual(rendered, expected)
        for name, expected_value in attrs.items():
            self.assertEqual(getattr(md, name), expected_value)

    def dedent(self, text):
        """
        Dedent and strip `text`.
        """

        # TODO: If/when actual output ends with a newline, then use:
        # return textwrap.dedent(text.strip('/n'))
        return textwrap.dedent(text).strip()
|  |  | ||||||
|  |  | ||||||
class recursionlimit:
    """
    A context manager which temporarily modifies the Python recursion limit.

    The testing framework, coverage, etc. may add an arbitrary number of levels
    to the depth, so the requested limit is added to the stack depth measured
    at construction time to keep tests consistent.

    Example usage:

        with recursionlimit(20):
            # test code here

    See https://stackoverflow.com/a/50120316/866026
    """

    def __init__(self, limit):
        # Anchor the new limit to the current stack depth so the allowance is
        # the same no matter how deep the caller already is.
        self.limit = util._get_stack_depth() + limit
        self.old_limit = sys.getrecursionlimit()

    def __enter__(self):
        sys.setrecursionlimit(self.limit)

    def __exit__(self, exc_type, exc_value, traceback):
        # Always restore the previous limit, even on error.
        sys.setrecursionlimit(self.old_limit)
|  |  | ||||||
|  |  | ||||||
|  | ######################### | ||||||
|  | # Legacy Test Framework # | ||||||
|  | ######################### | ||||||
|  |  | ||||||
|  |  | ||||||
class Kwargs(dict):
    """ A dict subclass used to mark a mapping as keyword arguments. """
    pass
|  |  | ||||||
|  |  | ||||||
def _normalize_whitespace(text):
    """ Normalize whitespace for a string of html using tidylib. """
    # Disable every rewriting feature except xhtml output and LF newlines,
    # so tidy only normalizes whitespace.
    options = {
        'drop_empty_paras': 0,
        'fix_backslash': 0,
        'fix_bad_comments': 0,
        'fix_uri': 0,
        'join_styles': 0,
        'lower_literals': 0,
        'merge_divs': 0,
        'output_xhtml': 1,
        'quote_ampersand': 0,
        'newline': 'LF'
    }
    output, _errors = tidylib.tidy_fragment(text, options=options)
    return output
|  |  | ||||||
|  |  | ||||||
class LegacyTestMeta(type):
    """Metaclass that builds ``test_*`` methods from a directory of file pairs.

    For every ``<name><input_ext>`` file found in ``location``, a test method
    is generated which renders the input with ``markdown()`` and compares the
    result against the matching ``<name><output_ext>`` file.
    """
    def __new__(cls, name, bases, dct):

        def generate_test(infile, outfile, normalize, kwargs):
            # Factory so each generated test captures its own file pair and
            # kwargs as arguments instead of closing over the loop variables
            # below (late-binding closures would make every test use the
            # last file).
            def test(self):
                with open(infile, encoding="utf-8") as f:
                    input = f.read()
                with open(outfile, encoding="utf-8") as f:
                    # Normalize line endings
                    # (on Windows, git may have altered line endings).
                    expected = f.read().replace("\r\n", "\n")
                output = markdown(input, **kwargs)
                if tidylib and normalize:
                    try:
                        expected = _normalize_whitespace(expected)
                        output = _normalize_whitespace(output)
                    except OSError:
                        # tidylib imported but its C library is missing.
                        self.skipTest("Tidylib's c library not available.")
                elif normalize:
                    self.skipTest('Tidylib not available.')
                self.assertMultiLineEqual(output, expected)
            return test

        # Class-level configuration with defaults; see LegacyTestCase docs.
        location = dct.get('location', '')
        exclude = dct.get('exclude', [])
        normalize = dct.get('normalize', False)
        input_ext = dct.get('input_ext', '.txt')
        output_ext = dct.get('output_ext', '.html')
        kwargs = dct.get('default_kwargs', Kwargs())

        if os.path.isdir(location):
            for file in os.listdir(location):
                infile = os.path.join(location, file)
                if os.path.isfile(infile):
                    tname, ext = os.path.splitext(file)
                    if ext == input_ext:
                        outfile = os.path.join(location, tname + output_ext)
                        # Test method names must be valid identifiers.
                        tname = tname.replace(' ', '_').replace('-', '_')
                        # Per-file Kwargs entries in the class body override
                        # the class-wide defaults.
                        kws = kwargs.copy()
                        if tname in dct:
                            kws.update(dct[tname])
                        test_name = 'test_%s' % tname
                        if tname not in exclude:
                            dct[test_name] = generate_test(infile, outfile, normalize, kws)
                        else:
                            # Registered but skipped, so the exclusion is
                            # visible in the test report.
                            dct[test_name] = unittest.skip('Excluded')(lambda: None)

        return type.__new__(cls, name, bases, dct)
|  |  | ||||||
|  |  | ||||||
class LegacyTestCase(unittest.TestCase, metaclass=LegacyTestMeta):
    """
    A `unittest.TestCase` subclass for running Markdown's legacy file-based tests.

    A subclass should define various properties which point to a directory of
    text-based test files and define various behaviors/defaults for those tests.
    The following properties are supported:

    location: A path to the directory of test files. An absolute path is preferred.
    exclude: A list of tests to exclude. Each test name should comprise the filename
             without an extension.
    normalize: A boolean value indicating if the HTML should be normalized.
               Default: `False`.
    input_ext: A string containing the file extension of input files. Default: `.txt`.
    output_ext: A string containing the file extension of expected output files.
                Default: `.html`.
    default_kwargs: A `Kwargs` instance which stores the default set of keyword
                    arguments for all test files in the directory.

    In addition, properties can be defined for each individual set of test files within
    the directory. The property should be given the name of the file without the file
    extension. Any spaces and dashes in the filename should be replaced with
    underscores. The value of the property should be a `Kwargs` instance which
    contains the keyword arguments that should be passed to `Markdown` for that
    test file. The keyword arguments will "update" the `default_kwargs`.

    When the class instance is created, it will walk the given directory and create
    a separate unittest for each set of test files using the naming scheme:
    `test_filename`. One unittest will be run for each set of input and output files.
    """
    pass
							
								
								
									
										436
									
								
								Source/Libs/markdown/treeprocessors.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										436
									
								
								Source/Libs/markdown/treeprocessors.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,436 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import xml.etree.ElementTree as etree | ||||||
|  | from . import util | ||||||
|  | from . import inlinepatterns | ||||||
|  |  | ||||||
|  |  | ||||||
def build_treeprocessors(md, **kwargs):
    """Return a Registry holding the default treeprocessors for Markdown.

    Inline-pattern expansion runs first (priority 20), prettification last
    (priority 10). Extra ``kwargs`` are accepted for interface compatibility
    but unused.
    """
    registry = util.Registry()
    registry.register(InlineProcessor(md), 'inline', 20)
    registry.register(PrettifyTreeprocessor(md), 'prettify', 10)
    return registry
|  |  | ||||||
|  |  | ||||||
def isString(s):
    """Return True for plain strings; AtomicString instances report False."""
    return isinstance(s, str) and not isinstance(s, util.AtomicString)
|  |  | ||||||
|  |  | ||||||
class Treeprocessor(util.Processor):
    """
    Base class for processors that operate on the ElementTree object before
    serialization.

    Subclasses implement ``run``, which receives the tree's root element;
    they may mutate the tree in place (returning ``None``) or return a
    replacement ElementTree object.
    """
    def run(self, root):
        """
        Process the tree rooted at ``root``.

        Return a new ElementTree object to replace the existing root, or
        ``None`` when the tree has been modified in place.
        """
        pass  # pragma: no cover
|  |  | ||||||
|  |  | ||||||
class InlineProcessor(Treeprocessor):
    """
    A Treeprocessor that traverses a tree, applying inline patterns.

    Inline markup (emphasis, links, code spans, ...) is handled in two phases:
    matched spans are first replaced in the text by STX/ETX-delimited numeric
    placeholders while the generated elements are stashed, then the
    placeholders are expanded back into child elements of the tree.
    """

    def __init__(self, md):
        self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
        self.__placeholder_suffix = util.ETX
        # 4 accounts for the zero-padded numeric id ("%04d") that sits
        # between the prefix and the suffix.
        self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
                                      + len(self.__placeholder_suffix)
        self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
        self.md = md
        self.inlinePatterns = md.inlinePatterns
        # Stack of lower-cased ancestor tag names for the node currently
        # being processed; consulted by patterns with ANCESTOR_EXCLUDES.
        self.ancestors = []

    @property
    @util.deprecated("Use 'md' instead.")
    def markdown(self):
        # Deprecated alias kept for backward compatibility.
        # TODO: remove this later
        return self.md

    def __makePlaceholder(self, type):
        """ Generate a placeholder """
        # NOTE: `type` is currently unused; the id is simply the next index
        # into `stashed_nodes`.
        id = "%04d" % len(self.stashed_nodes)
        hash = util.INLINE_PLACEHOLDER % id
        return hash, id

    def __findPlaceholder(self, data, index):
        """
        Extract id from data string, start from index

        Keyword arguments:

        * data: string
        * index: index, from which we start search

        Returns: placeholder id and string index, after the found placeholder.

        """
        m = self.__placeholder_re.search(data, index)
        if m:
            return m.group(1), m.end()
        else:
            # No placeholder found; report an index one past the start so the
            # caller's scan can advance.
            return None, index + 1

    def __stashNode(self, node, type):
        """ Add node to stash """
        placeholder, id = self.__makePlaceholder(type)
        self.stashed_nodes[id] = node
        return placeholder

    def __handleInline(self, data, patternIndex=0):
        """
        Process string with inline patterns and replace it
        with placeholders

        Keyword arguments:

        * data: A line of Markdown text
        * patternIndex: The index of the inlinePattern to start with

        Returns: String with placeholders.

        """
        # AtomicString marks text that must not be run through the inline
        # patterns; it is returned untouched.
        if not isinstance(data, util.AtomicString):
            startIndex = 0
            count = len(self.inlinePatterns)
            while patternIndex < count:
                # A successful match re-runs the same pattern from the new
                # startIndex; only a failed match advances to the next pattern.
                data, matched, startIndex = self.__applyPattern(
                    self.inlinePatterns[patternIndex], data, patternIndex, startIndex
                )
                if not matched:
                    patternIndex += 1
        return data

    def __processElementText(self, node, subnode, isText=True):
        """
        Process placeholders in Element.text or Element.tail
        of Elements popped from self.stashed_nodes.

        Keywords arguments:

        * node: parent node
        * subnode: processing node
        * isText: bool variable, True - it's text, False - it's tail

        Returns: None

        """
        if isText:
            text = subnode.text
            subnode.text = None
        else:
            text = subnode.tail
            subnode.tail = None

        childResult = self.__processPlaceholders(text, subnode, isText)

        if not isText and node is not subnode:
            # Tail content belongs immediately after `subnode` within `node`.
            pos = list(node).index(subnode) + 1
        else:
            pos = 0

        # Insert in reverse so repeated inserts at `pos` preserve order.
        childResult.reverse()
        for newChild in childResult:
            node.insert(pos, newChild[0])

    def __processPlaceholders(self, data, parent, isText=True):
        """
        Process string with placeholders and generate ElementTree tree.

        Keyword arguments:

        * data: string with placeholders instead of ElementTree elements.
        * parent: Element, which contains processing inline data
        * isText: whether `data` came from `parent.text` (True) or
          `parent.tail` (False).

        Returns: list with ElementTree elements with applied inline patterns.

        """
        def linkText(text):
            # Attach plain text to the nearest appropriate slot: the tail of
            # the last generated element, else the parent's text or tail.
            if text:
                if result:
                    if result[-1][0].tail:
                        result[-1][0].tail += text
                    else:
                        result[-1][0].tail = text
                elif not isText:
                    if parent.tail:
                        parent.tail += text
                    else:
                        parent.tail = text
                else:
                    if parent.text:
                        parent.text += text
                    else:
                        parent.text = text
        result = []
        # NOTE: "strartIndex" is a long-standing misspelling of "startIndex",
        # preserved as-is.
        strartIndex = 0
        while data:
            index = data.find(self.__placeholder_prefix, strartIndex)
            if index != -1:
                id, phEndIndex = self.__findPlaceholder(data, index)

                if id in self.stashed_nodes:
                    node = self.stashed_nodes.get(id)

                    if index > 0:
                        # Text preceding the placeholder.
                        text = data[strartIndex:index]
                        linkText(text)

                    if not isString(node):  # it's Element
                        # Recurse into the stashed element's own text/tails,
                        # which may contain further placeholders.
                        for child in [node] + list(node):
                            if child.tail:
                                if child.tail.strip():
                                    self.__processElementText(
                                        node, child, False
                                    )
                            if child.text:
                                if child.text.strip():
                                    self.__processElementText(child, child)
                    else:  # it's just a string
                        linkText(node)
                        strartIndex = phEndIndex
                        continue

                    strartIndex = phEndIndex
                    # Record the node together with a snapshot of its
                    # ancestor chain for the caller's work stack.
                    result.append((node, self.ancestors[:]))

                else:  # wrong placeholder
                    # Prefix matched but no stashed id: emit the prefix as
                    # literal text and keep scanning after it.
                    end = index + len(self.__placeholder_prefix)
                    linkText(data[strartIndex:end])
                    strartIndex = end
            else:
                text = data[strartIndex:]
                if isinstance(data, util.AtomicString):
                    # We don't want to lose the AtomicString
                    text = util.AtomicString(text)
                linkText(text)
                data = ""

        return result

    def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
        """
        Check if the line fits the pattern, create the necessary
        elements, add it to stashed_nodes.

        Keyword arguments:

        * data: the text to be processed
        * pattern: the pattern to be checked
        * patternIndex: index of current pattern
        * startIndex: string index, from which we start searching

        Returns: String with placeholders instead of ElementTree elements.

        """
        new_style = isinstance(pattern, inlinepatterns.InlineProcessor)

        # Patterns may opt out when nested inside certain ancestor tags.
        for exclude in pattern.ANCESTOR_EXCLUDES:
            if exclude.lower() in self.ancestors:
                return data, False, 0

        if new_style:
            match = None
            # Since handleMatch may reject our first match,
            # we iterate over the buffer looking for matches
            # until we can't find any more.
            for match in pattern.getCompiledRegExp().finditer(data, startIndex):
                node, start, end = pattern.handleMatch(match, data)
                if start is None or end is None:
                    startIndex += match.end(0)
                    match = None
                    continue
                break
        else:  # pragma: no cover
            # Legacy (old-style) patterns match against the remainder only.
            match = pattern.getCompiledRegExp().match(data[startIndex:])
            leftData = data[:startIndex]

        if not match:
            return data, False, 0

        if not new_style:  # pragma: no cover
            node = pattern.handleMatch(match)
            start = match.start(0)
            end = match.end(0)

        if node is None:
            # Matched but produced nothing: skip past the match.
            return data, True, end

        if not isString(node):
            if not isinstance(node.text, util.AtomicString):
                # We need to process current node too
                for child in [node] + list(node):
                    if not isString(node):
                        if child.text:
                            # Text restarts at the next pattern; tails re-run
                            # the current one.
                            self.ancestors.append(child.tag.lower())
                            child.text = self.__handleInline(
                                child.text, patternIndex + 1
                            )
                            self.ancestors.pop()
                        if child.tail:
                            child.tail = self.__handleInline(
                                child.tail, patternIndex
                            )

        placeholder = self.__stashNode(node, pattern.type())

        if new_style:
            return "{}{}{}".format(data[:start],
                                   placeholder, data[end:]), True, 0
        else:  # pragma: no cover
            return "{}{}{}{}".format(leftData,
                                     match.group(1),
                                     placeholder, match.groups()[-1]), True, 0

    def __build_ancestors(self, parent, parents):
        """Build the ancestor list."""
        ancestors = []
        while parent is not None:
            if parent is not None:
                ancestors.append(parent.tag.lower())
            parent = self.parent_map.get(parent)
        # The walk above went child -> root; callers expect root -> child.
        ancestors.reverse()
        parents.extend(ancestors)

    def run(self, tree, ancestors=None):
        """Apply inline patterns to a parsed Markdown tree.

        Iterate over ElementTree, find elements with inline tag, apply inline
        patterns and append newly created Elements to tree.  If you don't
        want to process your data with inline patterns, instead of normal
        string, use subclass AtomicString:

            node.text = markdown.AtomicString("This will not be processed.")

        Arguments:

        * tree: ElementTree object, representing Markdown tree.
        * ancestors: List of parent tag names that precede the tree node (if needed).

        Returns: ElementTree object with applied inline patterns.

        """
        self.stashed_nodes = {}

        # Ensure a valid parent list, but copy passed in lists
        # to ensure we don't have the user accidentally change it on us.
        tree_parents = [] if ancestors is None else ancestors[:]

        # ElementTree elements hold no parent pointers; build a
        # child -> parent map up front so ancestors can be reconstructed.
        self.parent_map = {c: p for p in tree.iter() for c in p}
        stack = [(tree, tree_parents)]

        while stack:
            currElement, parents = stack.pop()

            self.ancestors = parents
            self.__build_ancestors(currElement, self.ancestors)

            insertQueue = []
            for child in currElement:
                if child.text and not isinstance(
                    child.text, util.AtomicString
                ):
                    self.ancestors.append(child.tag.lower())
                    text = child.text
                    child.text = None
                    lst = self.__processPlaceholders(
                        self.__handleInline(text), child
                    )
                    for item in lst:
                        self.parent_map[item[0]] = child
                    # Newly created elements are processed on later
                    # iterations of the outer while loop.
                    stack += lst
                    insertQueue.append((child, lst))
                    self.ancestors.pop()
                if child.tail:
                    tail = self.__handleInline(child.tail)
                    # "dumby" (sic) is a throwaway container that collects
                    # elements produced from the tail text.
                    dumby = etree.Element('d')
                    child.tail = None
                    tailResult = self.__processPlaceholders(tail, dumby, False)
                    if dumby.tail:
                        child.tail = dumby.tail
                    pos = list(currElement).index(child) + 1
                    # Reverse so repeated inserts at `pos` keep original order.
                    tailResult.reverse()
                    for newChild in tailResult:
                        self.parent_map[newChild[0]] = currElement
                        currElement.insert(pos, newChild[0])
                if len(child):
                    self.parent_map[child] = currElement
                    stack.append((child, self.ancestors[:]))

            # Deferred so the iteration over currElement above is not
            # disturbed by insertions.
            for element, lst in insertQueue:
                for i, obj in enumerate(lst):
                    newChild = obj[0]
                    element.insert(i, newChild)
        return tree
|  |  | ||||||
|  |  | ||||||
class PrettifyTreeprocessor(Treeprocessor):
    """ Add linebreaks to the html document. """

    def _prettifyETree(self, elem):
        """ Recursively add linebreaks to ElementTree children. """
        newline = "\n"
        if self.md.is_block_level(elem.tag) and elem.tag not in ('code', 'pre'):
            # Break after the opening tag when the first child is block-level
            # and there is no meaningful leading text.
            if (not elem.text or not elem.text.strip()) \
                    and len(elem) and self.md.is_block_level(elem[0].tag):
                elem.text = newline
            for subelem in elem:
                if self.md.is_block_level(subelem.tag):
                    self._prettifyETree(subelem)
            if not elem.tail or not elem.tail.strip():
                elem.tail = newline
        if not elem.tail or not elem.tail.strip():
            elem.tail = newline

    def run(self, root):
        """ Add linebreaks to ElementTree root object. """
        self._prettifyETree(root)
        # <br /> elements sit in the middle of inline content, so the
        # recursive pass above misses them; give each a trailing newline.
        for br in root.iter('br'):
            if not br.tail or not br.tail.strip():
                br.tail = '\n'
            else:
                br.tail = '\n%s' % br.tail
        # Clean up extra empty lines at end of code blocks.
        for pre in root.iter('pre'):
            if len(pre) and pre[0].tag == 'code':
                pre[0].text = util.AtomicString(pre[0].text.rstrip() + '\n')
							
								
								
									
										485
									
								
								Source/Libs/markdown/util.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										485
									
								
								Source/Libs/markdown/util.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,485 @@ | |||||||
|  | """ | ||||||
|  | Python Markdown | ||||||
|  |  | ||||||
|  | A Python implementation of John Gruber's Markdown. | ||||||
|  |  | ||||||
|  | Documentation: https://python-markdown.github.io/ | ||||||
|  | GitHub: https://github.com/Python-Markdown/markdown/ | ||||||
|  | PyPI: https://pypi.org/project/Markdown/ | ||||||
|  |  | ||||||
|  | Started by Manfred Stienstra (http://www.dwerg.net/). | ||||||
|  | Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org). | ||||||
|  | Currently maintained by Waylan Limberg (https://github.com/waylan), | ||||||
|  | Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser). | ||||||
|  |  | ||||||
|  | Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later) | ||||||
|  | Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b) | ||||||
|  | Copyright 2004 Manfred Stienstra (the original version) | ||||||
|  |  | ||||||
|  | License: BSD (see LICENSE.md for details). | ||||||
|  | """ | ||||||
|  |  | ||||||
|  | import re | ||||||
|  | import sys | ||||||
|  | import warnings | ||||||
|  | import xml.etree.ElementTree | ||||||
|  | from collections import namedtuple | ||||||
|  | from functools import wraps | ||||||
|  | from itertools import count | ||||||
|  |  | ||||||
|  | from .pep562 import Pep562 | ||||||
|  |  | ||||||
|  | if sys.version_info >= (3, 10): | ||||||
|  |     from importlib import metadata | ||||||
|  | else: | ||||||
|  |     # <PY310 use backport | ||||||
|  |     from .. import importlib_metadata as metadata | ||||||
|  |  | ||||||
# True on Python 3.7 and newer.
PY37 = (3, 7) <= sys.version_info


# TODO: Remove deprecated variables in a future release.
# Maps old module-level names to (replacement name, replacement object);
# served lazily via the module's PEP 562 `__getattr__` shim.
__deprecated__ = {
    'etree': ('xml.etree.ElementTree', xml.etree.ElementTree),
    'string_type': ('str', str),
    'text_type': ('str', str),
    'int2str': ('chr', chr),
    'iterrange': ('range', range)
}


"""
Constants you might want to modify
-----------------------------------------------------------------------------
"""


BLOCK_LEVEL_ELEMENTS = [
    # Elements which are invalid to wrap in a `<p>` tag.
    # See https://w3c.github.io/html/grouping-content.html#the-p-element
    'address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl',
    'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3',
    'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'main', 'menu', 'nav', 'ol',
    'p', 'pre', 'section', 'table', 'ul',
    # Other elements which Markdown should not be mucking up the contents of.
    'canvas', 'colgroup', 'dd', 'body', 'dt', 'group', 'iframe', 'li', 'legend',
    'math', 'map', 'noscript', 'output', 'object', 'option', 'progress', 'script',
    'style', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'video'
]

# Placeholders
# The control characters STX/ETX delimit placeholders so they cannot collide
# with ordinary document text.
STX = '\u0002'  # Use STX ("Start of text") for start-of-placeholder
ETX = '\u0003'  # Use ETX ("End of text") for end-of-placeholder
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)')
AMP_SUBSTITUTE = STX+"amp"+ETX
HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX
HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)')
TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX


"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""

# Only load extension entry_points once.
INSTALLED_EXTENSIONS = metadata.entry_points(group='markdown.extensions')
RTL_BIDI_RANGES = (
    ('\u0590', '\u07FF'),
    # Hebrew (0590-05FF), Arabic (0600-06FF),
    # Syriac (0700-074F), Arabic supplement (0750-077F),
    # Thaana (0780-07BF), Nko (07C0-07FF).
    ('\u2D30', '\u2D7F')  # Tifinagh
)
|  |  | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | AUXILIARY GLOBAL FUNCTIONS | ||||||
|  | ============================================================================= | ||||||
|  | """ | ||||||
|  |  | ||||||
|  |  | ||||||
def deprecated(message, stacklevel=2):
    """
    Decorator factory: emit a DeprecationWarning each time the wrapped
    function or method is called, then delegate to it unchanged.

    Usage:
        @deprecated("This method will be removed in version X; use Y instead.")
        def some_method():
            pass
    """
    def decorate(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            # Warn on every call; `stacklevel` points the warning at the caller.
            warnings.warn(
                f"'{func.__name__}' is deprecated. {message}",
                category=DeprecationWarning,
                stacklevel=stacklevel
            )
            return func(*args, **kwargs)
        return wrapped
    return decorate
|  |  | ||||||
|  |  | ||||||
@deprecated("Use 'Markdown.is_block_level' instead.")
def isBlockLevel(tag):
    """Check if the tag is a block level HTML tag."""
    if not isinstance(tag, str):
        # Some ElementTree tags are not strings (e.g. Comment callables),
        # and those are never block level.
        return False
    return tag.lower().rstrip('/') in BLOCK_LEVEL_ELEMENTS
|  |  | ||||||
|  |  | ||||||
def parseBoolValue(value, fail_on_errors=True, preserve_none=False):
    """Parses a string representing bool value. If parsing was successful,
       returns True or False. If preserve_none=True, returns True, False,
       or None. If parsing was not successful, raises  ValueError, or, if
       fail_on_errors=False, returns None."""
    if not isinstance(value, str):
        # Non-strings: pass None through when requested, otherwise coerce.
        if preserve_none and value is None:
            return value
        return bool(value)
    lowered = value.lower()
    if preserve_none and lowered == 'none':
        return None
    if lowered in ('true', 'yes', 'y', 'on', '1'):
        return True
    if lowered in ('false', 'no', 'n', 'off', '0', 'none'):
        return False
    if fail_on_errors:
        raise ValueError('Cannot parse bool value: %r' % value)
|  |  | ||||||
|  |  | ||||||
def code_escape(text):
    """Escape code.

    Replace the HTML special characters `&`, `<` and `>` with their
    character entities so the text can be embedded literally inside a
    code block. `&` must be replaced first so the entities introduced
    for `<`/`>` are not double-escaped.
    """
    # NOTE: the entity strings below were lost in an HTML rendering of this
    # file (they appeared as identity replacements); restored here.
    if "&" in text:
        text = text.replace("&", "&amp;")
    if "<" in text:
        text = text.replace("<", "&lt;")
    if ">" in text:
        text = text.replace(">", "&gt;")
    return text
|  |  | ||||||
|  |  | ||||||
|  | def _get_stack_depth(size=2): | ||||||
|  |     """Get current stack depth, performantly. | ||||||
|  |     """ | ||||||
|  |     frame = sys._getframe(size) | ||||||
|  |  | ||||||
|  |     for size in count(size): | ||||||
|  |         frame = frame.f_back | ||||||
|  |         if not frame: | ||||||
|  |             return size | ||||||
|  |  | ||||||
|  |  | ||||||
def nearing_recursion_limit():
    """Return true if current stack depth is within 100 of maximum limit."""
    remaining = sys.getrecursionlimit() - _get_stack_depth()
    return remaining < 100
|  |  | ||||||
|  |  | ||||||
|  | """ | ||||||
|  | MISC AUXILIARY CLASSES | ||||||
|  | ============================================================================= | ||||||
|  | """ | ||||||
|  |  | ||||||
|  |  | ||||||
class AtomicString(str):
    """A string subclass marking text that should not be further processed."""
|  |  | ||||||
|  |  | ||||||
class Processor:
    """Base class giving processors a reference to the Markdown instance."""

    def __init__(self, md=None):
        # `md` may be None when the processor is constructed standalone.
        self.md = md

    @property
    @deprecated("Use 'md' instead.")
    def markdown(self):
        # TODO: remove this later -- kept only for backward compatibility.
        return self.md
|  |  | ||||||
|  |  | ||||||
class HtmlStash:
    """
    This class is used for stashing HTML objects that we extract
    in the beginning and replace with place-holders.
    """

    def __init__(self):
        """ Create a HtmlStash. """
        self.html_counter = 0  # number of stashed inline html segments
        self.rawHtmlBlocks = []  # stashed raw html fragments, in store order
        self.tag_counter = 0  # number of stashed tags
        self.tag_data = []  # list of dictionaries in the order tags appear

    def store(self, html):
        """
        Saves an HTML segment for later reinsertion.  Returns a
        placeholder string that needs to be inserted into the
        document.

        Keyword arguments:

        * html: an html segment

        Returns : a placeholder string

        """
        self.rawHtmlBlocks.append(html)
        placeholder = self.get_placeholder(self.html_counter)
        self.html_counter += 1
        return placeholder

    def reset(self):
        # Forget all stashed html segments (tag data is left untouched).
        self.html_counter = 0
        self.rawHtmlBlocks = []

    def get_placeholder(self, key):
        # Format the module-level placeholder template with the given index.
        return HTML_PLACEHOLDER % key

    def store_tag(self, tag, attrs, left_index, right_index):
        """Store tag data and return a placeholder."""
        self.tag_data.append({
            'tag': tag,
            'attrs': attrs,
            'left_index': left_index,
            'right_index': right_index,
        })
        placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
        self.tag_counter += 1  # equal to the tag's index in self.tag_data
        return placeholder
|  |  | ||||||
|  |  | ||||||
|  | # Used internally by `Registry` for each item in its sorted list. | ||||||
|  | # Provides an easier to read API when editing the code later. | ||||||
|  | # For example, `item.name` is more clear than `item[0]`. | ||||||
|  | _PriorityItem = namedtuple('PriorityItem', ['name', 'priority']) | ||||||
|  |  | ||||||
|  |  | ||||||
class Registry:
    """
    A priority sorted registry.

    A `Registry` instance provides two public methods to alter the data of the
    registry: `register` and `deregister`. Use `register` to add items and
    `deregister` to remove items. See each method for specifics.

    When registering an item, a "name" and a "priority" must be provided. All
    items are automatically sorted by "priority" from highest to lowest. The
    "name" is used to remove ("deregister") and get items.

    A `Registry` instance is like a list (which maintains order) when reading
    data. You may iterate over the items, get an item and get a count (length)
    of all items. You may also check that the registry contains an item.

    When getting an item you may use either the index of the item or the
    string-based "name". For example:

        registry = Registry()
        registry.register(SomeItem(), 'itemname', 20)
        # Get the item by index
        item = registry[0]
        # Get the item by name
        item = registry['itemname']

    When checking that the registry contains an item, you may use either the
    string-based "name", or a reference to the actual item. For example:

        someitem = SomeItem()
        registry.register(someitem, 'itemname', 20)
        # Contains the name
        assert 'itemname' in registry
        # Contains the item instance
        assert someitem in registry

    The method `get_index_for_name` is also available to obtain the index of
    an item using that item's assigned "name".
    """

    def __init__(self):
        self._data = {}        # name -> registered item
        self._priority = []    # list of _PriorityItem(name, priority)
        self._is_sorted = False  # sorting is deferred until first read

    def __contains__(self, item):
        if isinstance(item, str):
            # Check if an item exists by this name.
            return item in self._data.keys()
        # Check if this instance exists.
        return item in self._data.values()

    def __iter__(self):
        # Yield items in priority order (highest first).
        self._sort()
        return iter([self._data[k] for k, p in self._priority])

    def __getitem__(self, key):
        self._sort()
        if isinstance(key, slice):
            # A slice returns a new Registry holding the sliced items
            # with their original priorities.
            data = Registry()
            for k, p in self._priority[key]:
                data.register(self._data[k], k, p)
            return data
        if isinstance(key, int):
            # An integer key indexes into the priority-sorted order.
            return self._data[self._priority[key].name]
        return self._data[key]

    def __len__(self):
        return len(self._priority)

    def __repr__(self):
        return '<{}({})>'.format(self.__class__.__name__, list(self))

    def get_index_for_name(self, name):
        """
        Return the index of the given name.

        Raises ValueError if no item is registered under `name`.
        """
        if name in self:
            self._sort()
            return self._priority.index(
                [x for x in self._priority if x.name == name][0]
            )
        raise ValueError('No item named "{}" exists.'.format(name))

    def register(self, item, name, priority):
        """
        Add an item to the registry with the given name and priority.

        Parameters:

        * `item`: The item being registered.
        * `name`: A string used to reference the item.
        * `priority`: An integer or float used to sort against all items.

        If an item is registered with a "name" which already exists, the
        existing item is replaced with the new item. Treat carefully as the
        old item is lost with no way to recover it. The new item will be
        sorted according to its priority and will **not** retain the position
        of the old item.
        """
        if name in self:
            # Remove existing item of same name first
            self.deregister(name)
        self._is_sorted = False
        self._data[name] = item
        self._priority.append(_PriorityItem(name, priority))

    def deregister(self, name, strict=True):
        """
        Remove an item from the registry.

        Set `strict=False` to fail silently.
        """
        try:
            index = self.get_index_for_name(name)
            del self._priority[index]
            del self._data[name]
        except ValueError:
            if strict:
                raise

    def _sort(self):
        """
        Sort the registry by priority from highest to lowest.

        This method is called internally and should never be explicitly called.
        """
        if not self._is_sorted:
            self._priority.sort(key=lambda item: item.priority, reverse=True)
            self._is_sorted = True

    # Deprecated Methods which provide a smooth transition from OrderedDict

    def __setitem__(self, key, value):
        """ Register item with priority 5 less than lowest existing priority. """
        if isinstance(key, str):
            warnings.warn(
                'Using setitem to register a processor or pattern is deprecated. '
                'Use the `register` method instead.',
                DeprecationWarning,
                stacklevel=2,
            )
            if key in self:
                # Key already exists, replace without altering priority
                self._data[key] = value
                return
            if len(self) == 0:
                # This is the first item. Set priority to 50.
                priority = 50
            else:
                self._sort()
                priority = self._priority[-1].priority - 5
            self.register(value, key, priority)
        else:
            # Integer/slice assignment was never supported by the old API.
            raise TypeError

    def __delitem__(self, key):
        """ Deregister an item by name. """
        if key in self:
            self.deregister(key)
            warnings.warn(
                'Using del to remove a processor or pattern is deprecated. '
                'Use the `deregister` method instead.',
                DeprecationWarning,
                stacklevel=2,
            )
        else:
            raise KeyError('Cannot delete key {}, not registered.'.format(key))

    def add(self, key, value, location):
        """ Register a key by location.

        `location` is one of '_begin', '_end', '<name' (just before `name`)
        or '>name' (just after `name`); a numeric priority is derived from
        the neighbours' priorities.
        """
        if len(self) == 0:
            # This is the first item. Set priority to 50.
            priority = 50
        elif location == '_begin':
            self._sort()
            # Set priority 5 greater than highest existing priority
            priority = self._priority[0].priority + 5
        elif location == '_end':
            self._sort()
            # Set priority 5 less than lowest existing priority
            priority = self._priority[-1].priority - 5
        elif location.startswith('<') or location.startswith('>'):
            # Set priority halfway between existing priorities.
            i = self.get_index_for_name(location[1:])
            if location.startswith('<'):
                after = self._priority[i].priority
                if i > 0:
                    before = self._priority[i-1].priority
                else:
                    # Location is first item
                    before = after + 10
            else:
                # location.startswith('>')
                before = self._priority[i].priority
                if i < len(self) - 1:
                    after = self._priority[i+1].priority
                else:
                    # location is last item
                    after = before - 10
            priority = before - ((before - after) / 2)
        else:
            raise ValueError('Not a valid location: "%s". Location key '
                             'must start with a ">" or "<".' % location)
        self.register(value, key, priority)
        warnings.warn(
            'Using the add method to register a processor or pattern is deprecated. '
            'Use the `register` method instead.',
            DeprecationWarning,
            stacklevel=2,
        )
|  |  | ||||||
|  |  | ||||||
def __getattr__(name):
    """Module-level attribute hook (PEP 562): serve deprecated names."""
    entry = __deprecated__.get(name)
    if not entry:
        raise AttributeError("module '{}' has no attribute '{}'".format(__name__, name))
    # `entry` is (replacement_name, value): warn, then return the old value.
    warnings.warn(
        "'{}' is deprecated. Use '{}' instead.".format(name, entry[0]),
        category=DeprecationWarning,
        stacklevel=(3 if PY37 else 4)
    )
    return entry[1]
|  |  | ||||||
|  |  | ||||||
# Python < 3.7 lacks native module-level __getattr__ (PEP 562);
# the Pep562 backport wires the hook above into this module.
if not PY37:
    Pep562(__name__)
							
								
								
									
										334
									
								
								Source/Libs/zipp.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										334
									
								
								Source/Libs/zipp.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,334 @@ | |||||||
|  | """ | ||||||
|  | Copyright Jason R. Coombs | ||||||
|  |  | ||||||
|  | Permission is hereby granted, free of charge, to any person obtaining a copy | ||||||
|  | of this software and associated documentation files (the "Software"), to | ||||||
|  | deal in the Software without restriction, including without limitation the | ||||||
|  | rights to use, copy, modify, merge, publish, distribute, sublicense, and/or | ||||||
|  | sell copies of the Software, and to permit persons to whom the Software is | ||||||
|  | furnished to do so, subject to the following conditions: | ||||||
|  |  | ||||||
|  | The above copyright notice and this permission notice shall be included in | ||||||
|  | all copies or substantial portions of the Software. | ||||||
|  |  | ||||||
|  | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||||
|  | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||||
|  | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||||
|  | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||||
|  | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||||||
|  | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||||||
|  | IN THE SOFTWARE. | ||||||
|  | """ | ||||||
|  |  | ||||||
import io
import posixpath
import zipfile
import itertools
import contextlib
import pathlib


# Only `Path` is public API; the helpers and ZipFile subclasses are internal.
__all__ = ['Path']
|  |  | ||||||
|  |  | ||||||
def _parents(path):
    """
    Given a path with elements separated by
    posixpath.sep, generate all parents of that path.

    >>> list(_parents('b/d'))
    ['b']
    >>> list(_parents('/b/d/'))
    ['/b']
    >>> list(_parents('b/d/f/'))
    ['b/d', 'b']
    >>> list(_parents('b'))
    []
    >>> list(_parents(''))
    []
    """
    # The strict ancestors are the full ancestry minus the path itself,
    # so skip the first element of the ancestry stream.
    ancestry = _ancestry(path)
    return itertools.islice(ancestry, 1, None)
|  |  | ||||||
|  |  | ||||||
|  | def _ancestry(path): | ||||||
|  |     """ | ||||||
|  |     Given a path with elements separated by | ||||||
|  |     posixpath.sep, generate all elements of that path | ||||||
|  |  | ||||||
|  |     >>> list(_ancestry('b/d')) | ||||||
|  |     ['b/d', 'b'] | ||||||
|  |     >>> list(_ancestry('/b/d/')) | ||||||
|  |     ['/b/d', '/b'] | ||||||
|  |     >>> list(_ancestry('b/d/f/')) | ||||||
|  |     ['b/d/f', 'b/d', 'b'] | ||||||
|  |     >>> list(_ancestry('b')) | ||||||
|  |     ['b'] | ||||||
|  |     >>> list(_ancestry('')) | ||||||
|  |     [] | ||||||
|  |     """ | ||||||
|  |     path = path.rstrip(posixpath.sep) | ||||||
|  |     while path and path != posixpath.sep: | ||||||
|  |         yield path | ||||||
|  |         path, tail = posixpath.split(path) | ||||||
|  |  | ||||||
|  |  | ||||||
# dict.fromkeys keeps insertion order, so the resulting keys are the input
# items deduplicated in first-seen order.
_dedupe = dict.fromkeys
"""Deduplicate an iterable in original order"""
|  |  | ||||||
|  |  | ||||||
|  | def _difference(minuend, subtrahend): | ||||||
|  |     """ | ||||||
|  |     Return items in minuend not in subtrahend, retaining order | ||||||
|  |     with O(1) lookup. | ||||||
|  |     """ | ||||||
|  |     return itertools.filterfalse(set(subtrahend).__contains__, minuend) | ||||||
|  |  | ||||||
|  |  | ||||||
class CompleteDirs(zipfile.ZipFile):
    """
    A ZipFile subclass that ensures that implied directories
    are always included in the namelist.
    """

    @staticmethod
    def _implied_dirs(names):
        # Directories implied by member paths but missing from `names`,
        # rendered as "dir/" entries and deduplicated in first-seen order.
        parents = itertools.chain.from_iterable(map(_parents, names))
        as_dirs = (p + posixpath.sep for p in parents)
        return _dedupe(_difference(as_dirs, names))

    def namelist(self):
        # Archive member names plus any implied directory entries.
        names = super(CompleteDirs, self).namelist()
        return names + list(self._implied_dirs(names))

    def _name_set(self):
        # Set of all names (including implied dirs) for O(1) membership tests.
        return set(self.namelist())

    def resolve_dir(self, name):
        """
        If the name represents a directory, return that name
        as a directory (with the trailing slash).
        """
        names = self._name_set()
        dirname = name + '/'
        # Only treat `name` as a directory when the slashed form exists
        # and the bare form does not.
        dir_match = name not in names and dirname in names
        return dirname if dir_match else name

    @classmethod
    def make(cls, source):
        """
        Given a source (filename or zipfile), return an
        appropriate CompleteDirs subclass.
        """
        if isinstance(source, CompleteDirs):
            return source

        if not isinstance(source, zipfile.ZipFile):
            # `source` is a filename/path: open a fresh archive of this class.
            return cls(source)

        # Only allow for FastLookup when supplied zipfile is read-only
        if 'r' not in source.mode:
            cls = CompleteDirs

        # NOTE: mutates the caller's ZipFile object in place by swapping
        # its class; see the caveat in Path.__init__'s docstring.
        source.__class__ = cls
        return source
|  |  | ||||||
|  |  | ||||||
class FastLookup(CompleteDirs):
    """
    ZipFile subclass to ensure implicit
    dirs exist and are resolved rapidly.
    """

    def namelist(self):
        # Cache the full name list on first use (name-mangled attribute);
        # safe because instances of this class are read-only archives.
        try:
            return self.__names
        except AttributeError:
            self.__names = super(FastLookup, self).namelist()
            return self.__names

    def _name_set(self):
        # Same memoization strategy for the membership-test set.
        try:
            return self.__lookup
        except AttributeError:
            self.__lookup = super(FastLookup, self)._name_set()
            return self.__lookup
|  |  | ||||||
|  |  | ||||||
|  | class Path: | ||||||
|  |     """ | ||||||
|  |     A pathlib-compatible interface for zip files. | ||||||
|  |  | ||||||
|  |     Consider a zip file with this structure:: | ||||||
|  |  | ||||||
|  |         . | ||||||
|  |         ├── a.txt | ||||||
|  |         └── b | ||||||
|  |             ├── c.txt | ||||||
|  |             └── d | ||||||
|  |                 └── e.txt | ||||||
|  |  | ||||||
|  |     >>> data = io.BytesIO() | ||||||
|  |     >>> zf = zipfile.ZipFile(data, 'w') | ||||||
|  |     >>> zf.writestr('a.txt', 'content of a') | ||||||
|  |     >>> zf.writestr('b/c.txt', 'content of c') | ||||||
|  |     >>> zf.writestr('b/d/e.txt', 'content of e') | ||||||
|  |     >>> zf.filename = 'mem/abcde.zip' | ||||||
|  |  | ||||||
|  |     Path accepts the zipfile object itself or a filename | ||||||
|  |  | ||||||
|  |     >>> root = Path(zf) | ||||||
|  |  | ||||||
|  |     From there, several path operations are available. | ||||||
|  |  | ||||||
|  |     Directory iteration (including the zip file itself): | ||||||
|  |  | ||||||
|  |     >>> a, b = root.iterdir() | ||||||
|  |     >>> a | ||||||
|  |     Path('mem/abcde.zip', 'a.txt') | ||||||
|  |     >>> b | ||||||
|  |     Path('mem/abcde.zip', 'b/') | ||||||
|  |  | ||||||
|  |     name property: | ||||||
|  |  | ||||||
|  |     >>> b.name | ||||||
|  |     'b' | ||||||
|  |  | ||||||
|  |     join with divide operator: | ||||||
|  |  | ||||||
|  |     >>> c = b / 'c.txt' | ||||||
|  |     >>> c | ||||||
|  |     Path('mem/abcde.zip', 'b/c.txt') | ||||||
|  |     >>> c.name | ||||||
|  |     'c.txt' | ||||||
|  |  | ||||||
|  |     Read text: | ||||||
|  |  | ||||||
|  |     >>> c.read_text() | ||||||
|  |     'content of c' | ||||||
|  |  | ||||||
|  |     existence: | ||||||
|  |  | ||||||
|  |     >>> c.exists() | ||||||
|  |     True | ||||||
|  |     >>> (b / 'missing.txt').exists() | ||||||
|  |     False | ||||||
|  |  | ||||||
|  |     Coercion to string: | ||||||
|  |  | ||||||
|  |     >>> import os | ||||||
|  |     >>> str(c).replace(os.sep, posixpath.sep) | ||||||
|  |     'mem/abcde.zip/b/c.txt' | ||||||
|  |  | ||||||
|  |     At the root, ``name``, ``filename``, and ``parent`` | ||||||
|  |     resolve to the zipfile. Note these attributes are not | ||||||
|  |     valid and will raise a ``ValueError`` if the zipfile | ||||||
|  |     has no filename. | ||||||
|  |  | ||||||
|  |     >>> root.name | ||||||
|  |     'abcde.zip' | ||||||
|  |     >>> str(root.filename).replace(os.sep, posixpath.sep) | ||||||
|  |     'mem/abcde.zip' | ||||||
|  |     >>> str(root.parent) | ||||||
|  |     'mem' | ||||||
|  |     """ | ||||||
|  |  | ||||||
|  |     __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" | ||||||
|  |  | ||||||
|  |     def __init__(self, root, at=""): | ||||||
|  |         """ | ||||||
|  |         Construct a Path from a ZipFile or filename. | ||||||
|  |  | ||||||
|  |         Note: When the source is an existing ZipFile object, | ||||||
|  |         its type (__class__) will be mutated to a | ||||||
|  |         specialized type. If the caller wishes to retain the | ||||||
|  |         original type, the caller should either create a | ||||||
|  |         separate ZipFile object or pass a filename. | ||||||
|  |         """ | ||||||
|  |         self.root = FastLookup.make(root) | ||||||
|  |         self.at = at | ||||||
|  |  | ||||||
|  |     def open(self, mode='r', *args, pwd=None, **kwargs): | ||||||
|  |         """ | ||||||
|  |         Open this entry as text or binary following the semantics | ||||||
|  |         of ``pathlib.Path.open()`` by passing arguments through | ||||||
|  |         to io.TextIOWrapper(). | ||||||
|  |         """ | ||||||
|  |         if self.is_dir(): | ||||||
|  |             raise IsADirectoryError(self) | ||||||
|  |         zip_mode = mode[0] | ||||||
|  |         if not self.exists() and zip_mode == 'r': | ||||||
|  |             raise FileNotFoundError(self) | ||||||
|  |         stream = self.root.open(self.at, zip_mode, pwd=pwd) | ||||||
|  |         if 'b' in mode: | ||||||
|  |             if args or kwargs: | ||||||
|  |                 raise ValueError("encoding args invalid for binary operation") | ||||||
|  |             return stream | ||||||
|  |         return io.TextIOWrapper(stream, *args, **kwargs) | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def name(self): | ||||||
|  |         return pathlib.Path(self.at).name or self.filename.name | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def suffix(self): | ||||||
|  |         return pathlib.Path(self.at).suffix or self.filename.suffix | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def suffixes(self): | ||||||
|  |         return pathlib.Path(self.at).suffixes or self.filename.suffixes | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def stem(self): | ||||||
|  |         return pathlib.Path(self.at).stem or self.filename.stem | ||||||
|  |  | ||||||
|  |     @property | ||||||
|  |     def filename(self): | ||||||
|  |         return pathlib.Path(self.root.filename).joinpath(self.at) | ||||||
|  |  | ||||||
|  |     def read_text(self, *args, **kwargs): | ||||||
|  |         with self.open('r', *args, **kwargs) as strm: | ||||||
|  |             return strm.read() | ||||||
|  |  | ||||||
|  |     def read_bytes(self): | ||||||
|  |         with self.open('rb') as strm: | ||||||
|  |             return strm.read() | ||||||
|  |  | ||||||
|  |     def _is_child(self, path): | ||||||
|  |         return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") | ||||||
|  |  | ||||||
|  |     def _next(self, at): | ||||||
|  |         return self.__class__(self.root, at) | ||||||
|  |  | ||||||
|  |     def is_dir(self): | ||||||
|  |         return not self.at or self.at.endswith("/") | ||||||
|  |  | ||||||
|  |     def is_file(self): | ||||||
|  |         return self.exists() and not self.is_dir() | ||||||
|  |  | ||||||
|  |     def exists(self): | ||||||
|  |         return self.at in self.root._name_set() | ||||||
|  |  | ||||||
|  |     def iterdir(self): | ||||||
|  |         if not self.is_dir(): | ||||||
|  |             raise ValueError("Can't listdir a file") | ||||||
|  |         subs = map(self._next, self.root.namelist()) | ||||||
|  |         return filter(self._is_child, subs) | ||||||
|  |  | ||||||
|  |     def __str__(self): | ||||||
|  |         return posixpath.join(self.root.filename, self.at) | ||||||
|  |  | ||||||
|  |     def __repr__(self): | ||||||
|  |         return self.__repr.format(self=self) | ||||||
|  |  | ||||||
def joinpath(self, *other):
    """Join one or more path segments onto this member's path.

    The combined path is passed through the root's ``resolve_dir``
    (defined outside this view; presumably it normalizes directory
    entries) before a new object of the same class is constructed.

    Returns:
        A new instance rooted at the same archive.
    """
    # Renamed the local from ``next`` -- it shadowed the builtin.
    combined = posixpath.join(self.at, *other)
    return self._next(self.root.resolve_dir(combined))
|  |  | ||||||
|  |     __truediv__ = joinpath | ||||||
|  |  | ||||||
@property
def parent(self):
    """Parent path of this member.

    For the archive root (empty ``at``) this is the parent of the
    combined filesystem path; otherwise it is the containing directory
    inside the archive, carrying a trailing slash except at the root.
    """
    if not self.at:
        return self.filename.parent
    trimmed = self.at.rstrip('/')
    parent_at = posixpath.dirname(trimmed)
    if parent_at:
        parent_at += '/'
    return self._next(parent_at)
							
								
								
									
										1
									
								
								Source/node_modules/.bin/acorn
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								Source/node_modules/.bin/acorn
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							| @@ -0,0 +1 @@ | |||||||
|  | ../acorn/bin/acorn | ||||||
							
								
								
									
										1
									
								
								Source/node_modules/.bin/babylon
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								Source/node_modules/.bin/babylon
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							| @@ -0,0 +1 @@ | |||||||
|  | ../babylon/bin/babylon.js | ||||||
							
								
								
									
										1
									
								
								Source/node_modules/.bin/mkdirp
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								Source/node_modules/.bin/mkdirp
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							| @@ -0,0 +1 @@ | |||||||
|  | ../mkdirp/bin/cmd.js | ||||||
							
								
								
									
										1
									
								
								Source/node_modules/.bin/pug
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								Source/node_modules/.bin/pug
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							| @@ -0,0 +1 @@ | |||||||
|  | ../pug-cli/index.js | ||||||
							
								
								
									
										1
									
								
								Source/node_modules/.bin/resolve
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								Source/node_modules/.bin/resolve
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							| @@ -0,0 +1 @@ | |||||||
|  | ../resolve/bin/resolve | ||||||
							
								
								
									
										1
									
								
								Source/node_modules/.bin/uglifyjs
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							
							
						
						
									
										1
									
								
								Source/node_modules/.bin/uglifyjs
									
									
									
										generated
									
									
										vendored
									
									
										Symbolic link
									
								
							| @@ -0,0 +1 @@ | |||||||
|  | ../uglify-js/bin/uglifyjs | ||||||
							
								
								
									
										729
									
								
								Source/node_modules/.package-lock.json
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										729
									
								
								Source/node_modules/.package-lock.json
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,729 @@ | |||||||
|  | { | ||||||
|  |   "name": "Public", | ||||||
|  |   "lockfileVersion": 2, | ||||||
|  |   "requires": true, | ||||||
|  |   "packages": { | ||||||
|  |     "node_modules/@types/babel-types": { | ||||||
|  |       "version": "7.0.11", | ||||||
|  |       "resolved": "https://registry.npmjs.org/@types/babel-types/-/babel-types-7.0.11.tgz", | ||||||
|  |       "integrity": "sha512-pkPtJUUY+Vwv6B1inAz55rQvivClHJxc9aVEPPmaq2cbyeMLCiDpbKpcKyX4LAwpNGi+SHBv0tHv6+0gXv0P2A==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/@types/babylon": { | ||||||
|  |       "version": "6.16.6", | ||||||
|  |       "resolved": "https://registry.npmjs.org/@types/babylon/-/babylon-6.16.6.tgz", | ||||||
|  |       "integrity": "sha512-G4yqdVlhr6YhzLXFKy5F7HtRBU8Y23+iWy7UKthMq/OSQnL1hbsoeXESQ2LY8zEDlknipDG3nRGhUC9tkwvy/w==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "@types/babel-types": "*" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/acorn": { | ||||||
|  |       "version": "4.0.13", | ||||||
|  |       "resolved": "https://registry.npmjs.org/acorn/-/acorn-4.0.13.tgz", | ||||||
|  |       "integrity": "sha512-fu2ygVGuMmlzG8ZeRJ0bvR41nsAkxxhbyk8bZ1SS521Z7vmgJFTQQlfz/Mp/nJexGBz+v8sC9bM6+lNgskt4Ug==", | ||||||
|  |       "bin": { | ||||||
|  |         "acorn": "bin/acorn" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.4.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/acorn-globals": { | ||||||
|  |       "version": "3.1.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-3.1.0.tgz", | ||||||
|  |       "integrity": "sha512-uWttZCk96+7itPxK8xCzY86PnxKTMrReKDqrHzv42VQY0K30PUO8WY13WMOuI+cOdX4EIdzdvQ8k6jkuGRFMYw==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "acorn": "^4.0.4" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/align-text": { | ||||||
|  |       "version": "0.1.4", | ||||||
|  |       "resolved": "https://registry.npmjs.org/align-text/-/align-text-0.1.4.tgz", | ||||||
|  |       "integrity": "sha512-GrTZLRpmp6wIC2ztrWW9MjjTgSKccffgFagbNDOX95/dcjEcYZibYTeaOntySQLcdw1ztBoFkviiUvTMbb9MYg==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "kind-of": "^3.0.2", | ||||||
|  |         "longest": "^1.0.1", | ||||||
|  |         "repeat-string": "^1.5.2" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/ansi-regex": { | ||||||
|  |       "version": "2.1.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", | ||||||
|  |       "integrity": "sha512-TIGnTpdo+E3+pCyAluZvtED5p5wCqLdezCyhPZzKPcxvFplEt4i+W7OONCKgeZFT3+y5NZZfOOS/Bdcanm1MYA==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/ansi-styles": { | ||||||
|  |       "version": "2.2.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", | ||||||
|  |       "integrity": "sha512-kmCevFghRiWM7HB5zTPULl4r9bVFSWjz62MhqizDGUrq2NWuNMQyuv4tHHoKJHs69M/MF64lEcHdYIocrdWQYA==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/asap": { | ||||||
|  |       "version": "2.0.6", | ||||||
|  |       "resolved": "https://registry.npmjs.org/asap/-/asap-2.0.6.tgz", | ||||||
|  |       "integrity": "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/babel-runtime": { | ||||||
|  |       "version": "6.26.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz", | ||||||
|  |       "integrity": "sha512-ITKNuq2wKlW1fJg9sSW52eepoYgZBggvOAHC0u/CYu/qxQ9EVzThCgR69BnSXLHjy2f7SY5zaQ4yt7H9ZVxY2g==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "core-js": "^2.4.0", | ||||||
|  |         "regenerator-runtime": "^0.11.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/babel-types": { | ||||||
|  |       "version": "6.26.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/babel-types/-/babel-types-6.26.0.tgz", | ||||||
|  |       "integrity": "sha512-zhe3V/26rCWsEZK8kZN+HaQj5yQ1CilTObixFzKW1UWjqG7618Twz6YEsCnjfg5gBcJh02DrpCkS9h98ZqDY+g==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "babel-runtime": "^6.26.0", | ||||||
|  |         "esutils": "^2.0.2", | ||||||
|  |         "lodash": "^4.17.4", | ||||||
|  |         "to-fast-properties": "^1.0.3" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/babylon": { | ||||||
|  |       "version": "6.18.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/babylon/-/babylon-6.18.0.tgz", | ||||||
|  |       "integrity": "sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ==", | ||||||
|  |       "bin": { | ||||||
|  |         "babylon": "bin/babylon.js" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/call-bind": { | ||||||
|  |       "version": "1.0.2", | ||||||
|  |       "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", | ||||||
|  |       "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "function-bind": "^1.1.1", | ||||||
|  |         "get-intrinsic": "^1.0.2" | ||||||
|  |       }, | ||||||
|  |       "funding": { | ||||||
|  |         "url": "https://github.com/sponsors/ljharb" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/camelcase": { | ||||||
|  |       "version": "1.2.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-1.2.1.tgz", | ||||||
|  |       "integrity": "sha512-wzLkDa4K/mzI1OSITC+DUyjgIl/ETNHE9QvYgy6J6Jvqyyz4C0Xfd+lQhb19sX2jMpZV4IssUn0VDVmglV+s4g==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/center-align": { | ||||||
|  |       "version": "0.1.3", | ||||||
|  |       "resolved": "https://registry.npmjs.org/center-align/-/center-align-0.1.3.tgz", | ||||||
|  |       "integrity": "sha512-Baz3aNe2gd2LP2qk5U+sDk/m4oSuwSDcBfayTCTBoWpfIGO5XFxPmjILQII4NGiZjD6DoDI6kf7gKaxkf7s3VQ==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "align-text": "^0.1.3", | ||||||
|  |         "lazy-cache": "^1.0.3" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/chalk": { | ||||||
|  |       "version": "1.1.3", | ||||||
|  |       "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", | ||||||
|  |       "integrity": "sha512-U3lRVLMSlsCfjqYPbLyVv11M9CPW4I728d6TCKMAOJueEeB9/8o+eSsMnxPJD+Q+K909sdESg7C+tIkoH6on1A==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "ansi-styles": "^2.2.1", | ||||||
|  |         "escape-string-regexp": "^1.0.2", | ||||||
|  |         "has-ansi": "^2.0.0", | ||||||
|  |         "strip-ansi": "^3.0.0", | ||||||
|  |         "supports-color": "^2.0.0" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/character-parser": { | ||||||
|  |       "version": "2.2.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/character-parser/-/character-parser-2.2.0.tgz", | ||||||
|  |       "integrity": "sha512-+UqJQjFEFaTAs3bNsF2j2kEN1baG/zghZbdqoYEDxGZtJo9LBzl1A+m0D4n3qKx8N2FNv8/Xp6yV9mQmBuptaw==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "is-regex": "^1.0.3" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/clean-css": { | ||||||
|  |       "version": "4.2.4", | ||||||
|  |       "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-4.2.4.tgz", | ||||||
|  |       "integrity": "sha512-EJUDT7nDVFDvaQgAo2G/PJvxmp1o/c6iXLbswsBbUFXi1Nr+AjA2cKmfbKDMjMvzEe75g3P6JkaDDAKk96A85A==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "source-map": "~0.6.0" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">= 4.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/cliui": { | ||||||
|  |       "version": "2.1.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/cliui/-/cliui-2.1.0.tgz", | ||||||
|  |       "integrity": "sha512-GIOYRizG+TGoc7Wgc1LiOTLare95R3mzKgoln+Q/lE4ceiYH19gUpl0l0Ffq4lJDEf3FxujMe6IBfOCs7pfqNA==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "center-align": "^0.1.1", | ||||||
|  |         "right-align": "^0.1.1", | ||||||
|  |         "wordwrap": "0.0.2" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/commander": { | ||||||
|  |       "version": "2.20.3", | ||||||
|  |       "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", | ||||||
|  |       "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/constantinople": { | ||||||
|  |       "version": "3.1.2", | ||||||
|  |       "resolved": "https://registry.npmjs.org/constantinople/-/constantinople-3.1.2.tgz", | ||||||
|  |       "integrity": "sha512-yePcBqEFhLOqSBtwYOGGS1exHo/s1xjekXiinh4itpNQGCu4KA1euPh1fg07N2wMITZXQkBz75Ntdt1ctGZouw==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "@types/babel-types": "^7.0.0", | ||||||
|  |         "@types/babylon": "^6.16.2", | ||||||
|  |         "babel-types": "^6.26.0", | ||||||
|  |         "babylon": "^6.18.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/core-js": { | ||||||
|  |       "version": "2.6.12", | ||||||
|  |       "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.12.tgz", | ||||||
|  |       "integrity": "sha512-Kb2wC0fvsWfQrgk8HU5lW6U/Lcs8+9aaYcy4ZFc6DDlo4nZ7n70dEgE5rtR0oG6ufKDUnrwfWL1mXR5ljDatrQ==", | ||||||
|  |       "deprecated": "core-js@<3.4 is no longer maintained and not recommended for usage due to the number of issues. Because of the V8 engine whims, feature detection in old core-js versions could cause a slowdown up to 100x even if nothing is polyfilled. Please, upgrade your dependencies to the actual version of core-js.", | ||||||
|  |       "hasInstallScript": true | ||||||
|  |     }, | ||||||
|  |     "node_modules/decamelize": { | ||||||
|  |       "version": "1.2.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", | ||||||
|  |       "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/doctypes": { | ||||||
|  |       "version": "1.1.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/doctypes/-/doctypes-1.1.0.tgz", | ||||||
|  |       "integrity": "sha512-LLBi6pEqS6Do3EKQ3J0NqHWV5hhb78Pi8vvESYwyOy2c31ZEZVdtitdzsQsKb7878PEERhzUk0ftqGhG6Mz+pQ==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/escape-string-regexp": { | ||||||
|  |       "version": "1.0.5", | ||||||
|  |       "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", | ||||||
|  |       "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.8.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/esutils": { | ||||||
|  |       "version": "2.0.3", | ||||||
|  |       "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", | ||||||
|  |       "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/function-bind": { | ||||||
|  |       "version": "1.1.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", | ||||||
|  |       "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/get-intrinsic": { | ||||||
|  |       "version": "1.1.2", | ||||||
|  |       "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.2.tgz", | ||||||
|  |       "integrity": "sha512-Jfm3OyCxHh9DJyc28qGk+JmfkpO41A4XkneDSujN9MDXrm4oDKdHvndhZ2dN94+ERNfkYJWDclW6k2L/ZGHjXA==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "function-bind": "^1.1.1", | ||||||
|  |         "has": "^1.0.3", | ||||||
|  |         "has-symbols": "^1.0.3" | ||||||
|  |       }, | ||||||
|  |       "funding": { | ||||||
|  |         "url": "https://github.com/sponsors/ljharb" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/has": { | ||||||
|  |       "version": "1.0.3", | ||||||
|  |       "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", | ||||||
|  |       "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "function-bind": "^1.1.1" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">= 0.4.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/has-ansi": { | ||||||
|  |       "version": "2.0.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", | ||||||
|  |       "integrity": "sha512-C8vBJ8DwUCx19vhm7urhTuUsr4/IyP6l4VzNQDv+ryHQObW3TTTp9yB68WpYgRe2bbaGuZ/se74IqFeVnMnLZg==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "ansi-regex": "^2.0.0" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/has-symbols": { | ||||||
|  |       "version": "1.0.3", | ||||||
|  |       "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", | ||||||
|  |       "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">= 0.4" | ||||||
|  |       }, | ||||||
|  |       "funding": { | ||||||
|  |         "url": "https://github.com/sponsors/ljharb" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/has-tostringtag": { | ||||||
|  |       "version": "1.0.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", | ||||||
|  |       "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "has-symbols": "^1.0.2" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">= 0.4" | ||||||
|  |       }, | ||||||
|  |       "funding": { | ||||||
|  |         "url": "https://github.com/sponsors/ljharb" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/is-buffer": { | ||||||
|  |       "version": "1.1.6", | ||||||
|  |       "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", | ||||||
|  |       "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/is-core-module": { | ||||||
|  |       "version": "2.9.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.9.0.tgz", | ||||||
|  |       "integrity": "sha512-+5FPy5PnwmO3lvfMb0AsoPaBG+5KHUI0wYFXOtYPnVVVspTFUuMZNfNaNVRt3FZadstu2c8x23vykRW/NBoU6A==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "has": "^1.0.3" | ||||||
|  |       }, | ||||||
|  |       "funding": { | ||||||
|  |         "url": "https://github.com/sponsors/ljharb" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/is-expression": { | ||||||
|  |       "version": "3.0.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/is-expression/-/is-expression-3.0.0.tgz", | ||||||
|  |       "integrity": "sha512-vyMeQMq+AiH5uUnoBfMTwf18tO3bM6k1QXBE9D6ueAAquEfCZe3AJPtud9g6qS0+4X8xA7ndpZiDyeb2l2qOBw==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "acorn": "~4.0.2", | ||||||
|  |         "object-assign": "^4.0.1" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/is-promise": { | ||||||
|  |       "version": "2.2.2", | ||||||
|  |       "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz", | ||||||
|  |       "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/is-regex": { | ||||||
|  |       "version": "1.1.4", | ||||||
|  |       "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", | ||||||
|  |       "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "call-bind": "^1.0.2", | ||||||
|  |         "has-tostringtag": "^1.0.0" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">= 0.4" | ||||||
|  |       }, | ||||||
|  |       "funding": { | ||||||
|  |         "url": "https://github.com/sponsors/ljharb" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/js-stringify": { | ||||||
|  |       "version": "1.0.2", | ||||||
|  |       "resolved": "https://registry.npmjs.org/js-stringify/-/js-stringify-1.0.2.tgz", | ||||||
|  |       "integrity": "sha512-rtS5ATOo2Q5k1G+DADISilDA6lv79zIiwFd6CcjuIxGKLFm5C+RLImRscVap9k55i+MOZwgliw+NejvkLuGD5g==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/jstransformer": { | ||||||
|  |       "version": "1.0.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/jstransformer/-/jstransformer-1.0.0.tgz", | ||||||
|  |       "integrity": "sha512-C9YK3Rf8q6VAPDCCU9fnqo3mAfOH6vUGnMcP4AQAYIEpWtfGLpwOTmZ+igtdK5y+VvI2n3CyYSzy4Qh34eq24A==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "is-promise": "^2.0.0", | ||||||
|  |         "promise": "^7.0.1" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/kind-of": { | ||||||
|  |       "version": "3.2.2", | ||||||
|  |       "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", | ||||||
|  |       "integrity": "sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "is-buffer": "^1.1.5" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/lazy-cache": { | ||||||
|  |       "version": "1.0.4", | ||||||
|  |       "resolved": "https://registry.npmjs.org/lazy-cache/-/lazy-cache-1.0.4.tgz", | ||||||
|  |       "integrity": "sha512-RE2g0b5VGZsOCFOCgP7omTRYFqydmZkBwl5oNnQ1lDYC57uyO9KqNnNVxT7COSHTxrRCWVcAVOcbjk+tvh/rgQ==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/lodash": { | ||||||
|  |       "version": "4.17.21", | ||||||
|  |       "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", | ||||||
|  |       "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/longest": { | ||||||
|  |       "version": "1.0.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/longest/-/longest-1.0.1.tgz", | ||||||
|  |       "integrity": "sha512-k+yt5n3l48JU4k8ftnKG6V7u32wyH2NfKzeMto9F/QRE0amxy/LayxwlvjjkZEIzqR+19IrtFO8p5kB9QaYUFg==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/minimist": { | ||||||
|  |       "version": "1.2.6", | ||||||
|  |       "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", | ||||||
|  |       "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/mkdirp": { | ||||||
|  |       "version": "0.5.6", | ||||||
|  |       "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz", | ||||||
|  |       "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "minimist": "^1.2.6" | ||||||
|  |       }, | ||||||
|  |       "bin": { | ||||||
|  |         "mkdirp": "bin/cmd.js" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/object-assign": { | ||||||
|  |       "version": "4.1.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", | ||||||
|  |       "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/path-parse": { | ||||||
|  |       "version": "1.0.7", | ||||||
|  |       "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", | ||||||
|  |       "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/promise": { | ||||||
|  |       "version": "7.3.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/promise/-/promise-7.3.1.tgz", | ||||||
|  |       "integrity": "sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "asap": "~2.0.3" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug": { | ||||||
|  |       "version": "2.0.4", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug/-/pug-2.0.4.tgz", | ||||||
|  |       "integrity": "sha512-XhoaDlvi6NIzL49nu094R2NA6P37ijtgMDuWE+ofekDChvfKnzFal60bhSdiy8y2PBO6fmz3oMEIcfpBVRUdvw==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "pug-code-gen": "^2.0.2", | ||||||
|  |         "pug-filters": "^3.1.1", | ||||||
|  |         "pug-lexer": "^4.1.0", | ||||||
|  |         "pug-linker": "^3.0.6", | ||||||
|  |         "pug-load": "^2.0.12", | ||||||
|  |         "pug-parser": "^5.0.1", | ||||||
|  |         "pug-runtime": "^2.0.5", | ||||||
|  |         "pug-strip-comments": "^1.0.4" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-attrs": { | ||||||
|  |       "version": "2.0.4", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-attrs/-/pug-attrs-2.0.4.tgz", | ||||||
|  |       "integrity": "sha512-TaZ4Z2TWUPDJcV3wjU3RtUXMrd3kM4Wzjbe3EWnSsZPsJ3LDI0F3yCnf2/W7PPFF+edUFQ0HgDL1IoxSz5K8EQ==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "constantinople": "^3.0.1", | ||||||
|  |         "js-stringify": "^1.0.1", | ||||||
|  |         "pug-runtime": "^2.0.5" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-cli": { | ||||||
|  |       "version": "1.0.0-alpha6", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-cli/-/pug-cli-1.0.0-alpha6.tgz", | ||||||
|  |       "integrity": "sha512-ogaf7h4cT174NFSHNqAMdrZpGFCZSvsht41IYZZgP7ERZ1OxSc5fmPpojo1w9UntVreeChQP3BJ5r+Fey0a9zg==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "chalk": "^1.0.0", | ||||||
|  |         "commander": "^2.8.1", | ||||||
|  |         "mkdirp": "^0.5.1", | ||||||
|  |         "pug": "^2.0.0-alpha7" | ||||||
|  |       }, | ||||||
|  |       "bin": { | ||||||
|  |         "pug": "index.js" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-code-gen": { | ||||||
|  |       "version": "2.0.3", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-code-gen/-/pug-code-gen-2.0.3.tgz", | ||||||
|  |       "integrity": "sha512-r9sezXdDuZJfW9J91TN/2LFbiqDhmltTFmGpHTsGdrNGp3p4SxAjjXEfnuK2e4ywYsRIVP0NeLbSAMHUcaX1EA==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "constantinople": "^3.1.2", | ||||||
|  |         "doctypes": "^1.1.0", | ||||||
|  |         "js-stringify": "^1.0.1", | ||||||
|  |         "pug-attrs": "^2.0.4", | ||||||
|  |         "pug-error": "^1.3.3", | ||||||
|  |         "pug-runtime": "^2.0.5", | ||||||
|  |         "void-elements": "^2.0.1", | ||||||
|  |         "with": "^5.0.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-error": { | ||||||
|  |       "version": "1.3.3", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-error/-/pug-error-1.3.3.tgz", | ||||||
|  |       "integrity": "sha512-qE3YhESP2mRAWMFJgKdtT5D7ckThRScXRwkfo+Erqga7dyJdY3ZquspprMCj/9sJ2ijm5hXFWQE/A3l4poMWiQ==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-filters": { | ||||||
|  |       "version": "3.1.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-filters/-/pug-filters-3.1.1.tgz", | ||||||
|  |       "integrity": "sha512-lFfjNyGEyVWC4BwX0WyvkoWLapI5xHSM3xZJFUhx4JM4XyyRdO8Aucc6pCygnqV2uSgJFaJWW3Ft1wCWSoQkQg==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "clean-css": "^4.1.11", | ||||||
|  |         "constantinople": "^3.0.1", | ||||||
|  |         "jstransformer": "1.0.0", | ||||||
|  |         "pug-error": "^1.3.3", | ||||||
|  |         "pug-walk": "^1.1.8", | ||||||
|  |         "resolve": "^1.1.6", | ||||||
|  |         "uglify-js": "^2.6.1" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-lexer": { | ||||||
|  |       "version": "4.1.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-lexer/-/pug-lexer-4.1.0.tgz", | ||||||
|  |       "integrity": "sha512-i55yzEBtjm0mlplW4LoANq7k3S8gDdfC6+LThGEvsK4FuobcKfDAwt6V4jKPH9RtiE3a2Akfg5UpafZ1OksaPA==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "character-parser": "^2.1.1", | ||||||
|  |         "is-expression": "^3.0.0", | ||||||
|  |         "pug-error": "^1.3.3" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-linker": { | ||||||
|  |       "version": "3.0.6", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-linker/-/pug-linker-3.0.6.tgz", | ||||||
|  |       "integrity": "sha512-bagfuHttfQOpANGy1Y6NJ+0mNb7dD2MswFG2ZKj22s8g0wVsojpRlqveEQHmgXXcfROB2RT6oqbPYr9EN2ZWzg==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "pug-error": "^1.3.3", | ||||||
|  |         "pug-walk": "^1.1.8" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-load": { | ||||||
|  |       "version": "2.0.12", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-load/-/pug-load-2.0.12.tgz", | ||||||
|  |       "integrity": "sha512-UqpgGpyyXRYgJs/X60sE6SIf8UBsmcHYKNaOccyVLEuT6OPBIMo6xMPhoJnqtB3Q3BbO4Z3Bjz5qDsUWh4rXsg==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "object-assign": "^4.1.0", | ||||||
|  |         "pug-walk": "^1.1.8" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-parser": { | ||||||
|  |       "version": "5.0.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-parser/-/pug-parser-5.0.1.tgz", | ||||||
|  |       "integrity": "sha512-nGHqK+w07p5/PsPIyzkTQfzlYfuqoiGjaoqHv1LjOv2ZLXmGX1O+4Vcvps+P4LhxZ3drYSljjq4b+Naid126wA==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "pug-error": "^1.3.3", | ||||||
|  |         "token-stream": "0.0.1" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-runtime": { | ||||||
|  |       "version": "2.0.5", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-runtime/-/pug-runtime-2.0.5.tgz", | ||||||
|  |       "integrity": "sha512-P+rXKn9un4fQY77wtpcuFyvFaBww7/91f3jHa154qU26qFAnOe6SW1CbIDcxiG5lLK9HazYrMCCuDvNgDQNptw==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-strip-comments": { | ||||||
|  |       "version": "1.0.4", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-strip-comments/-/pug-strip-comments-1.0.4.tgz", | ||||||
|  |       "integrity": "sha512-i5j/9CS4yFhSxHp5iKPHwigaig/VV9g+FgReLJWWHEHbvKsbqL0oP/K5ubuLco6Wu3Kan5p7u7qk8A4oLLh6vw==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "pug-error": "^1.3.3" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/pug-walk": { | ||||||
|  |       "version": "1.1.8", | ||||||
|  |       "resolved": "https://registry.npmjs.org/pug-walk/-/pug-walk-1.1.8.tgz", | ||||||
|  |       "integrity": "sha512-GMu3M5nUL3fju4/egXwZO0XLi6fW/K3T3VTgFQ14GxNi8btlxgT5qZL//JwZFm/2Fa64J/PNS8AZeys3wiMkVA==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/regenerator-runtime": { | ||||||
|  |       "version": "0.11.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz", | ||||||
|  |       "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/repeat-string": { | ||||||
|  |       "version": "1.6.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", | ||||||
|  |       "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/resolve": { | ||||||
|  |       "version": "1.22.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", | ||||||
|  |       "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "is-core-module": "^2.9.0", | ||||||
|  |         "path-parse": "^1.0.7", | ||||||
|  |         "supports-preserve-symlinks-flag": "^1.0.0" | ||||||
|  |       }, | ||||||
|  |       "bin": { | ||||||
|  |         "resolve": "bin/resolve" | ||||||
|  |       }, | ||||||
|  |       "funding": { | ||||||
|  |         "url": "https://github.com/sponsors/ljharb" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/right-align": { | ||||||
|  |       "version": "0.1.3", | ||||||
|  |       "resolved": "https://registry.npmjs.org/right-align/-/right-align-0.1.3.tgz", | ||||||
|  |       "integrity": "sha512-yqINtL/G7vs2v+dFIZmFUDbnVyFUJFKd6gK22Kgo6R4jfJGFtisKyncWDDULgjfqf4ASQuIQyjJ7XZ+3aWpsAg==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "align-text": "^0.1.1" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/source-map": { | ||||||
|  |       "version": "0.6.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", | ||||||
|  |       "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/strip-ansi": { | ||||||
|  |       "version": "3.0.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", | ||||||
|  |       "integrity": "sha512-VhumSSbBqDTP8p2ZLKj40UjBCV4+v8bUSEpUb4KjRgWk9pbqGF4REFj6KEagidb2f/M6AzC0EmFyDNGaw9OCzg==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "ansi-regex": "^2.0.0" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/supports-color": { | ||||||
|  |       "version": "2.0.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", | ||||||
|  |       "integrity": "sha512-KKNVtd6pCYgPIKU4cp2733HWYCpplQhddZLBUryaAHou723x+FRzQ5Df824Fj+IyyuiQTRoub4SnIFfIcrp70g==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.8.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/supports-preserve-symlinks-flag": { | ||||||
|  |       "version": "1.0.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", | ||||||
|  |       "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">= 0.4" | ||||||
|  |       }, | ||||||
|  |       "funding": { | ||||||
|  |         "url": "https://github.com/sponsors/ljharb" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/to-fast-properties": { | ||||||
|  |       "version": "1.0.3", | ||||||
|  |       "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-1.0.3.tgz", | ||||||
|  |       "integrity": "sha512-lxrWP8ejsq+7E3nNjwYmUBMAgjMTZoTI+sdBOpvNyijeDLa29LUn9QaoXAHv4+Z578hbmHHJKZknzxVtvo77og==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/token-stream": { | ||||||
|  |       "version": "0.0.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/token-stream/-/token-stream-0.0.1.tgz", | ||||||
|  |       "integrity": "sha512-nfjOAu/zAWmX9tgwi5NRp7O7zTDUD1miHiB40klUnAh9qnL1iXdgzcz/i5dMaL5jahcBAaSfmNOBBJBLJW8TEg==" | ||||||
|  |     }, | ||||||
|  |     "node_modules/uglify-js": { | ||||||
|  |       "version": "2.8.29", | ||||||
|  |       "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-2.8.29.tgz", | ||||||
|  |       "integrity": "sha512-qLq/4y2pjcU3vhlhseXGGJ7VbFO4pBANu0kwl8VCa9KEI0V8VfZIx2Fy3w01iSTA/pGwKZSmu/+I4etLNDdt5w==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "source-map": "~0.5.1", | ||||||
|  |         "uglify-to-browserify": "~1.0.0", | ||||||
|  |         "yargs": "~3.10.0" | ||||||
|  |       }, | ||||||
|  |       "bin": { | ||||||
|  |         "uglifyjs": "bin/uglifyjs" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.8.0" | ||||||
|  |       }, | ||||||
|  |       "optionalDependencies": { | ||||||
|  |         "uglify-to-browserify": "~1.0.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/uglify-js/node_modules/source-map": { | ||||||
|  |       "version": "0.5.7", | ||||||
|  |       "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", | ||||||
|  |       "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/uglify-to-browserify": { | ||||||
|  |       "version": "1.0.2", | ||||||
|  |       "resolved": "https://registry.npmjs.org/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz", | ||||||
|  |       "integrity": "sha512-vb2s1lYx2xBtUgy+ta+b2J/GLVUR+wmpINwHePmPRhOsIVCG2wDzKJ0n14GslH1BifsqVzSOwQhRaCAsZ/nI4Q==", | ||||||
|  |       "optional": true | ||||||
|  |     }, | ||||||
|  |     "node_modules/void-elements": { | ||||||
|  |       "version": "2.0.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-2.0.1.tgz", | ||||||
|  |       "integrity": "sha512-qZKX4RnBzH2ugr8Lxa7x+0V6XD9Sb/ouARtiasEQCHB1EVU4NXtmHsDDrx1dO4ne5fc3J6EW05BP1Dl0z0iung==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.10.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/window-size": { | ||||||
|  |       "version": "0.1.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/window-size/-/window-size-0.1.0.tgz", | ||||||
|  |       "integrity": "sha512-1pTPQDKTdd61ozlKGNCjhNRd+KPmgLSGa3mZTHoOliaGcESD8G1PXhh7c1fgiPjVbNVfgy2Faw4BI8/m0cC8Mg==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">= 0.8.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/with": { | ||||||
|  |       "version": "5.1.1", | ||||||
|  |       "resolved": "https://registry.npmjs.org/with/-/with-5.1.1.tgz", | ||||||
|  |       "integrity": "sha512-uAnSsFGfSpF6DNhBXStvlZILfHJfJu4eUkfbRGk94kGO1Ta7bg6FwfvoOhhyHAJuFbCw+0xk4uJ3u57jLvlCJg==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "acorn": "^3.1.0", | ||||||
|  |         "acorn-globals": "^3.0.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/with/node_modules/acorn": { | ||||||
|  |       "version": "3.3.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/acorn/-/acorn-3.3.0.tgz", | ||||||
|  |       "integrity": "sha512-OLUyIIZ7mF5oaAUT1w0TFqQS81q3saT46x8t7ukpPjMNk+nbs4ZHhs7ToV8EWnLYLepjETXd4XaCE4uxkMeqUw==", | ||||||
|  |       "bin": { | ||||||
|  |         "acorn": "bin/acorn" | ||||||
|  |       }, | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.4.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/wordwrap": { | ||||||
|  |       "version": "0.0.2", | ||||||
|  |       "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.2.tgz", | ||||||
|  |       "integrity": "sha512-xSBsCeh+g+dinoBv3GAOWM4LcVVO68wLXRanibtBSdUvkGWQRGeE9P7IwU9EmDDi4jA6L44lz15CGMwdw9N5+Q==", | ||||||
|  |       "engines": { | ||||||
|  |         "node": ">=0.4.0" | ||||||
|  |       } | ||||||
|  |     }, | ||||||
|  |     "node_modules/yargs": { | ||||||
|  |       "version": "3.10.0", | ||||||
|  |       "resolved": "https://registry.npmjs.org/yargs/-/yargs-3.10.0.tgz", | ||||||
|  |       "integrity": "sha512-QFzUah88GAGy9lyDKGBqZdkYApt63rCXYBGYnEP4xDJPXNqXXnBDACnbrXnViV6jRSqAePwrATi2i8mfYm4L1A==", | ||||||
|  |       "dependencies": { | ||||||
|  |         "camelcase": "^1.0.2", | ||||||
|  |         "cliui": "^2.1.0", | ||||||
|  |         "decamelize": "^1.0.0", | ||||||
|  |         "window-size": "0.1.0" | ||||||
|  |       } | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  | } | ||||||
							
								
								
									
										21
									
								
								Source/node_modules/@types/babel-types/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							
							
						
						
									
										21
									
								
								Source/node_modules/@types/babel-types/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							| @@ -0,0 +1,21 @@ | |||||||
|  |     MIT License | ||||||
|  |  | ||||||
|  |     Copyright (c) Microsoft Corporation. | ||||||
|  |  | ||||||
|  |     Permission is hereby granted, free of charge, to any person obtaining a copy | ||||||
|  |     of this software and associated documentation files (the "Software"), to deal | ||||||
|  |     in the Software without restriction, including without limitation the rights | ||||||
|  |     to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||||
|  |     copies of the Software, and to permit persons to whom the Software is | ||||||
|  |     furnished to do so, subject to the following conditions: | ||||||
|  |  | ||||||
|  |     The above copyright notice and this permission notice shall be included in all | ||||||
|  |     copies or substantial portions of the Software. | ||||||
|  |  | ||||||
|  |     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||||
|  |     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||||
|  |     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||||
|  |     AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||||
|  |     LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||||
|  |     OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||||
|  |     SOFTWARE | ||||||
							
								
								
									
										2001
									
								
								Source/node_modules/@types/babel-types/index.d.ts
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							
							
						
						
									
										2001
									
								
								Source/node_modules/@types/babel-types/index.d.ts
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										45
									
								
								Source/node_modules/@types/babel-types/package.json
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							
							
						
						
									
										45
									
								
								Source/node_modules/@types/babel-types/package.json
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							| @@ -0,0 +1,45 @@ | |||||||
|  | { | ||||||
|  |     "name": "@types/babel-types", | ||||||
|  |     "version": "7.0.11", | ||||||
|  |     "description": "TypeScript definitions for babel-types", | ||||||
|  |     "homepage": "https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/babel-types", | ||||||
|  |     "license": "MIT", | ||||||
|  |     "contributors": [ | ||||||
|  |         { | ||||||
|  |             "name": "Troy Gerwien", | ||||||
|  |             "url": "https://github.com/yortus", | ||||||
|  |             "githubUsername": "yortus" | ||||||
|  |         }, | ||||||
|  |         { | ||||||
|  |             "name": "Sam Baxter", | ||||||
|  |             "url": "https://github.com/baxtersa", | ||||||
|  |             "githubUsername": "baxtersa" | ||||||
|  |         }, | ||||||
|  |         { | ||||||
|  |             "name": "Marvin Hagemeister", | ||||||
|  |             "url": "https://github.com/marvinhagemeister", | ||||||
|  |             "githubUsername": "marvinhagemeister" | ||||||
|  |         }, | ||||||
|  |         { | ||||||
|  |             "name": "Boris Cherny", | ||||||
|  |             "url": "https://github.com/bcherny", | ||||||
|  |             "githubUsername": "bcherny" | ||||||
|  |         }, | ||||||
|  |         { | ||||||
|  |             "name": "ExE Boss", | ||||||
|  |             "url": "https://github.com/ExE-Boss", | ||||||
|  |             "githubUsername": "ExE-Boss" | ||||||
|  |         } | ||||||
|  |     ], | ||||||
|  |     "main": "", | ||||||
|  |     "types": "index.d.ts", | ||||||
|  |     "repository": { | ||||||
|  |         "type": "git", | ||||||
|  |         "url": "https://github.com/DefinitelyTyped/DefinitelyTyped.git", | ||||||
|  |         "directory": "types/babel-types" | ||||||
|  |     }, | ||||||
|  |     "scripts": {}, | ||||||
|  |     "dependencies": {}, | ||||||
|  |     "typesPublisherContentHash": "b7dac276d8190b0a848848517df5523c999c33280ed4a8778cf496aeed0b0a83", | ||||||
|  |     "typeScriptVersion": "3.7" | ||||||
|  | } | ||||||
							
								
								
									
										21
									
								
								Source/node_modules/@types/babylon/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							
							
						
						
									
										21
									
								
								Source/node_modules/@types/babylon/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							| @@ -0,0 +1,21 @@ | |||||||
|  |     MIT License | ||||||
|  |  | ||||||
|  |     Copyright (c) Microsoft Corporation. | ||||||
|  |  | ||||||
|  |     Permission is hereby granted, free of charge, to any person obtaining a copy | ||||||
|  |     of this software and associated documentation files (the "Software"), to deal | ||||||
|  |     in the Software without restriction, including without limitation the rights | ||||||
|  |     to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||||
|  |     copies of the Software, and to permit persons to whom the Software is | ||||||
|  |     furnished to do so, subject to the following conditions: | ||||||
|  |  | ||||||
|  |     The above copyright notice and this permission notice shall be included in all | ||||||
|  |     copies or substantial portions of the Software. | ||||||
|  |  | ||||||
|  |     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||||
|  |     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||||
|  |     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||||
|  |     AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||||
|  |     LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||||
|  |     OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||||||
|  |     SOFTWARE | ||||||
							
								
								
									
										59
									
								
								Source/node_modules/@types/babylon/index.d.ts
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							
							
						
						
									
										59
									
								
								Source/node_modules/@types/babylon/index.d.ts
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							| @@ -0,0 +1,59 @@ | |||||||
|  | // Type definitions for babylon 6.16 | ||||||
|  | // Project: https://github.com/babel/babylon, https://babeljs.io | ||||||
|  | // Definitions by: Troy Gerwien <https://github.com/yortus> | ||||||
|  | //                 Marvin Hagemeister <https://github.com/marvinhagemeister> | ||||||
|  | // Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped | ||||||
|  | // TypeScript Version: 2.8 | ||||||
|  |  | ||||||
|  | import { File, Expression } from 'babel-types'; | ||||||
|  |  | ||||||
|  | export function parse(code: string, opts?: BabylonOptions): File; | ||||||
|  |  | ||||||
|  | export function parseExpression(input: string, options?: BabylonOptions): Expression; | ||||||
|  |  | ||||||
|  | export interface BabylonOptions { | ||||||
|  |     /** | ||||||
|  |      * By default, import and export declarations can only appear at a program's top level. | ||||||
|  |      * Setting this option to true allows them anywhere where a statement is allowed. | ||||||
|  |      */ | ||||||
|  |     allowImportExportEverywhere?: boolean | undefined; | ||||||
|  |  | ||||||
|  |     /** | ||||||
|  |      * By default, a return statement at the top level raises an error. Set this to true to accept such code. | ||||||
|  |      */ | ||||||
|  |     allowReturnOutsideFunction?: boolean | undefined; | ||||||
|  |  | ||||||
|  |     allowSuperOutsideMethod?: boolean | undefined; | ||||||
|  |  | ||||||
|  |     /** | ||||||
|  |      * Indicate the mode the code should be parsed in. Can be either "script" or "module". | ||||||
|  |      */ | ||||||
|  |     sourceType?: 'script' | 'module' | undefined; | ||||||
|  |  | ||||||
|  |     /** | ||||||
|  |      * Correlate output AST nodes with their source filename. Useful when | ||||||
|  |      * generating code and source maps from the ASTs of multiple input files. | ||||||
|  |      */ | ||||||
|  |     sourceFilename?: string | undefined; | ||||||
|  |  | ||||||
|  |     /** | ||||||
|  |      * Array containing the plugins that you want to enable. | ||||||
|  |      */ | ||||||
|  |     plugins?: PluginName[] | undefined; | ||||||
|  | } | ||||||
|  |  | ||||||
|  | export type PluginName = | ||||||
|  |     'estree' | | ||||||
|  |     'jsx' | | ||||||
|  |     'flow' | | ||||||
|  |     'typescript' | | ||||||
|  |     'classConstructorCall' | | ||||||
|  |     'doExpressions' | | ||||||
|  |     'objectRestSpread' | | ||||||
|  |     'decorators' | | ||||||
|  |     'classProperties' | | ||||||
|  |     'exportExtensions' | | ||||||
|  |     'asyncGenerators' | | ||||||
|  |     'functionBind' | | ||||||
|  |     'functionSent' | | ||||||
|  |     'dynamicImport'; | ||||||
							
								
								
									
										32
									
								
								Source/node_modules/@types/babylon/package.json
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							
							
						
						
									
										32
									
								
								Source/node_modules/@types/babylon/package.json
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							| @@ -0,0 +1,32 @@ | |||||||
|  | { | ||||||
|  |     "name": "@types/babylon", | ||||||
|  |     "version": "6.16.6", | ||||||
|  |     "description": "TypeScript definitions for babylon", | ||||||
|  |     "homepage": "https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/babylon", | ||||||
|  |     "license": "MIT", | ||||||
|  |     "contributors": [ | ||||||
|  |         { | ||||||
|  |             "name": "Troy Gerwien", | ||||||
|  |             "url": "https://github.com/yortus", | ||||||
|  |             "githubUsername": "yortus" | ||||||
|  |         }, | ||||||
|  |         { | ||||||
|  |             "name": "Marvin Hagemeister", | ||||||
|  |             "url": "https://github.com/marvinhagemeister", | ||||||
|  |             "githubUsername": "marvinhagemeister" | ||||||
|  |         } | ||||||
|  |     ], | ||||||
|  |     "main": "", | ||||||
|  |     "types": "index.d.ts", | ||||||
|  |     "repository": { | ||||||
|  |         "type": "git", | ||||||
|  |         "url": "https://github.com/DefinitelyTyped/DefinitelyTyped.git", | ||||||
|  |         "directory": "types/babylon" | ||||||
|  |     }, | ||||||
|  |     "scripts": {}, | ||||||
|  |     "dependencies": { | ||||||
|  |         "@types/babel-types": "*" | ||||||
|  |     }, | ||||||
|  |     "typesPublisherContentHash": "294fc5f5bb76f2c3e69836bdab4091018fc8b4a3cecc40f5927bbb5dc3aafcbc", | ||||||
|  |     "typeScriptVersion": "3.6" | ||||||
|  | } | ||||||
							
								
								
									
										19
									
								
								Source/node_modules/acorn-globals/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								Source/node_modules/acorn-globals/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,19 @@ | |||||||
|  | Copyright (c) 2014 Forbes Lindesay | ||||||
|  |  | ||||||
|  | Permission is hereby granted, free of charge, to any person obtaining a copy | ||||||
|  | of this software and associated documentation files (the "Software"), to deal | ||||||
|  | in the Software without restriction, including without limitation the rights | ||||||
|  | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||||
|  | copies of the Software, and to permit persons to whom the Software is | ||||||
|  | furnished to do so, subject to the following conditions: | ||||||
|  |  | ||||||
|  | The above copyright notice and this permission notice shall be included in | ||||||
|  | all copies or substantial portions of the Software. | ||||||
|  |  | ||||||
|  | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||||
|  | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||||
|  | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||||
|  | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||||
|  | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||||
|  | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||||||
|  | THE SOFTWARE. | ||||||
							
								
								
									
										163
									
								
								Source/node_modules/acorn-globals/index.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										163
									
								
								Source/node_modules/acorn-globals/index.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,163 @@ | |||||||
|  | 'use strict'; | ||||||
|  |  | ||||||
|  | var acorn = require('acorn'); | ||||||
|  | var walk = require('acorn/dist/walk'); | ||||||
|  |  | ||||||
|  | function isScope(node) { | ||||||
|  |   return node.type === 'FunctionExpression' || node.type === 'FunctionDeclaration' || node.type === 'ArrowFunctionExpression' || node.type === 'Program'; | ||||||
|  | } | ||||||
|  | function isBlockScope(node) { | ||||||
|  |   return node.type === 'BlockStatement' || isScope(node); | ||||||
|  | } | ||||||
|  |  | ||||||
|  | function declaresArguments(node) { | ||||||
|  |   return node.type === 'FunctionExpression' || node.type === 'FunctionDeclaration'; | ||||||
|  | } | ||||||
|  |  | ||||||
|  | function declaresThis(node) { | ||||||
|  |   return node.type === 'FunctionExpression' || node.type === 'FunctionDeclaration'; | ||||||
|  | } | ||||||
|  |  | ||||||
|  | function reallyParse(source) { | ||||||
|  |   return acorn.parse(source, { | ||||||
|  |     allowReturnOutsideFunction: true, | ||||||
|  |     allowImportExportEverywhere: true, | ||||||
|  |     allowHashBang: true | ||||||
|  |   }); | ||||||
|  | } | ||||||
|  | module.exports = findGlobals; | ||||||
|  | module.exports.parse = reallyParse; | ||||||
|  | function findGlobals(source) { | ||||||
|  |   var globals = []; | ||||||
|  |   var ast; | ||||||
|  |   // istanbul ignore else | ||||||
|  |   if (typeof source === 'string') { | ||||||
|  |     ast = reallyParse(source); | ||||||
|  |   } else { | ||||||
|  |     ast = source; | ||||||
|  |   } | ||||||
|  |   // istanbul ignore if | ||||||
|  |   if (!(ast && typeof ast === 'object' && ast.type === 'Program')) { | ||||||
|  |     throw new TypeError('Source must be either a string of JavaScript or an acorn AST'); | ||||||
|  |   } | ||||||
|  |   var declareFunction = function (node) { | ||||||
|  |     var fn = node; | ||||||
|  |     fn.locals = fn.locals || {}; | ||||||
|  |     node.params.forEach(function (node) { | ||||||
|  |       declarePattern(node, fn); | ||||||
|  |     }); | ||||||
|  |     if (node.id) { | ||||||
|  |       fn.locals[node.id.name] = true; | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |   var declarePattern = function (node, parent) { | ||||||
|  |     switch (node.type) { | ||||||
|  |       case 'Identifier': | ||||||
|  |         parent.locals[node.name] = true; | ||||||
|  |         break; | ||||||
|  |       case 'ObjectPattern': | ||||||
|  |         node.properties.forEach(function (node) { | ||||||
|  |           declarePattern(node.value, parent); | ||||||
|  |         }); | ||||||
|  |         break; | ||||||
|  |       case 'ArrayPattern': | ||||||
|  |         node.elements.forEach(function (node) { | ||||||
|  |           if (node) declarePattern(node, parent); | ||||||
|  |         }); | ||||||
|  |         break; | ||||||
|  |       case 'RestElement': | ||||||
|  |         declarePattern(node.argument, parent); | ||||||
|  |         break; | ||||||
|  |       case 'AssignmentPattern': | ||||||
|  |         declarePattern(node.left, parent); | ||||||
|  |         break; | ||||||
|  |       // istanbul ignore next | ||||||
|  |       default: | ||||||
|  |         throw new Error('Unrecognized pattern type: ' + node.type); | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |   var declareModuleSpecifier = function (node, parents) { | ||||||
|  |     ast.locals = ast.locals || {}; | ||||||
|  |     ast.locals[node.local.name] = true; | ||||||
|  |   } | ||||||
|  |   walk.ancestor(ast, { | ||||||
|  |     'VariableDeclaration': function (node, parents) { | ||||||
|  |       var parent = null; | ||||||
|  |       for (var i = parents.length - 1; i >= 0 && parent === null; i--) { | ||||||
|  |         if (node.kind === 'var' ? isScope(parents[i]) : isBlockScope(parents[i])) { | ||||||
|  |           parent = parents[i]; | ||||||
|  |         } | ||||||
|  |       } | ||||||
|  |       parent.locals = parent.locals || {}; | ||||||
|  |       node.declarations.forEach(function (declaration) { | ||||||
|  |         declarePattern(declaration.id, parent); | ||||||
|  |       }); | ||||||
|  |     }, | ||||||
|  |     'FunctionDeclaration': function (node, parents) { | ||||||
|  |       var parent = null; | ||||||
|  |       for (var i = parents.length - 2; i >= 0 && parent === null; i--) { | ||||||
|  |         if (isScope(parents[i])) { | ||||||
|  |           parent = parents[i]; | ||||||
|  |         } | ||||||
|  |       } | ||||||
|  |       parent.locals = parent.locals || {}; | ||||||
|  |       parent.locals[node.id.name] = true; | ||||||
|  |       declareFunction(node); | ||||||
|  |     }, | ||||||
|  |     'Function': declareFunction, | ||||||
|  |     'ClassDeclaration': function (node, parents) { | ||||||
|  |       var parent = null; | ||||||
|  |       for (var i = parents.length - 2; i >= 0 && parent === null; i--) { | ||||||
|  |         if (isScope(parents[i])) { | ||||||
|  |           parent = parents[i]; | ||||||
|  |         } | ||||||
|  |       } | ||||||
|  |       parent.locals = parent.locals || {}; | ||||||
|  |       parent.locals[node.id.name] = true; | ||||||
|  |     }, | ||||||
|  |     'TryStatement': function (node) { | ||||||
|  |       if (node.handler === null) return; | ||||||
|  |       node.handler.locals = node.handler.locals || {}; | ||||||
|  |       node.handler.locals[node.handler.param.name] = true; | ||||||
|  |     }, | ||||||
|  |     'ImportDefaultSpecifier': declareModuleSpecifier, | ||||||
|  |     'ImportSpecifier': declareModuleSpecifier, | ||||||
|  |     'ImportNamespaceSpecifier': declareModuleSpecifier | ||||||
|  |   }); | ||||||
|  |   function identifier(node, parents) { | ||||||
|  |     var name = node.name; | ||||||
|  |     if (name === 'undefined') return; | ||||||
|  |     for (var i = 0; i < parents.length; i++) { | ||||||
|  |       if (name === 'arguments' && declaresArguments(parents[i])) { | ||||||
|  |         return; | ||||||
|  |       } | ||||||
|  |       if (parents[i].locals && name in parents[i].locals) { | ||||||
|  |         return; | ||||||
|  |       } | ||||||
|  |     } | ||||||
|  |     node.parents = parents; | ||||||
|  |     globals.push(node); | ||||||
|  |   } | ||||||
|  |   walk.ancestor(ast, { | ||||||
|  |     'VariablePattern': identifier, | ||||||
|  |     'Identifier': identifier, | ||||||
|  |     'ThisExpression': function (node, parents) { | ||||||
|  |       for (var i = 0; i < parents.length; i++) { | ||||||
|  |         if (declaresThis(parents[i])) { | ||||||
|  |           return; | ||||||
|  |         } | ||||||
|  |       } | ||||||
|  |       node.parents = parents; | ||||||
|  |       globals.push(node); | ||||||
|  |     } | ||||||
|  |   }); | ||||||
|  |   var groupedGlobals = {}; | ||||||
|  |   globals.forEach(function (node) { | ||||||
|  |     var name = node.type === 'ThisExpression' ? 'this' : node.name; | ||||||
|  |     groupedGlobals[name] = (groupedGlobals[name] || []); | ||||||
|  |     groupedGlobals[name].push(node); | ||||||
|  |   }); | ||||||
|  |   return Object.keys(groupedGlobals).sort().map(function (name) { | ||||||
|  |     return {name: name, nodes: groupedGlobals[name]}; | ||||||
|  |   }); | ||||||
|  | } | ||||||
							
								
								
									
										34
									
								
								Source/node_modules/acorn-globals/package.json
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										34
									
								
								Source/node_modules/acorn-globals/package.json
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,34 @@ | |||||||
|  | { | ||||||
|  |   "name": "acorn-globals", | ||||||
|  |   "version": "3.1.0", | ||||||
|  |   "description": "Detect global variables in JavaScript using acorn", | ||||||
|  |   "keywords": [ | ||||||
|  |     "ast", | ||||||
|  |     "variable", | ||||||
|  |     "name", | ||||||
|  |     "lexical", | ||||||
|  |     "scope", | ||||||
|  |     "local", | ||||||
|  |     "global", | ||||||
|  |     "implicit" | ||||||
|  |   ], | ||||||
|  |   "files": [ | ||||||
|  |     "index.js", | ||||||
|  |     "LICENSE" | ||||||
|  |   ], | ||||||
|  |   "dependencies": { | ||||||
|  |     "acorn": "^4.0.4" | ||||||
|  |   }, | ||||||
|  |   "devDependencies": { | ||||||
|  |     "testit": "^2.0.2" | ||||||
|  |   }, | ||||||
|  |   "scripts": { | ||||||
|  |     "test": "node test" | ||||||
|  |   }, | ||||||
|  |   "repository": { | ||||||
|  |     "type": "git", | ||||||
|  |     "url": "https://github.com/ForbesLindesay/acorn-globals.git" | ||||||
|  |   }, | ||||||
|  |   "author": "ForbesLindesay", | ||||||
|  |   "license": "MIT" | ||||||
|  | } | ||||||
							
								
								
									
										10
									
								
								Source/node_modules/acorn/.npmignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								Source/node_modules/acorn/.npmignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,10 @@ | |||||||
|  | /.tern-port | ||||||
|  | /test | ||||||
|  | /local | ||||||
|  | /rollup | ||||||
|  | /bin/generate-identifier-regex.js | ||||||
|  | /bin/update_authors.sh | ||||||
|  | .editorconfig | ||||||
|  | .gitattributes | ||||||
|  | .tern-project | ||||||
|  | .travis.yml | ||||||
							
								
								
									
										62
									
								
								Source/node_modules/acorn/AUTHORS
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										62
									
								
								Source/node_modules/acorn/AUTHORS
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,62 @@ | |||||||
|  | List of Acorn contributors. Updated before every release. | ||||||
|  |  | ||||||
|  | Adrian Rakovsky | ||||||
|  | Alistair Braidwood | ||||||
|  | Amila Welihinda | ||||||
|  | Andres Suarez | ||||||
|  | Angelo | ||||||
|  | Aparajita Fishman | ||||||
|  | Arian Stolwijk | ||||||
|  | Artem Govorov | ||||||
|  | Brandon Mills | ||||||
|  | Charles Hughes | ||||||
|  | Conrad Irwin | ||||||
|  | Daniel Tschinder | ||||||
|  | David Bonnet | ||||||
|  | Domenico Matteo | ||||||
|  | Forbes Lindesay | ||||||
|  | Gilad Peleg | ||||||
|  | impinball | ||||||
|  | Ingvar Stepanyan | ||||||
|  | Jackson Ray Hamilton | ||||||
|  | Jesse McCarthy | ||||||
|  | Jiaxing Wang | ||||||
|  | Joel Kemp | ||||||
|  | Johannes Herr | ||||||
|  | Jordan Klassen | ||||||
|  | Jürg Lehni | ||||||
|  | Kai Cataldo | ||||||
|  | keeyipchan | ||||||
|  | Keheliya Gallaba | ||||||
|  | Kevin Irish | ||||||
|  | Kevin Kwok | ||||||
|  | krator | ||||||
|  | Marijn Haverbeke | ||||||
|  | Martin Carlberg | ||||||
|  | Mat Garcia | ||||||
|  | Mathias Bynens | ||||||
|  | Mathieu 'p01' Henri | ||||||
|  | Matthew Bastien | ||||||
|  | Max Schaefer | ||||||
|  | Max Zerzouri | ||||||
|  | Mihai Bazon | ||||||
|  | Mike Rennie | ||||||
|  | naoh | ||||||
|  | Nicholas C. Zakas | ||||||
|  | Nick Fitzgerald | ||||||
|  | Olivier Thomann | ||||||
|  | Oskar Schöldström | ||||||
|  | Paul Harper | ||||||
|  | Peter Rust | ||||||
|  | PlNG | ||||||
|  | Prayag Verma | ||||||
|  | ReadmeCritic | ||||||
|  | r-e-d | ||||||
|  | Richard Gibson | ||||||
|  | Rich Harris | ||||||
|  | Sebastian McKenzie | ||||||
|  | Simen Bekkhus | ||||||
|  | Timothy Gu | ||||||
|  | Toru Nagashima | ||||||
|  | Wexpo Lyu | ||||||
|  | zsjforcn | ||||||
							
								
								
									
										19
									
								
								Source/node_modules/acorn/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								Source/node_modules/acorn/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,19 @@ | |||||||
|  | Copyright (C) 2012-2016 by various contributors (see AUTHORS) | ||||||
|  |  | ||||||
|  | Permission is hereby granted, free of charge, to any person obtaining a copy | ||||||
|  | of this software and associated documentation files (the "Software"), to deal | ||||||
|  | in the Software without restriction, including without limitation the rights | ||||||
|  | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||||
|  | copies of the Software, and to permit persons to whom the Software is | ||||||
|  | furnished to do so, subject to the following conditions: | ||||||
|  |  | ||||||
|  | The above copyright notice and this permission notice shall be included in | ||||||
|  | all copies or substantial portions of the Software. | ||||||
|  |  | ||||||
|  | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||||
|  | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||||
|  | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||||
|  | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||||
|  | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||||
|  | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||||||
|  | THE SOFTWARE. | ||||||
							
								
								
									
										67
									
								
								Source/node_modules/acorn/bin/acorn
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							
							
						
						
									
										67
									
								
								Source/node_modules/acorn/bin/acorn
									
									
									
										generated
									
									
										vendored
									
									
										Executable file
									
								
							| @@ -0,0 +1,67 @@ | |||||||
|  | #!/usr/bin/env node | ||||||
|  | 'use strict'; | ||||||
|  |  | ||||||
|  | var path = require('path'); | ||||||
|  | var fs = require('fs'); | ||||||
|  | var acorn = require('../dist/acorn.js'); | ||||||
|  |  | ||||||
|  | var infile; | ||||||
|  | var forceFile; | ||||||
|  | var silent = false; | ||||||
|  | var compact = false; | ||||||
|  | var tokenize = false; | ||||||
|  | var options = {} | ||||||
|  |  | ||||||
|  | function help(status) { | ||||||
|  |   var print = (status == 0) ? console.log : console.error | ||||||
|  |   print("usage: " + path.basename(process.argv[1]) + " [--ecma3|--ecma5|--ecma6|--ecma7|...|--ecma2015|--ecma2016|...]") | ||||||
|  |   print("        [--tokenize] [--locations] [---allow-hash-bang] [--compact] [--silent] [--module] [--help] [--] [infile]") | ||||||
|  |   process.exit(status) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | for (var i = 2; i < process.argv.length; ++i) { | ||||||
|  |   var arg = process.argv[i] | ||||||
|  |   if ((arg == "-" || arg[0] != "-") && !infile) infile = arg | ||||||
|  |   else if (arg == "--" && !infile && i + 2 == process.argv.length) forceFile = infile = process.argv[++i] | ||||||
|  |   else if (arg == "--locations") options.locations = true | ||||||
|  |   else if (arg == "--allow-hash-bang") options.allowHashBang = true | ||||||
|  |   else if (arg == "--silent") silent = true | ||||||
|  |   else if (arg == "--compact") compact = true | ||||||
|  |   else if (arg == "--help") help(0) | ||||||
|  |   else if (arg == "--tokenize") tokenize = true | ||||||
|  |   else if (arg == "--module") options.sourceType = 'module' | ||||||
|  |   else { | ||||||
|  |     var match = arg.match(/^--ecma(\d+)$/) | ||||||
|  |     if (match) | ||||||
|  |       options.ecmaVersion = +match[1] | ||||||
|  |     else | ||||||
|  |       help(1) | ||||||
|  |   } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | function run(code) { | ||||||
|  |   var result | ||||||
|  |   if (!tokenize) { | ||||||
|  |     try { result = acorn.parse(code, options) } | ||||||
|  |     catch(e) { console.error(e.message); process.exit(1) } | ||||||
|  |   } else { | ||||||
|  |     result = [] | ||||||
|  |     var tokenizer = acorn.tokenizer(code, options), token | ||||||
|  |     while (true) { | ||||||
|  |       try { token = tokenizer.getToken() } | ||||||
|  |       catch(e) { console.error(e.message); process.exit(1) } | ||||||
|  |       result.push(token) | ||||||
|  |       if (token.type == acorn.tokTypes.eof) break | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |   if (!silent) console.log(JSON.stringify(result, null, compact ? null : 2)) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | if (forceFile || infile && infile != "-") { | ||||||
|  |   run(fs.readFileSync(infile, "utf8")) | ||||||
|  | } else { | ||||||
|  |   var code = "" | ||||||
|  |   process.stdin.resume() | ||||||
|  |   process.stdin.on("data", function (chunk) { return code += chunk; }) | ||||||
|  |   process.stdin.on("end", function () { return run(code); }) | ||||||
|  | } | ||||||
							
								
								
									
										0
									
								
								Source/node_modules/acorn/dist/.keep
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										0
									
								
								Source/node_modules/acorn/dist/.keep
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
								
								
									
										3401
									
								
								Source/node_modules/acorn/dist/acorn.es.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										3401
									
								
								Source/node_modules/acorn/dist/acorn.es.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										3433
									
								
								Source/node_modules/acorn/dist/acorn.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										3433
									
								
								Source/node_modules/acorn/dist/acorn.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										1364
									
								
								Source/node_modules/acorn/dist/acorn_loose.es.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1364
									
								
								Source/node_modules/acorn/dist/acorn_loose.es.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										1374
									
								
								Source/node_modules/acorn/dist/acorn_loose.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1374
									
								
								Source/node_modules/acorn/dist/acorn_loose.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										342
									
								
								Source/node_modules/acorn/dist/walk.es.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										342
									
								
								Source/node_modules/acorn/dist/walk.es.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,342 @@ | |||||||
|  | // AST walker module for Mozilla Parser API compatible trees | ||||||
|  |  | ||||||
|  | // A simple walk is one where you simply specify callbacks to be | ||||||
|  | // called on specific nodes. The last two arguments are optional. A | ||||||
|  | // simple use would be | ||||||
|  | // | ||||||
|  | //     walk.simple(myTree, { | ||||||
|  | //         Expression: function(node) { ... } | ||||||
|  | //     }); | ||||||
|  | // | ||||||
|  | // to do something with all expressions. All Parser API node types | ||||||
|  | // can be used to identify node types, as well as Expression, | ||||||
|  | // Statement, and ScopeBody, which denote categories of nodes. | ||||||
|  | // | ||||||
|  | // The base argument can be used to pass a custom (recursive) | ||||||
|  | // walker, and state can be used to give this walked an initial | ||||||
|  | // state. | ||||||
|  |  | ||||||
|  | function simple(node, visitors, base, state, override) { | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   ;(function c(node, st, override) { | ||||||
|  |     var type = override || node.type, found = visitors[type] | ||||||
|  |     base[type](node, st, c) | ||||||
|  |     if (found) found(node, st) | ||||||
|  |   })(node, state, override) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // An ancestor walk keeps an array of ancestor nodes (including the | ||||||
|  | // current node) and passes them to the callback as third parameter | ||||||
|  | // (and also as state parameter when no other state is present). | ||||||
|  | function ancestor(node, visitors, base, state) { | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   var ancestors = [] | ||||||
|  |   ;(function c(node, st, override) { | ||||||
|  |     var type = override || node.type, found = visitors[type] | ||||||
|  |     var isNew = node != ancestors[ancestors.length - 1] | ||||||
|  |     if (isNew) ancestors.push(node) | ||||||
|  |     base[type](node, st, c) | ||||||
|  |     if (found) found(node, st || ancestors, ancestors) | ||||||
|  |     if (isNew) ancestors.pop() | ||||||
|  |   })(node, state) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // A recursive walk is one where your functions override the default | ||||||
|  | // walkers. They can modify and replace the state parameter that's | ||||||
|  | // threaded through the walk, and can opt how and whether to walk | ||||||
|  | // their child nodes (by calling their third argument on these | ||||||
|  | // nodes). | ||||||
|  | function recursive(node, state, funcs, base, override) { | ||||||
|  |   var visitor = funcs ? exports.make(funcs, base) : base | ||||||
|  |   ;(function c(node, st, override) { | ||||||
|  |     visitor[override || node.type](node, st, c) | ||||||
|  |   })(node, state, override) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | function makeTest(test) { | ||||||
|  |   if (typeof test == "string") | ||||||
|  |     return function (type) { return type == test; } | ||||||
|  |   else if (!test) | ||||||
|  |     return function () { return true; } | ||||||
|  |   else | ||||||
|  |     return test | ||||||
|  | } | ||||||
|  |  | ||||||
|  | var Found = function Found(node, state) { this.node = node; this.state = state }; | ||||||
|  |  | ||||||
|  | // Find a node with a given start, end, and type (all are optional, | ||||||
|  | // null can be used as wildcard). Returns a {node, state} object, or | ||||||
|  | // undefined when it doesn't find a matching node. | ||||||
|  | function findNodeAt(node, start, end, test, base, state) { | ||||||
|  |   test = makeTest(test) | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   try { | ||||||
|  |     ;(function c(node, st, override) { | ||||||
|  |       var type = override || node.type | ||||||
|  |       if ((start == null || node.start <= start) && | ||||||
|  |           (end == null || node.end >= end)) | ||||||
|  |         base[type](node, st, c) | ||||||
|  |       if ((start == null || node.start == start) && | ||||||
|  |           (end == null || node.end == end) && | ||||||
|  |           test(type, node)) | ||||||
|  |         throw new Found(node, st) | ||||||
|  |     })(node, state) | ||||||
|  |   } catch (e) { | ||||||
|  |     if (e instanceof Found) return e | ||||||
|  |     throw e | ||||||
|  |   } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Find the innermost node of a given type that contains the given | ||||||
|  | // position. Interface similar to findNodeAt. | ||||||
|  | function findNodeAround(node, pos, test, base, state) { | ||||||
|  |   test = makeTest(test) | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   try { | ||||||
|  |     ;(function c(node, st, override) { | ||||||
|  |       var type = override || node.type | ||||||
|  |       if (node.start > pos || node.end < pos) return | ||||||
|  |       base[type](node, st, c) | ||||||
|  |       if (test(type, node)) throw new Found(node, st) | ||||||
|  |     })(node, state) | ||||||
|  |   } catch (e) { | ||||||
|  |     if (e instanceof Found) return e | ||||||
|  |     throw e | ||||||
|  |   } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Find the outermost matching node after a given position. | ||||||
|  | function findNodeAfter(node, pos, test, base, state) { | ||||||
|  |   test = makeTest(test) | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   try { | ||||||
|  |     ;(function c(node, st, override) { | ||||||
|  |       if (node.end < pos) return | ||||||
|  |       var type = override || node.type | ||||||
|  |       if (node.start >= pos && test(type, node)) throw new Found(node, st) | ||||||
|  |       base[type](node, st, c) | ||||||
|  |     })(node, state) | ||||||
|  |   } catch (e) { | ||||||
|  |     if (e instanceof Found) return e | ||||||
|  |     throw e | ||||||
|  |   } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Find the outermost matching node before a given position. | ||||||
|  | function findNodeBefore(node, pos, test, base, state) { | ||||||
|  |   test = makeTest(test) | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   var max | ||||||
|  |   ;(function c(node, st, override) { | ||||||
|  |     if (node.start > pos) return | ||||||
|  |     var type = override || node.type | ||||||
|  |     if (node.end <= pos && (!max || max.node.end < node.end) && test(type, node)) | ||||||
|  |       max = new Found(node, st) | ||||||
|  |     base[type](node, st, c) | ||||||
|  |   })(node, state) | ||||||
|  |   return max | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Fallback to an Object.create polyfill for older environments. | ||||||
|  | var create = Object.create || function(proto) { | ||||||
|  |   function Ctor() {} | ||||||
|  |   Ctor.prototype = proto | ||||||
|  |   return new Ctor | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Used to create a custom walker. Will fill in all missing node | ||||||
|  | // type properties with the defaults. | ||||||
|  | function make(funcs, base) { | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   var visitor = create(base) | ||||||
|  |   for (var type in funcs) visitor[type] = funcs[type] | ||||||
|  |   return visitor | ||||||
|  | } | ||||||
|  |  | ||||||
|  | function skipThrough(node, st, c) { c(node, st) } | ||||||
|  | function ignore(_node, _st, _c) {} | ||||||
|  |  | ||||||
|  | // Node walkers. | ||||||
|  |  | ||||||
|  | var base = {} | ||||||
|  |  | ||||||
|  | base.Program = base.BlockStatement = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.body.length; ++i) | ||||||
|  |     c(node.body[i], st, "Statement") | ||||||
|  | } | ||||||
|  | base.Statement = skipThrough | ||||||
|  | base.EmptyStatement = ignore | ||||||
|  | base.ExpressionStatement = base.ParenthesizedExpression = | ||||||
|  |   function (node, st, c) { return c(node.expression, st, "Expression"); } | ||||||
|  | base.IfStatement = function (node, st, c) { | ||||||
|  |   c(node.test, st, "Expression") | ||||||
|  |   c(node.consequent, st, "Statement") | ||||||
|  |   if (node.alternate) c(node.alternate, st, "Statement") | ||||||
|  | } | ||||||
|  | base.LabeledStatement = function (node, st, c) { return c(node.body, st, "Statement"); } | ||||||
|  | base.BreakStatement = base.ContinueStatement = ignore | ||||||
|  | base.WithStatement = function (node, st, c) { | ||||||
|  |   c(node.object, st, "Expression") | ||||||
|  |   c(node.body, st, "Statement") | ||||||
|  | } | ||||||
|  | base.SwitchStatement = function (node, st, c) { | ||||||
|  |   c(node.discriminant, st, "Expression") | ||||||
|  |   for (var i = 0; i < node.cases.length; ++i) { | ||||||
|  |     var cs = node.cases[i] | ||||||
|  |     if (cs.test) c(cs.test, st, "Expression") | ||||||
|  |     for (var j = 0; j < cs.consequent.length; ++j) | ||||||
|  |       c(cs.consequent[j], st, "Statement") | ||||||
|  |   } | ||||||
|  | } | ||||||
|  | base.ReturnStatement = base.YieldExpression = base.AwaitExpression = function (node, st, c) { | ||||||
|  |   if (node.argument) c(node.argument, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ThrowStatement = base.SpreadElement = | ||||||
|  |   function (node, st, c) { return c(node.argument, st, "Expression"); } | ||||||
|  | base.TryStatement = function (node, st, c) { | ||||||
|  |   c(node.block, st, "Statement") | ||||||
|  |   if (node.handler) c(node.handler, st) | ||||||
|  |   if (node.finalizer) c(node.finalizer, st, "Statement") | ||||||
|  | } | ||||||
|  | base.CatchClause = function (node, st, c) { | ||||||
|  |   c(node.param, st, "Pattern") | ||||||
|  |   c(node.body, st, "ScopeBody") | ||||||
|  | } | ||||||
|  | base.WhileStatement = base.DoWhileStatement = function (node, st, c) { | ||||||
|  |   c(node.test, st, "Expression") | ||||||
|  |   c(node.body, st, "Statement") | ||||||
|  | } | ||||||
|  | base.ForStatement = function (node, st, c) { | ||||||
|  |   if (node.init) c(node.init, st, "ForInit") | ||||||
|  |   if (node.test) c(node.test, st, "Expression") | ||||||
|  |   if (node.update) c(node.update, st, "Expression") | ||||||
|  |   c(node.body, st, "Statement") | ||||||
|  | } | ||||||
|  | base.ForInStatement = base.ForOfStatement = function (node, st, c) { | ||||||
|  |   c(node.left, st, "ForInit") | ||||||
|  |   c(node.right, st, "Expression") | ||||||
|  |   c(node.body, st, "Statement") | ||||||
|  | } | ||||||
|  | base.ForInit = function (node, st, c) { | ||||||
|  |   if (node.type == "VariableDeclaration") c(node, st) | ||||||
|  |   else c(node, st, "Expression") | ||||||
|  | } | ||||||
|  | base.DebuggerStatement = ignore | ||||||
|  |  | ||||||
|  | base.FunctionDeclaration = function (node, st, c) { return c(node, st, "Function"); } | ||||||
|  | base.VariableDeclaration = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.declarations.length; ++i) | ||||||
|  |     c(node.declarations[i], st) | ||||||
|  | } | ||||||
|  | base.VariableDeclarator = function (node, st, c) { | ||||||
|  |   c(node.id, st, "Pattern") | ||||||
|  |   if (node.init) c(node.init, st, "Expression") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | base.Function = function (node, st, c) { | ||||||
|  |   if (node.id) c(node.id, st, "Pattern") | ||||||
|  |   for (var i = 0; i < node.params.length; i++) | ||||||
|  |     c(node.params[i], st, "Pattern") | ||||||
|  |   c(node.body, st, node.expression ? "ScopeExpression" : "ScopeBody") | ||||||
|  | } | ||||||
|  | // FIXME drop these node types in next major version | ||||||
|  | // (They are awkward, and in ES6 every block can be a scope.) | ||||||
|  | base.ScopeBody = function (node, st, c) { return c(node, st, "Statement"); } | ||||||
|  | base.ScopeExpression = function (node, st, c) { return c(node, st, "Expression"); } | ||||||
|  |  | ||||||
|  | base.Pattern = function (node, st, c) { | ||||||
|  |   if (node.type == "Identifier") | ||||||
|  |     c(node, st, "VariablePattern") | ||||||
|  |   else if (node.type == "MemberExpression") | ||||||
|  |     c(node, st, "MemberPattern") | ||||||
|  |   else | ||||||
|  |     c(node, st) | ||||||
|  | } | ||||||
|  | base.VariablePattern = ignore | ||||||
|  | base.MemberPattern = skipThrough | ||||||
|  | base.RestElement = function (node, st, c) { return c(node.argument, st, "Pattern"); } | ||||||
|  | base.ArrayPattern =  function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.elements.length; ++i) { | ||||||
|  |     var elt = node.elements[i] | ||||||
|  |     if (elt) c(elt, st, "Pattern") | ||||||
|  |   } | ||||||
|  | } | ||||||
|  | base.ObjectPattern = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.properties.length; ++i) | ||||||
|  |     c(node.properties[i].value, st, "Pattern") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | base.Expression = skipThrough | ||||||
|  | base.ThisExpression = base.Super = base.MetaProperty = ignore | ||||||
|  | base.ArrayExpression = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.elements.length; ++i) { | ||||||
|  |     var elt = node.elements[i] | ||||||
|  |     if (elt) c(elt, st, "Expression") | ||||||
|  |   } | ||||||
|  | } | ||||||
|  | base.ObjectExpression = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.properties.length; ++i) | ||||||
|  |     c(node.properties[i], st) | ||||||
|  | } | ||||||
|  | base.FunctionExpression = base.ArrowFunctionExpression = base.FunctionDeclaration | ||||||
|  | base.SequenceExpression = base.TemplateLiteral = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.expressions.length; ++i) | ||||||
|  |     c(node.expressions[i], st, "Expression") | ||||||
|  | } | ||||||
|  | base.UnaryExpression = base.UpdateExpression = function (node, st, c) { | ||||||
|  |   c(node.argument, st, "Expression") | ||||||
|  | } | ||||||
|  | base.BinaryExpression = base.LogicalExpression = function (node, st, c) { | ||||||
|  |   c(node.left, st, "Expression") | ||||||
|  |   c(node.right, st, "Expression") | ||||||
|  | } | ||||||
|  | base.AssignmentExpression = base.AssignmentPattern = function (node, st, c) { | ||||||
|  |   c(node.left, st, "Pattern") | ||||||
|  |   c(node.right, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ConditionalExpression = function (node, st, c) { | ||||||
|  |   c(node.test, st, "Expression") | ||||||
|  |   c(node.consequent, st, "Expression") | ||||||
|  |   c(node.alternate, st, "Expression") | ||||||
|  | } | ||||||
|  | base.NewExpression = base.CallExpression = function (node, st, c) { | ||||||
|  |   c(node.callee, st, "Expression") | ||||||
|  |   if (node.arguments) for (var i = 0; i < node.arguments.length; ++i) | ||||||
|  |     c(node.arguments[i], st, "Expression") | ||||||
|  | } | ||||||
|  | base.MemberExpression = function (node, st, c) { | ||||||
|  |   c(node.object, st, "Expression") | ||||||
|  |   if (node.computed) c(node.property, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ExportNamedDeclaration = base.ExportDefaultDeclaration = function (node, st, c) { | ||||||
|  |   if (node.declaration) | ||||||
|  |     c(node.declaration, st, node.type == "ExportNamedDeclaration" || node.declaration.id ? "Statement" : "Expression") | ||||||
|  |   if (node.source) c(node.source, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ExportAllDeclaration = function (node, st, c) { | ||||||
|  |   c(node.source, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ImportDeclaration = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.specifiers.length; i++) | ||||||
|  |     c(node.specifiers[i], st) | ||||||
|  |   c(node.source, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ImportSpecifier = base.ImportDefaultSpecifier = base.ImportNamespaceSpecifier = base.Identifier = base.Literal = ignore | ||||||
|  |  | ||||||
|  | base.TaggedTemplateExpression = function (node, st, c) { | ||||||
|  |   c(node.tag, st, "Expression") | ||||||
|  |   c(node.quasi, st) | ||||||
|  | } | ||||||
|  | base.ClassDeclaration = base.ClassExpression = function (node, st, c) { return c(node, st, "Class"); } | ||||||
|  | base.Class = function (node, st, c) { | ||||||
|  |   if (node.id) c(node.id, st, "Pattern") | ||||||
|  |   if (node.superClass) c(node.superClass, st, "Expression") | ||||||
|  |   for (var i = 0; i < node.body.body.length; i++) | ||||||
|  |     c(node.body.body[i], st) | ||||||
|  | } | ||||||
|  | base.MethodDefinition = base.Property = function (node, st, c) { | ||||||
|  |   if (node.computed) c(node.key, st, "Expression") | ||||||
|  |   c(node.value, st, "Expression") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | export { simple, ancestor, recursive, findNodeAt, findNodeAround, findNodeAfter, findNodeBefore, make, base }; | ||||||
							
								
								
									
										360
									
								
								Source/node_modules/acorn/dist/walk.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										360
									
								
								Source/node_modules/acorn/dist/walk.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,360 @@ | |||||||
|  | (function (global, factory) { | ||||||
|  |   typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) : | ||||||
|  |   typeof define === 'function' && define.amd ? define(['exports'], factory) : | ||||||
|  |   (factory((global.acorn = global.acorn || {}, global.acorn.walk = global.acorn.walk || {}))); | ||||||
|  | }(this, (function (exports) { 'use strict'; | ||||||
|  |  | ||||||
|  | // AST walker module for Mozilla Parser API compatible trees | ||||||
|  |  | ||||||
|  | // A simple walk is one where you simply specify callbacks to be | ||||||
|  | // called on specific nodes. The last two arguments are optional. A | ||||||
|  | // simple use would be | ||||||
|  | // | ||||||
|  | //     walk.simple(myTree, { | ||||||
|  | //         Expression: function(node) { ... } | ||||||
|  | //     }); | ||||||
|  | // | ||||||
|  | // to do something with all expressions. All Parser API node types | ||||||
|  | // can be used to identify node types, as well as Expression, | ||||||
|  | // Statement, and ScopeBody, which denote categories of nodes. | ||||||
|  | // | ||||||
|  | // The base argument can be used to pass a custom (recursive) | ||||||
|  | // walker, and state can be used to give this walked an initial | ||||||
|  | // state. | ||||||
|  |  | ||||||
|  | function simple(node, visitors, base, state, override) { | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   ;(function c(node, st, override) { | ||||||
|  |     var type = override || node.type, found = visitors[type] | ||||||
|  |     base[type](node, st, c) | ||||||
|  |     if (found) found(node, st) | ||||||
|  |   })(node, state, override) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // An ancestor walk keeps an array of ancestor nodes (including the | ||||||
|  | // current node) and passes them to the callback as third parameter | ||||||
|  | // (and also as state parameter when no other state is present). | ||||||
|  | function ancestor(node, visitors, base, state) { | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   var ancestors = [] | ||||||
|  |   ;(function c(node, st, override) { | ||||||
|  |     var type = override || node.type, found = visitors[type] | ||||||
|  |     var isNew = node != ancestors[ancestors.length - 1] | ||||||
|  |     if (isNew) ancestors.push(node) | ||||||
|  |     base[type](node, st, c) | ||||||
|  |     if (found) found(node, st || ancestors, ancestors) | ||||||
|  |     if (isNew) ancestors.pop() | ||||||
|  |   })(node, state) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // A recursive walk is one where your functions override the default | ||||||
|  | // walkers. They can modify and replace the state parameter that's | ||||||
|  | // threaded through the walk, and can opt how and whether to walk | ||||||
|  | // their child nodes (by calling their third argument on these | ||||||
|  | // nodes). | ||||||
|  | function recursive(node, state, funcs, base, override) { | ||||||
|  |   var visitor = funcs ? exports.make(funcs, base) : base | ||||||
|  |   ;(function c(node, st, override) { | ||||||
|  |     visitor[override || node.type](node, st, c) | ||||||
|  |   })(node, state, override) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | function makeTest(test) { | ||||||
|  |   if (typeof test == "string") | ||||||
|  |     return function (type) { return type == test; } | ||||||
|  |   else if (!test) | ||||||
|  |     return function () { return true; } | ||||||
|  |   else | ||||||
|  |     return test | ||||||
|  | } | ||||||
|  |  | ||||||
|  | var Found = function Found(node, state) { this.node = node; this.state = state }; | ||||||
|  |  | ||||||
|  | // Find a node with a given start, end, and type (all are optional, | ||||||
|  | // null can be used as wildcard). Returns a {node, state} object, or | ||||||
|  | // undefined when it doesn't find a matching node. | ||||||
|  | function findNodeAt(node, start, end, test, base, state) { | ||||||
|  |   test = makeTest(test) | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   try { | ||||||
|  |     ;(function c(node, st, override) { | ||||||
|  |       var type = override || node.type | ||||||
|  |       if ((start == null || node.start <= start) && | ||||||
|  |           (end == null || node.end >= end)) | ||||||
|  |         base[type](node, st, c) | ||||||
|  |       if ((start == null || node.start == start) && | ||||||
|  |           (end == null || node.end == end) && | ||||||
|  |           test(type, node)) | ||||||
|  |         throw new Found(node, st) | ||||||
|  |     })(node, state) | ||||||
|  |   } catch (e) { | ||||||
|  |     if (e instanceof Found) return e | ||||||
|  |     throw e | ||||||
|  |   } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Find the innermost node of a given type that contains the given | ||||||
|  | // position. Interface similar to findNodeAt. | ||||||
|  | function findNodeAround(node, pos, test, base, state) { | ||||||
|  |   test = makeTest(test) | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   try { | ||||||
|  |     ;(function c(node, st, override) { | ||||||
|  |       var type = override || node.type | ||||||
|  |       if (node.start > pos || node.end < pos) return | ||||||
|  |       base[type](node, st, c) | ||||||
|  |       if (test(type, node)) throw new Found(node, st) | ||||||
|  |     })(node, state) | ||||||
|  |   } catch (e) { | ||||||
|  |     if (e instanceof Found) return e | ||||||
|  |     throw e | ||||||
|  |   } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Find the outermost matching node after a given position. | ||||||
|  | function findNodeAfter(node, pos, test, base, state) { | ||||||
|  |   test = makeTest(test) | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   try { | ||||||
|  |     ;(function c(node, st, override) { | ||||||
|  |       if (node.end < pos) return | ||||||
|  |       var type = override || node.type | ||||||
|  |       if (node.start >= pos && test(type, node)) throw new Found(node, st) | ||||||
|  |       base[type](node, st, c) | ||||||
|  |     })(node, state) | ||||||
|  |   } catch (e) { | ||||||
|  |     if (e instanceof Found) return e | ||||||
|  |     throw e | ||||||
|  |   } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Find the outermost matching node before a given position. | ||||||
|  | function findNodeBefore(node, pos, test, base, state) { | ||||||
|  |   test = makeTest(test) | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   var max | ||||||
|  |   ;(function c(node, st, override) { | ||||||
|  |     if (node.start > pos) return | ||||||
|  |     var type = override || node.type | ||||||
|  |     if (node.end <= pos && (!max || max.node.end < node.end) && test(type, node)) | ||||||
|  |       max = new Found(node, st) | ||||||
|  |     base[type](node, st, c) | ||||||
|  |   })(node, state) | ||||||
|  |   return max | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Fallback to an Object.create polyfill for older environments. | ||||||
|  | var create = Object.create || function(proto) { | ||||||
|  |   function Ctor() {} | ||||||
|  |   Ctor.prototype = proto | ||||||
|  |   return new Ctor | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Used to create a custom walker. Will fill in all missing node | ||||||
|  | // type properties with the defaults. | ||||||
|  | function make(funcs, base) { | ||||||
|  |   if (!base) base = exports.base | ||||||
|  |   var visitor = create(base) | ||||||
|  |   for (var type in funcs) visitor[type] = funcs[type] | ||||||
|  |   return visitor | ||||||
|  | } | ||||||
|  |  | ||||||
|  | function skipThrough(node, st, c) { c(node, st) } | ||||||
|  | function ignore(_node, _st, _c) {} | ||||||
|  |  | ||||||
|  | // Node walkers. | ||||||
|  |  | ||||||
|  | var base = {} | ||||||
|  |  | ||||||
|  | base.Program = base.BlockStatement = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.body.length; ++i) | ||||||
|  |     c(node.body[i], st, "Statement") | ||||||
|  | } | ||||||
|  | base.Statement = skipThrough | ||||||
|  | base.EmptyStatement = ignore | ||||||
|  | base.ExpressionStatement = base.ParenthesizedExpression = | ||||||
|  |   function (node, st, c) { return c(node.expression, st, "Expression"); } | ||||||
|  | base.IfStatement = function (node, st, c) { | ||||||
|  |   c(node.test, st, "Expression") | ||||||
|  |   c(node.consequent, st, "Statement") | ||||||
|  |   if (node.alternate) c(node.alternate, st, "Statement") | ||||||
|  | } | ||||||
|  | base.LabeledStatement = function (node, st, c) { return c(node.body, st, "Statement"); } | ||||||
|  | base.BreakStatement = base.ContinueStatement = ignore | ||||||
|  | base.WithStatement = function (node, st, c) { | ||||||
|  |   c(node.object, st, "Expression") | ||||||
|  |   c(node.body, st, "Statement") | ||||||
|  | } | ||||||
|  | base.SwitchStatement = function (node, st, c) { | ||||||
|  |   c(node.discriminant, st, "Expression") | ||||||
|  |   for (var i = 0; i < node.cases.length; ++i) { | ||||||
|  |     var cs = node.cases[i] | ||||||
|  |     if (cs.test) c(cs.test, st, "Expression") | ||||||
|  |     for (var j = 0; j < cs.consequent.length; ++j) | ||||||
|  |       c(cs.consequent[j], st, "Statement") | ||||||
|  |   } | ||||||
|  | } | ||||||
|  | base.ReturnStatement = base.YieldExpression = base.AwaitExpression = function (node, st, c) { | ||||||
|  |   if (node.argument) c(node.argument, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ThrowStatement = base.SpreadElement = | ||||||
|  |   function (node, st, c) { return c(node.argument, st, "Expression"); } | ||||||
|  | base.TryStatement = function (node, st, c) { | ||||||
|  |   c(node.block, st, "Statement") | ||||||
|  |   if (node.handler) c(node.handler, st) | ||||||
|  |   if (node.finalizer) c(node.finalizer, st, "Statement") | ||||||
|  | } | ||||||
|  | base.CatchClause = function (node, st, c) { | ||||||
|  |   c(node.param, st, "Pattern") | ||||||
|  |   c(node.body, st, "ScopeBody") | ||||||
|  | } | ||||||
|  | base.WhileStatement = base.DoWhileStatement = function (node, st, c) { | ||||||
|  |   c(node.test, st, "Expression") | ||||||
|  |   c(node.body, st, "Statement") | ||||||
|  | } | ||||||
|  | base.ForStatement = function (node, st, c) { | ||||||
|  |   if (node.init) c(node.init, st, "ForInit") | ||||||
|  |   if (node.test) c(node.test, st, "Expression") | ||||||
|  |   if (node.update) c(node.update, st, "Expression") | ||||||
|  |   c(node.body, st, "Statement") | ||||||
|  | } | ||||||
|  | base.ForInStatement = base.ForOfStatement = function (node, st, c) { | ||||||
|  |   c(node.left, st, "ForInit") | ||||||
|  |   c(node.right, st, "Expression") | ||||||
|  |   c(node.body, st, "Statement") | ||||||
|  | } | ||||||
|  | base.ForInit = function (node, st, c) { | ||||||
|  |   if (node.type == "VariableDeclaration") c(node, st) | ||||||
|  |   else c(node, st, "Expression") | ||||||
|  | } | ||||||
|  | base.DebuggerStatement = ignore | ||||||
|  |  | ||||||
|  | base.FunctionDeclaration = function (node, st, c) { return c(node, st, "Function"); } | ||||||
|  | base.VariableDeclaration = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.declarations.length; ++i) | ||||||
|  |     c(node.declarations[i], st) | ||||||
|  | } | ||||||
|  | base.VariableDeclarator = function (node, st, c) { | ||||||
|  |   c(node.id, st, "Pattern") | ||||||
|  |   if (node.init) c(node.init, st, "Expression") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | base.Function = function (node, st, c) { | ||||||
|  |   if (node.id) c(node.id, st, "Pattern") | ||||||
|  |   for (var i = 0; i < node.params.length; i++) | ||||||
|  |     c(node.params[i], st, "Pattern") | ||||||
|  |   c(node.body, st, node.expression ? "ScopeExpression" : "ScopeBody") | ||||||
|  | } | ||||||
|  | // FIXME drop these node types in next major version | ||||||
|  | // (They are awkward, and in ES6 every block can be a scope.) | ||||||
|  | base.ScopeBody = function (node, st, c) { return c(node, st, "Statement"); } | ||||||
|  | base.ScopeExpression = function (node, st, c) { return c(node, st, "Expression"); } | ||||||
|  |  | ||||||
|  | base.Pattern = function (node, st, c) { | ||||||
|  |   if (node.type == "Identifier") | ||||||
|  |     c(node, st, "VariablePattern") | ||||||
|  |   else if (node.type == "MemberExpression") | ||||||
|  |     c(node, st, "MemberPattern") | ||||||
|  |   else | ||||||
|  |     c(node, st) | ||||||
|  | } | ||||||
|  | base.VariablePattern = ignore | ||||||
|  | base.MemberPattern = skipThrough | ||||||
|  | base.RestElement = function (node, st, c) { return c(node.argument, st, "Pattern"); } | ||||||
|  | base.ArrayPattern =  function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.elements.length; ++i) { | ||||||
|  |     var elt = node.elements[i] | ||||||
|  |     if (elt) c(elt, st, "Pattern") | ||||||
|  |   } | ||||||
|  | } | ||||||
|  | base.ObjectPattern = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.properties.length; ++i) | ||||||
|  |     c(node.properties[i].value, st, "Pattern") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | base.Expression = skipThrough | ||||||
|  | base.ThisExpression = base.Super = base.MetaProperty = ignore | ||||||
|  | base.ArrayExpression = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.elements.length; ++i) { | ||||||
|  |     var elt = node.elements[i] | ||||||
|  |     if (elt) c(elt, st, "Expression") | ||||||
|  |   } | ||||||
|  | } | ||||||
|  | base.ObjectExpression = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.properties.length; ++i) | ||||||
|  |     c(node.properties[i], st) | ||||||
|  | } | ||||||
|  | base.FunctionExpression = base.ArrowFunctionExpression = base.FunctionDeclaration | ||||||
|  | base.SequenceExpression = base.TemplateLiteral = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.expressions.length; ++i) | ||||||
|  |     c(node.expressions[i], st, "Expression") | ||||||
|  | } | ||||||
|  | base.UnaryExpression = base.UpdateExpression = function (node, st, c) { | ||||||
|  |   c(node.argument, st, "Expression") | ||||||
|  | } | ||||||
|  | base.BinaryExpression = base.LogicalExpression = function (node, st, c) { | ||||||
|  |   c(node.left, st, "Expression") | ||||||
|  |   c(node.right, st, "Expression") | ||||||
|  | } | ||||||
|  | base.AssignmentExpression = base.AssignmentPattern = function (node, st, c) { | ||||||
|  |   c(node.left, st, "Pattern") | ||||||
|  |   c(node.right, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ConditionalExpression = function (node, st, c) { | ||||||
|  |   c(node.test, st, "Expression") | ||||||
|  |   c(node.consequent, st, "Expression") | ||||||
|  |   c(node.alternate, st, "Expression") | ||||||
|  | } | ||||||
|  | base.NewExpression = base.CallExpression = function (node, st, c) { | ||||||
|  |   c(node.callee, st, "Expression") | ||||||
|  |   if (node.arguments) for (var i = 0; i < node.arguments.length; ++i) | ||||||
|  |     c(node.arguments[i], st, "Expression") | ||||||
|  | } | ||||||
|  | base.MemberExpression = function (node, st, c) { | ||||||
|  |   c(node.object, st, "Expression") | ||||||
|  |   if (node.computed) c(node.property, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ExportNamedDeclaration = base.ExportDefaultDeclaration = function (node, st, c) { | ||||||
|  |   if (node.declaration) | ||||||
|  |     c(node.declaration, st, node.type == "ExportNamedDeclaration" || node.declaration.id ? "Statement" : "Expression") | ||||||
|  |   if (node.source) c(node.source, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ExportAllDeclaration = function (node, st, c) { | ||||||
|  |   c(node.source, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ImportDeclaration = function (node, st, c) { | ||||||
|  |   for (var i = 0; i < node.specifiers.length; i++) | ||||||
|  |     c(node.specifiers[i], st) | ||||||
|  |   c(node.source, st, "Expression") | ||||||
|  | } | ||||||
|  | base.ImportSpecifier = base.ImportDefaultSpecifier = base.ImportNamespaceSpecifier = base.Identifier = base.Literal = ignore | ||||||
|  |  | ||||||
|  | base.TaggedTemplateExpression = function (node, st, c) { | ||||||
|  |   c(node.tag, st, "Expression") | ||||||
|  |   c(node.quasi, st) | ||||||
|  | } | ||||||
|  | base.ClassDeclaration = base.ClassExpression = function (node, st, c) { return c(node, st, "Class"); } | ||||||
|  | base.Class = function (node, st, c) { | ||||||
|  |   if (node.id) c(node.id, st, "Pattern") | ||||||
|  |   if (node.superClass) c(node.superClass, st, "Expression") | ||||||
|  |   for (var i = 0; i < node.body.body.length; i++) | ||||||
|  |     c(node.body.body[i], st) | ||||||
|  | } | ||||||
|  | base.MethodDefinition = base.Property = function (node, st, c) { | ||||||
|  |   if (node.computed) c(node.key, st, "Expression") | ||||||
|  |   c(node.value, st, "Expression") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | exports.simple = simple; | ||||||
|  | exports.ancestor = ancestor; | ||||||
|  | exports.recursive = recursive; | ||||||
|  | exports.findNodeAt = findNodeAt; | ||||||
|  | exports.findNodeAround = findNodeAround; | ||||||
|  | exports.findNodeAfter = findNodeAfter; | ||||||
|  | exports.findNodeBefore = findNodeBefore; | ||||||
|  | exports.make = make; | ||||||
|  | exports.base = base; | ||||||
|  |  | ||||||
|  | Object.defineProperty(exports, '__esModule', { value: true }); | ||||||
|  |  | ||||||
|  | }))); | ||||||
							
								
								
									
										46
									
								
								Source/node_modules/acorn/package.json
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										46
									
								
								Source/node_modules/acorn/package.json
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,46 @@ | |||||||
|  | { | ||||||
|  |   "name": "acorn", | ||||||
|  |   "description": "ECMAScript parser", | ||||||
|  |   "homepage": "https://github.com/ternjs/acorn", | ||||||
|  |   "main": "dist/acorn.js", | ||||||
|  |   "jsnext:main": "dist/acorn.es.js", | ||||||
|  |   "version": "4.0.13", | ||||||
|  |   "engines": { | ||||||
|  |     "node": ">=0.4.0" | ||||||
|  |   }, | ||||||
|  |   "maintainers": [ | ||||||
|  |     { | ||||||
|  |       "name": "Marijn Haverbeke", | ||||||
|  |       "email": "marijnh@gmail.com", | ||||||
|  |       "web": "http://marijnhaverbeke.nl" | ||||||
|  |     }, | ||||||
|  |     { | ||||||
|  |       "name": "Ingvar Stepanyan", | ||||||
|  |       "email": "me@rreverser.com", | ||||||
|  |       "web": "http://rreverser.com/" | ||||||
|  |     } | ||||||
|  |   ], | ||||||
|  |   "repository": { | ||||||
|  |     "type": "git", | ||||||
|  |     "url": "https://github.com/ternjs/acorn.git" | ||||||
|  |   }, | ||||||
|  |   "license": "MIT", | ||||||
|  |   "scripts": { | ||||||
|  |     "prepublish": "npm test", | ||||||
|  |     "test": "node test/run.js", | ||||||
|  |     "pretest": "npm run build", | ||||||
|  |     "build": "npm run build:main && npm run build:walk && npm run build:loose && npm run build:bin", | ||||||
|  |     "build:main": "rollup -c rollup/config.main.js", | ||||||
|  |     "build:walk": "rollup -c rollup/config.walk.js", | ||||||
|  |     "build:loose": "rollup -c rollup/config.loose.js", | ||||||
|  |     "build:bin": "rollup -c rollup/config.bin.js" | ||||||
|  |   }, | ||||||
|  |   "bin": { | ||||||
|  |     "acorn": "./bin/acorn" | ||||||
|  |   }, | ||||||
|  |   "devDependencies": { | ||||||
|  |     "rollup": "^0.34.1", | ||||||
|  |     "rollup-plugin-buble": "^0.11.0", | ||||||
|  |     "unicode-9.0.0": "^0.7.0" | ||||||
|  |   } | ||||||
|  | } | ||||||
							
								
								
									
										60
									
								
								Source/node_modules/acorn/src/bin/acorn.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										60
									
								
								Source/node_modules/acorn/src/bin/acorn.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,60 @@ | |||||||
|  | import {basename} from "path" | ||||||
|  | import {readFileSync as readFile} from "fs" | ||||||
|  | import * as acorn from "acorn" | ||||||
|  |  | ||||||
|  | let infile, forceFile, silent = false, compact = false, tokenize = false | ||||||
|  | const options = {} | ||||||
|  |  | ||||||
|  | function help(status) { | ||||||
|  |   const print = (status == 0) ? console.log : console.error | ||||||
|  |   print("usage: " + basename(process.argv[1]) + " [--ecma3|--ecma5|--ecma6|--ecma7|...|--ecma2015|--ecma2016|...]") | ||||||
|  |   print("        [--tokenize] [--locations] [---allow-hash-bang] [--compact] [--silent] [--module] [--help] [--] [infile]") | ||||||
|  |   process.exit(status) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | for (let i = 2; i < process.argv.length; ++i) { | ||||||
|  |   const arg = process.argv[i] | ||||||
|  |   if ((arg == "-" || arg[0] != "-") && !infile) infile = arg | ||||||
|  |   else if (arg == "--" && !infile && i + 2 == process.argv.length) forceFile = infile = process.argv[++i] | ||||||
|  |   else if (arg == "--locations") options.locations = true | ||||||
|  |   else if (arg == "--allow-hash-bang") options.allowHashBang = true | ||||||
|  |   else if (arg == "--silent") silent = true | ||||||
|  |   else if (arg == "--compact") compact = true | ||||||
|  |   else if (arg == "--help") help(0) | ||||||
|  |   else if (arg == "--tokenize") tokenize = true | ||||||
|  |   else if (arg == "--module") options.sourceType = 'module' | ||||||
|  |   else { | ||||||
|  |     let match = arg.match(/^--ecma(\d+)$/) | ||||||
|  |     if (match) | ||||||
|  |       options.ecmaVersion = +match[1] | ||||||
|  |     else | ||||||
|  |       help(1) | ||||||
|  |   } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | function run(code) { | ||||||
|  |   let result | ||||||
|  |   if (!tokenize) { | ||||||
|  |     try { result = acorn.parse(code, options) } | ||||||
|  |     catch(e) { console.error(e.message); process.exit(1) } | ||||||
|  |   } else { | ||||||
|  |     result = [] | ||||||
|  |     let tokenizer = acorn.tokenizer(code, options), token | ||||||
|  |     while (true) { | ||||||
|  |       try { token = tokenizer.getToken() } | ||||||
|  |       catch(e) { console.error(e.message); process.exit(1) } | ||||||
|  |       result.push(token) | ||||||
|  |       if (token.type == acorn.tokTypes.eof) break | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |   if (!silent) console.log(JSON.stringify(result, null, compact ? null : 2)) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | if (forceFile || infile && infile != "-") { | ||||||
|  |   run(readFile(infile, "utf8")) | ||||||
|  | } else { | ||||||
|  |   let code = "" | ||||||
|  |   process.stdin.resume() | ||||||
|  |   process.stdin.on("data", chunk => code += chunk) | ||||||
|  |   process.stdin.on("end", () => run(code)) | ||||||
|  | } | ||||||
							
								
								
									
										819
									
								
								Source/node_modules/acorn/src/expression.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										819
									
								
								Source/node_modules/acorn/src/expression.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,819 @@ | |||||||
|  | // A recursive descent parser operates by defining functions for all | ||||||
|  | // syntactic elements, and recursively calling those, each function | ||||||
|  | // advancing the input stream and returning an AST node. Precedence | ||||||
|  | // of constructs (for example, the fact that `!x[1]` means `!(x[1])` | ||||||
|  | // instead of `(!x)[1]` is handled by the fact that the parser | ||||||
|  | // function that parses unary prefix operators is called first, and | ||||||
|  | // in turn calls the function that parses `[]` subscripts — that | ||||||
|  | // way, it'll receive the node for `x[1]` already parsed, and wraps | ||||||
|  | // *that* in the unary operator node. | ||||||
|  | // | ||||||
|  | // Acorn uses an [operator precedence parser][opp] to handle binary | ||||||
|  | // operator precedence, because it is much more compact than using | ||||||
|  | // the technique outlined above, which uses different, nesting | ||||||
|  | // functions to specify precedence, for all of the ten binary | ||||||
|  | // precedence levels that JavaScript defines. | ||||||
|  | // | ||||||
|  | // [opp]: http://en.wikipedia.org/wiki/Operator-precedence_parser | ||||||
|  |  | ||||||
|  | import {types as tt} from "./tokentype" | ||||||
|  | import {Parser} from "./state" | ||||||
|  | import {DestructuringErrors} from "./parseutil" | ||||||
|  |  | ||||||
|  | const pp = Parser.prototype | ||||||
|  |  | ||||||
|  | // Check if property name clashes with already added. | ||||||
|  | // Object/class getters and setters are not allowed to clash — | ||||||
|  | // either with each other or with an init property — and in | ||||||
|  | // strict mode, init properties are also not allowed to be repeated. | ||||||
|  |  | ||||||
|  | pp.checkPropClash = function(prop, propHash) { | ||||||
|  |   if (this.options.ecmaVersion >= 6 && (prop.computed || prop.method || prop.shorthand)) | ||||||
|  |     return | ||||||
|  |   let {key} = prop, name | ||||||
|  |   switch (key.type) { | ||||||
|  |   case "Identifier": name = key.name; break | ||||||
|  |   case "Literal": name = String(key.value); break | ||||||
|  |   default: return | ||||||
|  |   } | ||||||
|  |   let {kind} = prop | ||||||
|  |   if (this.options.ecmaVersion >= 6) { | ||||||
|  |     if (name === "__proto__" && kind === "init") { | ||||||
|  |       if (propHash.proto) this.raiseRecoverable(key.start, "Redefinition of __proto__ property") | ||||||
|  |       propHash.proto = true | ||||||
|  |     } | ||||||
|  |     return | ||||||
|  |   } | ||||||
|  |   name = "$" + name | ||||||
|  |   let other = propHash[name] | ||||||
|  |   if (other) { | ||||||
|  |     let isGetSet = kind !== "init" | ||||||
|  |     if ((this.strict || isGetSet) && other[kind] || !(isGetSet ^ other.init)) | ||||||
|  |       this.raiseRecoverable(key.start, "Redefinition of property") | ||||||
|  |   } else { | ||||||
|  |     other = propHash[name] = { | ||||||
|  |       init: false, | ||||||
|  |       get: false, | ||||||
|  |       set: false | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |   other[kind] = true | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // ### Expression parsing | ||||||
|  |  | ||||||
|  | // These nest, from the most general expression type at the top to | ||||||
|  | // 'atomic', nondivisible expression types at the bottom. Most of | ||||||
|  | // the functions will simply let the function(s) below them parse, | ||||||
|  | // and, *if* the syntactic construct they handle is present, wrap | ||||||
|  | // the AST node that the inner parser gave them in another node. | ||||||
|  |  | ||||||
|  | // Parse a full expression. The optional arguments are used to | ||||||
|  | // forbid the `in` operator (in for loops initalization expressions) | ||||||
|  | // and provide reference for storing '=' operator inside shorthand | ||||||
|  | // property assignment in contexts where both object expression | ||||||
|  | // and object pattern might appear (so it's possible to raise | ||||||
|  | // delayed syntax error at correct position). | ||||||
|  |  | ||||||
|  | pp.parseExpression = function(noIn, refDestructuringErrors) { | ||||||
|  |   let startPos = this.start, startLoc = this.startLoc | ||||||
|  |   let expr = this.parseMaybeAssign(noIn, refDestructuringErrors) | ||||||
|  |   if (this.type === tt.comma) { | ||||||
|  |     let node = this.startNodeAt(startPos, startLoc) | ||||||
|  |     node.expressions = [expr] | ||||||
|  |     while (this.eat(tt.comma)) node.expressions.push(this.parseMaybeAssign(noIn, refDestructuringErrors)) | ||||||
|  |     return this.finishNode(node, "SequenceExpression") | ||||||
|  |   } | ||||||
|  |   return expr | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Parse an assignment expression. This includes applications of | ||||||
|  | // operators like `+=`. | ||||||
|  |  | ||||||
pp.parseMaybeAssign = function(noIn, refDestructuringErrors, afterLeftParse) {
  // Inside a generator, `yield` takes over the whole assignment expression.
  if (this.inGenerator && this.isContextual("yield")) return this.parseYield()

  // Track ownership of the destructuring-error accumulator: when the caller
  // supplied one, save and reset its parenthesizedAssign position and restore
  // it on exit; otherwise create our own and check it ourselves below.
  let ownDestructuringErrors = false, oldParenAssign = -1
  if (refDestructuringErrors) {
    oldParenAssign = refDestructuringErrors.parenthesizedAssign
    refDestructuringErrors.parenthesizedAssign = -1
  } else {
    refDestructuringErrors = new DestructuringErrors
    ownDestructuringErrors = true
  }

  let startPos = this.start, startLoc = this.startLoc
  // A `(` or an identifier at this position may begin an arrow function.
  if (this.type == tt.parenL || this.type == tt.name)
    this.potentialArrowAt = this.start
  let left = this.parseMaybeConditional(noIn, refDestructuringErrors)
  if (afterLeftParse) left = afterLeftParse.call(this, left, startPos, startLoc)
  if (this.type.isAssign) {
    this.checkPatternErrors(refDestructuringErrors, true)
    // Re-initialize a borrowed accumulator's fields before parsing the right side.
    if (!ownDestructuringErrors) DestructuringErrors.call(refDestructuringErrors)
    let node = this.startNodeAt(startPos, startLoc)
    node.operator = this.value
    // Plain `=` converts the left side into an assignment target pattern;
    // compound operators (`+=` etc.) keep it as an expression.
    node.left = this.type === tt.eq ? this.toAssignable(left) : left
    refDestructuringErrors.shorthandAssign = -1 // reset because shorthand default was used correctly
    this.checkLVal(left)
    this.next()
    node.right = this.parseMaybeAssign(noIn)
    return this.finishNode(node, "AssignmentExpression")
  } else {
    if (ownDestructuringErrors) this.checkExpressionErrors(refDestructuringErrors, true)
  }
  if (oldParenAssign > -1) refDestructuringErrors.parenthesizedAssign = oldParenAssign
  return left
}
|  |  | ||||||
|  | // Parse a ternary conditional (`?:`) operator. | ||||||
|  |  | ||||||
// Parse a ternary conditional (`?:`). When no `?` follows the test
// expression, the test itself is returned.
pp.parseMaybeConditional = function(noIn, refDestructuringErrors) {
  let startPos = this.start, startLoc = this.startLoc
  let test = this.parseExprOps(noIn, refDestructuringErrors)
  if (this.checkExpressionErrors(refDestructuringErrors)) return test
  if (!this.eat(tt.question)) return test
  let node = this.startNodeAt(startPos, startLoc)
  node.test = test
  node.consequent = this.parseMaybeAssign()
  this.expect(tt.colon)
  node.alternate = this.parseMaybeAssign(noIn)
  return this.finishNode(node, "ConditionalExpression")
}
|  |  | ||||||
|  | // Start the precedence parser. | ||||||
|  |  | ||||||
// Entry point for the operator-precedence parser: parse the first
// (possibly unary) operand, then hand it to parseExprOp with the lowest
// precedence bound.
pp.parseExprOps = function(noIn, refDestructuringErrors) {
  let startPos = this.start, startLoc = this.startLoc
  let operand = this.parseMaybeUnary(refDestructuringErrors, false)
  return this.checkExpressionErrors(refDestructuringErrors)
    ? operand
    : this.parseExprOp(operand, startPos, startLoc, -1, noIn)
}
|  |  | ||||||
|  | // Parse binary operators with the operator precedence parsing | ||||||
|  | // algorithm. `left` is the left-hand side of the operator. | ||||||
|  | // `minPrec` provides context that allows the function to stop and | ||||||
// defer further parsing to one of its callers when it encounters an
|  | // operator that has a lower precedence than the set it is parsing. | ||||||
|  |  | ||||||
// Precedence-climbing step: while the current token is a binary operator
// whose precedence exceeds `minPrec` (and is not a forbidden `in`), parse
// its right-hand side and fold it into a Binary/LogicalExpression node.
pp.parseExprOp = function(left, leftStartPos, leftStartLoc, minPrec, noIn) {
  let prec = this.type.binop
  let inForbidden = noIn && this.type === tt._in
  if (prec == null || inForbidden || prec <= minPrec) return left
  let logical = this.type === tt.logicalOR || this.type === tt.logicalAND
  let op = this.value
  this.next()
  let startPos = this.start, startLoc = this.startLoc
  // Operands of higher precedence bind to the right side first.
  let right = this.parseExprOp(this.parseMaybeUnary(null, false), startPos, startLoc, prec, noIn)
  let node = this.buildBinary(leftStartPos, leftStartLoc, left, right, op, logical)
  // Continue folding further operators at this precedence level.
  return this.parseExprOp(node, leftStartPos, leftStartLoc, minPrec, noIn)
}
|  |  | ||||||
// Assemble a binary or logical expression node from its pre-parsed parts.
pp.buildBinary = function(startPos, startLoc, left, right, op, logical) {
  let nodeType = logical ? "LogicalExpression" : "BinaryExpression"
  let node = this.startNodeAt(startPos, startLoc)
  node.left = left
  node.operator = op
  node.right = right
  return this.finishNode(node, nodeType)
}
|  |  | ||||||
|  | // Parse unary operators, both prefix and postfix. | ||||||
|  |  | ||||||
pp.parseMaybeUnary = function(refDestructuringErrors, sawUnary) {
  let startPos = this.start, startLoc = this.startLoc, expr
  if (this.inAsync && this.isContextual("await")) {
    // `await` is only an operator inside async functions.
    expr = this.parseAwait(refDestructuringErrors)
    sawUnary = true
  } else if (this.type.prefix) {
    // Prefix operator; `++`/`--` become UpdateExpressions, the rest
    // UnaryExpressions.
    let node = this.startNode(), update = this.type === tt.incDec
    node.operator = this.value
    node.prefix = true
    this.next()
    node.argument = this.parseMaybeUnary(null, true)
    this.checkExpressionErrors(refDestructuringErrors, true)
    // Update operators require an assignable argument; strict mode forbids
    // `delete` of a plain identifier.
    if (update) this.checkLVal(node.argument)
    else if (this.strict && node.operator === "delete" &&
             node.argument.type === "Identifier")
      this.raiseRecoverable(node.start, "Deleting local variable in strict mode")
    else sawUnary = true
    expr = this.finishNode(node, update ? "UpdateExpression" : "UnaryExpression")
  } else {
    expr = this.parseExprSubscripts(refDestructuringErrors)
    if (this.checkExpressionErrors(refDestructuringErrors)) return expr
    // Postfix `++`/`--` only attach when no newline could insert a semicolon.
    while (this.type.postfix && !this.canInsertSemicolon()) {
      let node = this.startNodeAt(startPos, startLoc)
      node.operator = this.value
      node.prefix = false
      node.argument = expr
      this.checkLVal(expr)
      this.next()
      expr = this.finishNode(node, "UpdateExpression")
    }
  }

  // `**` may not directly follow a unary operator, hence the sawUnary guard.
  if (!sawUnary && this.eat(tt.starstar))
    return this.buildBinary(startPos, startLoc, expr, this.parseMaybeUnary(null, false), "**", false)
  else
    return expr
}
|  |  | ||||||
|  | // Parse call, dot, and `[]`-subscript expressions. | ||||||
|  |  | ||||||
pp.parseExprSubscripts = function(refDestructuringErrors) {
  let startPos = this.start, startLoc = this.startLoc
  let expr = this.parseExprAtom(refDestructuringErrors)
  // An arrow function whose last token was not `)` cannot be followed by
  // subscripts directly.
  let skipArrowSubscripts = expr.type === "ArrowFunctionExpression" && this.input.slice(this.lastTokStart, this.lastTokEnd) !== ")"
  if (this.checkExpressionErrors(refDestructuringErrors) || skipArrowSubscripts) return expr
  let result = this.parseSubscripts(expr, startPos, startLoc)
  // A member expression is a valid assignment target, so cancel any pending
  // parenthesized-pattern errors recorded inside it.
  if (refDestructuringErrors && result.type === "MemberExpression") {
    if (refDestructuringErrors.parenthesizedAssign >= result.start) refDestructuringErrors.parenthesizedAssign = -1
    if (refDestructuringErrors.parenthesizedBind >= result.start) refDestructuringErrors.parenthesizedBind = -1
  }
  return result
}
|  |  | ||||||
pp.parseSubscripts = function(base, startPos, startLoc, noCalls) {
  // An `async` identifier immediately followed (no gap, no ASI) by `(` may
  // turn out to be an async arrow function head; remember that so the call
  // branch below can re-check after the argument list.
  let maybeAsyncArrow = this.options.ecmaVersion >= 8 && base.type === "Identifier" && base.name === "async" &&
      this.lastTokEnd == base.end && !this.canInsertSemicolon()
  for (let computed;;) {
    if ((computed = this.eat(tt.bracketL)) || this.eat(tt.dot)) {
      // Member access: `base[expr]` or `base.name`.
      let node = this.startNodeAt(startPos, startLoc)
      node.object = base
      node.property = computed ? this.parseExpression() : this.parseIdent(true)
      node.computed = !!computed
      if (computed) this.expect(tt.bracketR)
      base = this.finishNode(node, "MemberExpression")
    } else if (!noCalls && this.eat(tt.parenL)) {
      // Call arguments: parse the list with fresh yield/await tracking,
      // then decide between an async arrow function and a CallExpression.
      let refDestructuringErrors = new DestructuringErrors, oldYieldPos = this.yieldPos, oldAwaitPos = this.awaitPos
      this.yieldPos = 0
      this.awaitPos = 0
      let exprList = this.parseExprList(tt.parenR, this.options.ecmaVersion >= 8, false, refDestructuringErrors)
      if (maybeAsyncArrow && !this.canInsertSemicolon() && this.eat(tt.arrow)) {
        this.checkPatternErrors(refDestructuringErrors, false)
        this.checkYieldAwaitInDefaultParams()
        this.yieldPos = oldYieldPos
        this.awaitPos = oldAwaitPos
        return this.parseArrowExpression(this.startNodeAt(startPos, startLoc), exprList, true)
      }
      this.checkExpressionErrors(refDestructuringErrors, true)
      // Restore the outer yield/await positions, but keep ones recorded
      // inside the arguments when the outer context had none (0).
      this.yieldPos = oldYieldPos || this.yieldPos
      this.awaitPos = oldAwaitPos || this.awaitPos
      let node = this.startNodeAt(startPos, startLoc)
      node.callee = base
      node.arguments = exprList
      base = this.finishNode(node, "CallExpression")
    } else if (this.type === tt.backQuote) {
      // Tagged template: base`...`.
      let node = this.startNodeAt(startPos, startLoc)
      node.tag = base
      node.quasi = this.parseTemplate()
      base = this.finishNode(node, "TaggedTemplateExpression")
    } else {
      return base
    }
  }
}
|  |  | ||||||
|  | // Parse an atomic expression — either a single token that is an | ||||||
|  | // expression, an expression started by a keyword like `function` or | ||||||
|  | // `new`, or an expression wrapped in punctuation like `()`, `[]`, | ||||||
|  | // or `{}`. | ||||||
|  |  | ||||||
pp.parseExprAtom = function(refDestructuringErrors) {
  let node, canBeArrow = this.potentialArrowAt == this.start
  switch (this.type) {
  case tt._super:
    if (!this.inFunction)
      this.raise(this.start, "'super' outside of function or class")
    // Deliberate fall-through: Super shares the handling below, where the
    // node type is chosen by re-checking this.type.

  case tt._this:
    let type = this.type === tt._this ? "ThisExpression" : "Super"
    node = this.startNode()
    this.next()
    return this.finishNode(node, type)

  case tt.name:
    let startPos = this.start, startLoc = this.startLoc
    let id = this.parseIdent(this.type !== tt.name)
    // `async function ...` expression (ES2017+), provided no ASI intervenes.
    if (this.options.ecmaVersion >= 8 && id.name === "async" && !this.canInsertSemicolon() && this.eat(tt._function))
      return this.parseFunction(this.startNodeAt(startPos, startLoc), false, false, true)
    if (canBeArrow && !this.canInsertSemicolon()) {
      // Single-parameter arrow: `id => ...`
      if (this.eat(tt.arrow))
        return this.parseArrowExpression(this.startNodeAt(startPos, startLoc), [id], false)
      // Async single-parameter arrow: `async id => ...`
      if (this.options.ecmaVersion >= 8 && id.name === "async" && this.type === tt.name) {
        id = this.parseIdent()
        if (this.canInsertSemicolon() || !this.eat(tt.arrow))
          this.unexpected()
        return this.parseArrowExpression(this.startNodeAt(startPos, startLoc), [id], true)
      }
    }
    return id

  case tt.regexp:
    // Attach the regex source pattern and flags alongside the literal value.
    let value = this.value
    node = this.parseLiteral(value.value)
    node.regex = {pattern: value.pattern, flags: value.flags}
    return node

  case tt.num: case tt.string:
    return this.parseLiteral(this.value)

  case tt._null: case tt._true: case tt._false:
    node = this.startNode()
    node.value = this.type === tt._null ? null : this.type === tt._true
    node.raw = this.type.keyword
    this.next()
    return this.finishNode(node, "Literal")

  case tt.parenL:
    // Parenthesized expression or arrow-function head; record positions so
    // later destructuring checks can reject `(a) = ...`-style patterns.
    let start = this.start, expr = this.parseParenAndDistinguishExpression(canBeArrow)
    if (refDestructuringErrors) {
      if (refDestructuringErrors.parenthesizedAssign < 0 && !this.isSimpleAssignTarget(expr))
        refDestructuringErrors.parenthesizedAssign = start
      if (refDestructuringErrors.parenthesizedBind < 0)
        refDestructuringErrors.parenthesizedBind = start
    }
    return expr

  case tt.bracketL:
    node = this.startNode()
    this.next()
    node.elements = this.parseExprList(tt.bracketR, true, true, refDestructuringErrors)
    return this.finishNode(node, "ArrayExpression")

  case tt.braceL:
    return this.parseObj(false, refDestructuringErrors)

  case tt._function:
    node = this.startNode()
    this.next()
    return this.parseFunction(node, false)

  case tt._class:
    return this.parseClass(this.startNode(), false)

  case tt._new:
    return this.parseNew()

  case tt.backQuote:
    return this.parseTemplate()

  default:
    this.unexpected()
  }
}
|  |  | ||||||
// Build a Literal node for the current token, keeping both the parsed
// value and the raw source text.
pp.parseLiteral = function(value) {
  // Capture the raw slice before advancing past the token.
  let raw = this.input.slice(this.start, this.end)
  let node = this.startNode()
  node.value = value
  node.raw = raw
  this.next()
  return this.finishNode(node, "Literal")
}
|  |  | ||||||
// Parse a mandatory parenthesized expression: `( expression )`.
pp.parseParenExpression = function() {
  this.expect(tt.parenL)
  let inner = this.parseExpression()
  this.expect(tt.parenR)
  return inner
}
|  |  | ||||||
pp.parseParenAndDistinguishExpression = function(canBeArrow) {
  // Parse `( ... )`, which may turn out to be either a parenthesized
  // (sequence) expression or an arrow-function parameter list; the
  // distinction is only known once we see (or don't see) `=>` after `)`.
  let startPos = this.start, startLoc = this.startLoc, val, allowTrailingComma = this.options.ecmaVersion >= 8
  if (this.options.ecmaVersion >= 6) {
    this.next()

    let innerStartPos = this.start, innerStartLoc = this.startLoc
    let exprList = [], first = true, lastIsComma = false
    let refDestructuringErrors = new DestructuringErrors, oldYieldPos = this.yieldPos, oldAwaitPos = this.awaitPos, spreadStart, innerParenStart
    this.yieldPos = 0
    this.awaitPos = 0
    while (this.type !== tt.parenR) {
      first ? first = false : this.expect(tt.comma)
      if (allowTrailingComma && this.afterTrailingComma(tt.parenR, true)) {
        lastIsComma = true
        break
      } else if (this.type === tt.ellipsis) {
        // A rest element is only valid for an arrow parameter list; its
        // position is kept so we can complain later if no `=>` follows.
        spreadStart = this.start
        exprList.push(this.parseParenItem(this.parseRest()))
        if (this.type === tt.comma) this.raise(this.start, "Comma is not permitted after the rest element")
        break
      } else {
        // Nested parens disqualify an arrow parameter list; remember where
        // the first one started for the error message.
        if (this.type === tt.parenL && !innerParenStart) {
          innerParenStart = this.start
        }
        exprList.push(this.parseMaybeAssign(false, refDestructuringErrors, this.parseParenItem))
      }
    }
    let innerEndPos = this.start, innerEndLoc = this.startLoc
    this.expect(tt.parenR)

    // `( ... ) =>` — this was an arrow-function parameter list after all.
    if (canBeArrow && !this.canInsertSemicolon() && this.eat(tt.arrow)) {
      this.checkPatternErrors(refDestructuringErrors, false)
      this.checkYieldAwaitInDefaultParams()
      if (innerParenStart) this.unexpected(innerParenStart)
      this.yieldPos = oldYieldPos
      this.awaitPos = oldAwaitPos
      return this.parseParenArrowList(startPos, startLoc, exprList)
    }

    // Otherwise it must be a valid parenthesized expression: non-empty,
    // no trailing comma, no rest element.
    if (!exprList.length || lastIsComma) this.unexpected(this.lastTokStart)
    if (spreadStart) this.unexpected(spreadStart)
    this.checkExpressionErrors(refDestructuringErrors, true)
    this.yieldPos = oldYieldPos || this.yieldPos
    this.awaitPos = oldAwaitPos || this.awaitPos

    if (exprList.length > 1) {
      val = this.startNodeAt(innerStartPos, innerStartLoc)
      val.expressions = exprList
      this.finishNodeAt(val, "SequenceExpression", innerEndPos, innerEndLoc)
    } else {
      val = exprList[0]
    }
  } else {
    val = this.parseParenExpression()
  }

  // Optionally keep an explicit ParenthesizedExpression wrapper node.
  if (this.options.preserveParens) {
    let par = this.startNodeAt(startPos, startLoc)
    par.expression = val
    return this.finishNode(par, "ParenthesizedExpression")
  } else {
    return val
  }
}
|  |  | ||||||
pp.parseParenItem = function(item) {
  // Identity transform; passed as the afterLeftParse callback to
  // parseMaybeAssign when parsing parenthesized items, so subclasses can
  // hook item processing without changing the callers.
  return item
}
|  |  | ||||||
// Build the arrow-function node for a parenthesized parameter list.
pp.parseParenArrowList = function(startPos, startLoc, exprList) {
  let node = this.startNodeAt(startPos, startLoc)
  return this.parseArrowExpression(node, exprList)
}
|  |  | ||||||
|  | // New's precedence is slightly tricky. It must allow its argument to | ||||||
|  | // be a `[]` or dot subscript expression, but not a call — at least, | ||||||
|  | // not without wrapping it in parentheses. Thus, it uses the noCalls | ||||||
|  | // argument to parseSubscripts to prevent it from consuming the | ||||||
|  | // argument list. | ||||||
|  |  | ||||||
// Shared empty argument list, assigned to `new` expressions that have no
// parenthesized arguments (see parseNew); being shared, it must never be
// mutated.
const empty = []
|  |  | ||||||
pp.parseNew = function() {
  let node = this.startNode()
  let meta = this.parseIdent(true)
  // `new.target` meta-property (ES6+), only valid inside functions.
  if (this.options.ecmaVersion >= 6 && this.eat(tt.dot)) {
    node.meta = meta
    node.property = this.parseIdent(true)
    if (node.property.name !== "target")
      this.raiseRecoverable(node.property.start, "The only valid meta property for new is new.target")
    if (!this.inFunction)
      this.raiseRecoverable(node.start, "new.target can only be used in functions")
    return this.finishNode(node, "MetaProperty")
  }
  let startPos = this.start, startLoc = this.startLoc
  // noCalls=true: the callee may take subscripts but not an argument list —
  // the parens after it belong to the NewExpression itself.
  node.callee = this.parseSubscripts(this.parseExprAtom(), startPos, startLoc, true)
  if (this.eat(tt.parenL)) node.arguments = this.parseExprList(tt.parenR, this.options.ecmaVersion >= 8, false)
  else node.arguments = empty
  return this.finishNode(node, "NewExpression")
}
|  |  | ||||||
|  | // Parse template expression. | ||||||
|  |  | ||||||
// Parse one quasi (literal chunk) of a template literal, recording both
// the raw source (with line endings normalized to \n) and the cooked value.
pp.parseTemplateElement = function() {
  let elem = this.startNode()
  let raw = this.input.slice(this.start, this.end).replace(/\r\n?/g, '\n')
  elem.value = {raw: raw, cooked: this.value}
  this.next()
  // A tail element is the chunk that ends at the closing backquote.
  elem.tail = this.type === tt.backQuote
  return this.finishNode(elem, "TemplateElement")
}
|  |  | ||||||
// Parse a template literal: alternating quasis and `${...}` expressions,
// ending with the quasi whose tail flag is set.
pp.parseTemplate = function() {
  let node = this.startNode()
  this.next()
  node.expressions = []
  node.quasis = []
  for (;;) {
    let elem = this.parseTemplateElement()
    node.quasis.push(elem)
    if (elem.tail) break
    this.expect(tt.dollarBraceL)
    node.expressions.push(this.parseExpression())
    this.expect(tt.braceR)
  }
  this.next()
  return this.finishNode(node, "TemplateLiteral")
}
|  |  | ||||||
|  | // Parse an object literal or binding pattern. | ||||||
|  |  | ||||||
pp.parseObj = function(isPattern, refDestructuringErrors) {
  // Parse `{ ... }` as either an ObjectExpression or (isPattern) an
  // ObjectPattern; propHash accumulates seen property names for clash checks.
  let node = this.startNode(), first = true, propHash = {}
  node.properties = []
  this.next()
  while (!this.eat(tt.braceR)) {
    if (!first) {
      this.expect(tt.comma)
      if (this.afterTrailingComma(tt.braceR)) break
    } else first = false

    let prop = this.startNode(), isGenerator, isAsync, startPos, startLoc
    if (this.options.ecmaVersion >= 6) {
      prop.method = false
      prop.shorthand = false
      // Positions are only needed when defaults/patterns may appear.
      if (isPattern || refDestructuringErrors) {
        startPos = this.start
        startLoc = this.startLoc
      }
      // `*` marks a generator method; not allowed in patterns.
      if (!isPattern)
        isGenerator = this.eat(tt.star)
    }
    this.parsePropertyName(prop)
    // `async` followed by another property name (not `(`, `:`, or a line
    // break) marks an async method (ES2017+); re-parse the real name.
    if (!isPattern && this.options.ecmaVersion >= 8 && !isGenerator && !prop.computed &&
        prop.key.type === "Identifier" && prop.key.name === "async" && this.type !== tt.parenL &&
        this.type !== tt.colon && !this.canInsertSemicolon()) {
      isAsync = true
      this.parsePropertyName(prop, refDestructuringErrors)
    } else {
      isAsync = false
    }
    this.parsePropertyValue(prop, isPattern, isGenerator, isAsync, startPos, startLoc, refDestructuringErrors)
    this.checkPropClash(prop, propHash)
    node.properties.push(this.finishNode(prop, "Property"))
  }
  return this.finishNode(node, isPattern ? "ObjectPattern" : "ObjectExpression")
}
|  |  | ||||||
pp.parsePropertyValue = function(prop, isPattern, isGenerator, isAsync, startPos, startLoc, refDestructuringErrors) {
  // Generator/async markers are incompatible with a plain `key: value` pair.
  if ((isGenerator || isAsync) && this.type === tt.colon)
    this.unexpected()

  if (this.eat(tt.colon)) {
    // `key: value` — in a pattern the value may carry a default (`= init`).
    prop.value = isPattern ? this.parseMaybeDefault(this.start, this.startLoc) : this.parseMaybeAssign(false, refDestructuringErrors)
    prop.kind = "init"
  } else if (this.options.ecmaVersion >= 6 && this.type === tt.parenL) {
    // `key(...) { ... }` — shorthand method definition.
    if (isPattern) this.unexpected()
    prop.kind = "init"
    prop.method = true
    prop.value = this.parseMethod(isGenerator, isAsync)
  } else if (this.options.ecmaVersion >= 5 && !prop.computed && prop.key.type === "Identifier" &&
             (prop.key.name === "get" || prop.key.name === "set") &&
             (this.type != tt.comma && this.type != tt.braceR)) {
    // `get key() {...}` / `set key(v) {...}` accessor definitions.
    if (isGenerator || isAsync || isPattern) this.unexpected()
    prop.kind = prop.key.name
    this.parsePropertyName(prop)
    prop.value = this.parseMethod(false)
    // Getters must take zero parameters, setters exactly one (and not rest).
    let paramCount = prop.kind === "get" ? 0 : 1
    if (prop.value.params.length !== paramCount) {
      let start = prop.value.start
      if (prop.kind === "get")
        this.raiseRecoverable(start, "getter should have no params")
      else
        this.raiseRecoverable(start, "setter should have exactly one param")
    } else {
      if (prop.kind === "set" && prop.value.params[0].type === "RestElement")
        this.raiseRecoverable(prop.value.params[0].start, "Setter cannot use rest params")
    }
  } else if (this.options.ecmaVersion >= 6 && !prop.computed && prop.key.type === "Identifier") {
    // Shorthand property `{ key }` (possibly with a pattern default).
    if (this.keywords.test(prop.key.name) ||
        (this.strict ? this.reservedWordsStrict : this.reservedWords).test(prop.key.name) ||
        (this.inGenerator && prop.key.name == "yield") ||
        (this.inAsync && prop.key.name == "await"))
      this.raiseRecoverable(prop.key.start, "'" + prop.key.name + "' can not be used as shorthand property")
    prop.kind = "init"
    if (isPattern) {
      prop.value = this.parseMaybeDefault(startPos, startLoc, prop.key)
    } else if (this.type === tt.eq && refDestructuringErrors) {
      // `{ key = default }` is only valid if this turns out to be a pattern;
      // record the position so it can be reported otherwise.
      if (refDestructuringErrors.shorthandAssign < 0)
        refDestructuringErrors.shorthandAssign = this.start
      prop.value = this.parseMaybeDefault(startPos, startLoc, prop.key)
    } else {
      prop.value = prop.key
    }
    prop.shorthand = true
  } else this.unexpected()
}
|  |  | ||||||
// Parse a property name — computed `[expr]` (ES6+), a numeric or string
// literal, or an identifier (keywords allowed) — storing it on prop.key
// and returning it.
pp.parsePropertyName = function(prop) {
  if (this.options.ecmaVersion >= 6) {
    if (this.eat(tt.bracketL)) {
      prop.computed = true
      prop.key = this.parseMaybeAssign()
      this.expect(tt.bracketR)
      return prop.key
    }
    prop.computed = false
  }
  let key = this.type === tt.num || this.type === tt.string
    ? this.parseExprAtom()
    : this.parseIdent(true)
  prop.key = key
  return key
}
|  |  | ||||||
|  | // Initialize empty function node. | ||||||
|  |  | ||||||
// Reset a node's function-related fields to their defaults, adding only
// the fields appropriate for the configured ECMAScript version.
pp.initFunction = function(node) {
  node.id = null
  let ecmaVersion = this.options.ecmaVersion
  if (ecmaVersion >= 6) {
    node.generator = false
    node.expression = false
  }
  if (ecmaVersion >= 8) node.async = false
}
|  |  | ||||||
|  | // Parse object or class method. | ||||||
|  |  | ||||||
pp.parseMethod = function(isGenerator, isAsync) {
  // Save the surrounding function context so it can be restored after the
  // method body is parsed.
  let node = this.startNode(), oldInGen = this.inGenerator, oldInAsync = this.inAsync,
      oldYieldPos = this.yieldPos, oldAwaitPos = this.awaitPos, oldInFunc = this.inFunction

  this.initFunction(node)
  if (this.options.ecmaVersion >= 6)
    node.generator = isGenerator
  if (this.options.ecmaVersion >= 8)
    node.async = !!isAsync

  // Enter the method's own context.
  this.inGenerator = node.generator
  this.inAsync = node.async
  this.yieldPos = 0
  this.awaitPos = 0
  this.inFunction = true

  this.expect(tt.parenL)
  node.params = this.parseBindingList(tt.parenR, false, this.options.ecmaVersion >= 8)
  this.checkYieldAwaitInDefaultParams()
  this.parseFunctionBody(node, false)

  // Restore the enclosing context.
  this.inGenerator = oldInGen
  this.inAsync = oldInAsync
  this.yieldPos = oldYieldPos
  this.awaitPos = oldAwaitPos
  this.inFunction = oldInFunc
  return this.finishNode(node, "FunctionExpression")
}
|  |  | ||||||
|  | // Parse arrow function expression with given parameters. | ||||||
|  |  | ||||||
pp.parseArrowExpression = function(node, params, isAsync) {
  // Save the surrounding function context so it can be restored afterwards.
  let oldInGen = this.inGenerator, oldInAsync = this.inAsync,
      oldYieldPos = this.yieldPos, oldAwaitPos = this.awaitPos, oldInFunc = this.inFunction

  this.initFunction(node)
  if (this.options.ecmaVersion >= 8)
    node.async = !!isAsync

  // Arrow functions are never generators.
  this.inGenerator = false
  this.inAsync = node.async
  this.yieldPos = 0
  this.awaitPos = 0
  this.inFunction = true

  // The already-parsed expression list is reinterpreted as binding patterns.
  node.params = this.toAssignableList(params, true)
  this.parseFunctionBody(node, true)

  // Restore the enclosing context.
  this.inGenerator = oldInGen
  this.inAsync = oldInAsync
  this.yieldPos = oldYieldPos
  this.awaitPos = oldAwaitPos
  this.inFunction = oldInFunc
  return this.finishNode(node, "ArrowFunctionExpression")
}
|  |  | ||||||
|  | // Parse function body and check parameters. | ||||||
|  |  | ||||||
pp.parseFunctionBody = function(node, isArrowFunction) {
  // An arrow function without `{` has a single-expression body.
  let isExpression = isArrowFunction && this.type !== tt.braceL
  let oldStrict = this.strict, useStrict = false

  if (isExpression) {
    node.body = this.parseMaybeAssign()
    node.expression = true
  } else {
    let nonSimple = this.options.ecmaVersion >= 7 && !this.isSimpleParamList(node.params)
    if (!oldStrict || nonSimple) {
      useStrict = this.strictDirective(this.end)
      // If this is a strict mode function, verify that argument names
      // are not repeated, and it does not try to bind the words `eval`
      // or `arguments`.
      if (useStrict && nonSimple)
        this.raiseRecoverable(node.start, "Illegal 'use strict' directive in function with non-simple parameter list")
    }
    // Start a new scope with regard to labels and the `inFunction`
    // flag (restore them to their old value afterwards).
    let oldLabels = this.labels
    this.labels = []
    if (useStrict) this.strict = true
    node.body = this.parseBlock(true)
    node.expression = false
    this.labels = oldLabels
  }

  // Parameter checks run under strict-mode rules when either the enclosing
  // code or a 'use strict' directive makes the function strict.
  if (oldStrict || useStrict) {
    this.strict = true
    if (node.id)
      this.checkLVal(node.id, true)
    this.checkParams(node)
    this.strict = oldStrict
  } else if (isArrowFunction || !this.isSimpleParamList(node.params)) {
    this.checkParams(node)
  }
}
|  |  | ||||||
// A parameter list is "simple" when every entry is a plain identifier
// (no patterns, defaults, or rest elements).
pp.isSimpleParamList = function(params) {
  return params.every(function(param) { return param.type === "Identifier" })
}
|  |  | ||||||
|  | // Checks function params for various disallowed patterns such as using "eval" | ||||||
|  | // or "arguments" and duplicate parameters. | ||||||
|  |  | ||||||
// Checks function params for various disallowed patterns such as using
// "eval" or "arguments" and duplicate parameters; nameHash accumulates
// the names seen so far across the whole list.
pp.checkParams = function(node) {
  let nameHash = {}
  for (let param of node.params) {
    this.checkLVal(param, true, nameHash)
  }
}
|  |  | ||||||
|  | // Parses a comma-separated list of expressions, and returns them as | ||||||
|  | // an array. `close` is the token type that ends the list, and | ||||||
|  | // `allowEmpty` can be turned on to allow subsequent commas with | ||||||
|  | // nothing in between them to be parsed as `null` (which is needed | ||||||
|  | // for array literals). | ||||||
|  |  | ||||||
pp.parseExprList = function(close, allowTrailingComma, allowEmpty, refDestructuringErrors) {
  let elts = [], first = true
  while (!this.eat(close)) {
    if (!first) {
      this.expect(tt.comma)
      // A trailing comma immediately before `close` ends the list early.
      if (allowTrailingComma && this.afterTrailingComma(close)) break
    } else first = false

    let elt
    if (allowEmpty && this.type === tt.comma)
      // Elision (e.g. `[a, , b]`) is represented as a `null` element.
      elt = null
    else if (this.type === tt.ellipsis) {
      elt = this.parseSpread(refDestructuringErrors)
      // Record a comma after a spread element: legal in a call, but an
      // error if this later turns out to be arrow-function parameters.
      if (refDestructuringErrors && this.type === tt.comma && refDestructuringErrors.trailingComma < 0)
        refDestructuringErrors.trailingComma = this.start
    } else {
      elt = this.parseMaybeAssign(false, refDestructuringErrors)
    }
    elts.push(elt)
  }
  return elts
}
|  |  | ||||||
|  | // Parse the next token as an identifier. If `liberal` is true (used | ||||||
|  | // when parsing properties), it will also convert keywords into | ||||||
|  | // identifiers. | ||||||
|  |  | ||||||
pp.parseIdent = function(liberal) {
  let node = this.startNode()
  // `allowReserved: "never"` disables the liberal (keyword-accepting) mode.
  if (liberal && this.options.allowReserved == "never") liberal = false
  if (this.type === tt.name) {
    // Reject reserved words unless `liberal`; before ES6 the check is
    // skipped when the word was written with an escape sequence.
    if (!liberal && (this.strict ? this.reservedWordsStrict : this.reservedWords).test(this.value) &&
        (this.options.ecmaVersion >= 6 ||
         this.input.slice(this.start, this.end).indexOf("\\") == -1))
      this.raiseRecoverable(this.start, "The keyword '" + this.value + "' is reserved")
    // `yield` / `await` are contextually reserved inside generators and
    // async functions, respectively.
    if (this.inGenerator && this.value === "yield")
      this.raiseRecoverable(this.start, "Can not use 'yield' as identifier inside a generator")
    if (this.inAsync && this.value === "await")
      this.raiseRecoverable(this.start, "Can not use 'await' as identifier inside an async function")
    node.name = this.value
  } else if (liberal && this.type.keyword) {
    // In liberal mode a keyword token (e.g. `obj.class`) becomes a name.
    node.name = this.type.keyword
  } else {
    this.unexpected()
  }
  this.next()
  return this.finishNode(node, "Identifier")
}
|  |  | ||||||
|  | // Parses yield expression inside generator. | ||||||
|  |  | ||||||
pp.parseYield = function() {
  // Remember the first `yield` position so it can be reported later if
  // this context turns out to disallow it.
  if (!this.yieldPos) this.yieldPos = this.start

  const node = this.startNode()
  this.next()
  // A bare `yield` has no argument: it is followed by a terminator, an
  // ASI opportunity, or a token that cannot start an expression.
  const bare = this.type == tt.semi || this.canInsertSemicolon() ||
    (this.type != tt.star && !this.type.startsExpr)
  if (bare) {
    node.delegate = false
    node.argument = null
  } else {
    // `yield*` delegates to another iterable.
    node.delegate = this.eat(tt.star)
    node.argument = this.parseMaybeAssign()
  }
  return this.finishNode(node, "YieldExpression")
}
|  |  | ||||||
pp.parseAwait = function() {
  // Remember the first `await` position so it can be reported later if
  // this context turns out to disallow it.
  if (!this.awaitPos) this.awaitPos = this.start

  const node = this.startNode()
  this.next()
  node.argument = this.parseMaybeUnary(null, true)
  return this.finishNode(node, "AwaitExpression")
}
							
								
								
									
										81
									
								
								Source/node_modules/acorn/src/identifier.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										81
									
								
								Source/node_modules/acorn/src/identifier.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,81 @@ | |||||||
// Reserved word lists for various dialects of the language

// Keyed by ECMAScript version (3 / 5 / 6); `strict` lists words that are
// additionally reserved in strict mode, and `strictBind` words that
// strict mode forbids specifically as binding/assignment targets.
export const reservedWords = {
  3: "abstract boolean byte char class double enum export extends final float goto implements import int interface long native package private protected public short static super synchronized throws transient volatile",
  5: "class enum extends super const export import",
  6: "enum",
  strict: "implements interface let package private protected public static yield",
  strictBind: "eval arguments"
}
|  |  | ||||||
// And the keywords

// ES5-and-earlier keywords; ES6 promotes several previously-reserved
// words to real keywords.
var ecma5AndLessKeywords = "break case catch continue debugger default do else finally for function if return switch throw try var while with null true false instanceof typeof void delete new in this"

export const keywords = {
  5: ecma5AndLessKeywords,
  6: ecma5AndLessKeywords + " const class extends export import super"
}
|  |  | ||||||
|  | // ## Character categories | ||||||
|  |  | ||||||
|  | // Big ugly regular expressions that match characters in the | ||||||
|  | // whitespace, identifier, and identifier-start categories. These | ||||||
|  | // are only applied when a character is found to actually have a | ||||||
|  | // code point above 128. | ||||||
|  | // Generated by `bin/generate-identifier-regex.js`. | ||||||
|  |  | ||||||
|  | let nonASCIIidentifierStartChars = "\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0af9\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u
12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309b-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fd5\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7ae\ua7b0-\ua7b7\ua7f7-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb
-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc" | ||||||
|  | let nonASCIIidentifierChars = "\u200c\u200d\xb7\u0300-\u036f\u0387\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u064b-\u0669\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7\u06e8\u06ea-\u06ed\u06f0-\u06f9\u0711\u0730-\u074a\u07a6-\u07b0\u07c0-\u07c9\u07eb-\u07f3\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d4-\u08e1\u08e3-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09cb-\u09cd\u09d7\u09e2\u09e3\u09e6-\u09ef\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b62\u0b63\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c00-\u0c03\u0c3e-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62\u0c63\u0c66-\u0c6f\u0c81-\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2\u0ce3\u0ce6-\u0cef\u0d01-\u0d03\u0d3e-\u0d44\u0d46-\u0d48\u0d4a-\u0d4d\u0d57\u0d62\u0d63\u0d66-\u0d6f\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2\u0df3\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0e50-\u0e59\u0eb1\u0eb4-\u0eb9\u0ebb\u0ebc\u0ec8-\u0ecd\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e\u0f3f\u0f71-\u0f84\u0f86\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102b-\u103e\u1040-\u1049\u1056-\u1059\u105e-\u1060\u1062-\u1064\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f-\u109d\u135d-\u135f\u1369-\u1371\u1712-\u1714\u1732-\u1734\u1752\u1753\u1772\u1773\u17b4-\u17d3\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u18a9\u1920-\u192b\u1930-\u193b\u1946-\u194f\u19d0-\u19da\u1a17-\u1a1b\u1a55-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1ab0-\u1abd\u1b00-\u1b04\u1b34-\u1b44\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1b82\u1ba1-\u1bad\u1bb0-\u1bb9\u1be6-\u1bf3\u1c24-\u1c37\u1c40-\u1c49\u1c50-\u1c59\u1cd0-\u1cd2\u1cd4-\u1ce8\u1ced\u1cf2-\u1cf4\u1cf8\u1cf9\u1d
c0-\u1df5\u1dfb-\u1dff\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302f\u3099\u309a\ua620-\ua629\ua66f\ua674-\ua67d\ua69e\ua69f\ua6f0\ua6f1\ua802\ua806\ua80b\ua823-\ua827\ua880\ua881\ua8b4-\ua8c5\ua8d0-\ua8d9\ua8e0-\ua8f1\ua900-\ua909\ua926-\ua92d\ua947-\ua953\ua980-\ua983\ua9b3-\ua9c0\ua9d0-\ua9d9\ua9e5\ua9f0-\ua9f9\uaa29-\uaa36\uaa43\uaa4c\uaa4d\uaa50-\uaa59\uaa7b-\uaa7d\uaab0\uaab2-\uaab4\uaab7\uaab8\uaabe\uaabf\uaac1\uaaeb-\uaaef\uaaf5\uaaf6\uabe3-\uabea\uabec\uabed\uabf0-\uabf9\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f" | ||||||
|  |  | ||||||
// Compile the character tables into single-character-class regexps,
// then drop the (large) source strings so they can be garbage-collected.
const nonASCIIidentifierStart = new RegExp("[" + nonASCIIidentifierStartChars + "]")
const nonASCIIidentifier = new RegExp("[" + nonASCIIidentifierStartChars + nonASCIIidentifierChars + "]")

nonASCIIidentifierStartChars = nonASCIIidentifierChars = null
|  |  | ||||||
|  | // These are a run-length and offset encoded representation of the | ||||||
|  | // >0xffff code points that are a valid part of identifiers. The | ||||||
|  | // offset starts at 0x10000, and each pair of numbers represents an | ||||||
|  | // offset to the next range, and then a size of the range. They were | ||||||
|  | // generated by bin/generate-identifier-regex.js | ||||||
|  | const astralIdentifierStartCodes = [0,11,2,25,2,18,2,1,2,14,3,13,35,122,70,52,268,28,4,48,48,31,17,26,6,37,11,29,3,35,5,7,2,4,43,157,19,35,5,35,5,39,9,51,157,310,10,21,11,7,153,5,3,0,2,43,2,1,4,0,3,22,11,22,10,30,66,18,2,1,11,21,11,25,71,55,7,1,65,0,16,3,2,2,2,26,45,28,4,28,36,7,2,27,28,53,11,21,11,18,14,17,111,72,56,50,14,50,785,52,76,44,33,24,27,35,42,34,4,0,13,47,15,3,22,0,2,0,36,17,2,24,85,6,2,0,2,3,2,14,2,9,8,46,39,7,3,1,3,21,2,6,2,1,2,4,4,0,19,0,13,4,159,52,19,3,54,47,21,1,2,0,185,46,42,3,37,47,21,0,60,42,86,25,391,63,32,0,449,56,264,8,2,36,18,0,50,29,881,921,103,110,18,195,2749,1070,4050,582,8634,568,8,30,114,29,19,47,17,3,32,20,6,18,881,68,12,0,67,12,65,0,32,6124,20,754,9486,1,3071,106,6,12,4,8,8,9,5991,84,2,70,2,1,3,0,3,1,3,3,2,11,2,0,2,6,2,64,2,3,3,7,2,6,2,27,2,3,2,4,2,0,4,6,2,339,3,24,2,24,2,30,2,24,2,30,2,24,2,30,2,24,2,30,2,24,2,7,4149,196,60,67,1213,3,2,26,2,1,2,0,3,0,2,9,2,3,2,0,2,0,7,0,5,0,2,0,2,0,2,2,2,1,2,0,3,0,2,0,2,0,2,0,2,0,2,1,2,0,3,3,2,6,2,3,2,3,2,0,2,9,2,16,6,2,2,4,2,16,4421,42710,42,4148,12,221,3,5761,10591,541] | ||||||
|  | const astralIdentifierCodes = [509,0,227,0,150,4,294,9,1368,2,2,1,6,3,41,2,5,0,166,1,1306,2,54,14,32,9,16,3,46,10,54,9,7,2,37,13,2,9,52,0,13,2,49,13,10,2,4,9,83,11,7,0,161,11,6,9,7,3,57,0,2,6,3,1,3,2,10,0,11,1,3,6,4,4,193,17,10,9,87,19,13,9,214,6,3,8,28,1,83,16,16,9,82,12,9,9,84,14,5,9,423,9,838,7,2,7,17,9,57,21,2,13,19882,9,135,4,60,6,26,9,1016,45,17,3,19723,1,5319,4,4,5,9,7,3,6,31,3,149,2,1418,49,513,54,5,49,9,0,15,0,23,4,2,14,1361,6,2,16,3,6,2,1,2,4,2214,6,110,6,6,9,792487,239] | ||||||
|  |  | ||||||
// This has a complexity linear to the value of the code. The
// assumption is that looking up astral identifier characters is
// rare.
function isInAstralSet(code, set) {
  // `set` is a run-length encoding: pairs of (gap, range length),
  // with positions starting at U+10000.
  let rangeStart = 0x10000
  for (let idx = 0; idx < set.length; idx += 2) {
    rangeStart += set[idx]
    if (rangeStart > code) return false
    rangeStart += set[idx + 1]
    if (rangeStart >= code) return true
  }
  // Falls through (returning undefined) when the set is exhausted.
}
|  |  | ||||||
// Test whether a given character code starts an identifier.
export function isIdentifierStart(code, astral) {
  // ASCII fast path: `$`, `_`, and Latin letters.
  if (code < 0xaa) {
    return code === 36 || code === 95 ||
      (code >= 65 && code < 91) || (code >= 97 && code < 123)
  }
  // Other BMP characters go through the precompiled character class.
  if (code <= 0xffff) return nonASCIIidentifierStart.test(String.fromCharCode(code))
  // Astral plane: consult the encoded tables unless explicitly disabled.
  if (astral === false) return false
  return isInAstralSet(code, astralIdentifierStartCodes)
}
|  |  | ||||||
// Test whether a given character is part of an identifier.
export function isIdentifierChar(code, astral) {
  // ASCII fast path: `$`, `_`, digits, and Latin letters.
  if (code < 0xaa) {
    return code === 36 || code === 95 ||
      (code >= 48 && code < 58) ||
      (code >= 65 && code < 91) || (code >= 97 && code < 123)
  }
  // Other BMP characters go through the precompiled character class.
  if (code <= 0xffff) return nonASCIIidentifier.test(String.fromCharCode(code))
  // Astral plane: a continuation char may come from either table.
  if (astral === false) return false
  return isInAstralSet(code, astralIdentifierStartCodes) || isInAstralSet(code, astralIdentifierCodes)
}
							
								
								
									
										77
									
								
								Source/node_modules/acorn/src/index.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										77
									
								
								Source/node_modules/acorn/src/index.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,77 @@ | |||||||
|  | // Acorn is a tiny, fast JavaScript parser written in JavaScript. | ||||||
|  | // | ||||||
|  | // Acorn was written by Marijn Haverbeke, Ingvar Stepanyan, and | ||||||
|  | // various contributors and released under an MIT license. | ||||||
|  | // | ||||||
|  | // Git repositories for Acorn are available at | ||||||
|  | // | ||||||
|  | //     http://marijnhaverbeke.nl/git/acorn | ||||||
|  | //     https://github.com/ternjs/acorn.git | ||||||
|  | // | ||||||
|  | // Please use the [github bug tracker][ghbt] to report issues. | ||||||
|  | // | ||||||
|  | // [ghbt]: https://github.com/ternjs/acorn/issues | ||||||
|  | // | ||||||
|  | // This file defines the main parser interface. The library also comes | ||||||
|  | // with a [error-tolerant parser][dammit] and an | ||||||
|  | // [abstract syntax tree walker][walk], defined in other files. | ||||||
|  | // | ||||||
|  | // [dammit]: acorn_loose.js | ||||||
|  | // [walk]: util/walk.js | ||||||
|  |  | ||||||
|  | import {Parser} from "./state" | ||||||
|  | import "./parseutil" | ||||||
|  | import "./statement" | ||||||
|  | import "./lval" | ||||||
|  | import "./expression" | ||||||
|  | import "./location" | ||||||
|  |  | ||||||
|  | export {Parser, plugins} from "./state" | ||||||
|  | export {defaultOptions} from "./options" | ||||||
|  | export {Position, SourceLocation, getLineInfo} from "./locutil" | ||||||
|  | export {Node} from "./node" | ||||||
|  | export {TokenType, types as tokTypes, keywords as keywordTypes} from "./tokentype" | ||||||
|  | export {TokContext, types as tokContexts} from "./tokencontext" | ||||||
|  | export {isIdentifierChar, isIdentifierStart} from "./identifier" | ||||||
|  | export {Token} from "./tokenize" | ||||||
|  | export {isNewLine, lineBreak, lineBreakG} from "./whitespace" | ||||||
|  |  | ||||||
// Acorn's own semver version string, exposed on the public API.
export const version = "4.0.11"
|  |  | ||||||
|  | // The main exported interface (under `self.acorn` when in the | ||||||
|  | // browser) is a `parse` function that takes a code string and | ||||||
|  | // returns an abstract syntax tree as specified by [Mozilla parser | ||||||
|  | // API][api]. | ||||||
|  | // | ||||||
|  | // [api]: https://developer.mozilla.org/en-US/docs/SpiderMonkey/Parser_API | ||||||
|  |  | ||||||
// Parse `input` as a whole program and return its AST.
export function parse(input, options) {
  const parser = new Parser(options, input)
  return parser.parse()
}
|  |  | ||||||
|  | // This function tries to parse a single expression at a given | ||||||
|  | // offset in a string. Useful for parsing mixed-language formats | ||||||
|  | // that embed JavaScript expressions. | ||||||
|  |  | ||||||
// Parse a single expression starting at offset `pos` in `input`.
// Useful for mixed-language formats that embed JavaScript.
export function parseExpressionAt(input, pos, options) {
  const parser = new Parser(options, input, pos)
  parser.nextToken()
  return parser.parseExpression()
}
|  |  | ||||||
|  | // Acorn is organized as a tokenizer and a recursive-descent parser. | ||||||
|  | // The `tokenizer` export provides an interface to the tokenizer. | ||||||
|  |  | ||||||
// Expose the tokenizer: the returned Parser doubles as a token stream.
export function tokenizer(input, options) {
  const parser = new Parser(options, input)
  return parser
}
|  |  | ||||||
|  | // This is a terrible kludge to support the existing, pre-ES6 | ||||||
|  | // interface where the loose parser module retroactively adds exports | ||||||
|  | // to this module. | ||||||
// Bindings that the loose-parser module fills in at load time via
// addLooseExports (pre-ES6 interface compatibility).
export let parse_dammit, LooseParser, pluginsLoose
export function addLooseExports(parse, Parser, plugins) {
  parse_dammit = parse
  LooseParser = Parser
  pluginsLoose = plugins
}
							
								
								
									
										26
									
								
								Source/node_modules/acorn/src/location.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										26
									
								
								Source/node_modules/acorn/src/location.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,26 @@ | |||||||
|  | import {Parser} from "./state" | ||||||
|  | import {Position, getLineInfo} from "./locutil" | ||||||
|  |  | ||||||
|  | const pp = Parser.prototype | ||||||
|  |  | ||||||
|  | // This function is used to raise exceptions on parse errors. It | ||||||
|  | // takes an offset integer (into the current `input`) to indicate | ||||||
|  | // the location of the error, attaches the position to the end | ||||||
|  | // of the error message, and then raises a `SyntaxError` with that | ||||||
|  | // message. | ||||||
|  |  | ||||||
// Raise a SyntaxError for a parse problem at offset `pos`, appending
// "(line:column)" to the message and attaching `pos`, `loc`, and
// `raisedAt` (the parser's current offset) to the error object.
pp.raise = function(pos, message) {
  const loc = getLineInfo(this.input, pos)
  const err = new SyntaxError(message + " (" + loc.line + ":" + loc.column + ")")
  err.pos = pos
  err.loc = loc
  err.raisedAt = this.pos
  throw err
}

// By default recoverable errors are fatal too; subclasses (e.g. the
// loose parser) may override this to keep going.
pp.raiseRecoverable = pp.raise
|  |  | ||||||
// Current (line, column) position — only available when the
// `locations` option is on; otherwise returns undefined.
pp.curPosition = function() {
  if (!this.options.locations) return
  return new Position(this.curLine, this.pos - this.lineStart)
}
							
								
								
									
										42
									
								
								Source/node_modules/acorn/src/locutil.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										42
									
								
								Source/node_modules/acorn/src/locutil.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,42 @@ | |||||||
|  | import {lineBreakG} from "./whitespace" | ||||||
|  |  | ||||||
|  | // These are used when `options.locations` is on, for the | ||||||
|  | // `startLoc` and `endLoc` properties. | ||||||
|  |  | ||||||
// A (line, column) pair used for the `startLoc`/`endLoc` properties.
// Lines are 1-based, columns 0-based.
export class Position {
  constructor(line, column) {
    this.line = line
    this.column = column
  }

  // Returns a new Position shifted `delta` columns on the same line;
  // the receiver is left untouched.
  offset(delta) {
    return new Position(this.line, this.column + delta)
  }
}
|  |  | ||||||
// A start/end Position pair, optionally tagged with the source file.
export class SourceLocation {
  // `p` is the parser (anything with a `sourceFile` property).
  constructor(p, start, end) {
    this.start = start
    this.end = end
    // Only attach `source` when a file name was configured, keeping
    // locations lightweight for anonymous input.
    const file = p.sourceFile
    if (file !== null) this.source = file
  }
}
|  |  | ||||||
|  | // The `getLineInfo` function is mostly useful when the | ||||||
|  | // `locations` option is off (for performance reasons) and you | ||||||
|  | // want to find the line/column position for a given character | ||||||
|  | // offset. `input` should be the code string that the offset refers | ||||||
|  | // into. | ||||||
|  |  | ||||||
// Compute the (line, column) Position for character `offset` in
// `input`, by scanning line breaks from the start of the string.
export function getLineInfo(input, offset) {
  let line = 1
  let lineStart = 0
  for (;;) {
    // Resume the (global) line-break regexp at the current line start.
    lineBreakG.lastIndex = lineStart
    const match = lineBreakG.exec(input)
    if (!match || match.index >= offset) {
      return new Position(line, offset - lineStart)
    }
    ++line
    lineStart = match.index + match[0].length
  }
}
							
								
								
									
										562
									
								
								Source/node_modules/acorn/src/loose/expression.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										562
									
								
								Source/node_modules/acorn/src/loose/expression.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,562 @@ | |||||||
|  | import {LooseParser} from "./state" | ||||||
|  | import {isDummy} from "./parseutil" | ||||||
|  | import {tokTypes as tt} from "../index" | ||||||
|  |  | ||||||
|  | const lp = LooseParser.prototype | ||||||
|  |  | ||||||
// Error-tolerant lvalue check: valid targets pass through, anything
// else is replaced by a dummy identifier instead of raising.
lp.checkLVal = function(expr) {
  if (!expr) return expr
  const kind = expr.type
  if (kind === "Identifier" || kind === "MemberExpression") return expr
  if (kind === "ParenthesizedExpression") {
    // Check (and possibly replace) the wrapped expression in place.
    expr.expression = this.checkLVal(expr.expression)
    return expr
  }
  return this.dummyIdent()
}
|  |  | ||||||
// Parse a full (possibly comma-separated) expression; a comma after
// the first operand turns the result into a SequenceExpression.
lp.parseExpression = function(noIn) {
  const start = this.storeCurrentPos()
  const first = this.parseMaybeAssign(noIn)
  if (this.tok.type !== tt.comma) return first
  const node = this.startNodeAt(start)
  node.expressions = [first]
  while (this.eat(tt.comma)) {
    node.expressions.push(this.parseMaybeAssign(noIn))
  }
  return this.finishNode(node, "SequenceExpression")
}
|  |  | ||||||
// Parse `( expression )`, bracketing it in a fresh context via
// pushCx/popCx so the loose parser treats the region as a unit.
lp.parseParenExpression = function() {
  this.pushCx()
  this.expect(tt.parenL)
  const inner = this.parseExpression()
  this.popCx()
  this.expect(tt.parenR)
  return inner
}
|  |  | ||||||
lp.parseMaybeAssign = function(noIn) {
  // `yield` is handled here because a yield expression may only appear
  // where an assignment-level expression could.
  if (this.toks.isContextual("yield")) {
    let node = this.startNode()
    this.next()
    // Bare `yield`: followed by a terminator, an ASI opportunity, or a
    // token that cannot start an expression.
    if (this.semicolon() || this.canInsertSemicolon() || (this.tok.type != tt.star && !this.tok.type.startsExpr)) {
      node.delegate = false
      node.argument = null
    } else {
      // `yield*` delegates to another iterable.
      node.delegate = this.eat(tt.star)
      node.argument = this.parseMaybeAssign()
    }
    return this.finishNode(node, "YieldExpression")
  }

  let start = this.storeCurrentPos()
  let left = this.parseMaybeConditional(noIn)
  if (this.tok.type.isAssign) {
    let node = this.startNodeAt(start)
    node.operator = this.tok.value
    // Plain `=` converts the left side into a destructuring pattern;
    // compound assignments only accept a simple lvalue.
    node.left = this.tok.type === tt.eq ? this.toAssignable(left) : this.checkLVal(left)
    this.next()
    node.right = this.parseMaybeAssign(noIn)
    return this.finishNode(node, "AssignmentExpression")
  }
  return left
}
|  |  | ||||||
// Parse a ternary (`test ? consequent : alternate`) if a `?` follows
// the operator expression; otherwise return that expression as-is.
lp.parseMaybeConditional = function(noIn) {
  const start = this.storeCurrentPos()
  const test = this.parseExprOps(noIn)
  if (!this.eat(tt.question)) return test
  const node = this.startNodeAt(start)
  node.test = test
  node.consequent = this.parseMaybeAssign()
  // A missing `:` yields a dummy alternate rather than an error.
  if (this.expect(tt.colon)) {
    node.alternate = this.parseMaybeAssign(noIn)
  } else {
    node.alternate = this.dummyIdent()
  }
  return this.finishNode(node, "ConditionalExpression")
}
|  |  | ||||||
// Entry point for binary-operator parsing.
lp.parseExprOps = function(noIn) {
  const start = this.storeCurrentPos()
  // Sample indentation *before* consuming tokens: the loose parser
  // uses it to decide when a dedented line ends the expression.
  const indent = this.curIndent
  const line = this.curLineStart
  const firstOperand = this.parseMaybeUnary(false)
  return this.parseExprOp(firstOperand, start, -1, noIn, indent, line)
}
|  |  | ||||||
// Precedence-climbing parser for binary/logical operators: combines
// operands while the next operator binds tighter than `minPrec`, or
// until a dedented new line (loose-parser heuristic) ends the
// expression.
lp.parseExprOp = function(left, start, minPrec, noIn, indent, line) {
  if (this.curLineStart != line && this.curIndent < indent && this.tokenStartsLine()) return left
  let prec = this.tok.type.binop
  // `in` is skipped as an operator when `noIn` is set (for-loop heads).
  if (prec != null && (!noIn || this.tok.type !== tt._in)) {
    if (prec > minPrec) {
      let node = this.startNodeAt(start)
      node.left = left
      node.operator = this.tok.value
      this.next()
      // If the right operand would start on a dedented line, assume it
      // is missing and substitute a dummy identifier.
      if (this.curLineStart != line && this.curIndent < indent && this.tokenStartsLine()) {
        node.right = this.dummyIdent()
      } else {
        let rightStart = this.storeCurrentPos()
        node.right = this.parseExprOp(this.parseMaybeUnary(false), rightStart, prec, noIn, indent, line)
      }
      this.finishNode(node, /&&|\|\|/.test(node.operator) ? "LogicalExpression" : "BinaryExpression")
      // Keep climbing with the combined node as the new left operand.
      return this.parseExprOp(node, start, minPrec, noIn, indent, line)
    }
  }
  return left
}
|  |  | ||||||
lp.parseMaybeUnary = function(sawUnary) {
  let start = this.storeCurrentPos(), expr
  // `await` acts as a unary operator only inside async functions (ES2017+).
  if (this.options.ecmaVersion >= 8 && this.inAsync && this.toks.isContextual("await")) {
    expr = this.parseAwait()
    sawUnary = true
  } else if (this.tok.type.prefix) {
    // Prefix operators, including ++/-- updates (which need an lvalue).
    let node = this.startNode(), update = this.tok.type === tt.incDec
    if (!update) sawUnary = true
    node.operator = this.tok.value
    node.prefix = true
    this.next()
    node.argument = this.parseMaybeUnary(true)
    if (update) node.argument = this.checkLVal(node.argument)
    expr = this.finishNode(node, update ? "UpdateExpression" : "UnaryExpression")
  } else if (this.tok.type === tt.ellipsis) {
    // Spread element: `...expr`.
    let node = this.startNode()
    this.next()
    node.argument = this.parseMaybeUnary(sawUnary)
    expr = this.finishNode(node, "SpreadElement")
  } else {
    expr = this.parseExprSubscripts()
    // Postfix ++/-- bind only when no newline would insert a semicolon.
    while (this.tok.type.postfix && !this.canInsertSemicolon()) {
      let node = this.startNodeAt(start)
      node.operator = this.tok.value
      node.prefix = false
      node.argument = this.checkLVal(expr)
      this.next()
      expr = this.finishNode(node, "UpdateExpression")
    }
  }

  // `**` may not directly follow a unary operator — hence the
  // `sawUnary` guard — and recurses on the right for associativity.
  if (!sawUnary && this.eat(tt.starstar)) {
    let node = this.startNodeAt(start)
    node.operator = "**"
    node.left = expr
    node.right = this.parseMaybeUnary(false)
    return this.finishNode(node, "BinaryExpression")
  }

  return expr
}
|  |  | ||||||
|  | lp.parseExprSubscripts = function() { | ||||||
|  |   let start = this.storeCurrentPos() | ||||||
|  |   return this.parseSubscripts(this.parseExprAtom(), start, false, this.curIndent, this.curLineStart) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseSubscripts = function(base, start, noCalls, startIndent, line) { | ||||||
|  |   for (;;) { | ||||||
|  |     if (this.curLineStart != line && this.curIndent <= startIndent && this.tokenStartsLine()) { | ||||||
|  |       if (this.tok.type == tt.dot && this.curIndent == startIndent) | ||||||
|  |         --startIndent | ||||||
|  |       else | ||||||
|  |         return base | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     let maybeAsyncArrow = base.type === "Identifier" && base.name === "async" && !this.canInsertSemicolon() | ||||||
|  |  | ||||||
|  |     if (this.eat(tt.dot)) { | ||||||
|  |       let node = this.startNodeAt(start) | ||||||
|  |       node.object = base | ||||||
|  |       if (this.curLineStart != line && this.curIndent <= startIndent && this.tokenStartsLine()) | ||||||
|  |         node.property = this.dummyIdent() | ||||||
|  |       else | ||||||
|  |         node.property = this.parsePropertyAccessor() || this.dummyIdent() | ||||||
|  |       node.computed = false | ||||||
|  |       base = this.finishNode(node, "MemberExpression") | ||||||
|  |     } else if (this.tok.type == tt.bracketL) { | ||||||
|  |       this.pushCx() | ||||||
|  |       this.next() | ||||||
|  |       let node = this.startNodeAt(start) | ||||||
|  |       node.object = base | ||||||
|  |       node.property = this.parseExpression() | ||||||
|  |       node.computed = true | ||||||
|  |       this.popCx() | ||||||
|  |       this.expect(tt.bracketR) | ||||||
|  |       base = this.finishNode(node, "MemberExpression") | ||||||
|  |     } else if (!noCalls && this.tok.type == tt.parenL) { | ||||||
|  |       let exprList = this.parseExprList(tt.parenR) | ||||||
|  |       if (maybeAsyncArrow && this.eat(tt.arrow)) | ||||||
|  |         return this.parseArrowExpression(this.startNodeAt(start), exprList, true) | ||||||
|  |       let node = this.startNodeAt(start) | ||||||
|  |       node.callee = base | ||||||
|  |       node.arguments = exprList | ||||||
|  |       base = this.finishNode(node, "CallExpression") | ||||||
|  |     } else if (this.tok.type == tt.backQuote) { | ||||||
|  |       let node = this.startNodeAt(start) | ||||||
|  |       node.tag = base | ||||||
|  |       node.quasi = this.parseTemplate() | ||||||
|  |       base = this.finishNode(node, "TaggedTemplateExpression") | ||||||
|  |     } else { | ||||||
|  |       return base | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseExprAtom = function() { | ||||||
|  |   let node | ||||||
|  |   switch (this.tok.type) { | ||||||
|  |   case tt._this: | ||||||
|  |   case tt._super: | ||||||
|  |     let type = this.tok.type === tt._this ? "ThisExpression" : "Super" | ||||||
|  |     node = this.startNode() | ||||||
|  |     this.next() | ||||||
|  |     return this.finishNode(node, type) | ||||||
|  |  | ||||||
|  |   case tt.name: | ||||||
|  |     let start = this.storeCurrentPos() | ||||||
|  |     let id = this.parseIdent() | ||||||
|  |     let isAsync = false | ||||||
|  |     if (id.name === "async" && !this.canInsertSemicolon()) { | ||||||
|  |       if (this.eat(tt._function)) | ||||||
|  |         return this.parseFunction(this.startNodeAt(start), false, true) | ||||||
|  |       if (this.tok.type === tt.name) { | ||||||
|  |         id = this.parseIdent() | ||||||
|  |         isAsync = true | ||||||
|  |       } | ||||||
|  |     } | ||||||
|  |     return this.eat(tt.arrow) ? this.parseArrowExpression(this.startNodeAt(start), [id], isAsync) : id | ||||||
|  |  | ||||||
|  |   case tt.regexp: | ||||||
|  |     node = this.startNode() | ||||||
|  |     let val = this.tok.value | ||||||
|  |     node.regex = {pattern: val.pattern, flags: val.flags} | ||||||
|  |     node.value = val.value | ||||||
|  |     node.raw = this.input.slice(this.tok.start, this.tok.end) | ||||||
|  |     this.next() | ||||||
|  |     return this.finishNode(node, "Literal") | ||||||
|  |  | ||||||
|  |   case tt.num: case tt.string: | ||||||
|  |     node = this.startNode() | ||||||
|  |     node.value = this.tok.value | ||||||
|  |     node.raw = this.input.slice(this.tok.start, this.tok.end) | ||||||
|  |     this.next() | ||||||
|  |     return this.finishNode(node, "Literal") | ||||||
|  |  | ||||||
|  |   case tt._null: case tt._true: case tt._false: | ||||||
|  |     node = this.startNode() | ||||||
|  |     node.value = this.tok.type === tt._null ? null : this.tok.type === tt._true | ||||||
|  |     node.raw = this.tok.type.keyword | ||||||
|  |     this.next() | ||||||
|  |     return this.finishNode(node, "Literal") | ||||||
|  |  | ||||||
|  |   case tt.parenL: | ||||||
|  |     let parenStart = this.storeCurrentPos() | ||||||
|  |     this.next() | ||||||
|  |     let inner = this.parseExpression() | ||||||
|  |     this.expect(tt.parenR) | ||||||
|  |     if (this.eat(tt.arrow)) { | ||||||
|  |       // (a,)=>a // SequenceExpression makes dummy in the last hole. Drop the dummy. | ||||||
|  |       let params = inner.expressions || [inner] | ||||||
|  |       if (params.length && isDummy(params[params.length - 1])) | ||||||
|  |         params.pop() | ||||||
|  |       return this.parseArrowExpression(this.startNodeAt(parenStart), params) | ||||||
|  |     } | ||||||
|  |     if (this.options.preserveParens) { | ||||||
|  |       let par = this.startNodeAt(parenStart) | ||||||
|  |       par.expression = inner | ||||||
|  |       inner = this.finishNode(par, "ParenthesizedExpression") | ||||||
|  |     } | ||||||
|  |     return inner | ||||||
|  |  | ||||||
|  |   case tt.bracketL: | ||||||
|  |     node = this.startNode() | ||||||
|  |     node.elements = this.parseExprList(tt.bracketR, true) | ||||||
|  |     return this.finishNode(node, "ArrayExpression") | ||||||
|  |  | ||||||
|  |   case tt.braceL: | ||||||
|  |     return this.parseObj() | ||||||
|  |  | ||||||
|  |   case tt._class: | ||||||
|  |     return this.parseClass(false) | ||||||
|  |  | ||||||
|  |   case tt._function: | ||||||
|  |     node = this.startNode() | ||||||
|  |     this.next() | ||||||
|  |     return this.parseFunction(node, false) | ||||||
|  |  | ||||||
|  |   case tt._new: | ||||||
|  |     return this.parseNew() | ||||||
|  |  | ||||||
|  |   case tt.backQuote: | ||||||
|  |     return this.parseTemplate() | ||||||
|  |  | ||||||
|  |   default: | ||||||
|  |     return this.dummyIdent() | ||||||
|  |   } | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseNew = function() { | ||||||
|  |   let node = this.startNode(), startIndent = this.curIndent, line = this.curLineStart | ||||||
|  |   let meta = this.parseIdent(true) | ||||||
|  |   if (this.options.ecmaVersion >= 6 && this.eat(tt.dot)) { | ||||||
|  |     node.meta = meta | ||||||
|  |     node.property = this.parseIdent(true) | ||||||
|  |     return this.finishNode(node, "MetaProperty") | ||||||
|  |   } | ||||||
|  |   let start = this.storeCurrentPos() | ||||||
|  |   node.callee = this.parseSubscripts(this.parseExprAtom(), start, true, startIndent, line) | ||||||
|  |   if (this.tok.type == tt.parenL) { | ||||||
|  |     node.arguments = this.parseExprList(tt.parenR) | ||||||
|  |   } else { | ||||||
|  |     node.arguments = [] | ||||||
|  |   } | ||||||
|  |   return this.finishNode(node, "NewExpression") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseTemplateElement = function() { | ||||||
|  |   let elem = this.startNode() | ||||||
|  |   elem.value = { | ||||||
|  |     raw: this.input.slice(this.tok.start, this.tok.end).replace(/\r\n?/g, '\n'), | ||||||
|  |     cooked: this.tok.value | ||||||
|  |   } | ||||||
|  |   this.next() | ||||||
|  |   elem.tail = this.tok.type === tt.backQuote | ||||||
|  |   return this.finishNode(elem, "TemplateElement") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseTemplate = function() { | ||||||
|  |   let node = this.startNode() | ||||||
|  |   this.next() | ||||||
|  |   node.expressions = [] | ||||||
|  |   let curElt = this.parseTemplateElement() | ||||||
|  |   node.quasis = [curElt] | ||||||
|  |   while (!curElt.tail) { | ||||||
|  |     this.next() | ||||||
|  |     node.expressions.push(this.parseExpression()) | ||||||
|  |     if (this.expect(tt.braceR)) { | ||||||
|  |       curElt = this.parseTemplateElement() | ||||||
|  |     } else { | ||||||
|  |       curElt = this.startNode() | ||||||
|  |       curElt.value = {cooked: '', raw: ''} | ||||||
|  |       curElt.tail = true | ||||||
|  |       this.finishNode(curElt, "TemplateElement") | ||||||
|  |     } | ||||||
|  |     node.quasis.push(curElt) | ||||||
|  |   } | ||||||
|  |   this.expect(tt.backQuote) | ||||||
|  |   return this.finishNode(node, "TemplateLiteral") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseObj = function() { | ||||||
|  |   let node = this.startNode() | ||||||
|  |   node.properties = [] | ||||||
|  |   this.pushCx() | ||||||
|  |   let indent = this.curIndent + 1, line = this.curLineStart | ||||||
|  |   this.eat(tt.braceL) | ||||||
|  |   if (this.curIndent + 1 < indent) { indent = this.curIndent; line = this.curLineStart } | ||||||
|  |   while (!this.closes(tt.braceR, indent, line)) { | ||||||
|  |     let prop = this.startNode(), isGenerator, isAsync, start | ||||||
|  |     if (this.options.ecmaVersion >= 6) { | ||||||
|  |       start = this.storeCurrentPos() | ||||||
|  |       prop.method = false | ||||||
|  |       prop.shorthand = false | ||||||
|  |       isGenerator = this.eat(tt.star) | ||||||
|  |     } | ||||||
|  |     this.parsePropertyName(prop) | ||||||
|  |     if (!prop.computed && | ||||||
|  |         prop.key.type === "Identifier" && prop.key.name === "async" && this.tok.type !== tt.parenL && | ||||||
|  |         this.tok.type !== tt.colon && !this.canInsertSemicolon()) { | ||||||
|  |       this.parsePropertyName(prop) | ||||||
|  |       isAsync = true | ||||||
|  |     } else { | ||||||
|  |       isAsync = false | ||||||
|  |     } | ||||||
|  |     if (isDummy(prop.key)) { if (isDummy(this.parseMaybeAssign())) this.next(); this.eat(tt.comma); continue } | ||||||
|  |     if (this.eat(tt.colon)) { | ||||||
|  |       prop.kind = "init" | ||||||
|  |       prop.value = this.parseMaybeAssign() | ||||||
|  |     } else if (this.options.ecmaVersion >= 6 && (this.tok.type === tt.parenL || this.tok.type === tt.braceL)) { | ||||||
|  |       prop.kind = "init" | ||||||
|  |       prop.method = true | ||||||
|  |       prop.value = this.parseMethod(isGenerator, isAsync) | ||||||
|  |     } else if (this.options.ecmaVersion >= 5 && prop.key.type === "Identifier" && | ||||||
|  |                !prop.computed && (prop.key.name === "get" || prop.key.name === "set") && | ||||||
|  |                (this.tok.type != tt.comma && this.tok.type != tt.braceR)) { | ||||||
|  |       prop.kind = prop.key.name | ||||||
|  |       this.parsePropertyName(prop) | ||||||
|  |       prop.value = this.parseMethod(false) | ||||||
|  |     } else { | ||||||
|  |       prop.kind = "init" | ||||||
|  |       if (this.options.ecmaVersion >= 6) { | ||||||
|  |         if (this.eat(tt.eq)) { | ||||||
|  |           let assign = this.startNodeAt(start) | ||||||
|  |           assign.operator = "=" | ||||||
|  |           assign.left = prop.key | ||||||
|  |           assign.right = this.parseMaybeAssign() | ||||||
|  |           prop.value = this.finishNode(assign, "AssignmentExpression") | ||||||
|  |         } else { | ||||||
|  |           prop.value = prop.key | ||||||
|  |         } | ||||||
|  |       } else { | ||||||
|  |         prop.value = this.dummyIdent() | ||||||
|  |       } | ||||||
|  |       prop.shorthand = true | ||||||
|  |     } | ||||||
|  |     node.properties.push(this.finishNode(prop, "Property")) | ||||||
|  |     this.eat(tt.comma) | ||||||
|  |   } | ||||||
|  |   this.popCx() | ||||||
|  |   if (!this.eat(tt.braceR)) { | ||||||
|  |     // If there is no closing brace, make the node span to the start | ||||||
|  |     // of the next token (this is useful for Tern) | ||||||
|  |     this.last.end = this.tok.start | ||||||
|  |     if (this.options.locations) this.last.loc.end = this.tok.loc.start | ||||||
|  |   } | ||||||
|  |   return this.finishNode(node, "ObjectExpression") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parsePropertyName = function(prop) { | ||||||
|  |   if (this.options.ecmaVersion >= 6) { | ||||||
|  |     if (this.eat(tt.bracketL)) { | ||||||
|  |       prop.computed = true | ||||||
|  |       prop.key = this.parseExpression() | ||||||
|  |       this.expect(tt.bracketR) | ||||||
|  |       return | ||||||
|  |     } else { | ||||||
|  |       prop.computed = false | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |   let key = (this.tok.type === tt.num || this.tok.type === tt.string) ? this.parseExprAtom() : this.parseIdent() | ||||||
|  |   prop.key = key || this.dummyIdent() | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parsePropertyAccessor = function() { | ||||||
|  |   if (this.tok.type === tt.name || this.tok.type.keyword) return this.parseIdent() | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseIdent = function() { | ||||||
|  |   let name = this.tok.type === tt.name ? this.tok.value : this.tok.type.keyword | ||||||
|  |   if (!name) return this.dummyIdent() | ||||||
|  |   let node = this.startNode() | ||||||
|  |   this.next() | ||||||
|  |   node.name = name | ||||||
|  |   return this.finishNode(node, "Identifier") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.initFunction = function(node) { | ||||||
|  |   node.id = null | ||||||
|  |   node.params = [] | ||||||
|  |   if (this.options.ecmaVersion >= 6) { | ||||||
|  |     node.generator = false | ||||||
|  |     node.expression = false | ||||||
|  |   } | ||||||
|  |   if (this.options.ecmaVersion >= 8) | ||||||
|  |     node.async = false | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // Convert existing expression atom to assignable pattern | ||||||
|  | // if possible. | ||||||
|  |  | ||||||
|  | lp.toAssignable = function(node, binding) { | ||||||
|  |   if (!node || node.type == "Identifier" || (node.type == "MemberExpression" && !binding)) { | ||||||
|  |     // Okay | ||||||
|  |   } else if (node.type == "ParenthesizedExpression") { | ||||||
|  |     node.expression = this.toAssignable(node.expression, binding) | ||||||
|  |   } else if (this.options.ecmaVersion < 6) { | ||||||
|  |     return this.dummyIdent() | ||||||
|  |   } else if (node.type == "ObjectExpression") { | ||||||
|  |     node.type = "ObjectPattern" | ||||||
|  |     let props = node.properties | ||||||
|  |     for (let i = 0; i < props.length; i++) | ||||||
|  |       props[i].value = this.toAssignable(props[i].value, binding) | ||||||
|  |   } else if (node.type == "ArrayExpression") { | ||||||
|  |     node.type = "ArrayPattern" | ||||||
|  |     this.toAssignableList(node.elements, binding) | ||||||
|  |   } else if (node.type == "SpreadElement") { | ||||||
|  |     node.type = "RestElement" | ||||||
|  |     node.argument = this.toAssignable(node.argument, binding) | ||||||
|  |   } else if (node.type == "AssignmentExpression") { | ||||||
|  |     node.type = "AssignmentPattern" | ||||||
|  |     delete node.operator | ||||||
|  |   } else { | ||||||
|  |     return this.dummyIdent() | ||||||
|  |   } | ||||||
|  |   return node | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.toAssignableList = function(exprList, binding) { | ||||||
|  |   for (let i = 0; i < exprList.length; i++) | ||||||
|  |     exprList[i] = this.toAssignable(exprList[i], binding) | ||||||
|  |   return exprList | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseFunctionParams = function(params) { | ||||||
|  |   params = this.parseExprList(tt.parenR) | ||||||
|  |   return this.toAssignableList(params, true) | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseMethod = function(isGenerator, isAsync) { | ||||||
|  |   let node = this.startNode(), oldInAsync = this.inAsync | ||||||
|  |   this.initFunction(node) | ||||||
|  |   if (this.options.ecmaVersion >= 6) | ||||||
|  |     node.generator = !!isGenerator | ||||||
|  |   if (this.options.ecmaVersion >= 8) | ||||||
|  |     node.async = !!isAsync | ||||||
|  |   this.inAsync = node.async | ||||||
|  |   node.params = this.parseFunctionParams() | ||||||
|  |   node.expression = this.options.ecmaVersion >= 6 && this.tok.type !== tt.braceL | ||||||
|  |   node.body = node.expression ? this.parseMaybeAssign() : this.parseBlock() | ||||||
|  |   this.inAsync = oldInAsync | ||||||
|  |   return this.finishNode(node, "FunctionExpression") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseArrowExpression = function(node, params, isAsync) { | ||||||
|  |   let oldInAsync = this.inAsync | ||||||
|  |   this.initFunction(node) | ||||||
|  |   if (this.options.ecmaVersion >= 8) | ||||||
|  |     node.async = !!isAsync | ||||||
|  |   this.inAsync = node.async | ||||||
|  |   node.params = this.toAssignableList(params, true) | ||||||
|  |   node.expression = this.tok.type !== tt.braceL | ||||||
|  |   node.body = node.expression ? this.parseMaybeAssign() : this.parseBlock() | ||||||
|  |   this.inAsync = oldInAsync | ||||||
|  |   return this.finishNode(node, "ArrowFunctionExpression") | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseExprList = function(close, allowEmpty) { | ||||||
|  |   this.pushCx() | ||||||
|  |   let indent = this.curIndent, line = this.curLineStart, elts = [] | ||||||
|  |   this.next() // Opening bracket | ||||||
|  |   while (!this.closes(close, indent + 1, line)) { | ||||||
|  |     if (this.eat(tt.comma)) { | ||||||
|  |       elts.push(allowEmpty ? null : this.dummyIdent()) | ||||||
|  |       continue | ||||||
|  |     } | ||||||
|  |     let elt = this.parseMaybeAssign() | ||||||
|  |     if (isDummy(elt)) { | ||||||
|  |       if (this.closes(close, indent, line)) break | ||||||
|  |       this.next() | ||||||
|  |     } else { | ||||||
|  |       elts.push(elt) | ||||||
|  |     } | ||||||
|  |     this.eat(tt.comma) | ||||||
|  |   } | ||||||
|  |   this.popCx() | ||||||
|  |   if (!this.eat(close)) { | ||||||
|  |     // If there is no closing brace, make the node span to the start | ||||||
|  |     // of the next token (this is useful for Tern) | ||||||
|  |     this.last.end = this.tok.start | ||||||
|  |     if (this.options.locations) this.last.loc.end = this.tok.loc.start | ||||||
|  |   } | ||||||
|  |   return elts | ||||||
|  | } | ||||||
|  |  | ||||||
|  | lp.parseAwait = function() { | ||||||
|  |   let node = this.startNode() | ||||||
|  |   this.next() | ||||||
|  |   node.argument = this.parseMaybeUnary() | ||||||
|  |   return this.finishNode(node, "AwaitExpression") | ||||||
|  | } | ||||||
							
								
								
									
										48
									
								
								Source/node_modules/acorn/src/loose/index.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										48
									
								
								Source/node_modules/acorn/src/loose/index.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,48 @@ | |||||||
|  | // Acorn: Loose parser | ||||||
|  | // | ||||||
|  | // This module provides an alternative parser (`parse_dammit`) that | ||||||
|  | // exposes that same interface as `parse`, but will try to parse | ||||||
|  | // anything as JavaScript, repairing syntax error the best it can. | ||||||
|  | // There are circumstances in which it will raise an error and give | ||||||
|  | // up, but they are very rare. The resulting AST will be a mostly | ||||||
|  | // valid JavaScript AST (as per the [Mozilla parser API][api], except | ||||||
|  | // that: | ||||||
|  | // | ||||||
|  | // - Return outside functions is allowed | ||||||
|  | // | ||||||
|  | // - Label consistency (no conflicts, break only to existing labels) | ||||||
|  | //   is not enforced. | ||||||
|  | // | ||||||
|  | // - Bogus Identifier nodes with a name of `"✖"` are inserted whenever | ||||||
|  | //   the parser got too confused to return anything meaningful. | ||||||
|  | // | ||||||
|  | // [api]: https://developer.mozilla.org/en-US/docs/SpiderMonkey/Parser_API | ||||||
|  | // | ||||||
|  | // The expected use for this is to *first* try `acorn.parse`, and only | ||||||
|  | // if that fails switch to `parse_dammit`. The loose parser might | ||||||
|  | // parse badly indented code incorrectly, so **don't** use it as | ||||||
|  | // your default parser. | ||||||
|  | // | ||||||
|  | // Quite a lot of acorn.js is duplicated here. The alternative was to | ||||||
|  | // add a *lot* of extra cruft to that file, making it less readable | ||||||
|  | // and slower. Copying and editing the code allowed me to make | ||||||
|  | // invasive changes and simplifications without creating a complicated | ||||||
|  | // tangle. | ||||||
|  |  | ||||||
|  | import {addLooseExports, defaultOptions} from "../index" | ||||||
|  | import {LooseParser, pluginsLoose} from "./state" | ||||||
|  | import "./tokenize" | ||||||
|  | import "./statement" | ||||||
|  | import "./expression" | ||||||
|  |  | ||||||
|  | export {LooseParser, pluginsLoose} from "./state" | ||||||
|  |  | ||||||
|  | defaultOptions.tabSize = 4 | ||||||
|  |  | ||||||
|  | export function parse_dammit(input, options) { | ||||||
|  |   let p = new LooseParser(input, options) | ||||||
|  |   p.next() | ||||||
|  |   return p.parseTopLevel() | ||||||
|  | } | ||||||
|  |  | ||||||
|  | addLooseExports(parse_dammit, LooseParser, pluginsLoose) | ||||||
							
								
								
									
										1
									
								
								Source/node_modules/acorn/src/loose/parseutil.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1
									
								
								Source/node_modules/acorn/src/loose/parseutil.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1 @@ | |||||||
|  | export function isDummy(node) { return node.name == "✖" } | ||||||
							
								
								
									
										161
									
								
								Source/node_modules/acorn/src/loose/state.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										161
									
								
								Source/node_modules/acorn/src/loose/state.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,161 @@ | |||||||
|  | import {tokenizer, SourceLocation, tokTypes as tt, Node, lineBreak, isNewLine} from "../index" | ||||||
|  |  | ||||||
|  | // Registered plugins | ||||||
|  | export const pluginsLoose = {} | ||||||
|  |  | ||||||
|  | export class LooseParser { | ||||||
|  |   constructor(input, options = {}) { | ||||||
|  |     this.toks = tokenizer(input, options) | ||||||
|  |     this.options = this.toks.options | ||||||
|  |     this.input = this.toks.input | ||||||
|  |     this.tok = this.last = {type: tt.eof, start: 0, end: 0} | ||||||
|  |     if (this.options.locations) { | ||||||
|  |       let here = this.toks.curPosition() | ||||||
|  |       this.tok.loc = new SourceLocation(this.toks, here, here) | ||||||
|  |     } | ||||||
|  |     this.ahead = [] // Tokens ahead | ||||||
|  |     this.context = [] // Indentation contexted | ||||||
|  |     this.curIndent = 0 | ||||||
|  |     this.curLineStart = 0 | ||||||
|  |     this.nextLineStart = this.lineEnd(this.curLineStart) + 1 | ||||||
|  |     this.inAsync = false | ||||||
|  |     // Load plugins | ||||||
|  |     this.options.pluginsLoose = options.pluginsLoose || {} | ||||||
|  |     this.loadPlugins(this.options.pluginsLoose) | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   startNode() { | ||||||
|  |     return new Node(this.toks, this.tok.start, this.options.locations ? this.tok.loc.start : null) | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   storeCurrentPos() { | ||||||
|  |     return this.options.locations ? [this.tok.start, this.tok.loc.start] : this.tok.start | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   startNodeAt(pos) { | ||||||
|  |     if (this.options.locations) { | ||||||
|  |       return new Node(this.toks, pos[0], pos[1]) | ||||||
|  |     } else { | ||||||
|  |       return new Node(this.toks, pos) | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   finishNode(node, type) { | ||||||
|  |     node.type = type | ||||||
|  |     node.end = this.last.end | ||||||
|  |     if (this.options.locations) | ||||||
|  |       node.loc.end = this.last.loc.end | ||||||
|  |     if (this.options.ranges) | ||||||
|  |       node.range[1] = this.last.end | ||||||
|  |     return node | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   dummyNode(type) { | ||||||
|  |     let dummy = this.startNode() | ||||||
|  |     dummy.type = type | ||||||
|  |     dummy.end = dummy.start | ||||||
|  |     if (this.options.locations) | ||||||
|  |       dummy.loc.end = dummy.loc.start | ||||||
|  |     if (this.options.ranges) | ||||||
|  |       dummy.range[1] = dummy.start | ||||||
|  |     this.last = {type: tt.name, start: dummy.start, end: dummy.start, loc: dummy.loc} | ||||||
|  |     return dummy | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   dummyIdent() { | ||||||
|  |     let dummy = this.dummyNode("Identifier") | ||||||
|  |     dummy.name = "✖" | ||||||
|  |     return dummy | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   dummyString() { | ||||||
|  |     let dummy = this.dummyNode("Literal") | ||||||
|  |     dummy.value = dummy.raw = "✖" | ||||||
|  |     return dummy | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   eat(type) { | ||||||
|  |     if (this.tok.type === type) { | ||||||
|  |       this.next() | ||||||
|  |       return true | ||||||
|  |     } else { | ||||||
|  |       return false | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   isContextual(name) { | ||||||
|  |     return this.tok.type === tt.name && this.tok.value === name | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   eatContextual(name) { | ||||||
|  |     return this.tok.value === name && this.eat(tt.name) | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   canInsertSemicolon() { | ||||||
|  |     return this.tok.type === tt.eof || this.tok.type === tt.braceR || | ||||||
|  |       lineBreak.test(this.input.slice(this.last.end, this.tok.start)) | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   semicolon() { | ||||||
|  |     return this.eat(tt.semi) | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   expect(type) { | ||||||
|  |     if (this.eat(type)) return true | ||||||
|  |     for (let i = 1; i <= 2; i++) { | ||||||
|  |       if (this.lookAhead(i).type == type) { | ||||||
|  |         for (let j = 0; j < i; j++) this.next() | ||||||
|  |         return true | ||||||
|  |       } | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   pushCx() { | ||||||
|  |     this.context.push(this.curIndent) | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   popCx() { | ||||||
|  |     this.curIndent = this.context.pop() | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   lineEnd(pos) { | ||||||
|  |     while (pos < this.input.length && !isNewLine(this.input.charCodeAt(pos))) ++pos | ||||||
|  |     return pos | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   indentationAfter(pos) { | ||||||
|  |     for (let count = 0;; ++pos) { | ||||||
|  |       let ch = this.input.charCodeAt(pos) | ||||||
|  |       if (ch === 32) ++count | ||||||
|  |       else if (ch === 9) count += this.options.tabSize | ||||||
|  |       else return count | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   closes(closeTok, indent, line, blockHeuristic) { | ||||||
|  |     if (this.tok.type === closeTok || this.tok.type === tt.eof) return true | ||||||
|  |     return line != this.curLineStart && this.curIndent < indent && this.tokenStartsLine() && | ||||||
|  |       (!blockHeuristic || this.nextLineStart >= this.input.length || | ||||||
|  |        this.indentationAfter(this.nextLineStart) < indent) | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   tokenStartsLine() { | ||||||
|  |     for (let p = this.tok.start - 1; p >= this.curLineStart; --p) { | ||||||
|  |       let ch = this.input.charCodeAt(p) | ||||||
|  |       if (ch !== 9 && ch !== 32) return false | ||||||
|  |     } | ||||||
|  |     return true | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   extend(name, f) { | ||||||
|  |     this[name] = f(this[name]) | ||||||
|  |   } | ||||||
|  |  | ||||||
|  |   loadPlugins(pluginConfigs) { | ||||||
|  |     for (let name in pluginConfigs) { | ||||||
|  |       let plugin = pluginsLoose[name] | ||||||
|  |       if (!plugin) throw new Error("Plugin '" + name + "' not found") | ||||||
|  |       plugin(this, pluginConfigs[name]) | ||||||
|  |     } | ||||||
|  |   } | ||||||
|  | } | ||||||
							
								
								
									
										450
									
								
								Source/node_modules/acorn/src/loose/statement.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										450
									
								
								Source/node_modules/acorn/src/loose/statement.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,450 @@ | |||||||
|  | import {LooseParser} from "./state" | ||||||
|  | import {isDummy} from "./parseutil" | ||||||
|  | import {getLineInfo, tokTypes as tt} from "../index" | ||||||
|  |  | ||||||
|  | const lp = LooseParser.prototype | ||||||
|  |  | ||||||
|  | lp.parseTopLevel = function() { | ||||||
|  |   let node = this.startNodeAt(this.options.locations ? [0, getLineInfo(this.input, 0)] : 0) | ||||||
|  |   node.body = [] | ||||||
|  |   while (this.tok.type !== tt.eof) node.body.push(this.parseStatement()) | ||||||
|  |   this.last = this.tok | ||||||
|  |   if (this.options.ecmaVersion >= 6) { | ||||||
|  |     node.sourceType = this.options.sourceType | ||||||
|  |   } | ||||||
|  |   return this.finishNode(node, "Program") | ||||||
|  | } | ||||||
|  |  | ||||||
// Parse one statement in error-tolerant ("loose") mode. Dispatches on the
// current token type; anything unrecognized falls through to the default
// branch, which tries async functions, labeled statements, and plain
// expression statements in that order. Recovery never throws: missing
// pieces are replaced with dummy nodes instead.
lp.parseStatement = function() {
  let starttype = this.tok.type, node = this.startNode(), kind

  // `let` is not a reserved word; the tokenizer decides contextually
  // whether the current `name` token actually starts a let-declaration.
  if (this.toks.isLet()) {
    starttype = tt._var
    kind = "let"
  }

  switch (starttype) {
  case tt._break: case tt._continue:
    this.next()
    let isBreak = starttype === tt._break
    if (this.semicolon() || this.canInsertSemicolon()) {
      node.label = null
    } else {
      // A label, if present, must be a plain identifier.
      node.label = this.tok.type === tt.name ? this.parseIdent() : null
      this.semicolon()
    }
    return this.finishNode(node, isBreak ? "BreakStatement" : "ContinueStatement")

  case tt._debugger:
    this.next()
    this.semicolon()
    return this.finishNode(node, "DebuggerStatement")

  case tt._do:
    this.next()
    node.body = this.parseStatement()
    // Tolerate a missing `while (...)` tail: substitute a dummy test.
    node.test = this.eat(tt._while) ? this.parseParenExpression() : this.dummyIdent()
    this.semicolon()
    return this.finishNode(node, "DoWhileStatement")

  case tt._for:
    this.next()
    this.pushCx()
    this.expect(tt.parenL)
    // `for (;` — empty init, plain C-style for loop.
    if (this.tok.type === tt.semi) return this.parseFor(node, null)
    let isLet = this.toks.isLet()
    if (isLet || this.tok.type === tt._var || this.tok.type === tt._const) {
      let init = this.parseVar(true, isLet ? "let" : this.tok.value)
      // A for-in/for-of head only allows a single declarator.
      if (init.declarations.length === 1 && (this.tok.type === tt._in || this.isContextual("of"))) {
        return this.parseForIn(node, init)
      }
      return this.parseFor(node, init)
    }
    let init = this.parseExpression(true)
    if (this.tok.type === tt._in || this.isContextual("of"))
      // Expression head of for-in/for-of must be converted to a pattern.
      return this.parseForIn(node, this.toAssignable(init))
    return this.parseFor(node, init)

  case tt._function:
    this.next()
    return this.parseFunction(node, true)

  case tt._if:
    this.next()
    node.test = this.parseParenExpression()
    node.consequent = this.parseStatement()
    node.alternate = this.eat(tt._else) ? this.parseStatement() : null
    return this.finishNode(node, "IfStatement")

  case tt._return:
    this.next()
    if (this.eat(tt.semi) || this.canInsertSemicolon()) node.argument = null
    else { node.argument = this.parseExpression(); this.semicolon() }
    return this.finishNode(node, "ReturnStatement")

  case tt._switch:
    // Remember indentation/line of the switch keyword; used to decide when
    // the (possibly unclosed) body ends.
    let blockIndent = this.curIndent, line = this.curLineStart
    this.next()
    node.discriminant = this.parseParenExpression()
    node.cases = []
    this.pushCx()
    this.expect(tt.braceL)

    let cur
    while (!this.closes(tt.braceR, blockIndent, line, true)) {
      if (this.tok.type === tt._case || this.tok.type === tt._default) {
        let isCase = this.tok.type === tt._case
        if (cur) this.finishNode(cur, "SwitchCase")
        node.cases.push(cur = this.startNode())
        cur.consequent = []
        this.next()
        if (isCase) cur.test = this.parseExpression()
        else cur.test = null
        this.expect(tt.colon)
      } else {
        // Statement before any `case`/`default`: synthesize a test-less
        // case to hold it so nothing is dropped.
        if (!cur) {
          node.cases.push(cur = this.startNode())
          cur.consequent = []
          cur.test = null
        }
        cur.consequent.push(this.parseStatement())
      }
    }
    if (cur) this.finishNode(cur, "SwitchCase")
    this.popCx()
    this.eat(tt.braceR)
    return this.finishNode(node, "SwitchStatement")

  case tt._throw:
    this.next()
    node.argument = this.parseExpression()
    this.semicolon()
    return this.finishNode(node, "ThrowStatement")

  case tt._try:
    this.next()
    node.block = this.parseBlock()
    node.handler = null
    if (this.tok.type === tt._catch) {
      let clause = this.startNode()
      this.next()
      this.expect(tt.parenL)
      clause.param = this.toAssignable(this.parseExprAtom(), true)
      this.expect(tt.parenR)
      clause.body = this.parseBlock()
      node.handler = this.finishNode(clause, "CatchClause")
    }
    node.finalizer = this.eat(tt._finally) ? this.parseBlock() : null
    // A bare `try { ... }` with neither handler nor finalizer collapses to
    // its block, since a TryStatement without both would be invalid.
    if (!node.handler && !node.finalizer) return node.block
    return this.finishNode(node, "TryStatement")

  case tt._var:
  case tt._const:
    // `kind` is set above when the statement started with contextual `let`.
    return this.parseVar(false, kind || this.tok.value)

  case tt._while:
    this.next()
    node.test = this.parseParenExpression()
    node.body = this.parseStatement()
    return this.finishNode(node, "WhileStatement")

  case tt._with:
    this.next()
    node.object = this.parseParenExpression()
    node.body = this.parseStatement()
    return this.finishNode(node, "WithStatement")

  case tt.braceL:
    return this.parseBlock()

  case tt.semi:
    this.next()
    return this.finishNode(node, "EmptyStatement")

  case tt._class:
    return this.parseClass(true)

  case tt._import:
    return this.parseImport()

  case tt._export:
    return this.parseExport()

  default:
    if (this.toks.isAsyncFunction()) {
      // Consume both the `async` and `function` tokens.
      this.next()
      this.next()
      return this.parseFunction(node, true, true)
    }
    let expr = this.parseExpression()
    if (isDummy(expr)) {
      // Could not parse anything here: skip one token and retry, unless we
      // are at EOF, in which case emit an empty statement and stop.
      this.next()
      if (this.tok.type === tt.eof) return this.finishNode(node, "EmptyStatement")
      return this.parseStatement()
    } else if (starttype === tt.name && expr.type === "Identifier" && this.eat(tt.colon)) {
      // `ident:` introduces a labeled statement.
      node.body = this.parseStatement()
      node.label = expr
      return this.finishNode(node, "LabeledStatement")
    } else {
      node.expression = expr
      this.semicolon()
      return this.finishNode(node, "ExpressionStatement")
    }
  }
}
|  |  | ||||||
// Parse a `{ ... }` statement block, using the indentation of the line that
// follows the opening brace to recover from a missing closing brace.
lp.parseBlock = function() {
  const block = this.startNode()
  this.pushCx()
  this.expect(tt.braceL)
  const baseIndent = this.curIndent
  const baseLine = this.curLineStart
  block.body = []
  while (!this.closes(tt.braceR, baseIndent, baseLine, true)) {
    block.body.push(this.parseStatement())
  }
  this.popCx()
  this.eat(tt.braceR)
  return this.finishNode(block, "BlockStatement")
}
|  |  | ||||||
// Finish a C-style for statement whose init clause (possibly null) has
// already been consumed. Test and update clauses are optional.
lp.parseFor = function(node, init) {
  node.init = init
  node.test = null
  node.update = null
  // Each clause is present only if the separating `;` was found and the
  // next token does not already end the clause.
  if (this.eat(tt.semi) && this.tok.type !== tt.semi) node.test = this.parseExpression()
  if (this.eat(tt.semi) && this.tok.type !== tt.parenR) node.update = this.parseExpression()
  this.popCx()
  this.expect(tt.parenR)
  node.body = this.parseStatement()
  return this.finishNode(node, "ForStatement")
}
|  |  | ||||||
// Finish a for-in or for-of statement; `init` is the already-parsed left
// pattern/declaration, and the current token (`in` or `of`) picks the type.
lp.parseForIn = function(node, init) {
  const isForIn = this.tok.type === tt._in
  this.next()
  node.left = init
  node.right = this.parseExpression()
  this.popCx()
  this.expect(tt.parenR)
  node.body = this.parseStatement()
  return this.finishNode(node, isForIn ? "ForInStatement" : "ForOfStatement")
}
|  |  | ||||||
// Parse a variable declaration (`var`/`let`/`const`) with comma-separated
// declarators. `noIn` suppresses the trailing semicolon (for-loop heads);
// `kind` is the declaration keyword's string value.
lp.parseVar = function(noIn, kind) {
  const decl = this.startNode()
  decl.kind = kind
  this.next()
  decl.declarations = []
  for (;;) {
    const one = this.startNode()
    // Pre-ES6 only plain identifiers can be bound; ES6 allows patterns.
    one.id = this.options.ecmaVersion >= 6 ? this.toAssignable(this.parseExprAtom(), true) : this.parseIdent()
    one.init = this.eat(tt.eq) ? this.parseMaybeAssign(noIn) : null
    decl.declarations.push(this.finishNode(one, "VariableDeclarator"))
    if (!this.eat(tt.comma)) break
  }
  // Guarantee at least one declarator so consumers never see an empty list.
  if (!decl.declarations.length) {
    const dummy = this.startNode()
    dummy.id = this.dummyIdent()
    decl.declarations.push(this.finishNode(dummy, "VariableDeclarator"))
  }
  if (!noIn) this.semicolon()
  return this.finishNode(decl, "VariableDeclaration")
}
|  |  | ||||||
// Parse a class declaration or expression in loose mode, tolerating a
// missing name, missing braces, and malformed members. `isStatement` may be
// null, meaning "decide from the next token" (a name implies a declaration).
lp.parseClass = function(isStatement) {
  let node = this.startNode()
  this.next()
  if (isStatement == null) isStatement = this.tok.type === tt.name
  if (this.tok.type === tt.name) node.id = this.parseIdent()
  else if (isStatement) node.id = this.dummyIdent()
  else node.id = null
  node.superClass = this.eat(tt._extends) ? this.parseExpression() : null
  node.body = this.startNode()
  node.body.body = []
  this.pushCx()
  // Expected body indentation: one level deeper than the `class` line.
  let indent = this.curIndent + 1, line = this.curLineStart
  this.eat(tt.braceL)
  // If the actual body is indented less, trust the real layout instead.
  if (this.curIndent + 1 < indent) { indent = this.curIndent; line = this.curLineStart }
  while (!this.closes(tt.braceR, indent, line)) {
    if (this.semicolon()) continue
    let method = this.startNode(), isGenerator, isAsync
    if (this.options.ecmaVersion >= 6) {
      method.static = false
      isGenerator = this.eat(tt.star)
    }
    this.parsePropertyName(method)
    // Unparseable member key: swallow a token or two and move on.
    if (isDummy(method.key)) { if (isDummy(this.parseMaybeAssign())) this.next(); this.eat(tt.comma); continue }
    // `static` is a modifier only when NOT directly followed by `(` or `{`
    // (otherwise it is a method actually named "static").
    if (method.key.type === "Identifier" && !method.computed && method.key.name === "static" &&
        (this.tok.type != tt.parenL && this.tok.type != tt.braceL)) {
      method.static = true
      isGenerator = this.eat(tt.star)
      this.parsePropertyName(method)
    } else {
      method.static = false
    }
    // Same disambiguation for `async`: only a modifier when a real key (not
    // `(`) follows on the same line.
    if (!method.computed &&
        method.key.type === "Identifier" && method.key.name === "async" && this.tok.type !== tt.parenL &&
        !this.canInsertSemicolon()) {
      this.parsePropertyName(method)
      isAsync = true
    } else {
      isAsync = false
    }
    // And for `get`/`set`: accessor only when another key follows.
    if (this.options.ecmaVersion >= 5 && method.key.type === "Identifier" &&
        !method.computed && (method.key.name === "get" || method.key.name === "set") &&
        this.tok.type !== tt.parenL && this.tok.type !== tt.braceL) {
      method.kind = method.key.name
      this.parsePropertyName(method)
      method.value = this.parseMethod(false)
    } else {
      // An unmodified method literally named/quoted "constructor" gets the
      // special constructor kind.
      if (!method.computed && !method.static && !isGenerator && !isAsync && (
        method.key.type === "Identifier" && method.key.name === "constructor" ||
          method.key.type === "Literal" && method.key.value === "constructor")) {
        method.kind = "constructor"
      } else {
        method.kind =  "method"
      }
      method.value = this.parseMethod(isGenerator, isAsync)
    }
    node.body.body.push(this.finishNode(method, "MethodDefinition"))
  }
  this.popCx()
  if (!this.eat(tt.braceR)) {
    // If there is no closing brace, make the node span to the start
    // of the next token (this is useful for Tern)
    this.last.end = this.tok.start
    if (this.options.locations) this.last.loc.end = this.tok.loc.start
  }
  this.semicolon()
  this.finishNode(node.body, "ClassBody")
  return this.finishNode(node, isStatement ? "ClassDeclaration" : "ClassExpression")
}
|  |  | ||||||
// Parse a function declaration or expression. `isStatement` may be null
// ("decide from the next token"); `isAsync` marks an `async function`.
lp.parseFunction = function(node, isStatement, isAsync) {
  // Save the enclosing async context; it is restored after the body.
  const outerAsync = this.inAsync
  this.initFunction(node)
  if (this.options.ecmaVersion >= 6) node.generator = this.eat(tt.star)
  if (this.options.ecmaVersion >= 8) node.async = !!isAsync
  if (isStatement == null) isStatement = this.tok.type === tt.name
  if (this.tok.type === tt.name) {
    node.id = this.parseIdent()
  } else if (isStatement) {
    // Declarations need a name; fabricate a dummy one when missing.
    node.id = this.dummyIdent()
  }
  this.inAsync = node.async
  node.params = this.parseFunctionParams()
  node.body = this.parseBlock()
  this.inAsync = outerAsync
  return this.finishNode(node, isStatement ? "FunctionDeclaration" : "FunctionExpression")
}
|  |  | ||||||
// Parse an `export` declaration: `export * from ...`, `export default ...`,
// a re-exported declaration statement, or a named specifier list.
lp.parseExport = function() {
  let node = this.startNode()
  this.next()
  if (this.eat(tt.star)) {
    // `export * from "src"`; a missing source becomes a dummy string.
    node.source = this.eatContextual("from") ? this.parseExprAtom() : this.dummyString()
    return this.finishNode(node, "ExportAllDeclaration")
  }
  if (this.eat(tt._default)) {
    // export default (function foo() {}) // This is FunctionExpression.
    let isAsync
    if (this.tok.type === tt._function || (isAsync = this.toks.isAsyncFunction())) {
      let fNode = this.startNode()
      this.next()
      // For `async function`, consume the extra `function` token too.
      if (isAsync) this.next()
      node.declaration = this.parseFunction(fNode, null, isAsync)
    } else if (this.tok.type === tt._class) {
      node.declaration = this.parseClass(null)
    } else {
      node.declaration = this.parseMaybeAssign()
      this.semicolon()
    }
    return this.finishNode(node, "ExportDefaultDeclaration")
  }
  if (this.tok.type.keyword || this.toks.isLet() || this.toks.isAsyncFunction()) {
    // `export var/let/const/function/class ...` — export a declaration.
    node.declaration = this.parseStatement()
    node.specifiers = []
    node.source = null
  } else {
    // `export { a, b as c } [from "src"]` — named specifier list.
    node.declaration = null
    node.specifiers = this.parseExportSpecifierList()
    node.source = this.eatContextual("from") ? this.parseExprAtom() : null
    this.semicolon()
  }
  return this.finishNode(node, "ExportNamedDeclaration")
}
|  |  | ||||||
// Parse an `import` declaration: either a bare `import "module"` or a
// specifier list with optional default specifier and `from` source.
lp.parseImport = function() {
  let node = this.startNode()
  this.next()
  if (this.tok.type === tt.string) {
    // `import "module"` — side-effect import, no bindings.
    node.specifiers = []
    node.source = this.parseExprAtom()
    node.kind = ''
  } else {
    let elt
    // A leading bare name (that is not `from`) is the default import.
    if (this.tok.type === tt.name && this.tok.value !== "from") {
      elt = this.startNode()
      elt.local = this.parseIdent()
      this.finishNode(elt, "ImportDefaultSpecifier")
      this.eat(tt.comma)
    }
    node.specifiers = this.parseImportSpecifierList()
    // A missing or malformed source is replaced with a dummy string.
    node.source = this.eatContextual("from") && this.tok.type == tt.string ? this.parseExprAtom() : this.dummyString()
    // The default specifier, when present, goes first in the list.
    if (elt) node.specifiers.unshift(elt)
  }
  this.semicolon()
  return this.finishNode(node, "ImportDeclaration")
}
|  |  | ||||||
// Parse the specifier portion of an import: either `* as ns` or a braced
// `{ a, b as c }` list. Uses line indentation to recover when the closing
// brace is missing.
lp.parseImportSpecifierList = function() {
  let elts = []
  if (this.tok.type === tt.star) {
    // `* as local` — namespace import; a missing `as local` gets a dummy.
    let elt = this.startNode()
    this.next()
    elt.local = this.eatContextual("as") ? this.parseIdent() : this.dummyIdent()
    elts.push(this.finishNode(elt, "ImportNamespaceSpecifier"))
  } else {
    // Track the indentation of the opening line; a continuation line may
    // shift the reference point used to detect the end of the list.
    let indent = this.curIndent, line = this.curLineStart, continuedLine = this.nextLineStart
    this.pushCx()
    this.eat(tt.braceL)
    if (this.curLineStart > continuedLine) continuedLine = this.curLineStart
    while (!this.closes(tt.braceR, indent + (this.curLineStart <= continuedLine ? 1 : 0), line)) {
      let elt = this.startNode()
      if (this.eat(tt.star)) {
        elt.local = this.eatContextual("as") ? this.parseIdent() : this.dummyIdent()
        this.finishNode(elt, "ImportNamespaceSpecifier")
      } else {
        // Stop early if we ran into the `from` clause inside the braces.
        if (this.isContextual("from")) break
        elt.imported = this.parseIdent()
        if (isDummy(elt.imported)) break
        // `imported as local`; without `as`, local name equals imported.
        elt.local = this.eatContextual("as") ? this.parseIdent() : elt.imported
        this.finishNode(elt, "ImportSpecifier")
      }
      elts.push(elt)
      this.eat(tt.comma)
    }
    this.eat(tt.braceR)
    this.popCx()
  }
  return elts
}
|  |  | ||||||
// Parse a braced `{ a, b as c }` export specifier list, using the same
// indentation heuristic as the import variant to survive a missing brace.
lp.parseExportSpecifierList = function() {
  let elts = []
  let indent = this.curIndent, line = this.curLineStart, continuedLine = this.nextLineStart
  this.pushCx()
  this.eat(tt.braceL)
  if (this.curLineStart > continuedLine) continuedLine = this.curLineStart
  while (!this.closes(tt.braceR, indent + (this.curLineStart <= continuedLine ? 1 : 0), line)) {
    // Stop early if we ran into the `from` clause inside the braces.
    if (this.isContextual("from")) break
    let elt = this.startNode()
    elt.local = this.parseIdent()
    if (isDummy(elt.local)) break
    // `local as exported`; without `as`, exported name equals local.
    elt.exported = this.eatContextual("as") ? this.parseIdent() : elt.local
    this.finishNode(elt, "ExportSpecifier")
    elts.push(elt)
    this.eat(tt.comma)
  }
  this.eat(tt.braceR)
  this.popCx()
  return elts
}
							
								
								
									
										108
									
								
								Source/node_modules/acorn/src/loose/tokenize.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										108
									
								
								Source/node_modules/acorn/src/loose/tokenize.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,108 @@ | |||||||
|  | import {tokTypes as tt, Token, isNewLine, SourceLocation, getLineInfo, lineBreakG} from "../index" | ||||||
|  | import {LooseParser} from "./state" | ||||||
|  |  | ||||||
|  | const lp = LooseParser.prototype | ||||||
|  |  | ||||||
// True for whitespace character codes: tab..carriage-return (9-13),
// space (32), non-breaking space (160), or any line terminator.
function isSpace(ch) {
  if (ch > 8 && ch < 14) return true
  return ch === 32 || ch === 160 || isNewLine(ch)
}
|  |  | ||||||
// Advance to the next token, serving it from the lookahead buffer when one
// is queued, and keep the current-line/indentation bookkeeping in sync.
lp.next = function() {
  this.last = this.tok
  this.tok = this.ahead.length ? this.ahead.shift() : this.readToken()

  if (this.tok.start >= this.nextLineStart) {
    // The token starts on a later line: walk line starts forward until we
    // reach the token's line, then recompute the current indentation.
    while (this.tok.start >= this.nextLineStart) {
      this.curLineStart = this.nextLineStart
      this.nextLineStart = this.lineEnd(this.curLineStart) + 1
    }
    this.curIndent = this.indentationAfter(this.curLineStart)
  }
}
|  |  | ||||||
// Read one token from the strict tokenizer, recovering from SyntaxErrors by
// pattern-matching the error message and either skipping input or
// synthesizing a replacement token. Loops until a token is produced.
lp.readToken = function() {
  for (;;) {
    try {
      this.toks.next()
      // Recognize a lone `..` as the start of `...` (spread/rest) in ES6+.
      if (this.toks.type === tt.dot &&
          this.input.substr(this.toks.end, 1) === "." &&
          this.options.ecmaVersion >= 6) {
        this.toks.end++
        this.toks.type = tt.ellipsis
      }
      return new Token(this.toks)
    } catch(e) {
      if (!(e instanceof SyntaxError)) throw e

      // Try to skip some text, based on the error message, and then continue
      // `replace === true` means "synthesize a generic error token";
      // an object is a literal replacement token; false means just skip.
      let msg = e.message, pos = e.raisedAt, replace = true
      if (/unterminated/i.test(msg)) {
        // Unterminated literal: consume up to the end of the line and fake
        // a string/regexp/template token covering what was there.
        pos = this.lineEnd(e.pos + 1)
        if (/string/.test(msg)) {
          replace = {start: e.pos, end: pos, type: tt.string, value: this.input.slice(e.pos + 1, pos)}
        } else if (/regular expr/i.test(msg)) {
          let re = this.input.slice(e.pos, pos)
          // Best effort: keep the raw source text if it is not a valid RegExp.
          try { re = new RegExp(re) } catch(e) {}
          replace = {start: e.pos, end: pos, type: tt.regexp, value: re}
        } else if (/template/.test(msg)) {
          replace = {start: e.pos, end: pos,
                     type: tt.template,
                     value: this.input.slice(e.pos, pos)}
        } else {
          replace = false
        }
      } else if (/invalid (unicode|regexp|number)|expecting unicode|octal literal|is reserved|directly after number|expected number in radix/i.test(msg)) {
        // Malformed literal/identifier: skip to the next whitespace.
        while (pos < this.input.length && !isSpace(this.input.charCodeAt(pos))) ++pos
      } else if (/character escape|expected hexadecimal/i.test(msg)) {
        // Bad escape inside a string: skip to the closing quote or newline.
        while (pos < this.input.length) {
          let ch = this.input.charCodeAt(pos++)
          if (ch === 34 || ch === 39 || isNewLine(ch)) break
        }
      } else if (/unexpected character/i.test(msg)) {
        pos++
        replace = false
      } else if (/regular expression/i.test(msg)) {
        replace = true
      } else {
        throw e
      }
      this.resetTo(pos)
      if (replace === true) replace = {start: pos, end: pos, type: tt.name, value: "✖"}
      if (replace) {
        if (this.options.locations)
          replace.loc = new SourceLocation(
            this.toks,
            getLineInfo(this.input, replace.start),
            getLineInfo(this.input, replace.end))
        return replace
      }
    }
  }
}
|  |  | ||||||
// Reposition the underlying tokenizer at `pos` after error recovery,
// re-deriving whether an expression may start here (which disambiguates
// `/` as division vs. regexp) and, when locations are on, the line counter.
lp.resetTo = function(pos) {
  this.toks.pos = pos
  let ch = this.input.charAt(pos - 1)
  // Expressions are allowed at the start of input, after most operator /
  // punctuation characters, or after a keyword ending just before `pos`.
  // NOTE(review): the "keywords" alternative below is not a JS keyword and
  // looks like a stray entry in the list — confirm against upstream acorn.
  this.toks.exprAllowed = !ch || /[\[\{\(,;:?\/*=+\-~!|&%^<>]/.test(ch) ||
    /[enwfd]/.test(ch) &&
    /\b(keywords|case|else|return|throw|new|in|(instance|type)of|delete|void)$/.test(this.input.slice(pos - 10, pos))

  if (this.options.locations) {
    // Recount lines from the top of the input up to `pos`.
    this.toks.curLine = 1
    this.toks.lineStart = lineBreakG.lastIndex = 0
    let match
    while ((match = lineBreakG.exec(this.input)) && match.index < pos) {
      ++this.toks.curLine
      this.toks.lineStart = match.index + match[0].length
    }
  }
}
|  |  | ||||||
// Peek `n` tokens ahead (1-based) without consuming them, filling the
// lookahead buffer on demand.
lp.lookAhead = function(n) {
  while (this.ahead.length < n) {
    this.ahead.push(this.readToken())
  }
  return this.ahead[n - 1]
}
							
								
								
									
										219
									
								
								Source/node_modules/acorn/src/lval.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										219
									
								
								Source/node_modules/acorn/src/lval.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,219 @@ | |||||||
|  | import {types as tt} from "./tokentype" | ||||||
|  | import {Parser} from "./state" | ||||||
|  | import {has} from "./util" | ||||||
|  |  | ||||||
|  | const pp = Parser.prototype | ||||||
|  |  | ||||||
|  | // Convert existing expression atom to assignable pattern | ||||||
|  | // if possible. | ||||||
|  |  | ||||||
// Convert existing expression atom to assignable pattern
// if possible. Mutates `node` in place (e.g. ObjectExpression becomes
// ObjectPattern) and raises on expressions that cannot be assigned to.
// `isBinding` is true when converting for a binding position (declaration /
// catch param), which is stricter than a plain assignment target.
pp.toAssignable = function(node, isBinding) {
  if (this.options.ecmaVersion >= 6 && node) {
    switch (node.type) {
    case "Identifier":
      // `await` cannot be used as an identifier inside async functions.
      if (this.inAsync && node.name === "await")
        this.raise(node.start, "Can not use 'await' as identifier inside an async function")
      break

    case "ObjectPattern":
    case "ArrayPattern":
      break

    case "ObjectExpression":
      node.type = "ObjectPattern"
      for (let i = 0; i < node.properties.length; i++) {
        let prop = node.properties[i]
        if (prop.kind !== "init") this.raise(prop.key.start, "Object pattern can't contain getter or setter")
        this.toAssignable(prop.value, isBinding)
      }
      break

    case "ArrayExpression":
      node.type = "ArrayPattern"
      this.toAssignableList(node.elements, isBinding)
      break

    case "AssignmentExpression":
      if (node.operator === "=") {
        node.type = "AssignmentPattern"
        delete node.operator
        this.toAssignable(node.left, isBinding)
        // falls through to AssignmentPattern
      } else {
        this.raise(node.left.end, "Only '=' operator can be used for specifying default value.")
        break
      }

    case "AssignmentPattern":
      break

    case "ParenthesizedExpression":
      node.expression = this.toAssignable(node.expression, isBinding)
      break

    case "MemberExpression":
      // Member expressions are valid assignment targets but not valid
      // binding targets; in binding position, fall through to the error.
      if (!isBinding) break

    default:
      this.raise(node.start, "Assigning to rvalue")
    }
  }
  return node
}
|  |  | ||||||
|  | // Convert list of expression atoms to binding list. | ||||||
|  |  | ||||||
// Convert list of expression atoms to binding list.
// A trailing SpreadElement is rewritten in place to a RestElement and
// validated; the remaining elements are converted individually (holes,
// i.e. null entries from elisions, are skipped).
pp.toAssignableList = function(exprList, isBinding) {
  let end = exprList.length
  if (end) {
    let last = exprList[end - 1]
    if (last && last.type == "RestElement") {
      // Already a rest element; exclude it from the per-element pass below.
      --end
    } else if (last && last.type == "SpreadElement") {
      last.type = "RestElement"
      let arg = last.argument
      this.toAssignable(arg, isBinding)
      // Rest target must itself be a valid pattern shape.
      if (arg.type !== "Identifier" && arg.type !== "MemberExpression" && arg.type !== "ArrayPattern")
        this.unexpected(arg.start)
      --end
    }

    // In binding position a rest argument must be a plain identifier.
    if (isBinding && last && last.type === "RestElement" && last.argument.type !== "Identifier")
      this.unexpected(last.argument.start)
  }
  for (let i = 0; i < end; i++) {
    let elt = exprList[i]
    if (elt) this.toAssignable(elt, isBinding)
  }
  return exprList
}
|  |  | ||||||
|  | // Parses spread element. | ||||||
|  |  | ||||||
// Parses spread element (`...expr`). The `...` token is consumed here;
// destructuring-error bookkeeping is threaded through to the argument.
pp.parseSpread = function(refDestructuringErrors) {
  const spread = this.startNode()
  this.next()
  spread.argument = this.parseMaybeAssign(false, refDestructuringErrors)
  return this.finishNode(spread, "SpreadElement")
}
|  |  | ||||||
// Parse a rest element (`...target`).
pp.parseRest = function(allowNonIdent) {
  const rest = this.startNode()
  this.next()

  // RestElement inside of a function parameter must be an identifier
  if (allowNonIdent) {
    rest.argument = this.type === tt.name ? this.parseIdent() : this.unexpected()
  } else {
    const canBind = this.type === tt.name || this.type === tt.bracketL
    rest.argument = canBind ? this.parseBindingAtom() : this.unexpected()
  }

  return this.finishNode(rest, "RestElement")
}
|  |  | ||||||
|  | // Parses lvalue (assignable) atom. | ||||||
|  |  | ||||||
// Parses lvalue (assignable) atom: an identifier, an array pattern, or an
// object pattern. Pre-ES6 only plain identifiers are accepted.
pp.parseBindingAtom = function() {
  if (this.options.ecmaVersion < 6) return this.parseIdent()

  if (this.type === tt.name) return this.parseIdent()

  if (this.type === tt.bracketL) {
    const pattern = this.startNode()
    this.next()
    pattern.elements = this.parseBindingList(tt.bracketR, true, true)
    return this.finishNode(pattern, "ArrayPattern")
  }

  if (this.type === tt.braceL) return this.parseObj(true)

  this.unexpected()
}
|  |  | ||||||
pp.parseBindingList = function(close, allowEmpty, allowTrailingComma, allowNonIdent) {
  // Parse a comma-separated list of binding elements up to the `close`
  // token type. `allowEmpty` permits holes (array-pattern elisions),
  // `allowTrailingComma` permits a comma directly before `close`, and
  // `allowNonIdent` is forwarded to parseRest.
  let elts = [], first = true
  while (!this.eat(close)) {
    if (first) first = false
    else this.expect(tt.comma)
    if (allowEmpty && this.type === tt.comma) {
      // Elision: record a hole; the comma is consumed on the next iteration.
      elts.push(null)
    } else if (allowTrailingComma && this.afterTrailingComma(close)) {
      break
    } else if (this.type === tt.ellipsis) {
      // Rest element: must be the last item of the list.
      let rest = this.parseRest(allowNonIdent)
      this.parseBindingListItem(rest)
      elts.push(rest)
      if (this.type === tt.comma) this.raise(this.start, "Comma is not permitted after the rest element")
      this.expect(close)
      break
    } else {
      // Ordinary element, possibly with a default (`x = expr`).
      let elem = this.parseMaybeDefault(this.start, this.startLoc)
      this.parseBindingListItem(elem)
      elts.push(elem)
    }
  }
  return elts
}
|  |  | ||||||
// Hook for plugins to post-process a single binding-list element.
// The default implementation returns the element unchanged.
pp.parseBindingListItem = function(param) {
  return param
}
|  |  | ||||||
|  | // Parses assignment pattern around given atom if possible. | ||||||
|  |  | ||||||
pp.parseMaybeDefault = function(startPos, startLoc, left) {
  // Parse the binding atom unless the caller already supplied one.
  if (!left) left = this.parseBindingAtom()
  // Defaults only exist under ES6+; without a `=` there is nothing to wrap.
  if (this.options.ecmaVersion < 6) return left
  if (!this.eat(tt.eq)) return left
  // Wrap the target and its default in an AssignmentPattern node.
  let pattern = this.startNodeAt(startPos, startLoc)
  pattern.left = left
  pattern.right = this.parseMaybeAssign()
  return this.finishNode(pattern, "AssignmentPattern")
}
|  |  | ||||||
|  | // Verify that a node is an lval — something that can be assigned | ||||||
|  | // to. | ||||||
|  |  | ||||||
pp.checkLVal = function(expr, isBinding, checkClashes) {
  // Recursively validate that `expr` can serve as an assignment or
  // binding target. `isBinding` selects the stricter declaration rules;
  // `checkClashes`, when given, is a name->true map used to reject
  // duplicate argument names. Errors are reported via raise/raiseRecoverable.
  switch (expr.type) {
  case "Identifier":
    // Strict mode forbids binding/assigning certain names.
    if (this.strict && this.reservedWordsStrictBind.test(expr.name))
      this.raiseRecoverable(expr.start, (isBinding ? "Binding " : "Assigning to ") + expr.name + " in strict mode")
    if (checkClashes) {
      if (has(checkClashes, expr.name))
        this.raiseRecoverable(expr.start, "Argument name clash")
      checkClashes[expr.name] = true
    }
    break

  case "MemberExpression":
    // Member expressions may be assigned to, but never bound.
    if (isBinding) this.raiseRecoverable(expr.start, (isBinding ? "Binding" : "Assigning to") + " member expression")
    break

  case "ObjectPattern":
    // Validate each property's value (the bound sub-pattern).
    for (let i = 0; i < expr.properties.length; i++)
      this.checkLVal(expr.properties[i].value, isBinding, checkClashes)
    break

  case "ArrayPattern":
    // Holes (elisions) are stored as null and skipped.
    for (let i = 0; i < expr.elements.length; i++) {
      let elem = expr.elements[i]
      if (elem) this.checkLVal(elem, isBinding, checkClashes)
    }
    break

  case "AssignmentPattern":
    // Only the target side of a default needs validating.
    this.checkLVal(expr.left, isBinding, checkClashes)
    break

  case "RestElement":
    this.checkLVal(expr.argument, isBinding, checkClashes)
    break

  case "ParenthesizedExpression":
    this.checkLVal(expr.expression, isBinding, checkClashes)
    break

  default:
    this.raise(expr.start, (isBinding ? "Binding" : "Assigning to") + " rvalue")
  }
}
							
								
								
									
										50
									
								
								Source/node_modules/acorn/src/node.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										50
									
								
								Source/node_modules/acorn/src/node.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,50 @@ | |||||||
|  | import {Parser} from "./state" | ||||||
|  | import {SourceLocation} from "./locutil" | ||||||
|  |  | ||||||
// AST node. `type` is filled in later by finishNode; `start`/`end` are
// character offsets into the input.
export class Node {
  constructor(parser, pos, loc) {
    this.type = ""
    this.start = pos
    this.end = 0
    // Optional extras, driven by parser options:
    if (parser.options.locations)
      this.loc = new SourceLocation(parser, loc)
    if (parser.options.directSourceFile)
      this.sourceFile = parser.options.directSourceFile
    if (parser.options.ranges)
      this.range = [pos, 0]  // [start, end] offset pair; end set by finishNode
  }
}
|  |  | ||||||
|  | // Start an AST node, attaching a start offset. | ||||||
|  |  | ||||||
|  | const pp = Parser.prototype | ||||||
|  |  | ||||||
pp.startNode = function() {
  // Anchor a fresh node at the current token's start offset/location.
  return new Node(this, this.start, this.startLoc)
}
|  |  | ||||||
pp.startNodeAt = function(pos, loc) {
  // Anchor a fresh node at an explicit offset/location pair.
  return new Node(this, pos, loc)
}
|  |  | ||||||
|  | // Finish an AST node, adding `type` and `end` properties. | ||||||
|  |  | ||||||
// Shared implementation for finishNode/finishNodeAt below. Must be
// invoked with the parser as `this` (via .call), since it reads
// `this.options` to decide which extras to record.
function finishNodeAt(node, type, pos, loc) {
  node.type = type
  node.end = pos
  if (this.options.locations)
    node.loc.end = loc
  if (this.options.ranges)
    node.range[1] = pos
  return node
}
|  |  | ||||||
pp.finishNode = function(node, type) {
  // Close the node at the end of the previous token.
  return finishNodeAt.call(this, node, type, this.lastTokEnd, this.lastTokEndLoc)
}
|  |  | ||||||
|  | // Finish node at given position | ||||||
|  |  | ||||||
pp.finishNodeAt = function(node, type, pos, loc) {
  // Close the node at an explicitly supplied position.
  return finishNodeAt.call(this, node, type, pos, loc)
}
							
								
								
									
										128
									
								
								Source/node_modules/acorn/src/options.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										128
									
								
								Source/node_modules/acorn/src/options.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,128 @@ | |||||||
|  | import {has, isArray} from "./util" | ||||||
|  | import {SourceLocation} from "./locutil" | ||||||
|  |  | ||||||
|  | // A second optional argument can be given to further configure | ||||||
|  | // the parser process. These options are recognized: | ||||||
|  |  | ||||||
export const defaultOptions = {
  // `ecmaVersion` indicates the ECMAScript version to parse. Must
  // be either 3, 5, 6 (2015), 7 (2016), or 8 (2017). This influences support
  // for strict mode, the set of reserved words, and support for
  // new syntax features. The default is 7.
  ecmaVersion: 7,
  // `sourceType` indicates the mode the code should be parsed in.
  // Can be either `"script"` or `"module"`. This influences global
  // strict mode and parsing of `import` and `export` declarations.
  sourceType: "script",
  // `onInsertedSemicolon` can be a callback that will be called
  // when a semicolon is automatically inserted. It will be passed
  // the position of the inserted semicolon as an offset, and if
  // `locations` is enabled, it is given the location as a `{line, column}` object
  // as second argument.
  onInsertedSemicolon: null,
  // `onTrailingComma` is similar to `onInsertedSemicolon`, but for
  // trailing commas.
  onTrailingComma: null,
  // By default, reserved words are only enforced if ecmaVersion >= 5.
  // Set `allowReserved` to a boolean value to explicitly turn this on
  // and off. When this option has the value "never", reserved words
  // and keywords can also not be used as property names.
  allowReserved: null,
  // When enabled, a return at the top level is not considered an
  // error.
  allowReturnOutsideFunction: false,
  // When enabled, import/export statements are not constrained to
  // appearing at the top of the program.
  allowImportExportEverywhere: false,
  // When enabled, hashbang directive in the beginning of file
  // is allowed and treated as a line comment.
  allowHashBang: false,
  // When `locations` is on, `loc` properties holding objects with
  // `start` and `end` properties in `{line, column}` form (with
  // line being 1-based and column 0-based) will be attached to the
  // nodes.
  locations: false,
  // A function can be passed as `onToken` option, which will
  // cause Acorn to call that function with object in the same
  // format as tokens returned from `tokenizer().getToken()`. Note
  // that you are not allowed to call the parser from the
  // callback—that will corrupt its internal state.
  onToken: null,
  // A function can be passed as `onComment` option, which will
  // cause Acorn to call that function with `(block, text, start,
  // end)` parameters whenever a comment is skipped. `block` is a
  // boolean indicating whether this is a block (`/* */`) comment,
  // `text` is the content of the comment, and `start` and `end` are
  // character offsets that denote the start and end of the comment.
  // When the `locations` option is on, two more parameters are
  // passed, the full `{line, column}` locations of the start and
  // end of the comments. Note that you are not allowed to call the
  // parser from the callback—that will corrupt its internal state.
  onComment: null,
  // Nodes have their start and end characters offsets recorded in
  // `start` and `end` properties (directly on the node, rather than
  // the `loc` object, which holds line/column data. To also add a
  // [semi-standardized][range] `range` property holding a `[start,
  // end]` array with the same numbers, set the `ranges` option to
  // `true`.
  //
  // [range]: https://bugzilla.mozilla.org/show_bug.cgi?id=745678
  ranges: false,
  // It is possible to parse multiple files into a single AST by
  // passing the tree produced by parsing the first file as
  // `program` option in subsequent parses. This will add the
  // toplevel forms of the parsed file to the `Program` (top) node
  // of an existing parse tree.
  program: null,
  // When `locations` is on, you can pass this to record the source
  // file in every node's `loc` object.
  sourceFile: null,
  // This value, if given, is stored in every node, whether
  // `locations` is on or off.
  directSourceFile: null,
  // When enabled, parenthesized expressions are represented by
  // (non-standard) ParenthesizedExpression nodes
  preserveParens: false,
  // Named plugin configurations, looked up in the `plugins` registry.
  plugins: {}
}
|  |  | ||||||
|  | // Interpret and default an options object | ||||||
|  |  | ||||||
// Interpret a user-supplied options object, filling in defaults and
// normalizing the callbacks.
export function getOptions(opts) {
  let options = {}

  // Copy every known option, falling back to the default when the
  // caller did not supply it as an own property.
  for (let opt in defaultOptions) {
    if (opts && has(opts, opt)) options[opt] = opts[opt]
    else options[opt] = defaultOptions[opt]
  }

  // Year-style versions (2015, 2016, ...) map onto the internal 6/7/... scheme.
  if (options.ecmaVersion >= 2015)
    options.ecmaVersion -= 2009

  // Reserved words are enforced only from ES5 on, unless set explicitly.
  if (options.allowReserved == null)
    options.allowReserved = options.ecmaVersion < 5

  // Arrays passed for the token/comment callbacks become collecting functions.
  if (isArray(options.onToken)) {
    let tokens = options.onToken
    options.onToken = (token) => tokens.push(token)
  }
  if (isArray(options.onComment))
    options.onComment = pushComment(options, options.onComment)

  return options
}
|  |  | ||||||
// Wrap an array into an onComment callback that appends comment
// descriptor objects to it.
function pushComment(options, array) {
  return function (block, text, start, end, startLoc, endLoc) {
    let comment = {
      type: block ? 'Block' : 'Line',
      value: text,
      start: start,
      end: end
    }
    // Attach location/range data only when the relevant options are on.
    if (options.locations)
      comment.loc = new SourceLocation(this, startLoc, endLoc)
    if (options.ranges)
      comment.range = [start, end]
    array.push(comment)
  }
}
|  |  | ||||||
							
								
								
									
										128
									
								
								Source/node_modules/acorn/src/parseutil.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										128
									
								
								Source/node_modules/acorn/src/parseutil.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,128 @@ | |||||||
|  | import {types as tt} from "./tokentype" | ||||||
|  | import {Parser} from "./state" | ||||||
|  | import {lineBreak, skipWhiteSpace} from "./whitespace" | ||||||
|  |  | ||||||
|  | const pp = Parser.prototype | ||||||
|  |  | ||||||
|  | // ## Parser utilities | ||||||
|  |  | ||||||
// Matches one directive-prologue statement: a single- or double-quoted
// string literal, or a lone `;`.
// NOTE(review): inside the character alternatives, `\.` matches a
// literal `.` — later acorn versions use `\\.` here to skip escape
// sequences inside the string; confirm against upstream before
// changing this vendored copy.
const literal = /^(?:'((?:[^\']|\.)*)'|"((?:[^\"]|\.)*)"|;)/
pp.strictDirective = function(start) {
  // Scan forward from `start` through the directive prologue, looking
  // for a "use strict" directive; stop at the first non-directive.
  for (;;) {
    skipWhiteSpace.lastIndex = start
    start += skipWhiteSpace.exec(this.input)[0].length
    let match = literal.exec(this.input.slice(start))
    if (!match) return false
    if ((match[1] || match[2]) == "use strict") return true
    start += match[0].length
  }
}
|  |  | ||||||
|  | // Predicate that tests whether the next token is of the given | ||||||
|  | // type, and if yes, consumes it as a side effect. | ||||||
|  |  | ||||||
pp.eat = function(type) {
  // Consume the current token only when it matches the given type.
  if (this.type !== type) return false
  this.next()
  return true
}
|  |  | ||||||
|  | // Tests whether parsed token is a contextual keyword. | ||||||
|  |  | ||||||
pp.isContextual = function(name) {
  // A contextual keyword is just a name token with the right value.
  if (this.type !== tt.name) return false
  return this.value === name
}
|  |  | ||||||
|  | // Consumes contextual keyword if possible. | ||||||
|  |  | ||||||
pp.eatContextual = function(name) {
  // Consume the name token only when its value matches.
  if (this.value !== name) return false
  return this.eat(tt.name)
}
|  |  | ||||||
|  | // Asserts that following token is given contextual keyword. | ||||||
|  |  | ||||||
pp.expectContextual = function(name) {
  // Require the given contextual keyword at the current position.
  if (this.eatContextual(name)) return
  this.unexpected()
}
|  |  | ||||||
|  | // Test whether a semicolon can be inserted at the current position. | ||||||
|  |  | ||||||
pp.canInsertSemicolon = function() {
  // ASI applies before EOF, before `}`, or after a line break.
  if (this.type === tt.eof) return true
  if (this.type === tt.braceR) return true
  return lineBreak.test(this.input.slice(this.lastTokEnd, this.start))
}
|  |  | ||||||
pp.insertSemicolon = function() {
  // Returns true (after notifying the optional callback) when ASI
  // applies here; otherwise returns undefined.
  if (!this.canInsertSemicolon()) return
  if (this.options.onInsertedSemicolon)
    this.options.onInsertedSemicolon(this.lastTokEnd, this.lastTokEndLoc)
  return true
}
|  |  | ||||||
|  | // Consume a semicolon, or, failing that, see if we are allowed to | ||||||
|  | // pretend that there is a semicolon at this position. | ||||||
|  |  | ||||||
pp.semicolon = function() {
  // Accept an explicit `;`, or a position where ASI may pretend one exists.
  if (this.eat(tt.semi)) return
  if (this.insertSemicolon()) return
  this.unexpected()
}
|  |  | ||||||
pp.afterTrailingComma = function(tokType, notNext) {
  // True when the current token is the closer that follows a trailing
  // comma; notifies the optional callback and (unless `notNext`)
  // consumes the closer.
  if (this.type != tokType) return
  if (this.options.onTrailingComma)
    this.options.onTrailingComma(this.lastTokStart, this.lastTokStartLoc)
  if (!notNext)
    this.next()
  return true
}
|  |  | ||||||
|  | // Expect a token of a given type. If found, consume it, otherwise, | ||||||
|  | // raise an unexpected token error. | ||||||
|  |  | ||||||
pp.expect = function(type) {
  // Consume the expected token or report it as unexpected.
  if (!this.eat(type)) this.unexpected()
}
|  |  | ||||||
|  | // Raise an unexpected token error. | ||||||
|  |  | ||||||
pp.unexpected = function(pos) {
  // Default to the current token's start when no position is given.
  this.raise(pos == null ? this.start : pos, "Unexpected token")
}
|  |  | ||||||
// Accumulator for positions of would-be destructuring errors, filled
// while parsing and checked later. -1 means "not recorded so far".
export class DestructuringErrors {
  constructor() {
    this.shorthandAssign = -1
    this.trailingComma = -1
    this.parenthesizedAssign = -1
    this.parenthesizedBind = -1
  }
}
|  |  | ||||||
pp.checkPatternErrors = function(refDestructuringErrors, isAssign) {
  if (!refDestructuringErrors) return
  // A comma after a rest element was recorded while parsing the pattern.
  if (refDestructuringErrors.trailingComma > -1)
    this.raiseRecoverable(refDestructuringErrors.trailingComma, "Comma is not permitted after the rest element")
  // Which parenthesized position matters depends on assignment vs binding.
  const parens = isAssign ? refDestructuringErrors.parenthesizedAssign : refDestructuringErrors.parenthesizedBind
  if (parens > -1) this.raiseRecoverable(parens, "Parenthesized pattern")
}
|  |  | ||||||
pp.checkExpressionErrors = function(refDestructuringErrors, andThrow) {
  // With no record there is nothing to report.
  let pos = -1
  if (refDestructuringErrors) pos = refDestructuringErrors.shorthandAssign
  if (!andThrow) return pos >= 0
  if (pos > -1)
    this.raise(pos, "Shorthand property assignments are valid only in destructuring patterns")
}
|  |  | ||||||
pp.checkYieldAwaitInDefaultParams = function() {
  // Report the earlier of the two recorded positions first.
  const yieldFirst = this.yieldPos && (!this.awaitPos || this.yieldPos < this.awaitPos)
  if (yieldFirst)
    this.raise(this.yieldPos, "Yield expression cannot be a default value")
  if (this.awaitPos)
    this.raise(this.awaitPos, "Await expression cannot be a default value")
}
|  |  | ||||||
pp.isSimpleAssignTarget = function(expr) {
  // Unwrap any number of (non-standard) ParenthesizedExpression nodes,
  // then accept only plain identifiers and member expressions.
  let target = expr
  while (target.type === "ParenthesizedExpression")
    target = target.expression
  return target.type === "Identifier" || target.type === "MemberExpression"
}
							
								
								
									
										111
									
								
								Source/node_modules/acorn/src/state.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										111
									
								
								Source/node_modules/acorn/src/state.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,111 @@ | |||||||
|  | import {reservedWords, keywords} from "./identifier" | ||||||
|  | import {types as tt} from "./tokentype" | ||||||
|  | import {lineBreak} from "./whitespace" | ||||||
|  | import {getOptions} from "./options" | ||||||
|  |  | ||||||
|  | // Registered plugins | ||||||
|  | export const plugins = {} | ||||||
|  |  | ||||||
// Turn a space-separated word list into an anchored alternation regexp.
function keywordRegexp(words) {
  const pattern = words.split(" ").join("|")
  return new RegExp("^(" + pattern + ")$")
}
|  |  | ||||||
export class Parser {
  constructor(options, input, startPos) {
    this.options = options = getOptions(options)
    this.sourceFile = options.sourceFile
    // Keyword/reserved-word regexps for the configured ecmaVersion.
    this.keywords = keywordRegexp(keywords[options.ecmaVersion >= 6 ? 6 : 5])
    let reserved = ""
    if (!options.allowReserved) {
      // Walk down from the requested version to the nearest one that
      // has a reserved-word list defined.
      for (let v = options.ecmaVersion;; v--)
        if (reserved = reservedWords[v]) break
      if (options.sourceType == "module") reserved += " await"
    }
    this.reservedWords = keywordRegexp(reserved)
    let reservedStrict = (reserved ? reserved + " " : "") + reservedWords.strict
    this.reservedWordsStrict = keywordRegexp(reservedStrict)
    this.reservedWordsStrictBind = keywordRegexp(reservedStrict + " " + reservedWords.strictBind)
    this.input = String(input)

    // Used to signal to callers of `readWord1` whether the word
    // contained any escape sequences. This is needed because words with
    // escape sequences must not be interpreted as keywords.
    this.containsEsc = false

    // Load plugins
    this.loadPlugins(options.plugins)

    // Set up token state

    // The current position of the tokenizer in the input.
    if (startPos) {
      this.pos = startPos
      this.lineStart = this.input.lastIndexOf("\n", startPos - 1) + 1
      this.curLine = this.input.slice(0, this.lineStart).split(lineBreak).length
    } else {
      this.pos = this.lineStart = 0
      this.curLine = 1
    }

    // Properties of the current token:
    // Its type
    this.type = tt.eof
    // For tokens that include more information than their type, the value
    this.value = null
    // Its start and end offset
    this.start = this.end = this.pos
    // And, if locations are used, the {line, column} object
    // corresponding to those offsets
    this.startLoc = this.endLoc = this.curPosition()

    // Position information for the previous token
    this.lastTokEndLoc = this.lastTokStartLoc = null
    this.lastTokStart = this.lastTokEnd = this.pos

    // The context stack is used to superficially track syntactic
    // context to predict whether a regular expression is allowed in a
    // given position.
    this.context = this.initialContext()
    this.exprAllowed = true

    // Figure out if it's a module code.
    this.inModule = options.sourceType === "module"
    this.strict = this.inModule || this.strictDirective(this.pos)

    // Used to signify the start of a potential arrow function
    this.potentialArrowAt = -1

    // Flags to track whether we are in a function, a generator, an async function.
    this.inFunction = this.inGenerator = this.inAsync = false
    // Positions to delayed-check that yield/await does not exist in default parameters.
    this.yieldPos = this.awaitPos = 0
    // Labels in scope.
    this.labels = []

    // If enabled, skip leading hashbang line.
    if (this.pos === 0 && options.allowHashBang && this.input.slice(0, 2) === '#!')
      this.skipLineComment(2)
  }

  // DEPRECATED Kept for backwards compatibility until 3.0 in case a plugin uses them
  isKeyword(word) { return this.keywords.test(word) }
  isReservedWord(word) { return this.reservedWords.test(word) }

  // Replace an overridable parser method: `f` receives the previous
  // implementation and returns the new one.
  extend(name, f) {
    this[name] = f(this[name])
  }

  // Instantiate every configured plugin, looking each up in the
  // module-level `plugins` registry.
  loadPlugins(pluginConfigs) {
    for (let name in pluginConfigs) {
      let plugin = plugins[name]
      if (!plugin) throw new Error("Plugin '" + name + "' not found")
      plugin(this, pluginConfigs[name])
    }
  }

  // Parse the entire input into a Program node (or append to the
  // caller-supplied `program` node).
  parse() {
    let node = this.options.program || this.startNode()
    this.nextToken()
    return this.parseTopLevel(node)
  }
}
							
								
								
									
										744
									
								
								Source/node_modules/acorn/src/statement.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										744
									
								
								Source/node_modules/acorn/src/statement.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,744 @@ | |||||||
|  | import {types as tt} from "./tokentype" | ||||||
|  | import {Parser} from "./state" | ||||||
|  | import {lineBreak, skipWhiteSpace} from "./whitespace" | ||||||
|  | import {isIdentifierStart, isIdentifierChar} from "./identifier" | ||||||
|  | import {DestructuringErrors} from "./parseutil" | ||||||
|  |  | ||||||
|  | const pp = Parser.prototype | ||||||
|  |  | ||||||
|  | // ### Statement parsing | ||||||
|  |  | ||||||
|  | // Parse a program. Initializes the parser, reads any number of | ||||||
|  | // statements, and wraps them in a Program node.  Optionally takes a | ||||||
|  | // `program` argument.  If present, the statements will be appended | ||||||
|  | // to its body instead of creating a new node. | ||||||
|  |  | ||||||
pp.parseTopLevel = function(node) {
  // Collect statements until EOF, appending to an existing body when
  // the caller passed a partially-built Program node.
  let exports = {}
  if (!node.body) node.body = []
  while (this.type !== tt.eof)
    node.body.push(this.parseStatement(true, true, exports))
  this.next()
  if (this.options.ecmaVersion >= 6)
    node.sourceType = this.options.sourceType
  return this.finishNode(node, "Program")
}
|  |  | ||||||
|  | const loopLabel = {kind: "loop"}, switchLabel = {kind: "switch"} | ||||||
|  |  | ||||||
pp.isLet = function() {
  // `let` starts a declaration only under ES6+, and only when the
  // token after it can begin a binding.
  if (this.options.ecmaVersion < 6 || this.type !== tt.name || this.value != "let") return false
  skipWhiteSpace.lastIndex = this.pos
  const skipped = skipWhiteSpace.exec(this.input)[0].length
  const next = this.pos + skipped
  const nextCh = this.input.charCodeAt(next)
  // `let [` and `let {` always start a declaration.
  if (nextCh === 91 || nextCh === 123) return true // '[' and '{'
  if (!isIdentifierStart(nextCh, true)) return false
  // `let` followed by a non-keyword identifier is a declaration.
  let pos = next + 1
  while (isIdentifierChar(this.input.charCodeAt(pos), true)) ++pos
  const ident = this.input.slice(next, pos)
  return !this.isKeyword(ident)
}
|  |  | ||||||
|  | // check 'async [no LineTerminator here] function' | ||||||
|  | // - 'async /*foo*/ function' is OK. | ||||||
|  | // - 'async /*\n*/ function' is invalid. | ||||||
// check 'async [no LineTerminator here] function'
// - 'async /*foo*/ function' is OK.
// - 'async /*\n*/ function' is invalid.
pp.isAsyncFunction = function() {
  // 'async' is only meaningful as a name token under ES2017+.
  if (this.options.ecmaVersion < 8 || this.type !== tt.name || this.value != "async")
    return false

  skipWhiteSpace.lastIndex = this.pos
  const skip = skipWhiteSpace.exec(this.input)
  const next = this.pos + skip[0].length
  // No line terminator may separate 'async' from 'function'.
  if (lineBreak.test(this.input.slice(this.pos, next))) return false
  if (this.input.slice(next, next + 8) !== "function") return false
  // 'function' must be a complete word, not a longer identifier.
  // NOTE(review): upstream passes charAt (a string) where
  // isIdentifierChar takes a char code; preserved as-is — confirm
  // against acorn history before changing.
  return next + 8 == this.input.length || !isIdentifierChar(this.input.charAt(next + 8))
}
|  |  | ||||||
|  | // Parse a single statement. | ||||||
|  | // | ||||||
|  | // If expecting a statement and finding a slash operator, parse a | ||||||
|  | // regular expression literal. This is to handle cases like | ||||||
|  | // `if (foo) /blah/.exec(foo)`, where looking at the previous token | ||||||
|  | // does not help. | ||||||
|  |  | ||||||
pp.parseStatement = function(declaration, topLevel, exports) {
  // declaration: whether declaration statements (function/class/let/const)
  //   are valid in this position.
  // topLevel: whether we are at the top level of the program; needed to
  //   validate import/export placement.
  // exports: map of already-exported names, threaded through to parseExport
  //   for duplicate-export checking.
  let starttype = this.type, node = this.startNode(), kind

  // `let` is not a keyword token; detect it contextually and re-dispatch
  // the statement as a variable declaration of kind "let".
  if (this.isLet()) {
    starttype = tt._var
    kind = "let"
  }

  // Most types of statements are recognized by the keyword they
  // start with. Many are trivial to parse, some require a bit of
  // complexity.

  switch (starttype) {
  case tt._break: case tt._continue: return this.parseBreakContinueStatement(node, starttype.keyword)
  case tt._debugger: return this.parseDebuggerStatement(node)
  case tt._do: return this.parseDoStatement(node)
  case tt._for: return this.parseForStatement(node)
  case tt._function:
    // From ES6 on, function declarations are only allowed where
    // declarations are valid.
    if (!declaration && this.options.ecmaVersion >= 6) this.unexpected()
    return this.parseFunctionStatement(node, false)
  case tt._class:
    if (!declaration) this.unexpected()
    return this.parseClass(node, true)
  case tt._if: return this.parseIfStatement(node)
  case tt._return: return this.parseReturnStatement(node)
  case tt._switch: return this.parseSwitchStatement(node)
  case tt._throw: return this.parseThrowStatement(node)
  case tt._try: return this.parseTryStatement(node)
  case tt._const: case tt._var:
    // `kind` may already be "let" from the contextual check above.
    kind = kind || this.value
    if (!declaration && kind != "var") this.unexpected()
    return this.parseVarStatement(node, kind)
  case tt._while: return this.parseWhileStatement(node)
  case tt._with: return this.parseWithStatement(node)
  case tt.braceL: return this.parseBlock()
  case tt.semi: return this.parseEmptyStatement(node)
  case tt._export:
  case tt._import:
    // import/export must sit at the top level of a module, unless the
    // allowImportExportEverywhere option relaxes this.
    if (!this.options.allowImportExportEverywhere) {
      if (!topLevel)
        this.raise(this.start, "'import' and 'export' may only appear at the top level")
      if (!this.inModule)
        this.raise(this.start, "'import' and 'export' may appear only with 'sourceType: module'")
    }
    return starttype === tt._import ? this.parseImport(node) : this.parseExport(node, exports)

    // If the statement does not start with a statement keyword or a
    // brace, it's an ExpressionStatement or LabeledStatement. We
    // simply start parsing an expression, and afterwards, if the
    // next token is a colon and the expression was a simple
    // Identifier node, we switch to interpreting it as a label.
  default:
    // `async function` at statement position, where declarations are
    // allowed, is an async function declaration.
    if (this.isAsyncFunction() && declaration) {
      this.next()
      return this.parseFunctionStatement(node, true)
    }

    let maybeName = this.value, expr = this.parseExpression()
    if (starttype === tt.name && expr.type === "Identifier" && this.eat(tt.colon))
      return this.parseLabeledStatement(node, maybeName, expr)
    else return this.parseExpressionStatement(node, expr)
  }
}
|  |  | ||||||
pp.parseBreakContinueStatement = function(node, keyword) {
  let isBreak = keyword == "break"
  this.next()
  // The statement may end immediately at an explicit or inserted
  // semicolon (unlabeled form), or carry a label identifier.
  if (this.eat(tt.semi) || this.insertSemicolon()) node.label = null
  else if (this.type !== tt.name) this.unexpected()
  else {
    node.label = this.parseIdent()
    this.semicolon()
  }

  // Verify that there is an actual destination to break or
  // continue to. NOTE: `i` is deliberately `var`-scoped so its final
  // value can be inspected after the loop; i === labels.length means
  // no valid target was found.
  for (var i = 0; i < this.labels.length; ++i) {
    let lab = this.labels[i]
    if (node.label == null || lab.name === node.label.name) {
      // An unlabeled continue needs an enclosing loop; break also
      // accepts a switch (lab.kind is "loop" or "switch" when set).
      if (lab.kind != null && (isBreak || lab.kind === "loop")) break
      // A labeled break may target any labeled statement.
      if (node.label && isBreak) break
    }
  }
  if (i === this.labels.length) this.raise(node.start, "Unsyntactic " + keyword)
  return this.finishNode(node, isBreak ? "BreakStatement" : "ContinueStatement")
}
|  |  | ||||||
pp.parseDebuggerStatement = function(node) {
  // `debugger` is a bare keyword statement: consume the keyword, then
  // the (possibly inserted) semicolon.
  this.next()
  this.semicolon()
  return this.finishNode(node, "DebuggerStatement")
}
|  |  | ||||||
pp.parseDoStatement = function(node) {
  this.next()
  // Register the anonymous loop label so break/continue in the body can
  // resolve to this loop.
  this.labels.push(loopLabel)
  node.body = this.parseStatement(false)
  this.labels.pop()
  this.expect(tt._while)
  node.test = this.parseParenExpression()
  // ES6 makes the final semicolon of do/while fully optional, so just
  // consume it if present; earlier versions use normal (ASI-aware)
  // semicolon handling.
  if (this.options.ecmaVersion >= 6)
    this.eat(tt.semi)
  else
    this.semicolon()
  return this.finishNode(node, "DoWhileStatement")
}
|  |  | ||||||
|  | // Disambiguating between a `for` and a `for`/`in` or `for`/`of` | ||||||
|  | // loop is non-trivial. Basically, we have to parse the init `var` | ||||||
|  | // statement or expression, disallowing the `in` operator (see | ||||||
|  | // the second parameter to `parseExpression`), and then check | ||||||
|  | // whether the next token is `in` or `of`. When there is no init | ||||||
|  | // part (semicolon immediately after the opening parenthesis), it | ||||||
|  | // is a regular `for` loop. | ||||||
|  |  | ||||||
pp.parseForStatement = function(node) {
  this.next()
  this.labels.push(loopLabel)
  this.expect(tt.parenL)
  // Empty init clause: definitely a plain C-style `for`.
  if (this.type === tt.semi) return this.parseFor(node, null)
  let isLet = this.isLet()
  if (this.type === tt._var || this.type === tt._const || isLet) {
    let init = this.startNode(), kind = isLet ? "let" : this.value
    this.next()
    this.parseVar(init, true, kind)
    this.finishNode(init, "VariableDeclaration")
    // for-in/for-of requires exactly one declarator and (for let/const)
    // no initializer on it.
    if ((this.type === tt._in || (this.options.ecmaVersion >= 6 && this.isContextual("of"))) && init.declarations.length === 1 &&
        !(kind !== "var" && init.declarations[0].init))
      return this.parseForIn(node, init)
    return this.parseFor(node, init)
  }
  // Expression init: parse with `in` disallowed, then decide between a
  // plain for loop and for-in/for-of (which needs the expression
  // converted into an assignment target).
  let refDestructuringErrors = new DestructuringErrors
  let init = this.parseExpression(true, refDestructuringErrors)
  if (this.type === tt._in || (this.options.ecmaVersion >= 6 && this.isContextual("of"))) {
    this.toAssignable(init)
    this.checkLVal(init)
    this.checkPatternErrors(refDestructuringErrors, true)
    return this.parseForIn(node, init)
  } else {
    this.checkExpressionErrors(refDestructuringErrors, true)
  }
  return this.parseFor(node, init)
}
|  |  | ||||||
pp.parseFunctionStatement = function(node, isAsync) {
  // The `async` keyword (if any) was already consumed by the caller;
  // consume `function` and parse the declaration.
  this.next()
  return this.parseFunction(node, true, false, isAsync)
}
|  |  | ||||||
pp.isFunction = function() {
  // True when the current token starts a (possibly async) function.
  if (this.type === tt._function) return true
  return this.isAsyncFunction()
}
|  |  | ||||||
pp.parseIfStatement = function(node) {
  this.next()
  node.test = this.parseParenExpression()
  // Sloppy mode tolerates function declarations directly inside an `if`
  // branch. Evaluated per branch, because it depends on the token that
  // starts each branch.
  const allowFunctionHere = () => !this.strict && this.isFunction()
  node.consequent = this.parseStatement(allowFunctionHere())
  node.alternate = this.eat(tt._else) ? this.parseStatement(allowFunctionHere()) : null
  return this.finishNode(node, "IfStatement")
}
|  |  | ||||||
pp.parseReturnStatement = function(node) {
  if (!this.inFunction && !this.options.allowReturnOutsideFunction)
    this.raise(this.start, "'return' outside of function")
  this.next()

  // `return` takes an optional argument: an explicit or insertable
  // semicolon right after the keyword means "no argument".
  if (this.eat(tt.semi) || this.insertSemicolon()) {
    node.argument = null
  } else {
    node.argument = this.parseExpression()
    this.semicolon()
  }
  return this.finishNode(node, "ReturnStatement")
}
|  |  | ||||||
pp.parseSwitchStatement = function(node) {
  this.next()
  node.discriminant = this.parseParenExpression()
  node.cases = []
  this.expect(tt.braceL)
  // A switch is a valid target for a labeled `break`.
  this.labels.push(switchLabel)

  // Statements under must be grouped (by label) in SwitchCase
  // nodes. `cur` is used to keep the node that we are currently
  // adding statements to.

  for (var cur, sawDefault = false; this.type != tt.braceR;) {
    if (this.type === tt._case || this.type === tt._default) {
      let isCase = this.type === tt._case
      if (cur) this.finishNode(cur, "SwitchCase")
      node.cases.push(cur = this.startNode())
      cur.consequent = []
      this.next()
      if (isCase) {
        cur.test = this.parseExpression()
      } else {
        // Only a single `default` clause is allowed per switch.
        if (sawDefault) this.raiseRecoverable(this.lastTokStart, "Multiple default clauses")
        sawDefault = true
        cur.test = null
      }
      this.expect(tt.colon)
    } else {
      // A statement before the first case label is a syntax error.
      if (!cur) this.unexpected()
      cur.consequent.push(this.parseStatement(true))
    }
  }
  if (cur) this.finishNode(cur, "SwitchCase")
  this.next() // Closing brace
  this.labels.pop()
  return this.finishNode(node, "SwitchStatement")
}
|  |  | ||||||
pp.parseThrowStatement = function(node) {
  this.next()
  // `throw` is a restricted production: its argument must start on the
  // same line, so a line break after the keyword is an error.
  let textAfterKeyword = this.input.slice(this.lastTokEnd, this.start)
  if (lineBreak.test(textAfterKeyword))
    this.raise(this.lastTokEnd, "Illegal newline after throw")
  node.argument = this.parseExpression()
  this.semicolon()
  return this.finishNode(node, "ThrowStatement")
}
|  |  | ||||||
// Reused empty array added for node fields that are always empty.
// Shared between nodes, so it must never be mutated.

const empty = []
|  |  | ||||||
pp.parseTryStatement = function(node) {
  this.next()
  node.block = this.parseBlock()
  node.handler = null
  if (this.type === tt._catch) {
    let handler = this.startNode()
    this.next()
    this.expect(tt.parenL)
    // The catch parameter introduces a binding, so it is validated as one.
    handler.param = this.parseBindingAtom()
    this.checkLVal(handler.param, true)
    this.expect(tt.parenR)
    handler.body = this.parseBlock()
    node.handler = this.finishNode(handler, "CatchClause")
  }
  node.finalizer = null
  if (this.eat(tt._finally)) node.finalizer = this.parseBlock()
  // A bare `try { ... }` with neither clause is invalid.
  if (!node.handler && !node.finalizer)
    this.raise(node.start, "Missing catch or finally clause")
  return this.finishNode(node, "TryStatement")
}
|  |  | ||||||
pp.parseVarStatement = function(node, kind) {
  // kind: "var", "let" or "const"; the keyword token is still current.
  this.next()
  this.parseVar(node, false, kind)
  this.semicolon()
  return this.finishNode(node, "VariableDeclaration")
}
|  |  | ||||||
pp.parseWhileStatement = function(node) {
  this.next()
  node.test = this.parseParenExpression()
  // Track the implicit loop label for break/continue resolution.
  this.labels.push(loopLabel)
  node.body = this.parseStatement(false)
  this.labels.pop()
  return this.finishNode(node, "WhileStatement")
}
|  |  | ||||||
pp.parseWithStatement = function(node) {
  // `with` is forbidden entirely in strict mode.
  if (this.strict) this.raise(this.start, "'with' in strict mode")
  this.next()
  node.object = this.parseParenExpression()
  node.body = this.parseStatement(false)
  return this.finishNode(node, "WithStatement")
}
|  |  | ||||||
pp.parseEmptyStatement = function(node) {
  // A lone `;` becomes an EmptyStatement node.
  this.next()
  return this.finishNode(node, "EmptyStatement")
}
|  |  | ||||||
pp.parseLabeledStatement = function(node, maybeName, expr) {
  // A label name may not shadow one already active in the label stack.
  for (let i = 0; i < this.labels.length; ++i)
    if (this.labels[i].name === maybeName) this.raise(expr.start, "Label '" + maybeName + "' is already declared")
  // Record what kind of statement the label is attached to, so
  // break/continue can later validate their targets.
  let kind = this.type.isLoop ? "loop" : this.type === tt._switch ? "switch" : null
  // For directly nested labels (`a: b: for(...)`), propagate the new
  // statement start and kind outward so every label in the chain points
  // at the same loop/switch.
  for (let i = this.labels.length - 1; i >= 0; i--) {
    let label = this.labels[i]
    if (label.statementStart == node.start) {
      label.statementStart = this.start
      label.kind = kind
    } else break
  }
  this.labels.push({name: maybeName, kind: kind, statementStart: this.start})
  node.body = this.parseStatement(true)
  // Labeled declarations are invalid, with a sloppy-mode carve-out for
  // non-generator function declarations and `var`.
  if (node.body.type == "ClassDeclaration" ||
      node.body.type == "VariableDeclaration" && (this.strict || node.body.kind != "var") ||
      node.body.type == "FunctionDeclaration" && (this.strict || node.body.generator))
    this.raiseRecoverable(node.body.start, "Invalid labeled declaration")
  this.labels.pop()
  node.label = expr
  return this.finishNode(node, "LabeledStatement")
}
|  |  | ||||||
pp.parseExpressionStatement = function(node, expr) {
  // `expr` was already parsed by parseStatement's default branch.
  node.expression = expr
  this.semicolon()
  return this.finishNode(node, "ExpressionStatement")
}
|  |  | ||||||
// Parse a brace-enclosed block of statements.

pp.parseBlock = function() {
  let node = this.startNode()
  node.body = []
  this.expect(tt.braceL)
  // Collect statements until the matching closing brace is consumed.
  while (!this.eat(tt.braceR))
    node.body.push(this.parseStatement(true))
  return this.finishNode(node, "BlockStatement")
}
|  |  | ||||||
|  | // Parse a regular `for` loop. The disambiguation code in | ||||||
|  | // `parseStatement` will already have parsed the init statement or | ||||||
|  | // expression. | ||||||
|  |  | ||||||
pp.parseFor = function(node, init) {
  // The opening `(` and the init clause were consumed by parseForStatement.
  node.init = init
  this.expect(tt.semi)
  if (this.type === tt.semi) {
    node.test = null
  } else {
    node.test = this.parseExpression()
  }
  this.expect(tt.semi)
  if (this.type === tt.parenR) {
    node.update = null
  } else {
    node.update = this.parseExpression()
  }
  this.expect(tt.parenR)
  node.body = this.parseStatement(false)
  this.labels.pop()
  return this.finishNode(node, "ForStatement")
}
|  |  | ||||||
|  | // Parse a `for`/`in` and `for`/`of` loop, which are almost | ||||||
|  | // same from parser's perspective. | ||||||
|  |  | ||||||
pp.parseForIn = function(node, init) {
  // The current token (`in` or `of`) decides the resulting node type.
  let isForIn = this.type === tt._in
  this.next()
  node.left = init
  node.right = this.parseExpression()
  this.expect(tt.parenR)
  node.body = this.parseStatement(false)
  this.labels.pop()
  return this.finishNode(node, isForIn ? "ForInStatement" : "ForOfStatement")
}
|  |  | ||||||
|  | // Parse a list of variable declarations. | ||||||
|  |  | ||||||
pp.parseVar = function(node, isFor, kind) {
  // isFor: parsing the init clause of a `for` head, where `in`/`of` may
  //   terminate the declaration and initializer rules are relaxed.
  node.declarations = []
  node.kind = kind
  for (;;) {
    let decl = this.startNode()
    this.parseVarId(decl)
    if (this.eat(tt.eq)) {
      decl.init = this.parseMaybeAssign(isFor)
    } else if (kind === "const" && !(this.type === tt._in || (this.options.ecmaVersion >= 6 && this.isContextual("of")))) {
      // `const` requires an initializer, except as a for-in/for-of head.
      this.unexpected()
    } else if (decl.id.type != "Identifier" && !(isFor && (this.type === tt._in || this.isContextual("of")))) {
      // Destructuring patterns must be initialized, except in for-in/for-of.
      this.raise(this.lastTokEnd, "Complex binding patterns require an initialization value")
    } else {
      decl.init = null
    }
    node.declarations.push(this.finishNode(decl, "VariableDeclarator"))
    if (!this.eat(tt.comma)) break
  }
  return node
}
|  |  | ||||||
pp.parseVarId = function(decl) {
  // Parse the binding target (identifier or destructuring pattern) and
  // validate it as an assignable binding.
  decl.id = this.parseBindingAtom()
  this.checkLVal(decl.id, true)
}
|  |  | ||||||
|  | // Parse a function declaration or literal (depending on the | ||||||
|  | // `isStatement` parameter). | ||||||
|  |  | ||||||
pp.parseFunction = function(node, isStatement, allowExpressionBody, isAsync) {
  this.initFunction(node)
  // Async functions cannot be generators at this ES level, so the `*`
  // check is skipped for async.
  if (this.options.ecmaVersion >= 6 && !isAsync)
    node.generator = this.eat(tt.star)
  if (this.options.ecmaVersion >= 8)
    node.async = !!isAsync

  // isStatement == null means "declaration if a name follows"; used for
  // `export default function`.
  if (isStatement == null)
    isStatement = this.type == tt.name
  if (isStatement)
    node.id = this.parseIdent()

  // Save, then reset, the per-function parser state around parsing the
  // parameters and body; restored below.
  let oldInGen = this.inGenerator, oldInAsync = this.inAsync,
      oldYieldPos = this.yieldPos, oldAwaitPos = this.awaitPos, oldInFunc = this.inFunction
  this.inGenerator = node.generator
  this.inAsync = node.async
  this.yieldPos = 0
  this.awaitPos = 0
  this.inFunction = true

  // A function expression may still carry an optional name.
  if (!isStatement && this.type === tt.name)
    node.id = this.parseIdent()
  this.parseFunctionParams(node)
  this.parseFunctionBody(node, allowExpressionBody)

  this.inGenerator = oldInGen
  this.inAsync = oldInAsync
  this.yieldPos = oldYieldPos
  this.awaitPos = oldAwaitPos
  this.inFunction = oldInFunc
  return this.finishNode(node, isStatement ? "FunctionDeclaration" : "FunctionExpression")
}
|  |  | ||||||
pp.parseFunctionParams = function(node) {
  this.expect(tt.parenL)
  // Trailing commas in parameter lists are allowed from ES2017 (8) on.
  node.params = this.parseBindingList(tt.parenR, false, this.options.ecmaVersion >= 8, true)
  this.checkYieldAwaitInDefaultParams()
}
|  |  | ||||||
|  | // Parse a class declaration or literal (depending on the | ||||||
|  | // `isStatement` parameter). | ||||||
|  |  | ||||||
pp.parseClass = function(node, isStatement) {
  this.next()
  // isStatement == null: treat as a declaration only if a name follows;
  // used for `export default class`.
  if (isStatement == null) isStatement = this.type === tt.name
  this.parseClassId(node, isStatement)
  this.parseClassSuper(node)
  let classBody = this.startNode()
  let hadConstructor = false
  classBody.body = []
  this.expect(tt.braceL)
  while (!this.eat(tt.braceR)) {
    // Stray semicolons between members are allowed and ignored.
    if (this.eat(tt.semi)) continue
    let method = this.startNode()
    let isGenerator = this.eat(tt.star)
    let isAsync = false
    let isMaybeStatic = this.type === tt.name && this.value === "static"
    this.parsePropertyName(method)
    // `static` is a modifier only when it is not itself the method name,
    // i.e. not immediately followed by `(`.
    method.static = isMaybeStatic && this.type !== tt.parenL
    if (method.static) {
      if (isGenerator) this.unexpected()
      isGenerator = this.eat(tt.star)
      this.parsePropertyName(method)
    }
    // Likewise, `async` is a modifier only when a property name follows
    // on the same line; otherwise it is the method's own name.
    if (this.options.ecmaVersion >= 8 && !isGenerator && !method.computed &&
        method.key.type === "Identifier" && method.key.name === "async" && this.type !== tt.parenL &&
        !this.canInsertSemicolon()) {
      isAsync = true
      this.parsePropertyName(method)
    }
    method.kind = "method"
    let isGetSet = false
    if (!method.computed) {
      let {key} = method
      // `get`/`set` act as accessor modifiers only when another property
      // name (not `(`) follows them.
      if (!isGenerator && !isAsync && key.type === "Identifier" && this.type !== tt.parenL && (key.name === "get" || key.name === "set")) {
        isGetSet = true
        method.kind = key.name
        key = this.parsePropertyName(method)
      }
      // A non-static member named "constructor" (identifier or string) is
      // the class constructor, which may not be an accessor, generator or
      // async, and may appear only once.
      if (!method.static && (key.type === "Identifier" && key.name === "constructor" ||
          key.type === "Literal" && key.value === "constructor")) {
        if (hadConstructor) this.raise(key.start, "Duplicate constructor in the same class")
        if (isGetSet) this.raise(key.start, "Constructor can't have get/set modifier")
        if (isGenerator) this.raise(key.start, "Constructor can't be a generator")
        if (isAsync) this.raise(key.start, "Constructor can't be an async method")
        method.kind = "constructor"
        hadConstructor = true
      }
    }
    this.parseClassMethod(classBody, method, isGenerator, isAsync)
    // Accessor arity is validated after the method value is parsed:
    // getters take no parameters, setters take exactly one non-rest.
    if (isGetSet) {
      let paramCount = method.kind === "get" ? 0 : 1
      if (method.value.params.length !== paramCount) {
        let start = method.value.start
        if (method.kind === "get")
          this.raiseRecoverable(start, "getter should have no params")
        else
          this.raiseRecoverable(start, "setter should have exactly one param")
      } else {
        if (method.kind === "set" && method.value.params[0].type === "RestElement")
          this.raiseRecoverable(method.value.params[0].start, "Setter cannot use rest params")
      }
    }
  }
  node.body = this.finishNode(classBody, "ClassBody")
  return this.finishNode(node, isStatement ? "ClassDeclaration" : "ClassExpression")
}
|  |  | ||||||
pp.parseClassMethod = function(classBody, method, isGenerator, isAsync) {
  // Parse the method's function value and attach the finished
  // MethodDefinition node to the class body.
  method.value = this.parseMethod(isGenerator, isAsync)
  classBody.body.push(this.finishNode(method, "MethodDefinition"))
}
|  |  | ||||||
pp.parseClassId = function(node, isStatement) {
  // A class declaration requires a name; a class expression may omit it.
  if (this.type === tt.name) {
    node.id = this.parseIdent()
  } else if (isStatement) {
    this.unexpected()
  } else {
    node.id = null
  }
}
|  |  | ||||||
pp.parseClassSuper = function(node) {
  // An `extends` clause takes a left-hand-side expression; otherwise the
  // class has no superclass.
  node.superClass = null
  if (this.eat(tt._extends)) node.superClass = this.parseExprSubscripts()
}
|  |  | ||||||
|  | // Parses module export declaration. | ||||||
|  |  | ||||||
pp.parseExport = function(node, exports) {
  this.next()
  // export * from '...'
  if (this.eat(tt.star)) {
    this.expectContextual("from")
    node.source = this.type === tt.string ? this.parseExprAtom() : this.unexpected()
    this.semicolon()
    return this.finishNode(node, "ExportAllDeclaration")
  }
  if (this.eat(tt._default)) { // export default ...
    this.checkExport(exports, "default", this.lastTokStart)
    let isAsync
    if (this.type === tt._function || (isAsync = this.isAsyncFunction())) {
      // Passing null for isStatement lets parseFunction decide based on
      // whether a name follows (a named default export is a declaration).
      let fNode = this.startNode()
      this.next()
      if (isAsync) this.next()
      node.declaration = this.parseFunction(fNode, null, false, isAsync)
    } else if (this.type === tt._class) {
      let cNode = this.startNode()
      node.declaration = this.parseClass(cNode, null)
    } else {
      node.declaration = this.parseMaybeAssign()
      this.semicolon()
    }
    return this.finishNode(node, "ExportDefaultDeclaration")
  }
  // export var|const|let|function|class ...
  if (this.shouldParseExportStatement()) {
    node.declaration = this.parseStatement(true)
    // Register every exported binding for duplicate-export detection.
    if (node.declaration.type === "VariableDeclaration")
      this.checkVariableExport(exports, node.declaration.declarations)
    else
      this.checkExport(exports, node.declaration.id.name, node.declaration.id.start)
    node.specifiers = []
    node.source = null
  } else { // export { x, y as z } [from '...']
    node.declaration = null
    node.specifiers = this.parseExportSpecifiers(exports)
    if (this.eatContextual("from")) {
      node.source = this.type === tt.string ? this.parseExprAtom() : this.unexpected()
    } else {
      // check for keywords used as local names; without a `from` clause
      // the specifiers reference local bindings, so keywords and reserved
      // words are invalid there.
      for (let i = 0; i < node.specifiers.length; i++) {
        if (this.keywords.test(node.specifiers[i].local.name) || this.reservedWords.test(node.specifiers[i].local.name)) {
          this.unexpected(node.specifiers[i].local.start)
        }
      }

      node.source = null
    }
    this.semicolon()
  }
  return this.finishNode(node, "ExportNamedDeclaration")
}
|  |  | ||||||
pp.checkExport = function(exports, name, pos) {
  // When duplicate-export tracking is enabled, record `name` and flag a
  // re-export of an already-seen name.
  if (exports) {
    if (Object.prototype.hasOwnProperty.call(exports, name))
      this.raiseRecoverable(pos, "Duplicate export '" + name + "'")
    exports[name] = true
  }
}
|  |  | ||||||
pp.checkPatternExport = function(exports, pat) {
  // Recursively register every name bound by the pattern `pat` in
  // `exports`, so duplicate exported bindings are detected.
  let type = pat.type
  if (type == "Identifier")
    this.checkExport(exports, pat.name, pat.start)
  else if (type == "ObjectPattern")
    for (let i = 0; i < pat.properties.length; ++i)
      this.checkPatternExport(exports, pat.properties[i].value)
  else if (type == "ArrayPattern")
    for (let i = 0; i < pat.elements.length; ++i) {
      // Holes in array patterns (e.g. `[, x]`) leave null elements.
      let elt = pat.elements[i]
      if (elt) this.checkPatternExport(exports, elt)
    }
  else if (type == "RestElement")
    // Fix: rest elements (`export var [a, ...rest] = x`) bind a name too;
    // previously they were skipped, so duplicate exports of that name
    // went undetected.
    this.checkPatternExport(exports, pat.argument)
  else if (type == "AssignmentPattern")
    this.checkPatternExport(exports, pat.left)
  else if (type == "ParenthesizedExpression")
    this.checkPatternExport(exports, pat.expression)
}
|  |  | ||||||
pp.checkVariableExport = function(exports, decls) {
  // Register the bindings of every declarator for duplicate-export
  // detection; a no-op when tracking is disabled.
  if (!exports) return
  for (let decl of decls)
    this.checkPatternExport(exports, decl.id)
}
|  |  | ||||||
pp.shouldParseExportStatement = function() {
  // Anything that can begin a declaration may directly follow `export`.
  switch (this.type.keyword) {
  case "var": case "const": case "class": case "function":
    return true
  }
  return this.isLet() || this.isAsyncFunction()
}
|  |  | ||||||
|  | // Parses a comma-separated list of module exports. | ||||||
|  |  | ||||||
pp.parseExportSpecifiers = function(exports) {
  // export { x, y as z } [from '...']
  let specifiers = [], first = true
  this.expect(tt.braceL)
  while (!this.eat(tt.braceR)) {
    if (first) {
      first = false
    } else {
      // A comma separates specifiers; a trailing comma before `}` ends
      // the list.
      this.expect(tt.comma)
      if (this.afterTrailingComma(tt.braceR)) break
    }
    let spec = this.startNode()
    spec.local = this.parseIdent(true)
    spec.exported = this.eatContextual("as") ? this.parseIdent(true) : spec.local
    this.checkExport(exports, spec.exported.name, spec.exported.start)
    specifiers.push(this.finishNode(spec, "ExportSpecifier"))
  }
  return specifiers
}
|  |  | ||||||
|  | // Parses import declaration. | ||||||
|  |  | ||||||
pp.parseImport = function(node) {
  this.next()
  if (this.type === tt.string) {
    // import '...' — a bare import for side effects, with no specifiers.
    node.specifiers = empty
    node.source = this.parseExprAtom()
  } else {
    node.specifiers = this.parseImportSpecifiers()
    this.expectContextual("from")
    // The module source must be a string literal.
    if (this.type !== tt.string) this.unexpected()
    node.source = this.parseExprAtom()
  }
  this.semicolon()
  return this.finishNode(node, "ImportDeclaration")
}
|  |  | ||||||
|  | // Parses a comma-separated list of module imports. | ||||||
|  |  | ||||||
pp.parseImportSpecifiers = function() {
  let nodes = [], first = true
  if (this.type === tt.name) {
    // import defaultObj, { x, y as z } from '...'
    let node = this.startNode()
    node.local = this.parseIdent()
    this.checkLVal(node.local, true)
    nodes.push(this.finishNode(node, "ImportDefaultSpecifier"))
    if (!this.eat(tt.comma)) return nodes
  }
  // import * as ns from '...' — a namespace import ends the specifier
  // list; it cannot be combined with named specifiers.
  if (this.type === tt.star) {
    let node = this.startNode()
    this.next()
    this.expectContextual("as")
    node.local = this.parseIdent()
    this.checkLVal(node.local, true)
    nodes.push(this.finishNode(node, "ImportNamespaceSpecifier"))
    return nodes
  }
  this.expect(tt.braceL)
  while (!this.eat(tt.braceR)) {
    if (!first) {
      this.expect(tt.comma)
      if (this.afterTrailingComma(tt.braceR)) break
    } else first = false

    let node = this.startNode()
    // The imported name may be any identifier-like token, even a keyword
    // (hence parseIdent(true)); but when there is no `as`, that same name
    // becomes the local binding and must then be a valid, non-reserved
    // identifier.
    node.imported = this.parseIdent(true)
    if (this.eatContextual("as")) {
      node.local = this.parseIdent()
    } else {
      node.local = node.imported
      if (this.isKeyword(node.local.name)) this.unexpected(node.local.start)
      if (this.reservedWordsStrict.test(node.local.name)) this.raiseRecoverable(node.local.start, "The keyword '" + node.local.name + "' is reserved")
    }
    this.checkLVal(node.local, true)
    nodes.push(this.finishNode(node, "ImportSpecifier"))
  }
  return nodes
}
							
								
								
									
										110
									
								
								Source/node_modules/acorn/src/tokencontext.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										110
									
								
								Source/node_modules/acorn/src/tokencontext.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,110 @@ | |||||||
|  | // The algorithm used to determine whether a regexp can appear at a | ||||||
|  | // given point in the program is loosely based on sweet.js' approach. | ||||||
|  | // See https://github.com/mozilla/sweet.js/wiki/design | ||||||
|  |  | ||||||
|  | import {Parser} from "./state" | ||||||
|  | import {types as tt} from "./tokentype" | ||||||
|  | import {lineBreak} from "./whitespace" | ||||||
|  |  | ||||||
// A token context describes the lexical surroundings a token can occur
// in (statement vs. expression braces, template strings, etc.).
export class TokContext {
  constructor(token, isExpr, preserveSpace, override) {
    // `override`, when present, is a function that takes over reading
    // the next token (used for template-string bodies).
    this.override = override
    this.token = token
    // Coerce the two flags to real booleans.
    this.isExpr = Boolean(isExpr)
    this.preserveSpace = Boolean(preserveSpace)
  }
}
|  |  | ||||||
// Shared context singletons. The tokenizer compares against these by
// identity (see braceIsBlock / the updateContext handlers below) to
// tell, e.g., a block `{` from an object-literal `{`.
export const types = {
  b_stat: new TokContext("{", false), // brace opening a statement block
  b_expr: new TokContext("{", true), // brace opening an object literal
  b_tmpl: new TokContext("${", true), // template interpolation braces
  p_stat: new TokContext("(", false), // parens heading a statement (if/for/...)
  p_expr: new TokContext("(", true), // parens in expression position
  q_tmpl: new TokContext("`", true, true, p => p.readTmplToken()), // template body: keep whitespace, custom reader
  f_expr: new TokContext("function", true) // body of a function expression
}
|  |  | ||||||
const pp = Parser.prototype

// The parser starts out at the top level, which is statement context.
pp.initialContext = function() {
  return [types.b_stat]
}
|  |  | ||||||
// Decide whether a `{` that follows a token of type `prevType` opens a
// statement block (true) or an object literal / other expression (false).
pp.braceIsBlock = function(prevType) {
  // After a colon inside a brace context, the new `{` inherits the
  // nature of the surrounding braces (switch-case label vs. object key).
  if (prevType === tt.colon) {
    const ctx = this.curContext()
    if (ctx === types.b_stat || ctx === types.b_expr) return !ctx.isExpr
  }
  // `return {` starts an object unless a newline intervenes (ASI kicks in).
  if (prevType === tt._return)
    return lineBreak.test(this.input.slice(this.lastTokEnd, this.start))
  // These tokens can only be followed by a statement.
  if (prevType === tt._else || prevType === tt.semi || prevType === tt.eof || prevType === tt.parenR)
    return true
  // A brace directly inside a statement-brace context starts a block.
  if (prevType === tt.braceL) return this.curContext() === types.b_stat
  // Otherwise: block exactly when an expression is not allowed here.
  return !this.exprAllowed
}
|  |  | ||||||
// Adjust the context stack / `exprAllowed` flag after a token of the
// current type has been read, given the previous token's type.
pp.updateContext = function(prevType) {
  const type = this.type
  if (type.keyword && prevType === tt.dot) {
    // A keyword used as a property name (`a.if`) never starts an expression.
    this.exprAllowed = false
  } else {
    const handler = type.updateContext
    if (handler) handler.call(this, prevType)
    else this.exprAllowed = type.beforeExpr
  }
}
|  |  | ||||||
|  | // Token-specific context update code | ||||||
|  |  | ||||||
// Closing `)` or `}` pops the matching context.
tt.parenR.updateContext = tt.braceR.updateContext = function() {
  // Never pop the outermost (initial) context.
  if (this.context.length === 1) {
    this.exprAllowed = true
    return
  }
  const popped = this.context.pop()
  if (popped === types.b_stat && this.curContext() === types.f_expr) {
    // Closing the body of a function expression also ends that expression.
    this.context.pop()
    this.exprAllowed = false
  } else {
    // Ending a template interpolation always allows an expression;
    // otherwise a statement brace allows one, an expression brace doesn't.
    this.exprAllowed = popped === types.b_tmpl ? true : !popped.isExpr
  }
}
|  |  | ||||||
// Opening `{` pushes either a statement-block or object-literal context.
tt.braceL.updateContext = function(prevType) {
  const isBlock = this.braceIsBlock(prevType)
  this.context.push(isBlock ? types.b_stat : types.b_expr)
  this.exprAllowed = true
}
|  |  | ||||||
// `${` always opens a template-interpolation context.
tt.dollarBraceL.updateContext = function() {
  this.exprAllowed = true
  this.context.push(types.b_tmpl)
}
|  |  | ||||||
// Parens directly after if/for/with/while head a statement; any other
// parens are in expression position.
tt.parenL.updateContext = function(prevType) {
  const afterStatementKeyword =
    prevType === tt._if || prevType === tt._for ||
    prevType === tt._with || prevType === tt._while
  this.context.push(afterStatementKeyword ? types.p_stat : types.p_expr)
  this.exprAllowed = true
}
|  |  | ||||||
// `++` / `--` neither open nor close a context and leave the
// expression-allowed flag exactly as it was before the token.
tt.incDec.updateContext = function() {
  // tokExprAllowed stays unchanged
}
|  |  | ||||||
// A `function` keyword begins a function *expression* only when the
// previous token allows an expression and we are not at a spot where a
// statement is expected (right after `;` or `else`, or after a label
// colon / opening brace inside a statement-brace context).
tt._function.updateContext = function(prevType) {
  if (prevType.beforeExpr && prevType !== tt.semi && prevType !== tt._else &&
      !((prevType === tt.colon || prevType === tt.braceL) && this.curContext() === types.b_stat))
    this.context.push(types.f_expr)
  this.exprAllowed = false
}
|  |  | ||||||
// A backtick either closes the template we are currently inside of, or
// opens a new template context.
tt.backQuote.updateContext = function() {
  if (this.curContext() === types.q_tmpl) this.context.pop()
  else this.context.push(types.q_tmpl)
  this.exprAllowed = false
}
							
								
								
									
										686
									
								
								Source/node_modules/acorn/src/tokenize.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										686
									
								
								Source/node_modules/acorn/src/tokenize.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,686 @@ | |||||||
|  | import {isIdentifierStart, isIdentifierChar} from "./identifier" | ||||||
|  | import {types as tt, keywords as keywordTypes} from "./tokentype" | ||||||
|  | import {Parser} from "./state" | ||||||
|  | import {SourceLocation} from "./locutil" | ||||||
|  | import {lineBreak, lineBreakG, isNewLine, nonASCIIwhitespace} from "./whitespace" | ||||||
|  |  | ||||||
// Plain token object. During normal parsing, token data simply lives as
// properties on the parser itself; this class exists only for the
// `onToken` callback and the external tokenizer API.
export class Token {
  constructor(p) {
    this.type = p.type
    this.value = p.value
    this.start = p.start
    this.end = p.end
    const opts = p.options
    // Optional extras, mirroring the parser's configuration.
    if (opts.locations) this.loc = new SourceLocation(p, p.startLoc, p.endLoc)
    if (opts.ranges) this.range = [this.start, this.end]
  }
}
|  |  | ||||||
// ## Tokenizer

const pp = Parser.prototype

// Are we running under Rhino? Rhino exposes a `Packages` global of class
// JavaPackage; used below to skip its flaky RegExp engine when validating
// regular-expression literals.
const isRhino = typeof Packages == "object" && Object.prototype.toString.call(Packages) == "[object JavaPackage]"
|  |  | ||||||
|  | // Move to the next token | ||||||
|  |  | ||||||
pp.next = function() {
  // Report the token that is about to be consumed, if a listener is set.
  if (this.options.onToken) this.options.onToken(new Token(this))

  // Record where the previous token started/ended before advancing.
  this.lastTokStart = this.start
  this.lastTokEnd = this.end
  this.lastTokStartLoc = this.startLoc
  this.lastTokEndLoc = this.endLoc
  this.nextToken()
}
|  |  | ||||||
// Advance to the next token and return it as a standalone Token object
// (external-tokenizer entry point).
pp.getToken = function() {
  this.next()
  return new Token(this)
}
|  |  | ||||||
// In ES6-capable environments, expose the parser as an iterable token
// stream; iteration stops once the end-of-file token is produced.
if (typeof Symbol !== "undefined")
  pp[Symbol.iterator] = function() {
    let self = this
    return {
      next: function() {
        let token = self.getToken()
        return {value: token, done: token.type === tt.eof}
      }
    }
  }
|  |  | ||||||
// Return the current, innermost token context (top of the context stack).

pp.curContext = function() {
  return this.context[this.context.length - 1]
}
|  |  | ||||||
|  | // Read a single token, updating the parser object's token-related | ||||||
|  | // properties. | ||||||
|  |  | ||||||
pp.nextToken = function() {
  const ctx = this.curContext()
  // Whitespace-preserving contexts (template bodies) skip no space.
  if (!ctx || !ctx.preserveSpace) this.skipSpace()

  this.start = this.pos
  if (this.options.locations) this.startLoc = this.curPosition()
  if (this.pos >= this.input.length) return this.finishToken(tt.eof)

  // A context may supply its own token reader (e.g. template strings).
  if (ctx.override) return ctx.override(this)
  this.readToken(this.fullCharCodeAtPos())
}
|  |  | ||||||
pp.readToken = function(code) {
  // Identifier or keyword. A backslash (92) may begin a '\uXXXX' escape
  // inside an identifier, so it is dispatched to the word reader too.
  const startsWord =
    isIdentifierStart(code, this.options.ecmaVersion >= 6) || code === 92
  if (startsWord) return this.readWord()
  return this.getTokenFromCode(code)
}
|  |  | ||||||
// Read the full Unicode code point at the current position, combining a
// surrogate pair into a single astral code point when necessary.
pp.fullCharCodeAtPos = function() {
  const code = this.input.charCodeAt(this.pos)
  // Anything outside the high-surrogate range is already complete.
  if (code <= 0xd7ff || code >= 0xe000) return code
  const low = this.input.charCodeAt(this.pos + 1)
  // Equivalent to (code - 0xD800) * 0x400 + (low - 0xDC00) + 0x10000.
  return (code << 10) + low - 0x35fdc00
}
|  |  | ||||||
pp.skipBlockComment = function() {
  const startLoc = this.options.onComment && this.curPosition()
  const start = this.pos
  const end = this.input.indexOf("*/", this.pos += 2)
  if (end === -1) this.raise(this.pos - 2, "Unterminated comment")
  this.pos = end + 2
  if (this.options.locations) {
    // Count the newlines inside the comment to keep line tracking accurate.
    lineBreakG.lastIndex = start
    for (let match; (match = lineBreakG.exec(this.input)) && match.index < this.pos;) {
      ++this.curLine
      this.lineStart = match.index + match[0].length
    }
  }
  if (this.options.onComment)
    this.options.onComment(true, this.input.slice(start + 2, end), start, this.pos,
                           startLoc, this.curPosition())
}
|  |  | ||||||
pp.skipLineComment = function(startSkip) {
  const start = this.pos
  const startLoc = this.options.onComment && this.curPosition()
  this.pos += startSkip
  // Consume until a line terminator (LF, CR, LS, PS) or end of input.
  let ch = this.input.charCodeAt(this.pos)
  while (this.pos < this.input.length && ch !== 10 && ch !== 13 && ch !== 8232 && ch !== 8233) {
    ch = this.input.charCodeAt(++this.pos)
  }
  if (this.options.onComment)
    this.options.onComment(false, this.input.slice(start + startSkip, this.pos), start, this.pos,
                           startLoc, this.curPosition())
}
|  |  | ||||||
// Called at the start of the parse and after every token. Skips
// whitespace and comments so the next token starts at the right position.

pp.skipSpace = function() {
  loop: while (this.pos < this.input.length) {
    let ch = this.input.charCodeAt(this.pos)
    switch (ch) {
      case 32: case 160: // space, non-breaking space
        ++this.pos
        break
      case 13: // '\r' — swallow a following '\n' as part of the same break
        if (this.input.charCodeAt(this.pos + 1) === 10) {
          ++this.pos
        }
        // deliberate fall-through: CR (or CRLF) counts as a line terminator
      case 10: case 8232: case 8233: // '\n', LS, PS
        ++this.pos
        if (this.options.locations) {
          ++this.curLine
          this.lineStart = this.pos
        }
        break
      case 47: // '/' — may begin a comment
        switch (this.input.charCodeAt(this.pos + 1)) {
          case 42: // '*' — block comment
            this.skipBlockComment()
            break
          case 47: // '/' — line comment
            this.skipLineComment(2)
            break
          default: // plain slash: a real token, stop skipping
            break loop
        }
        break
      default:
        // Remaining whitespace: ASCII control range 9-13 or other
        // Unicode whitespace characters (checked via regexp).
        if (ch > 8 && ch < 14 || ch >= 5760 && nonASCIIwhitespace.test(String.fromCharCode(ch))) {
          ++this.pos
        } else {
          break loop
        }
    }
  }
}
|  |  | ||||||
|  | // Called at the end of every token. Sets `end`, `val`, and | ||||||
|  | // maintains `context` and `exprAllowed`, and skips the space after | ||||||
|  | // the token, so that the next one's `start` will point at the | ||||||
|  | // right position. | ||||||
|  |  | ||||||
pp.finishToken = function(type, val) {
  this.end = this.pos
  if (this.options.locations) this.endLoc = this.curPosition()
  const prevType = this.type
  this.type = type
  this.value = val

  // Let the freshly read token adjust the context stack / exprAllowed.
  this.updateContext(prevType)
}
|  |  | ||||||
|  | // ### Token reading | ||||||
|  |  | ||||||
|  | // This is the function that is called to fetch the next token. It | ||||||
|  | // is somewhat obscure, because it works in character codes rather | ||||||
|  | // than characters, and because operator parsing has been inlined | ||||||
|  | // into it. | ||||||
|  | // | ||||||
|  | // All in the name of speed. | ||||||
|  | // | ||||||
pp.readToken_dot = function() {
  const next = this.input.charCodeAt(this.pos + 1)
  // A digit after '.' means a fractional number literal.
  if (next >= 48 && next <= 57) return this.readNumber(true)
  const next2 = this.input.charCodeAt(this.pos + 2)
  if (this.options.ecmaVersion >= 6 && next === 46 && next2 === 46) {
    // '...' — spread / rest ellipsis
    this.pos += 3
    return this.finishToken(tt.ellipsis)
  }
  ++this.pos
  return this.finishToken(tt.dot)
}
|  |  | ||||||
pp.readToken_slash = function() { // '/'
  // In expression position a slash starts a regular-expression literal.
  if (this.exprAllowed) {
    ++this.pos
    return this.readRegexp()
  }
  const next = this.input.charCodeAt(this.pos + 1)
  return next === 61 ? this.finishOp(tt.assign, 2) // '/='
                     : this.finishOp(tt.slash, 1)
}
|  |  | ||||||
pp.readToken_mult_modulo_exp = function(code) { // '%', '*'
  let next = this.input.charCodeAt(this.pos + 1)
  let width = 1
  let type = code === 42 ? tt.star : tt.modulo

  // Exponentiation '**' and '**=' (ES2016+).
  if (this.options.ecmaVersion >= 7 && next === 42) {
    width = 2
    type = tt.starstar
    next = this.input.charCodeAt(this.pos + 2)
  }

  // A trailing '=' turns any of these into a compound assignment.
  if (next === 61) return this.finishOp(tt.assign, width + 1)
  return this.finishOp(type, width)
}
|  |  | ||||||
pp.readToken_pipe_amp = function(code) { // '|', '&'
  const next = this.input.charCodeAt(this.pos + 1)
  const isPipe = code === 124
  // Doubled character: logical operator. '=' suffix: compound assignment.
  if (next === code) return this.finishOp(isPipe ? tt.logicalOR : tt.logicalAND, 2)
  if (next === 61) return this.finishOp(tt.assign, 2)
  return this.finishOp(isPipe ? tt.bitwiseOR : tt.bitwiseAND, 1)
}
|  |  | ||||||
pp.readToken_caret = function() { // '^'
  // '^=' is a compound assignment; a bare '^' is bitwise XOR.
  const next = this.input.charCodeAt(this.pos + 1)
  return next === 61 ? this.finishOp(tt.assign, 2)
                     : this.finishOp(tt.bitwiseXOR, 1)
}
|  |  | ||||||
pp.readToken_plus_min = function(code) { // '+', '-'
  const next = this.input.charCodeAt(this.pos + 1)
  if (next === code) {
    // `-->` at the start of a line is an HTML-style line comment.
    if (next === 45 && this.input.charCodeAt(this.pos + 2) === 62 &&
        lineBreak.test(this.input.slice(this.lastTokEnd, this.pos))) {
      this.skipLineComment(3)
      this.skipSpace()
      return this.nextToken()
    }
    return this.finishOp(tt.incDec, 2) // '++' / '--'
  }
  if (next === 61) return this.finishOp(tt.assign, 2) // '+=' / '-='
  return this.finishOp(tt.plusMin, 1)
}
|  |  | ||||||
pp.readToken_lt_gt = function(code) { // '<', '>'
  const next = this.input.charCodeAt(this.pos + 1)
  if (next === code) {
    // '<<', '>>' or '>>>'; possibly with an '=' suffix for assignment.
    const width = code === 62 && this.input.charCodeAt(this.pos + 2) === 62 ? 3 : 2
    if (this.input.charCodeAt(this.pos + width) === 61)
      return this.finishOp(tt.assign, width + 1)
    return this.finishOp(tt.bitShift, width)
  }
  if (next === 33 && code === 60 && this.input.charCodeAt(this.pos + 2) === 45 &&
      this.input.charCodeAt(this.pos + 3) === 45) {
    // `<!--`, an XML-style comment treated as a line comment in scripts
    // (not allowed in modules).
    if (this.inModule) this.unexpected()
    this.skipLineComment(4)
    this.skipSpace()
    return this.nextToken()
  }
  // '<=' / '>=' are two characters wide; bare '<' / '>' are one.
  return this.finishOp(tt.relational, next === 61 ? 2 : 1)
}
|  |  | ||||||
pp.readToken_eq_excl = function(code) { // '=', '!'
  const next = this.input.charCodeAt(this.pos + 1)
  // '==' / '!=' and their strict three-character forms '===' / '!=='.
  if (next === 61)
    return this.finishOp(tt.equality, this.input.charCodeAt(this.pos + 2) === 61 ? 3 : 2)
  if (code === 61 && next === 62 && this.options.ecmaVersion >= 6) {
    // '=>' arrow
    this.pos += 2
    return this.finishToken(tt.arrow)
  }
  return this.finishOp(code === 61 ? tt.eq : tt.prefix, 1)
}
|  |  | ||||||
// Dispatch on the first character code of the next token. Raises on any
// character that cannot begin a token.
pp.getTokenFromCode = function(code) {
  switch (code) {
    // The interpretation of a dot depends on whether it is followed
    // by a digit or another two dots.
  case 46: // '.'
    return this.readToken_dot()

    // Punctuation tokens.
  case 40: ++this.pos; return this.finishToken(tt.parenL)
  case 41: ++this.pos; return this.finishToken(tt.parenR)
  case 59: ++this.pos; return this.finishToken(tt.semi)
  case 44: ++this.pos; return this.finishToken(tt.comma)
  case 91: ++this.pos; return this.finishToken(tt.bracketL)
  case 93: ++this.pos; return this.finishToken(tt.bracketR)
  case 123: ++this.pos; return this.finishToken(tt.braceL)
  case 125: ++this.pos; return this.finishToken(tt.braceR)
  case 58: ++this.pos; return this.finishToken(tt.colon)
  case 63: ++this.pos; return this.finishToken(tt.question)

  case 96: // '`'
    // Template strings need ES6; otherwise fall out of the switch and raise.
    if (this.options.ecmaVersion < 6) break
    ++this.pos
    return this.finishToken(tt.backQuote)

  case 48: // '0'
    let next = this.input.charCodeAt(this.pos + 1)
    if (next === 120 || next === 88) return this.readRadixNumber(16) // '0x', '0X' - hex number
    if (this.options.ecmaVersion >= 6) {
      if (next === 111 || next === 79) return this.readRadixNumber(8) // '0o', '0O' - octal number
      if (next === 98 || next === 66) return this.readRadixNumber(2) // '0b', '0B' - binary number
    }
    // Anything else beginning with a digit is an integer, octal
    // number, or float — deliberate fall-through to the digit case.
  case 49: case 50: case 51: case 52: case 53: case 54: case 55: case 56: case 57: // 1-9
    return this.readNumber(false)

    // Quotes produce strings.
  case 34: case 39: // '"', "'"
    return this.readString(code)

    // Operators are parsed inline in tiny state machines. '=' (61) is
    // often referred to. `finishOp` simply skips the amount of
    // characters it is given as second argument, and returns a token
    // of the type given by its first argument.

  case 47: // '/'
    return this.readToken_slash()

  case 37: case 42: // '%*'
    return this.readToken_mult_modulo_exp(code)

  case 124: case 38: // '|&'
    return this.readToken_pipe_amp(code)

  case 94: // '^'
    return this.readToken_caret()

  case 43: case 45: // '+-'
    return this.readToken_plus_min(code)

  case 60: case 62: // '<>'
    return this.readToken_lt_gt(code)

  case 61: case 33: // '=!'
    return this.readToken_eq_excl(code)

  case 126: // '~'
    return this.finishOp(tt.prefix, 1)
  }

  // Reached only when no case matched (or a case `break`ed out).
  this.raise(this.pos, "Unexpected character '" + codePointToString(code) + "'")
}
|  |  | ||||||
// Emit an operator token of the given type whose text is the next
// `size` characters of the input.
pp.finishOp = function(type, size) {
  const text = this.input.slice(this.pos, this.pos + size)
  this.pos += size
  return this.finishToken(type, text)
}
|  |  | ||||||
|  | // Parse a regular expression. Some context-awareness is necessary, | ||||||
|  | // since a '/' inside a '[]' set does not end the expression. | ||||||
|  |  | ||||||
// Attempt to compile a RegExp. When `throwErrorAt` is supplied, a
// SyntaxError from the engine is converted into a parser error at that
// position and any other error propagates; without it, failures simply
// yield undefined.
function tryCreateRegexp(src, flags, throwErrorAt, parser) {
  try {
    return new RegExp(src, flags)
  } catch (e) {
    if (throwErrorAt === undefined) return
    if (e instanceof SyntaxError)
      parser.raise(throwErrorAt, "Error parsing regular expression: " + e.message)
    throw e
  }
}

// Whether the running environment supports the `u` (unicode) regexp flag.
var regexpUnicodeSupport = !!tryCreateRegexp("\uffff", "u")
|  |  | ||||||
// Parse a regular-expression literal (the leading '/' has already been
// consumed). A '/' inside a '[...]' character class does not terminate
// the literal, and escapes suppress the special meaning of the next char.
pp.readRegexp = function() {
  let escaped, inClass, start = this.pos
  for (;;) {
    if (this.pos >= this.input.length) this.raise(start, "Unterminated regular expression")
    let ch = this.input.charAt(this.pos)
    // A literal newline inside a regexp is never valid.
    if (lineBreak.test(ch)) this.raise(start, "Unterminated regular expression")
    if (!escaped) {
      if (ch === "[") inClass = true
      else if (ch === "]" && inClass) inClass = false
      else if (ch === "/" && !inClass) break
      escaped = ch === "\\"
    } else escaped = false
    ++this.pos
  }
  let content = this.input.slice(start, this.pos)
  ++this.pos
  // Need to use `readWord1` because '\uXXXX' sequences are allowed
  // here (don't ask).
  let mods = this.readWord1()
  let tmp = content, tmpFlags = ""
  if (mods) {
    let validFlags = /^[gim]*$/
    if (this.options.ecmaVersion >= 6) validFlags = /^[gimuy]*$/
    if (!validFlags.test(mods)) this.raise(start, "Invalid regular expression flag")
    if (mods.indexOf("u") >= 0) {
      if (regexpUnicodeSupport) {
        tmpFlags = "u"
      } else {
        // Replace each astral symbol and every Unicode escape sequence that
        // possibly represents an astral symbol or a paired surrogate with a
        // single ASCII symbol to avoid throwing on regular expressions that
        // are only valid in combination with the `/u` flag.
        // Note: replacing with the ASCII symbol `x` might cause false
        // negatives in unlikely scenarios. For example, `[\u{61}-b]` is a
        // perfectly valid pattern that is equivalent to `[a-b]`, but it would
        // be replaced by `[x-b]` which throws an error.
        tmp = tmp.replace(/\\u\{([0-9a-fA-F]+)\}/g, (_match, code, offset) => {
          code = Number("0x" + code)
          if (code > 0x10FFFF) this.raise(start + offset + 3, "Code point out of bounds")
          return "x"
        })
        tmp = tmp.replace(/\\u([a-fA-F0-9]{4})|[\uD800-\uDBFF][\uDC00-\uDFFF]/g, "x")
        tmpFlags = tmpFlags.replace("u", "")
      }
    }
  }
  // Detect invalid regular expressions by compiling the (possibly
  // rewritten) pattern; errors are reported at the literal's start.
  let value = null
  // Rhino's regular expression parser is flaky and throws uncatchable exceptions,
  // so don't do detection if we are running under Rhino
  if (!isRhino) {
    tryCreateRegexp(tmp, tmpFlags, start, this)
    // Get a regular expression object for this pattern-flag pair, or `null` in
    // case the current environment doesn't support the flags it uses.
    value = tryCreateRegexp(content, mods)
  }
  return this.finishToken(tt.regexp, {pattern: content, flags: mods, value: value})
}
|  |  | ||||||
|  | // Read an integer in the given radix. Return null if zero digits | ||||||
|  | // were read, the integer value otherwise. When `len` is given, this | ||||||
|  | // will return `null` unless the integer has exactly `len` digits. | ||||||
|  |  | ||||||
pp.readInt = function(radix, len) {
  const start = this.pos
  const limit = len == null ? Infinity : len
  let total = 0
  for (let i = 0; i < limit; ++i) {
    const code = this.input.charCodeAt(this.pos)
    let digit
    if (code >= 97) digit = code - 97 + 10 // 'a' and above
    else if (code >= 65) digit = code - 65 + 10 // 'A' and above
    else if (code >= 48 && code <= 57) digit = code - 48 // '0'-'9'
    else digit = Infinity // not a digit at all
    if (digit >= radix) break
    ++this.pos
    total = total * radix + digit
  }
  // Fail when no digits were consumed, or a fixed length was requested
  // and not met exactly.
  if (this.pos === start || len != null && this.pos - start !== len) return null

  return total
}
|  |  | ||||||
// Read a hex/octal/binary literal after its two-character prefix.
pp.readRadixNumber = function(radix) {
  this.pos += 2 // skip the '0x' / '0o' / '0b' prefix
  const val = this.readInt(radix)
  if (val == null) this.raise(this.start + 2, "Expected number in radix " + radix)
  if (isIdentifierStart(this.fullCharCodeAtPos()))
    this.raise(this.pos, "Identifier directly after number")
  return this.finishToken(tt.num, val)
}
|  |  | ||||||
|  | // Read an integer, octal integer, or floating-point number. | ||||||
|  |  | ||||||
pp.readNumber = function(startsWithDot) {
  // `octal` tentatively marks a legacy 0-prefixed literal; it is cleared
  // just below when the literal turns out to be a lone "0".
  let start = this.pos, isFloat = false, octal = this.input.charCodeAt(this.pos) === 48
  if (!startsWithDot && this.readInt(10) === null) this.raise(start, "Invalid number")
  if (octal && this.pos == start + 1) octal = false
  let next = this.input.charCodeAt(this.pos)
  if (next === 46 && !octal) { // '.' — fractional part
    ++this.pos
    this.readInt(10)
    isFloat = true
    next = this.input.charCodeAt(this.pos)
  }
  if ((next === 69 || next === 101) && !octal) { // 'eE' — exponent part
    next = this.input.charCodeAt(++this.pos)
    if (next === 43 || next === 45) ++this.pos // '+-' — exponent sign
    if (this.readInt(10) === null) this.raise(start, "Invalid number")
    isFloat = true
  }
  if (isIdentifierStart(this.fullCharCodeAtPos())) this.raise(this.pos, "Identifier directly after number")

  // Legacy octal containing 8/9 digits, or any legacy octal in strict
  // mode, is rejected; otherwise parse in base 8.
  let str = this.input.slice(start, this.pos), val
  if (isFloat) val = parseFloat(str)
  else if (!octal || str.length === 1) val = parseInt(str, 10)
  else if (/[89]/.test(str) || this.strict) this.raise(start, "Invalid number")
  else val = parseInt(str, 8)
  return this.finishToken(tt.num, val)
}
|  |  | ||||||
|  | // Read a string value, interpreting backslash-escapes. | ||||||
|  |  | ||||||
// Read the code point of a '\u' escape: either the classic four-digit
// '\uXXXX' form or the ES6 '\u{XXXXXX}' form.
pp.readCodePoint = function() {
  const ch = this.input.charCodeAt(this.pos)
  let code

  if (ch === 123) { // '{' — ES6 braced form
    if (this.options.ecmaVersion < 6) this.unexpected()
    const codePos = ++this.pos
    code = this.readHexChar(this.input.indexOf('}', this.pos) - this.pos)
    ++this.pos // step past the closing '}'
    if (code > 0x10FFFF) this.raise(codePos, "Code point out of bounds")
  } else {
    code = this.readHexChar(4)
  }
  return code
}
|  |  | ||||||
// Convert a code point to its string form, splitting astral code points
// (above 0xFFFF) into a UTF-16 surrogate pair.
function codePointToString(code) {
  if (code <= 0xFFFF) return String.fromCharCode(code)
  const offset = code - 0x10000
  return String.fromCharCode(0xD800 + (offset >> 10), 0xDC00 + (offset & 1023))
}
|  |  | ||||||
pp.readString = function(quote) {
  let out = ""
  let chunkStart = ++this.pos
  for (;;) {
    if (this.pos >= this.input.length) this.raise(this.start, "Unterminated string constant")
    const ch = this.input.charCodeAt(this.pos)
    if (ch === quote) break
    if (ch === 92) { // '\' — flush the plain chunk, then decode the escape
      out += this.input.slice(chunkStart, this.pos)
      out += this.readEscapedChar(false)
      chunkStart = this.pos
    } else {
      // Raw newlines are not allowed inside ordinary string literals.
      if (isNewLine(ch)) this.raise(this.start, "Unterminated string constant")
      ++this.pos
    }
  }
  out += this.input.slice(chunkStart, this.pos++)
  return this.finishToken(tt.string, out)
}
|  |  | ||||||
|  | // Reads template string tokens. | ||||||
|  |  | ||||||
// Read one token inside a template string: either the delimiter tokens
// '`' / '${' (when positioned right at them after a template chunk) or a
// template text chunk, with escapes decoded and line endings normalized.
pp.readTmplToken = function() {
  let out = "", chunkStart = this.pos
  for (;;) {
    if (this.pos >= this.input.length) this.raise(this.start, "Unterminated template")
    let ch = this.input.charCodeAt(this.pos)
    if (ch === 96 || ch === 36 && this.input.charCodeAt(this.pos + 1) === 123) { // '`', '${'
      // Only emit the delimiter itself when we are at the start of this
      // token and the previous token was a template chunk.
      if (this.pos === this.start && this.type === tt.template) {
        if (ch === 36) {
          this.pos += 2
          return this.finishToken(tt.dollarBraceL)
        } else {
          ++this.pos
          return this.finishToken(tt.backQuote)
        }
      }
      out += this.input.slice(chunkStart, this.pos)
      return this.finishToken(tt.template, out)
    }
    if (ch === 92) { // '\' — flush chunk, then decode the escape sequence
      out += this.input.slice(chunkStart, this.pos)
      out += this.readEscapedChar(true)
      chunkStart = this.pos
    } else if (isNewLine(ch)) {
      out += this.input.slice(chunkStart, this.pos)
      ++this.pos
      switch (ch) {
        case 13:
          // Swallow the '\n' of a CRLF pair…
          if (this.input.charCodeAt(this.pos) === 10) ++this.pos
          // deliberate fall-through: CR / CRLF normalize to a single '\n'
        case 10:
          out += "\n"
          break
        default:
          // LS / PS are kept verbatim.
          out += String.fromCharCode(ch)
          break
      }
      if (this.options.locations) {
        ++this.curLine
        this.lineStart = this.pos
      }
      chunkStart = this.pos
    } else {
      ++this.pos
    }
  }
}
|  |  | ||||||
|  | // Used to read escaped characters | ||||||
|  |  | ||||||
// Expand a single backslash escape. On entry `this.pos` points at the
// backslash; on exit it points past the whole escape. `inTemplate` makes
// legacy octal escapes illegal even outside strict mode.
pp.readEscapedChar = function(inTemplate) {
  let ch = this.input.charCodeAt(++this.pos)
  ++this.pos
  switch (ch) {
  case 110: return "\n" // 'n' -> '\n'
  case 114: return "\r" // 'r' -> '\r'
  case 120: return String.fromCharCode(this.readHexChar(2)) // 'x'
  case 117: return codePointToString(this.readCodePoint()) // 'u'
  case 116: return "\t" // 't' -> '\t'
  case 98: return "\b" // 'b' -> '\b'
  case 118: return "\u000b" // 'v' -> '\u000b'
  case 102: return "\f" // 'f' -> '\f'
  case 13: if (this.input.charCodeAt(this.pos) === 10) ++this.pos // '\r\n'
  // fall through: an escaped line terminator contributes nothing to the value
  case 10: // ' \n'
    if (this.options.locations) { this.lineStart = this.pos; ++this.curLine }
    return ""
  default:
    if (ch >= 48 && ch <= 55) { // '0'-'7': legacy octal escape
      let octalStr = this.input.substr(this.pos - 1, 3).match(/^[0-7]+/)[0]
      let octal = parseInt(octalStr, 8)
      if (octal > 255) { // value too large: drop the last digit and re-parse
        octalStr = octalStr.slice(0, -1)
        octal = parseInt(octalStr, 8)
      }
      // Only "\0" is permitted in strict mode and inside templates.
      if (octalStr !== "0" && (this.strict || inTemplate)) {
        this.raise(this.pos - 2, "Octal literal in strict mode")
      }
      this.pos += octalStr.length - 1
      return String.fromCharCode(octal)
    }
    return String.fromCharCode(ch) // any other character escapes to itself
  }
}
|  |  | ||||||
|  | // Used to read character escape sequences ('\x', '\u', '\U'). | ||||||
|  |  | ||||||
// Read exactly `len` hexadecimal digits and return their value; raises at
// the start of the sequence when the digits are missing or invalid.
pp.readHexChar = function(len) {
  let seqStart = this.pos
  let value = this.readInt(16, len)
  if (value === null) this.raise(seqStart, "Bad character escape sequence")
  return value
}
|  |  | ||||||
|  | // Read an identifier, and return it as a string. Sets `this.containsEsc` | ||||||
|  | // to whether the word contained a '\u' escape. | ||||||
|  | // | ||||||
|  | // Incrementally adds only escaped chars, adding other chunks as-is | ||||||
|  | // as a micro-optimization. | ||||||
|  |  | ||||||
// Read the raw characters of an identifier starting at `this.pos`,
// expanding "\uXXXX" escapes as it goes. Sets `this.containsEsc`.
// Unescaped runs are appended slice-wise as a micro-optimization.
pp.readWord1 = function() {
  this.containsEsc = false
  let word = "", first = true, chunkStart = this.pos
  let astral = this.options.ecmaVersion >= 6 // astral identifier chars need ES6
  while (this.pos < this.input.length) {
    let ch = this.fullCharCodeAtPos()
    if (isIdentifierChar(ch, astral)) {
      this.pos += ch <= 0xffff ? 1 : 2 // astral chars occupy two UTF-16 units
    } else if (ch === 92) { // "\"
      this.containsEsc = true
      word += this.input.slice(chunkStart, this.pos)
      let escStart = this.pos
      if (this.input.charCodeAt(++this.pos) != 117) // "u"
        this.raise(this.pos, "Expecting Unicode escape sequence \\uXXXX")
      ++this.pos
      let esc = this.readCodePoint()
      // The escaped char must still be valid in its position (start vs. rest).
      if (!(first ? isIdentifierStart : isIdentifierChar)(esc, astral))
        this.raise(escStart, "Invalid Unicode escape")
      word += codePointToString(esc)
      chunkStart = this.pos
    } else {
      break
    }
    first = false
  }
  return word + this.input.slice(chunkStart, this.pos)
}
|  |  | ||||||
|  | // Read an identifier or keyword token. Will check for reserved | ||||||
|  | // words when necessary. | ||||||
|  |  | ||||||
// Read an identifier or keyword token. Keywords get their dedicated token
// type; an escape sequence inside a keyword is a (recoverable) error.
pp.readWord = function() {
  let word = this.readWord1()
  if (!this.keywords.test(word)) return this.finishToken(tt.name, word)
  if (this.containsEsc)
    this.raiseRecoverable(this.start, "Escape sequence in keyword " + word)
  return this.finishToken(keywordTypes[word], word)
}
							
								
								
									
										147
									
								
								Source/node_modules/acorn/src/tokentype.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										147
									
								
								Source/node_modules/acorn/src/tokentype.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,147 @@ | |||||||
|  | // ## Token types | ||||||
|  |  | ||||||
|  | // The assignment of fine-grained, information-carrying type objects | ||||||
|  | // allows the tokenizer to store the information it has about a | ||||||
|  | // token in a way that is very cheap for the parser to look up. | ||||||
|  |  | ||||||
|  | // All token type variables start with an underscore, to make them | ||||||
|  | // easy to recognize. | ||||||
|  |  | ||||||
|  | // The `beforeExpr` property is used to disambiguate between regular | ||||||
|  | // expressions and divisions. It is set on all token types that can | ||||||
|  | // be followed by an expression (thus, a slash after them would be a | ||||||
|  | // regular expression). | ||||||
|  | // | ||||||
|  | // The `startsExpr` property is used to check if the token ends a | ||||||
|  | // `yield` expression. It is set on all token types that either can | ||||||
|  | // directly start an expression (like a quotation mark) or can | ||||||
|  | // continue an expression (like the body of a string). | ||||||
|  | // | ||||||
|  | // `isLoop` marks a keyword as starting a loop, which is important | ||||||
|  | // to know when parsing a label, in order to allow or disallow | ||||||
|  | // continue jumps to that label. | ||||||
|  |  | ||||||
// A token type: a small immutable-by-convention record the tokenizer fills
// in once and the parser reads cheaply. See the header comment above for
// the meaning of `beforeExpr`, `startsExpr` and `isLoop`.
export class TokenType {
  constructor(label, conf = {}) {
    this.label = label
    this.keyword = conf.keyword
    // Boolean flags default to false when absent from `conf`.
    this.beforeExpr = Boolean(conf.beforeExpr)
    this.startsExpr = Boolean(conf.startsExpr)
    this.isLoop = Boolean(conf.isLoop)
    this.isAssign = Boolean(conf.isAssign)
    this.prefix = Boolean(conf.prefix)
    this.postfix = Boolean(conf.postfix)
    // Binary-operator precedence, or null for non-operators.
    this.binop = conf.binop || null
    // Filled in later by the tokenizer-context module where needed.
    this.updateContext = null
  }
}
|  |  | ||||||
// Shorthand for a binary-operator token type with the given precedence.
function binop(name, prec) {
  return new TokenType(name, {beforeExpr: true, binop: prec})
}
// Shared config objects for the two most common flag combinations.
const beforeExpr = {beforeExpr: true}, startsExpr = {startsExpr: true}

// Map keyword names to token types.

export const keywords = {}

// Succinct definitions of keyword token types
function kw(name, options = {}) {
  options.keyword = name
  keywords[name] = new TokenType(name, options)
  return keywords[name]
}
|  |  | ||||||
// The full table of token types used by the tokenizer and parser.
export const types = {
  num: new TokenType("num", startsExpr),
  regexp: new TokenType("regexp", startsExpr),
  string: new TokenType("string", startsExpr),
  name: new TokenType("name", startsExpr),
  eof: new TokenType("eof"),

  // Punctuation token types.
  bracketL: new TokenType("[", {beforeExpr: true, startsExpr: true}),
  bracketR: new TokenType("]"),
  braceL: new TokenType("{", {beforeExpr: true, startsExpr: true}),
  braceR: new TokenType("}"),
  parenL: new TokenType("(", {beforeExpr: true, startsExpr: true}),
  parenR: new TokenType(")"),
  comma: new TokenType(",", beforeExpr),
  semi: new TokenType(";", beforeExpr),
  colon: new TokenType(":", beforeExpr),
  dot: new TokenType("."),
  question: new TokenType("?", beforeExpr),
  arrow: new TokenType("=>", beforeExpr),
  template: new TokenType("template"),
  ellipsis: new TokenType("...", beforeExpr),
  backQuote: new TokenType("`", startsExpr),
  dollarBraceL: new TokenType("${", {beforeExpr: true, startsExpr: true}),

  // Operators. These carry several kinds of properties to help the
  // parser use them properly (the presence of these properties is
  // what categorizes them as operators).
  //
  // `binop`, when present, specifies that this operator is a binary
  // operator, and will refer to its precedence.
  //
  // `prefix` and `postfix` mark the operator as a prefix or postfix
  // unary operator.
  //
  // `isAssign` marks all of `=`, `+=`, `-=` etcetera, which act as
  // binary operators with a very low precedence, that should result
  // in AssignmentExpression nodes.

  eq: new TokenType("=", {beforeExpr: true, isAssign: true}),
  assign: new TokenType("_=", {beforeExpr: true, isAssign: true}),
  incDec: new TokenType("++/--", {prefix: true, postfix: true, startsExpr: true}),
  prefix: new TokenType("prefix", {beforeExpr: true, prefix: true, startsExpr: true}),
  // Precedence values: a higher number binds tighter (1 = `||`, 10 = `*`).
  logicalOR: binop("||", 1),
  logicalAND: binop("&&", 2),
  bitwiseOR: binop("|", 3),
  bitwiseXOR: binop("^", 4),
  bitwiseAND: binop("&", 5),
  equality: binop("==/!=", 6),
  relational: binop("</>", 7),
  bitShift: binop("<</>>", 8),
  plusMin: new TokenType("+/-", {beforeExpr: true, binop: 9, prefix: true, startsExpr: true}),
  modulo: binop("%", 10),
  star: binop("*", 10),
  slash: binop("/", 10),
  // `**` is right-associative, so it is not given a plain `binop` slot.
  starstar: new TokenType("**", {beforeExpr: true}),

  // Keyword token types.
  _break: kw("break"),
  _case: kw("case", beforeExpr),
  _catch: kw("catch"),
  _continue: kw("continue"),
  _debugger: kw("debugger"),
  _default: kw("default", beforeExpr),
  _do: kw("do", {isLoop: true, beforeExpr: true}),
  _else: kw("else", beforeExpr),
  _finally: kw("finally"),
  _for: kw("for", {isLoop: true}),
  _function: kw("function", startsExpr),
  _if: kw("if"),
  _return: kw("return", beforeExpr),
  _switch: kw("switch"),
  _throw: kw("throw", beforeExpr),
  _try: kw("try"),
  _var: kw("var"),
  _const: kw("const"),
  _while: kw("while", {isLoop: true}),
  _with: kw("with"),
  _new: kw("new", {beforeExpr: true, startsExpr: true}),
  _this: kw("this", startsExpr),
  _super: kw("super", startsExpr),
  _class: kw("class"),
  _extends: kw("extends", beforeExpr),
  _export: kw("export"),
  _import: kw("import"),
  _null: kw("null", startsExpr),
  _true: kw("true", startsExpr),
  _false: kw("false", startsExpr),
  _in: kw("in", {beforeExpr: true, binop: 7}),
  _instanceof: kw("instanceof", {beforeExpr: true, binop: 7}),
  _typeof: kw("typeof", {beforeExpr: true, prefix: true, startsExpr: true}),
  _void: kw("void", {beforeExpr: true, prefix: true, startsExpr: true}),
  _delete: kw("delete", {beforeExpr: true, prefix: true, startsExpr: true})
}
							
								
								
									
										9
									
								
								Source/node_modules/acorn/src/util.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										9
									
								
								Source/node_modules/acorn/src/util.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,9 @@ | |||||||
// True when `obj` is a genuine Array (works for arrays from other realms).
export function isArray(obj) {
  return "[object Array]" === Object.prototype.toString.call(obj)
}
|  |  | ||||||
|  | // Checks if an object has a property. | ||||||
|  |  | ||||||
// True when `obj` has `propName` as an *own* property — robust even when
// `hasOwnProperty` is shadowed on the object itself.
export function has(obj, propName) {
  const hasOwn = Object.prototype.hasOwnProperty
  return hasOwn.call(obj, propName)
}
							
								
								
									
										342
									
								
								Source/node_modules/acorn/src/walk/index.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										342
									
								
								Source/node_modules/acorn/src/walk/index.js
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,342 @@ | |||||||
|  | // AST walker module for Mozilla Parser API compatible trees | ||||||
|  |  | ||||||
|  | // A simple walk is one where you simply specify callbacks to be | ||||||
|  | // called on specific nodes. The last two arguments are optional. A | ||||||
|  | // simple use would be | ||||||
|  | // | ||||||
|  | //     walk.simple(myTree, { | ||||||
|  | //         Expression: function(node) { ... } | ||||||
|  | //     }); | ||||||
|  | // | ||||||
|  | // to do something with all expressions. All Parser API node types | ||||||
|  | // can be used to identify node types, as well as Expression, | ||||||
|  | // Statement, and ScopeBody, which denote categories of nodes. | ||||||
|  | // | ||||||
|  | // The base argument can be used to pass a custom (recursive) | ||||||
|  | // walker, and state can be used to give this walked an initial | ||||||
|  | // state. | ||||||
|  |  | ||||||
// Simple walk: descend with the base walker, then invoke `visitors[type]`
// on each node whose (possibly overridden) type has a callback.
export function simple(node, visitors, base, state, override) {
  if (!base) base = exports.base
  const walk = (n, st, ov) => {
    const type = ov || n.type
    const cb = visitors[type]
    base[type](n, st, walk)
    if (cb) cb(n, st)
  }
  walk(node, state, override)
}
|  |  | ||||||
|  | // An ancestor walk keeps an array of ancestor nodes (including the | ||||||
|  | // current node) and passes them to the callback as third parameter | ||||||
|  | // (and also as state parameter when no other state is present). | ||||||
// Ancestor walk: like `simple`, but callbacks also receive the stack of
// ancestor nodes (which doubles as the state when none was given).
export function ancestor(node, visitors, base, state) {
  if (!base) base = exports.base
  const stack = []
  const walk = (n, st, ov) => {
    const type = ov || n.type
    const cb = visitors[type]
    // Category re-dispatches (e.g. "Statement") revisit the same node;
    // only push it onto the ancestor stack once.
    const pushed = n !== stack[stack.length - 1]
    if (pushed) stack.push(n)
    base[type](n, st, walk)
    if (cb) cb(n, st || stack, stack)
    if (pushed) stack.pop()
  }
  walk(node, state)
}
|  |  | ||||||
|  | // A recursive walk is one where your functions override the default | ||||||
|  | // walkers. They can modify and replace the state parameter that's | ||||||
|  | // threaded through the walk, and can opt how and whether to walk | ||||||
|  | // their child nodes (by calling their third argument on these | ||||||
|  | // nodes). | ||||||
// Recursive walk: `funcs` fully override the default walkers and decide
// themselves how (and whether) to descend into child nodes.
export function recursive(node, state, funcs, base, override) {
  const visitor = funcs ? exports.make(funcs, base) : base
  const walk = (n, st, ov) => { visitor[ov || n.type](n, st, walk) }
  walk(node, state, override)
}
|  |  | ||||||
// Normalize a `test` argument: a string matches that exact node type,
// a falsy value matches everything, and a function is used as-is.
function makeTest(test) {
  if (typeof test === "string") return type => type === test
  if (!test) return () => true
  return test
}

// Thrown internally to abort a search as soon as a matching node is found.
class Found {
  constructor(node, state) {
    this.node = node
    this.state = state
  }
}
|  |  | ||||||
|  | // Find a node with a given start, end, and type (all are optional, | ||||||
|  | // null can be used as wildcard). Returns a {node, state} object, or | ||||||
|  | // undefined when it doesn't find a matching node. | ||||||
// Find a node with the given start, end, and type (each may be null as a
// wildcard). Returns a {node, state} object, or undefined if none matches.
export function findNodeAt(node, start, end, test, base, state) {
  test = makeTest(test)
  if (!base) base = exports.base
  try {
    const walk = (n, st, ov) => {
      const type = ov || n.type
      // Only descend into nodes whose span can contain the target span.
      if ((start == null || n.start <= start) &&
          (end == null || n.end >= end))
        base[type](n, st, walk)
      if ((start == null || n.start === start) &&
          (end == null || n.end === end) &&
          test(type, n))
        throw new Found(n, st)
    }
    walk(node, state)
  } catch (e) {
    if (e instanceof Found) return e
    throw e
  }
}
|  |  | ||||||
|  | // Find the innermost node of a given type that contains the given | ||||||
|  | // position. Interface similar to findNodeAt. | ||||||
// Find the innermost matching node whose span contains `pos`.
// Interface mirrors findNodeAt.
export function findNodeAround(node, pos, test, base, state) {
  test = makeTest(test)
  if (!base) base = exports.base
  try {
    const walk = (n, st, ov) => {
      const type = ov || n.type
      if (n.start > pos || n.end < pos) return
      // Descend first so the innermost match is found (and thrown) first.
      base[type](n, st, walk)
      if (test(type, n)) throw new Found(n, st)
    }
    walk(node, state)
  } catch (e) {
    if (e instanceof Found) return e
    throw e
  }
}
|  |  | ||||||
|  | // Find the outermost matching node after a given position. | ||||||
// Find the outermost matching node that starts at or after `pos`.
export function findNodeAfter(node, pos, test, base, state) {
  test = makeTest(test)
  if (!base) base = exports.base
  try {
    const walk = (n, st, ov) => {
      if (n.end < pos) return
      const type = ov || n.type
      // Test before descending so the *outermost* match wins.
      if (n.start >= pos && test(type, n)) throw new Found(n, st)
      base[type](n, st, walk)
    }
    walk(node, state)
  } catch (e) {
    if (e instanceof Found) return e
    throw e
  }
}
|  |  | ||||||
|  | // Find the outermost matching node before a given position. | ||||||
// Find the outermost matching node that ends at or before `pos`,
// preferring the one ending closest to `pos`.
export function findNodeBefore(node, pos, test, base, state) {
  test = makeTest(test)
  if (!base) base = exports.base
  let best
  const walk = (n, st, ov) => {
    if (n.start > pos) return
    const type = ov || n.type
    if (n.end <= pos && (!best || best.node.end < n.end) && test(type, n))
      best = new Found(n, st)
    base[type](n, st, walk)
  }
  walk(node, state)
  return best
}
|  |  | ||||||
|  | // Fallback to an Object.create polyfill for older environments. | ||||||
// Object.create, with a constructor-based polyfill for very old engines.
const create = Object.create || function(proto) {
  function Empty() {}
  Empty.prototype = proto
  return new Empty()
}
|  |  | ||||||
|  | // Used to create a custom walker. Will fill in all missing node | ||||||
|  | // type properties with the defaults. | ||||||
// Build a custom walker: `funcs` overrides layered (prototypally) over the
// `base` defaults, so missing node types fall back to the base walker.
export function make(funcs, base) {
  if (!base) base = exports.base
  const visitor = create(base)
  for (let type in funcs) visitor[type] = funcs[type]
  return visitor
}
|  |  | ||||||
// Re-dispatch on the node's own concrete type (used by category aliases
// such as "Statement" / "Expression").
function skipThrough(node, st, c) { c(node, st) }
// Walker for leaf node types that have nothing to descend into.
function ignore(_node, _st, _c) {}
|  |  | ||||||
|  | // Node walkers. | ||||||
|  |  | ||||||
// The default walkers: for every node type, how to visit its children.
// Child visits pass a category string ("Statement", "Expression",
// "Pattern", ...) as override so custom walkers can hook whole categories.
export const base = {}

base.Program = base.BlockStatement = (node, st, c) => {
  for (let i = 0; i < node.body.length; ++i)
    c(node.body[i], st, "Statement")
}
base.Statement = skipThrough
base.EmptyStatement = ignore
base.ExpressionStatement = base.ParenthesizedExpression =
  (node, st, c) => c(node.expression, st, "Expression")
base.IfStatement = (node, st, c) => {
  c(node.test, st, "Expression")
  c(node.consequent, st, "Statement")
  if (node.alternate) c(node.alternate, st, "Statement")
}
base.LabeledStatement = (node, st, c) => c(node.body, st, "Statement")
base.BreakStatement = base.ContinueStatement = ignore
base.WithStatement = (node, st, c) => {
  c(node.object, st, "Expression")
  c(node.body, st, "Statement")
}
base.SwitchStatement = (node, st, c) => {
  c(node.discriminant, st, "Expression")
  for (let i = 0; i < node.cases.length; ++i) {
    let cs = node.cases[i]
    // `test` is null for the `default:` case.
    if (cs.test) c(cs.test, st, "Expression")
    for (let j = 0; j < cs.consequent.length; ++j)
      c(cs.consequent[j], st, "Statement")
  }
}
base.ReturnStatement = base.YieldExpression = base.AwaitExpression = (node, st, c) => {
  if (node.argument) c(node.argument, st, "Expression")
}
base.ThrowStatement = base.SpreadElement =
  (node, st, c) => c(node.argument, st, "Expression")
base.TryStatement = (node, st, c) => {
  c(node.block, st, "Statement")
  if (node.handler) c(node.handler, st)
  if (node.finalizer) c(node.finalizer, st, "Statement")
}
base.CatchClause = (node, st, c) => {
  c(node.param, st, "Pattern")
  c(node.body, st, "ScopeBody")
}
base.WhileStatement = base.DoWhileStatement = (node, st, c) => {
  c(node.test, st, "Expression")
  c(node.body, st, "Statement")
}
base.ForStatement = (node, st, c) => {
  if (node.init) c(node.init, st, "ForInit")
  if (node.test) c(node.test, st, "Expression")
  if (node.update) c(node.update, st, "Expression")
  c(node.body, st, "Statement")
}
base.ForInStatement = base.ForOfStatement = (node, st, c) => {
  c(node.left, st, "ForInit")
  c(node.right, st, "Expression")
  c(node.body, st, "Statement")
}
// A for-loop init slot holds either a declaration or an expression.
base.ForInit = (node, st, c) => {
  if (node.type == "VariableDeclaration") c(node, st)
  else c(node, st, "Expression")
}
base.DebuggerStatement = ignore

base.FunctionDeclaration = (node, st, c) => c(node, st, "Function")
base.VariableDeclaration = (node, st, c) => {
  for (let i = 0; i < node.declarations.length; ++i)
    c(node.declarations[i], st)
}
base.VariableDeclarator = (node, st, c) => {
  c(node.id, st, "Pattern")
  if (node.init) c(node.init, st, "Expression")
}

base.Function = (node, st, c) => {
  if (node.id) c(node.id, st, "Pattern")
  for (let i = 0; i < node.params.length; i++)
    c(node.params[i], st, "Pattern")
  c(node.body, st, node.expression ? "ScopeExpression" : "ScopeBody")
}
// FIXME drop these node types in next major version
// (They are awkward, and in ES6 every block can be a scope.)
base.ScopeBody = (node, st, c) => c(node, st, "Statement")
base.ScopeExpression = (node, st, c) => c(node, st, "Expression")

base.Pattern = (node, st, c) => {
  if (node.type == "Identifier")
    c(node, st, "VariablePattern")
  else if (node.type == "MemberExpression")
    c(node, st, "MemberPattern")
  else
    c(node, st)
}
base.VariablePattern = ignore
base.MemberPattern = skipThrough
base.RestElement = (node, st, c) => c(node.argument, st, "Pattern")
base.ArrayPattern =  (node, st, c) => {
  for (let i = 0; i < node.elements.length; ++i) {
    let elt = node.elements[i]
    // Holes in an array pattern appear as null elements.
    if (elt) c(elt, st, "Pattern")
  }
}
base.ObjectPattern = (node, st, c) => {
  for (let i = 0; i < node.properties.length; ++i)
    c(node.properties[i].value, st, "Pattern")
}

base.Expression = skipThrough
base.ThisExpression = base.Super = base.MetaProperty = ignore
base.ArrayExpression = (node, st, c) => {
  for (let i = 0; i < node.elements.length; ++i) {
    let elt = node.elements[i]
    // Elisions ([1, , 3]) appear as null elements.
    if (elt) c(elt, st, "Expression")
  }
}
base.ObjectExpression = (node, st, c) => {
  for (let i = 0; i < node.properties.length; ++i)
    c(node.properties[i], st)
}
base.FunctionExpression = base.ArrowFunctionExpression = base.FunctionDeclaration
base.SequenceExpression = base.TemplateLiteral = (node, st, c) => {
  for (let i = 0; i < node.expressions.length; ++i)
    c(node.expressions[i], st, "Expression")
}
base.UnaryExpression = base.UpdateExpression = (node, st, c) => {
  c(node.argument, st, "Expression")
}
base.BinaryExpression = base.LogicalExpression = (node, st, c) => {
  c(node.left, st, "Expression")
  c(node.right, st, "Expression")
}
base.AssignmentExpression = base.AssignmentPattern = (node, st, c) => {
  c(node.left, st, "Pattern")
  c(node.right, st, "Expression")
}
base.ConditionalExpression = (node, st, c) => {
  c(node.test, st, "Expression")
  c(node.consequent, st, "Expression")
  c(node.alternate, st, "Expression")
}
base.NewExpression = base.CallExpression = (node, st, c) => {
  c(node.callee, st, "Expression")
  // `new Foo` without parens has no arguments array.
  if (node.arguments) for (let i = 0; i < node.arguments.length; ++i)
    c(node.arguments[i], st, "Expression")
}
base.MemberExpression = (node, st, c) => {
  c(node.object, st, "Expression")
  if (node.computed) c(node.property, st, "Expression")
}
base.ExportNamedDeclaration = base.ExportDefaultDeclaration = (node, st, c) => {
  if (node.declaration)
    c(node.declaration, st, node.type == "ExportNamedDeclaration" || node.declaration.id ? "Statement" : "Expression")
  if (node.source) c(node.source, st, "Expression")
}
base.ExportAllDeclaration = (node, st, c) => {
  c(node.source, st, "Expression")
}
base.ImportDeclaration = (node, st, c) => {
  for (let i = 0; i < node.specifiers.length; i++)
    c(node.specifiers[i], st)
  c(node.source, st, "Expression")
}
base.ImportSpecifier = base.ImportDefaultSpecifier = base.ImportNamespaceSpecifier = base.Identifier = base.Literal = ignore

base.TaggedTemplateExpression = (node, st, c) => {
  c(node.tag, st, "Expression")
  c(node.quasi, st)
}
base.ClassDeclaration = base.ClassExpression = (node, st, c) => c(node, st, "Class")
base.Class = (node, st, c) => {
  if (node.id) c(node.id, st, "Pattern")
  if (node.superClass) c(node.superClass, st, "Expression")
  for (let i = 0; i < node.body.body.length; i++)
    c(node.body.body[i], st)
}
base.MethodDefinition = base.Property = (node, st, c) => {
  if (node.computed) c(node.key, st, "Expression")
  c(node.value, st, "Expression")
}
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user