diff --git a/LICENSE b/LICENSE
index 5c00ae3..22e5c21 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,3 +1,7 @@
+Better Enums is distributed under the terms of the 2-clause BSD license. Its
+text is given below.
+
+
Copyright (c) 2012-2015, Anton Bachin
All rights reserved.
@@ -21,3 +25,44 @@ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+
+
+Better Enums uses the mistune library as part of its documentation generator.
+Its web address and license are given below.
+
+
+http://mistune.readthedocs.org/en/latest/
+
+
+Copyright (c) 2014 - 2015, Hsiaoming Yang
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+* Neither the name of the creator nor the names of its contributors may be used
+to endorse or promote products derived from this software without specific prior
+written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/doc/DesignDecisionsFAQ.md b/doc/DesignDecisionsFAQ.md
index 188b423..674b724 100644
--- a/doc/DesignDecisionsFAQ.md
+++ b/doc/DesignDecisionsFAQ.md
@@ -52,8 +52,6 @@ interesting option, and it has [its own section][traits]. I have tried it, but
the verbosity increase is much greater than the benefit of dropping underscores,
so I chose not to do it.
-%% description = Better Enums design decisions and tradeoffs.
-
### Why does Better Enums use a macro at all?
Better Enums needs to turn the names of declared constants into strings, and I
@@ -248,3 +246,5 @@ generation.
[underlying]: ${prefix}demo/NonIntegralUnderlyingTypes.html
[traits-branch]: $repo/tree/traits
[traits-samples]: $repo/tree/traits/samples
+
+%% description = Better Enums design decisions and tradeoffs.
diff --git a/doc/Makefile b/doc/Makefile
index 758c612..d16fcd8 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -1,12 +1,23 @@
SOURCE_MARKDOWN := $(wildcard tutorial/*) $(wildcard demo/*)
SOURCE_CXX := $(SOURCE_MARKDOWN:.md=.cc)
-.PHONY : all
-all : html examples
-
.PHONY : html
html :
python docs.py
+ @echo "See html/index.html"
+
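+# "publish" copies the rendered html/ into ../doc-publish (assumed to be a
+# separate checkout used only for publishing), then amends the tip commit and
+# force-pushes it.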
+.PHONY : publish
+publish : prepare
+ cp -r html ../doc-publish
+ git commit --amend
+ git push -f
+
+.PHONY : prepare
+prepare : examples web
+
+.PHONY : web
+web : examples
+ python docs.py --web
.PHONY : examples
examples : clean-examples $(SOURCE_CXX)
diff --git a/doc/better-enums.css b/doc/better-enums.css
index d585746..f9ca9bf 100644
--- a/doc/better-enums.css
+++ b/doc/better-enums.css
@@ -122,7 +122,6 @@ header {
background: linear-gradient(#395E7E, #4A79A0);
color: white;
padding: 50px 0;
- margin-bottom: 50px;
}
h1 {
@@ -226,12 +225,12 @@ footer {
a {
text-decoration: none;
color: white;
- background-color: red;
+ /*background-color: red;*/
}
a[href=""] {
color: white !important;
- background-color: red !important;
+ /*background-color: red !important;*/
}
a:hover {
@@ -242,6 +241,10 @@ header a:hover {
text-decoration: none;
}
+.main {
+ margin-top: 50px;
+}
+
.main a[href], footer a[href] {
background-color: #edd;
color: #844;
@@ -545,3 +548,32 @@ h3.contents {
margin-top: 0;
padding-bottom: 1em;
}
+
+.buttons-bar {
+ background-color: #f4f4f4;
+ padding-top: 0.5em;
+ padding-bottom: 0.5em;
+ height: 20px;
+}
+
+.buttons-bar a img {
+ margin-right: 25px;
+}
+
+.buttons-bar iframe.gh-button {
+ width: 95px;
+}
+
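+/* The tweet and GitHub buttons are hidden by default and re-enabled only on
+   pages carrying the "index" class (see the .index rules below). */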
+.buttons-bar .tweet-share,
+.buttons-bar .gh-button {
+ display: none;
+}
+
+.index .buttons-bar .tweet-share,
+.index .buttons-bar .gh-button {
+ display: initial;
+}
+
+.buttons-bar iframe.gh-watch {
+ width: 103px;
+}
diff --git a/doc/demo/102-any-underlying.md b/doc/demo/102-any-underlying.md
index 46f8efc..870a74a 100644
--- a/doc/demo/102-any-underlying.md
+++ b/doc/demo/102-any-underlying.md
@@ -7,7 +7,15 @@ between `T` and an integral type of your choosing. This also works in $cxx98
doesn't have to be `constexpr`. In $cxx98, everything involving `T` will simply
be done by Better Enums at run time.
-Here's how to do it.
+This feature is semi-experimental. I am considering relaxing the requirements on
+`T` so that it doesn't have to be a literal type; I could then use a
+`reinterpret_cast` to generate the mapping automatically. That would make
+non-integral underlying types easier to use, but it would also prevent usage at
+compile time, which unfortunately has structural consequences for the
+implementation of Better Enums, and additional semantic consequences for usage,
+even at run time.
+
+In the meantime, here's how to have a non-integral underlying type in the
+current version.
#include
#include
diff --git a/doc/docs.py b/doc/docs.py
index 8f015f0..36c49e3 100755
--- a/doc/docs.py
+++ b/doc/docs.py
@@ -7,6 +7,8 @@ import string
import transform
import os
import os.path
+import sys
+import urllib
TEMPLATE_DIRECTORY = "template"
OUTPUT_DIRECTORY = "html"
@@ -83,6 +85,9 @@ def compose_page(relative_path, definitions):
definitions["canonical"] = canonical
definitions["prefix"] = prefix
+ definitions["quoted_url"] = urllib.quote(definitions["canonical"], "")
+ definitions["quoted_title"] = urllib.quote(definitions["title"], "")
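+    # For example (illustrative values, not taken from the repository): with
+    # canonical = "http://example.com/demo/Page.html", urllib.quote(..., "")
+    # yields "http%3A%2F%2Fexample.com%2Fdemo%2FPage.html", so the URL and
+    # title are safe to embed as query parameters in the share links.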
+
if "class" not in definitions:
definitions["class"] = ""
@@ -202,6 +207,9 @@ def generate_sitemap():
def main():
load_templates()
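+    # When run without --web, blank out the "ga" template (presumably the
+    # analytics snippet) so that local preview builds do not embed it.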
+ if not (len(sys.argv) >= 2 and sys.argv[1] == "--web"):
+ templates["ga"] = ""
+
remove_output_directory()
process_threaded(TUTORIAL_DIRECTORY)
@@ -212,6 +220,8 @@ def main():
compose_general_page(page)
copy_static_file("better-enums.css")
+ copy_static_file("image/twsupport.png")
+ copy_static_file("image/tweet.png")
generate_sitemap()
diff --git a/doc/image/tweet.png b/doc/image/tweet.png
new file mode 100644
index 0000000..dbbd61d
Binary files /dev/null and b/doc/image/tweet.png differ
diff --git a/doc/image/twsupport.png b/doc/image/twsupport.png
new file mode 100644
index 0000000..ddc4b1f
Binary files /dev/null and b/doc/image/twsupport.png differ
diff --git a/doc/index.md b/doc/index.md
index 9bdb7e8..7c53d94 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -61,23 +61,23 @@ Channel c = Channel::_from_integral(3);
reflective enum types.
-That means you can easily convert enums to and from strings,
-validate them, and loop over them. In $cxx11, you can do it all at
+That means you can easily convert enums to and from strings,
+validate them, and loop over them. In $cxx11, you can do it all at
compile time.
It's what built-in enums ought to support. Better Enums simply adds the missing
features. And, it is based on the best known techniques, thoroughly tested,
fast, portable, and documented exhaustively.
-All you have to do to use it is include enum.h.
+To use it, just include enum.h.
Try it live online in
-[Wandbox](http://melpon.org/wandbox/permlink/pdlAAGoxnjqG6FRI), or begin the
+[Wandbox](http://melpon.org/wandbox/permlink/wICNzu2LW2vEgqzh), or begin the
[tutorial](${prefix}tutorial/HelloWorld.html)!
-### Features
+### Highlights
@@ -218,9 +218,8 @@ Try it live online in
%% title = Clean reflective enums for C++
-%% description = Better Enums is a single header C++ library providing
-reflective enums with clean syntax. Better Enums can be converted to and from
-strings, be iterated, counted, and used at run time or for template and
-constexpr metaprogramming. Free and open source under the BSD license.
+%% description = Reflective enums in a single header file, with clean syntax.
+The enums can be converted to strings, iterated, and counted, at run time or
+as part of metaprogramming. Free and open source under the BSD license.
%% class = index
diff --git a/doc/mistune.py b/doc/mistune.py
new file mode 100755
index 0000000..34860d9
--- /dev/null
+++ b/doc/mistune.py
@@ -0,0 +1,1102 @@
+# coding: utf-8
+"""
+ mistune
+ ~~~~~~~
+
+ The fastest markdown parser in pure Python with renderer feature.
+
+ :copyright: (c) 2014 - 2015 by Hsiaoming Yang.
+"""
+
+import re
+import inspect
+
+__version__ = '0.5.1'
+__author__ = 'Hsiaoming Yang <me@lepture.com>'
+__all__ = [
+ 'BlockGrammar', 'BlockLexer',
+ 'InlineGrammar', 'InlineLexer',
+ 'Renderer', 'Markdown',
+ 'markdown', 'escape',
+]
+
+
+def _pure_pattern(regex):
+ pattern = regex.pattern
+ if pattern.startswith('^'):
+ pattern = pattern[1:]
+ return pattern
+
+
+_key_pattern = re.compile(r'\s+')
+
+
+def _keyify(key):
+ return _key_pattern.sub(' ', key.lower())
+
+
+_escape_pattern = re.compile(r'&(?!#?\w+;)')
+
+
+def escape(text, quote=False, smart_amp=True):
+ """Replace special characters "&", "<" and ">" to HTML-safe sequences.
+
+ The original cgi.escape will always escape "&", but you can control
+ this one for a smart escape amp.
+
+ :param quote: if set to True, " and ' will be escaped.
+ :param smart_amp: if set to False, & will always be escaped.
+ """
+ if smart_amp:
+        text = _escape_pattern.sub('&amp;', text)
+    else:
+        text = text.replace('&', '&amp;')
+    text = text.replace('<', '&lt;')
+    text = text.replace('>', '&gt;')
+    if quote:
+        text = text.replace('"', '&quot;')
+        text = text.replace("'", '&#39;')
+ return text
+
+
+def preprocessing(text, tab=4):
+ text = re.sub(r'\r\n|\r', '\n', text)
+ text = text.replace('\t', ' ' * tab)
+ text = text.replace('\u00a0', ' ')
+ text = text.replace('\u2424', '\n')
+ pattern = re.compile(r'^ +$', re.M)
+ return pattern.sub('', text)
+
+
+_tag = (
+ r'(?!(?:'
+ r'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|'
+ r'var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|'
+ r'span|br|wbr|ins|del|img)\b)\w+(?!:/|[^\w\s@]*@)\b'
+)
+
+
+class BlockGrammar(object):
+ """Grammars for block level tokens."""
+
+ def_links = re.compile(
+ r'^ *\[([^^\]]+)\]: *' # [key]:
+        r'<?([^\s>]+)>?' # <link> or link
+ r'(?: +["(]([^\n]+)[")])? *(?:\n+|$)'
+ )
+ def_footnotes = re.compile(
+ r'^\[\^([^\]]+)\]: *('
+ r'[^\n]*(?:\n+|$)' # [^key]:
+ r'(?: {1,}[^\n]*(?:\n+|$))*'
+ r')'
+ )
+
+ newline = re.compile(r'^\n+')
+ block_code = re.compile(r'^( {4}[^\n]+\n*)+')
+ fences = re.compile(
+ r'^ *(`{3,}|~{3,}) *(\S+)? *\n' # ```lang
+ r'([\s\S]+?)\s*'
+ r'\1 *(?:\n+|$)' # ```
+ )
+ hrule = re.compile(r'^ {0,3}[-*_](?: *[-*_]){2,} *(?:\n+|$)')
+ heading = re.compile(r'^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)')
+ lheading = re.compile(r'^([^\n]+)\n *(=|-)+ *(?:\n+|$)')
+ block_quote = re.compile(r'^( *>[^\n]+(\n[^\n]+)*\n*)+')
+ list_block = re.compile(
+ r'^( *)([*+-]|\d+\.) [\s\S]+?'
+ r'(?:'
+ r'\n+(?=\1?(?:[-*_] *){3,}(?:\n+|$))' # hrule
+ r'|\n+(?=%s)' # def links
+ r'|\n+(?=%s)' # def footnotes
+ r'|\n{2,}'
+ r'(?! )'
+ r'(?!\1(?:[*+-]|\d+\.) )\n*'
+ r'|'
+ r'\s*$)' % (
+ _pure_pattern(def_links),
+ _pure_pattern(def_footnotes),
+ )
+ )
+ list_item = re.compile(
+ r'^(( *)(?:[*+-]|\d+\.) [^\n]*'
+ r'(?:\n(?!\2(?:[*+-]|\d+\.) )[^\n]*)*)',
+ flags=re.M
+ )
+ list_bullet = re.compile(r'^ *(?:[*+-]|\d+\.) +')
+ paragraph = re.compile(
+ r'^((?:[^\n]+\n?(?!'
+ r'%s|%s|%s|%s|%s|%s|%s|%s|%s'
+ r'))+)\n*' % (
+ _pure_pattern(fences).replace(r'\1', r'\2'),
+ _pure_pattern(list_block).replace(r'\1', r'\3'),
+ _pure_pattern(hrule),
+ _pure_pattern(heading),
+ _pure_pattern(lheading),
+ _pure_pattern(block_quote),
+ _pure_pattern(def_links),
+ _pure_pattern(def_footnotes),
+ '<' + _tag,
+ )
+ )
+ block_html = re.compile(
+ r'^ *(?:%s|%s|%s) *(?:\n{2,}|\s*$)' % (
+            r'<!--[\s\S]*?-->',
+ r'<(%s)[\s\S]+?<\/\1>' % _tag,
+ r'''<%s(?:"[^"]*"|'[^']*'|[^'">])*?>''' % _tag,
+ )
+ )
+ table = re.compile(
+ r'^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*'
+ )
+ nptable = re.compile(
+ r'^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*'
+ )
+ text = re.compile(r'^[^\n]+')
+
+
+class BlockLexer(object):
+ """Block level lexer for block grammars."""
+ grammar_class = BlockGrammar
+
+ default_rules = [
+ 'newline', 'hrule', 'block_code', 'fences', 'heading',
+ 'nptable', 'lheading', 'block_quote',
+ 'list_block', 'block_html', 'def_links',
+ 'def_footnotes', 'table', 'paragraph', 'text'
+ ]
+
+ list_rules = (
+ 'newline', 'block_code', 'fences', 'lheading', 'hrule',
+ 'block_quote', 'list_block', 'block_html', 'text',
+ )
+
+ footnote_rules = (
+ 'newline', 'block_code', 'fences', 'heading',
+ 'nptable', 'lheading', 'hrule', 'block_quote',
+ 'list_block', 'block_html', 'table', 'paragraph', 'text'
+ )
+
+ def __init__(self, rules=None, **kwargs):
+ self.tokens = []
+ self.def_links = {}
+ self.def_footnotes = {}
+
+ if not rules:
+ rules = self.grammar_class()
+
+ self.rules = rules
+
+ def __call__(self, text, rules=None):
+ return self.parse(text, rules)
+
+ def parse(self, text, rules=None):
+ text = text.rstrip('\n')
+
+ if not rules:
+ rules = self.default_rules
+
+ def manipulate(text):
+ for key in rules:
+ rule = getattr(self.rules, key)
+ m = rule.match(text)
+ if not m:
+ continue
+ getattr(self, 'parse_%s' % key)(m)
+ return m
+ return False
+
+ while text:
+ m = manipulate(text)
+ if m is not False:
+ text = text[len(m.group(0)):]
+ continue
+ if text:
+ raise RuntimeError('Infinite loop at: %s' % text)
+ return self.tokens
+
+ def parse_newline(self, m):
+ length = len(m.group(0))
+ if length > 1:
+ self.tokens.append({'type': 'newline'})
+
+ def parse_block_code(self, m):
+ code = m.group(0)
+ pattern = re.compile(r'^ {4}', re.M)
+ code = pattern.sub('', code)
+ self.tokens.append({
+ 'type': 'code',
+ 'lang': None,
+ 'text': code,
+ })
+
+ def parse_fences(self, m):
+ self.tokens.append({
+ 'type': 'code',
+ 'lang': m.group(2),
+ 'text': m.group(3),
+ })
+
+ def parse_heading(self, m):
+ self.tokens.append({
+ 'type': 'heading',
+ 'level': len(m.group(1)),
+ 'text': m.group(2),
+ })
+
+ def parse_lheading(self, m):
+ """Parse setext heading."""
+ self.tokens.append({
+ 'type': 'heading',
+ 'level': 1 if m.group(2) == '=' else 2,
+ 'text': m.group(1),
+ })
+
+ def parse_hrule(self, m):
+ self.tokens.append({'type': 'hrule'})
+
+ def parse_list_block(self, m):
+ bull = m.group(2)
+ self.tokens.append({
+ 'type': 'list_start',
+ 'ordered': '.' in bull,
+ })
+ cap = m.group(0)
+ self._process_list_item(cap, bull)
+ self.tokens.append({'type': 'list_end'})
+
+ def _process_list_item(self, cap, bull):
+ cap = self.rules.list_item.findall(cap)
+
+ _next = False
+ length = len(cap)
+
+ for i in range(length):
+ item = cap[i][0]
+
+ # remove the bullet
+ space = len(item)
+ item = self.rules.list_bullet.sub('', item)
+
+ # outdent
+ if '\n ' in item:
+ space = space - len(item)
+ pattern = re.compile(r'^ {1,%d}' % space, flags=re.M)
+ item = pattern.sub('', item)
+
+            # determine whether item is loose or not
+ loose = _next
+ if not loose and re.search(r'\n\n(?!\s*$)', item):
+ loose = True
+
+ rest = len(item)
+ if i != length - 1 and rest:
+ _next = item[rest-1] == '\n'
+ if not loose:
+ loose = _next
+
+ if loose:
+ t = 'loose_item_start'
+ else:
+ t = 'list_item_start'
+
+ self.tokens.append({'type': t})
+ # recurse
+ self.parse(item, self.list_rules)
+ self.tokens.append({'type': 'list_item_end'})
+
+ def parse_block_quote(self, m):
+ self.tokens.append({'type': 'block_quote_start'})
+ cap = m.group(0)
+ pattern = re.compile(r'^ *> ?', flags=re.M)
+ cap = pattern.sub('', cap)
+ self.parse(cap)
+ self.tokens.append({'type': 'block_quote_end'})
+
+ def parse_def_links(self, m):
+ key = _keyify(m.group(1))
+ self.def_links[key] = {
+ 'link': m.group(2),
+ 'title': m.group(3),
+ }
+
+ def parse_def_footnotes(self, m):
+ key = _keyify(m.group(1))
+ if key in self.def_footnotes:
+ # footnote is already defined
+ return
+
+ self.def_footnotes[key] = 0
+
+ self.tokens.append({
+ 'type': 'footnote_start',
+ 'key': key,
+ })
+
+ text = m.group(2)
+
+ if '\n' in text:
+ lines = text.split('\n')
+ whitespace = None
+ for line in lines[1:]:
+ space = len(line) - len(line.lstrip())
+ if space and (not whitespace or space < whitespace):
+ whitespace = space
+ newlines = [lines[0]]
+ for line in lines[1:]:
+ newlines.append(line[whitespace:])
+ text = '\n'.join(newlines)
+
+ self.parse(text, self.footnote_rules)
+
+ self.tokens.append({
+ 'type': 'footnote_end',
+ 'key': key,
+ })
+
+ def parse_table(self, m):
+ item = self._process_table(m)
+
+ cells = re.sub(r'(?: *\| *)?\n$', '', m.group(3))
+ cells = cells.split('\n')
+ for i, v in enumerate(cells):
+ v = re.sub(r'^ *\| *| *\| *$', '', v)
+ cells[i] = re.split(r' *\| *', v)
+
+ item['cells'] = cells
+ self.tokens.append(item)
+
+ def parse_nptable(self, m):
+ item = self._process_table(m)
+
+ cells = re.sub(r'\n$', '', m.group(3))
+ cells = cells.split('\n')
+ for i, v in enumerate(cells):
+ cells[i] = re.split(r' *\| *', v)
+
+ item['cells'] = cells
+ self.tokens.append(item)
+
+ def _process_table(self, m):
+ header = re.sub(r'^ *| *\| *$', '', m.group(1))
+ header = re.split(r' *\| *', header)
+ align = re.sub(r' *|\| *$', '', m.group(2))
+ align = re.split(r' *\| *', align)
+
+ for i, v in enumerate(align):
+ if re.search(r'^ *-+: *$', v):
+ align[i] = 'right'
+ elif re.search(r'^ *:-+: *$', v):
+ align[i] = 'center'
+ elif re.search(r'^ *:-+ *$', v):
+ align[i] = 'left'
+ else:
+ align[i] = None
+
+ item = {
+ 'type': 'table',
+ 'header': header,
+ 'align': align,
+ }
+ return item
+
+ def parse_block_html(self, m):
+ pre = m.group(1) in ['pre', 'script', 'style']
+ text = m.group(0)
+ self.tokens.append({
+ 'type': 'block_html',
+ 'pre': pre,
+ 'text': text
+ })
+
+ def parse_paragraph(self, m):
+ text = m.group(1).rstrip('\n')
+ self.tokens.append({'type': 'paragraph', 'text': text})
+
+ def parse_text(self, m):
+ text = m.group(0)
+ self.tokens.append({'type': 'text', 'text': text})
+
+
+class InlineGrammar(object):
+ """Grammars for inline level tokens."""
+
+ escape = re.compile(r'^\\([\\`*{}\[\]()#+\-.!_>~|])') # \* \+ \! ....
+ tag = re.compile(
+        r'^<!--[\s\S]*?-->|' # comment
+ r'^<\/\w+>|' # close tag
+ r'^<\w+[^>]*?>' # open tag
+ )
+ autolink = re.compile(r'^<([^ >]+(@|:\/)[^ >]+)>')
+ link = re.compile(
+ r'^!?\[('
+ r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*'
+ r')\]\('
+        r'''\s*<?([\s\S]*?)>?(?:\s+['"]([\s\S]*?)['"])?\s*'''
+ r'\)'
+ )
+ reflink = re.compile(
+ r'^!?\[('
+ r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*'
+ r')\]\s*\[([^^\]]*)\]'
+ )
+ nolink = re.compile(r'^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]')
+ url = re.compile(r'''^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])''')
+ double_emphasis = re.compile(
+ r'^_{2}(.+?)_{2}(?!_)' # __word__
+ r'|'
+ r'^\*{2}(.+?)\*{2}(?!\*)' # **word**
+ )
+ emphasis = re.compile(
+ r'^\b_((?:__|.)+?)_\b' # _word_
+ r'|'
+ r'^\*((?:\*\*|.)+?)\*(?!\*)' # *word*
+ )
+ code = re.compile(r'^(`+)\s*(.*?[^`])\s*\1(?!`)') # `code`
+ linebreak = re.compile(r'^ {2,}\n(?!\s*$)')
+ strikethrough = re.compile(r'^~~(?=\S)(.*?\S)~~') # ~~word~~
+ footnote = re.compile(r'^\[\^([^\]]+)\]')
+    text = re.compile(r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| {2,}\n|$)')
+
+    def output_tag(self, m):
+        text = m.group(0)
+        lower_text = text.lower()
+        if lower_text.startswith('<a '):
+            self._in_link = True
+        if lower_text.startswith('</a>'):
+            self._in_link = False
+        return self.renderer.tag(text)
+
+ def output_footnote(self, m):
+ key = _keyify(m.group(1))
+ if key not in self.footnotes:
+ return None
+ if self.footnotes[key]:
+ return None
+ self.footnote_index += 1
+ self.footnotes[key] = self.footnote_index
+ return self.renderer.footnote_ref(key, self.footnote_index)
+
+ def output_link(self, m):
+ return self._process_link(m, m.group(2), m.group(3))
+
+ def output_reflink(self, m):
+ key = _keyify(m.group(2) or m.group(1))
+ if key not in self.links:
+ return None
+ ret = self.links[key]
+ return self._process_link(m, ret['link'], ret['title'])
+
+ def output_nolink(self, m):
+ key = _keyify(m.group(1))
+ if key not in self.links:
+ return None
+ ret = self.links[key]
+ return self._process_link(m, ret['link'], ret['title'])
+
+ def _process_link(self, m, link, title=None):
+ line = m.group(0)
+ text = m.group(1)
+ if line[0] == '!':
+ return self.renderer.image(link, title, text)
+
+ self._in_link = True
+ text = self.output(text)
+ self._in_link = False
+ return self.renderer.link(link, title, text)
+
+ def output_double_emphasis(self, m):
+ text = m.group(2) or m.group(1)
+ text = self.output(text)
+ return self.renderer.double_emphasis(text)
+
+ def output_emphasis(self, m):
+ text = m.group(2) or m.group(1)
+ text = self.output(text)
+ return self.renderer.emphasis(text)
+
+ def output_code(self, m):
+ text = m.group(2)
+ return self.renderer.codespan(text)
+
+ def output_linebreak(self, m):
+ return self.renderer.linebreak()
+
+ def output_strikethrough(self, m):
+ text = self.output(m.group(1))
+ return self.renderer.strikethrough(text)
+
+ def output_text(self, m):
+ text = m.group(0)
+ return self.renderer.text(text)
+
+
+class Renderer(object):
+ """The default HTML renderer for rendering Markdown.
+ """
+
+ def __init__(self, **kwargs):
+ self.options = kwargs
+
+ def placeholder(self):
+ """Returns the default, empty output value for the renderer.
+
+ All renderer methods use the '+=' operator to append to this value.
+ Default is a string so rendering HTML can build up a result string with
+ the rendered Markdown.
+
+ Can be overridden by Renderer subclasses to be types like an empty
+ list, allowing the renderer to create a tree-like structure to
+ represent the document (which can then be reprocessed later into a
+ separate format like docx or pdf).
+ """
+ return ''
+
+ def block_code(self, code, lang=None):
+ """Rendering block level code. ``pre > code``.
+
+ :param code: text content of the code block.
+ :param lang: language of the given code.
+ """
+ code = code.rstrip('\n')
+ if not lang:
+ code = escape(code, smart_amp=False)
+            return '<pre><code>%s\n</code></pre>\n' % code
+        code = escape(code, quote=True, smart_amp=False)
+        return '<pre class="lang-%s"><code>%s\n</code></pre>\n' % (lang, code)
+
+    def block_quote(self, text):
+        """Rendering <blockquote> with the given text.
+
+        :param text: text content of the blockquote.
+        """
+        return '<blockquote>%s\n</blockquote>\n' % text.rstrip('\n')
+
+ def block_html(self, html):
+ """Rendering block level pure html content.
+
+ :param html: text content of the html snippet.
+ """
+ if self.options.get('skip_style') and \
+           html.lower().startswith('<style'):