class TableMarkupExtensionTest(unittest.TestCase):
    """Tests for TableMarkupExtension: document parts separated by a
    horizontal rule are rendered as cells of a single table row, and
    leading/trailing rules produce no empty cells."""

    def setUp(self):
        self.originalMd = Markdown()
        self.md = Markdown([TableMarkupExtension()])

    def test_basic(self):
        firstPart = "First part with two paragraphs\n\nThe second paragraph\n\n* A list"
        secondPart = "Second part"
        actual = self.md.convert(firstPart + "\n\n-----\n\n" + secondPart)
        # All pattern fragments are raw strings now: '\s' inside a non-raw
        # literal is an invalid escape sequence (SyntaxWarning since
        # Python 3.6, scheduled to become a SyntaxError).
        expectedRe = (
            r"^<tr>\s*"
            r"<td>\s*" + re.escape(self.originalMd.convert(firstPart)) + r"\s*</td>\s*"
            r"<td>\s*" + re.escape(self.originalMd.convert(secondPart)) + r"\s*</td>\s*"
            r"</tr>$"
        )
        self.assertRegex(actual, expectedRe)

    def test_beginning_hr_is_ignored(self):
        source = "-----\n\nТекст"
        expectedRe = r"""^<tr>\s*<td>\s*<p>Текст</p>\s*</td>\s*</tr>$"""
        self.assertRegex(self.md.convert(source), expectedRe)

    def test_ending_hr_is_ignored(self):
        source = "Текст\n\n-----"
        expectedRe = r"""^<tr>\s*<td>\s*<p>Текст</p>\s*</td>\s*</tr>$"""
        self.assertRegex(self.md.convert(source), expectedRe)
class ItemTypeExtensionTest(unittest.TestCase):
    """Tests for ItemTypeExtension: a ``[tag]`` marker inside a list item
    becomes a ``type_<tag>`` CSS class on the ``<li>`` element."""

    def setUp(self):
        self.md = Markdown([ItemTypeExtension()])

    def _assert_renders(self, markdown_source, pattern):
        # Shared helper: convert the source and match against the pattern.
        self.assertRegex(self.md.convert(markdown_source), pattern)

    def test_beginning(self):
        self._assert_renders(
            "* [deferred] элемент списка",
            r"""^<ul>\s*<li class="type_deferred">элемент списка</li>\s*</ul>$""")

    def test_middle(self):
        self._assert_renders(
            "* элемент [wait] списка",
            r"""^<ul>\s*<li class="type_wait">элемент +списка</li>\s*</ul>$""")

    def test_end(self):
        self._assert_renders(
            "* элемент списка [freetime]",
            r"""^<ul>\s*<li class="type_freetime">элемент списка *</li>\s*</ul>$""")

    def test_unknown_is_not_replaced(self):
        # A bracketed word that is not a known type stays literal.
        self._assert_renders(
            "* элемент [списка]",
            r"""^<ul>\s*<li>элемент \[списка\]</li>\s*</ul>$""")

    def test_references_not_messed_up(self):
        # Reference-style markdown links must survive the extension.
        self._assert_renders(
            "Тестовая [freetime][deferred] ссылка\n\n[deferred]: http://example.com",
            r"""^<p>Тестовая <a href=["']http://example.com["']>freetime</a> ссылка</p>$""")
def load_stories(story_stuff):
    """Convert story markdown files into a list of story dicts.

    ``story_stuff`` yields ``(file_object, filename)`` pairs.  Each story
    gets its rendered HTML body, a slug derived from the filename, and the
    metadata fields declared in ``meta`` (validated through
    ``apply_field_constraints``).
    """
    # field name -> (required flag, filter names); all single-valued.
    meta = {
        "title": (True, ("one",)),
        "description": (True, ("one",)),
        "image": (True, ("one",)),
        "button": (True, ("one",)),
        "link": (True, ("one",)),
        "button2": (False, ("one",)),
        "link2": (False, ("one",)),
        "action": (False, ("one",)),
    }
    markdowner = Markdown(extensions=["meta"], output_format="html5")
    stories = []
    for story_file, filename in story_stuff:
        rendered = markdowner.convert(story_file.read())  # also loads metadata
        story = {
            "body": Markup(rendered),
            "slug": os.path.splitext(filename)[0],
        }
        for field, (required, filters) in meta.items():
            raw_value = markdowner.Meta.get(field)
            try:
                story[field] = apply_field_constraints(raw_value, required, filters)
            except MetaError as err:
                err.apply_context(filename, field)
                raise
        stories.append(story)
        markdowner.reset()  # clear per-file metadata state
    return stories
class MarkdownReader(Reader):
    """Pelican reader for Markdown source files."""

    enabled = bool(Markdown)
    file_extensions = ['md', 'markdown', 'mkd', 'mdown']
    default_extensions = ['codehilite(css_class=highlight)', 'extra']

    def __init__(self, *args, **kwargs):
        super(MarkdownReader, self).__init__(*args, **kwargs)
        # Copy before appending: the original appended 'meta' directly onto
        # the list returned by settings.get(), which is either the shared
        # class-level default_extensions or the caller's settings list —
        # so every instantiation mutated shared state and grew it by
        # another 'meta' entry.
        self.extensions = list(
            self.settings.get('MD_EXTENSIONS', self.default_extensions))
        if 'meta' not in self.extensions:
            self.extensions.append('meta')
        self._md = Markdown(extensions=self.extensions)

    def _parse_metadata(self, meta):
        """Return the dict containing document metadata"""
        output = {}
        for name, value in meta.items():
            name = name.lower()
            if name == "summary":
                # Summary values are themselves markdown: join and convert.
                summary_values = "\n".join(value)
                # reset the markdown instance to clear any state
                self._md.reset()
                summary = self._md.convert(summary_values)
                output[name] = self.process_metadata(name, summary)
            else:
                output[name] = self.process_metadata(name, value[0])
        return output

    def read(self, source_path):
        """Parse content and metadata of markdown files"""
        with pelican_open(source_path) as text:
            content = self._md.convert(text)
        metadata = self._parse_metadata(self._md.Meta)
        return content, metadata
class Markdown:
    """Wrapper around MarkdownParser that also extracts a leading
    ``name: value`` metadata header from the file before conversion."""

    def __init__(self):
        self.markdown = MarkdownParser(
            output_format='html5',
            extensions=['tables'],
        )

    def parse(self, fp):
        """Parse the open file ``fp``.

        Returns ``(pagetype, meta, content)`` where ``meta`` is the header
        dict (lower-cased keys, 'type' popped out) and ``content`` is the
        converted HTML body.
        """
        meta = {}

        def read_file():
            in_head = True
            for line in fp:
                if not in_head:
                    yield line
                elif len(line) < 2:
                    # A blank line (just the newline) ends the header.
                    in_head = False
                    yield line
                else:
                    # Split on the FIRST colon only: header values such as
                    # URLs ('link: http://...') contain further colons, and
                    # the original unbounded split raised ValueError on them.
                    name, value = line.split(':', 1)
                    meta[name.lower()] = value.strip()

        content = self.markdown.convert(''.join(read_file()))
        self.markdown.reset()
        pagetype = meta.pop('type', 'templated')  # Default Pagetype
        return pagetype, meta, content
def html_content(self):
    """Lazily render ``self.content`` from Markdown to HTML.

    The rendered HTML is cached on the instance; later calls return the
    cached value without re-converting.
    """
    if not hasattr(self, '_html_content'):
        converter = Markdown(extensions=['meta'])
        self._html_content = converter.convert(self.content)
    return self._html_content
def load_entries(self):
    """Read every file under entries/, convert the Markdown to HTML and
    build self.entries / self.galley_entries, newest first.

    NOTE(review): Python 2 code — uses the ``print`` statement, the
    ``file()`` builtin and ``cmp``-style sort comparators.
    """
    md = Markdown(extensions=['codehilite(guess_lang=False)', 'meta', LightBoxExtension()])
    # Lazily read and decode every entry file.
    raw = (file(fn, 'r').read().decode('utf-8') for fn in glob('entries/*'))
    self.entries = []
    self.galley_entries = []
    for entry in raw:
        html, meta = md.convert(entry), md.Meta
        # Entries missing either a title or a date are skipped entirely.
        if 'title' not in meta or 'date' not in meta:
            continue
        title, date = meta['title'][0], meta['date'][0]
        tags = meta.get('tags', [])
        print 'Processed', title
        this_entry = dict(
            title=title, date=date, tags=tags, raw=entry, html=html,
            link=date + '_' + self.title_sub(title) + '.html'
        )
        # Entries tagged 'galley' go into a separate list.
        if 'galley' in tags:
            self.galley_entries.append(this_entry)
        else:
            self.entries.append(this_entry)
    # Sort both lists newest-date-first.
    self.entries.sort(lambda a, b: cmp(b['date'], a['date']))
    self.galley_entries.sort(lambda a, b: cmp(b['date'], a['date']))
def md_parse_docs(fn):
    """Parse a markdown document into a dict of docstrings keyed by
    ``(section, field)``.

    ``<h1>`` headings start a new section, ``<h2>`` headings a new field
    within the current section; all other elements are accumulated as
    serialized HTML under the current ``(section, field)`` key.
    """
    md = Markdown(safe_mode='escape')
    with open(fn, 'r') as source:
        docs_html = md.convert(source.read())
    tree = etree.HTML(docs_html)

    docs = {}
    heading = tree.find('body/h1')
    if heading is None:
        # No top-level heading: nothing to key the docs by.
        return docs

    section, field = heading.text, ''
    for node in heading.itersiblings():
        if node.tag == 'h1':
            section, field = node.text, ''
        elif node.tag == 'h2':
            field = node.text
        else:
            key = (section, field)
            docs[key] = docs.get(key, '') + etree.tostring(node)
    return docs
def read(self, filename):
    """Parse content and metadata of R-markdown (.Rmd) files.

    knitr first renders the .Rmd source to an intermediate markdown file,
    which is then converted to HTML; the intermediate file is removed
    afterwards.
    """
    aux_filename = filename.replace(".Rmd", ".aux").replace(".rmd", ".aux")
    # Render the Rmd source to plain markdown via knitr (R side).
    robjects.r(
        """
        require(knitr);
        opts_knit$set(base.dir='{2}/content');
        knit('{0}', '{1}', quiet=TRUE, encoding='UTF-8');
        """.format(filename, aux_filename, settings.DEFAULT_CONFIG.get("PATH"))
    )
    # Convert the intermediate markdown file.
    md = Markdown(extensions=["meta", "codehilite(css_class=highlight)", "extra"])
    with pelican_open(aux_filename) as text:
        content = md.convert(text)
    os.remove(aux_filename)
    # Collect the metadata gathered during conversion.
    metadata = {}
    for key, values in md.Meta.items():
        key = key.lower()
        metadata[key] = self.process_metadata(key, values[0])
    return content, metadata
def main(in_file_path, out_file_path):
    """Render the Markdown file at ``in_file_path`` as HTML5 and write the
    result to ``out_file_path``."""
    converter = Markdown(output_format='html5')
    with open(in_file_path, 'r') as source, open(out_file_path, 'w') as sink:
        sink.write(converter.convert(source.read()))
def main():
    """Parse an API Blueprint document and either pickle the resulting API
    object or dump its structure to stdout."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-o", "--output", help="Output pickle file path", default=None)
    arg_parser.add_argument("input", help="Input API Blueprint file")
    args = arg_parser.parse_args()

    with codecs.open(args.input, "r", "utf-8") as fin:
        source_text = fin.read()

    converter = Markdown(extensions=["plueprint"])
    converter.set_output_format("apiblueprint")
    api = converter.convert(source_text)

    if args.output is None:
        # No output file given: print the parsed hierarchy instead.
        print(api)
        print("Resource groups:")
        for group in api:
            print(" %s" % group)
            print(" Resources:")
            for resource in group:
                print(" %s" % resource)
                print(" Actions:")
                for action in resource:
                    print(" %s" % action)
    else:
        with open(args.output, "wb") as fout:
            pickle.dump(api, fout, protocol=-1)
def test_callbacks(self):
    """A linkify callback can veto link creation for bare ``.py`` names."""

    def dont_linkify_python(attrs, new=False):
        # Existing <a> tags pass through untouched.
        if not new:
            return attrs
        text = attrs['_text']
        looks_like_url = text.startswith(('www.', 'http:', 'https:'))
        if text.endswith('.py') and not looks_like_url:
            # A bare Python filename, not a URL: suppress the link.
            return None
        # Everything checks out, keep going to the next callback.
        return attrs

    configs = {'linkifycallbacks': [[dont_linkify_python], '']}
    md = Markdown(extensions=[LinkifyExtension(configs=configs)])
    source = "setup.com www.setup.py http://setup.py setup.py"
    expected = ('<p><a href="http://setup.com">setup.com</a> '
                '<a href="http://www.setup.py">www.setup.py</a> '
                '<a href="http://setup.py">http://setup.py</a> '
                'setup.py</p>')
    self.assertEqual(expected, md.convert(source))
class UrlizeExtensionTest(unittest.TestCase):
    """Tests for UrlizeExtension: bare URLs become links while existing
    markdown and manual HTML links are left intact."""

    def setUp(self):
        self.md = Markdown([UrlizeExtension()])

    def _assert_renders(self, markdown_source, pattern):
        # Shared helper: convert and match against the expected regex.
        self.assertRegex(self.md.convert(markdown_source), pattern)

    def test_links_in_angle_brackets_processed(self):
        self._assert_renders(
            "<http://yandex.ru/>",
            r"""^<p><a href=['"]http://yandex.ru/['"]>http://yandex.ru/</a></p>$""")

    def test_links_starting_with_http_processed(self):
        self._assert_renders(
            "http://example.com/",
            r"""^<p><a href=['"]http://example.com/['"]>http://example.com/</a></p>$""")

    def test_links_starting_with_www_processed(self):
        # www-prefixed names get an implicit http:// scheme.
        self._assert_renders(
            "www.example.com",
            r"""^<p><a href=['"]http://www.example.com['"]>www.example.com</a></p>$""")

    def test_markdown_links_not_messed_up(self):
        self._assert_renders(
            "[](http://yandex.ru/)",
            r""" href=['"]http://yandex.ru/['"]""")

    def test_manual_links_not_messed_up(self):
        source = "<a href='http://yandex.ru/'>Яндекс</a>"
        self.assertEqual("<p>%s</p>" % source, self.md.convert(source))
class HtmlHeaderExtensionTest(unittest.TestCase):
    """Tests for HtmlHeaderExtension: output is prefixed with a doctype and
    a charset meta tag, and a single top-level heading becomes <title>."""

    DOCTYPE_AND_META = '^' + \
        r"""<!DOCTYPE html>\n""" + \
        r"""<meta charset=['"]utf-8['"](>|></meta>| */>)\s*"""

    def setUp(self):
        self.md = Markdown([HtmlHeaderExtension()])

    def test_basic(self):
        source = "Текст"
        expectedRe = self.DOCTYPE_AND_META + "<p>Текст</p>$"
        self.assertRegex(self.md.convert(source), expectedRe)

    def test_title_is_inserted_for_1st_level_heading(self):
        source = "# Заголовок"
        # Raw strings for the '\s' fragments: '\s' in a non-raw literal is
        # an invalid escape sequence (SyntaxWarning since Python 3.6,
        # scheduled to become a SyntaxError).
        expectedRe = self.DOCTYPE_AND_META + \
            r"<title>Заголовок</title>\s*" + \
            "<h1>Заголовок</h1>$"
        self.assertRegex(self.md.convert(source), expectedRe)

    def test_title_not_inserted_if_several_headings(self):
        source = "# Заголовок 1\n\n# Заголовок 2"
        expectedRe = self.DOCTYPE_AND_META + \
            r"<h1>Заголовок 1</h1>\s*" + \
            r"<h1>Заголовок 2</h1>\s*$"
        self.assertRegex(self.md.convert(source), expectedRe)

    def test_title_tags_are_property_stripped(self):
        source = "# Заголовок со [ссылкой](http://example.com/) внутри"
        expectedRe = self.DOCTYPE_AND_META + "<title>Заголовок со ссылкой внутри</title>"
        self.assertRegex(self.md.convert(source), expectedRe)
def test_i18n_always_after_toc(self):
    """The toc extension must see the translated heading, i.e. i18n runs
    before toc builds its index."""
    source = '# This is h1'
    translated_toc = (
        '<div class="toc">'
        ' <ul>'
        ' <li><a href="#esto-es-h1">Esto es h1</a></li>'
        ' </ul>'
        '</div>'
    )
    self.catalog.add('This is h1', 'Esto es h1')
    self.write_po()

    i18n_config = {
        'markdown_i18n': {
            'i18n_dir': self.dir,
            'i18n_lang': 'es_ES'
        }
    }
    md = Markdown(
        extensions=['markdown.extensions.toc', 'markdown_i18n'],
        extension_configs=i18n_config
    )
    md.convert(source)
    generated_toc = getattr(md, 'toc', '')
    self.assertEqual(clean_xml(generated_toc), clean_xml(translated_toc))
def __init__(self, templatefile, *apifiles, **kwargs):
    """Build a test-case generator from a Jinja template directory and one
    or more API Blueprint files.

    :param templatefile: directory handed to Jinja's FileSystemLoader; the
        template itself is loaded under the empty name "".
    :param apifiles: API Blueprint document paths; the first is parsed and
        every further one is merged into it.
    Recognized keyword arguments: ``include_comments`` (default True),
    ``base_class`` (default "rest_framework.test.APITestCase"),
    ``html2text`` (default False).
    :raises ValueError: when no API Blueprint file is given.
    """
    if len(apifiles) == 0:
        raise ValueError("There must be at least one APIBlueprint file "
                         "specified")
    include_comments = kwargs.pop("include_comments", True)
    base_class = kwargs.pop("base_class", None)
    if base_class is None:
        base_class = "rest_framework.test.APITestCase"
    self._use_html2text = kwargs.pop("html2text", False)
    # Parse the blueprint documents with the plueprint markdown extension.
    m = Markdown(extensions=["plueprint"])
    m.set_output_format("apiblueprint")
    with codecs.open(apifiles[0], "r", "utf-8") as fin:
        self._api = m.convert(fin.read())
    # Remaining documents are merged into the first API object.
    for f in apifiles[1:]:
        with codecs.open(f, "r", "utf-8") as fin:
            self._api.merge(m.convert(fin.read()))
    env = Environment(loader=FileSystemLoader(templatefile),
                      autoescape=False,
                      trim_blocks=True,
                      lstrip_blocks=True,
                      extensions=("jinja2.ext.loopcontrols",))
    env.filters["symbolize"] = self._symbolize
    env.filters["html2text"] = self._html2text
    self._template = env.get_template("")
    self._include_comments = include_comments
    # Split "pkg.module.Class" into class name and module path.
    self._base_class = base_class[base_class.rfind('.') + 1:]
    self._base_module = base_class[:-len(self._base_class) - 1]
    self._counter = 1
def get_bug(filename):
    """Load a bug-description text file and return it as a dict with
    content, title, id, type, category and the validators/working lists.

    NOTE(review): Python 2 code (uses the ``print`` statement).
    """
    print 'Getting for {}'.format(filename)
    if filename[-4:] != '.txt':
        filename = filename + '.txt'
    # utf-8-sig strips a BOM if one is present.
    with codecs.open(os.path.join(bugsdir, filename), 'r', 'utf-8-sig') as f:
        text = f.read()
    md = Markdown(extensions=['markdown.extensions.meta'], output_format='html5')
    output = {'content': md.convert(text)}
    output['title'] = md.Meta.get('title', [''])[0]
    output['id'] = filename[:-4]
    # Unknown type/category metadata falls back to 'bug'/'unclassified'.
    output['type'] = types.get(md.Meta.get('type', ['bug'])[0], 'bug')
    output['category'] = categories.get(md.Meta.get('category', ['unclassified'])[0], 'unclassified')
    output['validators'] = []
    output['working'] = []
    # Each 'xp' metadata line is "name,email,verified[,key=value,...]".
    for experience in md.Meta.get('xp', ['']):
        splitted = experience.split(',')
        name = splitted[0]
        email = splitted[1]
        verified = splitted[2]
        data = {
            'name': name,
        }
        # Extra "key=value" pairs are folded into the data dict.
        for elem in splitted[3:]:
            splitelem = elem.split('=')
            data[splitelem[0]] = splitelem[1]
        if verified == 'yes':
            output['validators'].append(data)
        elif verified == 'no':
            output['working'].append(data)
    return output
def convert(self, source, **context):
    """Convert the Markdown source file and render it through the theme
    template, returning the final output."""
    with codecs.open(source, encoding='utf-8') as src:
        lines = src.readlines()

    # Run only the meta preprocessor first: the theme (and with it the
    # markdown extension list) is chosen from the document's metadata.
    md = Markdown()
    lines = MetaPreprocessor(md).run(lines)
    Meta = md.Meta
    meta = {key: ' '.join(values) for key, values in Meta.items()}

    # Load theme from meta data if set
    theme = meta.get('theme', 'default')
    exts = self.config.theme_get(theme, 'markdown_extensions', [
        'codehilite(css_class=syntax,guess_lang=False)'])
    exts = [ext for ext in exts if ext]  # drop empty entries

    md = Markdown(extensions=exts)
    md.Meta = meta  # restore already parsed meta data
    content = md.convert(''.join(lines))

    context['Meta'] = Meta
    context['meta'] = meta
    return self.config.render_template(theme, content, **context)
def load_stories(story_stuff):
    """Build story dicts from (file, filename) pairs of markdown sources.

    Each story carries its rendered HTML body, a filename-derived slug,
    and the validated metadata fields listed in ``meta``.
    """
    # field name -> (required flag, filter names)
    meta = {
        'title': (True, ('one',)),
        'description': (True, ('one',)),
    }
    markdowner = Markdown(extensions=['meta'], output_format='html5')
    stories = []
    for story_file, filename in story_stuff:
        rendered = markdowner.convert(story_file.read())  # fills .Meta too
        story = {'body': Markup(rendered),
                 'slug': os.path.splitext(filename)[0]}
        for field, (required, filters) in meta.items():
            try:
                story[field] = apply_field_constraints(
                    markdowner.Meta.get(field), required, filters)
            except MetaError as err:
                err.apply_context(filename, field)
                raise
        stories.append(story)
        markdowner.reset()  # clear metadata before the next file
    return stories
def mk_manual():
    """Render readme.md to an HTML manual page and upload it."""
    import codecs
    md = Markdown(extensions=['toc'])
    # NOTE(review): the codecs handle is never closed, and .encode() on the
    # rendered text before writing suggests Python 2 era code — confirm.
    content = md.convert(codecs.open('readme.md', encoding='utf8').read())
    with open('summary/asema-manual.html','w') as out:
        out.write(html_manual.substitute(content=content.encode('utf8')))
    upload('summary/asema-manual.html')
def load_blog(blog_stuff):
    """Build blog-post dicts from (file, filename) pairs of markdown
    sources, with content, first-line summary, slug and validated
    metadata fields."""
    # field name -> (required flag, filter names)
    meta = {
        'title': (True, ('one',)),
        'author': (True, ('one',)),
        'date': (True, ('one', 'iso-date')),
        'photo': (False, ('one',)),
        'photo_caption': (False, (''))
    }
    markdowner = Markdown(extensions=['meta'], output_format='html5')
    posts = []
    for blog_file, filename in blog_stuff:
        rendered = markdowner.convert(blog_file.read())  # fills .Meta too
        post = {
            'content': Markup(rendered),
            # First line of the rendered HTML serves as the summary.
            'summary': Markup('\n'.join(rendered.split('\n', 1)[:-1])),
            'slug': os.path.splitext(filename)[0],
        }
        for field, (required, filters) in meta.items():
            try:
                post[field] = apply_field_constraints(
                    markdowner.Meta.get(field), required, filters)
            except MetaError as err:
                err.apply_context(filename, field)
                raise
        posts.append(post)
        markdowner.reset()  # clear metadata before the next file
    return posts
def read(self, filename):
    """Parses the given file and returns a :class:`Page` object

    :param filename: path to the file to read.
    :type filename: str.
    :returns: Page
    """
    # Context manager so the handle is closed even when reading raises;
    # the original open().read() leaked the file object on error.
    try:
        with open(filename, 'r', encoding='utf-8') as source:
            text = source.read()
    except UnicodeDecodeError:
        print('wrong encoding: {0}'.format(filename))
        raise

    md = Markdown(extensions=set(self.extensions + ['meta']))
    content = md.convert(text)

    metadata = {}
    for name, value in md.Meta.items():
        name = name.lower()
        metadata[name] = self.process_metadata(name, value[0])

    return Page(filename.replace(self.path, ''), content,
                metadata.get('title'), metadata.get('title_short'),
                metadata.get('template'), metadata.get('parent'),
                metadata.get('sort'), metadata.get('in_nav'))
def update(self):
    """Re-read the page source from disk and refresh html, metadata,
    timestamps, title, visibility and labels."""
    logger.warning('Update {path}.'.format(path=self.path))
    with open(self.path, 'r', encoding='utf-8') as f:
        source_md = f.read()

    md = Markdown(extensions=self._md_extensions)
    self.html = md.convert(source_md)
    self.meta = PageMeta(meta=md.Meta)

    created_raw = self.meta.created
    modified_raw = self.meta.modified
    if created_raw:
        created = arrow.get(created_raw).datetime
    else:
        # No metadata date: fall back to the filesystem creation time.
        created = arrow.get(os.path.getctime(self.path)).datetime
    modified = arrow.get(modified_raw).datetime if modified_raw else created
    self.created = created
    self.modified = modified

    self.title = self.meta.title or md.title
    # Any value other than the string 'false' counts as visible.
    self.visible = str(self.meta.visible).lower() != 'false'

    self.labels = set()
    for label in self.meta.get_list('labels'):
        self.labels.add(Label(label))
def get_html_content(self):
    """Render self.content to HTML5 and return it together with the
    generated table of contents (when the toc extension produced one)."""
    md = Markdown(
        extensions=[
            TocExtension([('title', 'Table of Contents')]),
            'outline',
            'mathjax',
            WikiTableExtension(),
            NofollowExtension(),
            'def_list',
            MarginNotesExtension(),
        ],
        output_format='html5',
        safe_mode='escape'
    )
    rendered = md.convert(self.content)
    return {
        'html': rendered,
        'toc': getattr(md, 'toc', None),
    }
def read_markdown(filename):
    """Reads markdown file, converts output and fetches title and
    meta-data for further processing.
    """
    # utf-8-sig strips a BOM before the text reaches the markdown parser
    # (which itself would default to plain utf-8).
    with codecs.open(filename, 'r', 'utf-8-sig') as f:
        text = f.read()

    md = Markdown(extensions=['markdown.extensions.meta',
                              'markdown.extensions.tables'],
                  output_format='html5')

    # Markup() keeps jinja2 from autoescaping the generated HTML.
    output = {'description': Markup(md.convert(text))}

    try:
        meta = md.Meta.copy()
    except AttributeError:
        pass
    else:
        output['meta'] = meta

    try:
        output['title'] = md.Meta['title'][0]
    except KeyError:
        pass
    return output
def main():
    """Run the plueprint converter over every example document shipped in
    the api-blueprint repository and dump the parsed structure."""
    converter = Markdown(extensions=["plueprint"])
    converter.set_output_format("apiblueprint")
    examples_dir = os.path.join(os.path.dirname(__file__),
                                "api-blueprint", "examples")
    skip_number = 0
    index = 0
    for doc in sorted(os.listdir(examples_dir)):
        # Only .md example files, and never the README itself.
        if os.path.splitext(doc)[1] != ".md" or doc == "README.md":
            continue
        index += 1
        if index <= skip_number:
            continue
        with codecs.open(os.path.join(examples_dir, doc), "r", "utf-8") as fin:
            source_text = fin.read()
        api = converter.convert(source_text)
        print("-- %s --" % doc)
        print(api)
        try:
            api[">"].print_resources()
        except KeyError:
            pass
        print("Actions:")
        for action in api["/"]:
            print(action)
def decode(self, path, content):
    """ Decode content retrieved from the store

    This handler behaves as follows

    1. If the filename has an extension (i.e. "a.png", "file.txt" or
       "file.") then the content is not handled at ALL.
    2. Otherwise the content is decode as utf-8 and converted from
       Markdown to HTML

    The returned structure is a dictionary, holding the following keys

    * content - The html converted content
    * metadata - The metadata retrieved from the store
    * raw - The original content as received before parsing
    """
    if os.path.splitext(path)[1]:
        # Rule 1: anything carrying a file extension passes through as-is.
        return content
    md = Markdown(extensions=['meta', 'codehilite', 'fenced_code'])
    html = md.convert(content.decode('utf-8'))
    return {'content': html, 'metadata': md.Meta, 'raw': content}
def __call__(self, stream):
    """Convert ``stream`` from Markdown to HTML wrapped in a Markup."""
    from jinja2 import Markup
    from markdown import Markdown

    # A fresh converter per call: Markdown is not thread safe.
    converter = Markdown(**self._markdown_options)
    return Markup(converter.convert(stream))
def load_blog(blog_stuff):
    """Turn (file, filename) pairs of markdown blog sources into post
    dicts with content, first-line summary, slug and validated metadata."""
    # field name -> (required flag, filter names)
    field_specs = {
        "title": (True, ("one",)),
        "author": (True, ("one",)),
        "date": (True, ("one", "iso-date")),
        "photo": (False, ("one",)),
        "photo_caption": (False, ("")),
    }
    markdowner = Markdown(extensions=["meta"], output_format="html5")
    posts = []
    for blog_file, filename in blog_stuff:
        body_html = markdowner.convert(blog_file.read())  # also loads metadata
        post = {}
        post["content"] = Markup(body_html)
        post["summary"] = Markup("\n".join(body_html.split("\n", 1)[:-1]))
        post["slug"] = os.path.splitext(filename)[0]
        for field, (required, filters) in field_specs.items():
            raw = markdowner.Meta.get(field)
            try:
                post[field] = apply_field_constraints(raw, required, filters)
            except MetaError as exc:
                exc.apply_context(filename, field)
                raise
        posts.append(post)
        markdowner.reset()  # clear metadata before the next file
    return posts
def get_feeds(feeds_file, max_age, max_feeds):
    """Build an HTML digest of recent posts from an OPML feed list.

    :param feeds_file: path to the OPML file listing feeds.
    :param max_age: maximum post age, in days, to include.
    :param max_feeds: cap on the number of feeds processed.
    :returns: the full digest rendered as HTML.
    Side effect: writes the digest incrementally to ``rssdigest.html``.
    """
    opml = lp.parse(feeds_file)
    feeds = opml.feeds
    feeds = feeds[:max_feeds]
    md = Markdown()
    filename = "rssdigest.html"
    with open(filename, "w") as text_file:
        text_file.write(md.convert("# Daily RSS Digest \n----"))
    digeststring = "# Daily RSS Digest \n----\n\n"
    number_of_feeds = len(feeds)
    for index, feed in enumerate(feeds):
        feed = feedparser.parse(feed.url)
        feedstring = ""
        addfeed = False
        print("[" + str(index) + "/" + str(number_of_feeds) + "]")
        if 'title' in feed.feed:
            feedstring += "## " + feed.feed.title + "\n"
        for entry in feed.entries:
            localtime = time.localtime()
            try:
                publishedtime = entry.published_parsed
                # age in days
                age = (time.mktime(localtime) - time.mktime(publishedtime)) / 60 / 60 / 24
                if age < max_age:
                    feedstring += "## ["+entry.title+"]("+entry.link+")\n\n"
                    if 'description' in entry:
                        if len(entry.description) < 500:
                            feedstring += entry.description + "\n\n"
                    addfeed = True
            except Exception:
                # Entries without a parseable published date are skipped.
                # (Was a bare `except:`, which also swallowed SystemExit
                # and KeyboardInterrupt.)
                pass
        if not addfeed:
            print(feedstring + "No new posts\n")
        feedstring += "----\n"
        if addfeed:
            print(feedstring)
            # Append to string
            digeststring += feedstring
            # Append to file
            with open(filename, "a") as text_file:
                feedhtml = md.convert(feedstring)
                text_file.write(feedhtml)
    digesthtml = md.convert(digeststring)
    return digesthtml
def hipchat_html():
    """Build a Markdown converter configured for HipChat-flavoured XHTML."""
    extensions = [ExtraExtension(), HipchatExtension()]
    return Markdown(output_format='xhtml', extensions=extensions)
class MarkdownReader(BaseReader):
    """Reader for Markdown files"""

    enabled = bool(Markdown)
    file_extensions = ['md', 'markdown', 'mkd', 'mdown']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        settings = self.settings['MARKDOWN']
        settings.setdefault('extension_configs', {})
        settings.setdefault('extensions', [])
        # Every configured extension must also appear in 'extensions'.
        for extension in settings['extension_configs'].keys():
            if extension not in settings['extensions']:
                settings['extensions'].append(extension)
        # The meta extension is always required for metadata parsing.
        if 'markdown.extensions.meta' not in settings['extensions']:
            settings['extensions'].append('markdown.extensions.meta')
        self._source_path = None

    def _parse_metadata(self, meta):
        """Return the dict containing document metadata"""
        formatted_fields = self.settings['FORMATTED_FIELDS']
        # prevent metadata extraction in fields
        self._md.preprocessors.deregister('meta')
        output = {}
        for name, value in meta.items():
            name = name.lower()
            if name in formatted_fields:
                # formatted metadata is special case and join all list values
                formatted_values = "\n".join(value)
                # reset the markdown instance to clear any state
                self._md.reset()
                formatted = self._md.convert(formatted_values)
                output[name] = self.process_metadata(name, formatted)
            elif not DUPLICATES_DEFINITIONS_ALLOWED.get(name, True):
                if len(value) > 1:
                    logger.warning(
                        'Duplicate definition of `%s` '
                        'for %s. Using first one.',
                        name, self._source_path)
                output[name] = self.process_metadata(name, value[0])
            elif len(value) > 1:
                # handle list metadata as list of string
                output[name] = self.process_metadata(name, value)
            else:
                # otherwise, handle metadata as single string
                output[name] = self.process_metadata(name, value[0])
        return output

    def read(self, source_path):
        """Parse content and metadata of markdown files"""
        self._source_path = source_path
        self._md = Markdown(**self.settings['MARKDOWN'])
        with pelican_open(source_path) as text:
            content = self._md.convert(text)
        # Metadata is only available when the meta extension actually ran.
        if hasattr(self._md, 'Meta'):
            metadata = self._parse_metadata(self._md.Meta)
        else:
            metadata = {}
        return content, metadata
# Standard library imports.
from argparse import ArgumentParser
from inspect import getdoc
from json import loads, dumps
from random import randint
from socket import getfqdn
from time import ctime

# Third-party imports.
from jinja2 import Environment, FileSystemLoader
from markdown import Markdown
from twisted.python import log  # Used by Klein (annoyingly).
from twisted.web.static import File
from klein import Klein
from autobahn.twisted.websocket import (
    WebSocketServerProtocol, WebSocketServerFactory, listenWS
)

# Module-level Markdown converter shared by the request handlers.
md = Markdown()

parser = ArgumentParser()  # So we can add command line arguments.
parser.add_argument(
    '-i', '--interface', default='0.0.0.0',
    help='The interface to listen on'
)
parser.add_argument(
    '-p', '--http-port', type=int, default=4000,
    help='The port to listen for HTTP requests on'
)
# NOTE(review): this chunk is truncated mid-statement — the closing
# parenthesis of the final add_argument call lies outside this view.
parser.add_argument(
    '-w', '--websocket-port', type=int, default=4001,
    help='The port to listen for websocket connections on'
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, request
from django.urls import reverse
from markdown import Markdown
from django import forms
from random import randint

from . import util

# Module-level Markdown converter shared by the views.
md = Markdown()


class createPage(forms.Form):
    # Form for creating a new encyclopedia page.
    title = forms.CharField(label="Title", widget=forms.TextInput(attrs={'cols': 60}))
    description = forms.CharField(label="Description", widget=forms.Textarea(attrs={'cols': 120}))


def index(request):
    """List all encyclopedia entries."""
    return render(request, "encyclopedia/index.html", {"entries": util.list_entries()})


def pages(request, name):
    """Render a single encyclopedia page.

    NOTE(review): truncated here — the rest of the view body lies outside
    this chunk.
    """
    # get_entry returns the page source when it exists, otherwise None.
    page_name = util.get_entry(name)
    if page_name is None:
        return render(request, "encyclopedia/invalidPage.html", {"title": name.capitalize()})
    body = md.convert(page_name)
class Generator():
    """Static-site generator: converts src/*.md through an HTML template
    into the dest/ directory."""

    def __init__(self):
        # Init variables
        self.CWD = getcwd()
        self.SRC_PATH = path.join(self.CWD, 'src')
        self.OUT_PATH = path.join(self.CWD, 'dest')
        self.ASSET_PATH = path.join(path.dirname(__file__), 'assets')
        self.TEMPLATE = ''
        self.mdExtentions = ['meta']
        # Read config files
        self.readConfig()
        self.readTemplate()
        # Init markdown parser
        self.md = Markdown(extensions=self.mdExtentions)
        # Init colorful output
        init()

    def readConfig(self) -> None:
        """Load the config from config.json"""
        CONFIG_PATH = path.join(self.CWD, 'config.json')
        if path.isfile(CONFIG_PATH):
            with open(CONFIG_PATH) as f:
                config = f.read()
            self.CONFIG = loads(config)
        else:
            error('config.json not found')

    def readTemplate(self) -> None:
        """Load either a custom or the fallback template"""
        TEMPLATE_PATH = path.join(self.SRC_PATH, 'template.html')
        if path.isfile(TEMPLATE_PATH):
            with open(TEMPLATE_PATH) as f:
                self.TEMPLATE = f.read()
        # Use the basic fallback file instead
        else:
            TEMPLATE_PATH_FALLBACK = path.join(self.ASSET_PATH, 'template.fallback.html')
            if (path.isfile(TEMPLATE_PATH_FALLBACK)):
                with open(TEMPLATE_PATH_FALLBACK) as f:
                    self.TEMPLATE = f.read()
            else:
                error('template.html and fallback file not found')

    def findFiles(self) -> None:
        """Find all the Markdown files in the source directory"""
        files = []
        for i in listdir(self.SRC_PATH):
            if path.isfile(path.join(self.SRC_PATH, i)):
                if i.endswith('.md'):
                    files.append(i)
            else:
                # TODO: Implement finding non top level files/pages create
                pass
        self.FILES = files

    def readFile(self, filename: str) -> dict:
        """Read a markdown file and return its metadata plus the rendered
        HTML (under the 'html' key)."""
        with open(filename) as f:
            mdIn = f.read()
        html = self.md.convert(mdIn)
        # Get the meta data (author, title etc.) from the last conversion.
        # Although some IDEs say this line is an error it works and is
        # right according to the documentation
        meta = self.md.Meta
        # Create a easily usable dict with the meta data and the converted HTML
        data = dict()
        for key in meta.keys():
            data[key] = meta[key][0]
        data['html'] = html
        return data

    def copyStaticFiles(self) -> None:
        """Copy the static file folder"""
        if 'static-folder' in self.CONFIG and type(
                self.CONFIG['static-folder']) == str:
            STATIC_FOLDER_PATH = path.join(self.CWD, self.OUT_PATH,
                                           self.CONFIG['static-folder'])
            if path.isdir(STATIC_FOLDER_PATH):
                # Only do something if there are files/folders in the folder
                if len(listdir(STATIC_FOLDER_PATH)):
                    pass
            else:
                # HACK: For some reason directly using the CONFIG variable
                # doesn't work, because of the []. So the folder name has to
                # get his own variable
                FOLDER_NAME = self.CONFIG['static-folder']
                error(f'Static file folder ({FOLDER_NAME}) does not exist')

    def templatize(self, conntent: dict) -> str:
        """Insert the title and HTML body into the page template."""
        template = self.TEMPLATE
        template = template.replace('<!--TITLE-->', conntent['title'])
        template = template.replace('<!--CONTENT-->', conntent['html'])
        return template

    def getOutFilename(self, filename: str) -> str:
        """Map a source .md filename to its .html output path."""
        filename = path.join(self.OUT_PATH, filename)
        # All the files are of the type Markdown (found by fileextention),
        # so the fileextention will always be .md what is 3 characters long
        return f'{filename[0:-3]}.html'

    def saveFile(self, filename: str, conntent: str) -> None:
        """Write file"""
        with open(filename, 'w') as f:
            f.write(conntent)

    def processFile(self, filename: str) -> None:
        """Run alle the functions needed to convert one file (read, convert & save)"""
        contentMD = self.readFile(path.join(self.SRC_PATH, filename))
        contentHTML = self.templatize(contentMD)
        outFilename = self.getOutFilename(filename)
        self.saveFile(outFilename, contentHTML)
        print(f'📝 (unknown)')

    def generate(self):
        """Run all the functions in the right order"""
        self.findFiles()
        for file in self.FILES:
            self.processFile(file)
        self.copyStaticFiles()
def _text_markdown(self, text):
    """Render ``text`` from Markdown to HTML."""
    return Markdown().convert(text)
from jinja2 import Markup
from markdown import Markdown

from standup.status.utils import (
    trim_urls,
    week_end as u_week_end,
    week_start as u_week_start,
)
from standup.mdext.nixheaders import NixHeaderExtension

# Patterns that recognize bug/PR/issue/user/tag references in updates.
BUG_RE = re.compile(r'(bug #?(\d+))', flags=re.I)
PULL_RE = re.compile(r'((?:pull|pr) #?(\d+))', flags=re.I)
ISSUE_RE = re.compile(r'(issue #?(\d+))', flags=re.I)
USER_RE = re.compile(r'(?<=^|(?<=[^\w\-.]))@([\w-]+)', flags=re.I)
TAG_RE = re.compile(r'(?:^|[^\w\\/])#([a-z][a-z0-9_.-]*)(?:\b|$)', flags=re.I)

# Shared module-level Markdown converter.
MD = Markdown(output_format='html5',
              extensions=[NixHeaderExtension(), 'nl2br', 'smart_strong'])


class Team(models.Model):
    """A team of users in the organization."""

    name = models.CharField(max_length=100, help_text='Name of the team')
    slug = models.SlugField(unique=True, max_length=100)

    class Meta:
        db_table = 'team'
        ordering = ('name', )

    def __str__(self):
        return self.name

    # NOTE(review): truncated here — the __repr__ body lies outside this view.
    def __repr__(self):
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="ru" xmlns:epub="http://www.idpf.org/2007/ops">
<head><link href="epub.css" media="all" rel="stylesheet" type="text/css" /><title></title></head>
<body>
%s
</body>
</html>
"""

# XHTML converter for EPUB chapters; the smarty substitutions produce
# Russian-style quotation marks.
md = Markdown(
    output_format="xhtml1",
    extensions=['markdown.extensions.smarty'],
    extension_configs={
        'markdown.extensions.smarty': {
            'substitutions': {
                'left-single-quote': '‚',  # sb is not a typo!
                'right-single-quote': '‘',
                'left-double-quote': '«',
                'right-double-quote': '»'
            }
        }
    })

for chapter in CHAPTERS:
    # reset() between chapters clears per-document converter state.
    bodyhtml = md.reset().convert(open(chapter, 'r').read().decode('utf-8'))
    xhtml = TEMPLATE % bodyhtml
    filename = os.path.splitext(os.path.basename(chapter))[0] + '.xhtml'
    filepath = os.path.join(EPUB_PATH, filename)
    # NOTE(review): truncated here — the write of xhtml to filepath lies
    # outside this view.
    with open(filepath, 'w') as f:
class FixHeadingsExtension(Extension):
    """
    FixHeadingsExtension is used as an extension for markdown.Markdown so
    that header tags are rewritten: '<h2>' -> '<div class="md_h2">'.
    """

    class FixHeadingsProcessor(Treeprocessor):
        """
        FixHeadingsProcessor by Daan; see FixHeadingsExtension for how
        this class is used.
        """

        def run(self, root):
            heading_tags = ('h1', 'h2', 'h3', 'h4', 'h5', 'h6')
            # Iterate only over the heading elements by filtering through
            # a generator expression.
            for elem in (el for el in root if el.tag in heading_tags):
                tag, elem.tag = elem.tag, 'div'
                if 'class' in elem.attrib:
                    # Bug fix: the original used '%s md_%%s', which has one
                    # conversion specifier for two arguments — raising
                    # TypeError ('not all arguments converted') at runtime,
                    # and the escaped %% would have left a literal 'md_%s'
                    # in the class attribute anyway.
                    elem.attrib['class'] = '%s md_%s' % (elem.attrib['class'], tag)
                else:
                    elem.attrib['class'] = 'md_%s' % tag
            return root

    def extendMarkdown(self, md, md_globals):
        md.treeprocessors.add(
            'fixheading',
            FixHeadingsExtension.FixHeadingsProcessor(md),
            '_end')


parser = Markdown(extensions=[FixHeadingsExtension()], safe_mode="escape")

# vim: et:sta:bs=2:sw=4:
from markdown import Markdown

# Shared module-level Markdown converter used for rendering user content.
markdown_converter = Markdown(
    extensions=[
        # Extensions
        'abbr',
        'attr_list',
        'def_list',
        'fenced_code',
        'tables',
        'smart_strong',
        # Others
        'codehilite',
        'nl2br'
    ],
    # Escape raw HTML in the source instead of passing it through.
    # NOTE(review): safe_mode and the 'smart_strong' extension were removed
    # in Python-Markdown 3.x — confirm the pinned markdown version is 2.x.
    safe_mode="escape")
def __init__(self):
    """Create the converter used by this instance.

    Only the 'meta' extension is enabled, so after a convert() call the
    document's metadata header is available on self.md.Meta.
    """
    self.md = Markdown(extensions=['meta'])
def generate_posts(destination):
    """Generate dynamic blog posts.

    Reads every Markdown file in the local "posts" folder, converts it to
    HTML and renders a post page into *destination* (existing output files
    are left untouched).

    :param destination: directory receiving the generated ``.html`` files
    :return: list of post-descriptor dicts ordered newest-first, or None
        when the "posts" folder does not exist
    """
    # Check if the posts folder exists.
    if not os.path.exists("posts"):
        print("Unable to find posts folder..." + red("abort"))
        return
    posts = []
    # Load template engine.
    env = Environment()
    # Load the template files, base and post.
    env.loader = FileSystemLoader("template")
    for post in os.listdir("posts"):
        orig = os.path.join("posts", post)
        if os.path.isdir(orig):
            print("Entry \"{0}\" is a directory, {1}".format(
                post, yellow("skip")))
            continue
        print("Processing \"{0}\"...".format(orig)),
        # Read the raw content of the blog post. Using a context manager
        # closes the handle promptly (the original leaked it via
        # open(orig).read()).
        with open(orig, "r") as source:
            raw = source.read()
        # Split headers and content, they're separated by the first
        # empty line.
        headers, content = raw.split("\n\n", 1)
        # Load YAML headers. safe_load refuses arbitrary-object YAML tags;
        # plain yaml.load without a Loader is unsafe and deprecated.
        headers = yaml.safe_load(headers)
        # Initialize Markdown processor.
        md = Markdown()
        # Add the source code pre-processor.
        md.preprocessors.add("sourcecode", CodeBlockPreprocessor(), "_begin")
        # Generate the HTML conversion of the original Markdown content.
        content = md.convert(content)
        print(green("done"))
        # If the user specified a date use it, otherwise generate it.
        # ("Date" in headers replaces dict.has_key(), which no longer
        # exists in Python 3 and works identically on Python 2.)
        if "Date" in headers:
            date = headers["Date"]
        else:
            date = time.strftime("%Y-%m-%d %H:%M:%S")
        # Generate post descriptor.
        post_object = dict(date=date,
                           title=headers["Title"],
                           slug=headers["Slug"],
                           author=headers["Author"],
                           content=content,
                           link=None)
        # This is where we're going to generate the final HTML blog post.
        file_name = "{0}-{1}.html".format(
            str(post_object["date"])[:10], post_object["slug"])
        dest = os.path.join(destination, file_name)
        # Only render the post if it does not exist yet; delete the output
        # file manually to force a rebuild. (The original comment claimed
        # the opposite.)
        if not os.path.exists(dest):
            print("Generating HTML blog post at \"{0}\"...".format(dest)),
            # Load basic blog post template.
            template = env.get_template("post.html")
            # Generate the HTML content.
            html = template.render(**post_object)
            # Create the HTML file.
            with open(dest, "w") as handle:
                handle.write(html)
            print(green("done"))
        else:
            print("Post already exists, delete manually if needed... " +
                  yellow("skip"))
        # Add the new file name to the post object.
        post_object["link"] = file_name
        # Add the generated post to the overall list.
        posts.append(post_object)
    # Order blog posts from recent to older (kept as sort-then-reverse to
    # preserve the original tie-breaking order for equal dates).
    posts.sort(key=lambda key: key["date"])
    posts.reverse()
    return posts
class Reader(BaseReader):
    """Reader for Markdown posts whose metadata header may have been
    written in reStructuredText style (``:field:`` lines).

    convertRSTmetaToMD() normalises such a header to the ``field:`` form
    understood by the Python-Markdown 'meta' extension before parsing.

    NOTE(review): uses xrange/iterkeys, so this module targets Python 2.
    """

    name = 'Markdown'
    # Only enabled when the markdown package imported successfully.
    enabled = bool(Markdown)

    # Skeleton shown when creating a new, empty post.
    initial = """title: Title here
slug: title-here
tags: world big bang sheldon
published: no
image: not specified

Content here..."""

    def convertRSTmetaToMD(self):
        """Rewrite an RST-style metadata header in self.source in place.

        Two passes: first strip the leading ':' from ':field:' tokens;
        then split comma-separated values onto indented continuation
        lines. No-op when the header already looks like Markdown metadata.
        """
        token = re.compile(r":(\w+:)")
        # http://stackoverflow.com/questions/2212933/python-regex-for-reading-csv-like-rows # NOPEP8
        comma = re.compile(r'''
            \s*                 # Any whitespace.
            (                   # Start capturing here.
              [^,"']+?          # Either a series of non-comma non-quote chars.
              |                 # OR
              "(?:              # A double-quote followed by a string of chars.
                  [^"\\]|\\.    # That are either non-quotes or escaped...
               )*               # ...repeated any number of times.
              "                 # Followed by a closing double-quote.
              |                 # OR
              '(?:[^'\\]|\\.)*' # Same as above, for single quotes.
            )                   # Done capturing.
            \s*                 # Allow arbitrary space before the comma.
            (?:,|$)             # Followed by a comma or the end of a string.
            ''', re.VERBOSE)
        pass1 = []
        pass2 = []
        # First, replace ":token:" with "token:".  Stop at the first blank
        # line, which ends the metadata header.
        for line in self.source.split('\r\n'):
            if not line or line.isspace():
                break
            pass1.append(token.sub(r'\1', line))
        # Assume it's properly formatted markdown: nothing was rewritten,
        # so leave the source untouched.
        if pass1 == self.source.split('\r\n')[:len(pass1)]:
            return
        # Next, split up comma-separated tags into newlines + indents.
        for line in pass1:
            csv = comma.sub('\\1\n    ', line).strip()
            if csv != line:
                pass2.append(csv)
            else:
                pass2.append(line)
        # First line is a title.
        pass2[0] = "title: " + pass2[0]
        # Every other line until the point where they're all the same char
        # (an RST title underline such as "#####") must be the title line
        # too, so we indent those; the underline itself is dropped.
        for i in xrange(1, len(pass2)):
            if pass2[i] and pass2[i] == len(pass2[i]) * pass2[i][0]:
                pass2 = pass2[:i] + pass2[i+1:]
                break
            else:
                pass2[i] = "    " + pass2[i]
        # Now, reconstruct the source: use our new tags + the remainder of
        # the source (+1 for the sake of the line of #'s).
        # Put the \r back because that's how django gave it to us, even
        # though it's stupid.
        self.source = "\r\n".join(
            pass2 + self.source.split("\r\n")[len(pass2)+1:])

    def _parse_metadata(self, meta):
        """Normalise the dict produced by the 'meta' extension.

        Single-item value lists collapse to plain strings, 'published'
        becomes a bool, and 'tags' is de-duplicated.  Mutates and returns
        the same dict.
        """
        out = meta
        for key in out.iterkeys():
            if len(out[key]) == 1:
                out[key] = "".join(out[key])
            if key == "published":
                out[key] = out[key] == "yes"
            if key == "tags":
                out[key] = list(set(out[key]))
        return out

    def read(self):
        """Parse content and metadata of markdown files"""
        self.convertRSTmetaToMD()
        self._md = Markdown(extensions=['meta', 'codehilite(linenums=True)'])
        content = self._md.convert(self.source)
        metadata = self._parse_metadata(self._md.Meta)
        return content, metadata
import os

from markdown import Markdown
from mako.template import Template

# All paths are resolved relative to this script's directory.
here = os.path.dirname(os.path.abspath(__file__))

markdown = Markdown(
    extensions=['headerid', 'meta', 'extra', 'codehilite', 'toc'],
    extension_configs={
        'codehilite': [
            ('linenums', False),
            ('guess_lang', False),
        ],
        'toc': [
            ('title', 'Table of Contents'),
            ('permalink', True),
        ]
    }
)

template = Template(filename=os.path.join(here, 'layout.mako'),
                    output_encoding='utf-8')

# NOTE(review): .decode() on the result of a text-mode read means this
# script targets Python 2 (where read() returns bytes-like str).
with open(os.path.join(here, 'index.md')) as f:
    source = f.read().decode('utf-8')

# Convert the page body; the 'meta' extension exposes the header fields
# on markdown.Meta after convert().
body = markdown.convert(source)
meta = markdown.Meta

# Render the final page into the www output directory.
with open(os.path.join(here, 'www', 'index.html'), 'w') as f:
    f.write(template.render(body=body, meta=meta))
def __init__(self, *args, **kwargs):
    """Initialise the reader and build its Markdown converter.

    The converter is configured entirely from self.settings['MARKDOWN'],
    so behaviour follows the project settings.  Presumably self.settings
    is populated by the superclass initialiser — confirm against the
    base class.
    """
    super().__init__(*args, **kwargs)
    self._md = Markdown(**self.settings['MARKDOWN'])
# This is the entire configuration of the Markdown parser _md_parser = Markdown( output_format="html5", # type: ignore tab_length=2, extensions=[ "markdown.extensions.abbr", "markdown.extensions.fenced_code", "markdown.extensions.footnotes", "markdown.extensions.tables", "markdown.extensions.codehilite", "markdown.extensions.smarty", "markdown.extensions.toc", "mdx_math", YAMLMetadataExtension(), ], extension_configs={ "markdown.extensions.footnotes": { "UNIQUE_IDS": True, # https://github.com/jekyll/jekyll/issues/3751#issue-83081590 "BACKLINK_TEXT": "↩︎", }, "markdown.extensions.codehilite": { "use_pygments": True, "guess_lang": False }, "mdx_math": { "enable_dollar_delimiter": True }, }, )
from hashlib import sha1 from yweb.orm import ORMBase from sqlalchemy import Column, Integer, String, \ Sequence, DateTime, Table, ForeignKey, Boolean, Text from sqlalchemy.orm import relationship, backref from yweb.utils.base import makesure_path_exist import settings from settings import runtime_data from app.site.utils import get_site_config from markdown import Markdown YMK = Markdown(extensions=['fenced_code', 'codehilite', 'tables']) forum_catalog__manager = Table('forum_catalog__manager', ORMBase.metadata, Column('id', Integer, Sequence('forum_catalog__manager_id_seq'), primary_key=True), Column('catalog_id', Integer, ForeignKey('forum_catalog.id')), Column('user_id', Integer, ForeignKey('auth_user.id')) ) forum_catalog__allowed = Table('forum_catalog__allowed', ORMBase.metadata, Column('id', Integer, Sequence('forum_catalog__allowed_id_seq'), primary_key=True), Column('catalog_id', Integer, ForeignKey('forum_catalog.id')), Column('user_id', Integer, ForeignKey('auth_user.id')) ) forum_topic__tag = Table('forum_topic__tag', ORMBase.metadata,
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
    """Hook the relative-links preprocessor into *md* (old 2.x API)."""
    preprocessor = RelativeLinks()
    md.registerExtension(self)
    md.preprocessors.add('help_relative_links', preprocessor, '_begin')
def markdown(markdown_body):
    """Render *markdown_body* to HTML and mark it safe for templates."""
    rendered = Markdown().convert(markdown_body)
    return mark_safe(rendered)
def extendMarkdown(self, md: Markdown) -> None:
    """Wire the emoticon-translation preprocessor into *md*."""
    translator = EmoticonTranslation()
    md.registerExtension(self)
    md.preprocessors.register(translator, "emoticon_translations", -505)
class YAMLMetadataReader(MarkdownReader):
    """Reader for Markdown files with YAML metadata"""

    enabled = ENABLED

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Don't use the default markdown metadata extension for parsing.
        # Leave self.settings alone in case we have to fall back to normal
        # markdown parsing.
        self._md_settings = copy.deepcopy(self.settings["MARKDOWN"])
        with contextlib.suppress(KeyError, ValueError):
            self._md_settings["extensions"].remove("markdown.extensions.meta")

    def read(self, source_path):
        """Parse content and YAML metadata of markdown files"""
        self._source_path = source_path
        self._md = Markdown(**self._md_settings)
        with pelican_open(source_path) as text:
            # HEADER_RE must match the WHOLE file: a YAML block followed
            # by the Markdown body.
            m = HEADER_RE.fullmatch(text)
            if not m:
                logger.info(
                    "No YAML metadata header found in '%s' - falling back to markdown metadata parsing",
                    source_path)
                # Delegate to the stock MarkdownReader behaviour.
                return super().read(source_path)
            return (self._md.convert(m.group("content")),
                    self._load_yaml_metadata(m.group("metadata")))

    def _load_yaml_metadata(self, text):
        """Load Pelican metadata from the specified text"""
        try:
            metadata = yaml.safe_load(text)
            if not isinstance(metadata, dict):
                logger.error(
                    "YAML header didn't parse as a dict for file '%s'",
                    self._source_path)
                logger.debug("YAML data: %r", metadata)
                return {}
        except Exception as e:
            # Any YAML error degrades to "no metadata" rather than failing
            # the whole build.
            logger.error("Error parsing YAML for file '%s': %s: %s",
                         self._source_path, type(e).__name__, e)
            return {}
        return self._parse_yaml_metadata(metadata)

    def _parse_yaml_metadata(self, meta):
        """Parse YAML-provided data into Pelican metadata

        Based on MarkdownReader._parse_metadata.
        """
        output = {}
        for name, value in meta.items():
            name = name.lower()
            is_list = isinstance(value, list)
            if name in self.settings['FORMATTED_FIELDS']:
                # join multiple formatted fields before parsing them as markdown
                self._md.reset()
                value = self._md.convert(
                    "\n".join(value) if is_list else str(value))
            elif is_list and len(value) > 1 and name == "author":
                # special case: upconvert multiple "author" values to "authors"
                name = "authors"
            elif is_list and name in DUPES_NOT_ALLOWED:
                if len(value) > 1:
                    logger.warning(
                        "Duplicate definition of `%s` for %s (%s). Using first one.",
                        name, self._source_path, value)
                value = value[0]
            # Need to do our own metadata processing as YAML loads data in a
            # different way than the markdown metadata extension.
            if name in YAML_METADATA_PROCESSORS:
                value = YAML_METADATA_PROCESSORS[name](value, self.settings)
            if value is not _DEL:
                output[name] = value
        return output
class YAMLMetadataReader(MarkdownReader):
    """Reader for Markdown files with YAML metadata"""

    # Requires both the markdown package and PyYAML to have imported.
    enabled = bool(Markdown) and bool(yaml)

    def __init__(self, *args, **kwargs):
        super(YAMLMetadataReader, self).__init__(*args, **kwargs)
        # Remove the default markdown metadata extension
        try:
            self.settings['MARKDOWN']['extensions'].remove(
                'markdown.extensions.meta')
        except ValueError:
            logger.warning("'markdown.extensions.meta' extension not enabled. "
                           "Did something change in MarkdownReader.__init__?")

    def read(self, source_path):
        # Split the file into YAML header + body, then convert the body
        # and normalise the metadata separately.
        self._source_path = source_path
        self._md = Markdown(**self.settings['MARKDOWN'])
        with pelican_open(source_path) as text:
            content, metadata = self._process_content(text.strip())
        return self._md.convert(content), self._parse_metadata(metadata)

    def _process_content(self, text):
        """Split the YAML metadata from the content and load it into a dict

        Returns a (content_text, metadata_dict) tuple
        """
        if not text or not text.startswith("---\n"):
            logger.debug("No YAML header found in file {0}"
                         "".format(self._source_path))
            return text, {}
        # Find end of YAML block.
        # NOTE(review): line_num deliberately leaks out of this loop; if no
        # "---"/"..." terminator exists, it points at the last line and the
        # whole remainder is treated as YAML — confirm that's acceptable.
        lines = text.split("\n")[1:]
        for line_num, line in enumerate(lines):
            if line == "---" or line == "...":
                break
        # Load YAML
        try:
            data = yaml.load("\n".join(lines[:line_num]), YamlLoader)
            if not isinstance(data, dict):
                logger.warning("YAML header wasn't a dict for file {0}"
                               "".format(self._source_path))
                logger.debug("YAML data: {0}".format(data))
                data = {}
        except yaml.parser.ParserError as e:
            # Degrade to "no metadata" instead of failing the build.
            logger.error("Error parsing YAML for file {0}: {1}"
                         "".format(self._source_path, e))
            data = {}
        return "\n".join(lines[line_num + 1:]), data

    def _to_list(self, obj):
        """Make sure to always return a list"""
        return [obj] if isinstance(obj, six.text_type) else obj

    def _parse_metadata(self, meta):
        """Parse and sanitize metadata"""
        _DEL = object()  # Used as a sentinel: "drop this key from the output"
        # Per-key converters; 'default' passes values through unchanged.
        FCNS = {
            'tags': lambda x, y: [Tag(t, y) for t in self._to_list(x)] or _DEL,
            'date': lambda x, y: get_date(x) if x else _DEL,
            'modified': lambda x, y: get_date(x) if x else _DEL,
            'category': lambda x, y: Category(x, y) if x else _DEL,
            'author': lambda x, y: Author(x, y) if x else _DEL,
            'authors': lambda x, y: [Author(a, y)
                                     for a in self._to_list(x)] or _DEL,
            'default': lambda x, y: x
        }
        out = {}
        for k, v in meta.items():
            k = k.lower()
            if k in self.settings['FORMATTED_FIELDS']:
                # Formatted fields are themselves rendered as Markdown.
                self._md.reset()
                temp = self._md.convert("\n".join(self._to_list(v)))
            else:
                temp = FCNS.get(k, FCNS["default"])(v, self.settings)
            if temp is not _DEL:
                out[k] = temp
        return out
def _get_markdown(project):
    """Build a Markdown converter configured for *project*.

    The converter carries an ``extracted_data`` dict that extensions fill
    with mentions and references during conversion.
    """
    converter = Markdown(extensions=_make_extensions_list(project=project))
    converter.extracted_data = {"mentions": [], "references": []}
    return converter
class WebSpoiler(Pattern):
    """Inline pattern turning %%text%% into a spoiler span pair."""

    def __init__(self):
        # %%...%% — the contents become the hidden spoiler text.
        Pattern.__init__(self, r'(%{2})(?P<contents>.+?)\2')

    def handleMatch(self, m):
        # <span class="spoiler-container"><span class="spoiler">…</span></span>
        el_cont = etree.Element('span')
        el_cont.set('class', 'spoiler-container')
        el = etree.Element('span')
        el.set('class', 'spoiler')
        el.text = m.group('contents')
        el_cont.append(el)
        return el_cont


# create our own unique-footnotes extension instance
unique_footnotes = UniqueFootnoteExtension()

md = Markdown(extensions=['nl2br', unique_footnotes,
                          'codehilite(guess_lang=False)', 'toc'],
              safe_mode='escape')
# Preprocessors are chained in a fixed order ('>name' = run after 'name');
# the order below is significant.
md.preprocessors.add('cbacktick', CodeBacktick(md), '_begin')
md.preprocessors.add('sharp', SharpHeader(md), '>cbacktick')
md.preprocessors.add('quoteblock', QuoteBlock(md), '>sharp')
md.preprocessors.add('urlcolons', UrlColons(md), '>quoteblock')
# Inline patterns likewise run in registration order.
md.inlinePatterns.add('url', UrlPattern(), '>automail')
md.inlinePatterns.add('user', UserLinkPattern(), '>url')
md.inlinePatterns.add('post', PostLinkPattern(), '>user')
md.inlinePatterns.add('comment', CommentLinkPattern(), '>post')
md.inlinePatterns.add('strike', StrikePattern(), '>comment')
md.inlinePatterns.add('spoiler', WebSpoiler(), '>strike')
# Let authors escape a literal '%' so it doesn't trigger the spoiler.
md.ESCAPED_CHARS.append('%')
# replace native LinkPattern
md.inlinePatterns['link'] = ColonLinkPattern(LINK_RE, md)
from django.template import loader from markdown import Markdown logger = logging.getLogger(__name__) ROOT = "metakuna.me/" TEMPLATES = "templates/" # these templates will just be inserted straight into master JUST_WRAP = "templates/just-wrap/" STATIC = "static/" POSTS = "posts/" TEMP = "temp/" titles = {} md = Markdown(extensions=["markdown_markup_emoji.markup_emoji"]) def build_post(dir_ext): # make the static page try: dir_name = f"{POSTS}{dir_ext}" copy_tree(dir_name, f"{TEMP}{dir_name}") with open(f"{dir_name}/post.md", "r") as f: html = md.convert(f.read()) filename = f"{TEMP}{dir_name}/post.html" os.makedirs(os.path.dirname(filename), exist_ok=True) with open(filename, "w") as f: print(html, file=f) with open(f"{dir_name}/meta.json", "r") as metadata_file:
def __init__(self, *args, **kwargs):
    """Load a content loadable from a YAML/Markdown file, string or dict.

    Positional args:
        filename (optional): path of the file to load.
    Keyword args:
        file_type: 'yaml' or 'markdown'; inferred from the file extension
            when omitted.
        markdown_config: optional config object controlling permalink
            generation for Markdown content (may be None).
        encoding: text encoding used to read the file (default "utf-8").
        from_string: load content from an in-memory string instead.
        from_dict: supply the variables dict directly.
        name: explicit name; defaults to the file's base name.

    Raises:
        ValueError: unknown file type or unsupported file extension.
        MissingParameterError: no source given, or no name derivable.
    """
    self.vars = None
    self.content = None
    self.file_content = None
    self.file_type = kwargs.get('file_type', None)
    if self.file_type is not None:
        if self.file_type not in ['yaml', 'markdown']:
            raise ValueError("Invalid file type for content loadable: %s" % self.file_type)
    self.markdown_config = kwargs.get('markdown_config', None)
    self.encoding = "utf-8"
    if len(args) > 0:
        self.filename = args[0]
        if 'encoding' in kwargs:
            self.encoding = kwargs['encoding']
        if self.file_type is None:
            # splitext already returns a tuple; the original wrapped it in
            # a redundant list() call.
            ext = os.path.splitext(self.filename)[1].lstrip('.')
            if ext not in ['yml', 'yaml', 'md', 'markdown']:
                raise ValueError("File is not a YAML or Markdown-formatted file")
            self.file_type = 'yaml' if (ext in ['yml', 'yaml']) else 'markdown'
        with open(self.filename, mode='rt', encoding=self.encoding) as f:
            self.file_content = f.read()
    elif 'from_string' in kwargs:
        self.filename = None
        self.file_content = kwargs['from_string']
    elif 'from_dict' in kwargs:
        self.filename = None
        self.vars = kwargs['from_dict']
    else:
        raise MissingParameterError("One or more missing arguments for constructor")
    if 'name' in kwargs:
        self.name = kwargs['name']
    elif self.filename is not None:
        self.name = extract_filename(self.filename)
    else:
        raise MissingParameterError("Missing \"name\" argument for content loadable instance")
    # if it wasn't loaded from a dictionary
    if self.vars is None:
        if self.file_type is None:
            raise MissingParameterError("Missing file type parameter for content loadable")
        # if it's a YAML file
        if self.file_type == 'yaml':
            # NOTE(review): yaml.load without an explicit Loader executes
            # arbitrary YAML tags and is deprecated in PyYAML >= 5.1;
            # consider yaml.safe_load for untrusted content.
            self.vars = yaml.load(self.file_content) if len(self.file_content) else {}
            if not isinstance(self.vars, dict):
                self.vars = {}
        else:
            markdown_ext = [
                MarkdownYamlMetaExtension(),
                MarkdownLoremIpsumExtension()
            ]
            # BUG FIX: markdown_config defaults to None, so the original
            # attribute access raised AttributeError for any Markdown file
            # loaded without an explicit markdown_config.
            if self.markdown_config is not None and \
                    self.markdown_config.enable_permalinks:
                markdown_ext.append(
                    MarkdownPermalinkExtension(
                        permalink_text=self.markdown_config.permalink_text,
                        permalink_class=self.markdown_config.permalink_class,
                        permalink_title=self.markdown_config.permalink_title,
                    )
                )
            markdown_ext.extend([
                'markdown.extensions.fenced_code',
                'markdown.extensions.tables',
                'markdown.extensions.toc',
                'markdown.extensions.footnotes'
            ])
            md = Markdown(extensions=markdown_ext)
            self.content = md.convert(self.file_content)
            # md.meta is populated by MarkdownYamlMetaExtension.
            self.vars = md.meta
    if isinstance(self.vars, dict):
        self.vars = dict_strip(self.vars)
def irc_md():
    """Build a converter from Markdown to mIRC colour-coded text."""
    converter = Markdown(output_format='irc',
                         extensions=[ExtraExtension(), AnsiExtension()])
    # Emit the bare fragment: no wrapping top-level tags in IRC output.
    converter.stripTopLevelTags = False
    return converter
def __init__(self, extensions, extension_configs):
    """Create a Markdown instance.

    :param extensions: list of extension names/instances to enable
    :param extension_configs: per-extension configuration mapping
    """
    self.markdown = Markdown(extensions=extensions,
                             extension_configs=extension_configs,
                             output_format="html5")
def extendMarkdown(self, md: Markdown) -> None:
    """Register this extension and its Setting preprocessor on *md*."""
    preprocessor = Setting()
    md.registerExtension(self)
    md.preprocessors.register(preprocessor, 'setting', 515)
from django.views.decorators.http import require_GET from markdown import Markdown from blog.models import Blog from endportal import utils from logs.models import Log markdown = Markdown( extensions=[ 'markdown.extensions.extra', 'markdown.extensions.toc', 'markdown.extensions.codehilite', 'arithmatex' ], extension_configs={ # Enable line numbers. 'markdown.extensions.codehilite': { 'linenums': True }, # Enable generic mode for katex, disable smart dollar because it breaks inline math. 'arithmatex': { 'generic': True, 'smart_dollar': False } }, # They said that this can help to improve Chinese character issues. slugify=slugify) def get_universal_context(path, sub_dir): """ Fetch context components which are available in all kinds of blog pages. Including major categories, tags, recent articles, subdirectories and split access path. :param path: Access path string.