<span class="helptext">{{ field.help_text|safe }}</span></td> </tr> ''') pat_url = re.compile(r'(?<!\S)(https?://[^\s\'\"\]\)]+)', re.I) pat_url_www = re.compile(r'(?<!\S)(www\.[-a-z]+\.[-.a-z]+)', re.I) pat_comment = re.compile(r'<!--.*?-->', re.S) pat_header = re.compile(r'<(/?)h\d>', re.S) pat_img = re.compile(r'<img[^>]*>', re.S) pat_readmore = [ re.compile(r'(.*?)<!-- ?more ?-->', re.I | re.S), re.compile(r'<!-- ?begin synopsis ?-->(.+?)<!-- ?end synopsis ?-->', re.I | re.S), ] markdown_safe = markdown.Markdown(safe_mode='escape') markdown_unsafe = markdown.Markdown() @register.filter def as_timezone(ts, zone): if not isinstance(ts, datetime.datetime): return ts return ts.replace(tzinfo=pytz.UTC).astimezone(pytz.timezone(zone)) as_timezone.is_safe = True @register.filter def as_tr(field):
def __init__(self, environment):
    # Attach a shared Markdown converter (with code highlighting) to the
    # Jinja environment so every template preprocessed by this extension
    # reuses the same instance instead of rebuilding one per render.
    super(MarkdownExtension, self).__init__(environment)
    environment.extend(markdowner=markdown.Markdown(
        extensions=['codehilite']))
class MarkdownExtension(JExtension): options = {} file_extensions = '.md' def preprocess(self, source, name, filename=None): if (not name or (name and not os.path.splitext(name)[1] in self.file_extensions)): return source return html(source) # Markdown mkd = markdown.Markdown(extensions=[ 'markdown.extensions.nl2br', 'markdown.extensions.sane_lists', 'markdown.extensions.toc', 'markdown.extensions.tables' ]) def convert(text): """ Convert MD text to HTML :param text: :return: """ html = mkd.convert(text) mkd.reset() return html def get_toc(text):
<tr> <td colspan="1" rowspan="1"> <p>normal</p> </td> <td colspan="1" rowspan="1"> <p>cell</p> </td> </tr> <tr> <td colspan="1" rowspan="1"> <p>multi line</p> <p>cells too</p> </td> <td colspan="2" rowspan="1"> <p>cells can be <em>formatted</em> <strong>paragraphs</strong></p> </td> </tr> </tbody> </table>""" md = markdown.Markdown(extensions=['grid_tables']) result = md.convert(markdown_table) #print result #with open('test_grid_tables.html', 'w') as f: # f.write(result + '<style>table, tr, td, th {border: 1px solid black;}</style>') assert result == html_table
def configure_markdown():
    """Return a Markdown converter configured with the site's extension set."""
    extension_names = [
        # stock python-markdown extensions
        'extra',
        'meta',
        'sane_lists',
        'toc',
        # pymdown-extensions add-ons
        'pymdownx.magiclink',
        'pymdownx.betterem',
        'pymdownx.tilde',
        'pymdownx.emoji',
        'pymdownx.tasklist',
        'pymdownx.superfences',
    ]
    return markdown.Markdown(extensions=extension_names)
del md.inlinePatterns["strong_em"] del md.parser.blockprocessors[ "code"] # `code` is an indented code block, WW only supports 'fenced' code blocks (using ```\nbackticks\n```) del md.parser.blockprocessors["hashheader"] del md.parser.blockprocessors["hr"] del md.parser.blockprocessors["indent"] del md.parser.blockprocessors["olist"] del md.parser.blockprocessors["quote"] del md.parser.blockprocessors["setextheader"] del md.parser.blockprocessors["ulist"] markdownRenderer = markdown.Markdown(extensions=[ WWExtension(), "markdown.extensions.fenced_code", "markdown.extensions.nl2br" ], output_format="html5") def _jinja_filter_md(val: str): content = None try: content = markdownRenderer.reset().convert(val) # ensure only expected tags are output, convert bare URLs (with allowed protocols) to links content = cleaner.clean(content) except NameError: logger.exception( "Problem with markdown conversion - using message as plaintext for some message content" ) content = val
def convert_to_html(queue):
    """Consume Markdown strings from *queue* forever, printing each as HTML."""
    converter = markdown.Markdown()
    while True:
        source = queue.get()  # blocks until an item arrives
        print(converter.convert(source))
# web stuff and markdown imports import markdown from flask.ext.sqlalchemy import SQLAlchemy from werkzeug.security import check_password_hash from flask import render_template, request, Flask, flash, redirect, url_for, \ abort, jsonify, Response, make_response app = Flask(__name__) app.config.from_object('settings') db = SQLAlchemy(app) _punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+') MARKDOWN_PARSER = markdown.Markdown(extensions=['fenced_code'], output_format="html5", safe_mode=True) class Post(db.Model): def __init__(self, title=None, created_at=None): if title: self.title = title self.slug = slugify(title) if created_at: self.created_at = created_at self.updated_at = created_at __tablename__ = "posts" id = db.Column(db.Integer, primary_key=True) title = db.Column(db.String())
'h4', 'h5', 'h6', 'hr', 'img', 'i', 'li', 'ol', 'pre', 'p', 'strike', 'strong', 'sup', 'sub', 'ul', ], attributes={ "a": ("href", "name", "title", "id", "rel"), "img": ("src", "width", "height", "alt"), }, strip=True, protocols=['http', 'https', 'mailto', 'ftp', 'tel'], ) autolink = AutoLinkExtension() md = markdown.Markdown(extensions=['nl2br', autolink]) @register.filter(is_safe=True) def safe_markdown(value, arg=''): return mark_safe(cleaner.clean(md.reset().convert(value)))
#!/usr/bin/env python
"""Render the Markdown file named on the command line to HTML on stdout."""
import sys

import markdown

md = markdown.Markdown(
    extensions=['extra', 'codehilite', 'toc', 'wikilinks', 'latex', 'github']
)

# Context manager guarantees the file is closed even if reading raises,
# replacing the manual open()/read()/close() sequence.
with open(sys.argv[1], 'r') as mdfile:
    mdtext = mdfile.read()

sys.stdout.write(md.convert(mdtext))
sys.stdout.write('\n')
def post_comment_old(request, post_pk):
    # Fetch the post being commented on first, since the new comment must be
    # linked to it. Django's get_object_or_404 shortcut returns the Post when
    # it exists and renders a 404 page for the user otherwise.
    post = get_object_or_404(Post, pk=post_pk)
    if post.status == '1':
        raise Http404("post does not exist")
    md = markdown.Markdown(extensions=[
        'markdown.extensions.extra',
        'markdown.extensions.codehilite',
        'markdown.extensions.toc',
    ])
    post.body = md.convert(post.body)
    post.toc = md.toc
    # post.body = markdown.markdown(post.body,
    #                               extensions=[
    #                                   'markdown.extensions.extra',
    #                                   'markdown.extensions.codehilite',
    #                                   'markdown.extensions.toc',
    #                               ])
    # HTTP requests are either GET or POST; form submissions normally arrive
    # via POST, so form data is only processed for POST requests.
    if request.method == 'POST':
        # The submitted data lives in request.POST (a dict-like object);
        # use it to build the CommentForm instance so Django generates the form.
        form = CommentForm(request.POST)
        # form.is_valid() has Django validate the submitted data automatically.
        if form.is_valid():
            # Data is valid: save() with commit=False builds the Comment model
            # instance from the form data without writing it to the database yet.
            comment = form.save(commit=False)
            # Link the comment to the post being commented on.
            comment.post = post
            # Persist the comment by calling the model instance's save().
            comment.save()
            # Redirect to the post's detail page. When redirect() receives a
            # model instance it calls the instance's get_absolute_url() and
            # redirects to the URL that method returns.
            return redirect(post)
        else:
            # Invalid data: re-render the detail page including the form
            # errors. Three template variables are passed to detail.html:
            # the post, the comment list, and the form.
            # post.comment_set.all() is the reverse ForeignKey lookup
            # (analogous to Post.objects.all()) that fetches every comment
            # belonging to this post, since Post and Comment are related
            # by a ForeignKey.
            comment_list = post.comment_set.all()
            has_prev = False
            has_next = False
            id_active = post.id
            prev_post = Post.objects.filter(
                status='0', id__lt=id_active).order_by('-id').first()
            if prev_post:
                has_prev = True
            next_post = Post.objects.filter(
                status='0', id__gt=id_active).order_by('id').first()
            if next_post:
                has_next = True
            context = {
                'post': post,
                'form': form,
                'comment_list': comment_list,
                'has_prev': has_prev,
                'has_next': has_next,
                'prev_post': prev_post,
                'next_post': next_post,
            }
            return render(request, 'blog/detail.html', context=context)
    # Not a POST request: the user submitted no data, so redirect back to
    # the post's detail page.
    return redirect(post)
def markdown_to_toc(self, markdown_source):
    """Build a TableOfContents for *markdown_source* using the 'toc' extension."""
    prepared = toc.pre_process(markdown_source)
    converter = markdown.Markdown(extensions=['toc'])
    rendered = converter.convert(prepared)
    _, toc_output = toc.post_process(rendered)
    return toc.TableOfContents(toc_output)
def filter_prefix(ctx, link): """ Prepend level-times "../" to the given string. Used to go up in the directory hierarchy. Yes, one could also do absolute paths, but then it is harder to debug locally! """ level = ctx.get("level", 0) if level == 0: return link path = ['..'] * level path.append(link) return '/'.join(path) md = markdown.Markdown() @j2.evalcontextfilter def filter_markdown(eval_ctx, text): if eval_ctx.autoescape: return md.convert(j2.escape(text)) return md.convert(text) def render(): if not exists("www"): os.mkdir("www") log("config: {} version {} @ {}, {} mirrors and {} spkgs".format( config["sage"], config["version"], config["releasedate"], len(mirrors),
Utility functions for gramex-guide ''' import cachetools import gramex import hashlib import markdown import re import time import yaml md = markdown.Markdown(extensions=[ 'markdown.extensions.extra', 'markdown.extensions.meta', 'markdown.extensions.codehilite', 'markdown.extensions.smarty', 'markdown.extensions.sane_lists', 'markdown.extensions.fenced_code', 'markdown.extensions.toc', ], output_format='html5') # Create a cache for guide markdown content md_cache = cachetools.LRUCache(maxsize=5000000, getsizeof=len) def markdown_template(content, handler): # Cache the markdown contents locally, to avoid Markdown re-conversion hash = hashlib.md5(content.encode('utf-8')).hexdigest() if hash not in md_cache: md_cache[hash] = { 'content': md.convert(content), 'meta': md.Meta
class KnowlTagPatternWithTitle(markdown.inlinepatterns.Pattern):
    # Inline pattern turning knowl markup (id plus optional '|title') into a
    # Jinja KNOWL() call embedded in the rendered output.
    def handleMatch(self, m):
        # The capture group holds "kid" or "kid|title parts...".
        tokens = m.group(2).split("|")
        kid = tokens[0].strip()
        if len(tokens) > 1:
            # Everything after the first '|' is the display title.
            tit = ''.join(tokens[1:])
            return "{{ KNOWL('%s', title='%s') }}" % (kid, tit.strip())
        return "{{ KNOWL('%s') }}" % kid


# Initialise the markdown converter, sending a wikilink [[topic]] to the
# L-functions wiki.
md = markdown.Markdown(extensions=['wikilinks'],
                       extension_configs={
                           'wikilinks': [('base_url', 'http://wiki.l-functions.org/')]
                       })

# Prevent $..$, $$..$$, \(..\), \[..\] blocks from being processed by
# Markdown, so MathJax can handle them client-side. Registered '<escape'
# so they run before the escape pattern (legacy OrderedDict-style API).
md.inlinePatterns.add('mathjax$', IgnorePattern(r'(?<![\\\$])(\$[^\$].*?\$)'), '<escape')
md.inlinePatterns.add('mathjax$$', IgnorePattern(r'(?<![\\])(\$\$.+?\$\$)'), '<escape')
md.inlinePatterns.add('mathjax\\(', IgnorePattern(r'(\\\(.+?\\\))'), '<escape')
md.inlinePatterns.add('mathjax\\[', IgnorePattern(r'(\\\[.+?\\\])'), '<escape')

# Tell markdown to turn hashtags into search urls.
hashtag_keywords_rex = r'#([a-zA-Z][a-zA-Z0-9-_]{1,})\b'
md.inlinePatterns.add('hashtag', HashTagPattern(hashtag_keywords_rex), '<escape')
def md_factory(allow_links=True, allow_images=True, allow_blocks=True):
    """Create and configure a restricted BBCode-flavoured markdown object.

    Raw HTML and reference-style syntax are always stripped; links, images
    and block-level constructs are kept or removed per the flags.
    NOTE(review): uses the legacy (pre-3.0) python-markdown registry API
    (OrderedDict-style add/del with '<name' priority strings).
    """
    md = markdown.Markdown(extensions=[
        'markdown.extensions.nl2br',
    ])

    # Remove HTML allowances
    del md.preprocessors['html_block']
    del md.inlinePatterns['html']

    # Remove references
    del md.preprocessors['reference']
    del md.inlinePatterns['reference']
    del md.inlinePatterns['image_reference']
    del md.inlinePatterns['short_reference']

    # Add [b], [i], [u]
    md.inlinePatterns.add('bb_b', inline.bold, '<strong')
    md.inlinePatterns.add('bb_i', inline.italics, '<emphasis')
    md.inlinePatterns.add('bb_u', inline.underline, '<emphasis2')

    # Add ~~deleted~~ strikethrough support
    striketrough_md = StriketroughExtension()
    striketrough_md.extendMarkdown(md)

    if allow_links:
        # Add [url]
        md.inlinePatterns.add('bb_url', inline.url(md), '<link')
    else:
        # Remove links entirely (markdown links, autolinks, automail)
        del md.inlinePatterns['link']
        del md.inlinePatterns['autolink']
        del md.inlinePatterns['automail']

    if allow_images:
        # Add [img]
        md.inlinePatterns.add('bb_img', inline.image(md), '<image_link')
        short_images_md = ShortImagesExtension()
        short_images_md.extendMarkdown(md)
    else:
        # Remove images
        del md.inlinePatterns['image_link']

    if allow_blocks:
        # Add [hr] and [quote] blocks
        md.parser.blockprocessors.add('bb_hr',
                                      blocks.BBCodeHRProcessor(md.parser),
                                      '>hr')
        fenced_code = FencedCodeExtension()
        fenced_code.extendMarkdown(md, None)
        code_bbcode = blocks.CodeBlockExtension()
        code_bbcode.extendMarkdown(md)
        quote_bbcode = blocks.QuoteExtension()
        quote_bbcode.extendMarkdown(md)
    else:
        # Remove every block-level construct (headers, code, quotes, lists...)
        del md.parser.blockprocessors['hashheader']
        del md.parser.blockprocessors['setextheader']
        del md.parser.blockprocessors['code']
        del md.parser.blockprocessors['quote']
        del md.parser.blockprocessors['hr']
        del md.parser.blockprocessors['olist']
        del md.parser.blockprocessors['ulist']

    # Hand the configured object to the project pipeline for final extension.
    return pipeline.extend_markdown(md)
def main():
    """Convert a vimwiki markdown page to HTML, applying a template.

    Arguments follow vimwiki's custom-wiki2html calling convention:
    force, syntax, extension, output_dir, input_file, css_file, plus
    optional template path/name/extension and root path (each falling
    back to a VIMWIKI_* environment variable).
    """
    FORCE = sys.argv[1]  # noqa - not supported
    SYNTAX = sys.argv[2]
    EXTENSION = sys.argv[3]  # noqa - not supported
    OUTPUT_DIR = sys.argv[4]
    INPUT_FILE = sys.argv[5]
    CSS_FILE = sys.argv[6]  # noqa - not supported
    TEMPLATE_PATH = get(sys.argv, 7, os.getenv("VIMWIKI_TEMPLATE_PATH", ""))
    TEMPLATE_DEFAULT = get(sys.argv, 8,
                           os.getenv("VIMWIKI_TEMPLATE_DEFAULT", ""))
    TEMPLATE_EXT = get(sys.argv, 9, os.getenv("VIMWIKI_TEMPLATE_EXT", ""))
    ROOT_PATH = get(sys.argv, 10, os.getenv("VIMWIKI_ROOT_PATH", os.getcwd()))

    # Only markdown is supported
    if SYNTAX != "markdown":
        sys.stderr.write("Unsupported syntax: " + SYNTAX)
        sys.exit(1)

    # Assign template: fall back to the built-in default unless the
    # configured default template file exists.
    template = default_template
    template_file = (os.path.join(TEMPLATE_PATH, TEMPLATE_DEFAULT)
                     + TEMPLATE_EXT)
    if os.path.isfile(template_file):
        with open(template_file, "rb") as f:
            template = f.read().decode()

    # Get output filename
    filename, _ = os.path.splitext(os.path.basename(INPUT_FILE))
    output_file = os.path.join(OUTPUT_DIR, filename + ".html")

    # Extension set: built-ins plus any extra names from the environment;
    # empty entries from the split are dropped.
    extensions = ["fenced_code", "tables"]
    extensions += os.getenv("VIMWIKI_MARKDOWN_EXTENSIONS", "").split(",")
    extensions = set([e for e in extensions if e] + [CodeHiliteExtension()])

    # Setup markdown parser; swap the stock link pattern for the local
    # LinkInlineProcessor (same regex, priority 160).
    md = markdown.Markdown(extensions=extensions)
    md.inlinePatterns.deregister("link")
    md.inlinePatterns.register(
        LinkInlineProcessor(markdown.inlinepatterns.LINK_RE, md), "link", 160)

    with open(INPUT_FILE, "rb") as f:
        content = ""
        placeholders = {}

        # Retrieve vimwiki placeholders (%nohtml, %title, %date, %template);
        # every other line is accumulated as page content.
        for line in f:
            line = line.decode()[:-1]  # strip the trailing newline
            if line.startswith("%nohtml"):
                sys.exit(0)  # page explicitly opted out of HTML generation
            elif line.startswith("%title"):
                placeholders["%title%"] = line[7:]
            elif line.startswith("%date"):
                placeholders["%date%"] = line[6:]
            elif line.startswith("%template"):
                placeholders["template"] = line[10:]
            else:
                content += line + "\n"

    # Set default values
    if "%title%" not in placeholders:
        placeholders["%title%"] = filename
    if "%date%" not in placeholders:
        placeholders["%date%"] = datetime.datetime.today().strftime(
            "%Y-%m-%d")
    # A per-page %template placeholder overrides the default template.
    if "template" in placeholders:
        t = placeholders.pop("template")
        template_file = os.path.join(TEMPLATE_PATH, t) + TEMPLATE_EXT
        if os.path.isfile(template_file):
            with open(template_file, "rb") as f:
                template = f.read().decode()

    # Parse template
    for placeholder, value in placeholders.items():
        template = template.replace(placeholder, value)

    # Use blank instead of os.getcwd() because "-" means the root directory
    # that contains the css.
    template = template.replace("%root_path%",
                                ROOT_PATH if ROOT_PATH != "-" else "")

    # Parse content
    content = md.convert(content)

    # Merge template
    template = template.replace("%content%", content)

    with open(output_file, "wb") as o:
        o.write(template.encode())
import markdown import mkdcomments comments = mkdcomments.CommentsExtension() markdowner = markdown.Markdown(extensions=[comments]) test = markdowner.convert("""\ markdowntext1 <!---inline comment--> <!---this line is ommitted entirely--> markdowntext2 <!---multiline comment multiline comment multiline comment-->markdowntext3 <!---inline comment-->markdowntext4<!---inline comment--> <!---inline comment-->markdowntext5<!---multiline commment multiline comment--> <!---multiline comment multiline comment-->markdowntext6<!---unsupported comment--> <!---multiline comment multiline comment-->markdowntext7<!---multiline comment multiline comment--> """) """ Results: >>> import example >>> print example.test <p>markdowntext1</p> <p>markdowntext2</p> <p>markdowntext3</p>
''' extensions = { 'markdown.extensions.tables', 'pymdownx.inlinehilite', 'pymdownx.critic' } accept_extension_configs = { "pymdownx.highlight": { "css_class": "codehilite" }, 'pymdownx.critic': { 'mode': 'accept' } } with codecs.open('docs/src/markdown/_snippets/critic-accept-example.md', 'w', encoding='utf-8') as f: html = markdown.Markdown( extensions=extensions, extension_configs=accept_extension_configs).convert( critic_markup_example) f.write(html.replace('\n', '')) with codecs.open('docs/src/markdown/_snippets/critic-preview-example.md', 'w', encoding='utf-8') as f: html = markdown.Markdown( extensions=extensions).convert(critic_markup_example) f.write(html.replace('\n', ''))
def showgraph(): if not 'query' in session: return abort(403) w = GProMWrapper() query = session['query'] action = session['action'] topk, sSize = '', '' if not 'topk' in session: pass else: topk = session['topk'] if not 'sSize' in session: pass else: sSize = session['sSize'] fPattern, recall, info = '', '', '' if not 'fPattern' in session: pass else: fPattern = session['fPattern'] if not 'recall' in session: pass else: recall = session['recall'] if not 'info' in session: pass else: info = session['info'] conv = Ansi2HTMLConverter() # generate a graph provQuest = query.find('WHY') summRequest = query.find('SUMMARIZED') lines = [] queryResult = '' gpromlog, dotlog, imagefile = '', '', '' if action == 'provgame' or action == 'provgraph' or action == 'provpolygraph' or action == 'triograph' or action == 'lingraph': if provQuest > 0: userdomQuery = '' summQuery = '' # if summRequest < 0: if recall != '' and info == '': summQuery += ' SCORE AS (' + recall + ' * recall)' elif recall == '' and info != '': summQuery += ' SCORE AS (' + info + ' * informativeness)' elif recall != '' and info != '': summQuery += ' SCORE AS (' + recall + ' * recall + ' + info + ' * informativeness)' # if topk != '': summQuery += ' TOP ' + topk if fPattern != '': summQuery += ' FOR FAILURE OF (' + fPattern + ')' if sSize != '': summQuery += ' SUMMARIZED BY LCA WITH SAMPLE(' + sSize + ').' # else: # score = query.find('SCORE') # if score > 0: # summQuery += ' ' + query[query.find('SCORE'):] # else: # top = query.find('TOP') # if top > 0: # summQuery += ' ' + query[query.find('TOP'):] # if action == 'provgraph' and topk != '' and sSize != '': # query = query[:query.find('))')] + ')) FORMAT REDUCED_GP. TOP ' + topk + ' SUMMARIZED BY LCA WITH SAMPLE(' + sSize + ').' if action == 'provgraph': #and topk == '' and sSize == '': userdom = query.find('_GP. 
USERDOMAIN OF') score = query.find('SCORE AS') top = query.find('TOP') if userdom > 0: if score != '': userdomQuery += query[userdom + 4:score - 1] if score == '' and top != '': userdomQuery += query[userdom + 4:top - 1] if score == '' and top == '': userdomQuery += query[userdom + 4:len(query) - 1] query = query[:query.find('))')] + ')) FORMAT REDUCED_GP.' # graphFormat = query.find('FORMAT') # if graphFormat < 1: # query = query[:-1] + ' FORMAT REDUCED_GP.' # if action == 'provpolygraph' and topk != '' and sSize != '': # query = query[:query.find('))')] + ')) FORMAT TUPLE_RULE_GOAL_TUPLE. TOP ' + topk + ' SUMMARIZED BY LCA WITH SAMPLE(' + sSize + ').' if action == 'provpolygraph': #and topk == '' and sSize == '': query = query[:query. find('))')] + ')) FORMAT TUPLE_RULE_GOAL_TUPLE.' # # # if action == 'triograph' and topk != '' and sSize != '': # query = query[:query.find('))')] + ')) FORMAT HEAD_RULE_EDB. TOP ' + topk + ' SUMMARIZED BY LCA WITH SAMPLE(' + sSize + ').' if action == 'triograph': #and topk == '' and sSize == '': query = query[:query.find('))' )] + ')) FORMAT TUPLE_RULE_TUPLE.' # graphFormat = query.find('FORMAT') # if graphFormat < 1: # query = query[:-1] + ' FORMAT TUPLE_RULE_TUPLE.' # if action == 'lingraph' and topk != '' and sSize != '': # query = query[:query.find('))')] + ')) FORMAT TUPLE_ONLY. TOP ' + topk + ' SUMMARIZED BY LCA WITH SAMPLE(' + sSize + ').' if action == 'lingraph': #and topk == '' and sSize == '': query = query[:query.find('))')] + ')) FORMAT TUPLE_ONLY.' # graphFormat = query.find('FORMAT') # if graphFormat < 1: # query = query[:-1] + ' FORMAT TUPLE_ONLY.' # if action == 'provgame' and topk != '' and sSize != '': # query = query[:query.find('))')] + ')). TOP ' + topk + ' SUMMARIZED BY LCA WITH SAMPLE(' + sSize + ').' if action == 'provgame': #and topk == '' and sSize == '': query = query[:query.find('))')] + ')).' 
query += userdomQuery query += summQuery queryhash = md5(query).hexdigest() imagefile = queryhash + '.svg' absImagepath = 'static/' + imagefile returncode, gpromlog, dotlog = w.generateProvGraph( query, absImagepath, 'tmp/pg.dot') gpromlog = conv.convert(gpromlog, full=False) dotlog = conv.convert(dotlog, full=False) # output query results (translate into html table) else: returncode, gpromlog = w.runDLQuery(query) queryResult = gpromlog gpromlog = conv.convert(gpromlog, full=False) if returncode == 0: lines = queryResult.split('\n') numAttr = lines[0].count('|') lines = [l for l in lines if not (not l or l.isspace())] lines = map(lambda x: '| ' + x + 'X', lines) if len(lines) > 1: lines[1] = '|' + (' -- | ' * numAttr) else: lines += ['|' + (' -- | ' * numAttr)] queryResult = '\n'.join(lines) md = markdown.Markdown(extensions=['tables']) queryResult = md.convert(queryResult) dotlog, imagefile = '', '' # input db results # returncode, dblog = w.runInputDB(query) dblog = '' inputDB = dblog dblog = conv.convert(dblog, full=False) rels = [] if returncode == 0: lines = inputDB.split('\n') # collect relation names for eachel in lines: if eachel.count('|') < 1 and eachel.count('-') < 1 and len( eachel) > 0: rels += [eachel] # # output relations # relpoint = 0 # spoint = len(rels) # for eachrel in rels: # # relation name # relDB = eachrel # # tuples # if relpoint < len(rels) - 1: # if len(rels) > 1: # relpoint = rels.index(eachrel) + 1 # nextrel = rels[relpoint] # spoint = lines.index(nextrel) + 1 # insline=lines[1:spoint] # else: # insline=lines[spoint:len(lines)] # numAttr=insline[0].count('|') # insline=[ l for l in insline if not(not l or l.isspace()) ] # insline=map(lambda x: '| ' + x + 'X', insline) # if len(insline) > 1: # insline[1] = '|' + (' -- | ' * numAttr) # else: # insline += ['|' + (' -- | ' * numAttr)] # insDB='\n'.join(insline) # md = markdown.Markdown(extensions=['tables']) # insDB = md.convert(insDB) ## reline=lines[0:lines.index(eachrel)] ## reline=[ l 
for l in reline if not(not l or l.isspace()) ] ## reline=map(lambda x: 'Database Relation ' + x, eachrel) ## relDB=''.join(reline) ## md = markdown.Markdown(reline[0], extensions=['markdown.extensions.smart_strong']) ## relDB = md.convert(relDB) ## insline=lines[1:10] ## numAttr=insline[0].count('|') ## insline=[ l for l in insline if not(not l or l.isspace()) ] ## insline=map(lambda x: '| ' + x + 'X', insline) ## if len(insline) > 1: ## insline[1] = '|' + (' -- | ' * numAttr) ## else: ## insline += ['|' + (' -- | ' * numAttr)] ## insDB='\n'.join(insline) ## md = markdown.Markdown(extensions=['tables']) ## insDB = md.convert(insDB) return render_template('queryresult.html', query=session['query'], gpromlog=gpromlog, dotlog=dotlog, imagefile=imagefile, returnedError=(returncode != 0), action=action, queryResult=queryResult, lines=lines, rels=rels, topk=topk, sSize=sSize, fPattern=fPattern, recall=recall, info=info)
} assert CONFIG['config']['schedule-format'] == 'pycon' URL = CONFIG['config']['schedule-url'] # Make pretty-print output a valid python string for UTC timezone object. def utc__repr__(self): return "pytz.utc" pytz.utc.__class__.__repr__ = utc__repr__ defaulttime = datetime.now(pytz.timezone( CONFIG['config']['schedule-timezone'])) convert = markdown.Markdown().convert def tolower(d): newd = {} for key, value in d.items(): if type(value) is dict: value = tolower(value) newd[key.lower()] = value return newd def parse_duration(s): bits = re.split('[^0-9]+', s) if len(bits) == 2: return timedelta(hours=int(bits[0]), minutes=int(bits[1]))
<title>Batch</title> <link rel="stylesheet" href="github-markdown.css"> <style> html { margin-left: auto; margin-right: auto; } </style> </head> <body> %s </body> </html> ''' with codecs.open(readmePath, mode="r", encoding="utf-8") as f: markdownSource = f.read() M = markdown.Markdown(extensions=[TocExtension(permalink=False)]) html = htmlTemplate % M.convert(markdownSource) htmlFile = codecs.open(htmlPath, mode="w", encoding="utf-8") htmlFile.write(html) htmlFile.close() #------------- # copy images #------------- import shutil imgsFolder = os.path.join(baseFolder, 'imgs') htmlImgsFolder = os.path.join(htmlFolder, 'imgs')
def recurse_over(ob, name, indent_level=0):
    """Recursively print HTML documentation for *ob* and its public members.

    Relies on module-level configuration (doc_these, not_these_names,
    indent_amount, indent_start/indent_end, item_start/item_end, ts_css,
    type_string, arg_string, websafe).
    """
    ts = type_string(ob)
    if ts not in doc_these:
        return  # stops what shouldn't be docced getting docced
    if indent_level > 0 and ts == "module":
        return  # Stops it getting into the stdlib
    if name in not_these_names:
        return  # Stops things we don't want getting docced

    indent = indent_level * indent_amount  # Indents nicely
    # Integer division: the indent is used as a whole-unit offset.
    # NOTE(review): original used `/`, which was integer division on
    # Python 2; `//` preserves that intent on Python 3 — confirm
    # indent_amount is not deliberately fractional.
    ds_indent = indent + (indent_amount // 2)
    if indent_level > 0:
        print(indent_start % indent)

    argstr = ""
    if ts.endswith(("function", "method")):
        argstr = arg_string(ob)
    elif ts == "classobj" or ts == "type":
        if ts == "classobj":
            ts = "class"
        if hasattr(ob, "__init__"):
            if type_string(ob.__init__) == "instancemethod":
                argstr = arg_string(ob.__init__)
        else:
            argstr = "(self)"
    if ts == "instancemethod":
        ts = "method"  # looks much nicer

    ds = inspect.getdoc(ob)
    if ds is None:
        ds = ""
    # BUG FIX: the original assigned `markdown.Markdown(ds)` — constructing a
    # converter object instead of converting the docstring — which then fails
    # inside "".join() below. markdown.markdown() returns the rendered HTML.
    ds = markdown.markdown(ds)

    mlink = '<a name="%s">' % name if ts == "module" else ""
    mend = "</a>" if ts == "module" else ""
    print("".join((
        "<p>",
        ts_css(ts),
        item_start % ts,
        " ",
        mlink,
        name,
        websafe(argstr),
        mend,
        item_end,
        "<br />",
    )))
    print("".join((indent_start % ds_indent, ds, indent_end, "</p>")))
    # Although ''.join looks weird, it's a lot faster than string addition.

    if hasattr(ob, "__all__"):
        members = ob.__all__
    else:
        members = [item for item in dir(ob) if not item.startswith("_")]
    # "im_class" in the member list marks a bound method — don't recurse.
    if "im_class" not in members:
        # Separate loop variable avoids shadowing the `name` parameter.
        for member_name in members:
            recurse_over(getattr(ob, member_name), member_name,
                         indent_level + 1)
    if indent_level > 0:
        print(indent_end)
def __init__(self, notebooks): # Index directory of whoosh, located in notebookPath. self.schema = fields.Schema( path = fields.TEXT(stored=True), title = fields.TEXT(stored=True), content = fields.TEXT(stored=True), tags = fields.KEYWORD(commas=True)) self.notebookName = notebooks[0][0] self.notebookPath = notebooks[0][1] self.notePath = os.path.join(self.notebookPath, "notes").replace(os.sep, '/') self.htmlPath = os.path.join(self.notebookPath, "html", "notes").replace(os.sep, '/') self.indexdir = os.path.join(self.notePath, ".indexdir").replace(os.sep, '/') self.attachmentPath = os.path.join(self.notebookPath, "attachments").replace(os.sep, '/') self.configfile = os.path.join(self.notebookPath, "notebook.conf").replace(os.sep, '/') cssPath = os.path.join(self.notebookPath, "css").replace(os.sep, '/') self.cssfile = os.path.join(cssPath, "notebook.css").replace(os.sep, '/') self.searchcssfile = os.path.join(cssPath, "search-window.css").replace(os.sep, '/') self.qsettings = QSettings(self.configfile, QSettings.IniFormat) if os.path.exists(self.configfile): self.extensions = readListFromSettings(self.qsettings, "extensions") self.fileExt = self.qsettings.value("fileExt") self.attachmentImage = self.qsettings.value("attachmentImage") self.attachmentDocument = self.qsettings.value("attachmentDocument") self.version = self.qsettings.value("version") self.geometry = self.qsettings.value("geometry") self.windowstate = self.qsettings.value("windowstate") self.mathjax = self.qsettings.value('mathJax') if 'extensionsConfig' not in set(self.qsettings.childGroups()): self.extcfg = self.qsettings.value('extensionsConfig', defaultValue={}) writeDictToSettings(self.qsettings, 'extensionsConfig', self.extcfg) else: self.extcfg = readDictFromSettings(self.qsettings, 'extensionsConfig') else: self.extensions = [] self.fileExt = "" self.attachmentImage = [] self.attachmentDocument = [] self.version = None self.geometry = None self.windowstate = None self.mathjax = '' self.extcfg = 
{} self.faulty_exts=[] # Default enabled python-markdown extensions. # http://pythonhosted.org/Markdown/extensions/index.html if not self.extensions: self.extensions = [ 'nl2br' # newline to break , 'strkundr' # bold-italics-underline-delete style , 'codehilite' # code syntax highlight , 'fenced_code' # code block , 'headerid' # add id to headers , 'headerlink' # add anchor to headers , 'footnotes' , 'mdx_asciimathml' ] writeListToSettings(self.qsettings, "extensions", self.extensions) while True: print(self.extensions) try: markdown.markdown("",extensions=self.extensions) except AttributeError as e: remove_this = NOT_EXT.findall(e.args[0])[0] if remove_this in self.extensions: print("Found invalid markdown extension",remove_this,". Please consider removing it.") print('If you want to permanently disable this, just hit OK in the Notebook Settings dialog') self.extensions.remove(remove_this) self.faulty_exts.append(remove_this) except ImportError as e: if e.name.startswith('mdx_') and e.name[4:] in self.extensions: print('Found missing markdown extension', e.name[4:], ', temporarily disabling.') print('If you want to permanently disable this, just hit OK in the Notebook Settings dialog') self.extensions.remove(e.name[4:]) self.faulty_exts.append(e.name[4:]) elif e.name in self.extensions: print('Found missing markdown extension', e.name, ', temporarily disabling.') print('If you want to permanently disable this, just hit OK in the Notebook Settings dialog') self.extensions.remove(e.name) self.faulty_exts.append(e.name) else: self.md = markdown.Markdown(self.extensions, extension_configs=self.extcfg) break # Default file extension name if not self.fileExt: self.fileExt = ".md" self.qsettings.setValue("fileExt", self.fileExt) # Image file types that will be copied to attachmentDir # Inserted as image link if not self.attachmentImage: self.attachmentImage = [".jpg", ".jpeg", ".png", ".gif", ".svg"] self.qsettings.setValue("attachmentImage", self.attachmentImage) # 
Document file types that will be copied to attachmentDir # Inserted as link if not self.attachmentDocument: self.attachmentDocument = [".pdf", ".doc", ".odt"] self.qsettings.setValue("attachmentDocument", self.attachmentDocument) # Migrate notebookPath to v0.3.0 folder structure if not self.version: notebookDir = QDir(self.notebookPath) # move all markdown files to notes/ dirList = notebookDir.entryList(QDir.Dirs | QDir.NoDotAndDotDot) if 'css' in dirList: dirList.remove('css') fileList = notebookDir.entryList(['*.md', '*.mkd', '*.markdown']) notebookDir.mkdir('notes') for d in dirList + fileList: notebookDir.rename(d, os.path.join('notes', d).replace(os.sep, '/')) # remove .indexdir folder oldIndexDir = QDir(os.path.join(self.notebookPath, '.indexdir'.replace(os.sep, '/'))) indexFileList = oldIndexDir.entryList() for f in indexFileList: oldIndexDir.remove(f) notebookDir.rmdir('.indexdir') # rename notes.css to css/notebook.css oldCssFile = os.path.join(self.notebookPath, 'notes.css').replace(os.sep, '/') QDir().mkpath(cssPath) if os.path.exists(oldCssFile): QFile.rename(oldCssFile, self.cssfile) self.version = '0' if not self.mathjax: self.mathjax = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML' self.qsettings.setValue('mathJax', self.mathjax)
def setUp(self):
    """Prepare paired converters for comparing output with and without the extension."""
    super().setUp()
    # Baseline converter: 'extra' only.
    self.md_without = markdown.Markdown(extensions=['extra'])
    # Converter under test: 'extra' plus the responsive-table extension.
    self.md = markdown.Markdown(
        extensions=['extra', ResponsiveTableExtension()])
def initialize():
    """
    Method to parse and check configurations of FORD, get the project's
    global documentation, and create the Markdown reader.

    Returns:
        tuple: ``(proj_data, proj_docs, md)`` — the merged settings dict,
        the project documentation converted to HTML, and the configured
        Markdown reader.

    Exits the process (``sys.exit(1)``) on invalid docmark or directory
    configuration.
    """
    # Setup the command-line options and parse them.
    parser = argparse.ArgumentParser(
        description=
        "Document a program or library written in modern Fortran. Any command-line options over-ride those specified in the project file."
    )
    parser.add_argument(
        "project_file",
        help="file containing the description and settings for the project",
        type=argparse.FileType('r'))
    parser.add_argument(
        "-d", "--src_dir", action="append",
        help='directories containing all source files for the project')
    parser.add_argument(
        "-p", "--page_dir",
        help="directory containing the optional page tree describing the project")
    parser.add_argument("-o", "--output_dir",
                        help="directory in which to place output files")
    parser.add_argument("-s", "--css",
                        help="custom style-sheet for the output")
    parser.add_argument(
        "--exclude", action="append",
        help="any files which should not be included in the documentation")
    parser.add_argument(
        "--exclude_dir", action="append",
        help=
        "any directories whose contents should not be included in the documentation"
    )
    parser.add_argument(
        "-e", "--extensions", action="append",
        help=
        "extensions which should be scanned for documentation (default: f90, f95, f03, f08)"
    )
    parser.add_argument(
        "-m", "--macro", action="append",
        help=
        "preprocessor macro (and, optionally, its value) to be applied to files in need of preprocessing."
    )
    parser.add_argument("-w", "--warn", dest='warn', action='store_true',
                        help="display warnings for undocumented items")
    parser.add_argument(
        "--no-search", dest='search', action='store_false',
        help="don't process documentation to produce a search feature")
    parser.add_argument("-q", "--quiet", dest='quiet', action='store_true',
                        help="do not print any description of progress")
    parser.add_argument("-V", "--version", action="version",
                        version="{}, version {}".format(
                            __appname__, __version__))
    parser.add_argument("--debug", dest="dbg", action="store_true",
                        help="display traceback if fatal exception occurs")
    parser.add_argument(
        "-I", "--include", action="append",
        help="any directories which should be searched for include files")

    # Get options from command-line
    args = parser.parse_args()

    # Set up Markdown reader for a first pass whose only purpose is to
    # populate md.Meta with the project file's metadata.
    md_ext = [
        'markdown.extensions.meta', 'markdown.extensions.codehilite',
        'markdown.extensions.extra', MathJaxExtension(), 'md_environ.environ'
    ]
    md = markdown.Markdown(extensions=md_ext, output_format="html5",
                           extension_configs={})

    # Read in the project-file. This will contain global documentation (which
    # will appear on the homepage) as well as any information about the
    # project and settings for generating the documentation.
    proj_docs = args.project_file.read()
    md.convert(proj_docs)

    # Remake the Markdown object with settings parsed from the project_file
    if 'md_base_dir' in md.Meta:
        md_base = md.Meta['md_base_dir'][0]
    else:
        # Default include base: the directory holding the project file.
        md_base = os.path.dirname(args.project_file.name)
    md_ext.append('markdown_include.include')
    if 'md_extensions' in md.Meta:
        md_ext.extend(md.Meta['md_extensions'])
    md = markdown.Markdown(
        extensions=md_ext, output_format="html5",
        extension_configs={'markdown_include.include': {
            'base_path': md_base
        }})
    md.reset()

    # Re-read the project file with the fully-configured reader.
    proj_docs = md.convert(proj_docs)
    proj_data = md.Meta
    md.reset()

    # Get the default options, and any over-rides, straightened out
    options = [
        'src_dir', 'extensions', 'fpp_extensions', 'fixed_extensions',
        'output_dir', 'css', 'exclude', 'project', 'author',
        'author_description', 'author_pic', 'summary', 'github', 'bitbucket',
        'facebook', 'twitter', 'google_plus', 'linkedin', 'email', 'website',
        'project_github', 'project_bitbucket', 'project_website',
        'project_download', 'project_sourceforge', 'project_url', 'display',
        'version', 'year', 'docmark', 'predocmark', 'docmark_alt',
        'predocmark_alt', 'media_dir', 'favicon', 'warn', 'extra_vartypes',
        'page_dir', 'source', 'exclude_dir', 'macro', 'include', 'preprocess',
        'quiet', 'search', 'lower', 'sort', 'extra_mods', 'dbg', 'graph',
        'license', 'extra_filetypes', 'preprocessor', 'creation_date',
        'print_creation_date', 'proc_internals', 'coloured_edges',
        'graph_dir', 'gitter_sidecar'
    ]
    defaults = {
        'src_dir': ['./src'],
        'extensions': ['f90', 'f95', 'f03', 'f08', 'f15'],
        'fpp_extensions': ['F90', 'F95', 'F03', 'F08', 'F15', 'F', 'FOR'],
        'fixed_extensions': ['f', 'for', 'F', 'FOR'],
        'output_dir': './doc',
        'project': 'Fortran Program',
        'project_url': '',
        'display': ['public', 'protected'],
        'year': date.today().year,
        'exclude': [],
        'exclude_dir': [],
        'docmark': '!',
        'docmark_alt': '*',
        'predocmark': '>',
        'predocmark_alt': '|',
        'favicon': 'default-icon',
        'extra_vartypes': [],
        'source': 'false',
        'macro': [],
        'include': [],
        'preprocess': 'true',
        'preprocessor': '',
        'proc_internals': 'false',
        'warn': 'false',
        'quiet': 'false',
        'search': 'true',
        'lower': 'false',
        'sort': 'src',
        'extra_mods': [],
        'dbg': False,
        'graph': 'false',
        'license': '',
        'extra_filetypes': [],
        'creation_date': '%Y-%m-%dT%H:%M:%S.%f%z',
        'print_creation_date': False,
        'coloured_edges': 'false',
    }
    # Options whose project-file value stays a list; all others are joined
    # into a single newline-separated string below.
    listopts = [
        'extensions', 'fpp_extensions', 'fixed_extensions', 'display',
        'extra_vartypes', 'src_dir', 'exclude', 'exclude_dir', 'macro',
        'include', 'extra_mods', 'extra_filetypes'
    ]

    # Evaluate paths relative to project file location
    base_dir = os.path.abspath(os.path.dirname(args.project_file.name))
    proj_data['base_dir'] = base_dir
    for var in [
            'src_dir', 'page_dir', 'output_dir', 'exclude_dir', 'graph_dir',
            'media_dir', 'include', 'favicon', 'css'
    ]:
        if var in proj_data:
            proj_data[var] = [
                os.path.normpath(
                    os.path.join(base_dir,
                                 os.path.expanduser(os.path.expandvars(p))))
                for p in proj_data[var]
            ]

    # Normalise the boolean command-line flags to the 'true'/'false' strings
    # used by the project file; delete unset flags so they don't override
    # project-file settings in the merge loop below.
    if args.warn:
        args.warn = 'true'
    else:
        del args.warn
    if args.quiet:
        args.quiet = 'true'
    else:
        del args.quiet
    if not args.search:
        args.search = 'false'
    else:
        del args.search

    # Merge precedence: command line > project file > defaults.
    for option in options:
        if hasattr(args, option) and getattr(args, option):
            proj_data[option] = getattr(args, option)
        elif option in proj_data:
            # Think if there is a safe way to evaluate any expressions found
            # in this list
            if option not in listopts:
                proj_data[option] = '\n'.join(proj_data[option])
        elif option in defaults:
            proj_data[option] = defaults[option]
    proj_data['display'] = [item.lower() for item in proj_data['display']]
    proj_data['creation_date'] = datetime.now().strftime(
        proj_data['creation_date'])
    relative = (proj_data['project_url'] == '')
    proj_data['relative'] = relative
    proj_data['extensions'] += [
        ext for ext in proj_data['fpp_extensions']
        if ext not in proj_data['extensions']
    ]

    # Parse file extensions and comment characters for extra filetypes
    extdict = {}
    for ext in proj_data['extra_filetypes']:
        sp = ext.split()
        if len(sp) < 2:
            continue
        extdict[sp[0]] = sp[1]
    proj_data['extra_filetypes'] = extdict

    # Make sure no src_dir is contained within output_dir
    for projdir in proj_data['src_dir']:
        proj_path = ford.utils.split_path(projdir)
        out_path = ford.utils.split_path(proj_data['output_dir'])
        for directory in out_path:
            if len(proj_path) == 0:
                break
            if directory == proj_path[0]:
                proj_path.remove(directory)
            else:
                break
        else:
            # for/else: the loop completed without break, i.e. every
            # component of output_dir matched a prefix of projdir.
            # FIX: the original message had the two format arguments
            # swapped (and was missing "is"): it printed the output
            # directory where the source directory belonged.
            print(
                'Error: directory containing source-code {} is a subdirectory of output directory {}.'
                .format(projdir, proj_data['output_dir']))
            sys.exit(1)

    # Check that none of the docmarks are the same
    if proj_data['docmark'] == proj_data['predocmark'] != '':
        print('Error: docmark and predocmark are the same.')
        sys.exit(1)
    if proj_data['docmark'] == proj_data['docmark_alt'] != '':
        print('Error: docmark and docmark_alt are the same.')
        sys.exit(1)
    if proj_data['docmark'] == proj_data['predocmark_alt'] != '':
        print('Error: docmark and predocmark_alt are the same.')
        sys.exit(1)
    if proj_data['docmark_alt'] == proj_data['predocmark'] != '':
        print('Error: docmark_alt and predocmark are the same.')
        sys.exit(1)
    if proj_data['docmark_alt'] == proj_data['predocmark_alt'] != '':
        print('Error: docmark_alt and predocmark_alt are the same.')
        sys.exit(1)
    if proj_data['predocmark'] == proj_data['predocmark_alt'] != '':
        print('Error: predocmark and predocmark_alt are the same.')
        sys.exit(1)

    # Add gitter sidecar if specified in metadata
    if 'gitter_sidecar' in proj_data:
        proj_docs += '''
        <script>
            ((window.gitter = {{}}).chat = {{}}).options = {{
                room: '{}'
            }};
        </script>
        <script src="https://sidecar.gitter.im/dist/sidecar.v1.js" async defer></script>
        '''.format(proj_data['gitter_sidecar'].strip())

    # Handle preprocessor:
    if proj_data['preprocess'].lower() == 'true':
        if proj_data['preprocessor']:
            preprocessor = proj_data['preprocessor'].split()
        else:
            preprocessor = ['cpp', '-traditional-cpp', '-E', '-D__GFORTRAN__']

        # Check whether preprocessor works (reading nothing from stdin)
        try:
            # FIX: use a context manager so the devnull handle is always
            # closed (the original leaked the open file object).
            with open(os.devnull) as devnull:
                subprocess.Popen(preprocessor, stdin=devnull, stdout=devnull,
                                 stderr=devnull).communicate()
        except OSError as ex:
            print('Warning: Testing preprocessor failed')
            print('  Preprocessor command: {}'.format(preprocessor))
            print('  Exception: {}'.format(ex))
            print('  -> Preprocessing turned off')
            proj_data['preprocess'] = 'false'
        else:
            proj_data['preprocess'] = 'true'
            proj_data['preprocessor'] = preprocessor

    # Get correct license
    try:
        proj_data['license'] = LICENSES[proj_data['license'].lower()]
    except KeyError:
        print('Warning: license "{}" not recognized.'.format(
            proj_data['license']))
        proj_data['license'] = ''

    # Return project data, docs, and the Markdown reader
    md.reset()
    md.Meta = {}
    return (proj_data, proj_docs, md)
if not text.endswith('.') or not paragraph: text = text.rstrip('. ') + ' …' return text _md = markdown.Markdown( output_format='html5', # type: ignore[arg-type] extensions=[ "markdown.extensions.abbr", "markdown.extensions.attr_list", "markdown.extensions.def_list", "markdown.extensions.fenced_code", "markdown.extensions.footnotes", "markdown.extensions.tables", "markdown.extensions.admonition", "markdown.extensions.smarty", "markdown.extensions.toc", ], extension_configs={ "markdown.extensions.smarty": dict( smart_dashes=True, smart_ellipses=True, smart_quotes=False, smart_angled_quotes=False, ), }, ) @contextmanager def _fenced_code_blocks_hidden(text): def hide(text):
return self.date > other.date def __ge__(self, other): return self.date >= other.date posts = [] title_re = re.compile('(?<=title:[\s*]).*') name_re = re.compile('(?<=name:[\s*]).*') date_re = re.compile('(?<=date:[\s*]).*') root_dir = '.' base_url = 'https://richardhsu.net' md = markdown.Markdown(extensions=['footnotes']) # get posts post_dir = Path(root_dir + '/posts') for post_file in post_dir.iterdir(): # print('reading ' + post_file.name) with open(str(post_dir) + '/' + post_file.name, encoding='utf-8') as p: file_text = ''.join(p.readlines()) m = title_re.search(file_text) if m: title = m.group(0) m = name_re.search(file_text) if m: name = m.group(0) m = date_re.search(file_text) if m:
def __init__(self, environment):
    """Initialise the Jinja extension for the given environment.

    Extends *environment* with a ``markdowner`` attribute: a shared
    ``markdown.Markdown`` converter configured with the 'extra'
    extension, available to the rest of the extension's machinery.
    """
    super(MarkdownTagExtension, self).__init__(environment)
    environment.extend(markdowner=markdown.Markdown(extensions=['extra']))
# copy stylesheet if existing css = template_dir + os.sep + style_file if os.path.exists(css) and os.path.isfile(css): text = tools.read(css) tools.write(install_dir + os.sep + style_file, text, 'ascii') # must save as ascii because Jave CSS import in JEditorPane does not read utf-8 # locate all lookup files print "locating markdown files" folders, files = tools.locate(lookup_files) number = len(files) print "found %d files" %number # create new Markdown parser object md = markdown.Markdown(extensions = ['footnotes'], output_format = 'html4') # loop over files for k in range(number): f = files[k] d = folders[k] print 'processing: ' + f # need to insert the install_dir in the path at first position o = install_dir + os.sep + d # if a subfolder needs to be created do it if not os.path.exists(o): os.makedirs(o) # construct input and output file paths (output file ending on html, input file having a 3 character extension)