def field_to_html(field):
    """Render a markdown field to HTML while protecting embedded math.

    In 'dollars' math mode, math delimited by unescaped ``$``/``$$`` is
    rewritten to ``\\(…\\)`` / ``\\[…\\]`` so the markdown renderer leaves
    it untouched. In 'brackets' mode (or as fallback), single backslashes
    before math brackets are doubled to survive markdown processing.
    """
    if CONFIG['math_mode'] == 'dollars':
        # Handle display math ($$ … $$) before inline math ($ … $).
        for delim, (opening, closing) in (("$$", (r"\\[", r"\\]")),
                                          ("$", (r"\\(", r"\\)"))):
            # Split on unescaped delimiters only (ignore \$ and \$$).
            pattern = r"(?<!\\){}".format(delim.replace(r"$", r"\$"))
            pieces = re.split(pattern, field)
            # Odd-indexed pieces sat between delimiters: wrap them in
            # the bracket form of the delimiters.
            pieces[1::2] = [opening + chunk + closing for chunk in pieces[1::2]]
            field = "".join(pieces)
    else:
        # 'brackets' mode or fallback: double the backslash before every
        # math bracket. Backslashes, man.
        for bracket in ("(", ")", "[", "]"):
            field = field.replace(r"\{}".format(bracket), r"\\{}".format(bracket))
    if CONFIG['misaka_renderer'] == 'HighlighterRenderer':
        renderer = HighlighterRenderer()
    else:
        # 'HtmlRenderer' or fallback case.
        renderer = misaka.HtmlRenderer()
    return misaka.Markdown(renderer, CONFIG['misaka_extensions'])(field)
def benchmark_misaka(text):
    """Render *text* once with misaka, using the feature set mistune also has."""
    import misaka as m
    # mistune has all these features
    flags = (m.EXT_NO_INTRA_EMPHASIS
             | m.EXT_FENCED_CODE
             | m.EXT_AUTOLINK
             | m.EXT_TABLES
             | m.EXT_STRIKETHROUGH)
    m.Markdown(m.HtmlRenderer(), extensions=flags).render(text)
def index(request):
    """Render the front page with the 20 newest news items.

    For each item, the ``src`` of the first ``<img>`` in its rendered
    markdown is extracted (empty string when there is none) so the
    template can show a thumbnail. Authenticated users additionally get
    their ``news_user`` object in the context.
    """
    news_list = NEWS.objects.order_by('-date')[:20]
    # Build the markdown renderer once instead of once per item;
    # it carries no per-document state.
    md_render = misaka.Markdown(misaka.HtmlRenderer())
    img_list = []
    for news in news_list:
        soup = BeautifulSoup(md_render(news.text), features='html.parser')
        img = soup.find('img')
        # First image URL, or '' when the item has no image.
        img_list.append(img.get('src') if img is not None else '')
    zip_list = list(zip(news_list, img_list))
    if request.user.is_authenticated:
        user = get_object_or_404(news_user, id=request.user.id)
        context = {'news_list': zip_list, 'user': user, 'is_auth': 1}
    else:
        context = {
            'news_list': zip_list,
            'img_list': img_list,
            'is_auth': 0,
        }
    return render(request, "index.html", context)
def loadmore(request):
    """Return one page of news items for incremental ("load more") loading.

    Query parameters:
        type: optional NEWS.style value to filter on.
        page: page number; defaults to 3 -- NOTE(review): confirm this
              default is intended rather than 1.
    """
    # Don't shadow the builtin `type`.
    news_type = request.GET.get('type')
    if news_type is not None:
        news_qs = NEWS.objects.filter(style=news_type).order_by('-date')
    else:
        news_qs = NEWS.objects.order_by('-date')
    default_page = 3
    limit = 20
    # Build the markdown renderer once instead of once per item.
    md_render = misaka.Markdown(misaka.HtmlRenderer())
    img_list = []
    for news in news_qs:
        soup = BeautifulSoup(md_render(news.text), features='html.parser')
        img = soup.find('img')
        # First image URL, or '' when the item has no image.
        img_list.append(img.get('src') if img is not None else '')
    # BUG FIX: the original called the undefined name `L(zipped)`, which
    # raised NameError at runtime; the builtin `list` was clearly meant.
    items = list(zip(news_qs, img_list))
    paginator = Paginator(items, limit)
    page = request.GET.get('page', default_page)
    # (debug `print` of the page contents removed)
    news_list = paginator.page(page).object_list
    context = {
        'news_list': news_list,
    }
    return render(request, 'load.html', context)
def test_misaka_classes(self):
    """Exercise misaka's class-based API with the common extension set."""
    import misaka
    # BUG FIX: 'fenced=code' was a typo -- misaka's extension name is
    # 'fenced-code'; the misspelled name silently enables nothing.
    extensions = (
        'no-intra-emphasis',
        'fenced-code',
        'autolink',
        'tables',
        'strikethrough',
    )
    r = misaka.HtmlRenderer()
    p = misaka.Markdown(r, extensions)
    p(self.text)
def benchmark_misaka(text):
    """Benchmark misaka: render *text* NUM times and print elapsed milliseconds."""
    import misaka as m
    # mistune has all these features
    extensions = (
        m.EXT_NO_INTRA_EMPHASIS
        | m.EXT_FENCED_CODE
        | m.EXT_AUTOLINK
        | m.EXT_TABLES
        | m.EXT_STRIKETHROUGH
    )
    md = m.Markdown(m.HtmlRenderer(), extensions=extensions)
    t0 = time.time()
    for _ in range(NUM):
        # BUG FIX: the original called `md.renderer(text)` -- `renderer`
        # is the renderer object, not a method, so the call raised
        # TypeError. `md.render(text)` was meant (as in the sibling
        # benchmark function in this file).
        md.render(text)
    t1 = time.time()
    print('misaka', (t1 - t0) * 1000, 'ms')
def get_pics_list(md_content):
    """Collect every image link from a markdown document.

    :param md_content: markdown source text
    :return: list of ``<img src>`` values, in document order
    """
    to_html = misaka.Markdown(misaka.HtmlRenderer())
    soup = BeautifulSoup(to_html(md_content), features='html.parser')
    return [tag.get('src') for tag in soup.find_all('img')]
def genFromDirectory(root='content', dest='../'):
    """Recursively render every ``.md`` file under *root* to HTML under *dest*.

    Each markdown file may begin with a YAML frontmatter block
    (``---\\nkey: value\\n---``); its keys are passed straight to the
    Jinja2 template, which is selected by the ``template`` key (falling
    back to ``default``). The rendered HTML body is exposed to the
    template as ``content`` and the file timestamp as ``created``.
    """
    # Hoisted out of the per-file loop: the template environment does not
    # depend on the file being processed.
    env = Environment(loader=FileSystemLoader('templates'))
    for f in os.listdir(root):
        p = os.path.join(root, f)
        # Recurse if directory
        if os.path.isdir(p):
            genFromDirectory(root + '/' + f, dest + f + '/')
        if not p.endswith('.md'):
            continue
        with open(p, 'r', encoding='utf-8') as file:
            s = file.read()
        d = {}  # Data for rendering
        # parse frontmatter: ---key:value---\n
        # BUG FIX: the pattern is now a raw string; '\s'/'\S' in a plain
        # string are invalid escape sequences (SyntaxWarning on modern
        # Python, slated to become an error).
        res = re.search(r'^(---)([\s\S]*)(---)\n([\s\S]*$)', s)
        if res:
            meta = yaml.load(res.group(2), Loader=yaml.FullLoader)
            for k in meta:
                d[k] = meta[k]
            content = res.group(4)
        else:
            content = s
        content = protect_cors_url(content)
        md = misaka.Markdown(misaka.HtmlRenderer())
        # actual file contents, rendered to HTML
        d['content'] = md(content)
        # NOTE: getmtime is the *modification* time, not creation time.
        d['created'] = datetime.datetime.fromtimestamp(
            os.path.getmtime(p)).strftime('%A · %B %d · %Y')
        if not os.path.exists(dest):
            os.makedirs(dest)
        n = os.path.splitext(f)[0]
        with open(dest + n + '.html', 'w', encoding='utf-8') as file:
            tmplId = d['template'] if 'template' in d else 'default'
            tmpl = env.get_template(tmplId + '.html')
            file.write(tmpl.render(page=d))
def plain_markdown(text):
    """Render *text* as markdown with HTML escaping; ``None`` yields ''."""
    if text is None:
        return ''
    md = m.Markdown(m.HtmlRenderer(flags=m.HTML_ESCAPE))
    return md.render(text)
import os from django.contrib import messages from django.conf import settings from django.templatetags.static import static from django.urls import reverse from django.utils.timesince import timesince import jinja2 import misaka markdown = misaka.Markdown(misaka.HtmlRenderer()) def render_markdown(text): return jinja2.Markup(markdown(text)) def environment(**kwargs): env = jinja2.Environment(**kwargs) env.filters["markdown"] = render_markdown env.globals.update( { "env": os.environ.get("ENV", "dev"), "get_messages": messages.get_messages, "timesince": timesince, "static": static, "url": reverse, "google_analytics_id": settings.GOOGLE_ANALYTICS_ID, "user_guidance_base_url": settings.USER_GUIDANCE_BASE_URL,