def get_events(self, group, group_id):
    """Fetch events from the last 30 days for *group_id* and merge them
    into a copy of *group*.

    Returns the merged dict: fetched items are grouped under their slot
    name (the key prefix before the first "_"), each EVENT gets local
    start/end timestamps, its "Stufen" ids resolved against the STUFE
    entries, and a lazily-rendered Markdown description.
    """
    after = date.today() - timedelta(days=30)
    data = get_data_by_global_id(
        group_id, {"events": {"after": after.isoformat()}})

    out = group.copy()
    # Keys look like "<SLOT>_<id>"; bucket the payloads by slot.
    for k, v in data.items():
        slot, _, _id = k.partition("_")
        out.setdefault(slot, []).append(v["content"])

    # Index STUFE entries by their Keywords_ID for O(1) resolution below.
    k_s = {}
    for s in out.get("STUFE", ()):
        k_s[s["Keywords_ID"]] = s

    for e in out["EVENT"]:
        e["StartLocal"] = from_timestamp(float(e["Start"]))
        e["EndLocal"] = from_timestamp(float(e["End"]))
        e["Stufen"] = [k_s[s] for s in e["Stufen"]]
        # BUGFIX: bind the description NOW via a default argument.  A bare
        # `lambda: commonmark(e["Description"] or "")` late-binds `e`, so
        # every event's DescriptionMD would render the LAST event's text
        # once the loop finished.
        e["DescriptionMD"] = CachedFuncResult(
            lambda desc=e["Description"]: commonmark(desc or ""))
    return out
def get_html_excerpts(page_results, hit_idx, hit_link, highlights):
    """Build the HTML excerpt <div> for one search hit.

    *highlights* is newline-separated CommonMark, one paragraph per line,
    each prefixed with the formatter's paragraph-number tag.  Paragraphs
    are rendered to HTML, optionally shortened to sentence fragments, and
    decorated with paragraph-number markers or "[... N paragraphs ...]"
    gap notes depending on the layout mode.

    NOTE(review): relies on module-level state (`result_type`,
    `HIT_EXPOSED_EXCERPT_LIMIT`, `MULTIPLE_HIT_EXCERPT_LIMIT`,
    `readable_layout`, `is_exposed`, `extras`) — confirm against the rest
    of the module.  The `global og_description` below is never assigned
    here; presumably `update_og_description` mutates it — verify.
    """
    global og_description
    hit = page_results[hit_idx]
    p_num_last = 0
    result = ""
    for p_idx, cm_paragraph in enumerate(filter(None, highlights.split('\n'))):
        # Exposed hits get a hard cap on excerpt count.
        if is_exposed(hit) and p_idx == HIT_EXPOSED_EXCERPT_LIMIT:
            break
        # In multi-hit listings, replace the over-limit paragraph with a
        # "More..." link and keep scanning (so the cap applies once).
        if result_type == 'multiple' and p_idx == MULTIPLE_HIT_EXCERPT_LIMIT:
            result += '<div data-content="•"></div><p><a href="{}"> More... </a></p>\n'.format(
                hit_link)
            continue
        # The paragraph number is embedded at the start of each line via
        # the formatter's id_tag template.
        p_num_re = hit.results.formatter.id_tag.format(r'(\d+)')
        p_num = int(re.match(p_num_re, cm_paragraph).group(1))
        if readable_layout():
            # Note how many source paragraphs were skipped since the last
            # excerpt and render a gap marker for them.
            p_omitted_count = p_num - p_num_last - 1
            p_num_last = p_num
            if p_omitted_count:
                result += '\n<p>[... {} paragraph{} ...]</p>\n'.format(
                    p_omitted_count, 's' if p_omitted_count > 1 else '')
        # Strip the number tag before Markdown rendering; it is re-added
        # to the HTML below when paragraphs can be uniquely identified.
        cm_paragraph = re.sub(p_num_re, "", cm_paragraph)
        paragraph = commonmark(cm_paragraph).strip()
        # Very first excerpt on the page seeds the OpenGraph description.
        if hit_idx == 0 and p_idx == 0:
            update_og_description(page_results.total, paragraph)
        is_first_hit_preview = page_results.pagenum == 1 and hit_idx == 0
        # Full paragraphs only for single-result views or the first
        # preview hit, and never for exposed hits.
        gets_full_paragraph = ('single' in result_type
                               or is_first_hit_preview) and not is_exposed(hit)
        if not gets_full_paragraph:
            sentences = get_sentence_fragments(paragraph)
            paragraph = get_html_fragmented_paragraph(hit_link, p_num, sentences)
        # 'single_many' shows the same doc repeatedly, so paragraph ids
        # would collide there.
        can_uniquely_id_paragraphs = result_type != 'single_many'
        if can_uniquely_id_paragraphs:
            # Re-insert the id tag just inside the opening <p>.
            paragraph = re.sub(
                r'^(<p>)',
                r'\1{}'.format(hit.results.formatter.id_tag.format(p_num)),
                paragraph)
            if not readable_layout():
                # Numbered layout: visible pilcrow marker before the text.
                paragraph = '<div data-content="{}{}"></div>{}'.format(
                    '¶', p_num, paragraph)
        result += '{}\n'.format(paragraph)
    # Trailing gap marker for paragraphs after the last excerpt.
    p_remaining_count = extras(hit)['num_doc_p'] - p_num_last
    if readable_layout() and p_remaining_count:
        result += '\n<p>[... {} paragraph{} ...]</p>\n'.format(
            p_remaining_count, 's' if p_remaining_count > 1 else '')
    result = '<div class="excerpts {}">\n{}</div>\n'.format(
        'excerpts-readable' if readable_layout() else 'excerpts-numbered',
        result)
    return result
def render(self):
    """Render self.text (CommonMark) to HTML, appending oEmbed and
    OpenGraph previews when present."""
    html = commonmark(self.text).strip()
    if self.is_nsfw:
        html = make_nsfw_safe(html)
    if self.oembed:
        html = "{}<br>{}".format(html, self.oembed.oembed)
    if self.opengraph:
        preview = render_to_string(
            "content/_og_preview.html", {"opengraph": self.opengraph})
        html = "{}{}".format(html, preview)
    return html
def render(self): """Pre-render text to Content.rendered.""" text = self.get_and_linkify_tags() rendered = commonmark(text).strip() if self.is_nsfw: rendered = make_nsfw_safe(rendered) if self.oembed: rendered = "%s<br>%s" % (rendered, self.oembed.oembed) if self.opengraph: rendered = "%s%s" % (rendered, render_to_string( "content/_og_preview.html", {"opengraph": self.opengraph})) self.rendered = rendered Content.objects.filter(id=self.id).update(rendered=rendered)
def cgi_main():
    """Main script: get CGI parameters, return HTML content.

    Saves the submitted script (and optional stdin payload) into a
    throw-away temp directory, runs it, renders the resulting file tree,
    and prints an HTTP response — either plain or templated HTML.
    """
    (script, stdin, language, plain_output) = get_cgi_params()

    # Create temp directory, save script/stdin files
    d = mkdtemp(prefix='port-a-script')
    try:
        script_filename = os.path.join(d, "script")
        save_file(script_filename, script)
        if stdin:
            stdin_filename = os.path.join(d, "stdin")
            save_file(stdin_filename, stdin)
        else:
            stdin = ""
            stdin_filename = None

        # Run scripts and collect results
        run_port_a_script(d, language, script_filename, stdin_filename)
        file_list = run_dirtree(d)
        html_file_list = commonmark(file_list)
    finally:
        # BUGFIX: always clean up, even when the run raises — previously
        # an exception leaked the temp directory.
        shutil.rmtree(d)

    # Send plain text output
    if plain_output:
        print("Content-Type: text/html")
        print("")
        # BUGFIX: printing encode()'s bytes under Python 3 emits the
        # b'...' repr.  Round-trip through ascii/ignore to keep the
        # original "strip non-ASCII" intent while printing real text.
        print(file_list.encode('ascii', 'ignore').decode('ascii'))
        return

    # Send pretty HTML output
    print("Content-Type: text/html")
    print("")
    tmpl = Template(html_tmpl)
    html = tmpl.render(language=language, script=script,
                       stdin=stdin, results=html_file_list)
    print(html.encode('ascii', 'ignore').decode('ascii'))
def teacher_update_questions():
    """Handle the teacher form: publish new question text and answers.

    Updates the module-level question state and optionally wipes the
    collected student answers, then redirects back to the dashboard.
    """
    if is_teacher_logged_in() != True:
        return flask.redirect("/teacher/login")

    # Check wether or not to keep students answers: delete unless the
    # "keep" submit button was the one pressed.
    delete = (flask.request.form.get("submit_and_delete") != ""
              and "submit_and_keep" not in flask.request.form)

    # Get other data
    question_text = flask.request.form.get("question_text")
    answers = flask.request.form.get("answers")

    # Split answers up, one per line.  BUGFIX: the original split on
    # "\n\r", which never matches CRLF ("\r\n") textarea input, so all
    # answers arrived as one blob.  splitlines() handles \r\n, \n and \r.
    answers_raw = [line.strip() for line in answers.splitlines()
                   if line.strip()]

    # Parse markdown and remove <p>
    question_text_raw = question_text
    question_text = remove_p_tags(commonmark(question_text))
    answers = [remove_p_tags(commonmark(a)) for a in answers_raw]

    # Send to Global Variables
    global QuestionText, QuestionTextRaw, QuestionAnswers, QuestionAnswersRaw, StudentsAnswers
    QuestionText = question_text
    QuestionTextRaw = question_text_raw
    QuestionAnswers = answers
    QuestionAnswersRaw = answers_raw

    print(delete)  # debug output
    if delete == True:
        print("Deleting answers...")
        StudentsAnswers = {}
    return flask.redirect("/teacher/dashboard")
def DecodeMarkdown(source):
    """Render CommonMark *source* to an HTML string."""
    html = commonmark(source)
    return html
def md2html(md):
    """Convert a Markdown string to HTML via CommonMark."""
    rendered = commonmark(md)
    return rendered
def root():
    """Serve the module-level README rendered as HTML."""
    html = commonmark(readme)
    return str(html)
def parse(path):
    """Parse a source document into a post dict.

    Dispatches on the file extension (.html, .md, .rst, .ipynb) and
    returns a dict with at least 'title', 'date', 'body' and
    'needs_disqus'.  Raises ValueError for unrecognized extensions.

    NOTE(review): relies on project helpers (`utils.*`, `read_text_file`,
    `SimpleTemplate`, `dl`, `filters`, `html_tmpl` siblings) defined
    elsewhere in the module — behavior of those is assumed, not shown.
    """
    source = read_text_file(path)
    result = {}
    if path.endswith('.html'):
        if utils.detect_blogofile(source):
            # Legacy blogofile page: header parsed as reST docinfo.
            heading, info, other_html = utils.convert_blogofile(source)
            parts = utils.parse_rst(heading)
            body_html = parts['docinfo'] + other_html
            body_html = utils.pygmentize_pre_blocks(body_html)
            # Keep </pre> tight against the last code line.
            body_html = body_html.replace('\n</pre>', '</pre>')
            result['title'] = utils.html_parser.unescape(parts['title'])
            result['needs_disqus'] = True
            result['date'] = info['date']
            result['tags'] = info['tags']
        else:
            result['title'] = utils.find_title_in_html(source)
            body_html = SimpleTemplate(source)
            result['needs_disqus'] = False
            result['date'] = None
            result['tags'] = ()
        result['body'] = body_html
        result['next_link'] = None
        result['previous_link'] = None
        # Drop falsy tags (empty strings).
        result['tags'] = [tag for tag in result['tags'] if tag]
    elif path.endswith('.md'):
        if utils.detect_blogofile(source):
            heading, info, body = utils.convert_blogofile(source)
            source = body
            result['date'] = info['date']
            result['title'] = info['title']
            result['needs_disqus'] = True
        else:
            result['needs_disqus'] = False
        result['body'] = commonmark(source)
    elif path.endswith('.rst'):
        if utils.detect_blogofile(source):
            heading, info, body = utils.convert_blogofile(source)
            source = heading + body
            result['title'] = info['title']
            del heading, info, body
            result['needs_disqus'] = True
        else:
            result['needs_disqus'] = False
        # Pull the docinfo fields (date, ...) out of the doctree.
        doctree = publish_doctree(source)
        docinfos = doctree.traverse(nodes.docinfo)
        docinfo = {c.tagname: str(c.children[0])
                   for i in docinfos for c in i.children}
        parts = utils.parse_rst(source)
        # parts = publish_from_doctree(source, writer_name='html',
        #     settings_overrides={'initial_header_level': 2})
        body = parts['docinfo'] + utils.pygmentize_pre_blocks(parts['fragment'])
        result['body'] = body
        result['date'] = datetime.strptime(
            docinfo.get('date'), '%d %B %Y').date()
        if 'title' not in result:
            result['title'] = parts['title']
    elif path.endswith('.ipynb'):
        notebook = nbformat.reads(source)
        docinfo = utils.build_docinfo_block_for_notebook(notebook)
        exporter = HTMLExporter(config=None, extra_loaders=[dl],
                                filters=filters)
        exporter.template_file = 'brandon.tpl'
        #notebook = nbformat.convert(notebook, nbformat.current_nbformat)
        body, resources = exporter.from_notebook_node(notebook)
        body = body.replace('\n</pre>', '</pre>')
        # Inject the docinfo block right after the page title.
        body = body.replace('</h1>', '</h1>\n' + docinfo.rstrip())
        date = notebook['metadata'].get('date')
        if date is not None:
            date = datetime.strptime(date, '%d %B %Y').date()
        result['body'] = body
        result['date'] = date
        result['needs_disqus'] = notebook['metadata'].get('needs_disqus')
        result['title'] = (notebook['metadata'].get('name', None)
                           or utils.find_title_in_html(body))
    else:
        raise ValueError('unrecognized path: {}'.format(path))
    return result
def parse(path):
    """Parse a source document into a post dict.

    Dispatches on the file extension (.html, .md, .rst, .ipynb) and
    returns a dict with at least 'title', 'date', 'body' and
    'needs_disqus'.  Raises ValueError for unrecognized extensions.

    NOTE(review): this file contains two near-identical `parse`
    definitions; this later one shadows the earlier — confirm which is
    intended and deduplicate.
    """
    source = read_text_file(path)
    result = {}
    if path.endswith('.html'):
        if utils.detect_blogofile(source):
            # Legacy blogofile page: header parsed as reST docinfo.
            heading, info, other_html = utils.convert_blogofile(source)
            parts = utils.parse_rst(heading)
            body_html = parts['docinfo'] + other_html
            body_html = utils.pygmentize_pre_blocks(body_html)
            # Keep </pre> tight against the last code line.
            body_html = body_html.replace('\n</pre>', '</pre>')
            result['title'] = utils.html_parser.unescape(parts['title'])
            result['needs_disqus'] = True
            result['date'] = info['date']
            result['tags'] = info['tags']
        else:
            result['title'] = utils.find_title_in_html(source)
            body_html = SimpleTemplate(source)
            result['needs_disqus'] = False
            result['date'] = None
            result['tags'] = ()
        result['body'] = body_html
        result['next_link'] = None
        result['previous_link'] = None
        # Drop falsy tags (empty strings).
        result['tags'] = [tag for tag in result['tags'] if tag]
    elif path.endswith('.md'):
        if utils.detect_blogofile(source):
            heading, info, body = utils.convert_blogofile(source)
            source = body
            result['date'] = info['date']
            result['title'] = info['title']
            result['needs_disqus'] = True
        else:
            result['needs_disqus'] = False
        result['body'] = commonmark(source)
    elif path.endswith('.rst'):
        if utils.detect_blogofile(source):
            heading, info, body = utils.convert_blogofile(source)
            source = heading + body
            result['title'] = info['title']
            del heading, info, body
            result['needs_disqus'] = True
        else:
            result['needs_disqus'] = False
        # Pull the docinfo fields (date, ...) out of the doctree.
        doctree = publish_doctree(source)
        docinfos = doctree.traverse(nodes.docinfo)
        docinfo = {
            c.tagname: str(c.children[0])
            for i in docinfos for c in i.children
        }
        parts = utils.parse_rst(source)
        # parts = publish_from_doctree(source, writer_name='html',
        #     settings_overrides={'initial_header_level': 2})
        body = parts['docinfo'] + utils.pygmentize_pre_blocks(
            parts['fragment'])
        result['body'] = body
        result['date'] = datetime.strptime(docinfo.get('date'),
                                           '%d %B %Y').date()
        if 'title' not in result:
            result['title'] = parts['title']
    elif path.endswith('.ipynb'):
        notebook = nbformat.reads(source)
        docinfo = utils.build_docinfo_block_for_notebook(notebook)
        exporter = HTMLExporter(config=None, extra_loaders=[dl],
                                filters=filters)
        exporter.template_file = 'brandon.tpl'
        #notebook = nbformat.convert(notebook, nbformat.current_nbformat)
        body, resources = exporter.from_notebook_node(notebook)
        body = body.replace('\n</pre>', '</pre>')
        # Inject the docinfo block right after the page title.
        body = body.replace('</h1>', '</h1>\n' + docinfo.rstrip())
        date = notebook['metadata'].get('date')
        if date is not None:
            date = datetime.strptime(date, '%d %B %Y').date()
        result['body'] = body
        result['date'] = date
        result['needs_disqus'] = notebook['metadata'].get('needs_disqus')
        result['title'] = (notebook['metadata'].get('name', None)
                           or utils.find_title_in_html(body))
    else:
        raise ValueError('unrecognized path: {}'.format(path))
    return result
#!/usr/bin/env python3
"""Print the src of every <img> in the HTML rendered from the Markdown
files given on the command line."""
import html5lib, sys
from CommonMark import commonmark

# Alt: https://github.com/Lucas-C/pelican-mg/blob/master/gen_imgs_from_mds.py

XHTML_NS = '{http://www.w3.org/1999/xhtml}'

for md_file_path in sys.argv[1:]:
    # BUGFIX: Markdown is text — decode as UTF-8 explicitly instead of
    # relying on the locale's default encoding.
    with open(md_file_path, encoding='utf-8') as md_file:
        md_content = md_file.read()
    html = commonmark(md_content)
    doc_root = html5lib.parse(html)
    for img in doc_root.iter(XHTML_NS + 'img'):
        # ROBUSTNESS: an <img> without src previously raised KeyError.
        src = img.attrib.get('src')
        if src:
            print(src)