def run(body="body.md", nav="nav.md", out="index.html", title="index"): """ Convert the markdown to html. Args: body (str): A path to the markdown file of body nav (str): A path to the markdown file of navigation out (str): A path to the output html file title (str): A title of the website """ html = tmpl_begin.format(title=title) with open(nav, "rt") as f: html += '<header>\n' html += ul_to_nav(mistune.html(f.read())) html += '</header>\n' with open(body, "rt") as f: html += mistune.html(f.read()) html += tmpl_end with open(out, "wt") as f: f.write(html)
def markdown2email(text, attachments=None):
    import mistune

    msg = MIMEMultipart("alternative")
    html = mistune.html(text)
    part1 = MIMEText(text, "plain")
    part2 = MIMEText(html, "html")
    msg.attach(part1)
    msg.attach(part2)
    if attachments:
        # Wrap the text/html alternative and the attachments in a mixed multipart.
        full_msg = MIMEMultipart("mixed")
        full_msg.attach(msg)
        for attachment in attachments:
            part = MIMEBase(*attachment["contentType"].split("/", 1),
                            Name=attachment["name"])
            part.set_payload(attachment["content"])
            part["Content-Transfer-Encoding"] = "base64"
            part["Content-Disposition"] = 'attachment; filename="{}"'.format(
                attachment["name"])
            full_msg.attach(part)
        return full_msg
    return msg
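# Usage sketch (an illustration, not from the original source): the attachment
# dict keys ("name", "contentType", "content") are inferred from the loop above,
# and the base64-encoded payload matches the Content-Transfer-Encoding header.
import base64
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

attachment = {
    "name": "notes.txt",  # hypothetical file name
    "contentType": "text/plain",
    "content": base64.b64encode(b"hello").decode(),
}
msg = markdown2email("# Hello\n\nSome *markdown* body.", attachments=[attachment])
print(msg.as_string()[:120])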
def rendered(self):
    t = get_template(self.template.name)
    o = Organization.get()
    context = self.template.context.copy()
    context.update(**{
        "organization": o,
        "organization_address": o.address,
        "organization_name": o.name,
        "name": self.name,
        "page_name": self.slug,
        "section": self.section,
        "raw_markdown": self.raw_markdown,
        "slug": self.slug,
        "title": self.title,
        "description": self.description,
        "publish_date": self.publish_date,
        "published": self.published,
        "private": self.private,
        "context": self.context,
        "links": Link.objects.all().order_by("-publish_date", "-created_at"),
        "pages": Page.objects.all().order_by("-created_at"),
        "posts": Post.objects.all().order_by("-publish_date", "-created_at"),
        "RESOURCES_URL": settings.RESOURCES_URL
    })
    c = Context(context)
    rendered_string = mistune.html(self.raw_markdown)
    # normalize typographic quote characters in the rendered HTML
    rendered_string = rendered_string.replace(u"’", "’").replace(u"“", "“")
    rendered_string = rendered_string.replace(u"”", "”")
    content_template = DjangoTemplate(rendered_string)
    context["rendered_post_html"] = mark_safe(
        content_template.render(c).encode("utf-8").decode())
    return t.render(context)
def delete_note(map_identifier, topic_identifier, note_identifier):
    topic_store = get_topic_store()

    topic_map = topic_store.get_topic_map(map_identifier, current_user.id)
    if topic_map is None:
        current_app.logger.warning(
            f"Topic map not found: user identifier: [{current_user.id}], "
            f"topic map identifier: [{map_identifier}]"
        )
        abort(404)
    # If the map doesn't belong to the user and they don't have the right
    # collaboration mode on the map, then abort
    if not topic_map.owner and (
        topic_map.collaboration_mode is not CollaborationMode.EDIT
        and topic_map.collaboration_mode is not CollaborationMode.COMMENT
    ):
        abort(403)

    topic = topic_store.get_topic(
        map_identifier,
        topic_identifier,
        resolve_attributes=RetrievalMode.RESOLVE_ATTRIBUTES,
    )
    if topic is None:
        current_app.logger.warning(
            f"Topic not found: user identifier: [{current_user.id}], "
            f"topic map identifier: [{map_identifier}], "
            f"topic identifier: [{topic_identifier}]"
        )
        abort(404)

    note_occurrence = topic_store.get_occurrence(
        map_identifier,
        note_identifier,
        inline_resource_data=RetrievalMode.INLINE_RESOURCE_DATA,
        resolve_attributes=RetrievalMode.RESOLVE_ATTRIBUTES,
    )

    form_note_title = note_occurrence.get_attribute_by_name("title").value
    form_note_text = mistune.html(note_occurrence.resource_data.decode())
    form_note_scope = note_occurrence.scope

    if request.method == "POST":
        topic_store.delete_occurrence(map_identifier, note_occurrence.identifier)
        flash("Note successfully deleted.", "warning")
        return redirect(
            url_for(
                "topic.view",
                map_identifier=topic_map.identifier,
                topic_identifier=topic.identifier,
            )
        )

    return render_template(
        "topic/delete_note.html",
        topic_map=topic_map,
        topic=topic,
        note_identifier=note_occurrence.identifier,
        note_title=form_note_title,
        note_text=form_note_text,
        note_scope=form_note_scope,
    )
def feed(request):
    blog = resolve_address(request)
    if not blog:
        raise Http404("Blog does not exist")

    all_posts = blog.post_set.filter(
        publish=True, is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(blog.useful_domain())
    fg.author({'name': blog.subdomain, 'email': 'hidden'})
    fg.title(blog.title)
    fg.subtitle(blog.meta_description
                or clean_text(unmark(blog.content)[:160])
                or blog.title)
    fg.link(href=f"{blog.useful_domain()}/", rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"{blog.useful_domain()}/{post.slug}/")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': 'hidden'})
        fe.link(href=f"{blog.useful_domain()}/{post.slug}/")
        fe.content(clean_text(mistune.html(post.content)), type="html")
        fe.published(post.published_date)
        fe.updated(post.published_date)

    if request.GET.get('type') == 'rss':
        fg.link(href=f"{blog.useful_domain()}/feed/?type=rss", rel='self')
        rssfeed = fg.rss_str(pretty=True)
        return HttpResponse(rssfeed, content_type='application/rss+xml')
    else:
        fg.link(href=f"{blog.useful_domain()}/feed/", rel='self')
        atomfeed = fg.atom_str(pretty=True)
        return HttpResponse(atomfeed, content_type='application/atom+xml')
def section_reshaping(sections, issue_content):
    """
    Detect markdown <h2> headers in the issue text and insert each section's
    content into the matching entry of the original dictionary.

    :params:
        sections -> Dict: dict containing all default headers
        issue_content -> String: the raw text of the issue
    :returns:
        formated_job -> Dict: a filled copy of sections
    """
    formated_job = copy(sections)
    for content in issue_content.split("## "):
        for section_header in sections:
            if not content.startswith(f"{section_header}"):
                continue
            content_section_header = section_header
            if section_header in ["Nossa empresa", "Descrição da vaga"]:
                content_section_header = "Descrição da vaga"
            to_replace = f"{section_header}"
            content = content.replace(to_replace, "")
            formated_job[section_header] = str(mistune.html(content)).replace(
                "\n", "<br/>")
    return formated_job
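# Usage sketch (illustrative; the "Requisitos" key and the issue body are
# hypothetical, only "Descrição da vaga" appears in the function itself):
from copy import copy
import mistune

sections = {"Descrição da vaga": "", "Requisitos": ""}
issue_content = (
    "## Descrição da vaga\nTrabalhar com Python.\n\n"
    "## Requisitos\n- Django\n- Docker\n"
)
filled = section_reshaping(sections, issue_content)
print(filled["Requisitos"])  # list rendered as HTML, with newlines turned into <br/>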
def render_home_page():
    with open('HOME.md', 'r', encoding='utf-8') as input_file:
        text = input_file.read()
    rendered_text = mistune.html(text)
    return rendered_text
def html_from_md_file(fp: str) -> str:
    """Use the mistune lib to convert markdown to HTML."""
    with open(fp, "rb") as file:
        html = mistune.html(UnicodeDammit(file.read()).unicode_markup)
    return html
def generate_app_pages(self):
    # create all directories
    self.create_root_directory(self.output_directory)

    appimage_template = Environment(
        loader=self.file_system_loader).from_string(APPTEMPLATE)
    appimage_data_template = Environment(
        loader=self.file_system_loader).from_string(
            mistune.html(APPTEMPLATE_MD))

    sitemap_content = []
    current_formatted_time = time.strftime('%Y-%m-%d')

    # iterate and generate app pages
    for app in progressbar(self.apps, redirect_stdout=True):
        appimage = AppImage(app, token=get_github_token(args))
        path_to_appfolder = \
            os.path.join(self.output_directory, appimage.title.lower())

        # make the app folder
        if os.path.exists(path_to_appfolder):
            print(Fore.YELLOW +
                  "[STATIC][{}] Directory exists.".format(appimage.title) +
                  Fore.RESET)
        else:
            os.makedirs(path_to_appfolder)

        # write html file
        print(Fore.GREEN +
              "[STATIC][{}] Processing HTML files.".format(appimage.title) +
              Fore.RESET)
        with open(os.path.join(path_to_appfolder, 'index.html'), 'w') as w:
            w.write(
                appimage_template.render(
                    appimage=appimage,
                    catalog=Catalog(),
                    content=appimage_data_template.render(
                        appimage=appimage)))

        _app_sitemap_local = SITEMAP_URL.format(
            url=f"{Catalog().url}/{appimage.title_formatted}",
            lastmod=current_formatted_time,
            changefreq="weekly")
        sitemap_content.append(_app_sitemap_local)

        self.json.append(appimage.json_data())
        with open(os.path.join(path_to_appfolder, 'core.json'), 'w') as w:
            json.dump(appimage.get_app_metadata(), w)

        shields_badge = appimage.shields_badge()
        with open(os.path.join(path_to_appfolder, 'shields.json'), 'w') as w:
            json.dump(shields_badge, w)

    # write json file
    self.write_json_index()
    with open(os.path.join(self.output_directory, 'sitemap.xml'), 'w') as w:
        w.write(SITEMAP_HEADER.format(content=''.join(sitemap_content)))
    print("writing sitemap.xml completed successfully")
def feed(request):
    fg = FeedGenerator()
    fg.id("bearblog")
    fg.author({"name": "Bear Blog", "email": "*****@*****.**"})

    newest = request.GET.get("newest")
    if newest:
        fg.title("Bear Blog Most Recent Posts")
        fg.subtitle("Most recent posts on Bear Blog")
        fg.link(href="https://bearblog.dev/discover/?newest=True", rel="alternate")
        all_posts = (Post.objects.annotate(
            upvote_count=Count("upvote"),
        ).filter(
            publish=True,
            blog__reviewed=True,
            blog__blocked=False,
            show_in_feed=True,
            published_date__lte=timezone.now(),
        ).order_by("-published_date").select_related("blog")[0:posts_per_page])
    else:
        fg.title("Bear Blog Trending Posts")
        fg.subtitle("Trending posts on Bear Blog")
        fg.link(href="https://bearblog.dev/discover/", rel="alternate")
        all_posts = (Post.objects.annotate(
            upvote_count=Count("upvote"),
            score=ExpressionWrapper(
                ((Count("upvote") - 1) /
                 ((Seconds(Now() - F("published_date"))) + 4)**gravity) * 100000,
                output_field=FloatField(),
            ),
        ).filter(
            publish=True,
            blog__reviewed=True,
            blog__blocked=False,
            show_in_feed=True,
            published_date__lte=timezone.now(),
        ).order_by("-score", "-published_date").select_related(
            "blog").prefetch_related("upvote_set")[0:posts_per_page])

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"{post.blog.useful_domain()}/{post.slug}/")
        fe.title(post.title)
        fe.author({"name": post.blog.subdomain, "email": "hidden"})
        fe.link(href=f"{post.blog.useful_domain()}/{post.slug}/")
        fe.content(clean_text(mistune.html(post.content)), type="html")
        fe.published(post.published_date)
        fe.updated(post.published_date)

    if request.GET.get("type") == "rss":
        fg.link(href=f"{post.blog.useful_domain()}/feed/?type=rss", rel="self")
        rssfeed = fg.rss_str(pretty=True)
        return HttpResponse(rssfeed, content_type="application/rss+xml")
    else:
        fg.link(href=f"{post.blog.useful_domain()}/feed/", rel="self")
        atomfeed = fg.atom_str(pretty=True)
        return HttpResponse(atomfeed, content_type="application/atom+xml")
def render(self, markdown):
    if self.post is not None and self.post.id is not None:
        image_html = {img.filename: self.image_html(img)
                      for img in self.post.images.all()}
        translate = {
            m['full']: image_html.get(m['filename'],
                                      "[IMAGE NOT FOUND]").format(caption=m['alt'])
            for m in RE_MARKDOWN_IMG.finditer(markdown)
        }
        for orig, repl in translate.items():
            markdown = markdown.replace(orig, repl)
    html = mistune.html(markdown)
    return html
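# RE_MARKDOWN_IMG is referenced but not defined in the snippet; a plausible
# definition (an assumption, not the original) with the named groups the
# dict comprehension reads:
import re

RE_MARKDOWN_IMG = re.compile(
    r"(?P<full>!\[(?P<alt>[^\]]*)\]\((?P<filename>[^)\s]+)\))"
)

m = RE_MARKDOWN_IMG.search("Intro ![a cat](cat.png) outro")
print(m["full"], m["alt"], m["filename"])  # -> ![a cat](cat.png) a cat cat.png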
def index(map_identifier):
    topic_store = get_topic_store()

    if current_user.is_authenticated:  # User is logged in
        is_map_owner = topic_store.is_topic_map_owner(map_identifier, current_user.id)
        if is_map_owner:
            topic_map = topic_store.get_topic_map(map_identifier, current_user.id)
        else:
            topic_map = topic_store.get_topic_map(map_identifier)
        if topic_map is None:
            abort(404)
        collaboration_mode = topic_store.get_collaboration_mode(
            map_identifier, current_user.id)
        # The map is private and doesn't belong to the user who is trying to
        # access it
        if not topic_map.published and not is_map_owner:
            if not collaboration_mode:  # The user is not collaborating on the map
                abort(403)
    else:  # User is not logged in
        topic_map = topic_store.get_topic_map(map_identifier)
        if topic_map is None:
            abort(404)
        if not topic_map.published:
            # User is not logged in and the map is not published
            abort(403)

    topic = topic_store.get_topic(map_identifier, "notes")
    if topic is None:
        abort(404)

    note_occurrences = topic_store.get_topic_occurrences(
        map_identifier,
        "notes",
        "note",
        inline_resource_data=RetrievalMode.INLINE_RESOURCE_DATA,
        resolve_attributes=RetrievalMode.RESOLVE_ATTRIBUTES,
    )

    notes = []
    for note_occurrence in note_occurrences:
        notes.append({
            "identifier": note_occurrence.identifier,
            "title": note_occurrence.get_attribute_by_name("title").value,
            "timestamp": maya.parse(
                note_occurrence.get_attribute_by_name(
                    "modification-timestamp").value),
            "text": mistune.html(note_occurrence.resource_data.decode()),
        })

    return render_template("note/index.html",
                           topic_map=topic_map,
                           topic=topic,
                           notes=notes)
def markdown(value):
    markup = mistune.html(value)
    cleaned_markup = clean_html(markup)

    # linkify hashtags
    for tag in re.findall(r"(#[\d\w\.]+)", cleaned_markup):
        text_tag = tag.replace('#', '')
        cleaned_markup = cleaned_markup.replace(
            tag, f"<a href='/blog/?q=%23{text_tag}'>{tag}</a>")

    return cleaned_markup
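# Quick demonstration of the hashtag linkification alone, with clean_html
# stubbed out as the identity function (an assumption; the project's real
# cleaner is not shown in the snippet):
import re
import mistune

clean_html = lambda markup: markup

print(markdown("Tagged with #python"))
# e.g. <p>Tagged with <a href='/blog/?q=%23python'>#python</a></p>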
def markdown2email(text):
    if has_mistune:
        msg = MIMEMultipart("alternative")
        html = mistune.html(text)
        part1 = MIMEText(text, "plain")
        part2 = MIMEText(html, "html")
        msg.attach(part1)
        msg.attach(part2)
        return msg
    else:
        return MIMEText(text, "plain")
def assert_case(self, n, text, html):
    result = mistune.html(text)
    # normalize to match commonmark
    result = re.sub(r'\s*\n+\s*', '\n', result)
    result = re.sub(r'>\n', '>', result)
    result = re.sub(r'\n<', '<', result)
    expect = re.sub(r'\s*\n+\s*', '\n', html)
    expect = re.sub(r'>\n', '>', expect)
    expect = re.sub(r'\n<', '<', expect)
    if n in DIFFERENCES:
        expect = DIFFERENCES[n](expect)
    self.assertEqual(result, expect)
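# The three substitutions only normalize whitespace, so cosmetic layout
# differences against the CommonMark fixtures do not fail the comparison.
# A standalone illustration of the same normalization:
import re

def normalize(s):
    s = re.sub(r'\s*\n+\s*', '\n', s)  # collapse whitespace runs to one newline
    s = re.sub(r'>\n', '>', s)         # drop a newline right after a tag ends
    s = re.sub(r'\n<', '<', s)         # drop a newline right before a tag starts
    return s

assert normalize("<ul>\n  <li>a</li>\n</ul>\n") == normalize("<ul><li>a</li></ul>")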
def export_html(path: str, document: Document):
    data = mistune.html(document.content)
    html = f"<!doctype html><html lang=en><head><meta charset=utf-8><title>{document.title}</title>" + \
        """<style>
*,:after,:before{-webkit-box-sizing:border-box;box-sizing:border-box}:after,:before{text-decoration:inherit;vertical-align:inherit}html{cursor:default;line-height:1.5;-moz-tab-size:4;-o-tab-size:4;tab-size:4;-webkit-tap-highlight-color:transparent;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%;word-break:break-word}body{margin:0}h1{font-size:2em;margin:.67em 0}dl dl,dl ol,dl ul,ol dl,ol ol,ol ul,ul dl,ul ol,ul ul{margin:0}nav ol,nav ul{list-style:none;padding:0}pre{font-family:monospace,monospace;font-size:1em}abbr[title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}small{font-size:80%}audio,canvas,iframe,img,svg,video{vertical-align:middle}audio,video{display:inline-block}audio:not([controls]){display:none;height:0}iframe,img{border-style:none}svg:not([fill]){fill:currentColor}svg:not(:root){overflow:hidden}table{border-collapse:collapse}button,input,select{margin:0}button{overflow:visible;text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}fieldset{border:1px solid #a0a0a0;padding:.35em .75em .625em}input{overflow:visible}legend{color:inherit;display:table;max-width:100%;white-space:normal}progress{display:inline-block;vertical-align:baseline}select{text-transform:none}textarea{margin:0}[type=checkbox],[type=radio]{padding:0}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}::-webkit-input-placeholder{color:inherit;opacity:.54}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}::-moz-focus-inner{border-style:none;padding:0}:-moz-focusring{outline:1px dotted ButtonText}:-moz-ui-invalid{box-shadow:none}details,dialog{display:block}dialog{background-color:#fff;border:solid;color:#000;height:-moz-fit-content;height:-webkit-fit-content;height:fit-content;left:0;margin:auto;padding:1em;position:absolute;right:0;width:-moz-fit-content;width:-webkit-fit-content;width:fit-content}dialog:not([open]){display:none}summary{display:list-item}canvas{display:inline-block}template{display:none}[tabindex],a,area,button,input,label,select,summary,textarea{-ms-touch-action:manipulation;touch-action:manipulation}[hidden]{display:none}[aria-busy=true]{cursor:progress}[aria-controls]{cursor:pointer}[aria-disabled=true],[disabled]{cursor:not-allowed}[aria-hidden=false][hidden]{display:initial}[aria-hidden=false][hidden]:not(:focus){clip:rect(0,0,0,0);position:absolute}/*!
Marx v3.0.6 - The classless CSS reset (perfect for Communists) | MIT License | https://github.com/mblode/marx */article,aside,details,footer,header,main,section,summary{margin:0 auto 16px;width:100%}main{display:block;margin:0 auto;max-width:768px;padding:0 16px 16px}footer{border-top:1px solid rgba(0,0,0,.12);padding:16px 0;text-align:center}footer p{margin-bottom:0}hr{border:0;border-top:1px solid rgba(0,0,0,.12);display:block;margin-top:16px;margin-bottom:16px;width:100%;-webkit-box-sizing:content-box;box-sizing:content-box;height:0;overflow:visible}img{height:auto;max-width:100%;vertical-align:baseline}@media screen and (max-width:400px){article,aside,section{clear:both;display:block;max-width:100%}img{margin-right:16px}}embed,iframe,video{border:0}body{color:rgba(0,0,0,.8);font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol;font-size:16px;line-height:1.5}p{margin:0 0 16px}h1,h2,h3,h4,h5,h6{color:inherit;font-family:inherit;line-height:1.2;font-weight:500}h1{font-size:40px}h1,h2{margin:20px 0 16px}h2{font-size:32px}h3{font-size:28px}h3,h4{margin:16px 0 4px}h4{font-size:24px}h5{font-size:20px}h5,h6{margin:16px 0 4px}h6{font-size:16px}small{color:rgba(0,0,0,.54);vertical-align:bottom}pre{background:#f7f7f9;display:block;margin:16px 0;padding:16px;white-space:pre-wrap;overflow-wrap:break-word}code,pre{color:rgba(0,0,0,.8);font-family:Menlo,Monaco,Consolas,Courier New,monospace;font-size:16px}code{line-height:inherit;margin:0;padding:0;vertical-align:baseline;word-break:break-all;word-wrap:break-word}a{color:#007bff;text-decoration:none;background-color:rgba(0,0,0,0)}a:focus,a:hover{color:#0062cc;text-decoration:underline}dl{margin-bottom:16px}dd{margin-left:40px}ol,ul{margin-bottom:8px;padding-left:40px;vertical-align:baseline}blockquote{border-left:2px solid rgba(0,0,0,.8);font-style:italic;margin:16px 0;padding-left:16px}blockquote,figcaption{font-family:Georgia,Times,Times New Roman,serif}u{text-decoration:underline}s{text-decoration:line-through}sup{vertical-align:super}sub,sup{font-size:14px}sub{vertical-align:sub}mark{background:#ffeb3b}input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{background:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.12);border-radius:4px;color:rgba(0,0,0,.8);display:block;width:100%;font-size:1rem;padding:8px 16px;line-height:1.5;-webkit-transition:border-color .15s ease-in-out,-webkit-box-shadow .15s ease-in-out;transition:border-color .15s ease-in-out,-webkit-box-shadow .15s ease-in-out;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out,-webkit-box-shadow .15s ease-in-out;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol}input[type=color]{background:#fff;border:1px solid rgba(0,0,0,.12);border-radius:4px;display:inline-block;vertical-align:middle}input:not([type]){-webkit-appearance:none;background:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.12);border-radius:4px;color:rgba(0,0,0,.8);display:block;width:100%;padding:8px 16px;line-height:1.5;-webkit-transition:border-color .15s ease-in-out,-webkit-box-shadow .15s ease-in-out;transition:border-color .15s 
ease-in-out,-webkit-box-shadow .15s ease-in-out;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out,-webkit-box-shadow .15s ease-in-out;text-align:left}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus,select:focus,textarea:focus{background-color:#fff;border-color:#80bdff;outline:0;-webkit-box-shadow:0 0 0 .2rem rgba(0,123,255,.25);box-shadow:0 0 0 .2rem rgba(0,123,255,.25)}input:not([type]):focus{background-color:#fff;border-color:#80bdff;outline:0;-webkit-box-shadow:0 0 0 .2rem rgba(0,123,255,.25);box-shadow:0 0 0 .2rem rgba(0,123,255,.25)}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:1px thin rgba(0,0,0,.12)}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled],select[disabled],textarea[disabled]{background-color:rgba(0,0,0,.12);color:rgba(0,0,0,.54);cursor:not-allowed;opacity:1}input:not([type])[disabled]{background-color:rgba(0,0,0,.12);color:rgba(0,0,0,.54);cursor:not-allowed;opacity:1}input[readonly],select[readonly],textarea[readonly]{border-color:rgba(0,0,0,.12);color:rgba(0,0,0,.54)}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{border-color:#ea1c0d;color:#f44336}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#f44336}select{border:1px solid rgba(0,0,0,.12);vertical-align:sub}select:not([size]):not([multiple]){height:-webkit-calc(2.25rem + 2px);height:calc(2.25rem + 2px)}select[multiple]{height:auto}label{display:inline-block;line-height:2}fieldset{border:0;margin:0;padding:8px 0}legend{border-bottom:1px solid rgba(0,0,0,.12);color:rgba(0,0,0,.8);display:block;margin-bottom:8px;padding:8px 0;width:100%}textarea{overflow:auto;resize:vertical}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;box-sizing:border-box;padding:0;display:inline}button,input[type=button],input[type=reset],input[type=submit]{background-color:#007bff;border:#007bff;border-radius:4px;color:#fff;padding:8px 16px;display:inline-block;font-weight:400;text-align:center;white-space:nowrap;vertical-align:middle;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;border:1px solid rgba(0,0,0,0);font-size:1rem;line-height:1.5;-webkit-transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,-webkit-box-shadow .15s ease-in-out;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,-webkit-box-shadow .15s ease-in-out;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out,-webkit-box-shadow .15s 
ease-in-out}button::-moz-focus-inner,input[type=button]::-moz-focus-inner,input[type=reset]::-moz-focus-inner,input[type=submit]::-moz-focus-inner{padding:0}button:hover,input[type=button]:hover,input[type=reset]:hover,input[type=submit]:hover{background-color:#0069d9;border-color:#0062cc;color:#fff}button:not(:disabled):active,input[type=button]:not(:disabled):active,input[type=reset]:not(:disabled):active,input[type=submit]:not(:disabled):active{background-color:#0062cc;border-color:#005cbf;color:#fff}button:focus,input[type=button]:focus,input[type=reset]:focus,input[type=submit]:focus{outline:0;-webkit-box-shadow:0 0 0 .2rem rgba(0,123,255,.5);box-shadow:0 0 0 .2rem rgba(0,123,255,.5)}button:disabled,input[type=button]:disabled,input[type=reset]:disabled,input[type=submit]:disabled{opacity:.65;cursor:not-allowed;background-color:#007bff;border-color:#007bff;color:#fff}table{border-top:1px solid rgba(0,0,0,.12);margin-bottom:16px}caption{padding:8px 0}thead th{border:0;border-bottom:2px solid rgba(0,0,0,.12);text-align:left}tr{margin-bottom:8px}td,th{border-bottom:1px solid rgba(0,0,0,.12);padding:16px;vertical-align:inherit}tfoot tr{text-align:left}tfoot td{color:rgba(0,0,0,.54);font-size:8px;font-style:italic;padding:16px 4px}
</style>
</head>
<body>
<main>
""" + data + "</main></body></html>"
    Exporter.write_to_file(path, html)
    return path
def assert_case(self, n, text, html):
    result = mistune.html(text)
    # normalize to match commonmark
    result = re.sub(r"\s*\n+\s*", "\n", result)
    result = re.sub(r">\n", ">", result)
    result = re.sub(r"\n<", "<", result)
    expect = re.sub(r"\s*\n+\s*", "\n", html)
    expect = re.sub(r">\n", ">", expect)
    expect = re.sub(r"\n<", "<", expect)
    if n in DIFFERENCES:
        expect = DIFFERENCES[n](expect)
    self.assertEqual(result, expect)
def markdown(value):
    markup = mistune.html(value)
    soup = html_parser(markup, 'html.parser')
    # give each heading a slug-based id
    heading_tags = soup.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
    for each_tag in heading_tags:
        each_tag.attrs['id'] = slugify(each_tag.text)
    cleaned_markup = clean_html(str(soup))
    return cleaned_markup
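# Usage sketch with minimal stand-ins for the helpers the filter expects
# (slugify and clean_html here are assumptions, not the project's versions):
import re
import mistune
from bs4 import BeautifulSoup as html_parser

def slugify(text):
    return re.sub(r"[^a-z0-9]+", "-", text.lower()).strip("-")

clean_html = lambda markup: markup

print(markdown("## Getting Started"))
# e.g. <h2 id="getting-started">Getting Started</h2>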
def blogtestpage():
    blogtitle = 'Test page'
    blogimgpath = 'img/thumb/pythoncode.png'
    # markdown file path (cvapp folder)
    path = os.path.join(rootdir(), "templates/blog/Markdowntest.md")
    # get file content
    with open(path, "r") as f:
        textmd = f.read()
    # transform to HTML (with mistune) and return
    return render_template('test/TestPage_blogpost_MD.html',
                           textmd=mistune.html(textmd),
                           blogtitle=blogtitle,
                           blogimgpath=blogimgpath)
def parse_markdown(in_file):
    '''Read resume data from a markdown file.'''
    with open(in_file, 'r') as file:
        file_text = file.read()
    file_html = mistune.html(file_text)
    soup = BeautifulSoup(file_html, 'html.parser')

    md_data = {'name': soup.h1.text.strip(), 'sections': []}

    # loop through the contact-info paragraphs
    for line in soup.find_all('p'):
        if 'PHONE' in line.text:
            md_data['phone'] = line.text[6:].strip()
        elif 'EMAIL' in line.text:
            md_data['email'] = line.text[6:].strip()
        elif 'WEBSITE' in line.text:
            md_data['website'] = line.text[8:].strip()
        elif 'LINKEDIN' in line.text:
            md_data['linkedin'] = line.text[9:].strip()

    # loop through each h2 header
    h2_index = -1
    h3_index = -1
    for section in soup.find_all(['h2', 'h3', 'h4', 'ul']):
        if 'h2' in str(section):
            h2_index += 1
            section_data = {'title': section.text.strip(), 'entries': []}
            md_data['sections'].append(section_data)
            h3_index = -1
        elif 'h3' in str(section):
            entry_data = {'title': section.text.strip()}
            md_data['sections'][h2_index]['entries'].append(entry_data)
            h3_index += 1
        elif 'h4' in str(section):
            md_data['sections'][h2_index]['entries'][h3_index][
                'subtitle'] = section.text.strip()
        elif 'ul' in str(section):
            md_data['sections'][h2_index]['entries'][h3_index]['info'] = []
            for line in section.find_all('li'):
                if 'LOCATION' in line.text:
                    md_data['sections'][h2_index]['entries'][h3_index][
                        'location'] = line.text[9:].strip()
                elif 'DATES' in line.text:
                    md_data['sections'][h2_index]['entries'][h3_index][
                        'dates'] = line.text[6:].strip()
                else:
                    md_data['sections'][h2_index]['entries'][h3_index][
                        'info'].append(line.text)

    return md_data
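# The parser assumes a fixed resume layout; a hypothetical input that
# satisfies it (the field keywords come from the string checks above):
sample = """# Jane Doe

PHONE 555-0100

EMAIL jane@example.com

## Experience

### Acme Corp

#### Senior Engineer

- LOCATION Berlin
- DATES 2019-2023
- Built the widget pipeline
"""

with open("resume.md", "w") as f:
    f.write(sample)
data = parse_markdown("resume.md")
print(data["sections"][0]["entries"][0]["subtitle"])  # Senior Engineer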
def blogpage1():
    blogtitle = 'Easily Set Up a Docker Container and Environment ' \
                'for Data Science with Jupyter Notebook'
    blogimgpath = 'img/blog/jupyterdocker_intro.png'
    # MD FILE
    path = os.path.join(rootdir(), "templates/blog/JupyterDockerImage.md")
    with open(path, "r") as f:
        textmd = f.read()
    # transform to HTML (with mistune) and return
    return render_template('Page_blogpostmarkdown_template.html',
                           textmd=mistune.html(textmd),
                           blogtitle=blogtitle,
                           blogimgpath=blogimgpath)
def markdownToHTML(fileName):
    with open(fileName, "r") as f:
        rendered = mistune.html(f.read())

    fileName = fileName.split(".md")[0]
    skeleton_css = CSS("./markdownpdfpy/css/skeleton.css")
    milligram_css = CSS("./markdownpdfpy/css/milligram.min.css")
    HTML(string=rendered).write_pdf(target=f"{fileName}.pdf",
                                    stylesheets=[skeleton_css, milligram_css])
def get_repo(category: str = ""): if not github.authorized: return Response("Permission denied", status=403) repo_id = gorse_client.get_recommend(session["user_id"], category)[0] full_name = repo_id.replace(":", "/") github_client = Github(github.token["access_token"]) repo = github_client.get_repo(full_name) # convert readme to html download_url = repo.get_readme().download_url.lower() content = repo.get_readme().decoded_content.decode("utf-8") if download_url.endswith(".rst"): html = publish_parts(content, writer_name="html")["html_body"] else: html = mistune.html(content) soup = BeautifulSoup(html, "html.parser") for a in soup.find_all("a"): if "href" in a.attrs: # open links in new tab a.attrs["target"] = "__blank" # redirect links to github src = a.attrs["href"] if not src.startswith("http://") and not src.startswith( "https://"): a.attrs["href"] = (repo.html_url + "/blob/" + repo.default_branch + "/" + src) blob_url = repo.html_url + '/blob/' for img in soup.find_all("img"): # redirect links to github src = img.attrs["src"] if not src.startswith("http://") and not src.startswith("https://"): if src.startswith("./"): src = src[2:] img.attrs[ "src"] = repo.html_url + "/raw/" + repo.default_branch + "/" + src elif src.startswith(blob_url): img.attrs["src"] = repo.html_url + '/raw/' + src[len(blob_url):] return { "item_id": repo_id, "full_name": repo.full_name, "html_url": repo.html_url, "stargazers_url": repo.stargazers_url, "forks_url": repo.forks_url, "stargazers": repo.get_stargazers().totalCount, "forks": repo.get_forks().totalCount, "readme": emoji.emojize(str(soup), use_aliases=True), }
def post(request, slug):
    address_info = resolve_address(request)
    if not address_info:
        return redirect('/')
    blog = address_info['blog']
    ip_address = client_ip(request)

    if request.method == "POST":
        upvoted_post = get_object_or_404(Post, blog=blog, slug=slug)
        posts_upvote_dupe = upvoted_post.upvote_set.filter(
            ip_address=ip_address)
        if len(posts_upvote_dupe) == 0:
            upvote = Upvote(post=upvoted_post, ip_address=ip_address)
            upvote.save()

    if request.GET.get('preview'):
        all_posts = blog.post_set.annotate(
            upvote_count=Count('upvote')).all().order_by('-published_date')
    else:
        all_posts = blog.post_set.annotate(
            upvote_count=Count('upvote')).filter(
                publish=True).order_by('-published_date')

    post = get_post(all_posts, slug)

    upvoted = False
    for upvote in post.upvote_set.all():
        if upvote.ip_address == ip_address:
            upvoted = True

    content = mistune.html(post.content)

    return render(
        request, 'post.html', {
            'blog': blog,
            'content': content,
            'post': post,
            'nav': get_nav(all_posts),
            'root': address_info['root'],
            'meta_description': unmark(post.content)[:160],
            'upvoted': upvoted
        })
def handle(self, *args, **options):
    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS 11.1.0; rv:42.0) "
                      "Gecko/20100101 Firefox/42.0"
    }
    response = requests.get(
        f"https://remoteok.io/api?tag={settings.GITHUB_ISSUES_LABELS}",
        headers=headers,
    )
    content = response.json()
    # skip the first element of the response, which is not a job posting
    for job in content[1:10]:
        if Job.objects.filter(issue_number=job["id"]):
            continue
        new_job_data = {
            "title": job["position"],
            "company_name": job["company"],
            "description": mistune.html(job["description"]),
            "requirements": "Click the apply button to learn more about this "
                            f"opportunity.<br/> Job from <a href='{job['apply_url']}'>RemoteOk.io</a>",
            "country": Country.objects.get_or_create(name="Worldwide")[0],
            "issue_number": job["id"],
            "remote": True,
            "job_level": 5,
            "application_link": job["apply_url"],
            "salary_range": 10,
        }
        skills = []
        for label in job["tags"]:
            skills.append(Skill.objects.get_or_create(name=label)[0])
        if all([
            new_job_data["description"],
            new_job_data["title"],
            new_job_data["company_name"],
        ]):
            job = Job.objects.create(**new_job_data)
            job.skills.set(skills)
            job.save()
def from_issue(self, issue):
    self.title = issue.title
    self.labels = [label.name for label in issue.labels]
    self.issue_url = issue.html_url

    # Break down the body: each <h2> maps to an attribute, and the text of
    # its siblings up to the next <h2> becomes the value.
    soup = BeautifulSoup(mistune.html(issue.body), "lxml")
    headers = soup.find_all("h2")
    for header in headers:
        attribute = HEADER_2_ATTRIBUTES[header.text.lower()]
        text = []
        for sib in header.find_next_siblings():
            if sib.name == "h2":
                break
            else:
                text.append(sib.text)
        text = "".join(text)
        setattr(self, attribute, text)
    return self
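# HEADER_2_ATTRIBUTES is referenced but not shown; a hypothetical mapping
# illustrating the expected shape (lowercased <h2> text -> attribute name):
HEADER_2_ATTRIBUTES = {
    "description": "description",
    "requirements": "requirements",
    "how to apply": "application_instructions",
}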
def home(request):
    address_info = resolve_address(request)
    if not address_info:
        return render(request, 'landing.html')
    blog = address_info['blog']
    all_posts = blog.post_set.filter(publish=True).order_by('-published_date')
    content = mistune.html(blog.content)

    return render(
        request, 'home.html', {
            'blog': blog,
            'content': content,
            'posts': get_posts(all_posts),
            'nav': get_nav(all_posts),
            'root': address_info['root'],
            'meta_description': unmark(blog.content)[:160]
        })
def feed(request):
    address_info = resolve_address(request)
    if not address_info:
        return redirect('/')
    blog = address_info['blog']
    root = address_info['root']

    all_posts = blog.post_set.filter(
        publish=True, is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(f'http://{root}/')
    fg.author({'name': blog.subdomain, 'email': 'hidden'})
    fg.title(blog.title)
    if blog.content:
        fg.subtitle(clean_text(unmark(blog.content)[:160]))
    else:
        fg.subtitle(blog.title)
    fg.link(href=f"http://{root}/", rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"http://{root}/{post.slug}/")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': 'hidden'})
        fe.link(href=f"http://{root}/{post.slug}/")
        fe.content(clean_text(mistune.html(post.content)), type="html")
        fe.published(post.published_date)
        fe.updated(post.published_date)

    if request.GET.get('type') == 'rss':
        fg.link(href=f"http://{root}/feed/?type=rss", rel='self')
        rssfeed = fg.rss_str(pretty=True)
        return HttpResponse(rssfeed, content_type='application/rss+xml')
    else:
        fg.link(href=f"http://{root}/feed/", rel='self')
        atomfeed = fg.atom_str(pretty=True)
        return HttpResponse(atomfeed, content_type='application/atom+xml')
def fixDefi(self, defi: str) -> str:
    import mistune

    defi = defi.replace("\n @", "\n@") \
        .replace("\n :", "\n:") \
        .replace("\n &", "\n&")
    defi = defi.lstrip()
    if defi.startswith("<html>"):
        defi = defi[len("<html>"):].lstrip()
        i = defi.find("</html>")
        if i >= 0:
            defi = defi[:i]
    else:
        defi = mistune.html(defi)
    if self._extract_inline_images:
        defi, images = extractInlineHtmlImages(
            defi,
            self._glos.tmpDataDir,
            fnamePrefix="",  # maybe f"{self._pos:06d}-"
        )
        if images:
            defi = (defi, images)
    return defi
def markdown(content):
    if not content:
        return ''
    markup = mistune.html(content)
    soup = html_parser(markup, 'html.parser')

    heading_tags = soup.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])
    for each_tag in heading_tags:
        each_tag.attrs['id'] = slugify(each_tag.text)

    for each_anchor in soup.find_all('a', href=True):
        if 'tab:' in each_anchor.attrs['href']:
            each_anchor.attrs['href'] = each_anchor.attrs['href'].replace(
                'tab:', '')
            each_anchor.attrs['target'] = '_blank'

    for match in soup.findAll('code'):
        if match.parent.name == 'pre':
            if match.has_attr('class'):
                match.parent.attrs['class'] = match['class'][0].replace(
                    'language-', '')
            match.replaceWithChildren()
        else:
            if len(match.contents) > 0:
                new_tag = soup.new_tag("code")
                new_tag.append(html.escape(str(match.contents[0])))
                match.replace_with(new_tag)

    host_whitelist = [
        'www.youtube.com', 'www.slideshare.net', 'player.vimeo.com'
    ]
    cleaner = Cleaner(host_whitelist=host_whitelist)
    cleaned_markup = cleaner.clean_html(str(soup))
    # TODO: add 'sandbox' attribute to all iframes
    return cleaned_markup