def run_asm_stats(assembly, out_file):
    """Run the assembly-stats notebook on ``assembly`` and write HTML to ``out_file``.

    The notebook reads its input path from the FASTA_FILE environment
    variable; the assembly file is deleted once the notebook has run.
    """
    os.environ["FASTA_FILE"] = assembly
    nb = read(open("masmvaliweb/notebooks/assembly-stats.ipynb"), 'json')
    runner = NotebookRunner(nb)
    runner.run_notebook()
    os.remove(assembly)
    cfg = Config({'HTMLExporter': {'default_template': 'basic'}})
    exporter = HTMLExporter(config=cfg)
    with open(out_file, 'w') as handle:
        handle.write(exporter.from_notebook_node(runner.nb)[0])
def convert_nb_html(nb):
    """Convert a notebook's output to an HTML fragment.

    Executes the notebook, exports it with nbconvert's 'basic' template,
    then keeps only the output divs and rendered markdown cells.
    """
    nb = run_notebook(nb)
    config = Config({'HTMLExporter': {'default_template': 'basic'}})
    exportHtml = HTMLExporter(config=config)
    html, resources = exportHtml.from_notebook_node(nb)
    # Name the parser explicitly: BeautifulSoup(html) picks whichever HTML
    # parser happens to be installed, which varies across machines and
    # raises GuessedAtParserWarning in current bs4 releases.
    soup = BeautifulSoup(html, 'html.parser')
    return ''.join(map(str, soup.findAll(
        "div",
        {"class": ["output", "text_cell_render border-box-sizing rendered_html"]})))
def convert_nb_html(nb):
    """Convert a notebook's output to HTML, keeping only selected divs."""
    executed = run_notebook(nb)
    exporter = HTMLExporter(
        config=Config({'HTMLExporter': {'default_template': 'basic'}}))
    html, _resources = exporter.from_notebook_node(executed)
    soup = BeautifulSoup(html)
    # Only code-output divs and rendered markdown cells survive.
    filters = ["output", "text_cell_render border-box-sizing rendered_html"]
    fragments = soup.findAll("div", {"class": filters})
    return ''.join(str(tag) for tag in fragments)
def html_conversion(nbjson, template=None):
    """Render a notebook node to an HTML string.

    By default nbconvert's 'full' template is used; pass ``template`` (a
    .tpl filename resolvable from TEMPLATE_DIR, e.g. one of the templates
    in ezmarkdown.template such as
    'ezmarkdown.template.TEMPLATE_OUTPUT_CELLS_ONLY') to override it.
    """
    loader = jinja2.FileSystemLoader(TEMPLATE_DIR)
    if template:
        cfg = Config({"HTMLExporter": {"template_file": template}})
    else:
        cfg = Config({"HTMLExporter": {"default_template": 'full'}})
    exporter = HTMLExporter(config=cfg, extra_loaders=[loader])
    html, _resources = exporter.from_notebook_node(nbjson)
    return html
def export_unit_to_html(unit):
    """Export unit into html format."""
    here = os.path.dirname(os.path.realpath(__file__))
    cfg = Config({
        'HTMLExporter': {
            'template_file': 'basic_reduced',
            'template_path': ['.', here],
            'filters': {'markdown2html': markdown2html_pandoc},
        },
    })
    exporter = HTMLExporter(config=cfg)
    body, _resources = exporter.from_notebook_node(unit)
    # Rewrite LaTeX equation environments into MathJax display-math
    # delimiters \[ ... \].
    for pattern, repl in ((r'\\begin\{ *equation *\}', '\['),
                          (r'\\end\{ *equation *\}', '\]')):
        body = re.sub(pattern, repl, body)
    return body
def display(user, path):
    """Render a user's notebook as HTML, caching the result in redis.

    Looks up /home/<user>/notebooks/<path>.ipynb and returns a
    ("No such notebook", 404) tuple when missing. The cache key includes
    the file's mtime, so editing the notebook invalidates its own entry.
    """
    full_path = os.path.join('/home', user, 'notebooks', path + '.ipynb')
    if not os.path.exists(full_path):
        return "No such notebook", 404
    mtime = str(os.stat(full_path).st_mtime)
    key = cache_key(user, path, mtime)
    cached = redis.get(key)
    if cached:
        return cached
    exportHtml = HTMLExporter(
        config=Config({'HTMLExporter': {'default_template': 'basic'}}))
    # Use a context manager so the file handle is closed promptly instead
    # of leaking until garbage collection.
    with open(full_path) as f:
        notebook = nbformat.reads_json(f.read())
    body, res = exportHtml.from_notebook_node(notebook)
    redis.set(key, body)
    redis.expire(key, app.config['REDIS_EXPIRY_SECONDS'])
    return body
def export_html(nb, dest_path=None, image_dir=None, image_rel_dir=None):
    """Convert notebook to HTML.

    Optionally saves HTML to dest_path, and rewrites extracted image
    outputs when image_dir/image_rel_dir are given.
    """
    cfg = Config({'ExtractOutputPreprocessor': {'enabled': True}})
    exporter = HTMLExporter(template_file='full', config=cfg)
    output, resources = exporter.from_notebook_node(nb)
    header = output.split('<head>', 1)[1].split('</head>', 1)[0]
    body = output.split('<body>', 1)[1].split('</body>', 1)[0]

    # Monkeypatch CSS: scope the styles and drop rules that fight the page.
    header = header.replace('<style', '<style scoped="scoped"')
    header = header.replace(
        'body {\n overflow: visible;\n padding: 8px;\n}\n', '')
    header = header.replace("code,pre{", "code{")

    # Filter out styles that conflict with the sphinx theme.
    bad_anywhere = ['navbar', 'body{', 'alert{', 'uneditable-input{',
                    'collapse{']
    bad_anywhere += ['h%s{' % level for level in range(1, 7)]
    bad_beginning = ['pre{', 'p{margin']

    def _keep(line):
        # A line is dropped when it starts with a banned prefix or
        # contains a banned fragment anywhere.
        if any(line.startswith(prefix) for prefix in bad_beginning):
            return False
        return not any(fragment in line for fragment in bad_anywhere)

    header = '\n'.join(line for line in header.split('\n') if _keep(line))

    # Concatenate raw html lines
    html_out = '\n'.join(
        ['<div class="ipynotebook">', header, body, '</div>'])

    if image_dir is not None and image_rel_dir is not None:
        html_out = export_images(resources, image_dir, image_rel_dir,
                                 html_out)
    if dest_path is not None:
        with io.open(dest_path, 'w', encoding='utf-8') as f:
            f.write(html_out)
    return html_out
def export_html(nb, dest_path=None, image_dir=None, image_rel_dir=None):
    """Convert notebook to HTML.

    Optionally saves HTML to dest_path, and rewrites extracted image
    outputs when image_dir/image_rel_dir are given.
    """
    c = Config({'ExtractOutputPreprocessor': {'enabled': True}})
    exporter = HTMLExporter(template_file='full', config=c)
    output, resources = exporter.from_notebook_node(nb)
    header = output.split('<head>', 1)[1].split('</head>', 1)[0]
    body = output.split('<body>', 1)[1].split('</body>', 1)[0]

    # Monkeypatch CSS
    header = header.replace('<style', '<style scoped="scoped"')
    header = header.replace(
        'body {\n overflow: visible;\n padding: 8px;\n}\n', '')
    header = header.replace("code,pre{", "code{")

    # Filter out styles that conflict with the sphinx theme.
    bad_anywhere = ['navbar', 'body{', 'alert{', 'uneditable-input{',
                    'collapse{']
    bad_anywhere.extend(['h%s{' % (i + 1) for i in range(6)])
    bad_beginning = ['pre{', 'p{margin']
    header_lines = [x for x in header.split('\n')
                    if (not any(x.startswith(s) for s in bad_beginning)
                        and not any(s in x for s in bad_anywhere))]
    header = '\n'.join(header_lines)

    # Concatenate raw html lines
    lines = ['<div class="ipynotebook">']
    lines.append(header)
    lines.append(body)
    lines.append('</div>')
    html_out = '\n'.join(lines)

    if image_dir is not None and image_rel_dir is not None:
        html_out = export_images(resources, image_dir, image_rel_dir,
                                 html_out)
    if dest_path is not None:
        # Write UTF-8 explicitly: nbconvert output routinely contains
        # non-ASCII characters, and the platform default encoding (e.g.
        # cp1252 on Windows) would raise UnicodeEncodeError.
        import io
        with io.open(dest_path, 'w', encoding='utf-8') as f:
            f.write(html_out)
    return html_out
def parse(path):
    """Parse an .rst or .ipynb source file into body/date/title fields."""
    source = read_text_file(path)
    if path.endswith('.rst'):
        doctree = publish_doctree(source)
        # Flatten every docinfo field (e.g. :date:) into a plain dict.
        docinfo = {}
        for info in doctree.traverse(nodes.docinfo):
            for child in info.children:
                docinfo[child.tagname] = str(child.children[0])
        parts = publish_parts(source, writer_name='html')
        return {'body': parts['body'],
                'date': docinfo.get('date'),
                'title': parts['title']}
    elif path.endswith('.ipynb'):
        notebook = nbformat.reads_json(source)
        exporter = HTMLExporter(config=None, extra_loaders=[dl])
        body, _resources = exporter.from_notebook_node(notebook)
        return {'body': body,
                'date': notebook['metadata']['date'],
                'title': notebook['metadata']['name']}
def article_of(notebook_txt, HTMLExporter):
    """Yield metadata header lines, a blank line, then the notebook as HTML.

    Cells flagged as hidden by article_meta are removed before export.
    """
    notebook = nbformat.reads(notebook_txt)
    hide, meta = article_meta(notebook)
    # Delete from the end so the remaining indices stay valid.
    for index in sorted(hide, reverse=True):
        del notebook.cells[index]
    for name, value in meta:
        yield '%s: %s\n' % (name, value)
    yield '\n'
    exporter = HTMLExporter(
        config=Config({'HTMLExporter': {'default_template': 'basic'}}))
    body, resources = exporter.from_notebook_node(notebook)
    # [txt[:100] for txt in resources['inlining']['css']]
    yield body
def article_of(notebook_txt, HTMLExporter):
    """Generate an article: 'name: value' metadata lines, then the HTML body.

    Hidden cells (as reported by article_meta) are stripped first.
    """
    notebook = nbformat.reads(notebook_txt)
    hide, meta = article_meta(notebook)
    # Reverse order keeps earlier cell indices stable while deleting.
    for cell_ix in sorted(hide, reverse=True):
        del notebook.cells[cell_ix]
    for key, val in meta:
        yield '%s: %s\n' % (key, val)
    yield '\n'
    config = Config({'HTMLExporter': {'default_template': 'basic'}})
    exporter = HTMLExporter(config=config)
    body, resources = exporter.from_notebook_node(notebook)
    # [txt[:100] for txt in resources['inlining']['css']]
    yield body
def convert_to_html(notebook, resources, target_directory):
    """Return a list of strings representing the rst of the given notebook."""
    exporter = HTMLExporter(
        template_file='gallery_page',
        template_path=[os.path.dirname(__file__)],
        config={'ExtractOutputPreprocessor': {'enabled': True}})
    resources.update({'output_files_dir': 'notebook_output'})
    html, resources = exporter.from_notebook_node(notebook, resources)
    # Convert linked resources (such as images) into actual files in the
    # source directory.
    outputs = resources.get('outputs', {})
    for name, data in outputs.items():
        target = os.path.join(target_directory, name)
        parent = os.path.dirname(target)
        if not os.path.exists(parent):
            os.makedirs(parent)
        with io.open(target, 'wb') as handle:
            handle.write(data)
    return html, outputs.keys()
def _parse_ipynb(self):
    """Parse self.source as notebook JSON and populate this post's fields.

    Sets body_html, date, tags, add_mathjax, add_disqus and the
    navigation/title attributes from the notebook and its metadata.
    """
    notebook = nbformat.reads_json(self.source)
    config = Config({'HTMLExporter': {'default_template': 'basic'}})
    exporter = HTMLExporter(config=config)
    body, resources = exporter.from_notebook_node(notebook)
    # Keep from the first cell div onward and drop the trailing 36
    # characters of template markup.
    # NOTE(review): the -36 offset is coupled to the 'basic' template's
    # exact output — verify after any nbconvert upgrade.
    body = body[body.index('\n<div class="cell'):-36]  # excise template
    body = self._decapitate(body)
    body = body.replace('\n</pre>', '</pre>')
    # Enable MathJax only when the body contains an inline-math delimiter.
    self.add_mathjax = r'\(' in body
    fields = notebook['metadata']
    if 'date' in fields:
        self.date = datetime.strptime(fields['date'], '%d %B %Y').date()
    else:
        # No authored date in the metadata: fall back to "today".
        self.date = datetime.now().date()
    self.tags = set()
    if 'tags' in fields:
        # Normalize each comma-separated tag to lowercase-hyphenated form.
        self.tags.update('-'.join(tag.strip().lower().split())
                         for tag in fields['tags'].split(','))
    if self.date and self.tags:
        # Prepend an RST docinfo block (Date/Tags) rendered to HTML.
        heading = ':Date: {}\n:Tags: {}\n'.format(
            self.date.strftime('%d %B %Y').lstrip('0'),
            ', '.join(sorted(self.tags)),
        )
        parts = parse_rst(heading)
        body = parts['docinfo'] + body
    self.add_disqus = fields.get('add_disqus', False)
    self.body_html = body
    self.next_link = None
    self.previous_link = None
    self.add_title = True
def inner():
    """Stream nucmer progress as HTML chunks, then yield the rendered notebook.

    Generator presumably used as a streamed HTTP response body — TODO
    confirm against the caller.
    """
    # run MUMmer
    # NOTE(review): save_path (a closure variable) is interpolated into a
    # shell=True command string — shell-injection risk if it can contain
    # user-controlled characters; confirm how save_path is produced.
    mummer_cmd = "echo RUNNING NUCMER && bash -x ~/github/metassemble/scripts/validate/nucmer/run-nucmer.sh test/references/Mircea_07102013_selected_refs.fasta {0} /tmp/nucmer && echo SUCCESS!".format(save_path)
    #mummer_cmd = "echo fakka"
    proc = subprocess.Popen(mummer_cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, shell=True)
    # Relay the subprocess output one character at a time so the client
    # sees progress immediately; newlines become <br /> for HTML display.
    while True:
        char = proc.stdout.read(1)
        if char:
            if str(char) != '\n':
                yield str(char)
            else:
                yield '<br />\n'
        else:
            # Empty read means the pipe is closed (process finished).
            break
    #TODO: not working, points to relative dir
    yield '<a href="/tmp/nucmer/nucmer.coords">nucmer.coords</a>\n'
    # Run the comparison notebook and yield its HTML as the final chunk.
    notebook = read(open("masmvaliweb/notebooks/mgcov-comparison-mpld3.ipynb"), 'json')
    r = NotebookRunner(notebook)
    r.run_notebook()
    exportHTML = HTMLExporter(
        config=Config({'HTMLExporter': {'default_template': 'basic'}}))
    yield exportHTML.from_notebook_node(r.nb)[0]
def parse(path):
    """Parse an .rst or .ipynb file into a dict of body, date and title."""
    source = read_text_file(path)
    if path.endswith('.rst'):
        doctree = publish_doctree(source)
        # Collect docinfo fields (e.g. :date:) into a tagname -> text dict.
        docinfo = {
            child.tagname: str(child.children[0])
            for info in doctree.traverse(nodes.docinfo)
            for child in info.children
        }
        parts = publish_parts(source, writer_name='html')
        result = {'body': parts['body']}
        result['date'] = docinfo.get('date')
        result['title'] = parts['title']
        return result
    elif path.endswith('.ipynb'):
        notebook = nbformat.reads_json(source)
        exporter = HTMLExporter(config=None, extra_loaders=[dl])
        body, _ = exporter.from_notebook_node(notebook)
        metadata = notebook['metadata']
        return {'body': body,
                'date': metadata['date'],
                'title': metadata['name']}
def parse(path):
    """Parse a source file (.html, .rst or .ipynb) into a page dict.

    Returns a dict with at least 'body', 'date', 'title' and
    'needs_disqus'; .html sources additionally get 'tags' and
    'next_link'/'previous_link' placeholders.
    """
    source = read_text_file(path)
    result = {}
    if path.endswith('.html'):
        if utils.detect_blogofile(source):
            # Legacy Blogofile page: split the metadata heading from the
            # body and render the heading as an RST docinfo block.
            heading, info, other_html = utils.convert_blogofile(source)
            parts = utils.parse_rst(heading)
            body_html = parts['docinfo'] + other_html
            body_html = utils.pygmentize_pre_blocks(body_html)
            body_html = body_html.replace('\n</pre>', '</pre>')
            result['title'] = utils.html_parser.unescape(parts['title'])
            result['needs_disqus'] = True
            result['date'] = info['date']
            result['tags'] = info['tags']
        else:
            # Plain HTML page rendered through the template engine.
            result['title'] = utils.find_title_in_html(source)
            body_html = SimpleTemplate(source)
            result['needs_disqus'] = False
            result['date'] = None
            result['tags'] = ()
        result['body'] = body_html
        result['next_link'] = None
        result['previous_link'] = None
    elif path.endswith('.rst'):
        if utils.detect_blogofile(source):
            # Rebuild the source as heading + body before RST parsing.
            heading, info, body = utils.convert_blogofile(source)
            source = heading + body
            result['title'] = info['title']
            del heading, info, body
            result['needs_disqus'] = True
        else:
            result['needs_disqus'] = False
        doctree = publish_doctree(source)
        docinfos = doctree.traverse(nodes.docinfo)
        # Flatten docinfo fields (e.g. :date:) into a tagname -> text dict.
        docinfo = {c.tagname: str(c.children[0])
                   for i in docinfos for c in i.children}
        parts = utils.parse_rst(source)
        # parts = publish_from_doctree(source, writer_name='html',
        #     settings_overrides={'initial_header_level': 2})
        body = parts['docinfo'] + utils.pygmentize_pre_blocks(parts['fragment'])
        result['body'] = body
        result['date'] = datetime.strptime(
            docinfo.get('date'), '%d %B %Y').date()
        if 'title' not in result:
            result['title'] = parts['title']
    elif path.endswith('.ipynb'):
        notebook = nbformat.reads_json(source)
        docinfo = utils.build_docinfo_block_for_notebook(notebook)
        exporter = HTMLExporter(config=None, extra_loaders=[dl],
                                filters=filters)
        body, resources = exporter.from_notebook_node(notebook)
        body = body.replace('\n</pre>', '</pre>')
        # Insert the docinfo block right after the page's <h1> title.
        body = body.replace('</h1>', '</h1>\n' + docinfo.rstrip())
        date = notebook['metadata']['date']
        if date is not None:
            date = datetime.strptime(date, '%d %B %Y').date()
        result['body'] = body
        result['date'] = date
        result['needs_disqus'] = notebook['metadata'].get('needs_disqus')
        result['title'] = (notebook['metadata']['name'] or
                           utils.find_title_in_html(body))
    return result
def _generate_html(self, node, substitutions):  # pragma: no cover
    """Export ``node`` to HTML after applying the Substitute preprocessor."""
    exporter = HTMLExporter()
    preprocessor = Substitute(self.nbversion, substitutions)
    exporter.register_preprocessor(preprocessor)
    html, _resources = exporter.from_notebook_node(node)
    return html
args = parser.parse_args() host = default.host + "/blog" exportHtml = HTMLExporter(config=Config({'HTMLExporter':{'default_template':'basic'}})) f = open(args.rsakey,'r') private_key = RSA.importKey(f.read()) signer = PKCS1_v1_5.new(private_key) url = args.url if url == "": url = '-'.join((args.title).lower().split(" ")) notebook = nbformat.reads_json(open(os.path.join(args.directory, args.file), "r").read()) (body,resources) = exportHtml.from_notebook_node(notebook) content = ((body.split("</body>")[0]).split('<div class="container" id="notebook-container">')[1])[0:-16] category = "" if args.title == "root": category = "static" else: category = args.category data = {"title": args.title, "url": url, "content": content, "author": args.author, "year": datetime.datetime.now().year, "month": datetime.datetime.now().month,
def _generate_html(self, node, substitutions):  # pragma: no cover
    """Render the notebook node to HTML with substitutions applied."""
    exporter = HTMLExporter()
    exporter.register_preprocessor(
        Substitute(self.nbversion, substitutions))
    rendered, _ = exporter.from_notebook_node(node)
    return rendered