def run_asm_stats(assembly, out_file):
    """Run the assembly-stats notebook against *assembly*, write HTML to *out_file*.

    The notebook locates its input via the FASTA_FILE environment variable.
    The input FASTA is deleted once the report has been written.
    """
    os.environ["FASTA_FILE"] = assembly
    # Close the notebook file promptly instead of leaking the handle.
    with open("masmvaliweb/notebooks/assembly-stats.ipynb") as nb_fh:
        notebook = read(nb_fh, 'json')
    r = NotebookRunner(notebook)
    r.run_notebook()
    # Basic template: body only, no page boilerplate.
    exportHTML = HTMLExporter(
        config=Config({'HTMLExporter': {'default_template': 'basic'}}))
    with open(out_file, 'w') as of:
        of.write(exportHTML.from_notebook_node(r.nb)[0])
    # Remove the input only after the report was written successfully, so a
    # failed export does not destroy the uploaded assembly.
    os.remove(assembly)
def convert_nb_html(nb):
    """Convert a notebook's output to HTML.

    Runs the notebook, renders it with the 'basic' template, and returns
    only the output divs and rendered markdown cells concatenated.
    """
    executed = run_notebook(nb)
    exporter = HTMLExporter(
        config=Config({'HTMLExporter': {'default_template': 'basic'}}))
    html, _resources = exporter.from_notebook_node(executed)
    soup = BeautifulSoup(html)
    wanted = ["output", "text_cell_render border-box-sizing rendered_html"]
    divs = soup.findAll("div", {"class": wanted})
    return ''.join(str(div) for div in divs)
def convert_nb_html(nb):
    """Convert a notebook's output to HTML (output + rendered text cells only)."""
    nb = run_notebook(nb)
    exporter = HTMLExporter(config=Config(
        {'HTMLExporter': {'default_template': 'basic'}}))
    html, resources = exporter.from_notebook_node(nb)
    class_filters = ["output",
                     "text_cell_render border-box-sizing rendered_html"]
    matches = BeautifulSoup(html).findAll("div", {"class": class_filters})
    parts = [str(match) for match in matches]
    return ''.join(parts)
def html_conversion(nbjson, template=None):
    """Render a notebook node to an HTML string.

    *template* is a .tpl file name (as a string); when given it overrides
    the default 'full' template. Provisionally use the templates in class
    ezmarkdown.template, e.g. 'ezmarkdown.template.TEMPLATE_OUTPUT_CELLS_ONLY'.
    """
    loader = jinja2.FileSystemLoader(TEMPLATE_DIR)
    if template:
        cfg = Config({"HTMLExporter": {"template_file": template}})
    else:
        cfg = Config({"HTMLExporter": {"default_template": 'full'}})
    exporter = HTMLExporter(config=cfg, extra_loaders=[loader])
    body, _resources = exporter.from_notebook_node(nbjson)
    return body
def export_unit_to_html(unit):
    """Export unit into html format.

    Renders the notebook node with the 'basic_reduced' template and a
    pandoc-based markdown filter, then rewrites LaTeX `equation`
    environments into MathJax display-math delimiters \\[ ... \\].
    """
    path = os.path.dirname(os.path.realpath(__file__))
    cfg = Config({'HTMLExporter': {
        'template_file': 'basic_reduced',
        'template_path': ['.', path],
        'filters': {'markdown2html': markdown2html_pandoc},
    }})
    exportHtml = HTMLExporter(config=cfg)
    (body, resources) = exportHtml.from_notebook_node(unit)
    # Raw strings for the replacement text: plain '\[' / '\]' are invalid
    # escape sequences (SyntaxWarning on modern Python); the emitted text
    # is unchanged.
    body = re.sub(r'\\begin\{ *equation *\}', r'\[', body)
    body = re.sub(r'\\end\{ *equation *\}', r'\]', body)
    return body
def display(user, path):
    """Serve a user's notebook rendered as HTML, cached in redis.

    The cache key includes the file's mtime, so editing the notebook
    naturally invalidates the cached rendering. Returns a 404 tuple when
    the notebook file does not exist.
    """
    full_path = os.path.join('/home', user, 'notebooks', path + '.ipynb')
    if not os.path.exists(full_path):
        return "No such notebook", 404
    mtime = str(os.stat(full_path).st_mtime)
    key = cache_key(user, path, mtime)
    cached = redis.get(key)
    if cached:
        return cached
    exportHtml = HTMLExporter(
        config=Config({'HTMLExporter': {'default_template': 'basic'}}))
    # Close the notebook file instead of leaking the handle.
    with open(full_path) as fh:
        notebook = nbformat.reads_json(fh.read())
    body, res = exportHtml.from_notebook_node(notebook)
    redis.set(key, body)
    redis.expire(key, app.config['REDIS_EXPIRY_SECONDS'])
    return body
def export_html(nb, dest_path=None, image_dir=None, image_rel_dir=None):
    """Convert notebook to HTML. Optionally saves HTML to dest_path. """
    # Extract output images into separate resources so they can be
    # relocated by export_images() below.
    c = Config({'ExtractOutputPreprocessor': {'enabled': True}})
    exporter = HTMLExporter(template_file='full', config=c)
    output, resources = exporter.from_notebook_node(nb)
    # Pull out only the <head> and <body> payloads of the full page.
    header = output.split('<head>', 1)[1].split('</head>', 1)[0]
    body = output.split('<body>', 1)[1].split('</body>', 1)[0]

    # Monkeypatch CSS
    # Scope the inline styles so they don't leak into the host page.
    header = header.replace('<style', '<style scoped="scoped"')
    header = header.replace(
        'body {\n overflow: visible;\n padding: 8px;\n}\n', '')
    header = header.replace("code,pre{", "code{")

    # Filter out styles that conflict with the sphinx theme.
    bad_anywhere = [
        'navbar', 'body{', 'alert{', 'uneditable-input{', 'collapse{'
    ]
    bad_anywhere.extend(['h%s{' % (i + 1) for i in range(6)])
    bad_beginning = ['pre{', 'p{margin']
    header_lines = [
        x for x in header.split('\n')
        if (not any(x.startswith(s) for s in bad_beginning)
            and not any(s in x for s in bad_anywhere))
    ]
    header = '\n'.join(header_lines)

    # Concatenate raw html lines
    lines = ['<div class="ipynotebook">']
    lines.append(header)
    lines.append(body)
    lines.append('</div>')
    html_out = '\n'.join(lines)

    # Rewrite image references and copy output files when both dirs given.
    if image_dir is not None and image_rel_dir is not None:
        html_out = export_images(resources, image_dir, image_rel_dir,
                                 html_out)
    if dest_path is not None:
        with io.open(dest_path, 'w', encoding='utf-8') as f:
            f.write(html_out)
    return html_out
def export_html(nb, dest_path=None, image_dir=None, image_rel_dir=None):
    """Convert notebook to HTML. Optionally saves HTML to dest_path.

    Image outputs are extracted as resources; when both image_dir and
    image_rel_dir are given they are written out via export_images().
    """
    c = Config({'ExtractOutputPreprocessor': {'enabled': True}})
    exporter = HTMLExporter(template_file='full', config=c)
    output, resources = exporter.from_notebook_node(nb)
    # Keep only the <head> and <body> payloads of the rendered page.
    header = output.split('<head>', 1)[1].split('</head>', 1)[0]
    body = output.split('<body>', 1)[1].split('</body>', 1)[0]
    # Monkeypatch CSS: scope the styles, drop rules that fight the host page.
    header = header.replace('<style', '<style scoped="scoped"')
    header = header.replace(
        'body {\n overflow: visible;\n padding: 8px;\n}\n', '')
    header = header.replace("code,pre{", "code{")
    # Filter out styles that conflict with the sphinx theme.
    bad_anywhere = ['navbar', 'body{', 'alert{', 'uneditable-input{',
                    'collapse{']
    bad_anywhere.extend(['h%s{' % (i + 1) for i in range(6)])
    bad_beginning = ['pre{', 'p{margin']
    header_lines = [x for x in header.split('\n')
                    if (not any(x.startswith(s) for s in bad_beginning)
                        and not any(s in x for s in bad_anywhere))]
    header = '\n'.join(header_lines)
    # Concatenate raw html lines
    lines = ['<div class="ipynotebook">', header, body, '</div>']
    html_out = '\n'.join(lines)
    if image_dir is not None and image_rel_dir is not None:
        html_out = export_images(resources, image_dir, image_rel_dir,
                                 html_out)
    if dest_path is not None:
        # Write UTF-8 explicitly (matching the sibling implementation of
        # this function): a plain open() uses the locale encoding and can
        # fail on non-ASCII notebook content.
        with io.open(dest_path, 'w', encoding='utf-8') as f:
            f.write(html_out)
    return html_out
def parse(path):
    """Parse a .rst or .ipynb file into a {'body', 'date', 'title'} dict."""
    source = read_text_file(path)
    if path.endswith('.rst'):
        doctree = publish_doctree(source)
        # Flatten docutils docinfo nodes into a tagname -> text mapping.
        docinfo = {}
        for info in doctree.traverse(nodes.docinfo):
            for child in info.children:
                docinfo[child.tagname] = str(child.children[0])
        parts = publish_parts(source, writer_name='html')
        return {'body': parts['body'],
                'date': docinfo.get('date'),
                'title': parts['title']}
    elif path.endswith('.ipynb'):
        notebook = nbformat.reads_json(source)
        exporter = HTMLExporter(config=None, extra_loaders=[dl])
        body, resources = exporter.from_notebook_node(notebook)
        # Date and title come from the notebook metadata, not the HTML.
        return {'body': body,
                'date': notebook['metadata']['date'],
                'title': notebook['metadata']['name']}
def Viewer(nburl=None, showcode=False, render='html'):
    """Render the notebook at *nburl* (URL or local path) to an HTML body.

    showcode selects the 'full' template (input cells shown); otherwise
    'full2' is used. Only render='html' is meaningful.
    """
    from IPython.nbformat import reader as nbformat
    from IPython.nbconvert import exporters

    # Get url or file path
    if nburl:
        if nburl.startswith("http"):
            import requests
            response = requests.get(nburl)
            notebook = nbformat.reads_json(response.content)
        else:
            # open() instead of the Python-2-only file() builtin, and close
            # the handle instead of leaking it.
            with open(nburl) as fp:
                notebook = nbformat.read(fp=fp)
    else:
        notebook = ''

    # Pick the rendering template. Default to 'full' so `template` can
    # never be unbound — the original raised UnboundLocalError for any
    # render value other than 'html'.
    template = 'full'
    if not showcode and render == 'html':
        template = 'full2'

    import IPython.nbconvert
    from IPython.config import Config
    from IPython.nbconvert import HTMLExporter
    ## 'basic' would give less boilerplate and headers in the HTML;
    ## config is passed explicitly to select the template.
    exportHtml = HTMLExporter(
        config=Config({'HTMLExporter': {'template_file': template}}))
    (body, resources) = exportHtml.from_notebook_node(notebook) if False else exportHtml.from_filename(nburl)
    return body
def article_of(notebook_txt, HTMLExporter):
    """Yield meta header lines, a blank line, then the notebook as basic HTML.

    Cells flagged by article_meta() as hidden are removed before export.
    """
    notebook = nbformat.reads(notebook_txt)
    hidden, meta = article_meta(notebook)
    # Delete from the highest index down so earlier indices stay valid.
    for idx in sorted(hidden, reverse=True):
        del notebook.cells[idx]
    for name, value in meta:
        yield '%s: %s\n' % (name, value)
    yield '\n'
    exporter = HTMLExporter(config=Config(
        {'HTMLExporter': {'default_template': 'basic'}}))
    body, resources = exporter.from_notebook_node(notebook)
    yield body
def article_of(notebook_txt, HTMLExporter):
    """Generate an article: 'name: value' meta lines, a blank line, HTML body."""
    nb = nbformat.reads(notebook_txt)
    hide, meta = article_meta(nb)
    # Remove hidden cells in descending index order so deletions don't
    # shift the remaining targets.
    for cell_index in sorted(hide, reverse=True):
        del nb.cells[cell_index]
    for key, val in meta:
        yield '%s: %s\n' % (key, val)
    yield '\n'
    config = Config({'HTMLExporter': {'default_template': 'basic'}})
    html_body, _resources = HTMLExporter(config=config).from_notebook_node(nb)
    yield html_body
def convert_to_html(notebook, resources, target_directory):
    """Render *notebook* as gallery-page HTML.

    Linked resources (such as images) are written as real files under
    *target_directory*. Returns (html, names of the written output files).
    """
    exporter = HTMLExporter(template_file='gallery_page',
                            template_path=[os.path.dirname(__file__)],
                            config={'ExtractOutputPreprocessor': {
                                'enabled': True
                            }})
    resources.update({'output_files_dir': 'notebook_output'})
    html, resources = exporter.from_notebook_node(notebook, resources)
    # Materialise linked outputs (e.g. images) in the source directory.
    outputs = resources.get('outputs', {})
    for name, data in outputs.items():
        dest = os.path.join(target_directory, name)
        parent = os.path.dirname(dest)
        if not os.path.exists(parent):
            os.makedirs(parent)
        with io.open(dest, 'wb') as fh:
            fh.write(data)
    return html, outputs.keys()
def _parse_ipynb(self):
    """Parse self.source as notebook JSON and populate post attributes.

    Sets self.body_html, self.date, self.tags, self.add_mathjax,
    self.add_disqus and the navigation/title flags.
    """
    notebook = nbformat.reads_json(self.source)
    config = Config({'HTMLExporter': {'default_template': 'basic'}})
    exporter = HTMLExporter(config=config)
    body, resources = exporter.from_notebook_node(notebook)
    # Trim exporter boilerplate: keep from the first cell <div> onward and
    # drop the final 36 characters of closing template markup.
    body = body[body.index('\n<div class="cell'):-36]  # excise template
    body = self._decapitate(body)
    body = body.replace('\n</pre>', '</pre>')
    # Enable MathJax only when inline-math delimiters appear in the body.
    self.add_mathjax = r'\(' in body
    fields = notebook['metadata']
    if 'date' in fields:
        self.date = datetime.strptime(fields['date'], '%d %B %Y').date()
    else:
        # NOTE(review): undated notebooks get today's date, so the shown
        # date changes on every rebuild — confirm this is intended.
        self.date = datetime.now().date()
    self.tags = set()
    if 'tags' in fields:
        # Normalise tags: lower-case, inner whitespace collapsed to hyphens.
        self.tags.update('-'.join(tag.strip().lower().split())
                         for tag in fields['tags'].split(','))
    if self.date and self.tags:
        # Render an RST docinfo block (Date/Tags) and prepend it to the body.
        heading = ':Date: {}\n:Tags: {}\n'.format(
            self.date.strftime('%d %B %Y').lstrip('0'),
            ', '.join(sorted(self.tags)),
        )
        parts = parse_rst(heading)
        body = parts['docinfo'] + body
    self.add_disqus = fields.get('add_disqus', False)
    self.body_html = body
    self.next_link = None
    self.previous_link = None
    self.add_title = True
def inner():
    """Generator: stream nucmer output as HTML, then yield a rendered notebook."""
    # run MUMmer
    # NOTE(review): hard-coded, user-specific paths — this only works on the
    # original author's machine; confirm before reuse.
    mummer_cmd = "echo RUNNING NUCMER && bash -x ~/github/metassemble/scripts/validate/nucmer/run-nucmer.sh test/references/Mircea_07102013_selected_refs.fasta {0} /tmp/nucmer && echo SUCCESS!".format(save_path)
    #mummer_cmd = "echo fakka"
    proc = subprocess.Popen(mummer_cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, shell=True)
    # Stream the subprocess output one character at a time so the HTTP
    # response updates live; newlines become <br /> tags.
    while True:
        char = proc.stdout.read(1)
        if char:
            if str(char) != '\n':
                yield str(char)
            else:
                yield '<br />\n'
        else:
            break
    #TODO: not working, points to relative dir
    yield '<a href="/tmp/nucmer/nucmer.coords">nucmer.coords</a>\n'
    # Run the comparison notebook and yield its rendered HTML body.
    notebook = read(open("masmvaliweb/notebooks/mgcov-comparison-mpld3.ipynb"),
                    'json')
    r = NotebookRunner(notebook)
    r.run_notebook()
    exportHTML = HTMLExporter(
        config=Config({'HTMLExporter': {'default_template': 'basic'}}))
    yield exportHTML.from_notebook_node(r.nb)[0]
def parse(path):
    """Parse a .rst or .ipynb file into a {'body', 'date', 'title'} dict."""
    source = read_text_file(path)
    if path.endswith('.rst'):
        doctree = publish_doctree(source)
        docinfos = doctree.traverse(nodes.docinfo)
        # Flatten docutils docinfo nodes into a tagname -> text mapping.
        docinfo = {
            c.tagname: str(c.children[0])
            for i in docinfos for c in i.children
        }
        parts = publish_parts(source, writer_name='html')
        return {
            'body': parts['body'],
            'date': docinfo.get('date'),
            'title': parts['title']
        }
    elif path.endswith('.ipynb'):
        notebook = nbformat.reads_json(source)
        exporter = HTMLExporter(config=None, extra_loaders=[dl])
        body, resources = exporter.from_notebook_node(notebook)
        # Date and title come from notebook metadata, not the HTML.
        return {
            'body': body,
            'date': notebook['metadata']['date'],
            'title': notebook['metadata']['name']
        }
    # NOTE(review): any other extension falls through and returns None.
def parse(path):
    """Parse an .html, .rst or .ipynb source file into a result dict.

    Result keys: body, date, title, tags/needs_disqus and (html branch)
    next_link/previous_link.
    """
    source = read_text_file(path)
    result = {}
    if path.endswith('.html'):
        if utils.detect_blogofile(source):
            # Legacy blogofile page: split the metadata heading from the
            # HTML and render the heading as an RST docinfo block.
            heading, info, other_html = utils.convert_blogofile(source)
            parts = utils.parse_rst(heading)
            body_html = parts['docinfo'] + other_html
            body_html = utils.pygmentize_pre_blocks(body_html)
            body_html = body_html.replace('\n</pre>', '</pre>')
            result['title'] = utils.html_parser.unescape(parts['title'])
            result['needs_disqus'] = True
            result['date'] = info['date']
            result['tags'] = info['tags']
        else:
            result['title'] = utils.find_title_in_html(source)
            body_html = SimpleTemplate(source)
            result['needs_disqus'] = False
            result['date'] = None
            result['tags'] = ()
        result['body'] = body_html
        result['next_link'] = None
        result['previous_link'] = None
    elif path.endswith('.rst'):
        if utils.detect_blogofile(source):
            # Rebuild the source with the converted heading prepended.
            heading, info, body = utils.convert_blogofile(source)
            source = heading + body
            result['title'] = info['title']
            del heading, info, body
            result['needs_disqus'] = True
        else:
            result['needs_disqus'] = False
        doctree = publish_doctree(source)
        docinfos = doctree.traverse(nodes.docinfo)
        docinfo = {c.tagname: str(c.children[0])
                   for i in docinfos for c in i.children}
        parts = utils.parse_rst(source)
        # parts = publish_from_doctree(source, writer_name='html',
        #             settings_overrides={'initial_header_level': 2})
        body = parts['docinfo'] + utils.pygmentize_pre_blocks(parts['fragment'])
        result['body'] = body
        # NOTE(review): raises TypeError if the RST has no ':Date:' field
        # (strptime(None, ...)) — confirm all .rst posts carry a date.
        result['date'] = datetime.strptime(
            docinfo.get('date'), '%d %B %Y').date()
        if 'title' not in result:
            result['title'] = parts['title']
    elif path.endswith('.ipynb'):
        notebook = nbformat.reads_json(source)
        docinfo = utils.build_docinfo_block_for_notebook(notebook)
        exporter = HTMLExporter(config=None, extra_loaders=[dl],
                                filters=filters)
        body, resources = exporter.from_notebook_node(notebook)
        body = body.replace('\n</pre>', '</pre>')
        # Inject the docinfo block right after the page's <h1> title.
        body = body.replace('</h1>', '</h1>\n' + docinfo.rstrip())
        date = notebook['metadata']['date']
        if date is not None:
            date = datetime.strptime(date, '%d %B %Y').date()
        result['body'] = body
        result['date'] = date
        result['needs_disqus'] = notebook['metadata'].get('needs_disqus')
        result['title'] = (notebook['metadata']['name']
                           or utils.find_title_in_html(body))
    return result
def _generate_html(self, node, substitutions):  # pragma: no cover
    """Render *node* to HTML after applying the Substitute preprocessor."""
    exporter = HTMLExporter()
    preprocessor = Substitute(self.nbversion, substitutions)
    exporter.register_preprocessor(preprocessor)
    rendered, _resources = exporter.from_notebook_node(node)
    return rendered
def _generate_html(self, node, substitutions):  # pragma: no cover
    """Convert a notebook node to HTML, applying substitutions first."""
    substitute = Substitute(self.nbversion, substitutions)
    exporter = HTMLExporter()
    exporter.register_preprocessor(substitute)
    return exporter.from_notebook_node(node)[0]
# CLI argument wiring for publishing a notebook as a signed blog post.
parser.add_argument('title', type=str, help=strings_help[2])
parser.add_argument('file', type=str, help=strings_help[5])
parser.add_argument('--url', '-u', type=str, help=strings_help[3],
                    nargs='?', default="", required=False)
parser.add_argument('--directory', '-d', type=str, help=strings_help[4],
                    nargs='?', default=default.directory, required=False)
parser.add_argument('--author', '-a', type=str, help=strings_help[1],
                    nargs='?', default=default.author, required=False)
parser.add_argument('--rsakey', '-k', type=str, help=strings_help[6],
                    nargs='?', default=default.private_key_path, required=False)
parser.add_argument('--category', '-c', type=str, help=strings_help[10],
                    nargs='?', default=default.category, required=False)
parser.add_argument('--year', type=int, help=strings_help[7],
                    nargs='?', default=datetime.datetime.now().year, required=False)
parser.add_argument('--month', type=int, help=strings_help[8],
                    nargs='?', default=datetime.datetime.now().month, required=False)
parser.add_argument('--day', type=int, help=strings_help[9],
                    nargs='?', default=datetime.datetime.now().day, required=False)
args = parser.parse_args()
host = default.host + "/blog"
# Basic template: export only the notebook body, no page boilerplate.
exportHtml = HTMLExporter(
    config=Config({'HTMLExporter': {'default_template': 'basic'}}))
# NOTE(review): the key file handle is never closed.
f = open(args.rsakey, 'r')
private_key = RSA.importKey(f.read())
signer = PKCS1_v1_5.new(private_key)
# Default URL slug: hyphenated, lower-cased title.
url = args.url
if url == "":
    url = '-'.join((args.title).lower().split(" "))
notebook = nbformat.reads_json(
    open(os.path.join(args.directory, args.file), "r").read())
(body, resources) = exportHtml.from_notebook_node(notebook)
# Keep only the notebook-container div contents; the final 16 characters
# of closing markup are sliced off.
content = ((body.split("</body>")[0]).split(
    '<div class="container" id="notebook-container">')[1])[0:-16]
category = ""
# NOTE(review): the source is truncated here — the body of this `if`
# lies outside the visible chunk.
if args.title == "root":
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Execute every example notebook and convert it to HTML."""
import glob

from IPython.config import Config
from IPython.nbconvert import HTMLExporter

# Execute the notebook cells as part of the conversion.
c = Config({'ExecutePreprocessor': {'enabled': True}})
exporter = HTMLExporter(config=c)

notebooks = glob.glob("example-notebooks/*.ipynb")
for filename in notebooks:
    print(filename)
    exporter.from_filename(filename)
def publish(notebook_name, url_path, page_title, page_description,
            ignore_last_n_cells=2, uses_plotly_offline=False,
            thumbnail=DEFAULT_THUMBNAIL_URL, **kwargs):
    '''
    Convert an IPython notebook into an HTML file that can be
    consumed by GitHub pages in plotly's documentation repo.

    Arguments:
    - notebook_name: The name of the notebook to convert.
      Used as input arg to nbconvert, nowhere else.
    - url_path: e.g. /python/offline
    - page_title: The <title> of the page.
    - page_description: So, what's this page about?
      Sell it in 160 characters or less.
      This is the <meta name="description"> tag.
    - ignore_last_n_cells: When converting to HTML, don't convert the
      ignore_last_n_cells number of cells. This is usually the cell that
      runs this `publish` command and the blank output cell that is
      always included.
    - uses_plotly_offline: If this was created with plotly.offline, then
      set this to True. This will include an extra lib (jquery) that is
      included in ipython notebook, but not included in gh-pages
    - thumbnail: Used as a thumbnail image on the notebook splash (if
      applicable) and as the image when sharing the notebook as a tweet,
      a facebook post, etc. Can be relative to the repo, eg
      '/images/static-image' or absolute,
      e.g. http://i.imgur.com/j0Uiy0n.jpg
    - language: Not sure what this is used for.

    Example:
    publish('Plotly Offline',
            '/python/offline',
            'Plotly Offline for IPython Notebooks',
            'How to use Plotly offline inside IPython notebooks '
            'with Plotly Offline',
            uses_plotly_offline=True)
    '''
    # backwards compatability
    if 'thumbnail_url' in kwargs and kwargs[
            'thumbnail'] == DEFAULT_THUMBNAIL_URL:
        kwargs['thumbnail'] = kwargs['thumbnail_url']
    warnings.warn('Did you "Save" this notebook before running this command? '
                  'Remember to save, always save.')
    parts = url_path.split('/')
    if len(parts) > 3:
        warnings.warn('Your URL has more than 2 parts... are you sure?')
    # Canonicalise the permalink: no trailing slash.
    if url_path[-1] == '/':
        url_path = url_path[:-1]
    if len(page_description) > 160:
        raise Exception("Shorten up that page_description! "
                        "Your description was {} characters, "
                        "and it's gotta be <= than 160.".format(
                            len(page_description)))
    if thumbnail == DEFAULT_THUMBNAIL_URL:
        has_thumbnail = 'false'
    else:
        has_thumbnail = 'true'
    if '.ipynb' not in notebook_name:
        notebook_name += '.ipynb'
    fn = notebook_name
    tmpfn = 'temp-{}'.format(fn)
    # Strip the trailing cells (usually this publish call itself) and write
    # the reduced notebook to a temp file for nbconvert.
    nbjson = json.load(open(fn))
    if 'cells' in nbjson:
        nbjson['cells'] = nbjson['cells'][:-ignore_last_n_cells]
    elif 'worksheets' in nbjson:
        # Legacy (v3) notebook format keeps cells inside worksheets.
        if len(nbjson['worksheets']) != 1:
            raise Exception('multiple worksheets?')
        elif 'cells' in nbjson['worksheets'][0]:
            nbjson['worksheets'][0]['cells'] = nbjson['worksheets'][0][
                'cells'][:-ignore_last_n_cells]
        else:
            raise Exception('cells not in worksheets[0]?')
    else:
        raise Exception('unknown ipython notebook format')
    with open(tmpfn, 'w') as f:
        f.write(json.dumps(nbjson))
    exporter = HTMLExporter(template_file='basic')
    html = exporter.from_filename(tmpfn)[0]
    kwargs.setdefault('layout', 'user-guide')
    kwargs.setdefault('page_type', 'u-guide')
    kwargs.setdefault('language', 'python')
    # Emit a Jekyll page: YAML front matter, then the HTML wrapped in
    # {% raw %} so Liquid does not interpret the notebook contents.
    with open('2015-06-30-' + fn.replace('.ipynb', '.html'), 'w') as f:
        f.write('\n'.join([
            ''
            '---',
            'permalink: ' + url_path,
            # NOTE(review): replace(':', ':') is a no-op — this looks like
            # a mangled HTML entity (likely '&#58;'); confirm the intent.
            'description: ' + page_description.replace(':', ':'),
            'name: ' + page_title.replace(':', ':'),
            'has_thumbnail: ' + has_thumbnail,
            'thumbnail: ' + thumbnail,
            '\n'.join(['{}: {}'.format(k, v)
                       for k, v in kwargs.iteritems()]),
            '---',
            '{% raw %}'
        ]))
        if uses_plotly_offline:
            f.write(
                '<script type="text/javascript" '
                ' src="https://code.jquery.com/jquery-2.1.4.min.js">'
                '</script>')
        f.write(html.encode('utf8'))
        f.write('{% endraw %}')
    os.remove(tmpfn)
def publish(notebook_name, url_path, page_title, page_description,
            ignore_last_n_cells=2, uses_plotly_offline=False,
            thumbnail=DEFAULT_THUMBNAIL_URL, **kwargs):
    '''
    Convert an IPython notebook into an HTML file that can be
    consumed by GitHub pages in plotly's documentation repo.

    Arguments:
    - notebook_name: The name of the notebook to convert.
      Used as input arg to nbconvert, nowhere else.
    - url_path: e.g. /python/offline
    - page_title: The <title> of the page.
    - page_description: So, what's this page about?
      Sell it in 160 characters or less.
      This is the <meta name="description"> tag.
    - ignore_last_n_cells: When converting to HTML, don't convert the
      ignore_last_n_cells number of cells. This is usually the cell that
      runs this `publish` command and the blank output cell that is
      always included.
    - uses_plotly_offline: If this was created with plotly.offline, then
      set this to True. This will include an extra lib (jquery) that is
      included in ipython notebook, but not included in gh-pages
    - thumbnail: Used as a thumbnail image on the notebook splash (if
      applicable) and as the image when sharing the notebook as a tweet,
      a facebook post, etc. Can be relative to the repo, eg
      '/images/static-image' or absolute,
      e.g. http://i.imgur.com/j0Uiy0n.jpg
    - language: Not sure what this is used for.

    Example:
    publish('Plotly Offline',
            '/python/offline',
            'Plotly Offline for IPython Notebooks',
            'How to use Plotly offline inside IPython notebooks '
            'with Plotly Offline',
            uses_plotly_offline=True)
    '''
    # backwards compatability
    if 'thumbnail_url' in kwargs and kwargs['thumbnail'] == DEFAULT_THUMBNAIL_URL:
        kwargs['thumbnail'] = kwargs['thumbnail_url']
    warnings.warn('Did you "Save" this notebook before running this command? '
                  'Remember to save, always save.')
    parts = url_path.split('/')
    if len(parts) > 3:
        warnings.warn('Your URL has more than 2 parts... are you sure?')
    # NOTE(review): trailing-slash stripping is commented out here but
    # active in the sibling variant of this function — confirm which
    # behaviour is wanted.
    #if url_path[-1] == '/':
    #    url_path = url_path[:-1]
    if len(page_description) > 160:
        raise Exception("Shorten up that page_description! "
                        "Your description was {} characters, "
                        "and it's gotta be <= than 160."
                        .format(len(page_description)))
    if thumbnail == DEFAULT_THUMBNAIL_URL:
        has_thumbnail = 'false'
    else:
        has_thumbnail = 'true'
    if '.ipynb' not in notebook_name:
        notebook_name += '.ipynb'
    fn = notebook_name
    tmpfn = 'temp-{}'.format(fn)
    # Drop the trailing cells (usually the publish call itself) and write
    # the reduced notebook to a temp file for nbconvert.
    nbjson = json.load(open(fn))
    if 'cells' in nbjson:
        nbjson['cells'] = nbjson['cells'][:-ignore_last_n_cells]
    elif 'worksheets' in nbjson:
        # Legacy (v3) notebook format keeps cells inside worksheets.
        if len(nbjson['worksheets']) != 1:
            raise Exception('multiple worksheets?')
        elif 'cells' in nbjson['worksheets'][0]:
            nbjson['worksheets'][0]['cells'] = nbjson['worksheets'][0]['cells'][:-ignore_last_n_cells]
        else:
            raise Exception('cells not in worksheets[0]?')
    else:
        raise Exception('unknown ipython notebook format')
    with open(tmpfn, 'w') as f:
        f.write(json.dumps(nbjson))
    exporter = HTMLExporter(template_file='basic')
    html = exporter.from_filename(tmpfn)[0]
    kwargs.setdefault('layout', 'user-guide')
    kwargs.setdefault('page_type', 'u-guide')
    kwargs.setdefault('language', 'python')
    # Emit a Jekyll page: YAML front matter, then the HTML wrapped in
    # {% raw %} so Liquid does not interpret notebook contents.
    with open('2015-06-30-' + fn.replace('.ipynb', '.html'), 'w') as f:
        f.write('\n'.join(['' '---',
                           'permalink: ' + url_path,
                           # NOTE(review): replace(':', ':') is a no-op —
                           # likely a mangled '&#58;' entity; confirm.
                           'description: ' + page_description.replace(':', ':'),
                           'name: ' + page_title.replace(':', ':'),
                           'has_thumbnail: ' + has_thumbnail,
                           'thumbnail: ' + thumbnail,
                           '\n'.join(['{}: {}'.format(k, v)
                                      for k, v in kwargs.iteritems()]),
                           '---',
                           '{% raw %}'
                           ]))
        if uses_plotly_offline:
            f.write(
                '<script type="text/javascript" '
                ' src="https://code.jquery.com/jquery-2.1.4.min.js">'
                '</script>'
            )
        f.write(html.encode('utf8'))
        f.write('{% endraw %}')
    os.remove(tmpfn)
def post(args, debug=False):
    """ Inserts a file as a post to a blog.

    Accepts an .html/.htm or .ipynb file, prepends a MathJax header,
    and inserts or updates the post through the Blogger API.
    (Python 2 code: uses print statements.)
    """
    title, suffix = os.path.splitext(os.path.basename(args.file))
    # Need to add mathJax header in front of html
    mathJaxFile = os.path.join(os.path.dirname(__file__), 'mathJax.html')
    with open(mathJaxFile, 'r') as htmlFile:
        mathJax = htmlFile.read()
    # Read file to post
    if suffix in ('.html', '.htm'):
        with open(args.file, 'r') as htmlFile:
            html = mathJax + htmlFile.read()
    elif suffix in '.ipynb':
        # NOTE(review): `suffix in '.ipynb'` is a substring test, not tuple
        # membership — e.g. '.ipy' would also match; confirm the intent.
        exportHtml = HTMLExporter(template_file='basic')
        html = mathJax + exportHtml.from_filename(args.file)[0]
    else:
        print args.file, 'has an unrecognized suffix. Stopping.'
        return
    # Labels for post
    if args.label is None:
        labels = None
    else:
        labels = args.label
    # A --title argument overrides the filename-derived title.
    if args.title is not None:
        title = args.title
    # Start communications with blogger
    service, http = authenticate(args)
    # Retrieve the list of Blogs this user has write privileges on
    blogs = service.blogs()
    if debug: print 'blogs =', blogs
    # Find blog by URL
    request = blogs.getByUrl(url=args.url)
    if debug: print 'blogs.getByUrl(url=args.url) =', request.to_json()
    response = request.execute()
    if debug: print 'response =', json.dumps(response, indent=2)
    #response = blogs.getByUrl(url=args.url).execute()
    # Get blogId
    blogId = response['id']
    if debug: print 'blogId =', blogId
    # posts instance
    posts = service.posts()
    if debug: print 'posts =', posts
    # Build body of post
    # Check post doesn't already exist — search drafts, then scheduled,
    # then live posts.
    existingPost = getPostByTitle(posts, blogId, title, status='draft',
                                  debug=False)
    if existingPost == None:
        existingPost = getPostByTitle(posts, blogId, title,
                                      status='scheduled', debug=False)
    if existingPost == None:
        existingPost = getPostByTitle(posts, blogId, title, status='live',
                                      debug=False)
    if existingPost != None:
        # Only overwrite an existing post when --update was requested.
        if args.update:
            existingPost['content'] = html
            if labels != None:
                existingPost['labels'] = labels
            postId = existingPost['id']
            request = posts.update(blogId=blogId, postId=postId,
                                   body=existingPost)
            if debug: print 'posts().update() =', request.to_json()
            response = request.execute()
            if debug: print 'response =', json.dumps(response, indent=2)
        else:
            print 'Post "' + title + '" already exists!'
    else:
        # Insert new post
        body = {}
        body['kind'] = 'blogger#post'
        body['title'] = title
        body['content'] = html
        body['blog'] = {'id': blogId}
        body['labels'] = labels
        request = posts.insert(blogId=blogId, body=body, isDraft=True)
        if debug: print 'posts().insert() =', request.to_json()
        response = request.execute()
        if debug: print 'response =', json.dumps(response, indent=2)
def post(args, debug=False):
    """ Inserts a file as a post to a blog.

    Reads an .html/.htm or .ipynb file, prepends a MathJax header, and
    inserts (as draft) or updates the matching post via the Blogger API.
    (Python 2 code: uses print statements.)
    """
    title, suffix = os.path.splitext(os.path.basename(args.file))
    # Need to add mathJax header in front of html
    mathJaxFile = os.path.join(os.path.dirname(__file__), 'mathJax.html')
    with open(mathJaxFile, 'r') as htmlFile:
        mathJax = htmlFile.read()
    # Read file to post
    if suffix in ('.html', '.htm'):
        with open(args.file, 'r') as htmlFile:
            html = mathJax + htmlFile.read()
    elif suffix in '.ipynb':
        # NOTE(review): substring test, not tuple membership — '.ipy'
        # would also match here.
        exportHtml = HTMLExporter(template_file='basic')
        html = mathJax + exportHtml.from_filename(args.file)[0]
    else:
        print args.file, 'has an unrecognized suffix. Stopping.'
        return
    # Labels for post
    if args.label is None:
        labels = None
    else:
        labels = args.label
    # Explicit --title beats the filename-derived one.
    if args.title is not None:
        title = args.title
    # Start communications with blogger
    service, http = authenticate(args)
    # Retrieve the list of Blogs this user has write privileges on
    blogs = service.blogs()
    if debug: print 'blogs =', blogs
    # Find blog by URL
    request = blogs.getByUrl(url=args.url)
    if debug: print 'blogs.getByUrl(url=args.url) =', request.to_json()
    response = request.execute()
    if debug: print 'response =', json.dumps(response, indent=2)
    #response = blogs.getByUrl(url=args.url).execute()
    # Get blogId
    blogId = response['id']
    if debug: print 'blogId =', blogId
    # posts instance
    posts = service.posts()
    if debug: print 'posts =', posts
    # Build body of post
    # Check post doesn't already exist: drafts, then scheduled, then live.
    existingPost = getPostByTitle(posts, blogId, title, status='draft',
                                  debug=False)
    if existingPost == None:
        existingPost = getPostByTitle(posts, blogId, title,
                                      status='scheduled', debug=False)
    if existingPost == None:
        existingPost = getPostByTitle(posts, blogId, title, status='live',
                                      debug=False)
    if existingPost != None:
        # Existing posts are only touched when --update is given.
        if args.update:
            existingPost['content'] = html
            if labels != None:
                existingPost['labels'] = labels
            postId = existingPost['id']
            request = posts.update(blogId=blogId, postId=postId,
                                   body=existingPost)
            if debug: print 'posts().update() =', request.to_json()
            response = request.execute()
            if debug: print 'response =', json.dumps(response, indent=2)
        else:
            print 'Post "' + title + '" already exists!'
    else:
        # Insert new post
        body = {}
        body['kind'] = 'blogger#post'
        body['title'] = title
        body['content'] = html
        body['blog'] = {'id': blogId}
        body['labels'] = labels
        request = posts.insert(blogId=blogId, body=body, isDraft=True)
        if debug: print 'posts().insert() =', request.to_json()
        response = request.execute()
        if debug: print 'response =', json.dumps(response, indent=2)
def build_gallery(examples_directory, target_directory):
    """Build the example gallery: one HTML page per notebook plus gallery.html.

    Notebooks are grouped into sections by their 'keywords' metadata;
    output thumbnails are copied alongside the generated pages.
    """
    notebook_fnames = list(notebooks_in_directory(examples_directory))
    examples_dir_name = 'examples'
    # From here on `examples_directory` points at the OUTPUT directory.
    examples_directory = os.path.join(target_directory, examples_dir_name)
    if not os.path.isdir(examples_directory):
        os.makedirs(examples_directory)
    gallery_page_fname = os.path.join(target_directory, 'gallery.html')
    exporter = HTMLExporter(template_file='gallery',
                            template_path=[os.path.dirname(__file__)])
    exporter._load_template()
    # Rebind to the underlying jinja template; only .render() is used below.
    exporter = exporter.template
    sections = []
    # Collect each notebook's keyword metadata.
    notebook_keywords = {}
    for notebook_fname in notebook_fnames:
        notebook = read_notebook(notebook_fname)
        notebook_keywords[notebook_fname] = notebook['metadata'].get(
            'keywords', [])
    # Invert to keyword -> notebook filenames.
    notebooks_by_keyword = {}
    for notebook_fname, keywords in notebook_keywords.items():
        for keyword in keywords:
            notebooks_by_keyword.setdefault(keyword, []).append(notebook_fname)
    import collections
    Section = collections.namedtuple('Section', ['name', 'examples'])
    Example = collections.namedtuple('Example',
                                     ['name', 'url', 'keywords', 'outputs'])
    examples_by_fname = {}
    for fname in notebook_fnames:
        name = os.path.splitext(os.path.basename(fname))[0]
        page_content_fname = os.path.join(examples_directory, name + '.html')
        refname = 'gallery-{}'.format(name.replace('_', '-'))
        with open(page_content_fname, 'w') as fh:
            html, outputs = notebook_html(fname, refname, examples_directory)
            fh.write(html.encode('utf-8'))
        # Copy each notebook output into the thumbnails directory.
        images = []
        for output in outputs:
            thumb_path = examples_dir_name + '/' + 'notebook_output_thumbs' + '/' + os.path.basename(
                output)
            thumb_fname = os.path.join(target_directory, thumb_path)
            orig_fname = os.path.join(examples_directory, output)
            if not os.path.exists(os.path.dirname(thumb_fname)):
                os.makedirs(os.path.dirname(thumb_fname))
            import shutil
            # TODO - resize these.
            shutil.copy(orig_fname, thumb_fname)
            images.append(thumb_path)
        # outputs = [examples_dir_name + '/' + image for image in images]
        example = Example(name, '{}/{}.html'.format(examples_dir_name, name),
                          notebook_keywords[fname], images)
        examples_by_fname[fname] = example
        # NOTE(review): this break stops after the FIRST notebook — looks
        # like leftover debugging; confirm before removing it.
        break
    # Assemble one section per keyword, listing the examples built above.
    for keyword, fnames in notebooks_by_keyword.items():
        section = Section(keyword, [])
        sections.append(section)
        for fname in fnames:
            name = os.path.splitext(os.path.basename(fname))[0]
            if fname in examples_by_fname:
                section.examples.append(examples_by_fname[fname])
    with open(gallery_page_fname, 'w') as gallery_page:
        gallery_page.write(exporter.render(title='Gallery',
                                           sections=sections))
# NOTE(review): fragment — the enclosing `convert(exporter, file, suffix)`
# definition starts outside this view; only its tail is shown here.
    (body, resources) = exporter.from_notebook_node(notebook)
    with open(out_file, 'w') as f:
        f.write(body.encode('utf8'))
    return out_file


# Two exporters: A renders with nbconvert's default template (keeps code
# cells, '-code' suffix); B uses the project-local template plus a
# preprocessor rooted at root_dir.
config = Config({
    'HTMLExporter': {
        'template_file': 'local',
        'template_path': [os.path.join(root_dir, 'scripts')]
    },
})
exportHtmlA = HTMLExporter()
exportHtmlB = HTMLExporter(config=config)
exportHtmlB.register_preprocessor(LocalPreprocessor(root_dir), enabled=True)
out_files = set()
for file in args.files:
    for exporter, suffix in (
            (exportHtmlA, '-code'),
            (exportHtmlB, '')):
        out_file = convert(exporter, file, suffix)
        if out_file:
            out_files.add(out_file)