def lines_to_notebook(lines, name=None):
    """
    Convert the lines of an m file into an IPython notebook

    Parameters
    ----------
    lines : list
        A list of strings. Each element is a line in the m file

    Returns
    -------
    notebook : an IPython NotebookNode class instance, containing the
        information required to create a file
    """
    source = []
    md = np.empty(len(lines), dtype=object)
    new_cell = np.empty(len(lines), dtype=object)
    for idx, l in enumerate(lines):
        new_cell[idx], md[idx], this_source = format_line(l)
        # Transitions between markdown and code and vice-versa merit a new
        # cell, even if no newline, or "%%" is found. Make sure not to do this
        # check for the very first line!
        if idx > 0 and not new_cell[idx]:
            if md[idx] != md[idx - 1]:
                new_cell[idx] = True
        source.append(this_source)

    # This defines the breaking points between cells; the sentinel is
    # len(lines), so the final slice includes the last source line:
    new_cell_idx = np.hstack([np.where(new_cell)[0], len(lines)])

    # Listify the sources:
    cell_source = [source[new_cell_idx[i]:new_cell_idx[i + 1]]
                   for i in range(len(new_cell_idx) - 1)]
    cell_md = [md[new_cell_idx[i]] for i in range(len(new_cell_idx) - 1)]

    cells = []
    # Append the notebook with loading matlab magic extension
    notebook_head = ("import pymatbridge as pymat\n"
                     "ip = get_ipython()\n"
                     "pymat.load_ipython_extension(ip)")
    cells.append(nbformat.new_code_cell(notebook_head, language='python'))

    for cell_idx, cell_s in enumerate(cell_source):
        if cell_md[cell_idx]:
            cells.append(nbformat.new_text_cell('markdown', cell_s))
        else:
            cell_s.insert(0, '%%matlab\n')
            cells.append(nbformat.new_code_cell(cell_s, language='matlab'))

    ws = nbformat.new_worksheet(cells=cells)
    notebook = nbformat.new_notebook(metadata=nbformat.new_metadata(),
                                     worksheets=[ws])
    return notebook
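# A minimal, hedged usage sketch for lines_to_notebook. The file names here are
# hypothetical; the function above additionally assumes numpy (as np), a
# format_line() helper defined elsewhere, and the legacy IPython v3 notebook
# API aliased as nbformat, e.g.:

import numpy as np
from IPython.nbformat import current as nbformat

with open('analysis.m') as f:              # hypothetical m-file
    lines = f.read().splitlines()
notebook = lines_to_notebook(lines)
with open('analysis.ipynb', 'w') as f:
    nbformat.write(notebook, f, 'ipynb')   # same write() call used elsewhere in this file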
def build_notebook(self):
    """Build a reveal slides notebook in memory for use with tests.
    Overrides base in TransformerTestsBase"""
    outputs = [
        nbformat.new_output(output_type="stream", stream="stdout",
                            output_text="a")
    ]
    slide_metadata = {'slideshow': {'slide_type': 'slide'}}
    subslide_metadata = {'slideshow': {'slide_type': 'subslide'}}
    cells = [
        nbformat.new_code_cell(input="", prompt_number=1, outputs=outputs),
        nbformat.new_text_cell('markdown', source="", metadata=slide_metadata),
        nbformat.new_code_cell(input="", prompt_number=2, outputs=outputs),
        nbformat.new_text_cell('markdown', source="", metadata=slide_metadata),
        nbformat.new_text_cell('markdown', source="", metadata=subslide_metadata)
    ]
    worksheets = [nbformat.new_worksheet(name="worksheet1", cells=cells)]
    return nbformat.new_notebook(name="notebook1", worksheets=worksheets)
def build_notebook(self):
    """Build a notebook in memory for use with preprocessor tests"""
    outputs = [
        nbformat.new_output(output_type="stream", stream="stdout", output_text="a"),
        nbformat.new_output(output_type="text", output_text="b"),
        nbformat.new_output(output_type="stream", stream="stdout", output_text="c"),
        nbformat.new_output(output_type="stream", stream="stdout", output_text="d"),
        nbformat.new_output(output_type="stream", stream="stderr", output_text="e"),
        nbformat.new_output(output_type="stream", stream="stderr", output_text="f"),
        nbformat.new_output(output_type="png", output_png=b'Zw==')
    ]  # g
    cells = [
        nbformat.new_code_cell(input="$ e $", prompt_number=1, outputs=outputs),
        nbformat.new_text_cell('markdown', source="$ e $")
    ]
    worksheets = [nbformat.new_worksheet(name="worksheet1", cells=cells)]
    return nbformat.new_notebook(name="notebook1", worksheets=worksheets)
def test_replace_test_source_bad_cell_type(self):
    """Is an error raised if the cell type has changed?"""
    cell = new_text_cell("markdown", metadata=dict(
        assignment=dict(id="test1_for_problem1")))
    self.preprocessor._load_autograder_tests_file()
    assert_raises(
        RuntimeError, self.preprocessor._replace_test_source, cell)
def test_very_long_cells(self):
    """
    Torture test that long cells do not cause issues
    """
    lorem_ipsum_text = textwrap.dedent("""\
      Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec
      dignissim, ipsum non facilisis tempus, dui felis tincidunt metus, nec
      pulvinar neque odio eget risus. Nulla nisi lectus, cursus suscipit
      interdum at, ultrices sit amet orci. Mauris facilisis imperdiet elit,
      vitae scelerisque ipsum dignissim non. Integer consequat malesuada
      neque sit amet pulvinar. Curabitur pretium ut turpis eget aliquet.
      Maecenas sagittis lacus sed lectus volutpat, eu adipiscing purus
      pulvinar. Maecenas consequat luctus urna, eget cursus quam mollis a.
      Aliquam vitae ornare erat, non hendrerit urna. Sed eu diam nec massa
      egestas pharetra at nec tellus. Fusce feugiat lacus quis urna
      sollicitudin volutpat. Quisque at sapien non nibh feugiat tempus ac
      ultricies purus.
      """)
    lorem_ipsum_text = lorem_ipsum_text.replace("\n", " ") + "\n\n"
    large_lorem_ipsum_text = "".join([lorem_ipsum_text] * 3000)

    notebook_name = "lorem_ipsum_long.ipynb"
    tex_name = "lorem_ipsum_long.tex"
    with self.create_temp_cwd([]):
        nb = current.new_notebook(worksheets=[
            current.new_worksheet(cells=[
                current.new_text_cell('markdown',
                                      source=large_lorem_ipsum_text)
            ])
        ])
        with open(notebook_name, 'w') as f:
            current.write(nb, f, 'ipynb')
        self.call('nbconvert --to latex --log-level 0 ' +
                  os.path.join(notebook_name))
        assert os.path.isfile(tex_name)
def mini_markdown_nb(markdown):
    "create a single text cell notebook with markdown in it"
    nb = new_notebook()
    wks = new_worksheet()
    cell = new_text_cell('markdown', source=markdown)
    nb['worksheets'].append(wks)
    nb['worksheets'][0]['cells'].append(cell)
    return nb
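# Quick sanity sketch for mini_markdown_nb (assumes new_notebook, new_worksheet,
# and new_text_cell are imported from the same legacy v3 API as above):

nb = mini_markdown_nb("# Title\n\nSome *emphasised* markdown.")
assert nb['worksheets'][0]['cells'][0]['cell_type'] == 'markdown'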
def test_match_tests_double_problem(self):
    """Is an error raised when a problem id is used twice?"""
    cell1 = new_code_cell()
    cell1.metadata = dict(assignment=dict(
        cell_type="grade", id="foo", points=""))
    cell2 = new_text_cell("markdown")
    cell2.metadata = dict(assignment=dict(
        cell_type="grade", id="foo", points=""))
    cells = [cell1, cell2]
    assert_raises(RuntimeError, self.preprocessor._match_tests, cells)
def to_notebook(infile, hr_separated=False):
    """Given markdown, returns an ipynb compliant JSON string"""
    parser = markdown.DocParser()
    ast = json.loads(markdown.ASTtoJSON(parser.parse(infile.read())))

    cells = [current.new_text_cell('markdown', '')]

    for block in ast.get('children', []):
        if block['t'] in ["IndentedCode", "FencedCode"]:
            cells.append(current.new_code_cell(
                block['string_content'].rstrip()
            ))
        elif block['t'] in ['SetextHeader', 'ATXHeader']:
            src = '{} {}'.format(
                '#' * block.get('level', 1),
                ''.join(block['strings'])
            ).rstrip()
            if hr_separated and cells[-1]['cell_type'] == 'markdown':
                cells[-1]['source'] += '\n\n{}'.format(src)
            else:
                cells.append(current.new_text_cell('markdown', src))
        elif block['t'] in ['HorizontalRule']:
            # We don't render horizontal rules
            if hr_separated:
                cells.append(current.new_text_cell('markdown', ''))
        else:
            src = '\n'.join(block['strings']).rstrip()
            if hr_separated and cells[-1]['cell_type'] == 'markdown':
                cells[-1]['source'] += '\n\n{}'.format(src)
            else:
                cells.append(current.new_text_cell('markdown', src))

    cells = tidy_notebook(cells[:])
    worksheet = current.new_worksheet(cells=cells)
    # new_notebook expects a name string, so join the basename parts
    nb = current.new_notebook(
        '.'.join(basename(infile.name).split('.')[:-1]),
        worksheets=[worksheet]
    )
    # using the indent option leaves a bunch of trailing whitespace. No thanks!
    return json.dumps(nb, indent=2).replace(' \n', '\n')
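# Hedged usage sketch for to_notebook. The file names are hypothetical; the
# function also assumes the old CommonMark-style `markdown` module
# (DocParser/ASTtoJSON), json, os.path.basename, and a tidy_notebook() helper
# defined elsewhere:

with open('notes.md') as infile:           # hypothetical markdown source
    ipynb_json = to_notebook(infile, hr_separated=True)
with open('notes.ipynb', 'w') as out:
    out.write(ipynb_json)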
def test_get_score_nan(self):
    """Is the score nan when the cell is text?"""
    cell = new_text_cell("markdown", metadata=dict(
        assignment=dict(id="test1_for_problem1")))
    cell.outputs = []
    self.preprocessor._load_autograder_tests_file()
    score, total_score = self.preprocessor._get_score(cell)
    assert math.isnan(score)
    assert total_score == 1
def test_replace_test_source_text(self):
    """Is the text source properly replaced?"""
    cell = new_text_cell("markdown", metadata=dict(
        assignment=dict(id="test2_for_problem1")))
    self.preprocessor._load_autograder_tests_file()
    self.preprocessor._replace_test_source(cell)
    assert cell.source == "# blah blah blah blah"
    assert cell.metadata['assignment']['weight'] == 0.6666666666666666
    assert cell.metadata['assignment']['points'] == 2
def _create_text_cell():
    source = """{% if solution %}
this is the answer!
{% else %}
YOUR ANSWER HERE
{% endif %}
"""
    cell = new_text_cell("markdown", source=source)
    return cell
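# The source above is a Jinja-style template. A sketch of rendering the release
# and solution variants (jinja2 is an assumption here, not an import this file
# makes):

from jinja2 import Template

cell = _create_text_cell()
print(Template(cell.source).render(solution=False).strip())  # YOUR ANSWER HERE
print(Template(cell.source).render(solution=True).strip())   # this is the answer!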
def visit_paragraph(self, node):
    text = node.astext()
    # For every ref directive a paragraph contains,
    # docutils will generate a paragraph complaining
    # "Unknown interpreted text role \"ref\"."
    # This is because ref is a Sphinx directive
    # that does not exist in docutils.
    # Looking for a better way to handle this;
    # for now, filtering such paragraphs from the output.
    if not self.is_ref_error_paragraph(text):
        p = nbformat.new_text_cell('markdown', source=text)
        self.add_cell(p)
def test_run_nb(self):
    """Test %run notebook.ipynb"""
    from IPython.nbformat import current
    nb = current.new_notebook(worksheets=[
        current.new_worksheet(cells=[
            current.new_text_cell('markdown',
                                  "The Ultimate Question of Everything"),
            current.new_code_cell("answer=42")
        ])
    ])
    src = current.writes(nb, 'json')
    self.mktmp(src, ext='.ipynb')

    _ip.magic("run %s" % self.fname)

    nt.assert_equal(_ip.user_ns['answer'], 42)
def add_markdown_cell(self, source):
    cell = nbformat.new_text_cell('markdown', source=source)
    self.add_cell(cell)
def generate_report(reviews, dataset_name, file_name, load_reviews_code):
    nb = nbf.new_notebook()
    title = '# ' + dataset_name + ' Dataset Analysis'
    title_cell = nbf.new_text_cell(u'markdown', title)

    rda = ReviewsDatasetAnalyzer(reviews)
    num_reviews = len(rda.reviews)
    num_users = len(rda.user_ids)
    num_items = len(rda.item_ids)
    user_avg_reviews = float(num_reviews) / num_users
    item_avg_reviews = float(num_reviews) / num_items
    sparsity = rda.calculate_sparsity_approx()

    fact_sheet_text =\
        '## Fact Sheet\n' +\
        'The ' + dataset_name + ' contains:\n' +\
        '* ' + str(num_reviews) + ' reviews\n' +\
        '* Made by ' + str(num_users) + ' users\n' +\
        '* About ' + str(num_items) + ' items\n' +\
        '* It has an approximated sparsity of ' + str(sparsity) + '\n' +\
        '\nNow we are going to analyze the number of reviews per user and ' \
        'per item'
    fact_sheet_cell = nbf.new_text_cell(u'markdown', fact_sheet_text)

    reviews_analysis_code =\
        'import sys\n' +\
        'sys.path.append(\'/Users/fpena/UCC/Thesis/projects/yelp/source/python\')\n' +\
        'from etl import ETLUtils\n\n' +\
        'from etl.reviews_dataset_analyzer import ReviewsDatasetAnalyzer\n' +\
        '\n# Load reviews\n' + load_reviews_code + '\n' +\
        'rda = ReviewsDatasetAnalyzer(reviews)\n'
    reviews_analysis_cell = nbf.new_code_cell(reviews_analysis_code)

    user_analysis_text =\
        '## Users Reviews Analysis\n' +\
        '* The average number of reviews per user is ' + str(user_avg_reviews) + '\n' +\
        '* The minimum number of reviews a user has is ' + str(min(rda.users_count)) + '\n' +\
        '* The maximum number of reviews a user has is ' + str(max(rda.users_count))
    user_analysis_cell = nbf.new_text_cell(u'markdown', user_analysis_text)

    counts_per_user_code =\
        '# Number of reviews per user\n' +\
        'users_summary = rda.summarize_reviews_by_field(\'user_id\')\n' +\
        'print(\'Average number of reviews per user\', float(rda.num_reviews)/rda.num_users)\n' +\
        'users_summary.plot(kind=\'line\', rot=0)'
    counts_per_user_cell = nbf.new_code_cell(counts_per_user_code)

    item_analysis_text =\
        '## Items Reviews Analysis\n' +\
        '* The average number of reviews per item is ' + str(item_avg_reviews) + '\n' +\
        '* The minimum number of reviews an item has is ' + str(min(rda.items_count)) + '\n' +\
        '* The maximum number of reviews an item has is ' + str(max(rda.items_count))
    item_analysis_cell = nbf.new_text_cell(u'markdown', item_analysis_text)

    counts_per_item_code =\
        '# Number of reviews per item\n' +\
        'items_summary = rda.summarize_reviews_by_field(\'offering_id\')\n' +\
        'print(\'Average number of reviews per item\', float(rda.num_reviews)/rda.num_items)\n' +\
        'items_summary.plot(kind=\'line\', rot=0)'
    counts_per_item_cell = nbf.new_code_cell(counts_per_item_code)

    common_items_text =\
        '## Number of items 2 users have in common\n' +\
        'In this section we are going to count the number of items two ' \
        'users have in common'
    common_items_text_cell = nbf.new_text_cell(u'markdown', common_items_text)

    common_items_code =\
        '# Number of items 2 users have in common\n' +\
        'common_item_counts = rda.count_items_in_common()\n' +\
        'plt.plot(common_item_counts.keys(), common_item_counts.values())\n'
    common_items_code_cell = nbf.new_code_cell(common_items_code)

    common_items_box_code =\
        'from pylab import boxplot\n' +\
        'my_data = [key for key, value in common_item_counts.iteritems() for i in xrange(value)]\n' +\
        'mean_common_items = float(sum(my_data))/len(my_data)\n' +\
        'print(\'Average number of common items between two users:\', mean_common_items)\n' +\
        'boxplot(my_data)'
    common_items_box_cell = nbf.new_code_cell(common_items_box_code)

    cells = [
        title_cell,
        fact_sheet_cell,
        reviews_analysis_cell,
        user_analysis_cell,
        counts_per_user_cell,
        item_analysis_cell,
        counts_per_item_cell,
        common_items_text_cell,
        common_items_code_cell,
        common_items_box_cell,
    ]

    nb['worksheets'].append(nbf.new_worksheet(cells=cells))

    with open(file_name, 'w') as f:
        nbf.write(nb, f, 'ipynb')
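# Hypothetical invocation sketch for generate_report. The review dicts below
# are placeholders: the exact schema ReviewsDatasetAnalyzer expects is not
# shown in this file, and the load_reviews_code string is likewise a stand-in.

sample_reviews = [
    {'user_id': 'u1', 'offering_id': 'i1'},  # placeholder records
    {'user_id': 'u2', 'offering_id': 'i1'},
]
load_code = "# rebuild the reviews list here\nreviews = []"  # placeholder loader code
generate_report(sample_reviews, 'Example Dataset', 'example_report.ipynb', load_code)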
def run(self):
    path = os.path.abspath(os.path.expanduser(self.args.path))
    final_dir = os.path.abspath(os.path.expanduser(self.args.final_dir))
    if not path.endswith(".yaml") and not path.endswith(".yml"):
        raise ValueError
    filename = os.path.basename(path)
    new_filename = "Mission" + filename.replace(".yml", ".ipynb").replace(".yaml", ".ipynb")
    final_dest = os.path.join(final_dir, new_filename)
    mission, screens = mission_loader(path)

    nb = nbf.new_notebook()
    mission_cell = nbf.new_text_cell('markdown',
                                     self.assemble_mission_cell(mission).strip())
    cells = [mission_cell]

    for screen in screens:
        text = self.assemble_screen_meta(screen)
        text += "\n\n"

        if screen["type"] == "code":
            text += "# " + screen["name"]
            text += "\n\n"
            text += screen["left_text"]
            if "instructions" in screen:
                text += "\n\n"
                text += "## Instructions\n\n"
                text += screen["instructions"]
            if "hint" in screen:
                text += "\n\n"
                text += "## Hint\n\n"
                text += screen["hint"]
        elif screen["type"] == "video":
            text += "# " + screen["name"]
            text += "\n\n"
            text += screen["video"]
        elif screen["type"] == "text":
            text += "# " + screen["name"]
            text += "\n\n"
            text += screen["text"]

        cell = nbf.new_text_cell('markdown', text.strip())
        cells.append(cell)

        if screen["type"] == "code":
            text = ""
            if "initial" not in screen and "answer" not in screen:
                text += screen["initial_display"]
            else:
                items = [
                    {"key": "initial", "name": "## Initial"},
                    {"key": "initial_display", "name": "## Display"},
                    {"key": "answer", "name": "## Answer"},
                    {"key": "check_val", "name": "## Check val"},
                    {"key": "check_vars", "name": "## Check vars"},
                    {"key": "check_code_run", "name": "## Check code run"}
                ]
                for item in items:
                    if item["key"] in screen and len(str(screen[item["key"]]).strip()) > 0:
                        if item["key"] == "check_vars" and len(screen[item["key"]]) == 0:
                            continue
                        text += item["name"] + "\n\n"
                        if item["key"] == "check_val":
                            text += '"' + str(screen[item["key"]]).strip().replace("\n", "\\n") + '"'
                        else:
                            text += str(screen[item["key"]]).strip()
                        text += "\n\n"
            cell = nbf.new_code_cell(input=text.strip())
            cells.append(cell)

    nb['worksheets'].append(nbf.new_worksheet(cells=cells))
    with open(final_dest, 'w+') as f:
        nbf.write(nb, f, 'ipynb')

    # Copy any associated files over
    original_dir = os.path.dirname(path)
    for f in os.listdir(original_dir):
        full_path = os.path.join(original_dir, f)
        if os.path.isfile(full_path):
            if not f.endswith(".yaml") and not f.endswith(".yml") and not f.endswith(".ipynb"):
                shutil.copy2(full_path, os.path.join(final_dir, f))
def _create_text_cell():
    source = "this is the answer!\n"
    cell = new_text_cell('markdown', source=source)
    return cell
def write_notebook(pseudopath):
    """See http://nbviewer.ipython.org/gist/fperez/9716279"""
    nb = nbf.new_notebook()

    cells = [
        nbf.new_text_cell('heading',
                          "This is an auto-generated notebook for %s" %
                          os.path.basename(pseudopath)),

        nbf.new_code_cell("""\
from __future__ import print_function
%matplotlib inline
import mpld3
from mpld3 import plugins as plugs
plugs.DEFAULT_PLUGINS = [plugs.Reset(), plugs.Zoom(), plugs.BoxZoom(), plugs.MousePosition()]
mpld3.enable_notebook()
import seaborn as sns
#sns.set(style="dark", palette="Set2")
sns.set(style='ticks', palette='Set2')"""),

        nbf.new_code_cell("""\
# Construct the pseudo object and get the DojoReport
from pymatgen.io.abinitio.pseudos import Pseudo
pseudo = Pseudo.from_file('%s')
report = pseudo.dojo_report""" % os.path.basename(pseudopath)),

        nbf.new_text_cell('heading', "ONCVPSP Input File:"),
        nbf.new_code_cell("""\
input_file = pseudo.filepath.replace(".psp8", ".in")
%cat $input_file"""),

        nbf.new_code_cell("""\
# Get data from the output file
from pseudo_dojo.ppcodes.oncvpsp import OncvOutputParser, PseudoGenDataPlotter
onc_parser = OncvOutputParser(pseudo.filepath.replace(".psp8", ".out"))

# Parse the file and build the plotter
onc_parser.scan()
plotter = onc_parser.make_plotter()"""),

        nbf.new_text_cell('heading', "AE/PS radial wavefunctions $\phi(r)$:"),
        nbf.new_code_cell("""fig = plotter.plot_radial_wfs(show=False)"""),

        nbf.new_text_cell('heading', "Arctan of the logarithmic derivatives:"),
        nbf.new_code_cell("""fig = plotter.plot_atan_logders(show=False)"""),

        nbf.new_text_cell('heading', "Convergence in $G$-space estimated by ONCVPSP:"),
        nbf.new_code_cell("""fig = plotter.plot_ene_vs_ecut(show=False)"""),

        nbf.new_text_cell('heading', "Projectors:"),
        nbf.new_code_cell("""fig = plotter.plot_projectors(show=False)"""),

        nbf.new_text_cell('heading', "Core/Valence/Model charge densities:"),
        nbf.new_code_cell("""fig = plotter.plot_densities(show=False)"""),

        nbf.new_text_cell('heading', "Local potential and $l$-dependent potentials:"),
        nbf.new_code_cell("""fig = plotter.plot_potentials(show=False)"""),

        #nbf.new_text_cell('heading', "1-st order derivative of $v_l$ and $v_{loc}$ computed via finite differences:"),
        #nbf.new_code_cell("""fig = plotter.plot_der_potentials(order=1, show=False)"""),
        #nbf.new_text_cell('heading', "2-nd order derivative of $v_l$ and $v_{loc}$ computed via finite differences:"),
        #nbf.new_code_cell("""fig = plotter.plot_der_potentials(order=2, show=False)"""),

        nbf.new_text_cell('heading', "Convergence of the total energy:"),
        nbf.new_code_cell("""\
# Convergence of the total energy (computed from the deltafactor runs with Wien2K equilibrium volume)
fig = report.plot_etotal_vs_ecut(show=False)"""),

        nbf.new_text_cell('heading', "Convergence of the deltafactor results:"),
        nbf.new_code_cell("""fig = report.plot_deltafactor_convergence(what=("dfact_meV", "dfactprime_meV"), show=False)"""),

        nbf.new_text_cell('heading', "Convergence of $\Delta v_0$, $\Delta b_0$, and $\Delta b_1$ (deltafactor tests)"),
        nbf.new_code_cell("""\
# Here we plot the difference wrt Wien2k results.
fig = report.plot_deltafactor_convergence(what=("-dfact_meV", "-dfactprime_meV"), show=False)"""),

        nbf.new_text_cell('heading', "deltafactor EOS for the different cutoff energies:"),
        nbf.new_code_cell("""fig = report.plot_deltafactor_eos(show=False)"""),

        nbf.new_text_cell('heading', "Convergence of the GBRV lattice parameters:"),
        nbf.new_code_cell("""fig = report.plot_gbrv_convergence(show=False)"""),

        nbf.new_text_cell('heading', "GBRV EOS for the FCC structure:"),
        nbf.new_code_cell("""fig = report.plot_gbrv_eos(struct_type="fcc", show=False)"""),

        nbf.new_text_cell('heading', "GBRV EOS for the BCC structure:"),
        nbf.new_code_cell("""fig = report.plot_gbrv_eos(struct_type="bcc", show=False)"""),

        # nbf.new_text_cell('heading', "Comparison with the other pseudos in this table"),
        # nbf.new_code_cell("""\
        # from pseudo_dojo import get_pseudos
        # pseudos = get_pseudos(".")
        # if len(pseudos) > 1:
        #     pseudos.dojo_compare()"""),
    ]

    # Now that we have the cells, we can make a worksheet with them and add it to the notebook:
    nb['worksheets'].append(nbf.new_worksheet(cells=cells))

    # Next, we write it to a file on disk that we can then open as a new notebook.
    # Note: This should be as easy as: nbf.write(nb, fname), but the current api
    # is a little more verbose and needs a real file-like object.
    root, ext = os.path.splitext(pseudopath)
    with open(root + '.ipynb', 'w') as f:
        nbf.write(nb, f, 'ipynb')
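# Usage sketch: point write_notebook at a .psp8 pseudopotential file and the
# generated notebook is written next to it (the path below is hypothetical):

write_notebook('pseudos/Si/Si.psp8')   # writes pseudos/Si/Si.ipynb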
def flush_markdown(self):
    if len(self._markdown_buffer) == 0:
        return
    self._cells.append(nbf.new_text_cell('markdown', self._markdown_buffer))
    self._markdown_buffer = ''
def add_raw_cell(self, source):
    cell = nbformat.new_text_cell('raw', source=source)
    self.add_cell(cell)