def test_preprocessor_collapsible_headings(): """Test collapsible_headings preprocessor.""" # check import shortcut from jupyter_contrib_nbextensions.nbconvert_support import CollapsibleHeadingsPreprocessor # noqa cells = [] for lvl in range(6, 1, -1): for collapsed in (True, False): cells.extend([ nbf.new_markdown_cell( source='{} {} heading level {}'.format( '#' * lvl, 'Collapsed' if collapsed else 'Uncollapsed', lvl), metadata={'heading_collapsed': True} if collapsed else {}), nbf.new_markdown_cell(source='\n'.join([ 'want hidden' if collapsed else 'want to see', 'what I mean', ])), nbf.new_code_cell(source='\n'.join([ 'want hidden' if collapsed else 'want to see', 'what I mean', ])), ]) notebook_node = nbf.new_notebook(cells=cells) body, resources = export_through_preprocessor( notebook_node, CollapsibleHeadingsPreprocessor, RSTExporter, 'rst') assert_not_in('hidden', body, 'check text hidden by collapsed headings') assert_in('want to see', body, 'check for text under uncollapsed headings')
def test_export_job_slides(self):
    """test export a job to slides HTML (reveal.js)"""
    fs = self.fs
    om = self.om
    # create a notebook with slides
    # see https://github.com/jupyter/nbconvert/blob/master/nbconvert/templates/html/slides_reveal.tpl#L1:14
    # slide_start / slide_end metadata delimits each reveal.js slide
    cells = []
    code = "print('slide 1')"
    cells.append(
        v4.new_markdown_cell('Slide 1', metadata=dict(slide_start=True)))
    cells.append(v4.new_code_cell(source=code))
    cells.append(v4.new_markdown_cell('***', metadata=dict(slide_end=True)))
    code = "print('slide 2')"
    cells.append(
        v4.new_markdown_cell('Slide 2', metadata=dict(slide_start=True)))
    cells.append(v4.new_code_cell(source=code))
    cells.append(v4.new_markdown_cell('***', metadata=dict(slide_end=True)))
    notebook = v4.new_notebook(cells=cells)
    # put and run the notebook
    meta = om.jobs.put(notebook, 'testjob-html')
    om.jobs.run('testjob-html')
    # get results and export the result notebook as reveal.js slides HTML
    meta = om.jobs.metadata('testjob-html')
    resultnb_name = meta.attributes['job_results'][0]
    outpath = '/tmp/test.html'
    om.jobs.export(resultnb_name, outpath, format='slides')
    # the exported HTML file must exist on disk
    self.assertTrue(os.path.exists(outpath))
def build_notebook(self):
    """Build a reveal slides notebook in memory for use with tests.
    Overrides base in PreprocessorTestsBase"""
    outputs = [
        nbformat.new_output(output_type="stream", name="stdout", text="a")
    ]

    def markdown(slide_type):
        # empty markdown cell tagged with the given slideshow type
        metadata = {'slideshow': {'slide_type': slide_type}}
        return nbformat.new_markdown_cell(source="", metadata=metadata)

    def code(count):
        return nbformat.new_code_cell(
            source="", execution_count=count, outputs=outputs)

    cells = [
        code(1),
        markdown('slide'),
        code(2),
        markdown('slide'),
        markdown('subslide'),
        markdown('fragment'),
        code(1),
    ]
    return nbformat.new_notebook(cells=cells)
def merged_notebooks_in_dir(dirpath, filenames=None):
    '''Merge all notebooks in a directory into a single notebook.

    Parameters: dirpath is the directory to scan; filenames optionally
    restricts the candidate files (defaults to os.listdir(dirpath)).
    Returns the merged notebook serialized with nbformat.writes, or None
    when the directory holds no notebooks (or is a checkpoint directory).
    '''
    if filenames is None:
        filenames = os.listdir(dirpath)
    # The checkpoint test only involves dirpath, so hoist it out of the
    # per-file comprehension instead of re-evaluating it for every filename.
    if '.ipynb_checkpoints' in dirpath:
        return None
    fns = [
        '{}/{}'.format(dirpath, fn) for fn in filenames
        if fn.endswith('.ipynb')
    ]
    if not fns:
        return None
    merged = new_notebook()
    # Identify directory containing merged notebooks
    cell = '\n\n---\n\n# {}\n\n---\n\n'.format(dirpath)
    merged.cells.append(new_markdown_cell(cell))
    for fn in fns:
        with io.open(fn, 'r', encoding='utf-8') as f:
            nb = nbformat.read(f, as_version=4)
        # Identify filename of notebook
        cell = '\n\n---\n\n# {}\n\n---\n\n'.format(fn)
        merged.cells.append(new_markdown_cell(cell))
        merged.cells.extend(nb.cells)
    if not hasattr(merged.metadata, 'name'):
        merged.metadata.name = ''
    merged.metadata.name += "_merged"
    return nbformat.writes(merged)
def add_sec_label(cell: NotebookNode, nbname) -> Sequence[NotebookNode]:
    """Adds a Latex \\label{} under the chapter heading.

    This takes the first cell of a notebook, and expects it to be a Markdown
    cell starting with a level 1 heading. It inserts a label with the notebook
    name just underneath this heading.

    Raises NoHeader when the cell has no recognizable heading.
    """
    assert cell.cell_type == "markdown", cell.cell_type
    lines = cell.source.splitlines()
    # an empty cell has no heading to label (previously raised IndexError)
    if not lines:
        raise NoHeader
    if lines[0].startswith("#"):
        header_lines = 1
    elif len(lines) > 1 and lines[1].startswith("==="):
        # setext-style heading: title line underlined with '==='
        header_lines = 2
    else:
        raise NoHeader
    header = "\n".join(lines[:header_lines])
    intro_remainder = "\n".join(lines[header_lines:]).strip()
    res = [
        new_markdown_cell(header),
        # raw string: "\l" is an invalid escape sequence and triggers a
        # DeprecationWarning (SyntaxWarning in newer Pythons)
        new_latex_cell(r"\label{sec:%s}" % nbname),
    ]
    # keep the original cell's metadata on the heading cell
    res[0].metadata = cell.metadata
    if intro_remainder:
        res.append(new_markdown_cell(intro_remainder))
    return res
def add_sec_label(cell: NotebookNode, nbname) -> Sequence[NotebookNode]:
    """Adds a Latex \\label{} under the chapter heading.

    This takes the first cell of a notebook, and expects it to be a Markdown
    cell starting with a level 1 heading. It inserts a label with the notebook
    name just underneath this heading.

    Raises NoHeader when the cell has no recognizable heading.
    """
    assert cell.cell_type == 'markdown', cell.cell_type
    lines = cell.source.splitlines()
    # an empty cell has no heading to label (previously raised IndexError)
    if not lines:
        raise NoHeader
    if lines[0].startswith('# '):
        header_lines = 1
    elif len(lines) > 1 and lines[1].startswith('==='):
        # setext-style heading: title line underlined with '==='
        header_lines = 2
    else:
        raise NoHeader
    header = '\n'.join(lines[:header_lines])
    intro_remainder = '\n'.join(lines[header_lines:]).strip()
    res = [
        new_markdown_cell(header),
        # raw string: '\l' is an invalid escape sequence and triggers a
        # DeprecationWarning (SyntaxWarning in newer Pythons)
        new_latex_cell(r'\label{sec:%s}' % nbname),
    ]
    # keep the original cell's metadata on the heading cell
    res[0].metadata = cell.metadata
    if intro_remainder:
        res.append(new_markdown_cell(intro_remainder))
    return res
def build_notebook(self):
    """Build a reveal slides notebook in memory for use with tests."""
    stream = nbformat.new_output(output_type="stream", name="stdout", text="a")
    outputs = [stream]

    def code_cell(count):
        return nbformat.new_code_cell(
            source="", execution_count=count, outputs=outputs)

    def md_cell(kind):
        # empty markdown cell carrying the requested slideshow type
        return nbformat.new_markdown_cell(
            source="", metadata={"slideshow": {"slide_type": kind}})

    return nbformat.new_notebook(cells=[
        code_cell(1),
        md_cell("slide"),
        code_cell(2),
        md_cell("slide"),
        md_cell("subslide"),
        md_cell("fragment"),
        code_cell(1),
    ])
def convertNotebook(notebook):
    """Convert a Beaker notebook file to a Jupyter notebook file.

    Reads the JSON in *notebook*, maps Beaker cells to Jupyter cells, and
    writes the result next to the input with the extension replaced by
    '.ipynb'.
    """
    import os
    nb = new_notebook()
    with open(notebook, encoding='utf-8') as data_file:
        data = json.load(data_file)
    # the most frequent evaluator in the file decides the notebook kernel
    evaluators = list(
        (cell['evaluator']) for cell in data['cells'] if 'evaluator' in cell)
    kernel_name = max(evaluators,
                      key=evaluators.count) if evaluators else 'IPython'
    # these evaluators have no standalone Jupyter kernel; fall back to IPython
    if kernel_name in ['JavaScript', 'HTML', 'TeX']:
        kernel_name = 'IPython'
    if kernel_name == 'IPython':
        kernel_spec = {
            "kernelspec": {
                "display_name": "Python 2",
                "language": "python",
                "name": "python2"
            }
        }
    else:
        kernel_spec = {
            "kernelspec": {
                "display_name": kernel_name,
                "language": kernel_name.lower(),
                "name": kernel_name.lower()
            }
        }
    nb.metadata = kernel_spec
    for cell in data['cells']:
        if cell['type'] == 'code':
            metadata = {}
            if 'tags' in cell:
                tags = [cell['tags']]
                metadata = {"tags": tags}
            body = "\n".join(map(str, cell['input']['body']))
            if cell['evaluator'] != kernel_name:
                if cell['evaluator'] == 'TeX':
                    # TeX snippets become inline-math markdown cells
                    nb.cells.append(new_markdown_cell("${0}$".format(body)))
                else:
                    # foreign-kernel code is run through a %%magic cell
                    nb.cells.append(
                        new_code_cell(source='%%{0}\n{1}'.format(
                            cell['evaluator'].lower(), body),
                            metadata=metadata))
            else:
                nb.cells.append(new_code_cell(source=body, metadata=metadata))
        if cell['type'] == 'markdown':
            nb.cells.append(
                new_markdown_cell("\n".join(map(str, cell['body']))))
        if cell['type'] == 'section':
            nb.cells.append(
                new_markdown_cell(setHeader(cell['level'], cell['title'])))
    # os.path.splitext strips only the final extension; the previous
    # notebook.partition('.')[0] truncated any path containing an earlier
    # dot (e.g. './dir/nb.bkr' became '').
    nbformat.write(nb, os.path.splitext(notebook)[0] + '.ipynb')
def parseBkr(data):
    """Translate a parsed Beaker notebook dict into a Jupyter notebook node."""
    nb = new_notebook()
    # the most frequent evaluator decides which kernel the notebook uses
    evaluators = [c['evaluator'] for c in data['cells'] if 'evaluator' in c]
    kernel_name = max(evaluators,
                      key=evaluators.count) if evaluators else 'IPython'
    if kernel_name in ['JavaScript', 'HTML', 'TeX']:
        kernel_name = 'IPython'
    if kernel_name == 'IPython':
        nb.metadata = {
            "kernelspec": {
                "display_name": "Python 2",
                "language": "python",
                "name": "python2"
            }
        }
    else:
        nb.metadata = {
            "kernelspec": {
                "display_name": kernel_name,
                "language": kernel_name.lower(),
                "name": kernel_name.lower()
            }
        }
    for cell in data['cells']:
        if cell['type'] == 'code':
            metadata = {}
            if 'initialization' in cell:
                metadata['init_cell'] = True
            if 'tags' in cell:
                metadata['tags'] = [cell['tags']]
            body = getFixedCodeText(cell['input'])
            if cell['evaluator'] != kernel_name:
                if cell['evaluator'] == 'TeX':
                    # TeX snippets become inline-math markdown cells
                    nb.cells.append(new_markdown_cell("${0}$".format(body)))
                else:
                    # foreign-kernel code runs through a %%magic cell
                    magic_source = '%%{0}\n{1}'.format(
                        cell['evaluator'].lower(), body)
                    nb.cells.append(
                        new_code_cell(source=magic_source, metadata=metadata))
            else:
                nb.cells.append(new_code_cell(source=body, metadata=metadata))
        if cell['type'] == 'markdown':
            nb.cells.append(new_markdown_cell(getFixedCodeText(cell)))
        if cell['type'] == 'section':
            nb.cells.append(
                new_markdown_cell(setHeader(cell['level'], cell['title'])))
    return nb
def test_html_collapsible_headings(self):
    """Test exporter for inlining collapsible_headings"""
    code = 'a = range(1,10)'
    cells = []
    # alternate heading levels 1..3 with identical code cells
    for level in (1, 2, 3):
        cells.append(v4.new_markdown_cell(
            source='{} level {} heading'.format('#' * level, level)))
        cells.append(v4.new_code_cell(source=code))
    nb = v4.new_notebook(cells=cells)
    self.check_stuff_gets_embedded(
        nb, 'html_ch', to_be_included=['collapsible_headings'])
def write_plot_nb(plot_nbs, plot_qs, out_path):
    """Write one marking notebook combining every student's plot cells."""
    nb = nbf.new_notebook()
    # one scoring checklist block, repeated after each student's plots
    score_txt = '\n'.join(['Plot scores:'] + [f'* {pq} : ' for pq in plot_qs])
    for login, plot_nb in plot_nbs.items():
        nb.cells.append(nbf.new_markdown_cell(f'## {login}'))
        nb.cells.extend(plot_nb.cells)
        nb.cells.append(nbf.new_markdown_cell(score_txt))
    nb_dir = op.join(out_path, 'marking')
    if not op.isdir(nb_dir):
        os.makedirs(nb_dir)
    out_fname = op.join(nb_dir, 'plot_nb.ipynb')
    with open(out_fname, 'wt') as fobj:
        fobj.write(nbf.writes(nb))
def test_html_collapsible_headings(self):
    """Test exporter for inlining collapsible_headings"""
    code_cell_source = 'a = range(1,10)'
    cells = []
    for lvl in range(1, 4):
        heading = '{} level {} heading'.format('#' * lvl, lvl)
        cells.append(v4.new_markdown_cell(source=heading))
        cells.append(v4.new_code_cell(source=code_cell_source))
    nb = v4.new_notebook(cells=cells)
    self.check_stuff_gets_embedded(nb,
                                   'html_ch',
                                   to_be_included=['collapsible_headings'])
def test_html_collapsible_headings(self):
    """Test exporter for inlining collapsible_headings"""
    cells = []
    for lvl in (1, 2, 3):
        cells.append(v4.new_markdown_cell(
            source='{} level {} heading'.format('#' * lvl, lvl)))
        cells.append(v4.new_code_cell(source='a = range(1,10)'))
    nb = v4.new_notebook(cells=cells)

    def check(byte_string, root_node):
        # the exported HTML must inline the collapsible_headings extension
        assert b'collapsible_headings' in byte_string

    self.check_html(nb, 'html_ch', check_func=check)
def add_cell(self, cells, content, cell_type, metainfo):
    """Append a Jupyter cell built from *content* lines to *cells*.

    content: list of source lines; cell_type: 'code' or 'markdown';
    metainfo: cell metadata dict. Code cells are numbered with
    self.code_count.
    """
    # if a section consist of all report, report it as a markdown cell
    if not content:
        return
    if cell_type not in ('code', 'markdown'):
        env.logger.warning(
            f'Unrecognized cell type {cell_type}, code assumed.')
        # honor the warning: previously an unrecognized type fell through
        # to the markdown branch below despite claiming "code assumed"
        cell_type = 'code'
    if cell_type == 'code':
        cells.append(
            new_code_cell(
                # remove any trailing blank lines...
                source=''.join(content).strip(),
                execution_count=self.code_count,
                metadata=metainfo))
        self.code_count += 1
    elif metainfo.get('kernel', '') == 'Markdown':
        # markdown with inline expressions is rendered through a code cell
        # that expands `r ` backtick expressions via the %expand magic
        cells.append(
            new_code_cell(
                # remove any trailing blank lines...
                # (dropped a no-op f-prefix: the literal has no placeholders)
                source='%expand `r ` --in R\n' + ''.join(content).strip(),
                execution_count=self.code_count,
                metadata=metainfo))
        self.code_count += 1
    else:
        cells.append(
            new_markdown_cell(source=''.join(content).strip(),
                              metadata=metainfo))
def conf2nb(self):
    """Create a notebook (self.nb) from the notebook configuration self.conf.

    Cells are emitted in sorted key order. Each cell's source is either a
    literal string from the block configuration or, when the configured
    'source' names an attribute on den_blocks, the result of calling that
    generator function with the notebook and cell configuration.
    """
    # create notebook from notebook configuration
    # generate cell configuration from column types
    self._conf_cols2cells()
    # generate notebook from configuration
    den_nb_conf = self.conf
    nb_cells = []
    for cell in sorted(den_nb_conf['cells'].keys()):
        den_cell_type = den_nb_conf['cells'][cell]['cell']
        nb_cell_type = den_blocks.block_conf['cells'][den_cell_type][
            'nb_cell_type']
        cell_source = den_blocks.block_conf['cells'][den_cell_type][
            'source']
        # 'source' may name a generator function on den_blocks; otherwise it
        # is used verbatim as the cell source
        if hasattr(den_blocks, cell_source):
            source = getattr(den_blocks, cell_source)(
                den_nb_conf, den_nb_conf['cells'][cell])
        else:
            source = cell_source
        if (nb_cell_type == 'markdown'):
            nb_cells.append(new_markdown_cell(source=source))
        elif (nb_cell_type == 'code'):
            # code_folding metadata collapses the cell body by default
            nb_cells.append(
                new_code_cell(source=source, metadata={'code_folding': [0]}))
    self.nb = new_notebook(cells=nb_cells)
    return
def py2ipynb(input, output, cellmark_style, other_ignores=[]):
    """Converts a .py file to a V.4 .ipynb notebook using `parsePy` function

    :param input: Input .py filename
    :param output: Output .ipynb filename
    :param cellmark_style: Determines cell marker based on IDE, see parsePy
        documentation for values
    :param other_ignores: Other lines to ignore
    """
    # Build the notebook cells by parsing the input file
    cells = []
    for codecell, metadata, code in parsePy(input, cellmark_style,
                                            other_ignores):
        # each parsed chunk is either a code cell or a markdown cell
        make_cell = new_code_cell if codecell else new_markdown_cell
        cells.append(make_cell(source=code, metadata=metadata))
    # Assemble a V4 notebook from the cells extracted above
    kernelspec = {
        "display_name": "Python 3",
        "language": "python",
        "name": "python3",
    }
    nb0 = new_notebook(cells=cells, metadata={"kernelspec": kernelspec})
    with codecs.open(output, encoding="utf-8", mode="w") as f:
        nbformat.write(nb0, f, 4)
def test_very_long_cells(self):
    """
    Torture test that long cells do not cause issues
    """
    lorem_ipsum_text = textwrap.dedent("""\
      Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec
      dignissim, ipsum non facilisis tempus, dui felis tincidunt metus, nec
      pulvinar neque odio eget risus. Nulla nisi lectus, cursus suscipit
      interdum at, ultrices sit amet orci. Mauris facilisis imperdiet elit,
      vitae scelerisque ipsum dignissim non. Integer consequat malesuada
      neque sit amet pulvinar. Curabitur pretium ut turpis eget aliquet.
      Maecenas sagittis lacus sed lectus volutpat, eu adipiscing purus
      pulvinar. Maecenas consequat luctus urna, eget cursus quam mollis a.
      Aliquam vitae ornare erat, non hendrerit urna. Sed eu diam nec massa
      egestas pharetra at nec tellus. Fusce feugiat lacus quis urna
      sollicitudin volutpat. Quisque at sapien non nibh feugiat tempus ac
      ultricies purus.
      """)
    # flatten to one paragraph, then tile it into a very large markdown source
    lorem_ipsum_text = lorem_ipsum_text.replace("\n"," ") + "\n\n"
    large_lorem_ipsum_text = "".join([lorem_ipsum_text]*3000)
    notebook_name = "lorem_ipsum_long.ipynb"
    nb = v4.new_notebook(
        cells=[
            v4.new_markdown_cell(source=large_lorem_ipsum_text)
        ]
    )
    with TemporaryDirectory() as td:
        nbfile = os.path.join(td, notebook_name)
        with open(nbfile, 'w') as f:
            write(nb, f, 4)
        # exporting to LaTeX with the 'article' template must not blow up
        # and must produce non-empty output
        (output, resources) = LatexExporter(
            template_file='article').from_filename(nbfile)
        assert len(output) > 0
def details_notebook(type, uuid):
    """Serve a downloadable Jupyter notebook that reproduces the Vitessce
    visualization for one entity.

    NOTE(review): ``type`` shadows the builtin; kept to match the route
    signature.
    """
    if type not in entity_types:
        abort(404)
    client = get_client()
    entity = client.get_entity(uuid)
    vitessce_conf = client.get_vitessce_conf_cells_and_lifted_uuid(
        entity).vitessce_conf
    # without a usable Vitessce configuration there is nothing to visualize
    if (vitessce_conf is None
            or vitessce_conf.conf is None
            or vitessce_conf.cells is None):
        abort(404)
    nb = new_notebook()
    nb['cells'] = [
        # link back to the HTML details page (strip the .ipynb suffix)
        new_markdown_cell(f"""
Visualization for [{entity['hubmap_id']}]({request.base_url.replace('.ipynb','')})
""".strip()),
        # install and enable the vitessce notebook extension
        new_code_cell("""
!pip install vitessce==0.1.0a9
!jupyter nbextension install --py --sys-prefix vitessce
!jupyter nbextension enable --py --sys-prefix vitessce
""".strip()),
        new_code_cell('from vitessce import VitessceConfig')
    ] + vitessce_conf.cells + [new_code_cell('conf.widget()')]
    # served as an attachment so the browser downloads the .ipynb file
    return Response(
        response=nbformat.writes(nb),
        headers={
            'Content-Disposition':
                f"attachment; filename={entity['hubmap_id']}.ipynb"
        },
        mimetype='application/x-ipynb+json')
def create_singlechoice_cell(
    self,
    grade_id: str,
    student_choices: List[int],
    instructor_choices: List[int],
    points: int = 5,
) -> NotebookNode:
    """Build a markdown cell configured as an nbgrader single-choice task."""
    nbgrader_meta = {
        "grade": True,
        "grade_id": grade_id,
        "locked": False,
        "points": points,
        "schema_version": 3,
        "solution": True,
        "task": False,
    }
    extended_meta = {
        "type": "singlechoice",
        "choice": student_choices,
        "source": {
            "choice": instructor_choices
        },
    }
    cell = new_markdown_cell()
    cell.metadata = {
        "nbgrader": nbgrader_meta,
        "extended_cell": extended_meta,
    }
    cell.source = """
- correct answer
- wrong answer
"""
    return cell
def setUp(self):
    """Create notebook_dir/foo/testnb.ipynb used by the nbconvert API tests."""
    nbdir = self.notebook_dir
    if not os.path.isdir(pjoin(nbdir, 'foo')):
        subdir = pjoin(nbdir, 'foo')
        os.mkdir(subdir)

        # Make sure that we clean this up when we're done.
        # By using addCleanup this will happen correctly even if we fail
        # later in setUp.
        @self.addCleanup
        def cleanup_dir():
            shutil.rmtree(subdir, ignore_errors=True)
    # fixture notebook: one markdown cell plus one code cell with a stream
    # output and an image/png execute_result
    nb = new_notebook()
    nb.cells.append(new_markdown_cell(u'Created by test ³'))
    cc1 = new_code_cell(source=u'print(2*6)')
    cc1.outputs.append(new_output(output_type="stream", text=u'12'))
    cc1.outputs.append(new_output(output_type="execute_result",
                                  data={'image/png': png_green_pixel},
                                  execution_count=1,
                                  ))
    nb.cells.append(cc1)
    with io.open(pjoin(nbdir, 'foo', 'testnb.ipynb'), 'w',
                 encoding='utf-8') as f:
        write(nb, f, version=4)
    self.nbconvert_api = NbconvertAPI(self.request)
def test_very_long_cells(self):
    """
    Torture test that long cells do not cause issues
    """
    lorem_ipsum_text = textwrap.dedent("""\
      Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec
      dignissim, ipsum non facilisis tempus, dui felis tincidunt metus, nec
      pulvinar neque odio eget risus. Nulla nisi lectus, cursus suscipit
      interdum at, ultrices sit amet orci. Mauris facilisis imperdiet elit,
      vitae scelerisque ipsum dignissim non. Integer consequat malesuada
      neque sit amet pulvinar. Curabitur pretium ut turpis eget aliquet.
      Maecenas sagittis lacus sed lectus volutpat, eu adipiscing purus
      pulvinar. Maecenas consequat luctus urna, eget cursus quam mollis a.
      Aliquam vitae ornare erat, non hendrerit urna. Sed eu diam nec massa
      egestas pharetra at nec tellus. Fusce feugiat lacus quis urna
      sollicitudin volutpat. Quisque at sapien non nibh feugiat tempus ac
      ultricies purus.
      """)
    # flatten to one paragraph, then tile it into a very large markdown source
    lorem_ipsum_text = lorem_ipsum_text.replace("\n", " ") + "\n\n"
    large_lorem_ipsum_text = "".join([lorem_ipsum_text] * 3000)
    notebook_name = "lorem_ipsum_long.ipynb"
    nb = v4.new_notebook(
        cells=[v4.new_markdown_cell(source=large_lorem_ipsum_text)])
    with TemporaryDirectory() as td:
        nbfile = os.path.join(td, notebook_name)
        with open(nbfile, 'w') as f:
            write(nb, f, 4)
        # exporting to LaTeX must not blow up and must yield non-empty output
        (output, resources) = LatexExporter().from_filename(nbfile)
        assert len(output) > 0
def setUp(self):
    """Create foo/testnb.ipynb (markdown + code cell with outputs)."""
    nbdir = self.notebook_dir.name
    subdir = pjoin(nbdir, 'foo')
    if not os.path.isdir(subdir):
        os.mkdir(subdir)
    # fixture notebook: one markdown cell, one code cell carrying a stream
    # output and an image/png execute_result
    nb = new_notebook()
    nb.cells.append(new_markdown_cell(u'Created by test ³'))
    code_cell = new_code_cell(source=u'print(2*6)')
    code_cell.outputs.append(new_output(output_type="stream", text=u'12'))
    code_cell.outputs.append(
        new_output(
            output_type="execute_result",
            data={'image/png': png_green_pixel},
            execution_count=1,
        ))
    nb.cells.append(code_cell)
    nb_path = pjoin(subdir, 'testnb.ipynb')
    with io.open(nb_path, 'w', encoding='utf-8') as f:
        write(nb, f, version=4)
    self.nbconvert_api = NbconvertAPI(self.base_url())
def build_notebook(self, with_json_outputs=False):
    """Build a notebook in memory for use with preprocessor tests

    The notebook has one code cell carrying a fixed battery of stream and
    display_data outputs (the letter comments label each output) plus one
    markdown cell. With ``with_json_outputs=True`` four application/json
    display outputs are appended.
    """
    outputs = [
        nbformat.new_output("stream", name="stdout", text="a"),
        nbformat.new_output("display_data", data={'text/plain': 'b'}),
        nbformat.new_output("stream", name="stdout", text="c"),
        nbformat.new_output("stream", name="stdout", text="d"),
        nbformat.new_output("stream", name="stderr", text="e"),
        nbformat.new_output("stream", name="stderr", text="f"),
        nbformat.new_output("display_data", data={'image/png': 'Zw=='}),  # g
        nbformat.new_output("display_data",
                            data={'application/pdf': 'aA=='}),  # h
    ]
    if with_json_outputs:
        # exercise list, dict, string and float JSON payloads
        outputs.extend([
            nbformat.new_output(
                "display_data", data={'application/json': [1, 2, 3]}
            ),  # j
            nbformat.new_output(
                "display_data",
                data={'application/json': {'a': 1, 'c': {'b': 2}}}
            ),  # k
            nbformat.new_output(
                "display_data", data={'application/json': 'abc'}
            ),  # l
            nbformat.new_output(
                "display_data", data={'application/json': 15.03}
            ),  # m
        ])
    cells = [nbformat.new_code_cell(source="$ e $",
                                    execution_count=1,
                                    outputs=outputs),
             nbformat.new_markdown_cell(source="$ e $")]
    return nbformat.new_notebook(cells=cells)
async def test_contents_manager(fetch, serverapp, root_dir):
    "make sure ContentsManager returns right files (ipynb, bin, txt)."
    # fixture notebook: one markdown cell, one code cell with a stream output
    nb = new_notebook(cells=[
        new_markdown_cell(u'Created by test ³'),
        new_code_cell("print(2*6)", outputs=[
            new_output("stream", text="12"),
        ])
    ])
    root_dir.joinpath('testnb.ipynb').write_text(writes(nb, version=4))
    # leading 0xff guarantees the binary file is not valid UTF-8
    root_dir.joinpath('test.bin').write_bytes(b'\xff' + os.urandom(5))
    root_dir.joinpath('test.txt').write_text('foobar')
    # notebooks are served with their source intact
    r = await fetch('files/testnb.ipynb', method='GET')
    assert r.code == 200
    assert 'print(2*6)' in r.body.decode()
    # binary files come back verbatim as application/octet-stream
    r = await fetch('files/test.bin', method='GET')
    assert r.code == 200
    assert r.headers['content-type'] == 'application/octet-stream'
    assert r.body[:1] == b'\xff'
    assert len(r.body) == 6
    # text files are served as UTF-8 plain text
    r = await fetch('files/test.txt', method='GET')
    assert r.code == 200
    assert r.headers['content-type'] == 'text/plain; charset=UTF-8'
    assert r.body.decode() == 'foobar'
async def test_contents_manager(jp_fetch, jp_serverapp, jp_root_dir):
    """make sure ContentsManager returns right files (ipynb, bin, txt).

    Also test save file hooks."""
    # fixture notebook: one markdown cell, one code cell with a stream output
    nb = new_notebook(cells=[
        new_markdown_cell("Created by test ³"),
        new_code_cell(
            "print(2*6)",
            outputs=[
                new_output("stream", text="12"),
            ],
        ),
    ])
    jp_root_dir.joinpath("testnb.ipynb").write_text(writes(nb, version=4),
                                                    encoding="utf-8")
    # leading 0xff guarantees the binary file is not valid UTF-8
    jp_root_dir.joinpath("test.bin").write_bytes(b"\xff" + os.urandom(5))
    jp_root_dir.joinpath("test.txt").write_text("foobar")
    # notebooks are served with their source intact
    r = await jp_fetch("files/testnb.ipynb", method="GET")
    assert r.code == 200
    assert "print(2*6)" in r.body.decode("utf-8")
    # binary files come back verbatim as application/octet-stream
    r = await jp_fetch("files/test.bin", method="GET")
    assert r.code == 200
    assert r.headers["content-type"] == "application/octet-stream"
    assert r.body[:1] == b"\xff"
    assert len(r.body) == 6
    # text files are served as UTF-8 plain text
    r = await jp_fetch("files/test.txt", method="GET")
    assert r.code == 200
    assert r.headers["content-type"] == "text/plain; charset=UTF-8"
    assert r.body.decode() == "foobar"
def add_statistics(plan, file):
    """Append the standard per-plan statistics cells to a notebook JSON file.

    Reads the notebook at *file*, appends markdown/code cells that report
    trip statistics and maps for *plan*, and writes the file back in place.
    """
    md = nbf.new_markdown_cell
    code = nbf.new_code_cell
    cells = [
        md('# Viajes correctos vs Incidencias\n'
           '## Se reportaron viajes para el plan: {}'.format(plan)),
        code('show(f)'),
        code("msj_estadisticaplan('{}', viajes)".format(plan)),
        md('# Plan {} de distribución (Sin Modificaciones)'.format(plan)),
        code("an.getkm('plan', '{}')".format(plan)),
        code("geojsonio.embed(an.gettrips('plan', '{}')"
             ".to_geojson())".format(plan)),
        md('# Plan {} Modificado'.format(plan)),
        code("an.getkm('planm', '{}')".format(plan)),
        code("geojsonio.embed(an.gettrips('planm', '{}')"
             ".to_geojson())".format(plan)),
    ]
    # use a context manager instead of json.load(open(file)), which never
    # closed the file handle
    with open(file) as fh:
        noto = json.load(fh)
    noto['cells'] = noto['cells'] + cells
    with open(file, 'w') as f:
        json.dump(noto, f)
def test_checkpoints_follow_file(self):
    """Checkpoints must follow a file through rename and delete."""
    # Read initial file state
    orig = self.api.read('foo/a.ipynb')
    # Create a checkpoint of initial state
    r = self.api.new_checkpoint('foo/a.ipynb')
    cp1 = r.json()
    # Modify file and save
    nbcontent = json.loads(orig.text)['content']
    nb = from_dict(nbcontent)
    hcell = new_markdown_cell('Created by test')
    nb.cells.append(hcell)
    nbmodel = {'content': nb, 'type': 'notebook'}
    self.api.save('foo/a.ipynb', body=json.dumps(nbmodel))
    # Rename the file.
    self.api.rename('foo/a.ipynb', 'foo/z.ipynb')
    # Looking for checkpoints in the old location should yield no results.
    self.assertEqual(self.api.get_checkpoints('foo/a.ipynb').json(), [])
    # Looking for checkpoints in the new location should work.
    cps = self.api.get_checkpoints('foo/z.ipynb').json()
    self.assertEqual(cps, [cp1])
    # Delete the file.  The checkpoint should be deleted as well.
    self.api.delete('foo/z.ipynb')
    cps = self.api.get_checkpoints('foo/z.ipynb').json()
    self.assertEqual(cps, [])
def convert(source):
    """Convert a marked-up .py source file into a Jupyter .ipynb notebook.

    Blocks delimited by lines containing only triple double quotes become
    markdown cells; the remaining code is split into code cells on lines
    containing only '##'. The notebook is written next to the source with
    the extension replaced by .ipynb.
    """
    with open(source, 'r') as f:
        text = f.read()
    pattern1 = re.compile(r'(?s)(\n"""\n.*?\n"""\n)')
    pattern2 = re.compile(r'\n##\n')
    cells1 = re.split(pattern1, text)  # first stage - markdown cell, code cell
    cells = []  # notebook cells
    for c1 in cells1[1:]:  # except the first chunk
        if re.match(pattern1, c1):  # markdown cell
            # strip the leading '\n"""\n' and trailing '\n"""\n' delimiters
            cells.append(new_markdown_cell(c1[5:-5]))
        else:  # code cells
            cells2 = re.split(pattern2, c1)
            # drop a leading '##' marker left at the start of the chunk
            if cells2[0].startswith('##\n'):
                cells2[0] = cells2[0][3:]
            for c2 in cells2:  # second stage - code cell
                cells.append(new_code_cell(c2))
    nb = new_notebook(cells=cells, metadata={
        'language': 'python',
    })
    dest = source[:-3] + ".ipynb"
    with codecs.open(dest, encoding='utf-8', mode='w') as f:
        nbformat.write(nb, f, 4)
def test_htmltoc2(self):
    """Test exporter for adding table of contents"""
    source_cells = [
        v4.new_code_cell(source="a = 'world'"),
        v4.new_markdown_cell(source="# Heading"),
    ]
    nb = v4.new_notebook(cells=source_cells)
    self.check_stuff_gets_embedded(nb, 'html_toc', to_be_included=['toc2'])
def add_box(nb_folder, nb_path):
    """Add box with downloadable links.

    Inserts a markdown cell at the top of the notebook at *nb_path* linking
    to its .ipynb/.py source files and environment file, then writes the
    notebook back in place.
    """
    log.info(f"Adding box in {nb_path}")
    DOWNLOAD_CELL = """
<div class="alert alert-info">

**This is a fixed-text formatted version of a Jupyter notebook**

- **Source files:**
[{nb_filename}](../../_static/notebooks/{nb_folder}/{nb_filename}) |
[{py_filename}](../../_static/notebooks/{nb_folder}/{py_filename})
- **Environment:**
[env.yml](../../_static/notebooks/{nb_folder}/env.yml)
</div>
"""
    # add box
    nb_filename = nb_path.absolute().name
    py_filename = nb_filename.replace("ipynb", "py")
    ctx = dict(
        nb_folder=nb_folder,
        nb_filename=nb_filename,
        py_filename=py_filename,
    )
    strcell = DOWNLOAD_CELL.format(**ctx)
    rawnb = nbformat.read(nb_path, as_version=nbformat.NO_CONVERT)
    if "nbsphinx" not in rawnb.metadata:
        # mark as orphan so sphinx does not require it in a toctree
        # (was bool("true"), which is unconditionally True)
        rawnb.metadata["nbsphinx"] = {"orphan": True}
    rawnb.cells.insert(0, new_markdown_cell(strcell))
    nbformat.write(rawnb, nb_path)
def add_cell(cells, content, cell_type, cell_count, metainfo):
    """Append one notebook cell built from *content* lines to *cells*."""
    # a section made up entirely of report text produces no cell at all
    if not content:
        return
    if cell_type not in ('code', 'markdown'):
        env.logger.warning(
            f'Unrecognized cell type {cell_type}, code assumed.')
    # markdown lines must all carry the '#! ' prefix; otherwise demote the
    # whole section to a code cell
    if cell_type == 'markdown' and any(
            x.strip() and not x.startswith('#! ') for x in content):
        env.logger.warning(
            'Markdown lines not starting with #!, code cell assumed.')
        cell_type = 'code'
    if cell_type == 'markdown':
        # strip the '#! ' prefix from every line
        stripped = ''.join(x[3:] for x in content).strip()
        cells.append(new_markdown_cell(source=stripped, metadata=metainfo))
    else:
        cells.append(
            new_code_cell(
                # drop trailing blank lines from the code body
                source=''.join(content).strip(),
                execution_count=cell_count,
                metadata=metainfo))
def test_valid_notebook_without_extra_cells(self):
    """A notebook containing only ordinary cells must pass validation."""
    self.nb.cells = [new_markdown_cell()]
    # A plain markdown cell is not an "extra" cell, so this must not raise
    # ValidationError. Calling directly (instead of try/assert True/
    # except/assert False) lets an unexpected ValidationError surface with
    # its full traceback rather than an uninformative `assert False`.
    ValidateExtraCells().preprocess(self.nb, {})
def build_notebook(self, with_json_outputs=False):
    """Build a notebook in memory for use with preprocessor tests."""
    new_out = nbformat.new_output
    # fixed battery of outputs; trailing comments label each one
    outputs = [
        new_out("stream", name="stdout", text="a"),
        new_out("display_data", data={'text/plain': 'b'}),
        new_out("stream", name="stdout", text="c"),
        new_out("stream", name="stdout", text="d"),
        new_out("stream", name="stderr", text="e"),
        new_out("stream", name="stderr", text="f"),
        new_out("display_data", data={'image/png': 'Zw=='}),  # g
        new_out("display_data", data={'application/pdf': 'aA=='}),  # h
    ]
    if with_json_outputs:
        # list, dict, string and float JSON payloads (j, k, l, m)
        json_payloads = [
            [1, 2, 3],
            {'a': 1, 'c': {'b': 2}},
            'abc',
            15.03,
        ]
        outputs.extend(
            new_out("display_data", data={'application/json': payload})
            for payload in json_payloads)
    cells = [
        nbformat.new_code_cell(source="$ e $",
                               execution_count=1,
                               outputs=outputs),
        nbformat.new_markdown_cell(source="$ e $"),
    ]
    return nbformat.new_notebook(cells=cells)
def build_notebook(self):
    """Build a reveal slides notebook in memory for use with tests.
    Overrides base in PreprocessorTestsBase"""
    outputs = [
        nbformat.new_output(output_type="stream", name="stdout", text="a")
    ]

    def md_slide(slide_type):
        # empty markdown cell carrying the requested slideshow type
        return nbformat.new_markdown_cell(
            source="", metadata={'slideshow': {'slide_type': slide_type}})

    cells = [
        nbformat.new_code_cell(source="", execution_count=1, outputs=outputs),
        md_slide('slide'),
        nbformat.new_code_cell(source="", execution_count=2, outputs=outputs),
        md_slide('slide'),
        md_slide('subslide'),
    ]
    return nbformat.new_notebook(cells=cells)
def parse_notebooks(folder, url_docs, git_commit):
    """
    Modifies raw and html-fixed notebooks so they will not have broken links
    to other files in the documentation. Adds a box to the sphinx formatted
    notebooks with info and links to the *.ipynb and *.py files.
    """
    DOWNLOAD_CELL = """
<div class="alert alert-info">

**This is a fixed-text formatted version of a Jupyter notebook**

- Try online [![Binder](https://mybinder.org/badge.svg)](https://mybinder.org/v2/gh/gammapy/gammapy/{git_commit}?urlpath=lab/tree/{nb_filename})
- You can contribute with your own notebooks in this
[GitHub repository](https://github.com/gammapy/gammapy/tree/master/tutorials).
- **Source files:**
[{nb_filename}](../_static/notebooks/{nb_filename}) |
[{py_filename}](../_static/notebooks/{py_filename})
</div>
"""
    for nbpath in list(folder.glob("*.ipynb")):
        if str(folder) == "notebooks":
            # add binder cell
            nb_filename = str(nbpath).replace("notebooks/", "")
            py_filename = nb_filename.replace("ipynb", "py")
            ctx = dict(
                nb_filename=nb_filename,
                py_filename=py_filename,
                git_commit=git_commit,
            )
            strcell = DOWNLOAD_CELL.format(**ctx)
            rawnb = nbformat.read(str(nbpath), as_version=nbformat.NO_CONVERT)
            if "nbsphinx" not in rawnb.metadata:
                # mark as orphan so sphinx does not require it in a toctree
                # (was bool("true"), which is unconditionally True)
                rawnb.metadata["nbsphinx"] = {"orphan": True}
            rawnb.cells.insert(0, new_markdown_cell(strcell))
            # add latex format: double the $ delimiters so the fixed-text
            # HTML renders latex outputs as display math
            for cell in rawnb.cells:
                if "outputs" in cell.keys():
                    for output in cell["outputs"]:
                        if output["output_type"] == "execute_result":
                            if "text/latex" in output["data"].keys():
                                output["data"]["text/latex"] = output["data"][
                                    "text/latex"
                                ].replace("$", "$$")
            nbformat.write(rawnb, str(nbpath))
        # modif links to rst /html doc files
        txt = nbpath.read_text(encoding="utf-8")
        if str(folder) == "notebooks":
            repl = r"..\/\1rst\2"
        else:
            repl = r"..\/..\/\1html\2"
        txt = re.sub(
            pattern=url_docs + r"(.*?)html(\)|#)",
            repl=repl,
            string=txt,
            flags=re.M | re.I,
        )
        nbpath.write_text(txt, encoding="utf-8")
def write_plot_nb(nb_fname, out_path=None, exe_path=None):
    """Execute a notebook, dump its plot outputs to files, and write a
    companion '<name>_plots.ipynb' that shows each plot with a marking cell.

    out_path and exe_path default to the notebook's own directory.
    """
    nb_dir = op.dirname(nb_fname)
    nb_base = op.splitext(op.basename(nb_fname))[0]
    out_path = nb_dir if out_path is None else out_path
    exe_path = nb_dir if exe_path is None else exe_path
    # Execute
    nb = jupytext.read(nb_fname)
    ex_nb = execute_nb(nb, exe_path)
    # Write plots as maybe SVG
    # NOTE(review): files get a .png suffix although the comment above says
    # the payload may be SVG — confirm get_plot's output format.
    cell_plots = [get_plot(c) for c in ex_nb['cells']]
    plot_fnames = []
    for i, p in enumerate(cell_plots):
        if p is None:
            continue
        out_fname = op.join(out_path, f'{nb_base}_plot_{i:02d}.png')
        with open(out_fname, 'wb') as fobj:
            fobj.write(p)
        plot_fnames.append(out_fname)
    # Make, write new notebook.
    plot_nb = nbf.new_notebook()
    cells = plot_nb['cells']
    ncc = nbf.new_code_cell
    cells.append(ncc('plot_marks = []'))
    # Two cells per plot.
    # First cell displays plot
    # Second has 'plot_marks.append(None)`
    for plot_fname in plot_fnames:
        # URL-quote the filename so it is safe inside the markdown image link
        img_url = urlq(op.basename(plot_fname))
        cells.append(nbf.new_markdown_cell(f'![]({img_url})'))
        cells.append(ncc('plot_marks.append(None)'))
    cells.append(ncc('assert None not in plot_marks'))
    cells.append(ncc('for mark in plot_marks:\n print(mark)'))
    out_fname = op.join(out_path, f'{nb_base}_plots.ipynb')
    nbformat.write(plot_nb, out_fname)
def setUp(self):
    """Create a 'foo' subdirectory holding a small test notebook."""
    rootdir = self.root_dir
    subdir = pjoin(rootdir, 'foo')
    if not os.path.isdir(subdir):
        os.mkdir(subdir)

        # Make sure that we clean this up when we're done.
        # By using addCleanup this will happen correctly even if we fail
        # later in setUp.
        @self.addCleanup
        def cleanup_dir():
            shutil.rmtree(subdir, ignore_errors=True)

    # One markdown cell plus one code cell with stream + image outputs.
    nb = new_notebook()
    nb.cells.append(new_markdown_cell(u'Created by test ³'))
    code_cell = new_code_cell(source=u'print(2*6)')
    code_cell.outputs.append(new_output(output_type="stream", text=u'12'))
    code_cell.outputs.append(new_output(
        output_type="execute_result",
        data={'image/png': png_green_pixel},
        execution_count=1,
    ))
    nb.cells.append(code_cell)

    with io.open(pjoin(subdir, 'testnb.ipynb'), 'w',
                 encoding='utf-8') as f:
        write(nb, f, version=4)

    self.nbconvert_api = NbconvertAPI(self.request)
def test_htmltoc2(self):
    """Test exporter for adding table of contents"""
    cells = [
        v4.new_code_cell(source="a = 'world'"),
        v4.new_markdown_cell(source="# Heading"),
    ]
    notebook = v4.new_notebook(cells=cells)
    # The exported output must reference the toc2 machinery.
    self.check_stuff_gets_embedded(
        notebook, 'html_toc', to_be_included=['toc2'])
def add_markdown_cell(self, path):
    """Append a test markdown cell to the notebook at *path* and save it."""
    # Load and update
    model = self.contents.get(path=path)
    marker_cell = new_markdown_cell('Created by test: ' + path)
    model['content'].cells.append(marker_cell)
    # Save and checkpoint again.
    self.contents.save(model, path=path)
    return model
def test_embedhtml(self):
    """Test exporter for embedding images into HTML"""
    image_md = "![testimage]({})".format(path_in_data('icon.png'))
    cells = [
        v4.new_code_cell(source="a = 'world'"),
        v4.new_markdown_cell(source=image_md),
    ]
    notebook = v4.new_notebook(cells=cells)
    # Embedded images show up as base64 payloads in the exported HTML.
    self.check_stuff_gets_embedded(
        notebook, 'html_embed', to_be_included=['base64'])
def test_htmltoc2(self):
    """Test exporter for adding table of contents"""
    cells = [
        v4.new_code_cell(source="a = 'world'"),
        v4.new_markdown_cell(source="# Heading"),
    ]
    notebook = v4.new_notebook(cells=cells)

    def check(byte_string, root_node):
        # The rendered HTML must reference the toc2 machinery.
        assert b'toc2' in byte_string

    self.check_html(notebook, 'html_toc', check_func=check)
def test_present_markdown_cell():
    """present_value renders a markdown cell as prefixed lines."""
    cell = v4.new_markdown_cell(source='# Heading\n\n*some markdown*')
    lines = pp.present_value('+ ', cell)
    joined = '\n'.join(lines)
    # First line is blank, then the cell header; every non-empty line
    # carries the prefix.
    assert lines[0] == ''
    assert lines[1] == '+ markdown cell:'
    for line in lines:
        if line:
            assert line.startswith('+ ')
    assert 'source:' in joined
    assert '# Heading' in joined
    assert '' in lines
    assert '*some markdown*' in joined
def test_htmltoc2(self):
    """Test exporter for adding table of contents"""
    with self.create_temp_cwd():
        cells = [
            v4.new_code_cell(source="a = 'world'"),
            v4.new_markdown_cell(source="# Heading"),
        ]
        nb = v4.new_notebook(cells=cells)
        with io.open("notebook2.ipynb", "w", encoding="utf-8") as f:
            write(nb, f, 4)
        # Run the CLI exporter and check the output file appears.
        self.nbconvert('--to html_toc "notebook2"')
        assert os.path.isfile("notebook2.html")
def build_notebook(self):
    """Build the base notebook plus a few effectively-empty cells."""
    notebook = super(TestRegexRemove, self).build_notebook()
    # Cells whose sources are empty or whitespace-only, to be removed
    # by the regex preprocessor under test.
    empties = [
        nbformat.new_code_cell(''),
        nbformat.new_markdown_cell(' '),
        nbformat.new_raw_cell('\n'),
        nbformat.new_raw_cell('\t'),
    ]
    notebook.cells.extend(empties)
    return notebook
def test_add_sec_label():
    """add_sec_label splits a heading cell and inserts a raw label cell."""
    # ATX-style heading followed by body text: heading, label, body.
    atx_sample = "# Foo\n\nBar"
    res = latex.add_sec_label(new_markdown_cell(atx_sample), '05-test')
    assert [c.cell_type for c in res] == ['markdown', 'raw', 'markdown']
    assert res[0].source.strip() == '# Foo'
    assert res[1].source.strip() == '\\label{sec:05-test}'

    # Setext-style (underlined) heading with no body: heading, label.
    setext_sample = "Foo\n===\n"
    res = latex.add_sec_label(new_markdown_cell(setext_sample), '05-test')
    assert [c.cell_type for c in res] == ['markdown', 'raw']
    assert res[0].source.strip() == 'Foo\n==='
    assert res[1].source.strip() == '\\label{sec:05-test}'
def parseBkr(data):
    """Convert a Beaker notebook dict into a Jupyter NotebookNode.

    The kernel is chosen as the most common evaluator among the cells
    (defaulting to IPython; JavaScript/HTML/TeX are folded into IPython).
    Code cells for a different evaluator become cell-magic code cells,
    except TeX input which becomes markdown math.
    """
    nb = new_notebook()
    # Pick the dominant evaluator as the notebook kernel.
    evaluators = list((cell['evaluator'])
                      for cell in data['cells'] if 'evaluator' in cell)
    kernel_name = max(evaluators, key=evaluators.count) if evaluators else 'IPython'
    if kernel_name in ['JavaScript', 'HTML', 'TeX']:
        kernel_name = 'IPython'

    if kernel_name == 'IPython':
        kernel_spec = {"kernelspec": {
            "display_name": "Python 2",
            "language": "python",
            "name": "python2"
        }}
    else:
        kernel_spec = {"kernelspec": {
            "display_name": kernel_name,
            "language": kernel_name.lower(),
            "name": kernel_name.lower()
        }}
    nb.metadata = kernel_spec

    for cell in data['cells']:
        if cell['type'] == 'code':
            metadata = {}
            if 'initialization' in cell:
                metadata['init_cell'] = True
            if 'tags' in cell:
                metadata['tags'] = [cell['tags']]
            # BUG FIX: use .get() — the evaluator scan above tolerates
            # cells without an 'evaluator' key, but indexing here raised
            # KeyError for them. Such cells now fall through to the
            # plain-code branch.
            evaluator = cell.get('evaluator')
            if evaluator is not None and evaluator != kernel_name:
                if evaluator == 'TeX':
                    nb.cells.append(new_markdown_cell(
                        "${0}$".format(getFixedCodeText(cell['input']))))
                else:
                    nb.cells.append(new_code_cell(
                        source='%%{0}\n{1}'.format(
                            evaluator.lower(),
                            getFixedCodeText(cell['input'])),
                        metadata=metadata))
            else:
                nb.cells.append(new_code_cell(
                    source=getFixedCodeText(cell['input']),
                    metadata=metadata))
        if cell['type'] == 'markdown':
            nb.cells.append(new_markdown_cell(getFixedCodeText(cell)))
        if cell['type'] == 'section':
            nb.cells.append(new_markdown_cell(setHeader(cell['level'],
                                                        cell['title'])))
    return nb
def lines_to_notebook(lines, name=None):
    """
    Convert the lines of an m file into an IPython notebook

    Parameters
    ----------
    lines : list
        A list of strings. Each element is a line in the m file

    Returns
    -------
    notebook : an IPython NotebookNode class instance, containing the
        information required to create a file
    """
    source = []
    md = np.empty(len(lines), dtype=object)
    new_cell = np.empty(len(lines), dtype=object)
    for idx, l in enumerate(lines):
        new_cell[idx], md[idx], this_source = format_line(l)
        # Transitions between markdown and code and vice-versa merit a new
        # cell, even if no newline, or "%%" is found. Make sure not to do this
        # check for the very first line!
        # BUG FIX: was `idx > 1`, which also skipped the transition check for
        # the second line (idx == 1); only idx == 0 has no predecessor.
        if idx > 0 and not new_cell[idx]:
            if md[idx] != md[idx - 1]:
                new_cell[idx] = True
        source.append(this_source)

    # This defines the breaking points between cells:
    # BUG FIX: terminate with len(source) instead of -1. Slicing with -1 as
    # the end index silently dropped the last line from the final cell.
    new_cell_idx = np.hstack([np.where(new_cell)[0], len(source)])

    # Listify the sources:
    cell_source = [source[new_cell_idx[i]:new_cell_idx[i + 1]]
                   for i in range(len(new_cell_idx) - 1)]
    cell_md = [md[new_cell_idx[i]] for i in range(len(new_cell_idx) - 1)]
    cells = []

    # Append the notebook with loading matlab magic extension
    notebook_head = "import pymatbridge as pymat\n" + "ip = get_ipython()\n" \
        + "pymat.load_ipython_extension(ip)"
    cells.append(nbformat.new_code_cell(notebook_head))  # , language='python'))

    for cell_idx, cell_s in enumerate(cell_source):
        if cell_md[cell_idx]:
            cells.append(nbformat.new_markdown_cell(cell_s))
        else:
            # Non-markdown cells run through the %%matlab cell magic.
            cell_s.insert(0, '%%matlab\n')
            cells.append(nbformat.new_code_cell(cell_s))  # , language='matlab'))

    # ws = nbformat.new_worksheet(cells=cells)
    notebook = nbformat.new_notebook(cells=cells)
    return notebook
def test_preprocessor_pymarkdown():
    """Test python markdown preprocessor."""
    # check import shortcut
    from jupyter_contrib_nbextensions.nbconvert_support import PyMarkdownPreprocessor  # noqa
    cells = [
        nbf.new_code_cell(source="a = 'world'"),
        nbf.new_markdown_cell(source="Hello {{ a }}",
                              metadata={"variables": {" a ": "world"}}),
    ]
    notebook_node = nbf.new_notebook(cells=cells)
    body, resources = export_through_preprocessor(
        notebook_node, PyMarkdownPreprocessor, RSTExporter, 'rst')
    # The {{ a }} placeholder must be substituted in the output.
    expected = 'Hello world'
    assert_in(expected, body, 'first cell should contain {}'.format(expected))
def create_locked_cell(source, cell_type, grade_id):
    """Build a locked nbgrader cell of the requested type."""
    factories = {
        "markdown": new_markdown_cell,
        "code": new_code_cell,
    }
    if cell_type not in factories:
        raise ValueError("invalid cell type: {}".format(cell_type))
    cell = factories[cell_type](source=source)
    # Mark the cell read-only for nbgrader and attach its id.
    cell.metadata.nbgrader = {
        "locked": True,
        "grade_id": grade_id,
    }
    return cell
def test_save(self):
    """Saving an appended markdown cell round-trips through the API."""
    nbcontent = json.loads(self.api.read('foo/a.ipynb').text)['content']
    nb = from_dict(nbcontent)
    nb.cells.append(new_markdown_cell(u'Created by test ³'))

    payload = json.dumps({'content': nb, 'type': 'notebook'})
    self.api.save('foo/a.ipynb', body=payload)

    # Re-read and verify the new cell survived the round trip.
    saved_content = self.api.read('foo/a.ipynb').json()['content']
    saved_nb = from_dict(saved_content)
    self.assertEqual(saved_nb.cells[0].source, u'Created by test ³')
def convert_exercise_to_cells(exercise):
    """
    Generates a header, exercise text and code cell from an Exercise class

    Returns a two-element list: a markdown cell with a heading plus the
    exercise docstring, and a code cell that runs the exercise via the
    %%run_exercise cell magic.
    """
    cells = []
    markdown_text = "## " + exercise.name + "\n"
    # BUG FIX: guard the docstring BEFORE dedenting. The original
    # `textwrap.dedent(exercise.__doc__) or ""` raised on a missing
    # (None) docstring, so the `or ""` fallback never applied.
    markdown_text += textwrap.dedent(exercise.__doc__ or "")
    cells.append(nbf.new_markdown_cell(markdown_text))

    code_cell_text = "%%run_exercise " + exercise.name + "\n"
    code_cell_text += textwrap.dedent(exercise.cell_code)
    cells.append(nbf.new_code_cell(code_cell_text))
    return cells
def test_pretty_print_markdown_cell():
    """pretty_print_value_at renders a markdown cell with the given prefix."""
    cell = v4.new_markdown_cell(source='# Heading\n\n*some markdown*')
    out = StringIO()
    pp.pretty_print_value_at(cell, "/cells/0", "+", out)
    text = out.getvalue()
    lines = text.splitlines()
    # Header line first; every non-empty line keeps the prefix.
    assert lines[0] == '+markdown cell:'
    for line in lines:
        if line:
            assert line.startswith('+')
    assert 'source:' in text
    assert '+ # Heading' in text
    assert '+ ' in lines
    assert '+ *some markdown*' in text
def test_checkpoint_all(self):
    """
    Test that checkpoint_all correctly makes a checkpoint for all files.
    """
    paths = populate(self.contents)
    original_content_minus_trust = {
        # Remove metadata that we expect to have dropped
        path: strip_transient(self.contents.get(path)['content'])
        for path in paths
    }
    original_cps = {}
    for path in paths:
        # Create a checkpoint, then update the file.
        original_cps[path] = self.contents.create_checkpoint(path)
        self.add_markdown_cell(path)
    # Verify that we still have the old version checkpointed.
    cp_content = {
        path: self.checkpoints.get_notebook_checkpoint(
            cp['id'], path,
        )['content']
        for path, cp in iteritems(original_cps)
    }
    self.assertEqual(original_content_minus_trust, cp_content)
    # Checkpoint everything again, now that each file has been modified.
    new_cps = checkpoint_all(
        self.checkpoints.db_url,
        self.td.name,
        self.checkpoints.user_id,
    )
    new_cp_content = {
        path: self.checkpoints.get_notebook_checkpoint(
            cp['id'], path,
        )['content']
        for path, cp in iteritems(new_cps)
    }
    for path, new_content in iteritems(new_cp_content):
        old_content = original_content_minus_trust[_norm_unicode(path)]
        # The new checkpoint should be the old content plus exactly the
        # markdown cell appended by add_markdown_cell above.
        self.assertEqual(
            new_content['cells'][:-1],
            old_content['cells'],
        )
        self.assertEqual(
            new_content['cells'][-1],
            new_markdown_cell('Created by test: ' + _norm_unicode(path)),
        )
def test_run_nb(self):
    """Test %run notebook.ipynb"""
    from nbformat import v4, writes
    cells = [
        v4.new_markdown_cell("The Ultimate Question of Everything"),
        v4.new_code_cell("answer=42"),
    ]
    nb_text = writes(v4.new_notebook(cells=cells), version=4)
    self.mktmp(nb_text, ext='.ipynb')

    # %run should execute the notebook's code cells in the user namespace.
    _ip.magic("run %s" % self.fname)
    nt.assert_equal(_ip.user_ns['answer'], 42)
def create_grade_and_solution_cell(source, cell_type, grade_id, points):
    """Build a cell marked as both an nbgrader solution and a graded cell."""
    factories = {
        "markdown": new_markdown_cell,
        "code": new_code_cell,
    }
    if cell_type not in factories:
        raise ValueError("invalid cell type: {}".format(cell_type))
    cell = factories[cell_type](source=source)
    # Solution + autograded, with id and point value attached.
    cell.metadata.nbgrader = {
        "solution": True,
        "grade": True,
        "grade_id": grade_id,
        "points": points,
    }
    return cell
def create_solution_cell(source, cell_type, grade_id, schema_version=1):
    """Build an nbgrader solution cell (ungraded, unlocked)."""
    factories = {
        "markdown": new_markdown_cell,
        "code": new_code_cell,
    }
    if cell_type not in factories:
        raise ValueError("invalid cell type: {}".format(cell_type))
    cell = factories[cell_type](source=source)
    # Solution cell: not graded, not locked, with id and schema version.
    cell.metadata.nbgrader = {
        "solution": True,
        "grade_id": grade_id,
        "grade": False,
        "locked": False,
        "schema_version": schema_version,
    }
    return cell
def setup_class(cls):
    """Make a test notebook. Borrowed from nbconvert test. Assumes the
    class teardown will clean it up in the end."""
    super(BundleAPITest, cls).setup_class()
    nbdir = cls.notebook_dir.name

    # One markdown cell plus one code cell with a stream output.
    code_cell = new_code_cell(source=u'print(2*6)')
    code_cell.outputs.append(new_output(output_type="stream", text=u'12'))
    nb = new_notebook()
    nb.cells.extend([new_markdown_cell(u'Created by test'), code_cell])

    with io.open(pjoin(nbdir, 'testnb.ipynb'), 'w',
                 encoding='utf-8') as f:
        write(nb, f, version=4)