def main():
    """Strip outputs from each notebook named on the command line.

    For every path in ``sys.argv[1:]`` the notebook is parsed, its outputs
    are removed via ``remove_outputs``, and the result is written next to
    the original as ``<base>-no-output<ext>``.
    """
    for fname in sys.argv[1:]:
        # Notebook files are JSON and therefore UTF-8; read with an explicit
        # encoding so the locale default cannot corrupt the parse. The
        # context managers also close the handles, which the original left
        # open until interpreter exit (the write was never flushed reliably).
        with io.open(fname, 'r', encoding='utf8') as fin:
            nb = reads(fin.read())
        remove_outputs(nb)
        base, ext = os.path.splitext(fname)
        new_ipynb = "%s-no-output%s" % (base, ext)
        with io.open(new_ipynb, 'w', encoding='utf8') as fout:
            fout.write(writes(nb))
def py2ipynb_default(input, output):
    """Convert a ``# <codecell>``-annotated Python script to a v4 notebook.

    Reads *input*, appends a sentinel markdown cell, parses the text with
    the v3 py-reader, upgrades the result to v4, and writes the notebook
    JSON to *output*.
    """
    with open(input) as src:
        code = src.read()
    # Sentinel cell: if it appears in the notebook, reads_py() worked.
    code += """
# <markdowncell>
# If you can read this, reads_py() is no longer broken!
"""
    notebook = v4.upgrade(v3.reads_py(code))  # parse as v3, then upgrade to v4
    with open(output, "w") as dst:
        dst.write(v4.writes(notebook) + "\n")
def tester(import_list_dict):
    """Round-trip each code sample through a notebook and check depfinder.

    Parameters
    ----------
    import_list_dict : list of dict
        Each dict carries a 'code' string and a 'targets' mapping with the
        dependencies depfinder is expected to report for that code.
    """
    # http://nbviewer.ipython.org/gist/fperez/9716279
    for import_dict in import_list_dict:
        code = import_dict['code']
        target = import_dict['targets']
        nb = v4.new_notebook()
        nb['cells'].extend([v4.new_code_cell(code)])
        # mkstemp creates the file atomically and hands us the name. The
        # original used NamedTemporaryFile(...).name, which deletes the file
        # as soon as the object is collected and then reuses the stale name
        # (racy, and broken on Windows where the open handle blocks reuse).
        fd, fname = tempfile.mkstemp(suffix='.ipynb')
        try:
            with os.fdopen(fd, 'w') as f:
                f.write(v4.writes(nb))
            # parse the notebook!
            assert target == depfinder.notebook_path_to_dependencies(fname)
        finally:
            # Clean up even when the assertion fails.
            os.remove(fname)
def model_to_files(model):
    """
    Converts a IPython notebook model to a github.Gist `files` dict.

    Parameters
    __________
    model : dict
        Notebook model as specified by the NotebookManager. There is an
        additional `__files` dict of the form {filename: file_content}

    Returns
    -------
    files : dict
        {filename: content}

    Note: changed files dict values to be strings.
    """
    # Serialized notebook body goes in under the model's own name.
    files = {model['name']: current.writes(model['content'])}
    # Merge in any extra attachments carried alongside the notebook.
    for extra_name, extra_content in model.get('__files', {}).items():
        files[extra_name] = extra_content
    return files
def test_multiple_code_cells():
    """Build one notebook holding every sample snippet and check that
    depfinder reports the union of all expected dependencies."""
    nb = v4.new_notebook()
    targets = defaultdict(set)
    import_list_dict = complex_imports + relative_imports + simple_imports
    # http://nbviewer.ipython.org/gist/fperez/9716279
    for import_dict in import_list_dict:
        code = import_dict['code']
        target = import_dict['targets']
        nb['cells'].extend([v4.new_code_cell(code)])
        # Accumulate the expected dependencies across all cells.
        for k, v in target.items():
            targets[k].update(set(v))
    # turn targets into a dict of sorted lists
    targets = {k: sorted(list(v)) for k, v in targets.items()}
    # mkstemp avoids the NamedTemporaryFile(...).name race (the original name
    # could be recycled once the handle was collected) and the try/finally
    # removes the temp file, which the original leaked on every run.
    fd, fname = tempfile.mkstemp(suffix='.ipynb')
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(v4.writes(nb))
        print('temp file name = %s' % fname)
        # parse the notebook!
        assert targets == depfinder.notebook_path_to_dependencies(fname)
    finally:
        os.remove(fname)
"language": "python", "name": "python3"}, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3}, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3"}} for cell in nb.cells: if 'outputs' in cell: cell['outputs'] = [] if 'execution_count' in cell: cell['execution_count'] = None if 'metadata' in cell: cell['metadata'] = {} return nb if __name__ == '__main__': nb = v4.reads(sys.stdin.read()) nb = strip_output(nb) output = v4.writes(nb) if type(output) == str and PY2: output = output.encode('utf-8') sys.stdout.write(output)
This does mostly the same thing as the `Clear All Output` command in the
notebook UI.

Adapted from https://gist.github.com/minrk/6176788 to work with git filter
driver
https://github.com/petered/plato/blob/fb2f4e252f50c79768920d0e47b870a8d799e92b/notebooks/config/strip_notebook_output
"""
import sys

# You may need to do this for your script to work with GitX or Tower:
# sys.path.append("/Users/chris/anaconda/envs/conda/lib/python2.7/site-packages")
try:
    from nbformat import v4
except ImportError:
    raise Exception("Failed to import the latest IPython while trying to strip output "
                    "from your notebooks. Either run venv/bin/activate to enter your virtual env, or update "
                    "the IPython version on your machine (sudo pip install -U ipython)")

from nbconvert.preprocessors.clearoutput import ClearOutputPreprocessor


def strip_output(nb):
    """strip the outputs from a notebook object

    Delegates to nbconvert's ClearOutputPreprocessor and returns the
    processed notebook object.
    """
    stripout = ClearOutputPreprocessor()
    # Second argument is the preprocessor's (empty) resources dict; the
    # returned resources are ignored here.
    nb, res = stripout.preprocess(nb, {})
    return nb


if __name__ == '__main__':
    # git filter-driver style: notebook JSON on stdin, stripped JSON on stdout.
    nb = v4.reads(sys.stdin.read())
    nb = strip_output(nb)
    sys.stdout.write(v4.writes(nb))
def do_stripping():
    """Read notebook JSON from stdin, drop its outputs, dump it to stdout."""
    raw = sys.stdin.read()
    stripped = strip_output(v4.reads(raw))
    sys.stdout.write(v4.writes(stripped))
def mainfunction(text):
    """
    Main function. Calls all other functions, depending on whether the macro input
    is in python or c++. It adds the header information. Also, it adds a cell that
    draws all canvases. The working text is then converted to a version 3 jupyter
    notebook, subsequently updated to a version 4. Then, metadata associated with
    the language the macro is written in is attached to the notebook. Finally the
    notebook is executed and output in both HTML and Jupyter notebook formats.

    NOTE(review): relies on module-level state (extension, name, description,
    date, author, outdir, outname, starttime) and on helper functions defined
    elsewhere in this file.
    """
    ## Modify text from macros to suit a notebook
    if extension in ("C", "c", "cpp", "C++", "cxx"):
        main, helpers, rest = split(text)
        main, addition, keepfunction = processmain(main)
        if not keepfunction:
            # Remove function, Unindent, and convert comments to Markdown cells
            main = cppcomments(unindenter(cppfunction(main)))
        rest = cppcomments(rest)  # Convert top level code comments to Markdown cells
        ## Construct text by starting with top level code, then the helper
        ## functions, and finally the main function.
        text = rest
        for helper in helpers:
            text += "\n# <markdowncell>\n A helper function is created: \n# <codecell>\n%%cpp -d\n"
            text += helper
        text += "\n# <codecell>\n"
        if keepfunction:
            text += "%%cpp -d\n"
        text += main
        if addition:
            text += addition
    if extension == "py":
        text = pythoncomments(text)  # Convert comments into Markdown cells
    ## Add the title and header of the notebook
    text = "# <markdowncell> \n# # %s\n# %s# \n# This notebook tutorial was automatically generated from the macro found in the ROOT repository on %s.\n# **Author:** %s \n# <codecell>\n%s" % (
        name.title(), description, date, author, text)
    ## Add cell at the end of the notebook that draws all the canvases.
    ## Add a Markdown cell before explaining it.
    # BUG FIX: the original tested `extension == ("C" or "c" or "cpp" or "c++")`,
    # which evaluates to `extension == "C"` and silently skipped the draw cell
    # for "c", "cpp" and "c++" macros. Use a membership test instead.
    if extension in ("C", "c", "cpp", "c++"):
        text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\ngROOT->GetListOfCanvases()->Draw()"
    if extension == "py":
        text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\nimport ROOT \ngROOT.GetListOfCanvases().Draw()"
    ## Create a notebook from the working text
    nbook = v3.reads_py(text)
    nbook = v4.upgrade(nbook)  # Upgrade v3 to v4
    ## Load notebook string into json format, essentially creating a dictionary
    json_data = json.loads(v4.writes(nbook))
    # add the corresponding metadata
    if extension == "py":
        json_data[u'metadata'] = {
            "kernelspec": {
                "display_name": "Python 2",
                "language": "python",
                "name": "python2"
            },
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 2
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython2",
                "version": "2.7.10"
            }
        }
    else:
        json_data[u'metadata'] = {
            "kernelspec": {
                "display_name": "ROOT C++",
                "language": "c++",
                "name": "root"
            },
            "language_info": {
                "codemirror_mode": "text/x-c++src",
                "file_extension": ".C",
                "mimetype": " text/x-c++src",
                "name": "c++"
            }
        }
    ## write the json file with the metadata
    with open(outdir + outname, 'w') as f:
        json.dump(json_data, f, indent=1, sort_keys=True)
    ## The two commands to create an html version of the notebook and a notebook with the output
    print(time.time() - starttime)
    #subprocess.call(["jupyter", "nbconvert","--ExecutePreprocessor.timeout=60", "--to=html", "--execute", outdir+outname])
    subprocess.call(["jupyter", "nbconvert", "--ExecutePreprocessor.timeout=60",
                     "--to=notebook", "--execute", outdir + outname])
    os.remove(outdir + outname)
def mainfunction(text, visualize):
    """
    Main function. Calls all other functions. Also, it adds a cell that draws the
    result. The working text is then converted to a version 3 jupyter notebook,
    subsequently updated to a version 4. Then, metadata associated with the
    language the macro is written in is attached to the notebook. Finally the
    notebook is executed and output as a Jupyter notebook.

    Parameters
    ----------
    text : str
        Full source of the macro being converted.
    visualize : bool
        When true, append a cell that calls VisualizeInNotebook().

    NOTE(review): relies on module-level state (description, tutTitle, args,
    outPathName, outPathNameMacro, outname) and helpers defined elsewhere in
    this file.
    """
    # Modify text from macros to suit a notebook
    main, helpers, headers, rest = split(text)
    main_macro = CreateMainFunction(Indent(ExtractMainFunction(main)))
    main, argumentsCell = processmain(main)
    # Remove function, Unindent, and convert comments to Markdown cells
    main = Comments(Unindent(ExtractMainFunction(main)))
    rest = RemoveIncludeGuardsAndNamespace(rest)
    # Command for loading rootlogon.C
    libloading_macro = '%jsroot on\ngROOT->LoadMacro("${BDMSYS}/etc/rootlogon.C");\n\n'
    c_macro = headers + libloading_macro + rest + main_macro
    with open(outPathNameMacro, 'w') as fout:
        fout.write(c_macro)
    if argumentsCell:
        main = argumentsCell + main
    if visualize:
        visComment = "# <markdowncell>\n Let's visualize the output!"
        main += '\n%s\n# <codecell>\nVisualizeInNotebook();\n' % visComment
    # Convert top level code comments to Markdown cells
    rest = Comments(rest)
    # Construct text by starting with top level code, then the helper functions,
    # and finally the main function.
    # Also add cells for headerfile, or keepfunction
    text = "# <codecell>\n" + rest
    for helper in helpers:
        text += helper
    text += ("\n# <codecell>\n" + main)
    # Change to standard Markdown
    newDescription = changeMarkdown(description)
    # Horizontal title line
    hline = '<hr style="border-top-width: 4px; border-top-color: #34609b;">'
    # Add the title and header of the notebook
    text = "# <markdowncell> \n# # %s\n%s\n%s# \n# \n# <codecell>\n%s\n# <codecell>\n%s\n# <codecell>\n%s" % (
        tutTitle, hline, newDescription, libloading_macro, headers, text)
    # Create a notebook from the working text
    nbook = v3.reads_py(text)
    nbook = v4.upgrade(nbook)  # Upgrade v3 to v4
    # Load notebook string into json format, essentially creating a dictionary
    json_data = json.loads(v4.writes(nbook))
    # add the corresponding metadata
    json_data['metadata'] = {
        "kernelspec": {
            "display_name": "ROOT C++",
            "language": "c++",
            "name": "root"
        },
        "language_info": {
            "codemirror_mode": "text/x-c++src",
            "file_extension": ".C",
            "mimetype": " text/x-c++src",
            "name": "c++"
        }
    }
    # write the json file with the metadata
    with open(outPathName, 'w') as fout:
        json.dump(json_data, fout, indent=1, sort_keys=True)
    timeout = 60
    # BUG FIX: the original always kept an `execute` slot in the argv list and
    # set it to "" when args.skip was true, so nbconvert received a literal
    # empty-string argument. Build the argument list conditionally instead.
    cmd = ["jupyter", "nbconvert", "--to=html",
           "--ExecutePreprocessor.timeout=%d" % timeout]
    if not args.skip:
        cmd.append("--execute")
    cmd.append(outPathName)
    # Human-readable form of the exact command, reported on failure.
    nbconvert_cmd = " ".join(cmd)
    # Call command that executes the notebook and creates a new notebook with the output
    r = subprocess.call(cmd)
    if r != 0:
        sys.stderr.write(
            "NOTEBOOK_CONVERSION_ERROR: nbconvert failed for notebook %s with return code %s\n"
            % (outname, r))
        sys.stderr.write("FAILED COMMAND: %s\n" % nbconvert_cmd)
        exit(1)
# Split the source into top-level AST blocks and map each block to its
# physical line span (lineno is 1-based, hence the -1).
all_blocks = ast.parse(text).body
line_start = [c.lineno - 1 for c in all_blocks]
line_start[0] = 0  # fold leading comments/blank lines into the first block
lines = text.split('\n')
for c_block, s, e in zip(all_blocks, line_start, line_start[1:] + [len(lines)]):
    c_text = '\n'.join(lines[s:e])
    # BUG FIX: the original read `c_block.test.comparators[0].s`, which raises
    # AttributeError for any `if` whose test is not a comparison (e.g.
    # `if DEBUG:`) and relies on the `.s` alias removed from ast in Python
    # 3.12. Check the node shape first and read `.value` with an `.s`
    # fallback for pre-3.8 interpreters.
    test = c_block.test if isinstance(c_block, ast.If) else None
    is_main_guard = (
        isinstance(test, ast.Compare)
        and test.comparators
        and getattr(test.comparators[0], 'value',
                    getattr(test.comparators[0], 's', None)) == '__main__'
    )
    if is_main_guard:
        print('Skip if main', lines[s:e])
    elif isinstance(c_block, ast.FunctionDef) and c_block.name == 'main':
        # remove start and de-indent lines
        c_lines = lines[s + 1:e]
        spaces_to_delete = c_block.body[0].col_offset
        fixed_lines = [n_line[spaces_to_delete:]
                       if n_line.startswith(' ' * spaces_to_delete) else n_line
                       for n_line in c_lines]
        fixed_text = '\n'.join(fixed_lines)
        print('Unwrapping main function')
        nbook['cells'].append(v4.new_code_cell(fixed_text))
    else:
        print('appending', c_block)
        nbook['cells'].append(v4.new_code_cell(c_text))
jsonform = v4.writes(nbook) + "\n"
# Derive the notebook's output path from the input script path.
output = input
output = output.replace('.py', '.ipynb')
output = output.replace('examples/python', 'examples/notebook')
print('writing %s' % output)
with open(output, "w") as fpout:
    fpout.write(jsonform)
def mainfunction(text):
    """
    Main function. Calls all other functions, depending on whether the macro input
    is in python or c++. It adds the header information. Also, it adds a cell that
    draws all canvases. The working text is then converted to a version 3 jupyter
    notebook, subsequently updated to a version 4. Then, metadata associated with
    the language the macro is written in is attached to the notebook. Finally the
    notebook is executed and output as a Jupyter notebook.

    Parameters
    ----------
    text : str
        Full source of the tutorial macro being converted.

    NOTE(review): relies on module-level state (extension, tutName, tutTitle,
    tutRelativePath, outPathName, outdir, outname, outnameconverted, description,
    author, date, blueHorizBar, isJsroot, nodraw, needsHeaderFile, starttime)
    and on helper functions defined elsewhere in this file.
    """
    # Modify text from macros to suit a notebook
    if isCpp():
        main, helpers, rest = split(text)
        main, argumentsCell = processmain(main)
        # Remove function, Unindent, and convert comments to Markdown cells
        main = cppComments(unindenter(cppFunction(main)))
        if argumentsCell:
            main = argumentsCell + main
        rest = cppComments(rest)  # Convert top level code comments to Markdown cells
        # Construct text by starting with top level code, then the helper
        # functions, and finally the main function.
        # Also add cells for headerfile, or keepfunction
        if needsHeaderFile:
            text = "# <markdowncell>\n# The header file must be copied to the current directory\n# <codecell>\n.!cp %s%s.h .\n# <codecell>\n" % (
                tutRelativePath, tutName)
            text += rest
        else:
            text = "# <codecell>\n" + rest
        for helper in helpers:
            text += helper
        text += ("\n# <codecell>\n" + main)
    if extension == "py":
        text = pythonMainFunction(text)
        text = pythonComments(text)  # Convert comments into Markdown cells
    # Perform last minute fixes to the notebook, used for specific fixes needed by some tutorials
    text = fixes(text)
    # Change to standard Markdown
    newDescription = changeMarkdown(description)
    # Add the title and header of the notebook
    text = "# <markdowncell> \n# # %s\n%s\n%s# \n# \n# **Author:** %s \n# <i><small>This notebook tutorial was automatically generated " \
           "with <a href= \"https://github.com/root-mirror/root/blob/master/documentation/doxygen/converttonotebook.py\">ROOTBOOK-izer (Beta)</a> " \
           "from the macro found in the ROOT repository on %s.</small></i>\n# <codecell>\n%s" % (
               tutTitle, blueHorizBar, newDescription, author, date, text)
    # Add cell at the end of the notebook that draws all the canvases.
    # Add a Markdown cell before explaining it.
    if isJsroot and not nodraw:
        if isCpp():
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\ngROOT->GetListOfCanvases()->Draw()"
        if extension == "py":
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"
    elif not nodraw:
        if isCpp():
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\ngROOT->GetListOfCanvases()->Draw()"
        if extension == "py":
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"
    # Create a notebook from the working text
    nbook = v3.reads_py(text)
    nbook = v4.upgrade(nbook)  # Upgrade v3 to v4
    # Load notebook string into json format, essentially creating a dictionary
    json_data = json.loads(v4.writes(nbook))
    # add the corresponding metadata
    if extension == "py":
        json_data[u'metadata'] = {
            "kernelspec": {
                "display_name": "Python 2",
                "language": "python",
                "name": "python2"
            },
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 2
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython2",
                "version": "2.7.10"
            }
        }
    elif isCpp():
        json_data[u'metadata'] = {
            "kernelspec": {
                "display_name": "ROOT C++",
                "language": "c++",
                "name": "root"
            },
            "language_info": {
                "codemirror_mode": "text/x-c++src",
                "file_extension": ".C",
                # NOTE(review): the leading space in " text/x-c++src" is
                # preserved from the original; it looks accidental — confirm
                # before changing, since it is written into the notebook file.
                "mimetype": " text/x-c++src",
                "name": "c++"
            }
        }
    # write the json file with the metadata
    with open(outPathName, 'w') as fout:
        json.dump(json_data, fout, indent=1, sort_keys=True)
    print(time.time() - starttime)
    timeout = findTimeout()
    # Call command that executes the notebook and creates a new notebook with the output
    r = subprocess.call([
        "jupyter", "nbconvert",
        "--ExecutePreprocessor.timeout=%d" % timeout, "--to=notebook",
        "--execute", outPathName
    ])
    if r != 0:
        sys.stderr.write(
            "NOTEBOOK_CONVERSION_WARNING: Nbconvert failed for notebook %s with return code %s\n"
            % (outname, r))
    else:
        if isJsroot:
            subprocess.call(
                ["jupyter", "trust",
                 os.path.join(outdir, outnameconverted)])
        # Only remove notebook without output if nbconvert succeeds
        os.remove(outPathName)
FROM https://github.com/cfriedline/ipynb_template/blob/master/nbstripout
"""
import sys

from future.utils import PY2
from nbformat import v4


def strip_output(nb):
    """strip the outputs from a notebook object

    Clears, in place: the notebook-level 'widgets' metadata, every cell's
    outputs list, execution_count, and per-cell metadata. Returns the same
    notebook object for convenience.
    """
    nb.metadata.pop('widgets', None)
    for cell in nb.cells:
        if 'outputs' in cell:
            cell['outputs'] = []
        if 'execution_count' in cell:
            cell['execution_count'] = None
        if 'metadata' in cell:
            cell['metadata'] = {}
    return nb


if __name__ == '__main__':
    # Filter-style entry point: notebook JSON on stdin, stripped JSON on stdout.
    nb = v4.reads(sys.stdin.read())
    nb = strip_output(nb)
    output = v4.writes(nb)
    # NOTE(review): Python-2-only shim — encodes only when writes() returned a
    # byte str; presumably guards stdout writes on Py2. Confirm before changing.
    if type(output) == str and PY2:
        output = output.encode('utf-8')
    sys.stdout.write(output)
for c_block, s, e in zip(all_blocks, line_start, line_start[1:] + [len(lines)]):
    print(c_block)
    c_text = '\n'.join(lines[s:e])
    # BUG FIX: the original read `c_block.test.comparators[0].s`, which raises
    # AttributeError for any `if` whose test is not a comparison (e.g.
    # `if DEBUG:`) and relies on the `.s` alias removed from ast in Python
    # 3.12. Check the node shape first and read `.value` with an `.s`
    # fallback for pre-3.8 interpreters.
    test = c_block.test if isinstance(c_block, ast.If) else None
    is_main_guard = (
        isinstance(test, ast.Compare)
        and test.comparators
        and getattr(test.comparators[0], 'value',
                    getattr(test.comparators[0], 's', None)) == '__main__'
    )
    if is_main_guard:
        print('Skip if main', lines[s:e])
    elif isinstance(c_block, ast.FunctionDef) and c_block.name == 'main':
        # remove start and de-indent lines
        c_lines = lines[s + 1:e]
        spaces_to_delete = c_block.body[0].col_offset
        fixed_lines = [
            n_line[spaces_to_delete:]
            if n_line.startswith(' ' * spaces_to_delete) else n_line
            for n_line in c_lines
        ]
        fixed_text = '\n'.join(fixed_lines)
        print('Unwrapping main function')
        full_text += fixed_text
    else:
        print('appending', c_block)
        full_text += c_text + '\n'
# All collected source goes into a single code cell.
nbook['cells'].append(v4.new_code_cell(full_text))
jsonform = v4.writes(nbook) + '\n'
print(f'writing {output_file}')
with open(output_file, 'w') as fpout:
    fpout.write(jsonform)
from nbformat import v3, v4

# Notebook JSON is defined to be UTF-8; pin the encoding on both ends so the
# conversion does not depend on the platform's locale default (the original
# opens could mis-decode/mis-encode on e.g. Windows cp1252).
with open("mnist-io.py", encoding="utf-8") as fpin:
    text = fpin.read()
nbook = v3.reads_py(text)   # parse the `# <codecell>`-annotated script as v3
nbook = v4.upgrade(nbook)   # Upgrade v3 to v4
jsonform = v4.writes(nbook) + "\n"
with open("mnist.ipynb", "w", encoding="utf-8") as fpout:
    fpout.write(jsonform)
def mainfunction(text):
    """
    Main function. Calls all other functions, depending on whether the macro input
    is in python or c++. It adds the header information. Also, it adds a cell that
    draws all canvases. The working text is then converted to a version 3 jupyter
    notebook, subsequently updated to a version 4. Then, metadata associated with
    the language the macro is written in is attached to the notebook. Finally the
    notebook is executed and output as a Jupyter notebook.

    Parameters
    ----------
    text : str
        Full source of the tutorial macro being converted.

    NOTE(review): relies on module-level state (extension, tutName, tutTitle,
    tutRelativePath, outPathName, outdir, outname, outnameconverted, description,
    author, date, isJsroot, nodraw, needsHeaderFile, starttime) and on helper
    functions defined elsewhere in this file.
    """
    # Modify text from macros to suit a notebook
    if isCpp():
        main, helpers, rest = split(text)
        main, argumentsCell = processmain(main)
        # Remove function, Unindent, and convert comments to Markdown cells
        main = cppComments(unindenter(cppFunction(main)))
        if argumentsCell:
            main = argumentsCell + main
        rest = cppComments(rest)  # Convert top level code comments to Markdown cells
        # Construct text by starting with top level code, then the helper
        # functions, and finally the main function.
        # Also add cells for headerfile, or keepfunction
        if needsHeaderFile:
            text = "# <markdowncell>\n# The header file must be copied to the current directory\n# <codecell>\n.!cp %s%s.h .\n# <codecell>\n" % (
                tutRelativePath, tutName)
            text += rest
        else:
            text = "# <codecell>\n" + rest
        for helper in helpers:
            text += helper
        text += ("\n# <codecell>\n" + main)
    if extension == "py":
        text = pythonMainFunction(text)
        text = pythonComments(text)  # Convert comments into Markdown cells
    # Perform last minute fixes to the notebook, used for specific fixes needed by some tutorials
    text = fixes(text)
    # Change to standard Markdown
    newDescription = changeMarkdown(description)
    # Add the title and header of the notebook
    text = "# <markdowncell> \n# # %s\n%s# \n# \n# **Author:** %s \n# <i><small>This notebook tutorial was automatically generated " \
           "with <a href= \"https://github.com/root-mirror/root/blob/master/documentation/doxygen/converttonotebook.py\">ROOTBOOK-izer (Beta)</a> " \
           "from the macro found in the ROOT repository on %s.</small></i>\n# <codecell>\n%s" % (
               tutTitle, newDescription, author, date, text)
    # Add cell at the end of the notebook that draws all the canvases.
    # Add a Markdown cell before explaining it.
    if isJsroot and not nodraw:
        if isCpp():
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\ngROOT->GetListOfCanvases()->Draw()"
        if extension == "py":
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"
    elif not nodraw:
        if isCpp():
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\ngROOT->GetListOfCanvases()->Draw()"
        if extension == "py":
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"
    # Create a notebook from the working text
    nbook = v3.reads_py(text)
    nbook = v4.upgrade(nbook)  # Upgrade v3 to v4
    # Load notebook string into json format, essentially creating a dictionary
    json_data = json.loads(v4.writes(nbook))
    # add the corresponding metadata
    if extension == "py":
        json_data[u'metadata'] = {
            "kernelspec": {
                "display_name": "Python 2",
                "language": "python",
                "name": "python2"
            },
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 2
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython2",
                "version": "2.7.10"
            }
        }
    elif isCpp():
        json_data[u'metadata'] = {
            "kernelspec": {
                "display_name": "ROOT C++",
                "language": "c++",
                "name": "root"
            },
            "language_info": {
                "codemirror_mode": "text/x-c++src",
                "file_extension": ".C",
                # NOTE(review): the leading space in " text/x-c++src" is
                # preserved from the original; it looks accidental — confirm
                # before changing, since it is written into the notebook file.
                "mimetype": " text/x-c++src",
                "name": "c++"
            }
        }
    # write the json file with the metadata
    with open(outPathName, 'w') as fout:
        json.dump(json_data, fout, indent=1, sort_keys=True)
    print(time.time() - starttime)
    timeout = findTimeout()
    # Call command that executes the notebook and creates a new notebook with the output
    r = subprocess.call(["jupyter", "nbconvert",
                         "--ExecutePreprocessor.timeout=%d" % timeout,
                         "--to=notebook", "--execute", outPathName])
    if r != 0:
        sys.stderr.write(
            "NOTEBOOK_CONVERSION_WARNING: Nbconvert failed for notebook %s with return code %s\n"
            % (outname, r))
    if isJsroot:
        subprocess.call(["jupyter", "trust",
                         os.path.join(outdir, outnameconverted)])
    if r == 0:
        # Only remove notebook without output if nbconvert succeeds
        os.remove(outPathName)
header_text = "# " + module_name cells.append(nbf.new_markdown_cell(header_text)) # Add initialization (magics etc) initialization_text = """import etude etude.initialize('%s')""" % os.path.abspath(args.filename) cells.append(nbf.new_code_cell(initialization_text)) # Add optional introduction exercise_introduction = getattr(exercise_module, "introduction", None) if exercise_introduction is not None: cells.append(nbf.new_markdown_cell(textwrap.dedent(exercise_introduction))) # Add optional exercise-specific initialization (e.g. imports etc) exercise_initialization_text = getattr(exercise_module, "initialization", None) if exercise_initialization_text is not None: cells.append(nbf.new_code_cell(exercise_initialization_text)) # Add the exercises for exercise in exercises: exercise_cells = convert_exercise_to_cells(exercise) cells.extend(exercise_cells) nb.cells.extend(cells) # write notebook with open(os.path.splitext(args.filename)[0] + '.ipynb', 'w') as f: f.write(nbf.writes(nb))
def astext(self) -> str:
    """Serialize this object's notebook representation to a JSON string."""
    notebook = self.asnotebook()
    return nbf.writes(notebook)