from nbformat import v3, v4


def render_ipynb(from_file: str, to_file: str):
    # print(f"{from_file} -> {to_file}")
    header = """
# This file is generated from a Mathy (https://mathy.ai) code example.
!pip install mathy --upgrade
"""
    with open(from_file, "r") as fpin:
        lines = fpin.readlines()
    header_installs = True
    out_lines = []
    for line in lines:
        # NOTE: the weird use of f-string is to work around vscode highlight
        # getting really confused by the "!pip install" attached to a #
        if line.startswith(f"#{'!pip install'}"):
            if header_installs is False:
                raise ValueError(
                    "All !pip install comments must be the first lines in a snippet."
                    f" Found the following line after a non-install comment: {line}"
                )
            # output without the comment so ipynb installs the requirement
            out_lines.append(line[1:])
            continue
        # The header installs must be the first (n) lines in a file. After the
        # first non-comment, nothing will be installed.
        header_installs = False
        out_lines.append(line)
    text = "".join(out_lines)
    nbook = v4.upgrade(v3.reads_py(f"{header}{text}"))
    with open(to_file, "w") as fpout:
        fpout.write(f"{v4.writes(nbook)}\n")

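# Hypothetical usage of render_ipynb above; the file names are invented for
# illustration. Leading "#!pip install ..." comment lines in the source snippet
# are uncommented so the generated notebook actually runs the install.
with open("snippet_example.py", "w") as fp:
    fp.write("#!pip install mathy\nprint('hello from a mathy snippet')\n")
render_ipynb("snippet_example.py", "snippet_example.ipynb")
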
def py2ipynb(readIn):
    from nbformat import v3, v4
    readIn += "# <markdowncell>"  # preserve last cell
    nb = v4.upgrade(v3.reads_py(readIn))  # v3 => v4
    json = v4.writes(nb) + "\n"
    return json

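# Minimal usage sketch for py2ipynb above; the source string and output path
# are invented for illustration. "# <markdowncell>" / "# <codecell>" markers
# are the v3 py-format cell separators that v3.reads_py() splits cells on.
example_src = (
    "# <markdowncell>\n"
    "# A tiny demo notebook\n"
    "# <codecell>\n"
    "print(1 + 1)\n"
)
with open("demo.ipynb", "w") as fp:  # assumed output path
    fp.write(py2ipynb(example_src))
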
def _main():
    args = _parse_command_line_args()
    data = _load(args.input)
    data = ('{}\n'
            '# <markdowncell>\n'
            '\n'
            '# If you can read this, reads_py() is no longer broken!\n'
            ).format(data)
    data = v4.writes(v4.upgrade(v3.reads_py(data)))
    _save(data, args.output or args.input.replace('.py', '.ipynb'))

def change_format(arg):
    from nbformat import v3, v4
    with open(arg) as fpin:
        text = fpin.read()
    nbook = v3.reads_py(text)
    nbook = v4.upgrade(nbook)
    jsonform = v4.writes(nbook) + "\n"
    new_file = arg + ".ipynb"
    with open(new_file, "w") as fpout:
        fpout.write(jsonform)

def read_legacy_pyfile_as_notebook(pytext):
    from nbformat import v3, v4
    pytext += """
# <markdowncell>

# If you can read this, reads_py() is no longer broken!
"""
    nbook = v3.reads_py(pytext)
    nbook = v4.upgrade(nbook)  # Upgrade v3 to v4
    return nbook

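# Sketch (not from the original source) of persisting the NotebookNode returned
# by read_legacy_pyfile_as_notebook; nbformat.write() serializes a notebook node
# to a file handle. Both file paths are assumptions made for the example.
import nbformat

with open("legacy_script.py") as fp:
    nb = read_legacy_pyfile_as_notebook(fp.read())
with open("legacy_script.ipynb", "w") as fp:
    nbformat.write(nb, fp)
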
def py2ipynb_default(input, output):
    from nbformat import v3, v4
    with open(input) as f:
        code = f.read()
    code += """
# <markdowncell>

# If you can read this, reads_py() is no longer broken!
"""
    nbook = v3.reads_py(code)
    nbook = v4.upgrade(nbook)  # Upgrade v3 to v4
    jsonform = v4.writes(nbook) + "\n"
    with open(output, "w") as f:
        f.write(jsonform)

def py2ipynb(readIn):
    from nbformat import v3, v4
    readIn += "# <markdowncell>"  # preserve last cell
    nb = v4.upgrade(v3.reads_py(readIn))  # v3 => v4
    json = v4.writes(nb) + "\n"
    return json


def ipynb2markdown(readIn):  # the enclosing def was missing from this snippet; the name is a guess
    from nbformat import reads
    from nbconvert import MarkdownExporter
    """
    # extracts images as separate files
    config = Config()
    config.HTMLExporter.preprocessors = [
        'nbconvert.preprocessors.ExtractOutputPreprocessor'
    ]
    """
    # for path in nbPaths: #!
    #! outPath = 'markdowns/' + filename + '.md'
    """
    # assign unique key to each image based on notebook name
    extractOutputConfig = {
        'unique_key': filename,
        'output_files_dir': '/' + IMAGE_DIR
    }
    """
    nb = reads(readIn, 4)  #! originally read `path`
    mdExporter = MarkdownExporter()  #! add `config = config`
    #! add resources variable, `resources = extractOutputConfig`
    md = mdExporter.from_notebook_node(nb)
    return md[0]  # recover text from first index
    ##! additional manual processing here!

def py2ipynb_default(input, output):
    from nbformat import v3, v4
    from nbformat.v4 import new_code_cell
    with open(input) as f:
        code = f.read()
    code += """
# <markdowncell>

# If you can read this, reads_py() is no longer broken!
"""
    nbook = v3.reads_py(code)
    nbook = v4.upgrade(nbook)  # Upgrade v3 to v4

    cells = nbook.cells
    newCells = []
    newCells.append(new_code_cell(source="!apt install maven"))
    newCells.append(new_code_cell(source="!pip install pybacting"))
    for cell in cells:
        newCells.append(cell)
    nbook.cells = newCells

    jsonform = v4.writes(nbook) + "\n"
    with open(output, "w") as f:
        f.write(jsonform)

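# Hypothetical invocation of py2ipynb_default above; both paths are invented
# for illustration. Because of the two prepended cells, the generated notebook
# installs maven and pybacting before any of the converted script's code runs
# (useful on a fresh Colab/Binder kernel).
py2ipynb_default("pybacting_example.py", "pybacting_example.ipynb")
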
text = fpin.read()

# Compute output file path.
output_file = input_file
output_file = output_file.replace('.py', '.ipynb')
# For example/python/foo.py -> example/notebook/examples/foo.ipynb
output_file = output_file.replace('examples/python', 'examples/notebook/examples')
# For example/contrib/foo.py -> example/notebook/contrib/foo.ipynb
output_file = output_file.replace('examples/contrib', 'examples/notebook/contrib')
# For ortools/*/samples/foo.py -> example/notebook/*/foo.ipynb
output_file = output_file.replace('ortools', 'examples/notebook')
output_file = output_file.replace('samples/', '')

nbook = v3.reads_py('')
nbook = v4.upgrade(nbook)  # Upgrade v3 to v4

print('Adding copyright cell...')
google = '##### Copyright 2021 Google LLC.'
nbook['cells'].append(v4.new_markdown_cell(source=google, id='google'))

print('Adding license cell...')
apache = '''Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,

## Converts a Python script into a notebook.
# See [this post](https://stackoverflow.com/questions/23292242/converting-to-not-from-ipython-notebook-format) on StackOverflow.
import argparse

argParser = argparse.ArgumentParser()
argParser.add_argument('-in', help='Path to input python file.', required=True)
argParser.add_argument('-out', help='Path to output jupyter notebook file.', required=True)
arguments = vars(argParser.parse_args())
inFile = arguments["in"]
outFile = arguments["out"]

from nbformat import v3, v4

with open(inFile) as fpin:
    text = fpin.read()

text += "# <markdowncell>\n # If you can read this, reads_py() is no longer broken!\n"

nbook = v3.reads_py(text)
nbook = v4.upgrade(nbook)  # Upgrade v3 to v4

jsonform = v4.writes(nbook) + "\n"
with open(outFile, "w") as fpout:
    fpout.write(jsonform)

from nbformat import v3, v4
import sys

content = open(sys.argv[1]).read()
outname = sys.argv[1].split('.')[0] + '.ipynb'

nb = v3.reads_py(content)
nb = v4.upgrade(nb)
nb_json = v4.writes(nb) + '\n'

outfile = open(outname, 'w')
outfile.write(nb_json)
outfile.close()

def mainfunction(text):
    """
    Main function. Calls all other functions, depending on whether the macro input is in python or c++.
    It adds the header information. Also, it adds a cell that draws all canvases. The working text is
    then converted to a version 3 jupyter notebook, subsequently updated to a version 4. Then, metadata
    associated with the language the macro is written in is attached to the notebook. Finally the
    notebook is executed and output as a Jupyter notebook.
    """
    # Modify text from macros to suit a notebook
    if isCpp():
        main, helpers, rest = split(text)
        main, argumentsCell = processmain(main)
        main = cppComments(unindenter(cppFunction(main)))  # Remove function, unindent, and convert comments to Markdown cells
        if argumentsCell:
            main = argumentsCell + main
        rest = cppComments(rest)  # Convert top level code comments to Markdown cells
        # Construct text by starting with top level code, then the helper functions, and finally the main function.
        # Also add cells for headerfile, or keepfunction
        if needsHeaderFile:
            text = "# <markdowncell>\n# The header file must be copied to the current directory\n# <codecell>\n.!cp %s%s.h .\n# <codecell>\n" % (tutRelativePath, tutName)
            text += rest
        else:
            text = "# <codecell>\n" + rest
        for helper in helpers:
            text += helper
        text += ("\n# <codecell>\n" + main)
    if extension == "py":
        text = pythonMainFunction(text)
        text = pythonComments(text)  # Convert comments into Markdown cells

    # Perform last minute fixes to the notebook, used for specific fixes needed by some tutorials
    text = fixes(text)

    # Change to standard Markdown
    newDescription = changeMarkdown(description)

    # Add the title and header of the notebook
    text = "# <markdowncell> \n# # %s\n%s# \n# \n# **Author:** %s \n# <i><small>This notebook tutorial was automatically generated " \
           "with <a href= \"https://github.com/root-mirror/root/blob/master/documentation/doxygen/converttonotebook.py\">ROOTBOOK-izer (Beta)</a> " \
           "from the macro found in the ROOT repository on %s.</small></i>\n# <codecell>\n%s" % (tutTitle, newDescription, author, date, text)

    # Add a cell at the end of the notebook that draws all the canvases. Add a Markdown cell before it explaining what it does.
    if isJsroot and not nodraw:
        if isCpp():
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\ngROOT->GetListOfCanvases()->Draw()"
        if extension == "py":
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\n%jsroot on\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"
    elif not nodraw:
        if isCpp():
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\ngROOT->GetListOfCanvases()->Draw()"
        if extension == "py":
            text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\nfrom ROOT import gROOT \ngROOT.GetListOfCanvases().Draw()"

    # Create a notebook from the working text
    nbook = v3.reads_py(text)
    nbook = v4.upgrade(nbook)  # Upgrade v3 to v4

    # Load notebook string into json format, essentially creating a dictionary
    json_data = json.loads(v4.writes(nbook))

    # add the corresponding metadata
    if extension == "py":
        json_data[u'metadata'] = {
            "kernelspec": {
                "display_name": "Python 2",
                "language": "python",
                "name": "python2"
            },
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 2
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython2",
                "version": "2.7.10"
            }
        }
    elif isCpp():
        json_data[u'metadata'] = {
            "kernelspec": {
                "display_name": "ROOT C++",
                "language": "c++",
                "name": "root"
            },
            "language_info": {
                "codemirror_mode": "text/x-c++src",
                "file_extension": ".C",
                "mimetype": " text/x-c++src",
                "name": "c++"
            }
        }

    # write the json file with the metadata
    with open(outPathName, 'w') as fout:
        json.dump(json_data, fout, indent=1, sort_keys=True)

    print(time.time() - starttime)

    timeout = findTimeout()

    # Call command that executes the notebook and creates a new notebook with the output
    r = subprocess.call(["jupyter", "nbconvert",
                         "--ExecutePreprocessor.timeout=%d" % timeout,
                         "--to=notebook", "--execute", outPathName])

    if r != 0:
        sys.stderr.write("NOTEBOOK_CONVERSION_WARNING: Nbconvert failed for notebook %s with return code %s\n" % (outname, r))

    if isJsroot:
        subprocess.call(["jupyter", "trust", os.path.join(outdir, outnameconverted)])

    if r == 0:  # Only remove notebook without output if nbconvert succeeds
        os.remove(outPathName)

from nbformat import v3, v4

with open("mnist-io.py") as fpin:
    text = fpin.read()

nbook = v3.reads_py(text)
nbook = v4.upgrade(nbook)  # Upgrade v3 to v4

jsonform = v4.writes(nbook) + "\n"
with open("mnist.ipynb", "w") as fpout:
    fpout.write(jsonform)

def mainfunction(text, visualize):
    """
    Main function. Calls all other functions. Also, it adds a cell that draws the result. The working
    text is then converted to a version 3 jupyter notebook, subsequently updated to a version 4.
    Then, metadata associated with the language the macro is written in is attached to the notebook.
    Finally the notebook is executed and output as a Jupyter notebook.
    """
    # Modify text from macros to suit a notebook
    main, helpers, headers, rest = split(text)
    main_macro = CreateMainFunction(Indent(ExtractMainFunction(main)))
    main, argumentsCell = processmain(main)
    # Remove function, unindent, and convert comments to Markdown cells
    main = Comments(Unindent(ExtractMainFunction(main)))
    rest = RemoveIncludeGuardsAndNamespace(rest)

    # Command for loading biodynamo library
    libloading = 'gSystem->Load("libbiodynamo");\n\n'
    libloading_macro = '%jsroot on\nR__LOAD_LIBRARY(libbiodynamo)\n\n'

    # Append "using namespace bdm;" to headers
    headers += '\nusing namespace bdm;\n'

    c_macro = headers + libloading_macro + rest + main_macro
    with open(outPathNameMacro, 'w') as fout:
        fout.write(c_macro)

    if argumentsCell:
        main = argumentsCell + main

    if visualize:
        visComment = "# <markdowncell>\n Let's visualize the output!"
        main += '\n%s\n# <codecell>\nVisualizeInNotebook();\n' % visComment

    # Convert top level code comments to Markdown cells
    rest = Comments(rest)

    # Construct text by starting with top level code, then the helper functions, and finally the main function.
    # Also add cells for headerfile, or keepfunction
    text = "# <codecell>\n" + rest
    for helper in helpers:
        text += helper
    text += ("\n# <codecell>\n" + main)

    # Change to standard Markdown
    newDescription = changeMarkdown(description)

    # Horizontal title line
    hline = '<hr style="border-top-width: 4px; border-top-color: #34609b;">'

    # Add the title and header of the notebook
    text = "# <markdowncell> \n# # %s\n%s\n%s# \n# \n# <codecell>\n%s\n# <codecell>\n%s\n# <codecell>\n%s" % (
        tutTitle, hline, newDescription, libloading_macro, headers, text)

    # Create a notebook from the working text
    nbook = v3.reads_py(text)
    nbook = v4.upgrade(nbook)  # Upgrade v3 to v4

    # Load notebook string into json format, essentially creating a dictionary
    json_data = json.loads(v4.writes(nbook))

    # add the corresponding metadata
    json_data['metadata'] = {
        "kernelspec": {
            "display_name": "ROOT C++",
            "language": "c++",
            "name": "root"
        },
        "language_info": {
            "codemirror_mode": "text/x-c++src",
            "file_extension": ".C",
            "mimetype": " text/x-c++src",
            "name": "c++"
        }
    }

    # write the json file with the metadata
    with open(outPathName, 'w') as fout:
        json.dump(json_data, fout, indent=1, sort_keys=True)

    timeout = 60

    execute = "--execute"
    if args.skip:
        execute = ""

    # Call command that executes the notebook and creates a new notebook with the output
    nbconvert_cmd = "jupyter nbconvert --ExecutePreprocessor.timeout=%d %s %s" % (timeout, execute, outPathName)
    r = subprocess.call(["jupyter", "nbconvert",
                         "--ExecutePreprocessor.timeout=%d" % timeout,
                         execute, outPathName])

    if r != 0:
        sys.stderr.write("NOTEBOOK_CONVERSION_ERROR: nbconvert failed for notebook %s with return code %s\n" % (outname, r))
        sys.stderr.write("FAILED COMMAND: %s\n" % nbconvert_cmd)
        exit(1)

def mainfunction(text):
    """
    Main function. Calls all other functions, depending on whether the macro input is in python or c++.
    It adds the header information. Also, it adds a cell that draws all canvases. The working text is
    then converted to a version 3 jupyter notebook, subsequently updated to a version 4. Then, metadata
    associated with the language the macro is written in is attached to the notebook. Finally the
    notebook is executed and output in both HTML and Jupyter notebook formats.
    """
    ## Modify text from macros to suit a notebook
    if extension in ("C", "c", "cpp", "C++", "cxx"):
        main, helpers, rest = split(text)
        main, addition, keepfunction = processmain(main)
        if not keepfunction:
            main = cppcomments(unindenter(cppfunction(main)))  # Remove function, unindent, and convert comments to Markdown cells
        rest = cppcomments(rest)  # Convert top level code comments to Markdown cells
        ## Construct text by starting with top level code, then the helper functions, and finally the main function.
        text = rest
        for helper in helpers:
            text += "\n# <markdowncell>\n A helper function is created: \n# <codecell>\n%%cpp -d\n"
            text += helper
        text += "\n# <codecell>\n"
        if keepfunction:
            text += "%%cpp -d\n"
        text += main
        if addition:
            text += addition
    if extension == "py":
        text = pythoncomments(text)  # Convert comments into Markdown cells

    ## Add the title and header of the notebook
    text = "# <markdowncell> \n# # %s\n# %s# \n# This notebook tutorial was automatically generated from the macro found in the ROOT repository on %s.\n# **Author:** %s \n# <codecell>\n%s" % (name.title(), description, date, author, text)

    ## Add a cell at the end of the notebook that draws all the canvases. Add a Markdown cell before it explaining what it does.
    if extension in ("C", "c", "cpp", "c++"):
        text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\ngROOT->GetListOfCanvases()->Draw()"
    if extension == "py":
        text += "\n# <markdowncell> \n# Draw all canvases \n# <codecell>\nimport ROOT \ngROOT.GetListOfCanvases().Draw()"

    ## Create a notebook from the working text
    nbook = v3.reads_py(text)
    nbook = v4.upgrade(nbook)  # Upgrade v3 to v4

    ## Load notebook string into json format, essentially creating a dictionary
    json_data = json.loads(v4.writes(nbook))

    # add the corresponding metadata
    if extension == "py":
        json_data[u'metadata'] = {
            "kernelspec": {
                "display_name": "Python 2",
                "language": "python",
                "name": "python2"
            },
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": 2
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython2",
                "version": "2.7.10"
            }
        }
    else:
        json_data[u'metadata'] = {
            "kernelspec": {
                "display_name": "ROOT C++",
                "language": "c++",
                "name": "root"
            },
            "language_info": {
                "codemirror_mode": "text/x-c++src",
                "file_extension": ".C",
                "mimetype": " text/x-c++src",
                "name": "c++"
            }
        }

    ## write the json file with the metadata
    with open(outdir + outname, 'w') as f:
        json.dump(json_data, f, indent=1, sort_keys=True)

    ## The two commands to create an html version of the notebook and a notebook with the output
    print(time.time() - starttime)
    # subprocess.call(["jupyter", "nbconvert", "--ExecutePreprocessor.timeout=60", "--to=html", "--execute", outdir + outname])
    subprocess.call(["jupyter", "nbconvert", "--ExecutePreprocessor.timeout=60", "--to=notebook", "--execute", outdir + outname])
    os.remove(outdir + outname)