def lines_to_notebook(lines, name=None):
    """
    Convert the lines of an m file into an IPython notebook

    Parameters
    ----------
    lines : list
        A list of strings. Each element is a line in the m file

    name : str, optional
        Unused; kept for backward compatibility.

    Returns
    -------
    notebook : an IPython NotebookNode class instance, containing the
        information required to create a file
    """
    source = []
    md = np.empty(len(lines), dtype=object)
    new_cell = np.empty(len(lines), dtype=object)
    for idx, l in enumerate(lines):
        new_cell[idx], md[idx], this_source = format_line(l)
        # Transitions between markdown and code and vice-versa merit a new
        # cell, even if no newline, or "%%" is found. Make sure not to do this
        # check for the very first line!
        # BUG FIX: this previously tested `idx > 1`, which also skipped the
        # transition check for the second line (idx == 1), contradicting the
        # comment above.
        if idx > 0 and not new_cell[idx]:
            if md[idx] != md[idx - 1]:
                new_cell[idx] = True
        source.append(this_source)
    # This defines the breaking points between cells:
    # BUG FIX: the trailing sentinel used to be -1, so the final slice
    # `source[last:-1]` silently dropped the last line of the file. Using
    # len(source) makes the last cell run through the end.
    new_cell_idx = np.hstack([np.where(new_cell)[0], len(source)])
    # Listify the sources:
    cell_source = [source[new_cell_idx[i]:new_cell_idx[i + 1]]
                   for i in range(len(new_cell_idx) - 1)]
    cell_md = [md[new_cell_idx[i]] for i in range(len(new_cell_idx) - 1)]
    cells = []
    # Append the notebook with loading matlab magic extension
    notebook_head = ("import pymatbridge as pymat\n"
                     "ip = get_ipython()\n"
                     "pymat.load_ipython_extension(ip)")
    cells.append(nbformat.new_code_cell(notebook_head))
    for cell_idx, cell_s in enumerate(cell_source):
        if cell_md[cell_idx]:
            cells.append(nbformat.new_markdown_cell(cell_s))
        else:
            # Prefix each code cell so it runs through the matlab magic.
            cell_s.insert(0, '%%matlab\n')
            cells.append(nbformat.new_code_cell(cell_s))
    notebook = nbformat.new_notebook(cells=cells)
    return notebook
def _create_code_cell():
    """Return a code cell whose source contains a marked solution region."""
    lines = [
        'print("something")',
        '### BEGIN SOLUTION',
        'print("hello")',
        '### END SOLUTION',
    ]
    return new_code_cell(source="\n".join(lines))
def build_notebook(self):
    """Build a reveal slides notebook in memory for use with tests.
    Overrides base in PreprocessorTestsBase"""
    # One stdout stream output, shared by both code cells (as the
    # original did by reusing a single outputs list).
    shared_outputs = [nbformat.new_output(output_type="stream",
                                          name="stdout", text="a")]
    slide_md = {'slideshow': {'slide_type': 'slide'}}
    subslide_md = {'slideshow': {'slide_type': 'subslide'}}
    notebook_cells = [
        nbformat.new_code_cell(source="", execution_count=1, outputs=shared_outputs),
        nbformat.new_markdown_cell(source="", metadata=slide_md),
        nbformat.new_code_cell(source="", execution_count=2, outputs=shared_outputs),
        nbformat.new_markdown_cell(source="", metadata=slide_md),
        nbformat.new_markdown_cell(source="", metadata=subslide_md),
    ]
    return nbformat.new_notebook(cells=notebook_cells)
def build_notebook(self):
    """Build a reveal slides notebook in memory for use with tests.
    Overrides base in PreprocessorTestsBase"""
    svg_output = nbformat.new_output(
        output_type='display_data',
        data={'image/svg+xml': self.simple_svg},
    )
    code_cell = nbformat.new_code_cell(source="", execution_count=1,
                                       outputs=[svg_output])
    return nbformat.new_notebook(cells=[code_cell])
def test_javascript_output(self):
    """application/javascript display outputs survive the basic HTML export."""
    js_output = v4.new_output(
        output_type='display_data',
        data={'application/javascript': "javascript_output();"},
    )
    nb = v4.new_notebook(cells=[v4.new_code_cell(outputs=[js_output])])
    exporter = HTMLExporter(template_file='basic')
    output, resources = exporter.from_notebook_node(nb)
    self.assertIn('javascript_output', output)
def build_notebook(self):
    """Build a reveal slides notebook in memory for use with tests.
    Overrides base in PreprocessorTestsBase"""
    # BUG FIX: "image/svg+xml" is not a valid nbformat-v4 output_type and
    # `output_svg` is a v3-era keyword; v4 expects a display_data output
    # carrying the SVG in its `data` mimebundle (matching the sibling
    # build_notebook helpers in this file). The unused slide/subslide
    # metadata locals were dead code and have been removed.
    outputs = [nbformat.new_output(output_type="display_data",
                                   data={'image/svg+xml': self.simple_svg})]
    cells = [nbformat.new_code_cell(source="", execution_count=1,
                                    outputs=outputs)]
    return nbformat.new_notebook(cells=cells)
def flush_cell(self):
    """Emit the buffered lines as one code *or* markdown cell, then reset.

    Raises Exception when both buffers are populated (ambiguous) or both
    are empty (nothing to emit).
    """
    has_code = len(self._code_lines) > 0
    has_text = len(self._text_lines) > 0
    if has_code and has_text:
        raise Exception('only text or only code can be flushed')
    if not (has_code or has_text):
        raise Exception('nothing to flush')
    if has_code:
        cell = nbf.new_code_cell('\n'.join(self._code_lines))
    else:
        cell = nbf.new_markdown_cell('\n'.join(self._text_lines))
    self.cells.append(cell)
    self._code_lines = []
    self._text_lines = []
def _create_solution_cell(source, cell_type):
    """Build a markdown or code cell flagged as an nbgrader solution cell."""
    factories = {"markdown": new_markdown_cell, "code": new_code_cell}
    if cell_type not in factories:
        raise ValueError("invalid cell type: {}".format(cell_type))
    cell = factories[cell_type](source=source)
    cell.metadata.nbgrader = {}
    cell.metadata.nbgrader["solution"] = True
    # Checksum is computed over the finished cell so graders can detect edits.
    cell.metadata.nbgrader["checksum"] = compute_checksum(cell)
    return cell
def create_locked_cell(source, cell_type, grade_id):
    """Build a locked nbgrader cell of the requested type."""
    if cell_type not in ("markdown", "code"):
        raise ValueError("invalid cell type: {}".format(cell_type))
    make = new_markdown_cell if cell_type == "markdown" else new_code_cell
    cell = make(source=source)
    cell.metadata.nbgrader = {"locked": True, "grade_id": grade_id}
    return cell
def test_run_nb(self):
    """Test %run notebook.ipynb"""
    from IPython.nbformat import v4, writes
    cells = [
        v4.new_markdown_cell("The Ultimate Question of Everything"),
        v4.new_code_cell("answer=42"),
    ]
    nb = v4.new_notebook(cells=cells)
    # Serialize to disk and %run the resulting .ipynb file.
    self.mktmp(writes(nb, version=4), ext='.ipynb')
    _ip.magic("run %s" % self.fname)
    nt.assert_equal(_ip.user_ns['answer'], 42)
def test_coalesce_replace_streams(self):
    """Are \\r characters handled?"""
    texts = ["z", "\ra", "\nz\rb", "\nz", "\rc\n", "z\rz\rd"]
    outputs = [nbformat.new_output(output_type="stream", name="stdout", text=t)
               for t in texts]
    cell = nbformat.new_code_cell(source="# None", execution_count=1,
                                  outputs=outputs)
    nb = nbformat.new_notebook(cells=[cell])
    res = self.build_resources()
    nb, res = coalesce_streams(nb, res)
    # Carriage returns should overwrite the text preceding them on a line.
    self.assertEqual(nb.cells[0].outputs[0].text, u'a\nb\nc\nd')
def create_grade_and_solution_cell(source, cell_type, grade_id, points):
    """Build an nbgrader cell that is both graded and a solution cell."""
    factories = {"markdown": new_markdown_cell, "code": new_code_cell}
    try:
        cell = factories[cell_type](source=source)
    except KeyError:
        raise ValueError("invalid cell type: {}".format(cell_type))
    cell.metadata.nbgrader = {
        "solution": True,
        "grade": True,
        "grade_id": grade_id,
        "points": points,
    }
    return cell
def test_empty_code_cell(self):
    """No empty code cells in rst"""
    nbname = self._get_notebook()
    with io.open(nbname, encoding='utf8') as f:
        nb = nbformat.read(f, 4)

    exporter = self.exporter_class()
    before, resources = exporter.from_notebook_node(nb)
    # add an empty code cell
    nb.cells.append(v4.new_code_cell(source=""))
    after, resources = exporter.from_notebook_node(nb)
    # adding an empty code cell shouldn't change output
    self.assertEqual(before.strip(), after.strip())
def test_coalesce_sequenced_streams(self):
    """Can the coalesce streams preprocessor merge a sequence of streams?"""
    outputs = [nbformat.new_output(output_type="stream", name="stdout",
                                   text=str(i))
               for i in range(8)]
    cell = nbformat.new_code_cell(source="# None", execution_count=1,
                                  outputs=outputs)
    nb = nbformat.new_notebook(cells=[cell])
    res = self.build_resources()
    nb, res = coalesce_streams(nb, res)
    # All eight single-character streams collapse into one output.
    self.assertEqual(nb.cells[0].outputs[0].text, u'01234567')
def write(cells):
    """Turn cells list into valid IPython notebook code."""
    # Use IPython.nbformat functionality for writing the notebook
    from IPython.nbformat import writes
    from IPython.nbformat.v4 import (
        new_code_cell, new_markdown_cell, new_notebook)

    builders = {'markdown': new_markdown_cell, 'codecell': new_code_cell}
    # Unrecognized cell types are silently skipped, as before.
    nb_cells = [builders[cell_tp](source=block)
                for cell_tp, language, block in cells
                if cell_tp in builders]
    return writes(new_notebook(cells=nb_cells), version=4)
def build_notebook(self):
    """Build a notebook in memory for use with preprocessor tests"""
    outputs = []
    outputs.append(nbformat.new_output("stream", name="stdout", text="a"))
    outputs.append(nbformat.new_output("display_data", data={'text/plain': 'b'}))
    outputs.append(nbformat.new_output("stream", name="stdout", text="c"))
    outputs.append(nbformat.new_output("stream", name="stdout", text="d"))
    outputs.append(nbformat.new_output("stream", name="stderr", text="e"))
    outputs.append(nbformat.new_output("stream", name="stderr", text="f"))
    outputs.append(nbformat.new_output("display_data", data={'image/png': 'Zw=='}))  # g
    outputs.append(nbformat.new_output("display_data", data={'application/pdf': 'aA=='}))  # h

    code_cell = nbformat.new_code_cell(source="$ e $", execution_count=1,
                                       outputs=outputs)
    md_cell = nbformat.new_markdown_cell(source="$ e $")
    return nbformat.new_notebook(cells=[code_cell, md_cell])
def test_contents_manager(self):
    "make sure ContentsManager returns right files (ipynb, bin, txt)."
    nbdir = self.notebook_dir.name
    base = self.base_url()

    # Write three fixtures into the notebook dir: a notebook, a binary
    # blob, and a plain-text file.
    code = new_code_cell("print(2*6)",
                         outputs=[new_output("stream", text="12")])
    nb = new_notebook(cells=[new_markdown_cell(u'Created by test ³'), code])
    with io.open(pjoin(nbdir, 'testnb.ipynb'), 'w', encoding='utf-8') as f:
        write(nb, f, version=4)

    with io.open(pjoin(nbdir, 'test.bin'), 'wb') as f:
        f.write(b'\xff' + os.urandom(5))

    with io.open(pjoin(nbdir, 'test.txt'), 'w') as f:
        f.write(u'foobar')

    # The notebook should come back as valid JSON containing its source.
    response = requests.get(url_path_join(base, 'files', 'testnb.ipynb'))
    self.assertEqual(response.status_code, 200)
    self.assertIn('print(2*6)', response.text)
    json.loads(response.text)

    # Binary content is served verbatim as an octet stream.
    response = requests.get(url_path_join(base, 'files', 'test.bin'))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.headers['content-type'], 'application/octet-stream')
    self.assertEqual(response.content[:1], b'\xff')
    self.assertEqual(len(response.content), 6)

    # Text content is served as text/plain.
    response = requests.get(url_path_join(base, 'files', 'test.txt'))
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.headers['content-type'], 'text/plain')
    self.assertEqual(response.text, 'foobar')
def setUp(self):
    """Create foo/testnb.ipynb and an nbconvert API client for the tests."""
    nbdir = self.notebook_dir.name
    subdir = pjoin(nbdir, "foo")
    if not os.path.isdir(subdir):
        os.mkdir(subdir)

    code = new_code_cell(source=u"print(2*6)")
    code.outputs.extend([
        new_output(output_type="stream", text=u"12"),
        new_output(output_type="execute_result",
                   data={"image/png": png_green_pixel}, execution_count=1),
    ])
    nb = new_notebook()
    nb.cells.append(new_markdown_cell(u"Created by test ³"))
    nb.cells.append(code)

    with io.open(pjoin(subdir, "testnb.ipynb"), "w", encoding="utf-8") as f:
        write(nb, f, version=4)

    self.nbconvert_api = NbconvertAPI(self.base_url())
def setUp(self):
    """Write a sample notebook under foo/ and wire up the nbconvert API."""
    nbdir = self.notebook_dir.name
    foo_dir = pjoin(nbdir, 'foo')
    if not os.path.isdir(foo_dir):
        os.mkdir(foo_dir)

    stream = new_output(output_type="stream", text=u'12')
    result = new_output(output_type="execute_result",
                        data={'image/png': png_green_pixel},
                        execution_count=1)
    cc1 = new_code_cell(source=u'print(2*6)')
    cc1.outputs.append(stream)
    cc1.outputs.append(result)

    nb = new_notebook()
    nb.cells.extend([new_markdown_cell(u'Created by test ³'), cc1])

    nb_path = pjoin(foo_dir, 'testnb.ipynb')
    with io.open(nb_path, 'w', encoding='utf-8') as f:
        write(nb, f, version=4)

    self.nbconvert_api = NbconvertAPI(self.base_url())
def notebook(self, s):
    """Export and convert IPython notebooks.

    This function can export the current IPython history to a notebook file.
    For example, to export the history to "foo.ipynb" do "%notebook -e foo.ipynb".
    To export the history to "foo.py" do "%notebook -e foo.py".
    """
    args = magic_arguments.parse_argstring(self.notebook, s)

    from IPython.nbformat import write, v4
    args.filename = unquote_filename(args.filename)
    if args.export:
        cells = []
        hist = list(self.shell.history_manager.get_range())
        # BUG FIX: the loop variable was named `input` (shadowing the
        # builtin) while the cell body referenced an undefined `source`,
        # raising NameError on every export. Bind the history entry's
        # source text under the name actually used below.
        for session, execution_count, source in hist[:-1]:
            cells.append(v4.new_code_cell(
                execution_count=execution_count,
                source=source
            ))
        nb = v4.new_notebook(cells=cells)
        with io.open(args.filename, 'w', encoding='utf-8') as f:
            write(nb, f, version=4)
def test_coalesce_sequenced_streams(self):
    """Can the coalesce streams preprocessor merge a sequence of streams?"""
    outputs = [
        nbformat.new_output(output_type="stream", name="stdout", text=ch)
        for ch in "01234567"
    ]
    nb = nbformat.new_notebook(cells=[
        nbformat.new_code_cell(source="# None", execution_count=1,
                               outputs=outputs)
    ])
    resources = self.build_resources()
    nb, resources = coalesce_streams(nb, resources)
    merged = nb.cells[0].outputs
    self.assertEqual(merged[0].text, u'01234567')
def notebook(self, s):
    """Export and convert IPython notebooks.

    This function can export the current IPython history to a notebook file.
    For example, to export the history to "foo.ipynb" do "%notebook -e foo.ipynb".
    To export the history to "foo.py" do "%notebook -e foo.py".
    """
    args = magic_arguments.parse_argstring(self.notebook, s)

    from IPython.nbformat import write, v4

    args.filename = unquote_filename(args.filename)
    if not args.export:
        return
    hist = list(self.shell.history_manager.get_range())
    # get_range always yields the current (in-flight) entry, hence <= 1.
    if len(hist) <= 1:
        raise ValueError('History is empty, cannot export')
    cells = [v4.new_code_cell(execution_count=execution_count, source=source)
             for session, execution_count, source in hist[:-1]]
    nb = v4.new_notebook(cells=cells)
    with io.open(args.filename, 'w', encoding='utf-8') as f:
        write(nb, f, version=4)
def build_notebook(self):
    """Build a notebook in memory for use with preprocessor tests"""
    def stream(name, text):
        # Shorthand for a stream output on the given channel.
        return nbformat.new_output("stream", name=name, text=text)

    def display(data):
        # Shorthand for a display_data output with the given mimebundle.
        return nbformat.new_output("display_data", data=data)

    outputs = [
        stream("stdout", "a"),
        display({'text/plain': 'b'}),
        stream("stdout", "c"),
        stream("stdout", "d"),
        stream("stderr", "e"),
        stream("stderr", "f"),
        display({'image/png': 'Zw=='}),          # g
        display({'application/pdf': 'aA=='}),    # h
    ]
    cells = [
        nbformat.new_code_cell(source="$ e $", execution_count=1,
                               outputs=outputs),
        nbformat.new_markdown_cell(source="$ e $"),
    ]
    return nbformat.new_notebook(cells=cells)
def add_code_cell(self, nb):
    """Append a code cell carrying a javascript display output to *nb*."""
    js_bundle = {'application/javascript': "alert('hi');"}
    cell = nbformat.new_code_cell(
        "print('hi')",
        outputs=[nbformat.new_output("display_data", js_bundle)],
    )
    nb.cells.append(cell)
def add_code_cell(self, nb):
    """Append a code cell with a javascript display_data output to *nb*."""
    output = nbformat.new_output("display_data",
                                 {'application/javascript': "alert('hi');"})
    nb.cells.append(nbformat.new_code_cell("print('hi')", outputs=[output]))
import base64
import os
import re
import sys

import IPython.nbformat.v4 as nbf

# Build an .ipynb from a pyx example: <name>.txt (title + description),
# <name>.py (code) and pre-rendered <name>.png / <name>.svg outputs.
filename = os.path.splitext(sys.argv[1])[0]

# Read the title/description file; fall back to the bare filename when absent.
try:
    with open("{}.txt".format(filename), encoding="utf-8") as txt:
        title, description = txt.read().split('\n\n', 1)
except IOError:
    title, description = filename, ""
description = description.replace("...", "").replace("'''", "**").replace("''", "*")

# A leading run of "!" marks how many "bend" icons to show on that line.
bendpattern = re.compile("^!+", re.MULTILINE)
bendcode = '<img src="http://pyx.sourceforge.net/bend.png" align="left">'
description = re.sub(bendpattern, lambda m: bendcode*(m.end()-m.start()), description)

with open("{}.py".format(filename), encoding="utf-8") as src:
    code = src.read()
# Strip the write*file() boilerplate from the example code.
# BUG FIX: the pattern is now a raw string so "\." and "\(" are regex
# escapes rather than invalid string-literal escapes (SyntaxWarning on
# modern Python, an error in the future).
code = re.sub(r'\.writeEPSfile\(("[a-z]+")?\)\n.*writePDFfile\(("[a-z]+")?\)\n.*writeSVGfile\(("[a-z]+")?\)\n',
              "", code)

with open("{}.png".format(filename), "rb") as png:
    png_data = base64.encodebytes(png.read()).decode("ascii")
with open("{}.svg".format(filename), "r", encoding="utf-8") as svg:
    svg_data = svg.read()

# NOTE: the previous `nb = nbf.new_notebook()` before the cells were built
# was immediately overwritten and has been removed as dead code.
cells = []
cells.append(nbf.new_markdown_cell(source="# " + title))
cells.append(nbf.new_code_cell(source=code, execution_count=1,
                               outputs=[nbf.new_output(output_type=u'execute_result',
                                                       execution_count=1,
                                                       data={'image/png': png_data,
                                                             'image/svg+xml': svg_data})]))
cells.append(nbf.new_markdown_cell(source=description))
nb = nbf.new_notebook(cells=cells, metadata={'language': 'python'})
with open("{}.ipynb".format(filename), "w") as out:
    out.write(nbf.writes(nb))
description) code = open("{}.py".format(filename), encoding="utf-8").read() code = re.sub( '\.writeEPSfile\(("[a-z]+")?\)\n.*writePDFfile\(("[a-z]+")?\)\n.*writeSVGfile\(("[a-z]+")?\)\n', "", code) nb = nbf.new_notebook() cells = [] cells.append(nbf.new_markdown_cell(source="# " + title)) cells.append( nbf.new_code_cell(source=code, execution_count=1, outputs=[ nbf.new_output( output_type=u'execute_result', execution_count=1, data={ 'image/png': base64.encodebytes( open("{}.png".format(filename), "rb").read()).decode("ascii"), 'image/svg+xml': open("{}.svg".format(filename), "r", encoding="utf-8").read() }) ])) cells.append(nbf.new_markdown_cell(source=description)) nb = nbf.new_notebook(cells=cells, metadata={'language': 'python'}) open("{}.ipynb".format(filename), "w").write(nbf.writes(nb))
def post(self, convert=False):
    """Handle a project POST request.

    With ``convert`` truthy, each bundle in the JSON payload is turned into
    an nbformat-v4 notebook (task/description markdown cells plus one code
    cell per action) and stored back on the bundle; otherwise the payload
    is persisted via ``db.addProject`` and an FTP work folder is created.

    NOTE(review): reconstructed from whitespace-mangled source; the exact
    line breaks of the long string literals and the indentation of the two
    trailing ``set_status`` calls should be confirmed against the original.
    """
    # Check if this is a convert operation
    if convert:
        # Get JSON payload
        json_data = tornado.escape.json_decode(self.request.body)
        # Go through all the assignments / bundles in the project payload
        for x in json_data['bundles']:
            # Create a notebook version4 object
            nb = nbfv4.new_notebook()
            # Create the task description
            task_description = '## Task \n' + x['description'] + '\n'\
                               '___ \n' \
                               '#### '+ x['owner'] + '\n' \
                               '___ \n'
            # Create the general description string
            common_description ='#### Temporary folder \n' \
                                'Set your working dir to following folder '+ json_data['gid']+'. Upload your csv/data files into '\
                                'this directoy to use them.<br/>'\
                                '`ftp://pycard.ifi.uzh.ch/data/'+json_data['gid']+'`'\
                                '<br/><br/>'\
                                'Use with R Kernel <br/>' \
                                '`setwd("./'+ json_data['gid']+'")` <br/><br/>' \
                                'Use with Python Kernel <br/> ' \
                                '`import os` <br/>' \
                                '`os.chdir("./'+ json_data['gid']+'")` \n' \
                                '___ \n' \
                                '#### Notes board \n' \
                                'In order to avoid conflicts between notebooks and have a clean transition from one step to another, use the shared notes file ' \
                                'shared.txt . The contents of the file will be loaded and made in every notebook, so it is a good place to register variable names used in the different steps, or to provide feedback after each iteration. \
<br/><br/>'
            # Add the task_description as a markdown cell
            heading = nbfv4.new_markdown_cell(task_description)
            # Set the task_description as read-only
            heading['metadata'].run_control = dict()
            heading['metadata'].run_control['read_only'] = True
            # Append cell to notebook
            nb['cells'].append(heading)
            # Add the common description cell as a markdown cell
            common = nbfv4.new_markdown_cell(common_description)
            # Set the common description cell as read only
            common['metadata'].run_control = dict()
            common['metadata']['common'] = True
            common['metadata'].run_control['read_only'] = True
            # Add the cell to the notebook
            nb['cells'].append(common)
            # Create a markdown cell for the note board, set the variable_cell metadata to true
            variablesh = nbfv4.new_markdown_cell()
            variablesh['metadata']['variable_cell'] = True
            nb['cells'].append(variablesh)
            # Set the notebook kernel in metadata
            nb['metadata']['language'] = json_data['kernel']
            # Set cell toolbar to Side Comments in metadata
            nb['metadata']['celltoolbar'] = "Side Comments"
            # Set project ID in metadata
            nb['metadata']['pgid'] = json_data['gid']
            # Set id of notes board for this project (shared_notes.txt file)
            nb['metadata']['variablesid'] = json_data['variablesid']
            # Set Google ID for this notebook
            nb['metadata']['id'] = x['gid']
            # Set the worker assigned to this task
            nb['metadata']['bundle-owner'] = x['owner']
            # Go through all the actions in the assignment
            for a in x['actions']:
                # Create action description text
                text = '#### This is the description of the actions that need to be implemented.' \
                       '\n' \
                       '### ' + a['name'] + '\n' \
                       'Description: ' + a['description'] + '<br>' \
                       'Input: ' + a['input'] + '<br>' \
                       'Output: ' + a['output']
                code = "# Enter implementation here."
                # Add the description cell as a markdown cell, set it read-only
                desc = nbfv4.new_markdown_cell(text)
                desc['metadata'].run_control = dict()
                desc['metadata'].run_control['read_only'] = True
                nb['cells'].append(desc)
                # Create a cell for logging the work
                log_text = '# Log and description \n' \
                           'Please record here all information needed to reproduce and understand your work: \n' \
                           '- Algorithms used\n' \
                           '- Things tried but discarded\n' \
                           '- Explanation **why** you have solved the problem the way you solved it.\n'
                # Add the log cell as a markdown cell (editable by the worker)
                log_desc = nbfv4.new_markdown_cell(log_text)
                nb['cells'].append(log_desc)
                # Create the cell code for this action
                code_cell = nbfv4.new_code_cell(code)
                code_cell['metadata'].side_comments = dict()
                # Create the cell code for this action, set the section id needed
                # for the SideComments extension as metadata
                code_cell['metadata'].side_comments['id'] = ''.join(
                    random.SystemRandom().choice(string.ascii_uppercase + string.digits)
                    for _ in range(20))
                logger.info(code_cell['metadata'])
                # Add cell to notebook
                nb['cells'].append(code_cell)
            # Add the contents of the created notebook a the bundle {notebook} property in the project payload
            x['notebook'] = nbf.writes(nb, version=4)
        json_data['variables'] = html2text.html2text(json_data['variables'])
        # Send the answer back to client
        self.write(json.dumps(json_data, cls=Encoder))
        self.set_status(201)
    else:
        # Get the project payload from the client
        json_data = tornado.escape.json_decode(self.request.body)
        # Add the project to the database
        ret = db.addProject(json_data)
        if ret['ok'] == 1.0:
            # If project was added, create temporary work folder for the project
            # which can be accessed through ftp
            if not os.path.exists('/ftp/ipython/data/' + json_data['gid']):
                os.makedirs('/ftp/ipython/data/' + json_data['gid'])
                os.chmod('/ftp/ipython/data/' + json_data['gid'], 0o755)
            self.set_status(201)
def post(self, convert=False):
    """Handle a project POST request (variant without per-action log cells).

    With ``convert`` truthy, each bundle in the JSON payload is turned into
    an nbformat-v4 notebook (task/description markdown cells plus one code
    cell per action) and stored back on the bundle; otherwise the payload
    is persisted via ``db.addProject`` and an FTP work folder is created.

    NOTE(review): reconstructed from whitespace-mangled source; the exact
    line breaks of the long string literals and the indentation of the two
    trailing ``set_status`` calls should be confirmed against the original.
    """
    # Check if this is a convert operation
    if convert:
        # Get JSON payload
        json_data = tornado.escape.json_decode(self.request.body)
        # Go through all the assignments / bundles in the project payload
        for x in json_data['bundles']:
            # Create a notebook version4 object
            nb = nbfv4.new_notebook()
            # Create the task description
            task_description = '## Task \n' + x['description'] + '\n'\
                               '___ \n' \
                               '#### '+ x['owner'] + '\n' \
                               '___ \n'
            # Create the general description string
            common_description ='#### Temporary folder \n' \
                                'Set your working dir to following folder '+ json_data['gid']+'. Upload your csv/data files into '\
                                'this directoy to use them.<br/>'\
                                '`ftp://pycard.ifi.uzh.ch/data/'+json_data['gid']+'`'\
                                '<br/><br/>'\
                                'Use with R Kernel <br/>' \
                                '`setwd("./'+ json_data['gid']+'")` <br/><br/>' \
                                'Use with Python Kernel <br/> ' \
                                '`import os` <br/>' \
                                '`os.chdir("./'+ json_data['gid']+'")` \n' \
                                '___ \n' \
                                '#### Notes board \n' \
                                'In order to avoid conflicts between notebooks and have a clean transition from one step to another, use the shared notes file ' \
                                'shared.txt . The contents of the file will be loaded and made in every notebook, so it is a good place to register variable names used in the different steps, or to provide feedback after each iteration. \
<br/><br/>'
            # Add the task_description as a markdown cell
            heading = nbfv4.new_markdown_cell(task_description)
            # Set the task_description as read-only
            heading['metadata'].run_control = dict()
            heading['metadata'].run_control['read_only'] = True
            # Append cell to notebook
            nb['cells'].append(heading)
            # Add the common description cell as a markdown cell
            common = nbfv4.new_markdown_cell(common_description)
            # Set the common description cell as read only
            common['metadata'].run_control = dict()
            common['metadata']['common'] = True
            common['metadata'].run_control['read_only'] = True
            # Add the cell to the notebook
            nb['cells'].append(common)
            # Create a markdown cell for the note board, set the variable_cell metadata to true
            variablesh = nbfv4.new_markdown_cell()
            variablesh['metadata']['variable_cell'] = True
            nb['cells'].append(variablesh)
            # Set the notebook kernel in metadata
            nb['metadata']['language'] = json_data['kernel']
            # Set cell toolbar to Side Comments in metadata
            nb['metadata']['celltoolbar'] = "Side Comments"
            # Set project ID in metadata
            nb['metadata']['pgid'] = json_data['gid']
            # Set id of notes board for this project (shared_notes.txt file)
            nb['metadata']['variablesid'] = json_data['variablesid']
            # Set Google ID for this notebook
            nb['metadata']['id'] = x['gid']
            # Go through all the actions in the assignment
            for a in x['actions']:
                # Create action description text
                text = '#### This is the description of the actions that need to be implemented.' \
                       '\n' \
                       '### ' + a['name'] + '\n' \
                       'Description: ' + a['description'] + '<br>' \
                       'Input: ' + a['input'] + '<br>' \
                       'Output: ' + a['output']
                code = "# Enter implementation here."
                # Add the description cell as a markdown cell, set it read-only
                desc = nbfv4.new_markdown_cell(text)
                desc['metadata'].run_control = dict()
                desc['metadata'].run_control['read_only'] = True
                nb['cells'].append(desc)
                # Create the cell code for this action
                code_cell = nbfv4.new_code_cell(code)
                code_cell['metadata'].side_comments = dict()
                # Create the cell code for this action, set the section id needed
                # for the SideComments extension as metadata
                code_cell['metadata'].side_comments['id'] = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(20))
                logger.info(code_cell['metadata'])
                # Add cell to notebook
                nb['cells'].append(code_cell)
            # Add the contents of the created notebook a the bundle {notebook} property in the project payload
            x['notebook'] = nbf.writes(nb,version=4)
        json_data['variables'] = html2text.html2text(json_data['variables'])
        # Send the answer back to client
        self.write(json.dumps(json_data, cls=Encoder))
        self.set_status(201)
    else:
        # Get the project payload from the client
        json_data = tornado.escape.json_decode(self.request.body)
        # Add the project to the database
        ret = db.addProject(json_data)
        if ret['ok'] == 1.0:
            # If project was added, create temporary work folder for the project
            # which can be accessed through ftp
            if not os.path.exists('/ftp/ipython/data/' + json_data['gid']):
                os.makedirs('/ftp/ipython/data/' + json_data['gid'])
                os.chmod('/ftp/ipython/data/' + json_data['gid'], 0o755)
            self.set_status(201)