def __init__(self, *args, **kwargs):
    """
    Overwrites the extra loaders to get the right template.
    """
    filename = os.path.join(os.path.dirname(__file__), 'rst_modified.tpl')
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()

    dl = DictLoader({'rst_modified.tpl': content})
    kwargs['extra_loaders'] = [dl]

    RSTExporter.__init__(self, *args, **kwargs)
def convert(self, remove_executed=False):
    """
    Convert the executed notebook to a restructured text (RST) file.

    Parameters
    ----------
    remove_executed : bool, optional
        Controls whether to remove the executed notebook or not.
    """

    if not path.exists(self._executed_nb_path):
        raise IOError(
            "Executed notebook file doesn't exist! Expected: {0}".format(
                self._executed_nb_path))

    if path.exists(self._rst_path) and not self.overwrite:
        logger.debug(
            "RST version of notebook already exists at {0}. Use "
            "overwrite=True or --overwrite (at cmd line) to re-run".format(
                self._rst_path))
        return self._rst_path

    # Initialize the resources dict - see:
    # https://github.com/jupyter/nbconvert/blob/master/nbconvert/nbconvertapp.py#L327
    resources = {}
    resources['config_dir'] = ''  # we don't need to specify config
    resources['unique_key'] = self.nb_name

    # path to store extra files, like plots generated
    resources['output_files_dir'] = 'nboutput'

    # Exports the notebook to RST
    logger.debug('Exporting notebook to RST...')
    exporter = RSTExporter()

    if self.template_file:
        exporter.template_file = self.template_file

    output, resources = exporter.from_filename(self._executed_nb_path,
                                               resources=resources)

    # Write the output RST file
    writer = FilesWriter()
    output_file_path = writer.write(output, resources,
                                    notebook_name=self.nb_name)

    if remove_executed:  # optionally, clean up the executed notebook file
        remove(self._executed_nb_path)

    return output_file_path
def remove_solutions(nb, nb_name):
    """Convert solution cells to markdown; embed images from Python output."""

    # -- Extract image data from the cell outputs
    c = Config()
    template = (f"../static/{nb_name}"
                "_Solution_{cell_index}_{index}{extension}")
    c.ExtractOutputPreprocessor.output_filename_template = template

    # Note: using the RST exporter means we need to install pandoc as a dep
    # in the github workflow, which adds a little bit of latency, and we don't
    # actually care about the RST output. It's just a convenient way to get the
    # image resources the way we want them.
    exporter = RSTExporter()
    extractor = ExtractOutputPreprocessor(config=c)
    exporter.register_preprocessor(extractor, True)
    _, resources = exporter.from_notebook_node(nb)

    # -- Convert solution cells to markdown with embedded image
    nb_cells = nb.get("cells", [])
    outputs = resources["outputs"]
    solution_resources = {}

    for i, cell in enumerate(nb_cells):
        cell_text = cell["source"].replace(" ", "").lower()
        if cell_text.startswith("#@titlesolution"):

            # Just remove solution cells that generate no outputs
            if not cell["outputs"]:
                nb_cells.remove(cell)
                continue

            # Filter the resources for solution images
            image_paths = [k for k in outputs if f"Solution_{i}" in k]
            solution_resources.update({k: outputs[k] for k in image_paths})

            # Convert the solution cell to markdown, strip the source,
            # and embed the image as a link to static resource
            new_source = "**Example output:**\n\n" + "\n\n".join(
                [f"<img src='{f}' align='left'>" for f in image_paths])
            cell["source"] = new_source
            cell["cell_type"] = "markdown"
            del cell["outputs"]
            del cell["execution_count"]

    return nb, solution_resources
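# Hedged usage sketch for remove_solutions() above: read an executed notebook,
# strip the solution cells, and save the extracted images so the '../static/...'
# links embedded in the markdown resolve from the output notebook's directory.
# The file names and directory layout here are assumptions, not part of the code.
import os
import nbformat

nb = nbformat.read("tutorials/W1D1_Tutorial1.ipynb", as_version=4)  # hypothetical path
nb, solution_resources = remove_solutions(nb, "W1D1_Tutorial1")

for relpath, image_bytes in solution_resources.items():
    # keys follow the template '../static/<nb_name>_Solution_<cell>_<n><ext>'
    out_path = os.path.normpath(os.path.join("tutorials/student", relpath))
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    with open(out_path, "wb") as f:
        f.write(image_bytes)

nbformat.write(nb, "tutorials/student/W1D1_Tutorial1.ipynb")  # hypothetical output path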
def _convert(self, tmpdir: Path, entry: Path, outdir: Path, depth: int):
    """Convert a notebook.

    Args:
        tmpdir: Temporary working directory
        entry: notebook to convert
        outdir: output directory for .html and .rst files
        depth: depth below root, for fixing image paths
    """
    test_mode = self.s.get("test_mode")

    # strip special cells.
    if self._has_tagged_cells(entry, set(self._cell_tags.values())):
        _log.debug(f"notebook '{entry.name}' has test cell(s)")
        orig_entry, entry = entry, self._strip_tagged_cells(
            tmpdir, entry, ("remove", "exercise"), "testing")
        notify(f"Stripped tags from: {orig_entry.name}", 3)
    else:
        # copy to temporary directory just to protect from output cruft
        tmp_entry = tmpdir / entry.name
        shutil.copy(entry, tmp_entry)
        orig_entry, entry = entry, tmp_entry

    # convert all tag-stripped versions of the notebook.
    # before running, check if converted result is newer than source file
    if self._already_converted(orig_entry, entry, outdir):
        notify(
            f"Skip notebook conversion, output is newer, for: {entry.name}", 3)
        self._results.cached.append(entry)
        return

    notify(f"Running notebook: {entry.name}", 3)
    nb = self._parse_and_execute(entry)

    if test_mode:  # don't do conversion in test mode
        return

    notify(f"Exporting notebook '{entry.name}' to directory {outdir}", 3)
    wrt = FilesWriter()

    # export each notebook into multiple target formats
    created_wrapper = False
    for (exp, postprocess_func, pp_args) in (
            (RSTExporter(), self._postprocess_rst, ()),
            (HTMLExporter(), self._postprocess_html, (depth, )),
    ):
        _log.debug(f"export '{orig_entry}' with {exp} to notebook '{entry}'")
        (body, resources) = exp.from_notebook_node(nb)
        body = postprocess_func(body, *pp_args)
        wrt.build_directory = str(outdir)
        wrt.write(body, resources, notebook_name=entry.stem)

        # create a 'wrapper' page
        if not created_wrapper:
            _log.debug(f"create wrapper page for '{entry.name}' in '{outdir}'")
            self._create_notebook_wrapper_page(entry.stem, outdir)
            created_wrapper = True

        # move notebooks into docs directory
        _log.debug(f"move notebook '{entry}' to output directory: {outdir}")
        shutil.copy(entry, outdir / entry.name)
def convert(self):
    """
    Converts the executed notebook to a restructured text (RST) file.

    Returns
    -------
    output_file_path : str
        The path to the converted notebook
    """
    # Only convert if executed notebook exists
    if not os.path.exists(self.executed_nb_path):
        raise IOError(
            "Executed notebook file doesn't exist! Expected: {0}".format(
                self.executed_nb_path))

    # Initialize the resources dict
    resources = dict()
    resources['unique_key'] = self.nb_name

    # path to store extra files, like plots generated
    resources['output_files_dir'] = 'nboutput/'

    # Exports the notebook to RST
    _logger.info("Exporting executed notebook to RST format")
    exporter = RSTExporter()

    # If a RST template file has been specified use this template
    if self.rst_template:
        exporter.template_file = self.rst_template

    output, resources = exporter.from_filename(self.executed_nb_path,
                                               resources=resources)

    # Write the output RST file
    writer = FilesWriter()
    output_file_path = writer.write(output, resources,
                                    notebook_name=self.nb_name)

    return output_file_path
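# A minimal, standalone sketch of the export path used by convert() above:
# RSTExporter renders an executed notebook to RST text plus extracted outputs
# (figures etc.), and FilesWriter persists both. The notebook file name and
# resource values here are placeholders, not values from the surrounding class.
from nbconvert.exporters import RSTExporter
from nbconvert.writers import FilesWriter

exporter = RSTExporter()
output, resources = exporter.from_filename(
    "executed_notebook.ipynb",  # hypothetical executed notebook
    resources={"unique_key": "executed_notebook",
               "output_files_dir": "nboutput"})

writer = FilesWriter()
rst_path = writer.write(output, resources, notebook_name="executed_notebook")
print(rst_path)  # path of the written .rst file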
def default_filters(self):
    """
    Overrides in subclasses to provide extra filters.

    This should return an iterable of 2-tuples: (name, class-or-function).
    You should call the method on the parent class and include the filters
    it provides.

    If a name is repeated, the last filter provided wins. Filters from
    user-supplied config win over filters provided by classes.
    """
    for k, v in RSTExporter.default_filters(self):
        yield (k, v)

    yield ('convert_pandoc_rst', convert_pandoc_rst)
    yield ('process_raw_html', process_raw_html)
def convert(
    srcdir: pathlib.Path = None,
    outdir: pathlib.Path = None,
    htmldir: pathlib.Path = None,
    options: dict = None,
):
    """Convert notebooks under `srcdir`, placing output in `outdir`.

    Args:
        srcdir: Input directory
        outdir: Output directory
        htmldir: Where HTML files will end up (Sphinx build directory)
        options: Options controlling the conversion:

            * "continue": if true, continue if there is an error executing the
              notebook; if false, raise NotebookError
            * "kernel": name of kernel for notebook execution, ie conda env name
            * "format": output format, either "html" or "rst"
            * "pat": if not None, a `re` expression that must match notebook
              filenames, or they will be skipped

    Raises:
        NotebookError: error executing or parsing the notebook

    Returns:
        None
    """
    ep_kw = {}
    if options["kernel"]:
        ep_kw["kernel_name"] = options["kernel"]
    ep = ExecutePreprocessor(timeout=600, **ep_kw)

    if options["format"] == "html":
        exp = HTMLExporter()
    elif options["format"] == "rst":
        exp = RSTExporter()
    else:
        raise ValueError(f"Invalid output format: {options['format']}")

    wrt = FilesWriter()
    _convert(srcdir, outdir, htmldir, wrt, exp, ep, options)
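# Hedged usage sketch for convert() above; the directory names are placeholders
# and the options dict mirrors the keys described in the docstring ("continue"
# and "pat" are consumed by the downstream _convert helper, not shown here).
import pathlib

convert(
    srcdir=pathlib.Path("src/notebooks"),
    outdir=pathlib.Path("docs/converted"),
    htmldir=pathlib.Path("docs/_build/html"),
    options={
        "continue": True,   # keep going if a notebook fails to execute
        "kernel": None,     # use the default kernel for execution
        "format": "rst",    # or "html"
        "pat": None,        # no filename filter: convert every notebook
    },
)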
def notebooks_to_rst(app):
    from glob import glob

    try:
        # post "big-split", nbconvert is a separate namespace
        from nbconvert.nbconvertapp import NbConvertApp
        from nbconvert.writers import FilesWriter
        from nbconvert.preprocessors import Preprocessor, ExecutePreprocessor, execute
        from nbconvert.exporters import RSTExporter
        from nbformat import NotebookNode
    except ImportError:
        try:
            from IPython.nbconvert.nbconvertapp import NbConvertApp
            from IPython.nbconvert.writers import FilesWriter
            from IPython.nbconvert.preprocessors import Preprocessor, ExecutePreprocessor, execute
            from IPython.nbconvert.exporters import RSTExporter
            from IPython.nbformat import NotebookNode
        except ImportError:
            raise ImportError(
                "Failed to find Jupyter or IPython. Cannot build "
                "the notebooks embedded in the docs. Proceeding with "
                "the rest of the doc build, but additional "
                "warnings are likely."
            )
            return

    class OrphanizerWriter(FilesWriter):
        def write(self, output, resources, **kwargs):
            output = ":orphan:\n\n" + output
            FilesWriter.write(self, output, resources, **kwargs)

    class AddSysPath(Preprocessor):
        """
        Adds the local system path to the top of the notebook. This makes
        sure when build_sphinx is invoked that the notebook actually runs
        with the current build.
        """

        def preprocess(self, nb, resources):
            syspathstr = "sys.path = {} + sys.path".format(str(sys.path))
            cell = {
                "cell_type": "code",
                "execution_count": None,
                "metadata": {},
                "outputs": [],
                "source": "import sys\n" + syspathstr,
            }
            nb.cells.insert(0, NotebookNode(cell))
            return nb, resources

    class RemoveSysPath(Preprocessor):
        """
        Removes the sys.path cell added by AddSysPath
        """

        def preprocess(self, nb, resources):
            if "sys.path" in nb.cells[0].source:
                del nb.cells[0]
            return nb, resources

    class MonkeypatchCellExecutionError(execute.CellExecutionError):
        def __str__(self):
            sstr = super(MonkeypatchCellExecutionError, self).__str__()
            return sstr + " Traceback:\n" + str(self.traceback)

    execute.CellExecutionError = MonkeypatchCellExecutionError

    olddir = os.path.abspath(os.curdir)
    try:
        srcdir = os.path.abspath(os.path.split(__file__)[0])
        if os.path.isdir("notebooks"):
            os.chdir(os.path.join(srcdir, "notebooks"))
            nbs = glob("*.ipynb")

            app.info("Executing and converting these notebooks to sphinx files: " + str(nbs))

            nbc_app = NbConvertApp()
            nbc_app.initialize(argv=[])

            nbc_app.writer = OrphanizerWriter()
            nbc_app.export_format = "rst"

            pps = RSTExporter().default_preprocessors
            pps.insert(0, AddSysPath)
            pps.append(RemoveSysPath)
            nbc_app.config.RSTExporter.preprocessors = pps

            nbc_app.notebooks = nbs

            nbc_app.start()
        else:
            app.info("No notebook directory found in docs so not converting any notebooks.")
    except:
        e = sys.exc_info()[0]
        app.warn("Failed to convert notebooks to RST (see above): " + str(e))
    finally:
        os.chdir(olddir)
def convert(self, remove_executed=False):
    """
    Convert the executed notebook to a restructured text (RST) file.

    Parameters
    ----------
    remove_executed : bool, optional
        Controls whether to remove the executed notebook or not.
    """

    if not path.exists(self._executed_nb_path):
        raise IOError("Executed notebook file doesn't exist! Expected: {0}"
                      .format(self._executed_nb_path))

    if path.exists(self._rst_path) and not self.overwrite:
        logger.debug("RST version of notebook already exists at {0}. Use "
                     "overwrite=True or --overwrite (at cmd line) to re-run"
                     .format(self._rst_path))
        return self._rst_path

    # Initialize the resources dict - see:
    # https://github.com/jupyter/nbconvert/blob/master/nbconvert/nbconvertapp.py#L327
    resources = {}
    resources['config_dir'] = ''  # we don't need to specify config
    resources['unique_key'] = self.nb_name

    # path to store extra files, like plots generated
    resources['output_files_dir'] = 'nboutput'

    # Exports the notebook to RST
    logger.debug('Exporting notebook to RST...')
    exporter = RSTExporter()

    if self.template_file:
        exporter.template_file = self.template_file

    output, resources = exporter.from_filename(self._executed_nb_path,
                                               resources=resources)

    # Write the output RST file
    writer = FilesWriter()
    output_file_path = writer.write(output, resources,
                                    notebook_name=self.nb_name)

    # read the executed notebook, grab the keywords from the header,
    # add them in to the RST as filter keywords
    with open(self._executed_nb_path) as f:
        nb = nbformat.read(f, as_version=IPYTHON_VERSION)

    top_cell_text = nb['cells'][0]['source']
    match = re.search(r'## [kK]eywords\s+(.*)', top_cell_text)

    if match:
        keywords = match.groups()[0].split(',')
        keywords = [clean_keyword(k) for k in keywords if k.strip()]
        keyword_filters = ['filter{0}'.format(k) for k in keywords]
    else:
        keyword_filters = []

    # Add metatags to top of RST files to get rendered into HTML, used for
    # the search and filter functionality in Learn Astropy
    meta_tutorials = '.. meta::\n :keywords: {0}\n'
    filters = ['filterTutorials'] + keyword_filters
    meta_tutorials = meta_tutorials.format(', '.join(filters))

    with open(output_file_path, 'r') as f:
        rst_text = f.read()

    with open(output_file_path, 'w') as f:
        rst_text = '{0}\n{1}'.format(meta_tutorials, rst_text)
        f.write(rst_text)

    if remove_executed:  # optionally, clean up the executed notebook file
        remove(self._executed_nb_path)

    return output_file_path
def convert(self, remove_executed=False):
    """
    Convert the executed notebook to a restructured text (RST) file.

    Parameters
    ----------
    remove_executed : bool, optional
        Controls whether to remove the executed notebook or not.
    """

    if not path.exists(self._executed_nb_path):
        raise IOError("Executed notebook file doesn't exist! Expected: {0}"
                      .format(self._executed_nb_path))

    if path.exists(self._rst_path) and not self.overwrite:
        logger.debug("RST version of notebook already exists at {0}. Use "
                     "overwrite=True or --overwrite (at cmd line) to re-run"
                     .format(self._rst_path))
        return self._rst_path

    # Initialize the resources dict - see:
    # https://github.com/jupyter/nbconvert/blob/master/nbconvert/nbconvertapp.py#L327
    resources = {}
    resources['config_dir'] = ''  # we don't need to specify config
    resources['unique_key'] = self.nb_name

    # path to store extra files, like plots generated
    resources['output_files_dir'] = 'nboutput'

    # these keywords are used to build the filter keywords
    # TODO: add a pre-processor that extracts the keywords from the markdown
    # cell in the header and adds them to this list
    # NOTE: the split[-4] trick below is brittle in that it will break if
    # a notebook is, say, nested two layers deep instead of just one like
    # all of our notebooks thus far.
    resources['nb_keywords'] = [self.nb_path.split(sep)[-4]]

    # Exports the notebook to RST
    logger.debug('Exporting notebook to RST...')
    exporter = RSTExporter()

    if self.template_file:
        exporter.template_file = self.template_file

    output, resources = exporter.from_filename(self._executed_nb_path,
                                               resources=resources)

    # Write the output RST file
    writer = FilesWriter()
    output_file_path = writer.write(output, resources,
                                    notebook_name=self.nb_name)

    # read the executed notebook, grab the keywords from the header,
    # add them in to the RST as filter keywords
    with open(self._executed_nb_path) as f:
        nb = nbformat.read(f, as_version=IPYTHON_VERSION)

    top_cell_text = nb['cells'][0]['source']
    match = re.search(r'## [kK]eywords\s+(.*)', top_cell_text)

    if match:
        keywords = match.groups()[0].split(',')
        keywords = [clean_keyword(k) for k in keywords if k.strip()]
        keyword_filters = ['filter{0}'.format(k) for k in keywords]
    else:
        keyword_filters = []

    # Add metatags to top of RST files to get rendered into HTML, used for
    # the search and filter functionality in Learn Astropy
    meta_tutorials = '.. meta::\n :keywords: {0}\n'
    filters = ['filterTutorials'] + keyword_filters
    meta_tutorials = meta_tutorials.format(', '.join(filters))

    with open(output_file_path, 'r') as f:
        rst_text = f.read()

    with open(output_file_path, 'w') as f:
        rst_text = '{0}\n{1}'.format(meta_tutorials, rst_text)
        f.write(rst_text)

    if remove_executed:  # optionally, clean up the executed notebook file
        remove(self._executed_nb_path)

    return output_file_path
def compile_tutorial(tutorial_name, force_recompile=False):
    print('- Tutorial "' + tutorial_name + '"')

    notebook_path = 'tutorial_notebooks/' + tutorial_name + '/' + tutorial_name + '.ipynb'
    export_path = 'tutorials/' + tutorial_name + '/' + tutorial_name
    thumb_dest = os.path.dirname(export_path) + '/thumb.png'

    if not os.path.exists(os.path.dirname(export_path)):
        os.makedirs(os.path.dirname(export_path))

    # Read in notebook
    print(' Reading notebook...')
    notebook = nbformat.read(notebook_path, 4)

    # Scrape title, description and thumbnail
    first_cell = notebook.cells[0]

    title = first_cell.source.splitlines()[0]
    if '#' in title:
        title = title.replace('#', '').strip()

    description = ''
    for line in first_cell.source.splitlines()[1:]:
        if line.strip():
            description = line.strip()
            break

    if not description:
        print(' Description could not be found in the notebook.')

    if 'thumbnail_figure_index' in notebook.metadata:
        thumbnail_figure_index = notebook.metadata['thumbnail_figure_index']
    else:
        thumbnail_figure_index = -1

    if 'level' in notebook.metadata:
        level = notebook.metadata['level'].capitalize()
    elif 'difficulty' in notebook.metadata:
        level = notebook.metadata['difficulty'].capitalize()
    else:
        level = 'Unknown'

    # Check if the tutorial was already executed.
    if os.path.exists(export_path + '.rst'):
        if os.path.getmtime(export_path + '.rst') > os.path.getmtime(notebook_path):
            if force_recompile:
                print(' Already compiled. Recompiling anyway...')
            else:
                print(' Already compiled. Skipping compilation...')
                return title, level, description, thumb_dest.split('/', 1)[-1]

    # Execute notebook if not already executed
    already_executed = any(
        c.get('outputs') or c.get('execution_count')
        for c in notebook.cells if c.cell_type == 'code')

    resources = {'metadata': {'path': os.path.dirname(notebook_path)}}

    if not already_executed:
        print(' Executing', end='')
        start = time.time()

        additional_cell_1 = {
            "cell_type": "code",
            "execution_count": None,
            "metadata": {},
            "outputs": [],
            "source": r"%matplotlib inline" + '\n' +
                      r"%config InlineBackend.print_figure_kwargs = {'bbox_inches': None}"
        }

        additional_cell_2 = {
            "cell_type": "code",
            "execution_count": None,
            "metadata": {},
            "outputs": [],
            "source": "import matplotlib as mpl\n"
                      "mpl.rcParams['figure.figsize'] = (8, 6)\n"
                      "mpl.rcParams['figure.dpi'] = 150\n"
                      "mpl.rcParams['savefig.dpi'] = 150"
        }

        notebook.cells.insert(1, nbformat.from_dict(additional_cell_1))
        notebook.cells.insert(2, nbformat.from_dict(additional_cell_2))

        client = NotebookClient(nb=notebook, resources=resources,
                                timeout=585, kernel_name='python3')

        try:
            with client.setup_kernel():
                for i, cell in enumerate(notebook.cells):
                    print('.', end='')
                    client.execute_cell(cell, i)
                client.set_widgets_metadata()
        except CellExecutionError as err:
            print(' Error while processing notebook:')
            print(' ', err)

        print('')

        notebook.cells.pop(2)
        notebook.cells.pop(1)

        end = time.time()
        time_taken = end - start
        if time_taken > 60:
            print(' Execution took %dm%02ds.' % (time_taken / 60, time_taken % 60))
        else:
            print(' Execution took %ds.' % time_taken)
    else:
        print(' Notebook was already executed.')

    print(' Rendering tutorial...')

    exporter = RSTExporter()
    output, resources = exporter.from_notebook_node(notebook, resources)

    writer = FilesWriter(build_directory=os.path.dirname(export_path))
    writer.write(output, resources, notebook_name=os.path.basename(export_path))

    pictures = sorted(resources['outputs'], key=output.find)

    try:
        thumbnail_source = pictures[thumbnail_figure_index]

        # Read in thumbnail source image
        img = Image.open(os.path.dirname(export_path) + '/' + thumbnail_source)

        # Trim whitespace
        bg = Image.new(img.mode, img.size, img.getpixel((0, 0)))
        diff = ImageChops.difference(img, bg)
        diff = ImageChops.add(diff, diff)
        bbox = diff.getbbox()
        if bbox:
            img = img.crop(bbox)

        # Resize image to have a width of 400px
        img.thumbnail([400, 1000])

        # Save thumbnail
        img.save(thumb_dest)
    except:
        shutil.copyfile('_static/no_thumb.png', thumb_dest)

    print(' Done!')

    return title, level, description, thumb_dest.split('/', 1)[-1]
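# Hedged usage sketch for compile_tutorial() above, e.g. from a docs build
# driver; the tutorial name is a placeholder for a directory under
# 'tutorial_notebooks/'.
title, level, description, thumb = compile_tutorial('imaging_basics',
                                                    force_recompile=False)
print(title, level, description, thumb)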
def convert(self, remove_executed=False):
    """
    Convert the executed notebook to a restructured text (RST) file or HTML.

    Parameters
    ----------
    remove_executed : bool, optional
        Controls whether to remove the executed notebook or not.
    """

    if not path.exists(self._executed_nb_path):
        raise IOError(
            "Executed notebook file doesn't exist! Expected: {0}".format(
                self._executed_nb_path))

    if path.exists(self._output_path) and not self.overwrite:
        logger.debug(
            "{0} version of notebook already exists at {1}. Use "
            "overwrite=True or --overwrite (at cmd line) to re-run".format(
                self._output_type, self._output_path))
        return self._output_path

    # Initialize the resources dict - see:
    # https://github.com/jupyter/nbconvert/blob/master/nbconvert/nbconvertapp.py#L327
    resources = {}
    resources['config_dir'] = ''  # we don't need to specify config
    resources['unique_key'] = self.nb_name

    # path to store extra files, like plots generated
    resources['output_files_dir'] = 'nboutput'

    if self.base_path is None:
        path_to_root = ''
    else:
        path_to_root = path.relpath(self.base_path,
                                    start=path.split(self.nb_path)[0])
        path_to_root += path.sep
    resources['path_to_pages_root'] = request.pathname2url(path_to_root)

    # Exports the notebook to the output format
    logger.debug('Exporting notebook to {}...'.format(self._output_type))
    if self._output_type == 'RST':
        exporter = RSTExporter()
    elif self._output_type == 'HTML':
        exporter = HTMLExporter()
    else:
        raise ValueError('This should be impossible... output_type should '
                         'have been checked earlier, but it is '
                         'unrecognized')

    if self.template_file:
        exporter.template_file = self.template_file

    output, resources = exporter.from_filename(self._executed_nb_path,
                                               resources=resources)

    # Write the output file
    writer = FilesWriter()
    output_file_path = writer.write(output, resources,
                                    notebook_name=self.nb_name)

    if self._output_type == 'RST':
        self._add_filter_keywords(output_file_path)

    if remove_executed:  # optionally, clean up the executed notebook file
        remove(self._executed_nb_path)

    title = ''
    try:
        with open(self.nb_path) as f:
            nb = nbformat.reader.read(f)
            title = nb['cells'][0]['source'].split('#')[1].split(
                "\n")[0].strip()
    except Exception:
        print('Failed to parse notebook title from first cell, '
              'please check notebook.')

    page_info = dict(output_file_path=output_file_path,
                     name=self.nb_name.replace("_", ' ').title(),
                     title=title)

    return page_info
def notebooks_to_rst(app):
    from glob import glob

    try:
        # post "big-split", nbconvert is a separate namespace
        from nbconvert.nbconvertapp import NbConvertApp
        from nbconvert.writers import FilesWriter
        from nbconvert.preprocessors import Preprocessor, ExecutePreprocessor, execute
        from nbconvert.exporters import RSTExporter
        from nbformat import NotebookNode
    except ImportError:
        try:
            from IPython.nbconvert.nbconvertapp import NbConvertApp
            from IPython.nbconvert.writers import FilesWriter
            from IPython.nbconvert.preprocessors import Preprocessor, ExecutePreprocessor, execute
            from IPython.nbconvert.exporters import RSTExporter
            from IPython.nbformat import NotebookNode
        except ImportError:
            raise ImportError(
                'Failed to find Jupyter or IPython. Cannot build '
                'the notebooks embedded in the docs. Proceeding with '
                'the rest of the doc build, but additional '
                'warnings are likely.')
            return

    class OrphanizerWriter(FilesWriter):
        def write(self, output, resources, **kwargs):
            output = ':orphan:\n\n' + output
            FilesWriter.write(self, output, resources, **kwargs)

    class AddSysPath(Preprocessor):
        """
        Adds the local system path to the top of the notebook. This makes
        sure when build_sphinx is invoked that the notebook actually runs
        with the current build.
        """

        def preprocess(self, nb, resources):
            syspathstr = 'sys.path = {} + sys.path'.format(str(sys.path))
            cell = {
                'cell_type': 'code',
                'execution_count': None,
                'metadata': {},
                'outputs': [],
                'source': 'import sys\n' + syspathstr
            }
            nb.cells.insert(0, NotebookNode(cell))
            return nb, resources

    class RemoveSysPath(Preprocessor):
        """
        Removes the sys.path cell added by AddSysPath
        """

        def preprocess(self, nb, resources):
            if 'sys.path' in nb.cells[0].source:
                del nb.cells[0]
            return nb, resources

    class MonkeypatchCellExecutionError(execute.CellExecutionError):
        def __str__(self):
            sstr = super(MonkeypatchCellExecutionError, self).__str__()
            return sstr + ' Traceback:\n' + str(self.traceback)

    execute.CellExecutionError = MonkeypatchCellExecutionError

    olddir = os.path.abspath(os.curdir)
    try:
        srcdir = os.path.abspath(os.path.split(__file__)[0])
        if os.path.isdir('notebooks'):
            os.chdir(os.path.join(srcdir, 'notebooks'))
            nbs = glob('*.ipynb')

            app.info(
                "Executing and converting these notebooks to sphinx files: "
                + str(nbs))

            nbc_app = NbConvertApp()
            nbc_app.initialize(argv=[])

            nbc_app.writer = OrphanizerWriter()
            nbc_app.export_format = 'rst'

            pps = RSTExporter().default_preprocessors
            pps.insert(0, AddSysPath)
            pps.append(RemoveSysPath)
            nbc_app.config.RSTExporter.preprocessors = pps

            nbc_app.notebooks = nbs

            nbc_app.start()
        else:
            app.info(
                'No notebook directory found in docs so not converting any notebooks.')
    except:
        e = sys.exc_info()[0]
        app.warn('Failed to convert notebooks to RST (see above): ' + str(e))
    finally:
        os.chdir(olddir)
def compile_tutorial(tutorial_name, force_recompile=False):
    print('- Compiling tutorial ' + tutorial_name + '...')

    notebook_path = 'tutorial_notebooks/' + tutorial_name + '/' + tutorial_name + '.ipynb'
    export_path = 'tutorials/' + tutorial_name + '/' + tutorial_name
    thumb_dest = os.path.dirname(export_path) + '/thumb.png'

    if not os.path.exists(os.path.dirname(export_path)):
        os.makedirs(os.path.dirname(export_path))

    # Read in notebook
    notebook = nbformat.read(notebook_path, 4)

    # Scrape title, description and thumbnail
    first_cell = notebook.cells[0]

    title = first_cell.source.splitlines()[0]
    if '#' in title:
        title = title.replace('#', '').strip()

    description = first_cell.source.splitlines()[2].strip()

    if 'thumbnail_figure_index' in notebook.metadata:
        thumbnail_figure_index = notebook.metadata['thumbnail_figure_index']
    else:
        thumbnail_figure_index = -1

    if 'level' in notebook.metadata:
        level = notebook.metadata['level'].capitalize()
    elif 'difficulty' in notebook.metadata:
        level = notebook.metadata['difficulty'].capitalize()
    else:
        level = 'Unknown'

    # Check if the tutorial was already compiled.
    if os.path.exists(export_path + '.rst'):
        if os.path.getmtime(export_path + '.rst') > os.path.getmtime(notebook_path):
            if force_recompile:
                print(' Already compiled. Recompiling anyway...')
            else:
                print(' Already compiled. Skipping...')
                return title, level, description, thumb_dest.split('/', 1)[-1]

    # Execute notebook if not already executed
    already_executed = any(c.get('outputs') or c.get('execution_count')
                           for c in notebook.cells if c.cell_type == 'code')

    resources = {}

    if not already_executed:
        ep = ExecutePreprocessor(timeout=120, kernel_name='python3')
        try:
            notebook, resources = ep.preprocess(
                notebook,
                resources={'metadata': {'path': os.path.abspath(os.path.dirname(notebook_path))}})
        except CellExecutionError as err:
            print('Error while processing notebook.')
            print(err)

    exporter = RSTExporter()
    output, resources = exporter.from_notebook_node(notebook, resources)

    writer = FilesWriter(build_directory=os.path.dirname(export_path))
    writer.write(output, resources, notebook_name=os.path.basename(export_path))

    pictures = sorted(resources['outputs'], key=output.find)

    try:
        thumbnail_source = pictures[thumbnail_figure_index]

        # Read in thumbnail source image
        img = Image.open(os.path.dirname(export_path) + '/' + thumbnail_source)

        # Trim whitespace
        bg = Image.new(img.mode, img.size, img.getpixel((0, 0)))
        diff = ImageChops.difference(img, bg)
        diff = ImageChops.add(diff, diff)
        bbox = diff.getbbox()
        if bbox:
            img = img.crop(bbox)

        # Resize image to have a width of 400px
        img.thumbnail([400, 1000])

        # Save thumbnail
        img.save(thumb_dest)
    except:
        shutil.copyfile('_static/no_thumb.png', thumb_dest)

    print(' Done!')

    return title, level, description, thumb_dest.split('/', 1)[-1]
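# A minimal, standalone sketch of the execute-then-export pattern used by
# compile_tutorial() above (ExecutePreprocessor for execution, then
# RSTExporter/FilesWriter for rendering); all paths are placeholders.
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.exporters import RSTExporter
from nbconvert.writers import FilesWriter

nb = nbformat.read('tutorial_notebooks/demo/demo.ipynb', as_version=4)

ep = ExecutePreprocessor(timeout=120, kernel_name='python3')
nb, resources = ep.preprocess(
    nb, resources={'metadata': {'path': 'tutorial_notebooks/demo'}})

output, resources = RSTExporter().from_notebook_node(nb, resources)
FilesWriter(build_directory='tutorials/demo').write(
    output, resources, notebook_name='demo')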