def run(self):
    abs_dir = sphinx_abs_dir(self.env)
    renderers = {}  # cache renderers
    for node in self.document.traverse(CellOutputBundleNode):
        try:
            renderer_cls = renderers[node.renderer]
        except KeyError:
            renderer_cls = load_renderer(node.renderer)
            renderers[node.renderer] = renderer_cls
        renderer = renderer_cls(self.document, node, abs_dir)
        output_nodes = renderer.cell_output_to_nodes(self.env.nb_render_priority)
        node.replace_self(output_nodes)

    # Image collect extra nodes from cell outputs that we need to process;
    # this normally gets called as a `doctree-read` event
    for node in self.document.traverse(nodes.image):
        # If the image node has `candidates` then it's already been processed
        # as in-line markdown, so skip it
        if "candidates" in node:
            continue
        col = ImageCollector()
        # use the node docname, where possible, to deal with single document builds
        docname = (
            self.app.env.path2doc(node.source)
            if node.source
            else self.app.env.docname
        )
        with mock.patch.dict(self.app.env.temp_data, {"docname": docname}):
            col.process_doc(self.app, node)
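# The `run` method above reads like the body of a Sphinx post-transform. Below is
# a hedged sketch of how such a transform might be registered by an extension;
# the class name `CellOutputsToNodes` and the priority value are assumptions made
# for illustration, while `SphinxPostTransform` and `app.add_post_transform` are
# real Sphinx APIs.
from sphinx.application import Sphinx
from sphinx.transforms.post_transforms import SphinxPostTransform


class CellOutputsToNodes(SphinxPostTransform):
    """Hypothetical post-transform wrapping the `run` method shown above."""

    default_priority = 700  # illustrative; docutils transforms need a priority

    def run(self):
        ...  # body as in the snippet above


def setup(app: Sphinx):
    app.add_post_transform(CellOutputsToNodes)
    return {"parallel_read_safe": True}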
def apply(self):
    builder = self.app.builder.name
    output_dir = sphinx_abs_dir(self.env)
    for node in self.document.traverse(CellOutputBundleNode):
        cell = {"outputs": node.outputs}
        outputs = cell.get("outputs", [])
        if node.get("inline", False):
            output_nodes = cell_output_to_nodes_inline(
                outputs, RENDER_PRIORITY.get(builder, "html"), True, output_dir, None
            )
        else:
            output_nodes = cell_output_to_nodes(
                outputs, RENDER_PRIORITY.get(builder, "html"), True, output_dir, None
            )
        # TODO add warning if output_nodes is empty
        node.replace_self(output_nodes)

    # Image collect extra nodes from cell outputs that we need to process
    for node in self.document.traverse(nodes.image):
        # If the image node has `candidates` then it's already been processed
        # as in-line markdown, so skip it
        if "candidates" in node:
            continue
        col = ImageCollector()
        col.process_doc(self.app, node)
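# The lookup `RENDER_PRIORITY.get(builder, "html")` above implies a mapping from
# builder names to ordered MIME-type priority lists, where the first type found in
# a cell output wins. The concrete entries below are illustrative assumptions, not
# taken from the snippet.
RENDER_PRIORITY = {
    "html": [
        "application/vnd.jupyter.widget-view+json",
        "text/html",
        "image/svg+xml",
        "image/png",
        "image/jpeg",
        "text/latex",
        "text/plain",
    ],
    "latex": [
        "text/latex",
        "image/png",
        "image/jpeg",
        "text/plain",
    ],
}


def pick_mime_type(output_data, priority):
    # Return the highest-priority MIME type present in a single output's data dict.
    for mime_type in priority:
        if mime_type in output_data:
            return mime_type
    return None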
def apply(self):
    thebe_config = self.config.jupyter_sphinx_thebelab_config
    for cell_node in self.document.traverse(JupyterCellNode):
        (output_bundle_node,) = cell_node.traverse(CellOutputBundleNode)

        # Create doctree nodes for cell outputs.
        output_nodes = cell_output_to_nodes(
            output_bundle_node.outputs,
            self.config.jupyter_execute_data_priority,
            bool(cell_node.attributes["stderr"]),
            sphinx_abs_dir(self.env),
            thebe_config,
        )
        # Remove the output bundle node and attach the outputs in its place
        try:
            cm_language = cell_node.cm_language
        except AttributeError:
            cm_language = "python"
        attach_outputs(output_nodes, cell_node, thebe_config, cm_language)

    # Image collect extra nodes from cell outputs that we need to process
    for node in self.document.traverse(image):
        # If the image node has `candidates` then it's already been processed
        # as in-line content, so skip it
        if "candidates" in node:
            continue
        # re-initialize an ImageCollector because the `app` imagecollector instance
        # is only available via event listeners.
        col = ImageCollector()
        col.process_doc(self.app, node)
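# The `apply` above consumes two configuration values. A hedged conf.py sketch is
# shown below; the option names come from the snippet itself, but the extension
# name and the concrete values are assumptions made for illustration.
extensions = ["jupyter_sphinx"]  # assumed name of the extension providing these options

jupyter_execute_data_priority = [
    "application/vnd.jupyter.widget-view+json",
    "text/html",
    "image/png",
    "image/jpeg",
    "text/latex",
    "text/plain",
]

jupyter_sphinx_thebelab_config = {
    "requestKernel": True,
    "binderOptions": {"repo": "binder-examples/requirements"},
}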
def run(self):
    abs_dir = sphinx_abs_dir(self.env)
    renderers = {}  # cache renderers
    for node in self.document.traverse(CellOutputBundleNode):
        try:
            renderer_cls = renderers[node.renderer]
        except KeyError:
            renderer_cls = load_renderer(node.renderer)
            renderers[node.renderer] = renderer_cls
        renderer = renderer_cls(self.document, node, abs_dir)
        output_nodes = renderer.cell_output_to_nodes(self.env.nb_render_priority)
        node.replace_self(output_nodes)

    # Image collect extra nodes from cell outputs that we need to process;
    # this normally gets called as a `doctree-read` event
    for node in self.document.traverse(nodes.image):
        # If the image node has `candidates` then it's already been processed
        # as in-line markdown, so skip it
        if "candidates" in node:
            continue
        col = ImageCollector()
        col.process_doc(self.app, node)
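# Both versions of `run` expect `load_renderer` to return a class that is
# instantiated as `renderer_cls(document, node, abs_dir)` and exposes
# `cell_output_to_nodes(priority)`. The skeleton below is a hypothetical sketch of
# that interface; only the call signature is inferred from the snippets, the body
# is an assumption.
from docutils import nodes


class MinimalOutputRenderer:
    def __init__(self, document, node, sphinx_dir):
        self.document = document
        self.node = node            # the CellOutputBundleNode being replaced
        self.sphinx_dir = sphinx_dir

    def cell_output_to_nodes(self, data_priority):
        """Return a list of docutils nodes, one per renderable cell output."""
        output_nodes = []
        for output in self.node.outputs:
            data = output.get("data", {})
            for mime_type in data_priority:
                if mime_type in data and mime_type == "text/plain":
                    output_nodes.append(nodes.literal_block(text=data[mime_type]))
                    break
        return output_nodes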
def run_robot(app, doctree, docname):
    # Tests can be switched off with a global setting:
    if not app.config.sphinxcontrib_robotframework_enabled:
        return

    # Set up a variable for 'the current working directory':
    robot_dir = os.path.dirname(os.path.join(app.srcdir, docname))

    # Tests are only run when they are found:
    if not hasattr(doctree, '_robot_source'):
        return

    # Skip already run robotframework suites
    checksums_filename = os.path.join(app.doctreedir, ROBOT_PICKLE_FILENAME)
    try:
        with open(checksums_filename, 'rb') as fp:
            checksums = pickle.loads(fp.read())
    except (IOError, EOFError, TypeError, IndexError):
        checksums = []
    checksum = hashlib.md5(doctree._robot_source.encode('utf-8')).hexdigest()
    if checksum in checksums:
        return

    # Build a test suite:
    robot_file = tempfile.NamedTemporaryFile(dir=robot_dir, suffix='.robot')
    robot_file.write(doctree._robot_source.encode('utf-8'))
    robot_file.flush()  # flush buffer into file

    # Skip running when the source has no test cases (e.g. has settings)
    try:
        robot_suite = robot.running.TestSuiteBuilder().build(robot_file.name)
    except robot.errors.DataError as e:
        if e.message.endswith('File has no test case table.'):
            return
        raise
    except AttributeError as e:
        # Fix to make this package still work with robotframework < 2.8.x
        pass
    if not len(robot_suite.tests):
        return

    # Get robot variables from environment
    env_robot_variables = get_robot_variables()
    env_robot_keys = [var.split(':')[0] for var in env_robot_variables]

    # Run the test suite:
    output = os.path.join(robot_dir, '{0:s}.output.xml'.format(robot_file.name))
    log = os.path.join(robot_dir, '{0:s}.log.html'.format(robot_file.name))
    options = {
        'outputdir': robot_dir,
        'output': output,
        'log': log,
        'report': 'NONE',
        'variable': env_robot_variables + [
            '%s:%s' % (key, value)
            for key, value in app.config.sphinxcontrib_robotframework_variables.items()
            if key not in env_robot_keys
        ]
    }

    # Update persisted checksums
    nitpicky = getattr(app.config, 'nitpicky', False)
    result = robot.run(robot_file.name, **options)
    if result == 0:
        with open(checksums_filename, 'wb') as fp:
            fp.write(pickle.dumps(checksums + [checksum]))
    elif nitpicky:
        raise SphinxError('Robot Framework reported errors. '
                          'Please, see "{0:s}" for details.'.format(log))
    if os.path.isfile(output):
        os.unlink(output)
    if os.path.isfile(log):
        os.unlink(log)

    # Close the test suite (and delete it, because it's a tempfile):
    robot_file.close()

    # Re-process images to include robot generated images:
    if os.path.sep in docname:
        # Because process_images is not designed to be called more than once,
        # calling it with docnames with sub-directories needs a bit cleanup:
        removable = os.path.dirname(docname) + os.path.sep
        for node in doctree.traverse(docutils.nodes.image):
            if node['uri'].startswith(removable):
                node['uri'] = node['uri'][len(removable):]
    try:
        app.env.process_images(docname, doctree)
    except AttributeError:
        # Sphinx >= 1.5
        app.env.temp_data['docname'] = docname
        from sphinx.environment.collectors.asset import ImageCollector
        ImageCollector().process_doc(app, doctree)
        del app.env.temp_data['docname']
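# `get_robot_variables()` is not shown in this snippet. Judging only from how its
# result is used above (a list of 'NAME:value' strings whose names are split on
# ':'), it plausibly collects prefixed environment variables. The sketch below is
# a guess under that assumption; the 'ROBOT_' prefix is not taken from the code.
import os


def get_robot_variables(prefix='ROBOT_'):
    """Hypothetical: turn prefixed environment variables into 'NAME:value' options."""
    return [
        '%s:%s' % (key[len(prefix):], value)
        for key, value in os.environ.items()
        if key.startswith(prefix) and len(key) > len(prefix)
    ]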
def setup(app):
    app.add_config_value('sphinxcontrib_robotframework_enabled', True, True)
    app.add_config_value('sphinxcontrib_robotframework_variables', {}, True)
    app.add_config_value('sphinxcontrib_robotframework_quiet', False, True)
    app.add_directive('code', RobotAwareCodeBlock)
    app.add_directive('robotframework', RobotSettingsDirective)
    app.connect('doctree-resolved', run_robot)
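# With the `setup` above, the extension is enabled from conf.py and the embedded
# Robot Framework suites run on the 'doctree-resolved' event. The usage sketch
# below is hedged: the module name is inferred from the config-value prefix and
# the values are illustrative.
extensions = ['sphinxcontrib_robotframework']

# Run embedded test suites during the build (True is the registered default).
sphinxcontrib_robotframework_enabled = True

# Variables passed to robot.run(); values already provided through the
# environment (see env_robot_keys above) take precedence over these.
sphinxcontrib_robotframework_variables = {
    'BROWSER': 'firefox',   # illustrative value
}

# Registered above with a default of False; effect depends on the extension.
sphinxcontrib_robotframework_quiet = True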