def process_need_count(app, doctree, fromdocname):
    """
    Replace each NeedCount node with a text node containing the amount of
    needs matching the node's filter string.

    The filter string is taken from the node's ``reftarget`` attribute:

    * empty string -> total amount of all known needs
    * ``"A"``      -> amount of needs matching filter ``A``
    * ``"A ? B"``  -> ratio ``len(A) / len(B)`` in percent (one decimal place)
    * more than one ``" ? "`` separator is invalid and raises
      :class:`NeedsInvalidFilter`

    :param app: Sphinx application object
    :param doctree: doctree to process
    :param fromdocname: name of the current document (unused here)
    """
    for node_need_count in doctree.traverse(NeedCount):
        env = app.builder.env
        all_needs = env.needs_all_needs.values()
        # Renamed local (was "filter") to avoid shadowing the builtin.
        filter_string = node_need_count['reftarget']

        if filter_string:
            filters = filter_string.split(' ? ')
            if len(filters) > 2:
                # Fail early before doing any filtering work.
                raise NeedsInvalidFilter(
                    'Filter not valid. Got too many filter elements. Allowed are 1 or 2. '
                    'Use " ? " only once to separate filters.')

            need_list = prepare_need_list(all_needs)  # adds parts to need_list
            if len(filters) == 1:
                amount = len(filter_needs(need_list, filters[0]))
            else:  # exactly two filters -> ratio in percent
                amount_1 = len(filter_needs(need_list, filters[0]))
                amount_2 = len(filter_needs(need_list, filters[1]))
                # Guard against ZeroDivisionError when the second filter
                # matches no needs at all; report the ratio as 0.0 instead
                # of crashing the build.
                if amount_2 == 0:
                    amount = '{:2.1f}'.format(0)
                else:
                    amount = '{:2.1f}'.format(amount_1 / amount_2 * 100)
        else:
            amount = len(all_needs)

        # docutils Text nodes must be built from strings.
        new_node_count = nodes.Text(str(amount), str(amount))
        node_need_count.replace_self(new_node_count)
def process_warnings(app, exception): """ Checks the configured warnings. This func gets called by the latest sphinx-event, so that really everything is already done. :param app: application :param exception: raised exceptions :return: """ # We cget called also if an exception occured during build # In this case the build is already broken and we do not need to check anything. if exception is not None: return env = app.env # If no needs were defined, we do not need to do anything if not hasattr(env, "needs_all_needs"): return # Check if warnings already got executed. # Needed because the used event gets executed multiple times, but warnings need to be checked only # on first execution if hasattr( env, "needs_warnings_executed") and env.needs_warnings_executed is True: return env.needs_warnings_executed = True needs = env.needs_all_needs warnings = getattr(app.config, 'needs_warnings', {}) with logging.pending_logging(): logger.info('\nChecking sphinx-needs warnings') warning_raised = False for warning_name, warning_filter in warnings.items(): result = filter_needs(needs.values(), warning_filter) if len(result) == 0: logger.info(' {}: passed'.format(warning_name)) else: need_ids = [x['id'] for x in result] logger.info(' {}: failed'.format(warning_name)) logger.info(' \t\tfailed needs: {} ({})'.format( len(need_ids), ', '.join(need_ids))) logger.info(' \t\tused filter: {}'.format(warning_filter)) warning_raised = True if warning_raised: logger.warning( 'Sphinx-Needs warnings were raised. See console / log output for details.' )
def process_need_count(app, doctree, fromdocname):
    """
    Replace each NeedCount node with a text node containing the amount of
    needs matching the node's filter string (taken from ``reftarget``).

    An empty filter counts all known needs; otherwise the filter string is
    evaluated against the prepared need list (which includes need parts).

    :param app: Sphinx application object
    :param doctree: doctree to process
    :param fromdocname: name of the current document (unused here)
    """
    for node_need_count in doctree.traverse(NeedCount):
        env = app.builder.env
        all_needs = env.needs_all_needs.values()
        # Renamed local (was "filter") to avoid shadowing the builtin.
        filter_string = node_need_count['reftarget']

        if not filter_string:
            amount = len(all_needs)
        else:
            need_list = prepare_need_list(all_needs)  # adds parts to need_list
            amount = len(filter_needs(need_list, filter_string))

        # "amount" is an int here; docutils Text nodes must be built from
        # strings, and the sibling implementation of this function already
        # converts explicitly — keep both consistent.
        new_node_count = nodes.Text(str(amount), str(amount))
        node_need_count.replace_self(new_node_count)
def finish(self): needs = self.env.needs_all_needs.values() # We need a list of needs for later filter checks filters = self.env.needs_all_filters config = self.env.config version = getattr(config, "version", "unset") needs_list = NeedsList(config, self.outdir, self.confdir) if config.needs_file: needs_file = config.needs_file needs_list.load_json(needs_file) else: # check if needs.json file exists in conf.py directory needs_json = os.path.join(self.confdir, "needs.json") if os.path.exists(needs_json): log.info("needs.json found, but will not be used because needs_file not configured.") # Clean needs_list from already stored needs of the current version. # This is needed as needs could have been removed from documentation and if this is the case, # removed needs would stay in needs_list, if list gets not cleaned. needs_list.wipe_version(version) # from sphinxcontrib.needs.filter_common import filter_needs filter_string = self.app.config.needs_builder_filter filtered_needs = filter_needs(self.app, needs, filter_string) for need in filtered_needs: needs_list.add_need(version, need) for need_filter in filters.values(): if need_filter["export_id"]: needs_list.add_filter(version, need_filter) try: needs_list.write_json() except Exception as e: log.error(f"Error during writing json file: {e}") else: log.info("Needs successfully exported")
def process_needpie(app, doctree, fromdocname):
    """
    Replace each Needpie node with a rendered pie-chart image.

    Each content line of the directive is either a plain number (used
    directly as a slice size) or a filter string whose match count becomes
    the slice size. The chart is saved as a PNG under ``_images`` (named by
    a hash of the node id) and an image node referencing it replaces the
    directive node.

    :param app: Sphinx application object
    :param doctree: doctree to process
    :param fromdocname: name of the current document
    """
    env = app.builder.env

    # NEEDFLOW
    for node in doctree.traverse(Needpie):
        if not app.config.needs_include_needs:
            # Ok, this is really dirty.
            # If we replace a node, docutils checks, if it will not lose any attributes.
            # But this is here the case, because we are using the attribute "ids" of a node.
            # However, I do not understand, why losing an attribute is such a big deal, so we delete everything
            # before docutils claims about it.
            for att in ('ids', 'names', 'classes', 'dupnames'):
                node[att] = []
            node.replace_self([])
            continue

        id = node.attributes["ids"][0]
        current_needpie = env.need_all_needpie[id]

        # Set matplotlib style
        # NOTE(review): the chosen style leaks into later charts, as there is
        # no save/restore of rcParams in this variant — confirm intended.
        if current_needpie['style'] is not None:
            matplotlib.style.use(current_needpie['style'])
        else:
            matplotlib.style.use('default')

        content = current_needpie['content']

        # One slice per content line: numbers are used directly, everything
        # else is treated as a need filter and counted.
        sizes = []
        for line in content:
            if line.isdigit():
                sizes.append(float(line))
            else:
                result = len(filter_needs(app.env.needs_all_needs.values(), line))
                sizes.append(result)

        labels = current_needpie['labels']
        if labels is None:
            labels = [''] * len(sizes)

        colors = current_needpie['colors']
        if colors is None:
            # Set default colors, if nothing is given
            colors = matplotlib.rcParams['axes.prop_cycle'].by_key()['color']
        else:
            # Remove space from color names
            colors = [x.strip() for x in colors]

        explode = current_needpie['explode']
        if explode is None:
            explode = [0] * len(labels)

        shadow = current_needpie['shadow']
        text_color = current_needpie['text_color']

        fig, axes = matplotlib.pyplot.subplots(figsize=(8, 4), subplot_kw=dict(aspect="equal"))

        pie_kwargs = {
            'labels': labels,
            'startangle': 90,
            'explode': explode,
            # "func" formats the percentage label for each wedge.
            'autopct': lambda pct: func(pct, sizes),
            'shadow': shadow,
            'colors': colors,
        }

        if text_color is not None:
            pie_kwargs['textprops'] = dict(color=text_color)

        wedges, texts, autotexts = axes.pie(sizes, **pie_kwargs)

        if text_color is not None:
            for autotext in autotexts:
                autotext.set_color(text_color)
        axes.axis('equal')

        # Legend preparation
        if current_needpie['legend'] is not None:
            axes.legend(wedges, labels, title=str(current_needpie['legend']),
                        loc="center left", bbox_to_anchor=(0.8, 0, 0.5, 1))

        matplotlib.pyplot.setp(autotexts, size=8, weight="bold")

        if current_needpie['title'] is not None and len(current_needpie['title']) > 0:
            axes.set_title(current_needpie['title'])

        # Final storage
        image_folder = os.path.join(env.app.srcdir, '_images')
        if not os.path.exists(image_folder):
            os.mkdir(image_folder)
        # We need to calculate an unique pie-image file name
        hash_value = hashlib.sha256(id.encode()).hexdigest()[:5]
        rel_file_path = os.path.join('_images', 'need_pie_{}.png'.format(hash_value))
        if rel_file_path not in env.images:
            fig.savefig(os.path.join(env.app.srcdir, rel_file_path), format='png')
            env.images[rel_file_path] = [
                '_images', os.path.split(rel_file_path)[-1]
            ]

        image_node = nodes.image()
        image_node['uri'] = rel_file_path
        image_node['candidates'] = {
            '*': rel_file_path
        }  # look at uri value for source path, relative to the srcdir folder
        node.replace_self(image_node)
def process_needpie(app, doctree, fromdocname):
    """
    Replace each Needpie node with a rendered pie-chart image.

    Slice sizes come either from the directive content (numbers or need
    filter strings, mutually exclusive with ``filter_func``) or from an
    external ``filter_func`` that fills ``context["results"]``. The chart is
    saved as a PNG under ``_images`` and an image node replaces the
    directive node.

    :param app: Sphinx application object
    :param doctree: doctree to process
    :param fromdocname: name of the current document
    """
    env = app.builder.env

    # NEEDFLOW
    for node in doctree.traverse(Needpie):
        if not app.config.needs_include_needs:
            # Ok, this is really dirty.
            # If we replace a node, docutils checks, if it will not lose any attributes.
            # But this is here the case, because we are using the attribute "ids" of a node.
            # However, I do not understand, why losing an attribute is such a big deal, so we delete everything
            # before docutils claims about it.
            for att in ("ids", "names", "classes", "dupnames"):
                node[att] = []
            node.replace_self([])
            continue

        id = node.attributes["ids"][0]
        current_needpie = env.need_all_needpie[id]

        # Set matplotlib style
        # NOTE(review): this stores a *reference* to the live rcParams object,
        # not a copy — style.use() mutates the same object, so the restore at
        # the end is likely a no-op; consider matplotlib.rcParams.copy().
        # TODO confirm.
        style_previous_to_script_execution = matplotlib.rcParams
        if current_needpie["style"]:
            matplotlib.style.use(current_needpie["style"])
        else:
            matplotlib.style.use("default")

        content = current_needpie["content"]
        sizes = []
        need_list = list(prepare_need_list(app.env.needs_all_needs.values()))  # adds parts to need_list
        # Content and filter_func are mutually exclusive sources for the slice sizes.
        if content and not current_needpie["filter_func"]:
            for line in content:
                if line.isdigit():
                    sizes.append(float(line))
                else:
                    result = len(filter_needs(app, need_list, line))
                    sizes.append(result)
        elif current_needpie["filter_func"] and not content:
            try:
                # check and get filter_func
                filter_func, filter_args = check_and_get_external_filter_func(current_needpie)
                # execute filter_func code
                # Provides only a copy of needs to avoid data manipulations.
                context = {
                    "needs": copy.deepcopy(need_list),
                    "results": [],
                }
                args = []
                if filter_args:
                    args = filter_args.split(",")
                for index, arg in enumerate(args):
                    # All rgs are strings, but we must transform them to requested type, e.g. 1 -> int, "1" -> str
                    context[f"arg{index + 1}"] = arg

                if filter_func:
                    filter_func(**context)
                sizes = context["results"]
                # check items in sizes
                if not isinstance(sizes, list):
                    logger.error(
                        f"The returned values from the given filter_func {filter_func.__name__} is not valid."
                        " It must be a list."
                    )
                for item in sizes:
                    if not isinstance(item, int) and not isinstance(item, float):
                        logger.error(
                            f"The returned values from the given filter_func {filter_func.__name__} is not valid. "
                            "It must be a list with items of type int/float."
                        )
            except Exception as e:
                raise e
        elif current_needpie["filter_func"] and content:
            logger.error("filter_func and content can't be used at the same time for needpie.")
        else:
            logger.error("Both filter_func and content are not used for needpie.")

        labels = current_needpie["labels"]
        if labels is None:
            labels = [""] * len(sizes)

        colors = current_needpie["colors"]
        if colors is None:
            # Set default colors, if nothing is given
            colors = matplotlib.rcParams["axes.prop_cycle"].by_key()["color"]
        else:
            # Remove space from color names
            colors = [x.strip() for x in colors]

        explode = current_needpie["explode"]
        if explode is None:
            explode = [0] * len(labels)

        shadow = current_needpie["shadow"]
        text_color = current_needpie["text_color"]

        fig, axes = matplotlib.pyplot.subplots(figsize=(8, 4), subplot_kw={"aspect": "equal"})

        pie_kwargs = {
            "labels": labels,
            "startangle": 90,
            "explode": explode,
            # label_calc formats the percentage label for each wedge.
            "autopct": lambda pct: label_calc(pct, sizes),
            "shadow": shadow,
            "colors": colors,
        }

        if text_color:
            pie_kwargs["textprops"] = {"color": text_color}

        # normalize=False keeps absolute fractions when all sizes sum below 1.
        wedges, _texts, autotexts = axes.pie(sizes, normalize=np.asarray(sizes, np.float32).sum() >= 1, **pie_kwargs)

        if text_color:
            for autotext in autotexts:
                autotext.set_color(text_color)
        axes.axis("equal")

        # Legend preparation
        if current_needpie["legend"]:
            axes.legend(
                wedges, labels, title=str(current_needpie["legend"]), loc="center left", bbox_to_anchor=(0.8, 0, 0.5, 1)
            )

        matplotlib.pyplot.setp(autotexts, size=8, weight="bold")

        if current_needpie["title"]:
            axes.set_title(current_needpie["title"])

        # Final storage
        image_folder = os.path.join(env.app.srcdir, "_images")
        if not os.path.exists(image_folder):
            os.mkdir(image_folder)
        # We need to calculate an unique pie-image file name
        hash_value = hashlib.sha256(id.encode()).hexdigest()[:5]
        rel_file_path = os.path.join("_images", f"need_pie_{hash_value}.png")
        if rel_file_path not in env.images:
            fig.savefig(os.path.join(env.app.srcdir, rel_file_path), format="png")
            # env.images[rel_file_path] = ["_images", os.path.split(rel_file_path)[-1]]
            env.images.add_file(fromdocname, rel_file_path)

        image_node = nodes.image()
        image_node["uri"] = rel_file_path
        # look at uri value for source path, relative to the srcdir folder
        image_node["candidates"] = {"*": rel_file_path}
        # Add lineno to node
        image_node.line = current_needpie["lineno"]
        node.replace_self(image_node)

        # Cleanup matplotlib
        # Reset the style configuration:
        matplotlib.rcParams = style_previous_to_script_execution
        # Close the figure, to free consumed memory.
        # Otherwise we will get: RuntimeWarning from matplotlib:
        matplotlib.pyplot.close(fig)
def copy(app, need, needs, option, need_id=None, lower=False, upper=False, filter=None):
    """
    Copies the value of one need option to another

    .. code-block:: jinja

        .. req:: copy-example
           :id: copy_1
           :tags: tag_1, tag_2, tag_3
           :status: open

        .. spec:: copy-example implementation
           :id: copy_2
           :status: [[copy("status", "copy_1")]]
           :links: copy_1
           :comment: [[copy("id")]]

           Copies status of ``copy_1`` to own status.
           Sets also a comment, which copies the id of own need.

        .. test:: test of specification and requirement
           :id: copy_3
           :links: copy_2; [[copy('links', 'copy_2')]]
           :tags: [[copy('tags', 'copy_1')]]

           Set own link to ``copy_2`` and also copies all links from it.

           Also copies all tags from copy_1.

    If the filter_string needs to compare a value from the current need and the value is unknown yet,
    you can reference the valued field by using ``current_need["my_field"]`` inside the filter string.
    Small example::

        .. test:: test of current_need value
           :id: copy_4

           The following copy command copies the title of the first need found under the same
           highest section (headline):

           [[copy('title', filter='current_need["sections"][-1]==sections[-1]')]]

    This filter possibilities get really powerful in combination with :ref:`needs_global_options`.

    :param app: Sphinx application object
    :param need: the need this function is executed in
    :param needs: dictionary of all known needs, keyed by need id
    :param option: Name of the option to copy
    :param need_id: id of the need, which contains the source option. If None, current need is taken
    :param upper: Is set to True, copied value will be uppercase
    :param lower: Is set to True, copied value will be lowercase
    :param filter: :ref:`filter_string`, which first result is used as copy source.
    :return: string of copied need option
    """
    # Explicit need id wins over the current need as copy source.
    if need_id:
        need = needs[need_id]

    # A filter string selects the source need dynamically; only the first
    # match is used.
    if filter:
        result = filter_needs(app, needs.values(), filter, need)
        if result:
            need = result[0]

    if lower:
        return need[option].lower()
    if upper:
        return need[option].upper()

    return need[option]
def process_needbar(app, doctree, fromdocname):
    """
    Replace each Needbar node with a rendered bar-chart image.

    The directive content is a separator-split table whose cells are either
    plain numbers or need filter strings (each filter cell becomes its match
    count). Labels may be given as parameters or embedded in the content
    ("FROM_DATA"). Supports stacked/horizontal/transposed layouts. The chart
    is saved as a PNG under ``_images`` and an image node replaces the
    directive node.

    :param app: Sphinx application object
    :param doctree: doctree to process
    :param fromdocname: name of the current document
    """
    env = app.builder.env

    # NEEDFLOW
    for node in doctree.traverse(Needbar):
        if not app.config.needs_include_needs:
            # Ok, this is really dirty.
            # If we replace a node, docutils checks, if it will not lose any attributes.
            # But this is here the case, because we are using the attribute "ids" of a node.
            # However, I do not understand, why losing an attribute is such a big deal, so we delete everything
            # before docutils claims about it.
            for att in ("ids", "names", "classes", "dupnames"):
                node[att] = []
            node.replace_self([])
            continue

        id = node.attributes["ids"][0]
        current_needbar = env.need_all_needbar[id]

        # 1. define constants
        error_id = current_needbar["error_id"]

        separator = current_needbar["separator"]
        if not separator:
            separator = ","

        # 2. pre process data
        # local_data: only valid data be stored, e.g. get ried of xlabels or ylabels content
        local_data = []
        test_columns_length = 0
        content = current_needbar["content"]
        for x in range(len(content)):
            row_data = content[x].split(separator)
            local_data.append(row_data)
            if x == 0:
                test_columns_length = len(row_data)
            else:
                # We can only process content with the same lenght for each line
                if test_columns_length != len(row_data):
                    raise Exception(f"{error_id}: each content line must have the same length")

        # 3. process the labels (maybe from content)
        xlabels = current_needbar["xlabels"]
        # "FROM_DATA" as first label means: labels are embedded in the content.
        xlabels_in_content = bool(xlabels and len(xlabels) >= 1 and xlabels[0] == "FROM_DATA")
        ylabels = current_needbar["ylabels"]
        ylabels_in_content = bool(ylabels and len(ylabels) >= 1 and ylabels[0] == "FROM_DATA")

        if xlabels_in_content:
            # get xlabels from content => first row in content
            xlabels = local_data[0]
            local_data = local_data[1:]  # remove the first row from further processing
            if ylabels_in_content:
                # we have a ylabels in the content:
                xlabels = xlabels[1:]  # first element (0) in the row has to be ignored
            xlabels = [x.strip() for x in xlabels]

        if not xlabels:
            # xlabels not been fetched from parameter or content
            xlabels = [str(1 + x) for x in range(len(local_data[0]))]

        if ylabels_in_content:
            # get ylabels from content => first dataset in each row
            ylabels = []
            new_local_data = []
            for line in local_data:
                ylabels.append(line[0])  # fetch ylabels values from first rows
                new_local_data.append(line[1:])
            local_data = new_local_data
            ylabels = [y.strip() for y in ylabels]

        if not ylabels:
            # ylabels not been fetched from parameter or content
            ylabels = [str(1 + y) for y in range(len(local_data))]

        # ensure length of xlabels == content columns
        if not len(xlabels) == len(local_data[0]):
            raise Exception(
                f"{error_id} length of xlabels: {len(xlabels)} is not equal with sum of columns: {len(local_data[0])}"
            )

        # ensure length of ylabels == content rows
        if not len(ylabels) == len(local_data):
            raise Exception(
                f"{error_id} length of ylabels: {len(ylabels)} is not equal with sum of rows: {len(local_data)}"
            )

        # 4. transpose the data if needed
        if current_needbar["transpose"]:
            local_data = [[local_data[j][i] for j in range(len(local_data))] for i in range(len(local_data[0]))]
            tmp = ylabels
            ylabels = xlabels
            xlabels = tmp

        # 5. process content
        # Turn every cell into a float: digits directly, everything else is
        # treated as a need filter and replaced by its match count.
        local_data_number = []
        for line in local_data:
            line_number = []
            for element in line:
                element = element.strip()
                if element.isdigit():
                    line_number.append(float(element))
                else:
                    result = len(filter_needs(app, app.env.needs_all_needs.values(), element))
                    line_number.append(float(result))
            local_data_number.append(line_number)

        # 6. calculate index according to configuration and content size
        # Stacked bars share x positions; grouped bars get interleaved offsets.
        index = []
        for row in range(len(local_data_number)):
            line = []
            for column in range(len(local_data_number[0])):
                if current_needbar["stacked"]:
                    line.append(column)
                else:
                    value = row + column * len(local_data_number) + column
                    line.append(value)
            index.append(line)

        # 7. styling and coloring
        # NOTE(review): this stores a *reference* to the live rcParams object,
        # not a copy — the restore in step 10 is likely a no-op; consider
        # matplotlib.rcParams.copy(). TODO confirm.
        style_previous_to_script_execution = matplotlib.rcParams

        # Set matplotlib style
        if current_needbar["style"]:
            matplotlib.style.use(current_needbar["style"])
        else:
            # It is necessary to set default style, otherwise the old styling will be used again.
            matplotlib.style.use("default")

        # set text colors
        if current_needbar["text_color"]:
            text_color = current_needbar["text_color"].strip()
            matplotlib.rcParams["text.color"] = text_color
            matplotlib.rcParams["axes.labelcolor"] = text_color
            try:
                matplotlib.rcParams["xtick.labelcolor"] = text_color
                matplotlib.rcParams["ytick.labelcolor"] = text_color
            except KeyError:
                # labelcolor is not support in this matplotlib version. Use color instead.
                matplotlib.rcParams["xtick.color"] = text_color
                matplotlib.rcParams["ytick.color"] = text_color

        # get bar colors
        colors = current_needbar["colors"]
        if colors:
            # Remove space from color names
            colors = [x.strip() for x in colors]

        # Handle the cases: len(local_data) > len(colors) or len(local_data) < len(colors)
        # We do the same for input color, with transpose the user could forget to change the color accordingly
        if not colors or len(colors) == 0:
            # Set default colors, if nothing is given
            colors = matplotlib.rcParams["axes.prop_cycle"].by_key()["color"]
        else:
            # extend given colors with default colors
            colors = colors + matplotlib.rcParams["axes.prop_cycle"].by_key()["color"]
        multi = math.ceil(len(local_data) / len(colors))
        if multi > 1:
            print(f"{error_id} warning: color schema is smaller than data, double coloring is occurring")
            colors = colors * multi
        colors = colors[: len(local_data)]

        # Running baseline for stacked bars (all zeros when not stacked).
        y_offset = numpy.zeros(len(local_data_number[0]))

        # 8. create figure
        figure, axes = matplotlib.pyplot.subplots()
        for x in range(len(local_data_number)):
            if not current_needbar["horizontal"]:
                bar = axes.bar(
                    index[x],
                    local_data_number[x],
                    bottom=y_offset,
                    label=ylabels[x],
                    color=colors[x],
                )
            else:
                bar = axes.barh(
                    index[x],
                    local_data_number[x],
                    left=y_offset,
                    label=ylabels[x],
                    color=colors[x],
                )
            if current_needbar["show_sum"]:
                try:
                    axes.bar_label(bar, label_type="center")  # show label in the middel of each bar
                except AttributeError:
                    # bar_label is not support in older matplotlib versions
                    current_needbar["show_sum"] = None
            if current_needbar["stacked"]:
                y_offset = y_offset + numpy.array(local_data_number[x])
                # show for a stacked bar the overall value
                if current_needbar["show_sum"] and x == len(local_data_number) - 1:
                    try:
                        axes.bar_label(bar)
                    except AttributeError:
                        # bar_label is not support in older matplotlib versions
                        current_needbar["show_sum"] = None

        if not current_needbar["horizontal"]:
            # We want to support even older version of matplotlib, which do not support axes.set_xticks(labels)
            x_pos = (numpy.array(index[0]) + numpy.array(index[len(local_data_number) - 1])) / 2
            axes.set_xticks(x_pos)
            axes.set_xticklabels(labels=xlabels)
        else:
            # We want to support even older version of matplotlib, which do not support axes.set_yticks(labels)
            y_pos = (numpy.array(index[0]) + numpy.array(index[len(local_data_number) - 1])) / 2
            axes.set_yticks(y_pos)
            axes.set_yticklabels(labels=xlabels)
            axes.invert_yaxis()  # labels read top-to-bottom

        xlabels_rotation = current_needbar["xlabels_rotation"]
        if xlabels_rotation:
            xlabels_rotation = xlabels_rotation.strip()
            # Rotate the tick labels
            if xlabels_rotation.isdigit():
                matplotlib.pyplot.setp(axes.get_xticklabels(), rotation=int(xlabels_rotation))
        ylabels_rotation = current_needbar["ylabels_rotation"]
        if ylabels_rotation:
            ylabels_rotation = ylabels_rotation.strip()
            # Rotate the tick labels
            if ylabels_rotation.isdigit():
                matplotlib.pyplot.setp(axes.get_yticklabels(), rotation=int(ylabels_rotation))

        if current_needbar["title"]:
            axes.set_title(current_needbar["title"].strip())

        if current_needbar["x_axis_title"]:
            axes.set_xlabel(current_needbar["x_axis_title"].strip())

        if current_needbar["y_axis_title"]:
            axes.set_ylabel(current_needbar["y_axis_title"].strip())

        if current_needbar["legend"]:
            axes.legend()

        # 9. final storage
        image_folder = os.path.join(env.app.srcdir, "_images")
        if not os.path.exists(image_folder):
            os.mkdir(image_folder)
        # We need to calculate an unique bar-image file name
        hash_value = hashlib.sha256(id.encode()).hexdigest()[:5]
        rel_file_path = os.path.join("_images", f"need_bar_{hash_value}.png")
        if rel_file_path not in env.images:
            figure.savefig(os.path.join(env.app.srcdir, rel_file_path))
            env.images[rel_file_path] = ["_images", os.path.split(rel_file_path)[-1]]

        image_node = nodes.image()
        image_node["uri"] = rel_file_path
        # normaly the title is more understandable for a person who needs alt
        if current_needbar["title"]:
            image_node["alt"] = current_needbar["title"].strip()
        # look at uri value for source path, relative to the srcdir folder
        image_node["candidates"] = {"*": rel_file_path}
        node.replace_self(image_node)

        # 10. cleanup matplotlib
        # Reset the style configuration:
        matplotlib.rcParams = style_previous_to_script_execution
        # close the figure, to free consumed memory. Otherwise we will get:
        # RuntimeWarning from matplotlib: More than 20 figures have been opened.
        matplotlib.pyplot.close(figure)
def process_warnings(app, exception):
    """
    Checks the configured warnings.

    This func gets called by the latest sphinx-event, so that really everything is already done.

    :param app: application
    :param exception: raised exceptions
    :return: None
    """
    # We get called also if an exception occured during build
    # In this case the build is already broken and we do not need to check anything.
    if exception:
        return

    env = app.env
    # If no needs were defined, we do not need to do anything
    if not hasattr(env, "needs_all_needs"):
        return

    # Check if warnings already got executed.
    # Needed because the used event gets executed multiple times, but warnings need to be checked only
    # on first execution
    if hasattr(env, "needs_warnings_executed") and env.needs_warnings_executed:
        return

    # Mark as executed before running checks, so re-entrant event calls bail out above.
    env.needs_warnings_executed = True

    needs = env.needs_all_needs

    # Exclude external needs for warnings check
    checked_needs = {}
    for need_id, need in needs.items():
        if not need["is_external"]:
            checked_needs[need_id] = need

    # warnings = app.config.needs_warnings
    # A warning entry is either a filter string or a callable taking (need, logger).
    warnings = NEEDS_CONFIG.get("warnings")
    warnings_always_warn = app.config.needs_warnings_always_warn

    # pending_logging buffers records so the summary is emitted in one piece.
    with logging.pending_logging():
        logger.info("\nChecking sphinx-needs warnings")
        warning_raised = False
        for warning_name, warning_filter in warnings.items():
            if isinstance(warning_filter, str):
                # filter string used
                result = filter_needs(app, checked_needs.values(), warning_filter)
            elif callable(warning_filter):
                # custom defined filter code used from conf.py
                result = []
                for need in checked_needs.values():
                    if warning_filter(need, logger):
                        result.append(need)
            else:
                logger.warning(
                    f"Unknown needs warnings filter {warning_filter}!")

            if len(result) == 0:
                logger.info(f"{warning_name}: passed")
            else:
                need_ids = [x["id"] for x in result]

                # Set Sphinx statuscode to 1, only if -W is used with sphinx-build
                # Because Sphinx statuscode got calculated in very early build phase and based on warning_count
                # Sphinx-needs warnings check hasn't happened yet
                # see deatils in https://github.com/sphinx-doc/sphinx/blob/81a4fd973d4cfcb25d01a7b0be62cdb28f82406d/sphinx/application.py#L345  # noqa
                # To be clear, app.keep_going = -W and --keep-going, and will overrite -W after
                # see details in https://github.com/sphinx-doc/sphinx/blob/4.x/sphinx/application.py#L182
                if app.statuscode == 0 and (app.keep_going or app.warningiserror):
                    app.statuscode = 1

                # get the text for used filter, either from filter string or function name
                if callable(warning_filter):
                    warning_text = warning_filter.__name__
                elif isinstance(warning_filter, str):
                    warning_text = warning_filter

                if warnings_always_warn:
                    logger.warning(
                        "{}: failed\n\t\tfailed needs: {} ({})\n\t\tused filter: {}"
                        .format(warning_name, len(need_ids),
                                ", ".join(need_ids), warning_text))
                else:
                    logger.info(
                        "{}: failed\n\t\tfailed needs: {} ({})\n\t\tused filter: {}"
                        .format(warning_name, len(need_ids),
                                ", ".join(need_ids), warning_text))
                    warning_raised = True

        if warning_raised:
            logger.warning(
                "Sphinx-Needs warnings were raised. See console / log output for details."
            )
def process_needextend(app, doctree, fromdocname):
    """
    Perform all modifications on needs

    Applies every collected needextend directive once per build (guarded by the
    ``needs_extended`` workflow flag): for each directive, the needs matching
    its filter are modified in place. Modification keys select the mode:
    ``+option`` appends, ``-option`` clears, plain ``option`` replaces. For
    link options the back-links of referenced needs are kept in sync.

    :param app: Sphinx application object
    :param doctree: doctree to process
    :param fromdocname: name of the current document
    """
    env = app.builder.env
    if not hasattr(env, "need_all_needextend"):
        env.need_all_needextend = {}

    # Run the modifications only once per build.
    if not env.needs_workflow["needs_extended"]:
        env.needs_workflow["needs_extended"] = True

        # Options that hold lists (tags, links and all extra links incl. their
        # back-link counterparts).
        list_names = (
            ["tags", "links"]
            + [x["option"] for x in app.config.needs_extra_links]
            + [f"{x['option']}_back" for x in app.config.needs_extra_links])  # back-links (incoming)
        link_names = [x["option"] for x in app.config.needs_extra_links]

        for current_needextend in env.need_all_needextend.values():

            # Check if filter is just a need-id.
            # In this case create the needed filter string
            need_filter = current_needextend["filter"]
            if need_filter in app.env.needs_all_needs:
                need_filter = f'id == "{need_filter}"'
            # If it looks like a need id, but we haven't found one, raise an exception
            elif re.fullmatch(app.config.needs_id_regex, need_filter):
                raise NeedsInvalidFilter(
                    f"Provided id {need_filter} for needextend does not exist."
                )
            try:
                found_needs = filter_needs(app, app.env.needs_all_needs.values(), need_filter)
            except NeedsInvalidFilter as e:
                raise NeedsInvalidFilter(
                    f"Filter not valid for needextend on page {current_needextend['docname']}:\n{e}"
                )

            for found_need in found_needs:
                # Work in the stored needs, not on the search result
                need = app.env.needs_all_needs[found_need["id"]]
                need["is_modified"] = True
                need["modifications"] += 1

                for option, value in current_needextend["modifications"].items():
                    if option.startswith("+"):
                        option_name = option[1:]

                        # If we need to handle a list
                        if option_name in list_names:
                            for link in re.split(";|,", value):
                                # Remove whitespaces
                                link = link.strip()
                                if link not in need[option_name]:
                                    need[option_name].append(link)

                            # If we manipulate links, we need to set all the reference in the target need
                            # under e.g. links_back
                            if option_name in link_names:
                                for ref_need in re.split(";|,", value):
                                    # Remove whitespaces
                                    ref_need = ref_need.strip()
                                    if found_need["id"] not in app.env.needs_all_needs[ref_need][f"{option_name}_back"]:
                                        app.env.needs_all_needs[ref_need][f"{option_name}_back"] += [found_need["id"]]

                        # else it must be a normal string
                        else:
                            # If content is already stored, we need to add some whitespace
                            if need[option_name]:
                                need[option_name] += " "
                            need[option_name] += value

                    elif option.startswith("-"):
                        option_name = option[1:]
                        if option_name in list_names:
                            old_content = need[option_name]  # Save it, as it may be need to identify referenced needs
                            need[option_name] = []

                            # If we manipulate links, we need to delete the reference in the target need as well
                            if option_name in link_names:
                                for ref_need in old_content:  # There may be several links
                                    app.env.needs_all_needs[ref_need][f"{option_name}_back"].remove(found_need["id"])
                        else:
                            need[option_name] = ""
                    else:
                        if option in list_names:
                            old_content = need[option].copy()

                            need[option] = []
                            for link in re.split(";|,", value):
                                # Remove whitespaces
                                link = link.strip()
                                if link not in need[option]:
                                    need[option].append(link)

                            # If add new links also as "link_s_back" to the referenced need.
                            if option in link_names:
                                # Remove old links
                                for ref_need in old_content:  # There may be several links
                                    app.env.needs_all_needs[ref_need][f"{option}_back"].remove(found_need["id"])

                                # Add new links
                                for ref_need in need[option]:  # There may be several links
                                    if found_need["id"] not in app.env.needs_all_needs[ref_need][f"{option}_back"]:
                                        app.env.needs_all_needs[ref_need][f"{option}_back"] += [found_need["id"]]
                        else:
                            need[option] = value

    for node in doctree.traverse(Needextend):
        # No printouts for needextend
        removed_needextend_node(node)
def process_needpie(app, doctree, fromdocname):
    """
    Replace each Needpie node with a rendered pie-chart image.

    Each content line is either a plain number (used directly as a slice
    size) or a need filter string whose match count becomes the slice size.
    The chart is saved as a PNG under ``_images`` (named by a hash of the
    node id) and an image node referencing it replaces the directive node.

    :param app: Sphinx application object
    :param doctree: doctree to process
    :param fromdocname: name of the current document
    """
    env = app.builder.env

    # NEEDFLOW
    for node in doctree.traverse(Needpie):
        if not app.config.needs_include_needs:
            # Ok, this is really dirty.
            # If we replace a node, docutils checks, if it will not lose any attributes.
            # But this is here the case, because we are using the attribute "ids" of a node.
            # However, I do not understand, why losing an attribute is such a big deal, so we delete everything
            # before docutils claims about it.
            for att in ("ids", "names", "classes", "dupnames"):
                node[att] = []
            node.replace_self([])
            continue

        id = node.attributes["ids"][0]
        current_needpie = env.need_all_needpie[id]

        # Set matplotlib style
        # NOTE(review): this stores a *reference* to the live rcParams object,
        # not a copy — style.use() mutates the same object, so the restore at
        # the end is likely a no-op; consider matplotlib.rcParams.copy().
        # TODO confirm.
        style_previous_to_script_execution = matplotlib.rcParams
        if current_needpie["style"]:
            matplotlib.style.use(current_needpie["style"])
        else:
            matplotlib.style.use("default")

        content = current_needpie["content"]
        # One slice per content line: numbers are used directly, everything
        # else is treated as a need filter and counted.
        sizes = []
        for line in content:
            if line.isdigit():
                sizes.append(float(line))
            else:
                result = len(
                    filter_needs(app, app.env.needs_all_needs.values(), line))
                sizes.append(result)

        labels = current_needpie["labels"]
        if labels is None:
            labels = [""] * len(sizes)

        colors = current_needpie["colors"]
        if colors is None:
            # Set default colors, if nothing is given
            colors = matplotlib.rcParams["axes.prop_cycle"].by_key()["color"]
        else:
            # Remove space from color names
            colors = [x.strip() for x in colors]

        explode = current_needpie["explode"]
        if explode is None:
            explode = [0] * len(labels)

        shadow = current_needpie["shadow"]
        text_color = current_needpie["text_color"]

        fig, axes = matplotlib.pyplot.subplots(figsize=(8, 4), subplot_kw={"aspect": "equal"})

        pie_kwargs = {
            "labels": labels,
            "startangle": 90,
            "explode": explode,
            # label_calc formats the percentage label for each wedge.
            "autopct": lambda pct: label_calc(pct, sizes),
            "shadow": shadow,
            "colors": colors,
        }

        if text_color:
            pie_kwargs["textprops"] = {"color": text_color}

        wedges, _texts, autotexts = axes.pie(sizes, **pie_kwargs)

        if text_color:
            for autotext in autotexts:
                autotext.set_color(text_color)
        axes.axis("equal")

        # Legend preparation
        if current_needpie["legend"]:
            axes.legend(wedges, labels, title=str(current_needpie["legend"]),
                        loc="center left", bbox_to_anchor=(0.8, 0, 0.5, 1))

        matplotlib.pyplot.setp(autotexts, size=8, weight="bold")

        if current_needpie["title"]:
            axes.set_title(current_needpie["title"])

        # Final storage
        image_folder = os.path.join(env.app.srcdir, "_images")
        if not os.path.exists(image_folder):
            os.mkdir(image_folder)
        # We need to calculate an unique pie-image file name
        hash_value = hashlib.sha256(id.encode()).hexdigest()[:5]
        rel_file_path = os.path.join("_images", f"need_pie_{hash_value}.png")
        if rel_file_path not in env.images:
            fig.savefig(os.path.join(env.app.srcdir, rel_file_path), format="png")
            env.images[rel_file_path] = [
                "_images", os.path.split(rel_file_path)[-1]
            ]

        image_node = nodes.image()
        image_node["uri"] = rel_file_path
        # look at uri value for source path, relative to the srcdir folder
        image_node["candidates"] = {"*": rel_file_path}
        node.replace_self(image_node)

        # Cleanup matplotlib
        # Reset the style configuration:
        matplotlib.rcParams = style_previous_to_script_execution
        # Close the figure, to free consumed memory.
        # Otherwise we will get: RuntimeWarning from matplotlib:
        matplotlib.pyplot.close(fig)