def _set_edge_attributes(self, edge, gc):
    """Transfer edge decorations and attribute values onto the graphics
    connection gc (text, tooltip, line color/width/style)."""
    label_decorators = self._edge_label_decorators()
    if len(label_decorators) > 0:
        label_parts = CollectionTools.flatten(
            BaseGraphElementDecorator.decorations(label_decorators, edge))
        gc.setText("\n".join(label_parts))
    # TODO an overlong tooltip/edgetooltip makes Graphviz generate broken SVG
    tooltip_decorators = self._edge_tooltip_decorators()
    if len(tooltip_decorators) > 0:
        # TODO how do I produce a line break inside the tooltip?
        tooltip_parts = CollectionTools.flatten(
            BaseGraphElementDecorator.decorations(tooltip_decorators, edge))
        gc.setTooltip(Label(", ".join(tooltip_parts)))
    for attr_name in edge.get_attr_names():
        attr_value = edge.get_attr(attr_name)
        if attr_name == EdgeAttributes.COLOR:
            gc.setLineColor(self.map_swt_color(attr_value))
        elif attr_name == EdgeAttributes.WEIGHT:
            gc.setLineWidth(attr_value)
        elif attr_name == EdgeAttributes.STYLE:
            gc.setLineStyle(EdgeStyles.map(self.StyleMap, attr_value))
        elif attr_name in [EdgeAttributes.GROUPED_EDGES]:
            # Deliberately ignored here.
            pass
        else:
            logging.warning("Unknown edge attributes %s=%s" % (attr_name, attr_value))
def _set_node_attributes(self, node, gn):
    """Transfer node decorations and attribute values onto the graphics
    node gn (text, tooltip, border/background color, size).

    Fix: setSize is only called when BOTH width and height were provided;
    previously a node with a WIDTH but no HEIGHT attribute crashed on
    `height * 40` with height still None.
    """
    if len(self._node_label_decorators()) > 0:
        gn.setText(
            str(node) + " " + " ".join(
                CollectionTools.flatten(
                    BaseGraphElementDecorator.decorations(
                        self._node_label_decorators(), node))))
    else:
        gn.setText(str(node))
    # TODO an overlong tooltip/edgetooltip makes Graphviz generate broken SVG
    if len(self._node_tooltip_decorators()) > 0:
        # TODO how do I produce a line break inside the tooltip?
        gn.setTooltip(
            Label(", ".join(
                CollectionTools.flatten(
                    BaseGraphElementDecorator.decorations(
                        self._node_tooltip_decorators(), node)))))
    height = None
    width = None
    node_attrs = self._graph().node_attr_names(node)
    for in_attr in node_attrs:
        in_value = self._graph().node_attr(node, in_attr)
        # TODO define a mapping for SHAPE and LINK (previously commented-out
        # Graphviz attribute handling lived here).
        if in_attr == NodeAttributes.SHAPE:
            pass
        elif in_attr == NodeAttributes.HEIGHT:
            height = float(in_value)
        elif in_attr == NodeAttributes.WIDTH:
            width = float(in_value)
        elif in_attr == NodeAttributes.LINE_COLOR:
            gn.setBorderColor(self.map_swt_color(in_value, SWT.COLOR_BLACK))
        elif in_attr == NodeAttributes.FILL_COLOR:
            gn.setBackgroundColor(
                self.map_swt_color(in_value, SWT.COLOR_WHITE))
        elif in_attr == NodeAttributes.LINK:
            pass
        elif in_attr in [
                NodeAttributes.GROUPED_NODES, NodeAttributes.SKIPPED_FROM_EDGE,
                NodeAttributes.SKIPPED_TO_EDGE, NodeAttributes.LABEL
        ]:
            pass
        else:
            self.__logger.warning("Unknown node attributes %s=%s" %
                                  (in_attr, in_value))
    # Scale factor 40 maps the abstract size attributes to pixels --
    # presumably chosen to match the renderer; TODO confirm.
    if width and height:
        gn.setSize(width * 40, height * 40)
def __init_headers_in_module_specs(self, file_to_module_map_supply):
    # Build the header -> module mapping from the module specification and
    # drop every header that is claimed by more than one module.
    headers_in_module_specs = CollectionTools.transpose_items_as_dict(
        file_to_module_map_supply.get_module_to_header_file_map())
    # NOTE(review): iterating the returned map directly yields its keys; the
    # (key, value) unpacking below only works if the iteration actually
    # produces pairs -- otherwise this should iterate .items(). Confirm
    # against get_module_to_header_file_map's return type.
    duplicate_headers = CollectionTools.find_duplicates(
        value for (key, value)
        in file_to_module_map_supply.get_module_to_header_file_map())
    self.__logger.info(
        "%i headers in module specs, ignoring %i duplicates" %
        (len(headers_in_module_specs), len(duplicate_headers)))
    # Remove ambiguous headers so the returned dict is unambiguous.
    for header in duplicate_headers:
        del headers_in_module_specs[header]
    return headers_in_module_specs
def default_canonic_path_list(cls, input_include_paths, path_module=os.path):
    # Normalize all include paths and reduce them to canonic representatives,
    # keeping only one entry per case-normalized key.
    #
    # NOTE(review): path_module.split yields (head, tail) tuples, so
    # norm_input_include_paths is a sequence of 2-tuples; get_canonic_path
    # and path_module.normcase are assumed to accept that shape -- confirm.
    # Also note this relies on Python 2 `map` returning a list, since the
    # result is iterated twice below.
    norm_input_include_paths = map(
        path_module.split, imap(path_module.normpath, input_include_paths))
    result = []
    CollectionTools.extend_unique_keys(
        result,
        (cls.get_canonic_path(
            path, norm_input_include_paths, path_module=path_module)
         for path in norm_input_include_paths),
        lambda path: path_module.normcase(path))
    return result
def get_target_groups(self):
    """Lazily compute and cache the clustered module dependencies.

    Reads the raw dependencies, applies the configured join/delete
    operations, converts module-level to file-level dependencies, re-joins
    files that only appeared in deleted modules, and caches the transposed
    result (identity comparison with `is None` instead of `== None`).
    """
    if self.__moduleDependenciesClustered is None:
        moduleDependencies = OperationProcessor.read_dependencies(self.__inputDictIter)
        deletedDependencies = dict()
        moduleDependencies = self.join_and_delete_modules(
            moduleDependencies, self.__operationDictIter, deletedDependencies)
        fileDependencies = LocalCollectionTools.convert_module_list_to_file_list(
            moduleDependencies)
        deletedFileDependencies = LocalCollectionTools.convert_module_list_to_file_list(
            deletedDependencies)
        fileDependencies = self.join_missing_files(
            fileDependencies, deletedFileDependencies)
        fileDependencies = CollectionTools.value_set_to_tuple(fileDependencies)
        self.__moduleDependenciesClustered = CollectionTools.transpose(fileDependencies)
    return self.__moduleDependenciesClustered
def test_as_immutable_dict(self):
    """as_immutable must copy a dict into a distinct frozendict of equal length."""
    size = 3
    mutable_input = dict((key, key) for key in range(size))
    frozen = CollectionTools.as_immutable(mutable_input)
    self.assertTrue(isinstance(frozen, frozendict))
    self.assertNotEquals(id(mutable_input), id(frozen))
    self.__check_len_twice(size, frozen)
def types_of_all_modules(self, node):
    """Return the types of node; a NodeGroup contributes the union of the
    types of every module it groups."""
    if not isinstance(node, NodeGroup):
        return self.types_of_single_module(node)
    grouped_modules = self._graph().node_attr(node, NodeAttributes.GROUPED_NODES)
    per_module_types = (self.types_of_single_module(module)
                        for module in grouped_modules)
    return CollectionTools.union_all(per_module_types)
def generate_module_to_file_map(self, use_exceptions=True):
    """Return the module -> files map, lazily computed as the transpose of
    the file -> module map and cached per use_exceptions flag.

    Uses `is None` for the cache check instead of `== None`.
    """
    # TODO avoid computing the inverse map first
    if self.__module_to_file_map[use_exceptions] is None:
        self.__module_to_file_map[use_exceptions] = CollectionTools.transpose(
            self.generate_file_to_module_map(use_exceptions))
    return self.__module_to_file_map[use_exceptions]
def test_as_immutable_list(self):
    """as_immutable must copy a list into a distinct tuple of equal length."""
    length = 3
    # list(range(...)) instead of the redundant list(x for x in range(...)).
    testee = list(range(length))
    result = CollectionTools.as_immutable(testee)
    self.assertTrue(isinstance(result, tuple))
    self.assertNotEquals(id(testee), id(result))
    self.__check_len_twice(length, result)
def main(): logging.basicConfig(stream=sys.stderr, level=logging.DEBUG) #Configurator().default() if (len(sys.argv) < 2): print("Call: %s <output from FindIncludeGuards.sh>" % (sys.argv[0], )) exit() # TODO move i files_to_guards = dict([ (path_name, guard) for (path_name, guard) in csv.reader(open(sys.argv[1]), delimiter=':') ]) #duplicates = CollectionTools.find_duplicates(files_to_guards.itervalues(), lambda x: len(x)==0 or x == '#pragma once') duplicates_dict = CollectionTools.find_duplicate_values( files_to_guards.iteritems(), lambda x: len(x) == 0 or x == '#pragma once') correct, pragma_once, missing, irregular, ms_generated, guard = check_include_guards( files_to_guards, path_name, guard) print( "%i correct (%i of which using #pragma once before or without a guard), %i missing guard or malformed structure, %i irregular (%i of which are MS generated)" % (correct, pragma_once, missing, irregular, ms_generated)) #print("%i duplicates: %s" % (len(duplicates), ",".join(duplicates))) print("%i duplicates" % (len(duplicates_dict))) for guard in sorted(duplicates_dict.keys()): print("%s:%s" % (guard, ",".join(sorted(duplicates_dict[guard]))))
def _edge_label(self, edge):
    """Return the decorator-built label for edge, or None when no label
    decorators are configured."""
    decorators = self._label_decorators()
    if len(decorators) == 0:
        return None
    decoration_lines = CollectionTools.flatten(
        BaseGraphElementDecorator.decorations(decorators, edge))
    return "\\n".join(decoration_lines)
def nodes_in_edge_list(edge_list):
    """Collect every node that occurs as an endpoint in edge_list.

    >>> sorted(GraphConversions.nodes_in_edge_list([('a', 'b'), ('b', 'c')]))
    ['a', 'b', 'c']
    """
    source_nodes = (edge[0] for edge in edge_list)
    target_nodes = (edge[1] for edge in edge_list)
    return CollectionTools.union_all((source_nodes, target_nodes))
def _tooltip(self, graph_element):
    """Return the comma-separated tooltip text for graph_element, or ""
    when no tooltip decorators are configured."""
    decorators = self._tooltip_decorators()
    if len(decorators) == 0:
        return ""
    # TODO how do I produce a line break inside the tooltip?
    decoration_parts = CollectionTools.flatten(
        BaseGraphElementDecorator.decorations(decorators, graph_element))
    return ", ".join(decoration_parts)
def find_module_clusters(inModuleDependencies, distanceThreshold, sizeThreshold, size_fun=len):
    """
    Try to further reduce the number of clusters by merging the existing module clusters,
    if their distance is less than distanceThreshold.
    size_fun should calculate the size of a value of inModuleDependencies, the default
    is the length of the list.
    find_module_clusters does not try to further reduce clusters that are larger than sizeThreshold.
    @return: A tuple-valued dictionary, which maps keys of tuples of source elements to tuples of target elements.
    """
    moduleDependencies = CollectionTools.value_tuple_to_set(inModuleDependencies)
    # Loop until an iteration performs no further merge (find_min_dist
    # returns None); (0, 0) is just a non-None sentinel for the first pass.
    minKeyPair = (0, 0)
    while minKeyPair is not None:
        # Restrict to keys whose values stay below the size threshold.
        keysBelowThreshold = [key for key in moduleDependencies
                              if size_fun(moduleDependencies[key]) < sizeThreshold]
        logging.info("Keys below size threshold %i" % len(keysBelowThreshold))
        # Compute distances for all pairs and pick the pair with minimal distance.
        minKeyPair = find_min_dist(moduleDependencies.keys(), keysBelowThreshold,
                                   distanceThreshold, distance_rel)
        # TODO join_modules only merges the two clusters, not the modules themselves.
        # TODO join_modules would need to be extended for keys that are already iterables.
        # TODO or is the current behavior actually preferable?
        if minKeyPair is not None:
            logging.info("Joining %s and %s (distance %f)",
                         minKeyPair[0], minKeyPair[1],
                         distance_rel(minKeyPair[0], minKeyPair[1]))
            BasicOperations.join_modules(
                moduleDependencies,
                tuple(sorted(set(minKeyPair[0]) | set(minKeyPair[1]))),
                [minKeyPair[0], minKeyPair[1]])
    return CollectionTools.value_set_to_tuple(moduleDependencies)
def _node_label(self, node):
    """Build the display label for node: the explicit LABEL attribute when
    present (otherwise the node name), followed by one rendered line per
    label decoration.

    Uses `is not None` instead of `!= None` and reads the LABEL attribute
    only once.
    """
    nodename = str(node)
    explicit_label = self._graph().node_attr(node, NodeAttributes.LABEL)
    if explicit_label is not None:
        label = self._render_node_name(explicit_label)
    else:
        label = self._render_node_name(nodename)
    if len(self._label_decorators()) > 0:
        # Separate the name line from the decoration lines ("\\n" is the
        # literal backslash-n understood by the graph renderer).
        if len(label):
            label += "\\n"
        label += "\\n".join(
            CollectionTools.flatten(
                BaseGraphElementDecorator.decorations(
                    self._label_decorators(), node)))
    return label
def get_rparts(dirname, num, strict=False, pathmodule=os.path):
    """Return (roughly) the last num path components of dirname.

    dirname is split from the right on the platform separator; a trailing
    "build" component is dropped unless strict is True.

    NOTE(review): len_dirname counts dot-separated sub-parts of every
    component, yet it is used to slice dirname_parts, which is indexed by
    whole components -- the two disagree whenever a component contains a
    dot. Confirm this is intended.
    """
    dirname_parts = dirname.rsplit(pathmodule.sep, num + 1)
    len_dirname = len(
        list(
            CollectionTools.flatten(
                dir_part.split('.') for dir_part in dirname_parts)))
    # TODO perhaps the "build" should be required?
    # [-1] instead of [len(...) - 1]: rsplit always yields >= 1 element.
    if not strict and dirname_parts[-1].lower() == "build":
        del dirname_parts[-1]
    if len_dirname > num:
        return dirname_parts[len_dirname - num:]
    else:
        return dirname_parts
def get_header_list(self):
    '''
    Returns a list of all headers that are referenced in any implementation
    file that is defined in a module specification.
    '''
    referenced_files = set(
        CollectionTools.flatten(
            config_file_include_deps_supply.get_file_include_deps()))
    # TODO filter only headers. Where is the list of valid header extensions defined?
    headers = [path for path in referenced_files
               if config_cpp_file_configuration.is_header_file(path)]
    self.__logger.info(
        "%i headers referenced in module spec implementation files" %
        (len(headers)))
    return headers
def __copy_edges_and_node(self, nodes, edges, do_deepcopy):
    """Initialize self.__edges and self.__nodes from the given collections.

    edges=None means an empty edge set; nodes=None means the node set is
    derived from the edges. With do_deepcopy the inputs are copied, else
    they are adopted by reference.

    Fix: edges is resolved before nodes, so deriving the nodes from the
    edges no longer reads self.__edges before this method assigned it
    (previously the nodes=None, edges=None combination read an unassigned
    or stale self.__edges and only afterwards reset it to an empty set).
    """
    if edges is None:
        self.__edges = set()
    elif do_deepcopy:
        self.__edges = deepcopy(set(edges))
    else:
        self.__edges = edges
    if nodes is None:
        # Derive the node set from the edge endpoints; each node gets an
        # empty attribute dict.
        self.__nodes = dict()
        for node in CollectionTools.union_all(
                edge.node_set() for edge in self.__edges):
            self.__nodes[node] = dict()
    elif do_deepcopy:
        self.__nodes = deepcopy(nodes)
    else:
        self.__nodes = nodes
def get_scc_merged_graph(self):
    """Return a copy of the base graph in which every strongly connected
    component is collapsed into a single group node.

    Uses `is None` instead of `== None` for the SCC-membership check.
    """
    result_graph = MutableAttributeGraph()
    # Nodes outside any SCC are copied verbatim, with an emptied label.
    for node in self.__base_graph.node_names_iter():
        if self.__scc_helper.get_scc_number_of_node(node) is None:
            result_graph.add_node(node)
            result_graph.set_node_attrs(node, self.__base_graph.node_attrs(node))
            result_graph.set_node_attrs(node, {NodeAttributes.LABEL: ""})
    # Each SCC becomes one node that groups all base nodes of its members.
    for scc in self.__scc_helper.get_sccs_iter():
        node_name = self.get_scc_node_name(scc)
        result_graph.add_node(node_name)
        all_base_nodes = CollectionTools.union_all(
            self.__get_grouped_nodes(node) for node in scc)
        result_graph.set_node_attrs(
            node_name,
            {NodeAttributes.GROUPED_NODES: all_base_nodes,
             NodeAttributes.LABEL: ""})
    # Re-create the edges between the mapped nodes, dropping edges that
    # collapse into a self loop inside one SCC.
    for edge in self.__base_graph.edges():
        source = self.get_node_name(edge.get_from_node())
        target = self.get_node_name(edge.get_to_node())
        if source != target:
            result_graph.add_edge(source, target)
    return result_graph
def dependent_nodes(graph, start_nodes):
    """Return all nodes from which any node in start_nodes is reachable
    (the start nodes themselves included, per the doctest below).

    >>> graph = GraphConversions.edge_list_to_pygraph([('a', 'b'), ('b', 'c'), ('a', 'c')])
    >>> sorted(GraphAlgorithms.dependent_nodes(graph, ['c']))
    ['a', 'b', 'c']
    """
    if isinstance(graph, BasicGraph):
        accessibility_matrix = GraphAlgorithms.accessibility_matrix_from_graph(
            graph, inverse=True)
    elif isinstance(graph, digraph):
        accessibility_matrix = accessibility(graph.reverse())
    else:
        # Fix: apply the % formatting -- TypeError does not support
        # logging-style lazy format arguments, so the message was never
        # interpolated.
        raise TypeError("%s is not a known graph type" % (graph, ))
    return CollectionTools.union_all(
        accessibility_matrix[start_node]
        for start_node in start_nodes if start_node in accessibility_matrix)
def nodes_raw(self):
    """Lazily compute and cache the union of the raw node sets of all
    contained graphs as a frozenset (cache check via `is None`)."""
    if self.__nodes is None:
        self.__nodes = frozenset(
            CollectionTools.union_all(
                graph.nodes_raw() for graph in self.__graphs))
    return self.__nodes
def __init__(self):
    """Set up the logger and the inverse (vcproj -> module) lookup map."""
    self.__logger = logging.getLogger(self.__class__.__module__)
    module_to_vcproj = config_msvc_data_supply().get_module_to_vcproj_map()
    self.__vcproj_to_module_map = CollectionTools.transpose_items_as_dict(
        module_to_vcproj)
def determine_duplicates():
    """Return the header files that are claimed by more than one module."""
    header_to_module_map = (
        config_file_to_module_map_supply().get_module_to_header_file_map())
    return CollectionTools.find_duplicate_values(header_to_module_map)
def get_skip_module_types_as_source(self):
    """Union of the module types whose nodes are skipped as edge sources."""
    skipped_type_groups = (
        PhysicalModuleTypeConstants.WRAPPER_MODULE_TYPES,
        PhysicalModuleTypeConstants.IRRELEVANT_MODULE_TYPES,
    )
    return CollectionTools.union_all(skipped_type_groups)
def connected_nodes(self):
    """Return the union of all endpoints of this graph's edges."""
    endpoint_sets = (edge.node_set() for edge in self._edges())
    return CollectionTools.union_all(endpoint_sets)
def get_skip_module_types_as_source(self):
    """Union of the module types whose nodes are skipped as edge sources."""
    skipped_type_groups = (
        PhysicalModuleTypeConstants.IMPLEMENTATION_MODULE_TYPES,
        PhysicalModuleTypeConstants.INTERFACE_MODULE_TYPES,
        PhysicalModuleTypeConstants.IRRELEVANT_MODULE_TYPES,
    )
    return CollectionTools.union_all(skipped_type_groups)
def __extend_normed_unique(self, result, paths):
    """Append the normalized form of each path to result, letting
    extend_unique skip duplicates (lazy generator, like the imap it replaces)."""
    normalized_paths = (os.path.normpath(path) for path in paths)
    CollectionTools.extend_unique(result, normalized_paths)
def test_flatten_3(self):
    """Flattening a tuple of empty tuples yields an empty iterator."""
    flattened = CollectionTools.flatten(((), (), ()))
    self.assertTrue(hasattr(flattened, "__iter__"))
    self.assertEquals([], list(flattened))
def test_flatten_1(self):
    """Flattening a nested tuple yields all leaves in order."""
    flattened = CollectionTools.flatten((1, 2, (3, (4, 5))))
    self.assertTrue(hasattr(flattened, "__iter__"))
    self.assertEquals([1, 2, 3, 4, 5], list(flattened))
def __init__(self, modules, *args, **kwargs):
    """Wrap the internal grouper and a prefix mapper; node configuration is
    deferred to configure_nodes when modules are given."""
    self.__decoratee = CABStyleFinestLevelModuleGrouperInternal(
        modules=None, *args, **kwargs)
    group_prefixes = CollectionTools.identity_dict(
        self.additional_module_group_prefixes)
    self.__prefix_mapper = PrefixMapper(group_prefixes)
    if modules:
        self.configure_nodes(modules)