def get_all_dependencies(self, dependant):
    """
    Return every dependency of ``dependant``, including indirectly
    related ones, as a set.

    >>> tracker = DependencyTracker()
    >>> tracker.add(0, 1)
    >>> tracker.add(1, 2)
    >>> tracker.get_all_dependencies(2)
    {0, 1}

    :param dependant: The dependant to get all dependencies for.
    :return: A set of dependencies.
    """
    collected = set()

    def remember(_, dependency):
        # Collect every node the traversal reaches beyond the start node.
        collected.add(dependency)

    def direct_dependencies_of(node):
        # A dependency of ``node`` is any key whose dependant-set
        # contains ``node``.
        return {dependency
                for dependency, dependants in self._dependency_dict.items()
                if node in dependants}

    traverse_graph([dependant], direct_dependencies_of, remember)

    return collected
def get_all_dependants(self, dependency):
    """
    Return every dependant of ``dependency``, including indirectly
    related ones, as a set.

    >>> tracker = DependencyTracker()
    >>> tracker.add(0, 1)
    >>> tracker.add(1, 2)
    >>> tracker.get_all_dependants(0)
    {1, 2}

    :param dependency: The dependency to get all dependants for.
    :return: A set of dependants.
    """
    collected = set()

    def remember(_, dependant):
        # Collect every node the traversal reaches beyond the start node.
        collected.add(dependant)

    def direct_dependants_of(node):
        # Direct dependants are stored as the value-set of the node's
        # entry; unknown nodes have none.
        return self._dependency_dict.get(node, frozenset())

    traverse_graph([dependency], direct_dependants_of, remember)

    return collected
def check_circular_dependencies(self):
    """
    Checks whether there are circular dependency conflicts.

    >>> tracker = DependencyTracker()
    >>> tracker.add(0, 1)
    >>> tracker.add(1, 0)
    >>> tracker.check_circular_dependencies()
    Traceback (most recent call last):
     ...
    coalib.core.CircularDependencyError.CircularDependencyError: ...

    :raises CircularDependencyError:
        Raised on circular dependency conflicts.
    """
    def direct_dependants_of(node):
        return self._dependency_dict.get(node, frozenset())

    # Walking every registered dependency is sufficient: traverse_graph
    # performs the cycle detection itself and raises on conflicts, so no
    # walk-callback is needed here.
    traverse_graph(self._dependency_dict.keys(), direct_dependants_of)
def test_function(self):
    # Record every edge the traversal walks as a (source, target) pair.
    walked_edges = []

    def record_edge(origin, target):
        walked_edges.append((origin, target))

    traverse_graph(start_nodes,
                   partial(get_successive_nodes, graph),
                   record_edge)

    # Test if edges were walked twice: removing one occurrence of every
    # distinct edge must leave nothing behind.
    unique_edges = set(walked_edges)
    leftovers = list(walked_edges)
    for edge in unique_edges:
        leftovers.remove(edge)
    self.assertEqual(len(leftovers), 0,
                     'Edge(s) walked twice: ' + ', '.join(
                         str(r) for r in leftovers))

    # Compare real with expected without respecting order.
    self.assertEqual(unique_edges, set(expected))
def initialize_dependencies(bears):
    """
    Initializes and returns a ``DependencyTracker`` instance together with a
    set of bears ready for scheduling.

    This function acquires, processes and registers bear dependencies
    accordingly using a consumer-based system, where each dependency bear has
    only a single instance per section and file-dictionary.

    The bears set returned accounts for bears that have dependencies and
    excludes them accordingly. Dependency bears that have themselves no
    further dependencies are included so the dependency chain can be
    processed correctly.

    :param bears:
        The set of instantiated bears to run that serve as an entry-point.
    :return:
        A tuple with ``(dependency_tracker, bears_to_schedule)``.
    """
    # Pre-collect bears in a set as we use them more than once. Especially
    # remove duplicate instances.
    bears = set(bears)

    dependency_tracker = DependencyTracker()

    # For a consumer-based system, we have a situation which can be
    # visualized with a graph. Each dependency relation from one bear-type to
    # another bear-type is represented with an arrow, starting from the
    # dependent bear-type and ending at the dependency:
    #
    # (section1, file_dict1) (section1, file_dict2) (section2, file_dict2)
    #      |       |                  |                      |
    #      V       V                  V                      V
    #    bear1   bear2              bear3                  bear4
    #      |       |                  |                      |
    #      V       V                  |                      |
    # BearType1 BearType2 -----------------------------------|
    #      |       |                                         |
    #      |       |                                         V
    #      ---------------------------------------------> BearType3
    #
    # We need to traverse this graph and instantiate dependency bears
    # accordingly, one per section.

    # Group bears by sections and file-dictionaries. These will serve as
    # entry-points for the dependency-instantiation-graph.
    grouping = group(bears, key=lambda bear: (bear.section, bear.file_dict))
    for (section, file_dict), bears_per_section in grouping:
        # Pre-collect bears as the iterator only works once.
        bears_per_section = list(bears_per_section)

        # Now traverse each edge of the graph, and instantiate a new
        # dependency bear if not already instantiated. For the entry point
        # bears, we hack in identity-mappings because those are already
        # instances. Also map the types of the instantiated bears to those
        # instances, as if the user already supplied an instance of a
        # dependency, we reuse it accordingly.
        type_to_instance_map = {}
        for bear in bears_per_section:
            type_to_instance_map[bear] = bear
            type_to_instance_map[type(bear)] = bear

        def instantiate_and_track(prev_bear_type, next_bear_type):
            # Instantiate each dependency type exactly once per
            # (section, file_dict) group, then register the edge.
            # NOTE(review): assumes traverse_graph invokes this callback
            # once per walked edge, with the dependent node first — confirm
            # against traverse_graph's contract.
            if next_bear_type not in type_to_instance_map:
                type_to_instance_map[next_bear_type] = (
                    next_bear_type(section, file_dict))

            dependency_tracker.add(type_to_instance_map[next_bear_type],
                                   type_to_instance_map[prev_bear_type])

        traverse_graph(bears_per_section,
                       lambda bear: bear.BEAR_DEPS,
                       instantiate_and_track)

    # Get all bears that aren't resolved and exclude those from scheduler
    # set.
    bears -= {bear for bear in bears
              if dependency_tracker.get_dependencies(bear)}

    # Get all bears that have no further dependencies and shall be
    # scheduled additionally.
    for dependency in dependency_tracker.dependencies:
        if not dependency_tracker.get_dependencies(dependency):
            bears.add(dependency)

    return dependency_tracker, bears
def initialize_dependencies(bears):
    """
    Initializes and returns a ``DependencyTracker`` instance together with a
    set of bears ready for scheduling.

    This function acquires, processes and registers bear dependencies
    accordingly using a consumer-based system, where each dependency bear has
    only a single instance per section and file-dictionary.

    The bears set returned accounts for bears that have dependencies and
    excludes them accordingly. Dependency bears that have themselves no
    further dependencies are included so the dependency chain can be
    processed correctly.

    :param bears:
        The set of instantiated bears to run that serve as an entry-point.
    :return:
        A tuple with ``(dependency_tracker, bears_to_schedule)``.
    """
    # Pre-collect bears in a set as we use them more than once. Especially
    # remove duplicate instances.
    bears = set(bears)

    dependency_tracker = DependencyTracker()

    # For a consumer-based system, we have a situation which can be
    # visualized with a graph. Each dependency relation from one bear-type to
    # another bear-type is represented with an arrow, starting from the
    # dependent bear-type and ending at the dependency:
    #
    # (section1, file_dict1) (section1, file_dict2) (section2, file_dict2)
    #      |       |                  |                      |
    #      V       V                  V                      V
    #    bear1   bear2              bear3                  bear4
    #      |       |                  |                      |
    #      V       V                  |                      |
    # BearType1 BearType2 -----------------------------------|
    #      |       |                                         |
    #      |       |                                         V
    #      ---------------------------------------------> BearType3
    #
    # We need to traverse this graph and instantiate dependency bears
    # accordingly, one per section.

    # Group bears by sections and file-dictionaries. These will serve as
    # entry-points for the dependency-instantiation-graph.
    grouping = group(bears, key=lambda bear: (bear.section, bear.file_dict))
    for (section, file_dict), bears_per_section in grouping:
        # Pre-collect bears as the iterator only works once.
        bears_per_section = list(bears_per_section)

        # Now traverse each edge of the graph, and instantiate a new
        # dependency bear if not already instantiated. For the entry point
        # bears, we hack in identity-mappings because those are already
        # instances. Also map the types of the instantiated bears to those
        # instances, as if the user already supplied an instance of a
        # dependency, we reuse it accordingly.
        type_to_instance_map = {}
        for bear in bears_per_section:
            type_to_instance_map[bear] = bear
            type_to_instance_map[type(bear)] = bear

        def get_successive_nodes_and_track(bear):
            # Successor-function for traverse_graph that also performs the
            # instantiation and dependency registration as a side effect.
            for dependency_bear_type in bear.BEAR_DEPS:
                if dependency_bear_type not in type_to_instance_map:
                    dependency_bear = dependency_bear_type(section, file_dict)
                    type_to_instance_map[dependency_bear_type] = (
                        dependency_bear)

                dependency_tracker.add(
                    type_to_instance_map[dependency_bear_type], bear)

            # Return the dependencies of the instances instead of the types,
            # so bears are capable to specify dependencies at runtime.
            return (type_to_instance_map[dependency_bear_type]
                    for dependency_bear_type in bear.BEAR_DEPS)

        traverse_graph(bears_per_section, get_successive_nodes_and_track)

    # Get all bears that aren't resolved and exclude those from scheduler
    # set.
    bears -= {bear for bear in bears
              if dependency_tracker.get_dependencies(bear)}

    # Get all bears that have no further dependencies and shall be
    # scheduled additionally.
    for dependency in dependency_tracker.dependencies:
        if not dependency_tracker.get_dependencies(dependency):
            bears.add(dependency)

    return dependency_tracker, bears
def test_function(self):
    # Walking the cyclic graph must abort with a CircularDependencyError.
    with self.assertRaises(CircularDependencyError) as cm:
        traverse_graph(start_nodes,
                       partial(get_successive_nodes, graph))