def _update_extensions(self) -> None:
    extension_types_enabled_in_configuration = {
        extension_configuration.extension_type
        for extension_configuration in self._configuration.extensions
        if extension_configuration.enabled
    }
    extension_types_sorter = TopologicalSorter(
        _build_extension_type_graph(extension_types_enabled_in_configuration))
    extension_types_sorter.prepare()
    extensions = []
    while extension_types_sorter.is_active():
        extension_types_batch = extension_types_sorter.get_ready()
        extensions_batch = []
        for extension_type in extension_types_batch:
            if issubclass(extension_type, ConfigurableExtension):
                if (extension_type not in extension_types_enabled_in_configuration
                        or self._configuration.extensions[extension_type].extension_type_configuration is None):
                    configuration = extension_type.default_configuration()
                else:
                    configuration = self._configuration.extensions[extension_type].extension_type_configuration
                extension = extension_type(self, configuration)
            else:
                extension = extension_type(self)
            extensions_batch.append(extension)
            extension_types_sorter.done(extension_type)
        extensions.append(extensions_batch)
    self._extensions._update(extensions)
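
# The batching idiom above in isolation: collect a whole get_ready() batch,
# process every node in it, and mark each done before asking for the next
# batch. A minimal sketch with a made-up dependency graph; the extension
# names here are purely illustrative.
from graphlib import TopologicalSorter

graph = {"Maps": {"Http"}, "Trees": {"Http"}, "Http": set()}

sorter = TopologicalSorter(graph)
sorter.prepare()
batches = []
while sorter.is_active():
    batch = sorter.get_ready()
    for name in batch:
        sorter.done(name)
    batches.append(list(batch))

print(batches)  # e.g. [['Http'], ['Maps', 'Trees']]: each batch depends only on earlier ones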
def get_op_order(self):
    ts = TopologicalSorter()

    for host in self.inventory:
        for i, op_hash in enumerate(host.op_hash_order):
            if not i:
                ts.add(op_hash)
            else:
                ts.add(op_hash, host.op_hash_order[i - 1])

    final_op_order = []

    ts.prepare()

    while ts.is_active():
        # Ensure that where we have multiple different operations that can be executed in any
        # dependency order we order them by line numbers.
        node_group = sorted(
            ts.get_ready(),
            key=lambda op_hash: self.op_meta[op_hash]["op_order"],
        )
        ts.done(*node_group)
        final_op_order.extend(node_group)

    return final_op_order
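
# The same pattern in isolation: feeding each host's per-host operation order
# into a single sorter yields one global order, with ties inside a ready batch
# broken deterministically. The chain data and priorities below are made up.
from graphlib import TopologicalSorter

chains = [["a", "b", "d"], ["a", "c", "d"]]   # per-host operation orders
priority = {"a": 0, "b": 1, "c": 2, "d": 3}   # e.g. source line numbers

ts = TopologicalSorter()
for chain in chains:
    for i, op in enumerate(chain):
        # Each operation depends on its predecessor in the same chain
        # (chain[i - 1:i] is empty when i == 0).
        ts.add(op, *chain[i - 1:i])

ts.prepare()
order = []
while ts.is_active():
    group = sorted(ts.get_ready(), key=priority.__getitem__)
    ts.done(*group)
    order.extend(group)

print(order)  # ['a', 'b', 'c', 'd']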
def do_all_tasks(graph, task_queue, done_queue):
    topological_sorter = TopologicalSorter(graph)
    topological_sorter.prepare()
    while topological_sorter.is_active():
        for node in topological_sorter.get_ready():
            task_queue.put(node)
        node = done_queue.get()
        topological_sorter.done(node)
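
# A sketch of how do_all_tasks might be driven: worker threads consume nodes
# from task_queue and report them on done_queue. The worker body is a stand-in
# for real work; the names, graph, and thread count here are assumptions.
import queue
import threading

def worker(task_queue, done_queue):
    while True:
        node = task_queue.get()
        if node is None:  # sentinel: shut down
            break
        done_queue.put(node)  # real work would happen before reporting

graph = {"b": {"a"}, "c": {"a"}, "d": {"b", "c"}}
task_queue, done_queue = queue.Queue(), queue.Queue()
threads = [threading.Thread(target=worker, args=(task_queue, done_queue))
           for _ in range(2)]
for t in threads:
    t.start()
do_all_tasks(graph, task_queue, done_queue)
for _ in threads:
    task_queue.put(None)
for t in threads:
    t.join()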
def validate(self, result: Announcement):
    graph = TopologicalSorter()
    for dep in result.dependencies:
        graph.add(dep)
    for key, message in self.items():
        for dep in message.dependencies:
            graph.add(dep, key)
    graph.prepare()
    missing = graph._node2info.keys() - self.keys()
    if missing:
        raise ValueError(f"Missing items: {','.join(missing)}")
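
# graph._node2info is a private CPython implementation detail and may change
# between versions; a sketch of the same "referenced but never defined" check
# that tracks the node set explicitly instead. find_missing and its arguments
# are hypothetical names, not part of the snippet above.
from graphlib import TopologicalSorter

def find_missing(dependencies_by_key, known_keys):
    graph = TopologicalSorter()
    seen = set()
    for key, deps in dependencies_by_key.items():
        seen.add(key)
        for dep in deps:
            seen.add(dep)
            graph.add(dep, key)
    graph.prepare()  # also raises CycleError on circular dependencies
    return seen - set(known_keys)

assert find_missing({"a": {"b"}, "b": {"c"}}, {"a", "b"}) == {"c"}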
def to_dsl(self, provider, operations_graph, reversed_operations_graph,
           cluster_name, is_delete, artifacts=None, target_directory=None,
           inputs=None, outputs=None, extra=None, debug=False):
    if artifacts is None:
        artifacts = []
    if target_directory is None:
        target_directory = self.initial_artifacts_directory

    self.artifacts = {}
    for art in artifacts:
        self.artifacts[art[NAME]] = art

    provider_config = ProviderConfiguration(provider)
    ansible_config = provider_config.get_section(ANSIBLE)
    node_filter_config = provider_config.get_subsection(ANSIBLE, NODE_FILTER)
    ids_file_path = os.getcwd() + '/id_vars_' + cluster_name + self.get_artifact_extension()

    self.init_global_variables(inputs)
    operations_graph = self.init_graph(operations_graph)
    # at this point the operations graph is a dictionary of copies of ProviderTemplate objects,
    # of the form Node/Relationship: {the set of operations of Nodes/Relationships it depends on}
    elements = TopologicalSorter(operations_graph)  # use TopologicalSorter to order the graph
    if is_delete:
        reversed_operations_graph = self.init_graph(reversed_operations_graph)
        elements = TopologicalSorter(reversed_operations_graph)
    elements.prepare()  # the operations at the top of the graph are now in state 'ready'

    ansible_playbook = []
    if not debug:
        self.prepare_for_run()  # initialize the tmp clouni directory

    q = Queue()   # queue of finished node names + operations
    active = []   # operations currently running in parallel
    first = True
    while elements.is_active():
        node_name = None
        # try to fetch a newly finished operation from the queue; if one arrived,
        # locate it in the active list, remove it, and mark it done in the graph,
        # then hand every operation that has become ready to the executor
        try:
            node_name = q.get_nowait()
        except:
            time.sleep(1)
        if node_name is not None:
            for node in active:
                if node.name == node_name.split(SEPARATOR)[0] \
                        and node.operation == node_name.split(SEPARATOR)[1]:
                    active.remove(node)
                    elements.done(node)
        for v in elements.get_ready():
            # in delete mode we skip every operation except delete,
            # and the create operation is transformed into delete
            if is_delete:
                if v.operation == 'create':
                    v.operation = 'delete'
                else:
                    elements.done(v)
                    continue
            logging.debug("Creating ansible play from operation: %s" % v.name + ':' + v.operation)
            extra_tasks_for_delete = self.get_extra_tasks_for_delete(
                v.type, v.name.replace('-', '_'), ids_file_path)
            description_prefix, module_prefix = self.get_module_prefixes(is_delete, ansible_config)
            description_by_type = self.ansible_description_by_type(v.type_name, description_prefix)
            module_by_type = self.ansible_module_by_type(v.type_name, module_prefix)
            ansible_play_for_elem = dict(
                name=description_prefix + ' ' + provider + ' cluster: ' + v.name + ':' + v.operation,
                hosts=self.default_host,
                tasks=[])
            # reload the id_vars file
            if not is_delete and first:
                first = False
                ansible_play_for_elem['tasks'].append(
                    copy.deepcopy({FILE: {PATH: ids_file_path, STATE: 'absent'}}))
                ansible_play_for_elem['tasks'].append(
                    copy.deepcopy({FILE: {PATH: ids_file_path, STATE: 'touch'}}))
            # create a play for every operation
            if v.operation == 'delete':
                if not v.is_software_component:
                    ansible_play_for_elem['tasks'].append(
                        copy.deepcopy({'include_vars': ids_file_path}))
                    ansible_tasks = self.get_ansible_tasks_for_delete(
                        v, description_by_type, module_by_type, additional_args=extra)
                    ansible_tasks.extend(self.get_ansible_tasks_from_interface(
                        v, target_directory, is_delete, v.operation, cluster_name,
                        additional_args=extra))
                    if not any(item == module_by_type
                               for item in ansible_config.get('modules_skipping_delete', [])):
                        ansible_play_for_elem['tasks'].extend(copy.deepcopy(ansible_tasks))
            elif v.operation == 'create':
                if not v.is_software_component:
                    ansible_play_for_elem['tasks'].extend(
                        copy.deepcopy(self.get_ansible_tasks_for_inputs(inputs)))
                    ansible_tasks = self.get_ansible_tasks_for_create(
                        v, target_directory, node_filter_config, description_by_type,
                        module_by_type, additional_args=extra)
                    ansible_tasks.extend(self.get_ansible_tasks_from_interface(
                        v, target_directory, is_delete, v.operation, cluster_name,
                        additional_args=extra))
                    ansible_play_for_elem['tasks'].extend(copy.deepcopy(ansible_tasks))
                    ansible_play_for_elem['tasks'].extend(copy.deepcopy(extra_tasks_for_delete))
                else:
                    ansible_play_for_elem['hosts'] = v.host
                    ansible_play_for_elem['tasks'].extend(
                        copy.deepcopy(self.get_ansible_tasks_from_interface(
                            v, target_directory, is_delete, v.operation, cluster_name,
                            additional_args=extra)))
            else:
                (_, element_type, _) = utils.tosca_type_parse(v.type)
                if element_type == NODES:
                    if v.is_software_component:
                        ansible_play_for_elem['hosts'] = v.host
                # relationship operations execute on the target or the source host,
                # depending on the operation
                elif element_type == RELATIONSHIPS:
                    if v.operation in ('pre_configure_target', 'post_configure_target', 'add_source'):
                        for elem in operations_graph:
                            if elem.name == v.target:
                                if elem.is_software_component:
                                    ansible_play_for_elem['hosts'] = elem.host
                                break
                    elif v.operation in ('pre_configure_source', 'post_configure_source'):
                        for elem in operations_graph:
                            if elem.name == v.source:
                                if elem.is_software_component:
                                    ansible_play_for_elem['hosts'] = elem.host
                                break
                    else:
                        logging.error("Unsupported operation for relationship in operation graph")
                        sys.exit(1)
                else:
                    logging.error("Unsupported element type in operation graph")
                    sys.exit(1)
                ansible_play_for_elem['tasks'].extend(
                    copy.deepcopy(self.get_ansible_tasks_from_interface(
                        v, target_directory, is_delete, v.operation, cluster_name,
                        additional_args=extra)))
            ansible_playbook.append(ansible_play_for_elem)
            # run the play and add the element to the active list
            if not debug:
                self.parallel_run([ansible_play_for_elem], v.name, v.operation, q, cluster_name)
                active.append(v)
            else:
                elements.done(v)
    if is_delete:
        last_play = dict(name='Renew id_vars_example.yaml', hosts=self.default_host, tasks=[])
        last_play['tasks'].append(copy.deepcopy({FILE: {PATH: ids_file_path, STATE: 'absent'}}))
        if not debug:
            self.parallel_run([last_play], None, None, q, cluster_name)
            done = q.get()
            if done != 'Done':
                logging.error("Something wrong with multiprocessing queue")
                sys.exit(1)
        ansible_playbook.append(last_play)
    # delete the directory named after cluster_name in the tmp clouni dir
    if not debug:
        rmtree(os.path.join(utils.get_tmp_clouni_dir(), cluster_name))
    return yaml.dump(ansible_playbook, default_flow_style=False, sort_keys=False)
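
# Stripped of the Ansible specifics, the scheduling skeleton of to_dsl is:
# submit every ready node to a parallel runner, block on a completion queue,
# and mark nodes done as results arrive. A sketch under the assumption that
# start_runner(node, done_queue) eventually puts node back on done_queue;
# run_parallel and start_runner are hypothetical names.
import queue
from graphlib import TopologicalSorter

def run_parallel(graph, start_runner):
    ts = TopologicalSorter(graph)
    ts.prepare()
    done_queue = queue.Queue()
    active = set()
    while ts.is_active():
        for node in ts.get_ready():
            active.add(node)
            start_runner(node, done_queue)
        finished = done_queue.get()  # block until some runner completes
        active.discard(finished)
        ts.done(finished)

# Even a synchronous "runner" satisfies the contract:
run_parallel({"b": {"a"}}, lambda node, dq: dq.put(node))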
import fileinput as fi
import heapq

from graphlib import TopologicalSorter

ts = TopologicalSorter()
for line in fi.input():
    # Assumes lines shaped like "Step A must be finished before step B can begin.",
    # so pr[1] and pr[-3] are the two step letters.
    pr = line.split(" ")
    a, b = pr[1], pr[-3]
    ts.add(b, a)

ts.prepare()

EXTRA = 60

# Worker heaps
IDLE = 5
BUSY = []

Q = list(ts.get_ready())
heapq.heapify(Q)

t_min = 0
while ts.is_active():
    # While there are tasks and idle workers, we hand out tasks.
    while Q and IDLE > 0:
        c = heapq.heappop(Q)
        IDLE -= 1
        heapq.heappush(BUSY, (t_min + ord(c) - (ord("A") - 1) + EXTRA, c))
    # Advance time to the earliest-finishing task, free its worker,
    # and feed any newly unblocked tasks back into the ready heap.
    t_min, c = heapq.heappop(BUSY)
    IDLE += 1
    ts.done(c)
    for n in ts.get_ready():
        heapq.heappush(Q, n)

print(t_min)
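
# Sanity check on the duration formula: with EXTRA = 60, step "A" costs
# ord("A") - (ord("A") - 1) + 60 = 1 + 60 = 61 time units and "Z" costs
# 26 + 60 = 86, i.e. each step's duration is 60 plus its alphabet position.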