def test_walk_raise(self):
    """Walk must raise SDKException when a node is never marked done.

    Node 'f' is deliberately left unfinished, so the 1-second walk
    timeout fires and the iterator raises.
    """
    dag = utils.TinyDAG()
    dag.from_dict(self.test_graph)
    stuck_node = 'f'
    with testtools.ExpectedException(exceptions.SDKException):
        for current in dag.walk(timeout=1):
            if current != stuck_node:
                dag.node_done(current)
def test_walk(self):
    """Sequential walk yields every node in a dependency-respecting order."""
    dag = utils.TinyDAG()
    dag.from_dict(self.test_graph)
    visited = []
    for current in dag.walk():
        visited.append(current)
        dag.node_done(current)
    self._verify_order(dag.graph, visited)
    # Every node of the source graph must have been visited exactly once.
    self.assertEqual(len(self.test_graph), len(visited))
def test_walk_parallel(self):
    """Parallel walk via a thread pool still produces a valid order.

    Each ready node is handed to ``test_walker_fn``, which records the
    node into ``sorted_list`` and marks it done so the walk can release
    its dependants. The executor context manager joins all workers
    before the order is verified.
    """
    sot = utils.TinyDAG()
    sot.from_dict(self.test_graph)
    sorted_list = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=15) as executor:
        for node in sot.walk(timeout=1):
            executor.submit(test_walker_fn, sot, node, sorted_list)
    self._verify_order(sot.graph, sorted_list)
    # Removed leftover debug print(sorted_list); tests should not write
    # to stdout on success.
    self.assertEqual(len(self.test_graph.keys()), len(sorted_list))
def project_cleanup(self, dry_run=True, wait_timeout=120, status_queue=None):
    """Cleanup the project resources.

    Cleanup all resources in all services, which provide cleanup methods.

    :param bool dry_run: Cleanup or only list identified resources.
    :param int wait_timeout: Maximum amount of time given to each service
        to complete the cleanup.
    :param queue status_queue: a threading queue object used to get current
        process status. The queue contains processed resources.
    """
    dependencies = {}
    get_dep_fn_name = '_get_cleanup_dependencies'
    cleanup_fn_name = '_service_cleanup'
    if not status_queue:
        status_queue = queue.Queue()
    # Collect cleanup-ordering info from every enabled service proxy that
    # implements both the dependency hook and the cleanup hook.
    for service in self.config.get_enabled_services():
        if hasattr(self, service):
            proxy = getattr(self, service)
            if (proxy and hasattr(proxy, get_dep_fn_name)
                    and hasattr(proxy, cleanup_fn_name)):
                deps = getattr(proxy, get_dep_fn_name)()
                if deps:
                    dependencies.update(deps)
    # Build the dependency DAG: an entry {k: {'before': [...]}} produces an
    # edge k -> dep for each dep that must be cleaned after k starts.
    dep_graph = utils.TinyDAG()
    for k, v in dependencies.items():
        dep_graph.add_node(k)
        for dep in v['before']:
            dep_graph.add_node(dep)
            dep_graph.add_edge(k, dep)
    # Walk the graph; each ready service's cleanup runs on the pool
    # executor, and cleanup_task marks the node done when it finishes.
    for service in dep_graph.walk(timeout=wait_timeout):
        fn = None
        if hasattr(self, service):
            proxy = getattr(self, service)
            cleanup_fn = getattr(proxy, cleanup_fn_name, None)
            if cleanup_fn:
                fn = functools.partial(cleanup_fn,
                                       dry_run=dry_run,
                                       status_queue=status_queue)
        if fn:
            self._pool_executor.submit(cleanup_task, dep_graph,
                                       service, fn)
        else:
            # No cleanup hook for this node: mark it done immediately so
            # its dependants are not blocked.
            dep_graph.node_done(service)
    # Poll until every graph node reports completion, or time out.
    for count in utils.iterate_timeout(
            timeout=wait_timeout,
            message="Timeout waiting for cleanup to finish",
            wait=1):
        if dep_graph.is_complete():
            return
def test_add_node_after_edge(self):
    """Re-adding an existing node must not discard its recorded edges."""
    dag = utils.TinyDAG()
    dag.add_node('a')
    dag.add_edge('a', 'b')
    # Adding 'a' a second time should be a no-op for its edge set.
    dag.add_node('a')
    self.assertEqual(dag._graph['a'], {'b'})
def test_topological_sort(self):
    """topological_sort returns all nodes in dependency order."""
    dag = utils.TinyDAG()
    dag.from_dict(self.test_graph)
    ordering = dag.topological_sort()
    self._verify_order(dag.graph, ordering)
    # The ordering must cover the whole input graph.
    self.assertEqual(len(self.test_graph), len(ordering))
def test_from_dict(self):
    """Smoke test: loading a graph from a dict must not raise."""
    dag = utils.TinyDAG()
    dag.from_dict(self.test_graph)
def project_cleanup(
    self,
    dry_run=True,
    wait_timeout=120,
    status_queue=None,
    filters=None,
    resource_evaluation_fn=None
):
    """Cleanup the project resources.

    Cleanup all resources in all services, which provide cleanup methods.

    :param bool dry_run: Cleanup or only list identified resources.
    :param int wait_timeout: Maximum amount of time given to each service
        to complete the cleanup.
    :param queue status_queue: a threading queue object used to get current
        process status. The queue contains processed resources.
    :param dict filters: Additional filters for the cleanup (only resources
        matching all filters will be deleted, if there are no other
        dependencies).
    :param resource_evaluation_fn: A callback function, which will be
        invoked for each resource and must return True/False depending on
        whether resource need to be deleted or not.
    """
    dependencies = {}
    get_dep_fn_name = '_get_cleanup_dependencies'
    cleanup_fn_name = '_service_cleanup'
    if not status_queue:
        status_queue = queue.Queue()
    # Collect cleanup-ordering info from every enabled service proxy that
    # implements both the dependency hook and the cleanup hook.
    for service in self.config.get_enabled_services():
        if hasattr(self, service):
            proxy = getattr(self, service)
            if (
                proxy
                and hasattr(proxy, get_dep_fn_name)
                and hasattr(proxy, cleanup_fn_name)
            ):
                deps = getattr(proxy, get_dep_fn_name)()
                if deps:
                    dependencies.update(deps)
    # Build the dependency DAG: 'before' entries produce edges k -> dep,
    # while 'after' entries produce the reverse edges dep -> k.
    dep_graph = utils.TinyDAG()
    for k, v in dependencies.items():
        dep_graph.add_node(k)
        for dep in v['before']:
            dep_graph.add_node(dep)
            dep_graph.add_edge(k, dep)
        for dep in v.get('after', []):
            dep_graph.add_edge(dep, k)
    # Shared dict of resources identified so far, passed to each service's
    # cleanup so later services can consult earlier findings.
    cleanup_resources = dict()
    # Walk the graph; each ready service's cleanup runs on the pool
    # executor, and cleanup_task marks the node done when it finishes.
    for service in dep_graph.walk(timeout=wait_timeout):
        fn = None
        if hasattr(self, service):
            proxy = getattr(self, service)
            cleanup_fn = getattr(proxy, cleanup_fn_name, None)
            if cleanup_fn:
                fn = functools.partial(
                    cleanup_fn,
                    dry_run=dry_run,
                    client_status_queue=status_queue,
                    identified_resources=cleanup_resources,
                    filters=filters,
                    resource_evaluation_fn=resource_evaluation_fn
                )
        if fn:
            self._pool_executor.submit(
                cleanup_task, dep_graph, service, fn
            )
        else:
            # No cleanup hook for this node: mark it done immediately so
            # its dependants are not blocked.
            dep_graph.node_done(service)
    # Poll until every graph node reports completion, or time out.
    for count in utils.iterate_timeout(
            timeout=wait_timeout,
            message="Timeout waiting for cleanup to finish",
            wait=1):
        if dep_graph.is_complete():
            return