def test_several_updates(simple_plan):
    """Two successive failure updates are reflected in wait_finish summaries.

    Failing 'just_fail' leaves one task pending; failing 'echo_stuff' as
    well drives the plan to two errors and nothing pending.
    """
    simple_plan.node['just_fail']['status'] = states.ERROR.name
    graph.update_graph(simple_plan)
    first_summary = next(graph.wait_finish(simple_plan.graph['uid'], 10))
    assert first_summary == {
        'SKIPPED': 0,
        'SUCCESS': 0,
        'NOOP': 0,
        'ERROR': 1,
        'INPROGRESS': 0,
        'PENDING': 1,
        'ERROR_RETRY': 0,
    }
    simple_plan.node['echo_stuff']['status'] = states.ERROR.name
    graph.update_graph(simple_plan)
    second_summary = next(graph.wait_finish(simple_plan.graph['uid'], 10))
    assert second_summary == {
        'SKIPPED': 0,
        'SUCCESS': 0,
        'NOOP': 0,
        'ERROR': 2,
        'INPROGRESS': 0,
        'PENDING': 0,
        'ERROR_RETRY': 0,
    }
def test_several_updates(simple):
    """Each failed task pushed into the graph shows up in the state summary."""
    # First failure: one ERROR, one task still PENDING behind it.
    simple.node["just_fail"]["status"] = states.ERROR.name
    graph.update_graph(simple)
    assert next(graph.wait_finish(simple.graph["uid"], 10)) == {
        "SKIPPED": 0,
        "SUCCESS": 0,
        "NOOP": 0,
        "ERROR": 1,
        "INPROGRESS": 0,
        "PENDING": 1,
        "ERROR_RETRY": 0,
    }
    # Second failure: both tasks errored, nothing left pending.
    simple.node["echo_stuff"]["status"] = states.ERROR.name
    graph.update_graph(simple)
    assert next(graph.wait_finish(simple.graph["uid"], 10)) == {
        "SKIPPED": 0,
        "SUCCESS": 0,
        "NOOP": 0,
        "ERROR": 2,
        "INPROGRESS": 0,
        "PENDING": 0,
        "ERROR_RETRY": 0,
    }
def soft_stop(self, ctxt, plan_uid):
    """Mark all not-yet-started tasks of a plan as SKIPPED.

    Takes the plan lock so the update does not race with the scheduler;
    tasks already running are left untouched.
    """
    with Lock(plan_uid, str(get_current_ident()), retries=20, wait=1):
        plan = graph.get_graph(plan_uid)
        # Only tasks that have not begun executing can be skipped.
        stoppable = (states.PENDING.name, states.PENDING_RETRY.name)
        for task in plan:
            if plan.node[task]['status'] in stoppable:
                plan.node[task]['status'] = states.SKIPPED.name
        graph.update_graph(plan)
def filter(uid, start, end):
    """Re-filter a stored plan to the [start, end] window and render it.

    Resets any previous filtering first; aborts with a ClickException if
    the filter reports errors, otherwise persists the plan and writes a
    PNG named after the plan.
    """
    graph.reset_filtered(uid)
    plan = graph.get_graph(uid)
    problems = filters.filter(plan, start=start, end=end)
    if problems:
        raise click.ClickException('\n'.join(problems))
    graph.update_graph(plan)
    utils.write_graph(plan)
    click.echo('Created {name}.png'.format(name=plan.graph['name']))
def schedule(plan_uid, dg):
    """Build and kick off a celery execution for the traversable tasks of dg."""
    ready_tasks = traverse(dg)
    # Tasks currently running count against the concurrency limits.
    running = [t for t in dg if dg.node[t]['status'] == 'INPROGRESS']
    limit_chain = limits.get_default_chain(dg, running, ready_tasks)
    execution = executor.celery_executor(
        dg, limit_chain, control_tasks=('fault_tolerance',))
    # Persist the graph before firing the executor.
    graph.update_graph(dg)
    execution()
def next(self, ctxt, plan_uid):
    """Schedule the next batch of ready tasks for the given plan.

    Holds the plan lock for the whole read-schedule-write cycle, persists
    the updated graph, and returns the list of task names that were
    scheduled.

    Raises:
        ValueError: if the stored plan contains no tasks (an empty plan
            can never make progress, so scheduling it is an error; this
            matches the guard in the other ``next`` implementations in
            this file).
    """
    with Lock(plan_uid, str(get_current_ident()), retries=20, wait=1):
        log.debug('Received *next* event for %s', plan_uid)
        plan = graph.get_graph(plan_uid)
        # Fail loudly instead of silently scheduling nothing.
        if len(plan) == 0:
            raise ValueError('Plan {} is empty'.format(plan_uid))
        rst = self._next(plan)
        for task_name in rst:
            self._do_scheduling(plan, task_name)
        graph.update_graph(plan)
        log.debug('Scheduled tasks %r', rst)
        # process tasks with tasks client
        return rst
def soft_stop(self, ctxt, plan_uid):
    """Skip every task of the plan that is waiting to run (or to retry).

    The plan lock guards against concurrent scheduler updates.
    """
    with Lock(plan_uid, str(get_current_ident()), retries=20,
              waiter=Waiter(1)):
        plan = graph.get_graph(plan_uid)
        skippable = (states.PENDING.name, states.ERROR_RETRY.name)
        for node_name in plan:
            if plan.node[node_name]['status'] in skippable:
                plan.node[node_name]['status'] = states.SKIPPED.name
        graph.update_graph(plan)
def test_wait_finish(simple):
    """A fully successful plan yields a summary with only SUCCESS counts.

    The expected mapping includes 'ERROR_RETRY', matching the summary
    shape asserted by the other wait_finish tests in this file.
    """
    for n in simple:
        simple.node[n]['status'] = states.SUCCESS.name
    graph.update_graph(simple)
    assert next(graph.wait_finish(simple.graph['uid'], 10)) == {
        'SKIPPED': 0,
        'SUCCESS': 2,
        'NOOP': 0,
        'ERROR': 0,
        'INPROGRESS': 0,
        'PENDING': 0,
        'ERROR_RETRY': 0,
    }
def test_wait_finish(simple):
    """wait_finish reports all tasks as SUCCESS once every node succeeds."""
    # Drive every node in the plan to SUCCESS before persisting.
    for node_name in simple:
        simple.node[node_name]["status"] = states.SUCCESS.name
    graph.update_graph(simple)
    summary = next(graph.wait_finish(simple.graph["uid"], 10))
    assert summary == {
        "SKIPPED": 0,
        "SUCCESS": 2,
        "NOOP": 0,
        "ERROR": 0,
        "INPROGRESS": 0,
        "PENDING": 0,
        "ERROR_RETRY": 0,
    }
def update_next(self, ctxt, status, errmsg):
    """Record a task status update, then schedule whatever became ready.

    The task id in ctxt is '<plan_uid>:<task_name>'; the whole
    update-and-reschedule cycle runs under the plan lock. Returns the
    list of newly scheduled task names.
    """
    log.debug(
        'Received update for TASK %s - %s %s',
        ctxt['task_id'], status, errmsg)
    plan_uid, task_name = ctxt['task_id'].rsplit(':', 1)
    with Lock(plan_uid, str(get_current_ident()), retries=20, wait=1):
        plan = graph.get_graph(plan_uid)
        self._do_update(plan, task_name, status, errmsg=errmsg)
        scheduled = self._next(plan)
        for name in scheduled:
            self._do_scheduling(plan, name)
        graph.update_graph(plan)
        log.debug('Scheduled tasks %r', scheduled)
        return scheduled
def next(self, ctxt, plan_uid):
    """Under the plan lock, schedule the next ready tasks and return them.

    Raises ValueError when the stored plan has no tasks at all.
    """
    with Lock(plan_uid, str(get_current_ident()), retries=20,
              waiter=Waiter(1)):
        log.debug('Received *next* event for %s', plan_uid)
        plan = graph.get_graph(plan_uid)
        if not len(plan):
            raise ValueError('Plan {} is empty'.format(plan_uid))
        scheduled = self._next(plan)
        for name in scheduled:
            self._do_scheduling(plan, name)
        graph.update_graph(plan)
        log.debug('Scheduled tasks %r', scheduled)
        # process tasks with tasks client
        return scheduled
def update_next(self, ctxt, status, errmsg):
    """Apply a task result to its plan and schedule the follow-up tasks.

    Splits '<plan_uid>:<task_name>' out of ctxt['task_id'], performs the
    update and rescheduling under the plan lock, and returns the names of
    the tasks that were scheduled.
    """
    log.debug('Received update for TASK %s - %s %s',
              ctxt['task_id'], status, errmsg)
    plan_uid, task_name = ctxt['task_id'].rsplit(':', 1)
    with Lock(plan_uid, str(get_current_ident()), retries=20,
              waiter=Waiter(1)):
        plan = graph.get_graph(plan_uid)
        self._do_update(plan, task_name, status, errmsg=errmsg)
        ready = self._next(plan)
        for ready_name in ready:
            self._do_scheduling(plan, ready_name)
        graph.update_graph(plan)
        log.debug('Scheduled tasks %r', ready)
        return ready
def next(self, ctxt, plan_uid):
    """Schedule the next ready tasks of a plan and return their names.

    The lookup, scheduling, and persistence all happen while holding the
    plan lock; an empty plan is rejected with ValueError.
    """
    lock = Lock(
        plan_uid, str(get_current_ident()), retries=20, waiter=Waiter(1))
    with lock:
        log.debug('Received *next* event for %s', plan_uid)
        plan = graph.get_graph(plan_uid)
        if len(plan) == 0:
            raise ValueError('Plan {} is empty'.format(plan_uid))
        tasks_to_run = self._next(plan)
        for task in tasks_to_run:
            self._do_scheduling(plan, task)
        graph.update_graph(plan)
        log.debug('Scheduled tasks %r', tasks_to_run)
        # process tasks with tasks client
        return tasks_to_run
def simple_plan_retries(simple_plan):
    """Fixture variant of simple_plan where 'just_fail' gets one retry."""
    failing_node = simple_plan.node['just_fail']
    failing_node['retry'] = 1
    # force=True: persist even though the stored plan already exists.
    graph.update_graph(simple_plan, force=True)
    return simple_plan
def timeout_plan(simple_plan):
    """Fixture variant of simple_plan where 'echo_stuff' times out after 1s."""
    echo_node = simple_plan.node['echo_stuff']
    echo_node['timeout'] = 1
    # force=True: overwrite the already-persisted plan.
    graph.update_graph(simple_plan, force=True)
    return simple_plan
def soft_stop(plan_uid):
    """Mark every PENDING task of the stored plan as SKIPPED and persist it."""
    dg = graph.get_graph(plan_uid)
    pending_tasks = [t for t in dg if dg.node[t]['status'] == 'PENDING']
    for task in pending_tasks:
        dg.node[task]['status'] = 'SKIPPED'
    graph.update_graph(dg)