def reset_state(wi: RunningTask, task_data: RunningTaskData) -> RunningTask:
    """Restore *wi* from the persisted snapshot in *task_data* and return it.

    The task is mutated in place: message history, FSM position, step-local
    state and timestamps are all taken from the snapshot. Afterwards the
    state machine is advanced once, deliberately discarding any messages
    that transition would emit (they were already sent before the restart).
    """
    wi.received_messages = task_data.received_messages  # type: ignore
    # jump the fsm directly to the last known state
    wi.machine.set_state(task_data.current_state_name)
    # restore the step-local state of the current step
    wi.current_state.import_state(task_data.current_state_snapshot)
    # restore the original timing information
    wi.task_started_at, wi.step_started_at = task_data.task_started_at, task_data.step_started_at
    # advance; all messages that would be emitted are intentionally ignored
    wi.move_to_next_state()
    return wi
def test_complete_workflow( workflow_instance: Tuple[RunningTask, Subscriber, Subscriber, Dict[str, List[Subscriber]]] ) -> None: init, s1, s2, subscriptions = workflow_instance # start new workflow instance wi, events = RunningTask.empty(init.descriptor, lambda: subscriptions) assert wi.current_step.name == "start" assert len(events) == 2 events = wi.handle_done(ActionDone("start", wi.id, "start", s1.id)) assert wi.current_step.name == "wait" assert len(events) == 0 handled, events = wi.handle_event(Event("godot", {"a": 2})) assert wi.current_step.name == "wait" assert handled is False assert len(events) == 0 handled, events = wi.handle_event(Event("godot", {"a": 1, "d": "test"})) assert wi.current_step.name == "collect" assert handled is True assert len(events) == 2 # event from EmitEvent and action from PerformAction events = wi.handle_done(ActionDone("start", wi.id, "start", s1.id)) # assert wi.current_step.name == "collect" assert len(events) == 0 events = wi.handle_done(ActionDone("collect", wi.id, "collect", s1.id)) assert wi.current_step.name == "collect" assert len(events) == 0 events = wi.handle_done(ActionDone("collect", wi.id, "collect", s2.id)) assert wi.current_step.name == "done" assert len(events) == 1 events = wi.handle_done(ActionDone("done", wi.id, "done", s1.id)) assert len(events) == 0 assert wi.current_step.name == "done" events = wi.handle_done(ActionDone("done", wi.id, "done", s2.id)) assert len(events) == 1 assert wi.is_active is False
async def start_interrupted_tasks(self) -> List[RunningTask]:
    """Resume all tasks that were still running when the process was interrupted.

    Reads the persisted running-task data from the database, reconstructs each
    task's in-memory state (FSM position, step state, timestamps), and restarts
    the current action when it is marked restartable. Persisted data whose task
    description no longer exists is deleted.

    Returns the list of resumed RunningTask instances.
    """
    descriptions = {w.id: w for w in self.task_descriptions}

    def reset_state(wi: RunningTask, task_data: RunningTaskData) -> RunningTask:
        # reset the received messages
        wi.received_messages = task_data.received_messages  # type: ignore
        # move the fsm into the last known state
        wi.machine.set_state(task_data.current_state_name)
        # import state of the current step
        wi.current_state.import_state(task_data.current_state_snapshot)
        # reset times
        wi.task_started_at = task_data.task_started_at
        wi.step_started_at = task_data.step_started_at
        # ignore all messages that would be emitted
        wi.move_to_next_state()
        return wi

    instances: List[RunningTask] = []
    async for data in self.running_task_db.all():
        descriptor = descriptions.get(data.task_descriptor_id)
        if descriptor:
            # we have captured the timestamp when the task has been started,
            # so the definition is evaluated relative to the original start time
            updated = self.evaluate_task_definition(descriptor, now=utc_str(data.task_started_at))
            rt = RunningTask(data.id, updated, self.subscription_handler.subscribers_by_event)
            instance = reset_state(rt, data)
            if isinstance(instance.current_step.action, RestartAgainStepAction):
                log.info(f"Restart interrupted action: {instance.current_step.action}")
                await self.execute_task_commands(instance, instance.current_state.commands_to_execute())
            instances.append(instance)
        else:
            # stale persisted data: the task description was removed in the meantime
            log.warning(f"No task description with this id found: {data.task_descriptor_id}. Remove instance data.")
            await self.running_task_db.delete(data.id)
    return instances
def workflow_instance(
    test_workflow: Workflow,
) -> Tuple[RunningTask, Subscriber, Subscriber, Dict[str, List[Subscriber]]]:
    """Build a RunningTask fixture mid-way through *test_workflow*.

    Two subscribers are registered: s1 listens to all three action types,
    s2 only to "collect" and "collect_done". A message history up to the
    first collect acknowledgement is replayed, then the state machine is
    advanced once. Returns the task, both subscribers and the subscription map.
    """
    ack_timeout = timedelta(seconds=100)
    # subscription objects are shared between both subscribers on purpose
    start_sub = Subscription("start_collect", True, ack_timeout)
    collect_sub = Subscription("collect", True, ack_timeout)
    done_sub = Subscription("collect_done", True, ack_timeout)
    s1 = Subscriber.from_list("s1", [start_sub, collect_sub, done_sub])
    s2 = Subscriber.from_list("s2", [collect_sub, done_sub])
    subscriptions = {"start_collect": [s1], "collect": [s1, s2], "collect_done": [s1, s2]}
    instance, _ = RunningTask.empty(test_workflow, lambda: subscriptions)
    # replay the history: start acknowledged by both, a matching event, and one collect ack
    instance.received_messages = [
        Action("start_collect", instance.id, "start"),
        ActionDone("start_collect", instance.id, "start", s1.id),
        ActionDone("start_collect", instance.id, "start", s2.id),
        Event("godot", {"a": 1, "b": 2}),
        Action("collect", instance.id, "collect"),
        ActionDone("collect", instance.id, "collect", s1.id),
    ]
    instance.move_to_next_state()
    return instance, s1, s2, subscriptions
async def execute_task_commands(
    self, wi: RunningTask, commands: Sequence[TaskCommand], origin_message: Optional[Message] = None
) -> None:
    """Execute *commands* for the running task *wi* in a background task.

    Execution is serialized per task via ``wi.update_task``: a new batch only
    starts after the previous one finished. The task state is persisted after
    each batch, and commands returned by ``handle_command_results`` are
    executed recursively.

    :param wi: the running task the commands belong to.
    :param commands: the commands to execute (messages to send or CLI invocations).
    :param origin_message: the message that triggered this execution, if any.
    """

    async def execute_commands() -> None:
        # execute and collect all task commands
        results: Dict[TaskCommand, Any] = {}
        for command in commands:
            if isinstance(command, SendMessage):
                await self.message_bus.emit(command.message)
                results[command] = None
            elif isinstance(command, ExecuteOnCLI):
                # TODO: instead of executing it in process, we should do an http call here to a worker core.
                # command env takes the descriptor environment as a base and overrides it
                ctx = CLIContext({**command.env, **wi.descriptor.environment})
                result = await self.cli.execute_cli_command(command.command, stream.list, ctx)
                results[command] = result
            else:
                raise AttributeError(f"Does not understand this command: {wi.descriptor.name}: {command}")
        # The descriptor might be removed in the mean time. If this is the case stop execution.
        if wi.descriptor_alive:
            active_before_result = wi.is_active
            # before we move on, we need to store the current state of the task (or delete if it is done)
            await self.store_running_task_state(wi, origin_message)
            # inform the task about the result, which might trigger new tasks to execute
            new_commands = wi.handle_command_results(results)
            if new_commands:
                # note: recursion depth is defined by the number of steps in a job description and should be safe.
                await self.execute_task_commands(wi, new_commands)
            elif active_before_result and not wi.is_active:
                # if this was the last result the task was waiting for, delete the task
                await self.store_running_task_state(wi, origin_message)

    async def execute_in_order(task: Task) -> None:  # type: ignore # pypy
        # make sure the last execution is finished, before the new execution starts
        await task
        await execute_commands()

    # start execution of commands in own task to not block the task handler
    # note: the task is awaited finally in the timeout handler or context handler shutdown
    wi.update_task = asyncio.create_task(execute_in_order(wi.update_task) if wi.update_task else execute_commands())
async def delete_running_task(self, task: RunningTask) -> None:
    """Tear down *task*: emit a completion analytics event, detach it from the
    handler, cancel any in-flight update, end it, and remove its database entry.

    Deletion from the database is best-effort: any exception is suppressed.
    """
    duration_in_seconds = (utc() - task.task_started_at).total_seconds()
    # send analytics event
    await self.event_sender.core_event(
        CoreEvent.TaskCompleted,
        {
            "task_descriptor_id": task.descriptor.id,
            "task_descriptor_name": task.descriptor.name,
            "kind": type(task.descriptor).__name__,
            "success": not task.is_error,
        },
        duration=duration_in_seconds,
        step_count=len(task.descriptor.steps),
    )
    task.descriptor_alive = False
    # remove tasks from list of running tasks
    self.tasks.pop(task.id, None)
    # cancel a still-running update before ending the task
    pending_update = task.update_task
    if pending_update and not pending_update.done():
        pending_update.cancel()
    # mark step as error
    task.end()
    # remove from database (best-effort)
    with suppress(Exception):
        await self.running_task_db.delete(task.id)
async def start_task_directly(self, desc: TaskDescription, reason: str) -> RunningTask:
    """Evaluate *desc*, spawn a new RunningTask for it, persist it, execute its
    initial commands, and emit a TaskStarted analytics event.

    :param desc: the task description to start.
    :param reason: free-form reason string, recorded in the analytics event.
    :return: the newly created running task.
    """
    evaluated = self.evaluate_task_definition(desc)
    task, initial_commands = RunningTask.empty(evaluated, self.subscription_handler.subscribers_by_event)
    log.info(f"Start new task: {evaluated.name} with id {task.id}")
    # store initial state in database before any command runs
    await self.running_task_db.insert(task)
    self.tasks[task.id] = task
    await self.execute_task_commands(task, initial_commands)
    await self.event_sender.core_event(
        CoreEvent.TaskStarted,
        {
            "reason": reason,
            "task_descriptor_id": evaluated.id,
            "task_descriptor_name": evaluated.name,
            "kind": type(evaluated).__name__,
        },
    )
    return task