def test_resume_failed_task_and_successful_task(self, workflow_context, thread_executor):
    """Resume a failed execution with ``retry_failed=True`` and verify the
    previously failed task is retried to success while the succeeded task is
    left untouched.

    First run: two parallel tasks, ``mock_pass_first_task_only`` lets one task
    succeed and the other fail (2 attempts -> 3 invocations total). Resume:
    the failed task is retried once more and succeeds (4 invocations total).
    """
    node = workflow_context.model.node.get_by_name(
        tests_mock.models.DEPENDENCY_NODE_NAME)
    node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
    self._create_interface(workflow_context, node, mock_pass_first_task_only)

    ctx = self._prepare_execution_and_get_workflow_ctx(
        workflow_context.model,
        workflow_context.resource,
        workflow_context.model.service.list()[0],
        mock_parallel_tasks_workflow,
        thread_executor,
        inputs={'retry_interval': 1, 'max_attempts': 2, 'number_of_tasks': 2})

    eng = engine.Engine(thread_executor)
    wf_thread = Thread(target=eng.execute, kwargs=dict(ctx=ctx))
    # `daemon = True` replaces the deprecated `setDaemon(True)` call.
    wf_thread.daemon = True
    wf_thread.start()

    if custom_events['execution_failed'].wait(60) is False:
        raise TimeoutError("Execution did not end")

    tasks = workflow_context.model.task.list(filters={'_stub_type': None})
    node = workflow_context.model.node.refresh(node)
    # 1 invocation for the passing task + 2 attempts for the failing one.
    assert node.attributes['invocations'].value == 3
    failed_task = [t for t in tasks if t.status == t.FAILED][0]

    # One task failed and exhausted both of its attempts ...
    assert any(task.status == task.FAILED for task in tasks)
    assert failed_task.attempts_count == 2
    # ... while the other task succeeded on its first invocation.
    assert any(task.status == task.SUCCESS for task in tasks)
    # `==`, not `in`: status is a single string, so a membership test would be
    # an accidental substring check.
    assert ctx.execution.status == ctx.execution.FAILED

    custom_events['is_resumed'].set()

    new_thread_executor = thread.ThreadExecutor()
    try:
        new_engine = engine.Engine(new_thread_executor)
        new_engine.execute(ctx, resuming=True, retry_failed=True)
    finally:
        new_thread_executor.close()

    # Wait for it to finish and assert changes.
    node = workflow_context.model.node.refresh(node)
    # The retried task succeeded on its single new attempt.
    assert failed_task.attempts_count == 1
    assert node.attributes['invocations'].value == 4
    assert all(task.status == task.SUCCESS for task in tasks)
    assert ctx.execution.status == ctx.execution.SUCCEEDED
def test_resume_failed_task_and_successful_task(self, workflow_context, thread_executor):
    """Resume a failed execution via a new ``WorkflowRunner`` with
    ``retry_failed_tasks=True`` and verify the previously failed task is
    retried to success while the succeeded task is left untouched.

    First run: two parallel tasks, ``mock_pass_first_task_only`` lets one task
    succeed and the other fail (2 attempts -> 3 invocations total). Resume:
    the failed task is retried once more and succeeds (4 invocations total).
    """
    node = workflow_context.model.node.get_by_name(
        tests_mock.models.DEPENDENCY_NODE_NAME)
    node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
    self._create_interface(workflow_context, node, mock_pass_first_task_only)

    wf_runner = self._create_initial_workflow_runner(
        workflow_context,
        mock_parallel_tasks_workflow,
        thread_executor,
        inputs={'retry_interval': 1, 'max_attempts': 2, 'number_of_tasks': 2})

    wf_thread = Thread(target=wf_runner.execute)
    # `daemon = True` replaces the deprecated `setDaemon(True)` call.
    wf_thread.daemon = True
    wf_thread.start()

    if custom_events['execution_failed'].wait(60) is False:
        raise TimeoutError("Execution did not end")

    tasks = workflow_context.model.task.list(filters={'_stub_type': None})
    node = workflow_context.model.node.refresh(node)
    # 1 invocation for the passing task + 2 attempts for the failing one.
    assert node.attributes['invocations'].value == 3
    failed_task = [t for t in tasks if t.status == t.FAILED][0]

    # One task failed and exhausted both of its attempts ...
    assert any(task.status == task.FAILED for task in tasks)
    assert failed_task.attempts_count == 2
    # ... while the other task succeeded on its first invocation.
    assert any(task.status == task.SUCCESS for task in tasks)
    # `==`, not `in`: status is a single string, so a membership test would be
    # an accidental substring check.
    assert wf_runner.execution.status == wf_runner.execution.FAILED

    custom_events['is_resumed'].set()

    new_thread_executor = thread.ThreadExecutor()
    try:
        new_wf_runner = WorkflowRunner(
            service_id=wf_runner.service.id,
            retry_failed_tasks=True,
            inputs={},
            model_storage=workflow_context.model,
            resource_storage=workflow_context.resource,
            plugin_manager=None,
            execution_id=wf_runner.execution.id,
            executor=new_thread_executor)
        new_wf_runner.execute()
    finally:
        new_thread_executor.close()

    # Wait for it to finish and assert changes.
    node = workflow_context.model.node.refresh(node)
    # The retried task succeeded on its single new attempt.
    assert failed_task.attempts_count == 1
    assert node.attributes['invocations'].value == 4
    assert all(task.status == task.SUCCESS for task in tasks)
    assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED
def test_resume_failed_task(self, workflow_context, thread_executor):
    """Cancel an execution while its task is STARTED, then resume it with a
    fresh ``WorkflowRunner`` bound to the same execution id and verify the
    interrupted task is restarted and eventually succeeds.
    """
    node = workflow_context.model.node.get_by_name(
        tests_mock.models.DEPENDENCY_NODE_NAME)
    node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
    self._create_interface(workflow_context, node, mock_failed_before_resuming)

    wf_runner = self._create_initial_workflow_runner(
        workflow_context, mock_single_task_workflow, thread_executor)

    wf_thread = Thread(target=wf_runner.execute)
    # `daemon = True` replaces the deprecated `setDaemon(True)` call.
    wf_thread.daemon = True
    wf_thread.start()

    self._wait_for_active_and_cancel(wf_runner)

    task = workflow_context.model.task.list(filters={'_stub_type': None})[0]
    # NOTE(review): the engine-flavored twin of this test refreshes `node`
    # before these asserts; presumably the in-memory model proxy is live here —
    # confirm a refresh is not needed.
    assert node.attributes['invocations'].value == 2
    # The task was interrupted mid-run, not completed.
    assert task.status == task.STARTED
    assert wf_runner.execution.status in (wf_runner.execution.CANCELLED,
                                          wf_runner.execution.CANCELLING)

    custom_events['is_resumed'].set()
    assert node.attributes['invocations'].value == 2

    # Create a new workflow runner, with an existing execution id. This would
    # cause the old execution to restart.
    new_thread_executor = thread.ThreadExecutor()
    try:
        new_wf_runner = WorkflowRunner(
            service_id=wf_runner.service.id,
            inputs={},
            model_storage=workflow_context.model,
            resource_storage=workflow_context.resource,
            plugin_manager=None,
            execution_id=wf_runner.execution.id,
            executor=new_thread_executor)
        new_wf_runner.execute()
    finally:
        new_thread_executor.close()

    # Wait for it to finish and assert changes.
    assert node.attributes['invocations'].value == task.max_attempts - 1
    assert task.status == task.SUCCESS
    assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED
def test_resume_failed_task(self, workflow_context, thread_executor):
    """Cancel an execution while its task is STARTED, then resume it through a
    fresh engine with ``resuming=True`` and verify the interrupted task is
    restarted and eventually succeeds.
    """
    node = workflow_context.model.node.get_by_name(
        tests_mock.models.DEPENDENCY_NODE_NAME)
    node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
    self._create_interface(workflow_context, node, mock_failed_before_resuming)

    # NOTE(review): the WorkflowRunner-flavored twin of this test runs
    # `mock_single_task_workflow`; this one runs `mock_parallel_tasks_workflow`
    # with no `number_of_tasks` input — confirm which workflow is intended.
    ctx = self._prepare_execution_and_get_workflow_ctx(
        workflow_context.model,
        workflow_context.resource,
        workflow_context.model.service.list()[0],
        mock_parallel_tasks_workflow,
        thread_executor)

    eng = engine.Engine(thread_executor)
    wf_thread = Thread(target=eng.execute, kwargs=dict(ctx=ctx))
    # `daemon = True` replaces the deprecated `setDaemon(True)` call.
    wf_thread.daemon = True
    wf_thread.start()

    self._cancel_active_execution(eng, ctx)
    node = workflow_context.model.node.refresh(node)

    task = workflow_context.model.task.list(filters={'_stub_type': None})[0]
    assert node.attributes['invocations'].value == 2
    # The task was interrupted mid-run, not completed.
    assert task.status == task.STARTED
    assert ctx.execution.status in (ctx.execution.CANCELLED,
                                    ctx.execution.CANCELLING)

    custom_events['is_resumed'].set()
    assert node.attributes['invocations'].value == 2

    # Create a new workflow runner, with an existing execution id. This would
    # cause the old execution to restart.
    new_thread_executor = thread.ThreadExecutor()
    try:
        new_engine = engine.Engine(new_thread_executor)
        new_engine.execute(ctx, resuming=True)
    finally:
        new_thread_executor.close()

    # Wait for it to finish and assert changes.
    node = workflow_context.model.node.refresh(node)
    assert node.attributes['invocations'].value == task.max_attempts - 1
    assert task.status == task.SUCCESS
    assert ctx.execution.status == ctx.execution.SUCCEEDED
def test_resume_started_task(self, workflow_context, thread_executor):
    """Cancel an execution whose single task is stuck in STARTED, then resume
    it with a new ``WorkflowRunner`` on the same execution id and verify the
    task is re-run to success.
    """
    node = workflow_context.model.node.get_by_name(
        tests_mock.models.DEPENDENCY_NODE_NAME)
    node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
    self._create_interface(workflow_context, node, mock_stuck_task)

    runner = self._create_initial_workflow_runner(
        workflow_context,
        mock_parallel_tasks_workflow,
        thread_executor,
        inputs={'number_of_tasks': 1})

    worker = Thread(target=runner.execute)
    worker.daemon = True
    worker.start()

    self._wait_for_active_and_cancel(runner)

    node = workflow_context.model.node.refresh(node)
    stuck_task = workflow_context.model.task.list(
        filters={'_stub_type': None})[0]
    # The task ran once and was interrupted before completing.
    assert node.attributes['invocations'].value == 1
    assert stuck_task.status == stuck_task.STARTED
    cancel_states = (runner.execution.CANCELLED, runner.execution.CANCELLING)
    assert runner.execution.status in cancel_states

    custom_events['is_resumed'].set()

    # Resume on a brand-new executor/runner pair bound to the old execution.
    resume_executor = thread.ThreadExecutor()
    try:
        resume_runner = WorkflowRunner(
            service_id=runner.service.id,
            inputs={},
            model_storage=workflow_context.model,
            resource_storage=workflow_context.resource,
            plugin_manager=None,
            execution_id=runner.execution.id,
            executor=resume_executor)
        resume_runner.execute()
    finally:
        resume_executor.close()

    # Wait for it to finish and assert changes.
    node = workflow_context.model.node.refresh(node)
    assert node.attributes['invocations'].value == 2
    assert stuck_task.status == stuck_task.SUCCESS
    assert runner.execution.status == runner.execution.SUCCEEDED
def thread_executor():
    """Yield a ``thread.ThreadExecutor`` and guarantee it is closed afterwards."""
    executor = thread.ThreadExecutor()
    try:
        yield executor
    finally:
        executor.close()
def thread_executor():
    """Yield a ``thread.ThreadExecutor``, closing it once the consumer is done."""
    task_executor = thread.ThreadExecutor()
    try:
        yield task_executor
    finally:
        task_executor.close()