Example #1
0
    def test_resume_failed_task_and_successful_task(self, workflow_context, thread_executor):
        """Fail a workflow run, then resume it with ``retry_failed_tasks=True``.

        The first run (two parallel tasks, max_attempts=2) leaves one task
        SUCCESS and one task FAILED.  Resuming with retry_failed_tasks must
        re-run only the failed task and finish the execution as SUCCEEDED.
        """
        node = workflow_context.model.node.get_by_name(tests_mock.models.DEPENDENCY_NODE_NAME)
        node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
        self._create_interface(workflow_context, node, mock_pass_first_task_only)

        wf_runner = self._create_initial_workflow_runner(
            workflow_context,
            mock_parallel_tasks_workflow,
            thread_executor,
            inputs={'retry_interval': 1, 'max_attempts': 2, 'number_of_tasks': 2}
        )
        wf_thread = Thread(target=wf_runner.execute)
        # `daemon` attribute rather than the deprecated setDaemon()
        wf_thread.daemon = True
        wf_thread.start()

        if custom_events['execution_failed'].wait(60) is False:
            raise TimeoutError("Execution did not end")

        tasks = workflow_context.model.task.list(filters={'_stub_type': None})
        node = workflow_context.model.node.refresh(node)
        # one successful invocation + two failed attempts of the failing task
        assert node.attributes['invocations'].value == 3
        failed_task = [t for t in tasks if t.status == t.FAILED][0]

        # Second task fails after exhausting both attempts
        assert any(task.status == task.FAILED for task in tasks)
        assert failed_task.attempts_count == 2
        # First task passes
        assert any(task.status == task.SUCCESS for task in tasks)
        # equality, not substring membership (`in`), against the status constant
        assert wf_runner.execution.status == wf_runner.execution.FAILED

        custom_events['is_resumed'].set()
        new_thread_executor = thread.ThreadExecutor()
        try:
            new_wf_runner = WorkflowRunner(
                service_id=wf_runner.service.id,
                retry_failed_tasks=True,
                inputs={},
                model_storage=workflow_context.model,
                resource_storage=workflow_context.resource,
                plugin_manager=None,
                execution_id=wf_runner.execution.id,
                executor=new_thread_executor)

            new_wf_runner.execute()
        finally:
            new_thread_executor.close()

        # Wait for it to finish and assert changes.
        node = workflow_context.model.node.refresh(node)
        # the attempts counter of the retried task is reset by the resume
        assert failed_task.attempts_count == 1
        assert node.attributes['invocations'].value == 4
        assert all(task.status == task.SUCCESS for task in tasks)
        assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED
Example #2
0
def resume_execution(execution_id, model_storage, resource_storage,
                     plugin_manager, logger):
    """
    Resume the specified execution.

    Only an execution in CANCELLED state may be resumed; any other state
    yields a 400 response.  Optional JSON body fields: ``executor`` ('dry'
    for a dry run) and ``retry_failed_tasks`` (default False).
    """
    body = request.json
    execution = model_storage.execution.get(execution_id)
    # Status constants live on the model object (cf. `execution.CANCELLED`
    # usage elsewhere in this file), not on the status value itself.
    if execution.status != execution.CANCELLED:
        return "only executions in a cancelled state can be resumed", 400
    executor = DryExecutor() if body.get('executor') == 'dry' else None
    retry_failed_tasks = body.get('retry_failed_tasks', False)

    runner = WorkflowRunner(model_storage,
                            resource_storage,
                            plugin_manager,
                            execution_id=execution_id,
                            executor=executor,
                            retry_failed_tasks=retry_failed_tasks)

    tname = '{}_{}_{}'.format(execution.service.name, execution.workflow_name,
                              runner.execution_id)
    # Daemon thread: the resumed run must not block interpreter shutdown.
    thread = threading.ExceptionThread(target=runner.execute,
                                       name=tname,
                                       daemon=True)
    thread.start()
    execution_state[str(runner.execution_id)] = [runner, thread]
    return jsonify({"id": runner.execution_id}), 202
Example #3
0
def start_execution(service_id, workflow_name, model_storage, resource_storage,
                    plugin_manager, logger):
    """
    Start an execution for the specified service.

    Optional JSON body fields: ``executor`` ('dry' for a dry run),
    ``inputs``, ``task_max_attempts`` (default 30) and
    ``task_retry_interval`` (default 30).
    """
    body = request.json
    executor = DryExecutor() if body.get('executor') == 'dry' else None

    # dict.get with a default replaces the `'k' in body`-then-index pattern
    inputs = body.get('inputs')
    task_max_attempts = body.get('task_max_attempts', 30)
    task_retry_interval = body.get('task_retry_interval', 30)

    runner = WorkflowRunner(model_storage,
                            resource_storage,
                            plugin_manager,
                            service_id=service_id,
                            workflow_name=workflow_name,
                            inputs=inputs,
                            executor=executor,
                            task_max_attempts=task_max_attempts,
                            task_retry_interval=task_retry_interval)

    service = model_storage.service.get(service_id)
    tname = '{}_{}_{}'.format(service.name, workflow_name, runner.execution_id)
    thread = threading.ExceptionThread(target=runner.execute, name=tname)
    thread.start()
    execution_state[str(runner.execution_id)] = [runner, thread]
    return jsonify({"id": runner.execution_id}), 202
Example #4
0
def _create_workflow_runner(request, workflow_name, inputs=None, executor=None,
                            task_max_attempts=None, task_retry_interval=None):
    """Build a WorkflowRunner from the pytest fixtures hanging off *request*."""
    get_fixture = request.getfixturevalue

    # The runner rejects None for the task configuration parameters, so only
    # forward the ones a test explicitly set.
    candidates = {
        'task_max_attempts': task_max_attempts,
        'task_retry_interval': task_retry_interval,
    }
    task_configuration_kwargs = {name: value
                                 for name, value in candidates.items()
                                 if value is not None}

    return WorkflowRunner(
        workflow_name=workflow_name,
        service_id=get_fixture('service').id,
        inputs=inputs or {},
        executor=executor,
        model_storage=get_fixture('model'),
        resource_storage=get_fixture('resource'),
        plugin_manager=get_fixture('plugin_manager'),
        **task_configuration_kwargs)
Example #5
0
    def _create_initial_workflow_runner(self,
                                        workflow_context,
                                        workflow,
                                        executor,
                                        inputs=None):
        """Register *workflow* on the service under the name
        'custom_workflow' and return a WorkflowRunner set up to run it."""
        inputs = inputs or {}
        wrapped_inputs = {name: models.Input.wrap(name, value)
                          for name, value in inputs.items()}

        operation = tests_mock.models.create_operation(
            'custom_workflow',
            operation_kwargs={
                'function': '{0}.{1}'.format(__name__, workflow.__name__),
                'inputs': wrapped_inputs,
            })

        service = workflow_context.service
        service.workflows['custom_workflow'] = operation
        workflow_context.model.service.update(service)

        return WorkflowRunner(service_id=workflow_context.service.id,
                              inputs=inputs,
                              model_storage=workflow_context.model,
                              resource_storage=workflow_context.resource,
                              plugin_manager=None,
                              workflow_name='custom_workflow',
                              executor=executor)
Example #6
0
    def test_resume_failed_task(self, workflow_context, thread_executor):
        """Cancel a workflow while its task is STARTED, then resume it.

        After resuming with a new WorkflowRunner bound to the same execution
        id, the interrupted task must complete and the execution must end
        up SUCCEEDED.
        """
        node = workflow_context.model.node.get_by_name(
            tests_mock.models.DEPENDENCY_NODE_NAME)
        node.attributes['invocations'] = models.Attribute.wrap(
            'invocations', 0)
        self._create_interface(workflow_context, node,
                               mock_failed_before_resuming)

        wf_runner = self._create_initial_workflow_runner(
            workflow_context, mock_single_task_workflow, thread_executor)
        wf_thread = Thread(target=wf_runner.execute)
        # `daemon` attribute rather than the deprecated setDaemon(),
        # consistent with the other tests in this file
        wf_thread.daemon = True
        wf_thread.start()

        self._wait_for_active_and_cancel(wf_runner)

        task = workflow_context.model.task.list(
            filters={'_stub_type': None})[0]
        assert node.attributes['invocations'].value == 2
        assert task.status == task.STARTED
        assert wf_runner.execution.status in (wf_runner.execution.CANCELLED,
                                              wf_runner.execution.CANCELLING)

        custom_events['is_resumed'].set()
        assert node.attributes['invocations'].value == 2

        # Create a new workflow runner, with an existing execution id. This would cause
        # the old execution to restart.
        new_thread_executor = thread.ThreadExecutor()
        try:
            new_wf_runner = WorkflowRunner(
                service_id=wf_runner.service.id,
                inputs={},
                model_storage=workflow_context.model,
                resource_storage=workflow_context.resource,
                plugin_manager=None,
                execution_id=wf_runner.execution.id,
                executor=new_thread_executor)

            new_wf_runner.execute()
        finally:
            new_thread_executor.close()

        # Wait for it to finish and assert changes.
        assert node.attributes['invocations'].value == task.max_attempts - 1
        assert task.status == task.SUCCESS
        assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED
Example #7
0
    def test_resume_started_task(self, workflow_context, thread_executor):
        """Cancel an execution while its single task is STARTED, then resume.

        On resume the stuck task is re-run (second invocation) and the
        execution finishes SUCCEEDED.
        """
        node = workflow_context.model.node.get_by_name(tests_mock.models.DEPENDENCY_NODE_NAME)
        node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
        self._create_interface(workflow_context, node, mock_stuck_task)

        wf_runner = self._create_initial_workflow_runner(
            workflow_context, mock_parallel_tasks_workflow, thread_executor,
            inputs={'number_of_tasks': 1})

        wf_thread = Thread(target=wf_runner.execute)
        wf_thread.daemon = True
        wf_thread.start()

        # Cancel once the execution becomes active; the stuck task remains
        # in STARTED state.
        self._wait_for_active_and_cancel(wf_runner)
        node = workflow_context.model.node.refresh(node)
        task = workflow_context.model.task.list(filters={'_stub_type': None})[0]
        assert node.attributes['invocations'].value == 1
        assert task.status == task.STARTED
        assert wf_runner.execution.status in (wf_runner.execution.CANCELLED,
                                              wf_runner.execution.CANCELLING)
        # Unblock the stuck operation so the resumed run can finish.
        custom_events['is_resumed'].set()

        new_thread_executor = thread.ThreadExecutor()
        try:
            # A new runner created with the existing execution id restarts
            # the old execution.
            new_wf_runner = WorkflowRunner(
                service_id=wf_runner.service.id,
                inputs={},
                model_storage=workflow_context.model,
                resource_storage=workflow_context.resource,
                plugin_manager=None,
                execution_id=wf_runner.execution.id,
                executor=new_thread_executor)

            new_wf_runner.execute()
        finally:
            new_thread_executor.close()

        # Wait for it to finish and assert changes.
        node = workflow_context.model.node.refresh(node)
        assert node.attributes['invocations'].value == 2
        assert task.status == task.SUCCESS
        assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED
Example #8
0
    def test_resume_workflow(self, workflow_context, thread_executor):
        """Cancel a two-task workflow mid-run, then resume it to completion.

        The first run leaves one task SUCCESS and one RETRYING; resuming
        the same execution drives all tasks to SUCCESS.
        """
        node = workflow_context.model.node.get_by_name(tests_mock.models.DEPENDENCY_NODE_NAME)
        node.attributes['invocations'] = models.Attribute.wrap('invocations', 0)
        self._create_interface(workflow_context, node, mock_pass_first_task_only)

        wf_runner = self._create_initial_workflow_runner(
            workflow_context, mock_parallel_tasks_workflow, thread_executor,
            inputs={'number_of_tasks': 2})

        wf_thread = Thread(target=wf_runner.execute)
        wf_thread.daemon = True
        wf_thread.start()

        # Wait for the execution to start
        self._wait_for_active_and_cancel(wf_runner)
        node = workflow_context.model.node.refresh(node)

        tasks = workflow_context.model.task.list(filters={'_stub_type': None})
        assert any(task.status == task.SUCCESS for task in tasks)
        assert any(task.status == task.RETRYING for task in tasks)
        custom_events['is_resumed'].set()
        # Setting the event alone must not advance the retrying task.
        assert any(task.status == task.RETRYING for task in tasks)

        # Create a new workflow runner, with an existing execution id. This would cause
        # the old execution to restart.
        new_wf_runner = WorkflowRunner(
            service_id=wf_runner.service.id,
            inputs={},
            model_storage=workflow_context.model,
            resource_storage=workflow_context.resource,
            plugin_manager=None,
            execution_id=wf_runner.execution.id,
            executor=thread_executor)

        new_wf_runner.execute()

        # Wait for it to finish and assert changes.
        node = workflow_context.model.node.refresh(node)
        assert all(task.status == task.SUCCESS for task in tasks)
        assert node.attributes['invocations'].value == 3
        assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED
Example #9
0
    def test_resume_workflow(self, workflow_context, executor):
        """Cancel a custom workflow mid-run, then resume it to completion.

        Registers ``mock_workflow`` on the service as 'custom_workflow',
        cancels the first run while a task is still FAILED/RETRYING, and
        resumes the same execution until all tasks are SUCCESS.
        """
        node = workflow_context.model.node.get_by_name(
            tests_mock.models.DEPENDENCY_NODE_NAME)
        node.attributes['invocations'] = models.Attribute.wrap(
            'invocations', 0)
        self._create_interface(workflow_context, node, mock_resuming_task)

        # Register the mock workflow on the service under a custom name.
        service = workflow_context.service
        service.workflows[
            'custom_workflow'] = tests_mock.models.create_operation(
                'custom_workflow',
                operation_kwargs={
                    'function': '{0}.{1}'.format(__name__,
                                                 mock_workflow.__name__)
                })
        workflow_context.model.service.update(service)

        wf_runner = WorkflowRunner(service_id=workflow_context.service.id,
                                   inputs={},
                                   model_storage=workflow_context.model,
                                   resource_storage=workflow_context.resource,
                                   plugin_manager=None,
                                   workflow_name='custom_workflow',
                                   executor=executor)
        wf_thread = Thread(target=wf_runner.execute)
        wf_thread.daemon = True
        wf_thread.start()

        # Wait for the execution to start
        if events['is_active'].wait(5) is False:
            raise TimeoutError("is_active wasn't set to True")
        wf_runner.cancel()

        if events['execution_ended'].wait(60) is False:
            raise TimeoutError("Execution did not end")

        tasks = workflow_context.model.task.list(filters={'_stub_type': None})
        assert any(task.status == task.SUCCESS for task in tasks)
        assert any(task.status in (task.FAILED, task.RETRYING)
                   for task in tasks)
        events['is_resumed'].set()
        # Setting the event alone must not change any task's state.
        assert any(task.status in (task.FAILED, task.RETRYING)
                   for task in tasks)

        # Create a new workflow runner, with an existing execution id. This would cause
        # the old execution to restart.
        new_wf_runner = WorkflowRunner(
            service_id=wf_runner.service.id,
            inputs={},
            model_storage=workflow_context.model,
            resource_storage=workflow_context.resource,
            plugin_manager=None,
            execution_id=wf_runner.execution.id,
            executor=executor)

        new_wf_runner.execute()

        # Wait for it to finish and assert changes.
        assert all(task.status == task.SUCCESS for task in tasks)
        assert node.attributes['invocations'].value == 3
        assert wf_runner.execution.status == wf_runner.execution.SUCCEEDED