Example #1
0
    def _request_task_rerun(self, task_id, route, reset_items=False):
        """Prepare the task at (task_id, route) to be run again.

        Clears the task's terminal markers and any recorded errors, then
        either resets item statuses (for with-items tasks) or stages a new
        task state entry so the task is picked up again. Downstream tasks in
        the same sequence also have their terminal marker cleared.

        :param task_id: id of the task to rerun.
        :param route: route index the task ran under.
        :param reset_items: when True, reset every item's status; otherwise
            only items in an abended status are reset.
        """
        task = self.workflow_state.get_task(task_id, route)
        # Copy the context indices and predecessors so later mutation of the
        # task entry does not affect the values staged below.
        task_ctx = json_util.deepcopy(task['ctxs']['in'])
        task_prev = json_util.deepcopy(task['prev'])
        task_spec = self.spec.tasks.get_task(task_id)

        # Reset terminal status for the rerunnable candidate.
        task.pop('term', None)
        task.pop('ignore', None)

        # Reset the list of errors for the task. Iterate over a filtered copy
        # because self.errors is mutated while removing.
        for e in [e for e in self.errors if e.get('task_id', None) == task_id]:
            self.errors.remove(e)

        # If task has items, then use existing staged task entry and reset failed items.
        if task_spec.has_items():
            staged_task = self.workflow_state.get_staged_task(task_id, route)
            for item in staged_task.get('items', []):
                if reset_items or item['status'] in statuses.ABENDED_STATUSES:
                    item['status'] = statuses.UNSET
        # Otherwise, add a new task state entry and stage task to be returned in get_next_tasks.
        else:
            self.add_task_state(task_id, route, in_ctx_idxs=task_ctx, prev=task_prev)
            self.workflow_state.add_staged_task(task_id, route, ctxs=task_ctx, prev=task_prev)

        # Reset terminal status for the task branch which will also be rerun.
        for _, next_task in self.workflow_state.get_task_sequence(task_id, route):
            next_task.pop('term', None)
Example #2
0
    def test_task_status_of_tasks_along_split(self):
        """Check task_status_ resolves per-route statuses across a split."""
        # Pointer keys map "<task>__r<route>" to the entry index in sequence.
        task_pointers = {
            't1__r0': 0,
            't2__r0': 1,
            't3__r0': 2,
            't4__r1': 3,
            't4__r2': 4,
            't5__r1': 5,
            't5__r2': 6,
            't6__r1': 7,
            't6__r2': 8
        }

        # Build the sequence entries from (id, route, status) triples.
        flow = [
            ('t1', 0, statuses.SUCCEEDED),
            ('t2', 0, statuses.SUCCEEDED),
            ('t3', 0, statuses.SUCCEEDED),
            ('t4', 1, statuses.SUCCEEDED),
            ('t4', 2, statuses.FAILED),
            ('t5', 1, statuses.SUCCEEDED),
            ('t5', 2, statuses.FAILED),
            ('t6', 1, statuses.SUCCEEDED),
            ('t6', 2, statuses.FAILED),
        ]

        task_flow_entries = [
            {'id': task_id, 'route': route, 'status': status}
            for task_id, route, status in flow
        ]

        routes = [
            [],
            ['t2__t0'],
            ['t3__t0'],
        ]

        context = {
            '__state': {
                'tasks': task_pointers,
                'sequence': task_flow_entries,
                'routes': routes
            }
        }

        # Route 1 succeeded end to end; route 2 failed from t4 onward.
        route_expectations = [
            (1, [statuses.SUCCEEDED] * 6),
            (2, [statuses.SUCCEEDED] * 3 + [statuses.FAILED] * 3),
        ]

        for route, expected_statuses in route_expectations:
            current_ctx = json_util.deepcopy(context)
            current_ctx['__current_task'] = {'id': 't6', 'route': route}

            task_names = ['t1', 't2', 't3', 't4', 't5', 't6']
            for task_name, expected in zip(task_names, expected_statuses):
                self.assertEqual(funcs.task_status_(current_ctx, task_name), expected)
Example #3
0
    def test_task_status_of_tasks_along_split(self):
        """Check task_status_ returns route-specific statuses after a split."""
        # Index of each "<task>__r<route>" entry within the sequence list.
        task_pointers = {
            "t1__r0": 0,
            "t2__r0": 1,
            "t3__r0": 2,
            "t4__r1": 3,
            "t4__r2": 4,
            "t5__r1": 5,
            "t5__r2": 6,
            "t6__r1": 7,
            "t6__r2": 8,
        }

        task_flow_entries = [
            {"id": "t1", "route": 0, "status": statuses.SUCCEEDED},
            {"id": "t2", "route": 0, "status": statuses.SUCCEEDED},
            {"id": "t3", "route": 0, "status": statuses.SUCCEEDED},
            {"id": "t4", "route": 1, "status": statuses.SUCCEEDED},
            {"id": "t4", "route": 2, "status": statuses.FAILED},
            {"id": "t5", "route": 1, "status": statuses.SUCCEEDED},
            {"id": "t5", "route": 2, "status": statuses.FAILED},
            {"id": "t6", "route": 1, "status": statuses.SUCCEEDED},
            {"id": "t6", "route": 2, "status": statuses.FAILED},
        ]

        routes = [
            [],
            ["t2__t0"],
            ["t3__t0"],
        ]

        context = {
            "__state": {"tasks": task_pointers, "sequence": task_flow_entries, "routes": routes}
        }

        # Everything succeeded along route 1; t4 through t6 failed on route 2.
        expected_statuses_per_route = {
            1: [statuses.SUCCEEDED] * 6,
            2: [statuses.SUCCEEDED] * 3 + [statuses.FAILED] * 3,
        }

        task_names = ["t1", "t2", "t3", "t4", "t5", "t6"]

        for route, expected_statuses in expected_statuses_per_route.items():
            current_ctx = json_util.deepcopy(context)
            current_ctx["__current_task"] = {"id": "t6", "route": route}

            for task_name, expected in zip(task_names, expected_statuses):
                self.assertEqual(funcs.task_status_(current_ctx, task_name), expected)
Example #4
0
 def serialize(self):
     """Return a serializable snapshot of this workflow execution.

     Log and error entries are deep-copied so the snapshot does not share
     mutable state with this instance.
     """
     data = {}
     data['spec'] = self.spec.serialize()
     data['graph'] = self.graph.serialize()
     data['input'] = self.get_workflow_input()
     data['context'] = self.get_workflow_parent_context()
     data['state'] = self.workflow_state.serialize()
     data['log'] = json_util.deepcopy(self.log)
     data['errors'] = json_util.deepcopy(self.errors)
     data['output'] = self.get_workflow_output()

     return data
Example #5
0
    def setup_retry_in_task_state(self, task_state_entry, in_ctx_idxs):
        """Initialize retry bookkeeping on the given task state entry.

        Copies the task's retry spec into the entry, zeroes the tally, and
        evaluates the 'delay' and 'count' options when they are given as
        string expressions.

        :param task_state_entry: mutable task state dict; must contain 'id'.
        :param in_ctx_idxs: context indices used to build the evaluation
            context for the delay/count expressions.
        :raises ValueError: if an evaluated delay or count is not an integer.
        """
        # Setup the retry in the task state.
        task_id = task_state_entry['id']
        task_retry_spec = self.graph.get_task_retry_spec(task_id)
        task_state_entry['retry'] = json_util.deepcopy(task_retry_spec)
        task_state_entry['retry']['tally'] = 0

        # Get task context for evaluating the expression in delay and count.
        in_ctx = self.get_task_context(in_ctx_idxs)

        def _evaluate_integer_option(key):
            # Only string values are expressions that need evaluation; numeric
            # values from the spec are kept as-is.
            retry = task_state_entry['retry']

            if key in retry and isinstance(retry[key], six.string_types):
                value = expr_base.evaluate(retry[key], in_ctx)

                if not isinstance(value, int):
                    raise ValueError(
                        'The retry %s for task "%s" is not an integer.' % (key, task_id)
                    )

                retry[key] = value

        # Evaluate the retry delay value.
        _evaluate_integer_option('delay')

        # Evaluate the retry count value.
        _evaluate_integer_option('count')
Example #6
0
    def get_workflow_terminal_context(self):
        """Merge the in-contexts of all terminal tasks into a single dict.

        :raises WorkflowContextError: if the workflow has not completed.
        """
        if self.get_workflow_status() not in statuses.COMPLETED_STATUSES:
            raise exc.WorkflowContextError('Workflow is not in completed status.')

        terminal_tasks = self.workflow_state.get_terminal_tasks()

        if not terminal_tasks:
            return {}

        # Seed the merged context from the first terminal task.
        _, first_task = terminal_tasks[0]
        merged_ctx = self.get_task_context(first_task['ctxs']['in'])

        for _, task in terminal_tasks[1:]:
            # The initial context (index 0) is already included via the first
            # terminal task above, so drop it and merge only the differences.
            ctx_idxs = json_util.deepcopy(task['ctxs']['in'])
            ctx_idxs.remove(0)

            merged_ctx = dict_util.merge_dicts(
                merged_ctx,
                self.get_task_context(ctx_idxs),
                overwrite=True
            )

        return merged_ctx
Example #7
0
    def deserialize(cls, data):
        """Reconstruct an instance from data produced by serialize().

        :param data: dict with 'spec', 'graph', 'input', 'context', 'state',
            'errors', 'output', and optionally 'log' keys.
        :return: a restored instance of cls.
        """
        spec_catalog = data['spec']['catalog']
        spec_module = spec_loader.get_spec_module(spec_catalog)
        wf_spec = spec_module.WorkflowSpec.deserialize(data['spec'])

        wf_graph = graphing.WorkflowGraph.deserialize(data['graph'])
        wf_input = json_util.deepcopy(data['input'])
        parent_ctx = json_util.deepcopy(data['context'])
        wf_state = WorkflowState.deserialize(data['state'])
        # 'log' may be absent in older serialized payloads; default to empty.
        wf_log = json_util.deepcopy(data.get('log', []))
        wf_errors = json_util.deepcopy(data['errors'])
        wf_output = json_util.deepcopy(data['output'])

        instance = cls(wf_spec)
        instance.restore(wf_graph, wf_log, wf_errors, wf_state, wf_input, wf_output, parent_ctx)

        return instance
Example #8
0
    def _evaluate_route(self, task_transition, prev_route):
        """Return the route index for the task targeted by task_transition.

        A split task that is not in a cycle branches off a new route that
        extends the previous route with the transition id; all other tasks
        stay on the previous route.

        :param task_transition: sequence of (source task id, destination task
            id, transition index) — assumed from the indexing here; confirm
            against callers.
        :param prev_route: index of the route the transition originated from.
        :return: index into self.workflow_state.routes for the next task.
        """
        task_id = task_transition[1]

        prev_task_transition_id = (
            constants.TASK_STATE_TRANSITION_FORMAT %
            (task_transition[0], str(task_transition[2]))
        )

        is_split_task = self.spec.tasks.is_split_task(task_id)
        is_in_cycle = self.graph.in_cycle(task_id)

        if not is_split_task or is_in_cycle:
            return prev_route

        old_route_details = self.workflow_state.routes[prev_route]

        # If the transition is already recorded on the previous route, the
        # extended route would be identical, so reuse the previous route.
        # This membership test replaces the original deepcopy-and-compare.
        if prev_task_transition_id in old_route_details:
            return prev_route

        # Branch off a new route extending the previous one. Route entries are
        # transition id strings, so a shallow copy via concatenation suffices.
        new_route_details = old_route_details + [prev_task_transition_id]
        self.workflow_state.routes.append(new_route_details)

        return len(self.workflow_state.routes) - 1