Example #1
def get_task_status(request, task_id, bk_biz_id):
    try:
        task = TaskFlowInstance.objects.get(pk=task_id, business__cc_id=bk_biz_id)
        task_status = task.get_status()
        result = {
            'result': True,
            'data': task_status
        }
        return JsonResponse(result)
    # for subprocess status, query directly through the pipeline api
    except (ValueError, TaskFlowInstance.DoesNotExist):
        logger.info('task[id=%s] does not exist' % task_id)
    except Exception as e:
        message = 'task[id=%s] get status error: %s' % (task_id, e)
        logger.error(message)
        result = {'result': False, 'message': message}
        return JsonResponse(result)
    try:
        task_status = pipeline_api.get_status_tree(task_id, max_depth=99)
        TaskFlowInstance.format_pipeline_status(task_status)
    except Exception as e:
        message = 'task[id=%s] get status error: %s' % (task_id, e)
        logger.error(message)
        result = {'result': False, 'message': message}
        return JsonResponse(result)
    result = {
        'result': True,
        'data': task_status
    }
    return JsonResponse(result)
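Note: examples #1-#4 and #7 pass the raw tree returned by pipeline_api.get_status_tree through TaskFlowInstance.format_pipeline_status before responding. That formatter is not included in this listing; the following is only a hedged sketch of the kind of recursive walk such a helper might perform, assuming each node carries started_time, archived_time and a children dict as seen in example #12. The name format_status_tree_sketch is made up for illustration.

from datetime import datetime


def format_status_tree_sketch(status_tree):
    # Hypothetical sketch only: the real TaskFlowInstance.format_pipeline_status
    # is not shown in these examples.
    started = status_tree.get("started_time")
    archived = status_tree.get("archived_time")
    if started and archived:
        status_tree["elapsed_time"] = int((archived - started).total_seconds())
    elif started:
        # node is still running: measure against "now" (naive datetimes assumed)
        status_tree["elapsed_time"] = int((datetime.now() - started).total_seconds())
    else:
        status_tree["elapsed_time"] = 0
    # recurse into child nodes
    for child in status_tree.get("children", {}).values():
        format_status_tree_sketch(child)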
Example #2
def status(request, biz_cc_id):
    instance_id = request.GET.get('instance_id')
    try:
        task = TaskFlowInstance.objects.get(pk=instance_id,
                                            business__cc_id=biz_cc_id)
        task_status = task.get_status()
        ctx = {'result': True, 'data': task_status}
        return JsonResponse(ctx)
    # for subprocess status, query directly through the pipeline api
    except (ValueError, TaskFlowInstance.DoesNotExist):
        logger.info('taskflow[id=%s] does not exist' % instance_id)
    except Exception as e:
        message = 'taskflow[id=%s] get status error: %s' % (instance_id, e)
        logger.error(message)
        ctx = {'result': False, 'message': message}
        return JsonResponse(ctx)
    try:
        task_status = pipeline_api.get_status_tree(instance_id, max_depth=99)
        TaskFlowInstance.format_pipeline_status(task_status)
        ctx = {'result': True, 'data': task_status}
    # subprocess pipeline has not executed
    except exceptions.InvalidOperationException:
        ctx = {'result': True, 'data': {'state': states.CREATED}}
    except Exception as e:
        message = 'taskflow[id=%s] get status error: %s' % (instance_id, e)
        logger.error(message)
        ctx = {'result': False, 'message': message}
    return JsonResponse(ctx)
Example #3
File: views.py  Project: manlucas/atom
def get_task_status(request, task_id, project_id):
    project = request.project
    try:
        task = TaskFlowInstance.objects.get(pk=task_id, project_id=project.id, is_deleted=False)
        task_status = task.get_status()
        result = {
            'result': True,
            'data': task_status
        }
        return JsonResponse(result)
    # for subprocess status, query directly through the pipeline api
    except (ValueError, TaskFlowInstance.DoesNotExist):
        logger.info('task[id=%s] does not exist' % task_id)
    except Exception as e:
        message = 'task[id={task_id}] get status error: {error}'.format(task_id=task_id, error=e)
        logger.error(message)
        result = {'result': False, 'message': message}
        return JsonResponse(result)

    try:
        task_status = pipeline_api.get_status_tree(task_id, max_depth=99)
        TaskFlowInstance.format_pipeline_status(task_status)
    except Exception as e:
        message = 'task[id={task_id}] get status error: {error}'.format(task_id=task_id, error=e)
        logger.error(message)
        result = {'result': False, 'message': message}
        return JsonResponse(result)
    result = {
        'result': True,
        'data': task_status
    }
    return JsonResponse(result)
Example #4
File: api.py  Project: manlucas/atom
def status(request, project_id):
    instance_id = request.GET.get('instance_id')
    subprocess_id = request.GET.get('subprocess_id')

    if not subprocess_id:
        try:
            task = TaskFlowInstance.objects.get(pk=instance_id,
                                                project_id=project_id)
            task_status = task.get_status()
            ctx = {'result': True, 'data': task_status}
            return JsonResponse(ctx)
        except Exception as e:
            message = 'taskflow[id=%s] get status error: %s' % (instance_id, e)
            logger.error(message)
            ctx = {'result': False, 'message': message}
            return JsonResponse(ctx)

    # for subprocess status, query directly through the pipeline api
    try:
        task_status = pipeline_api.get_status_tree(subprocess_id, max_depth=99)
        TaskFlowInstance.format_pipeline_status(task_status)
        ctx = {'result': True, 'data': task_status}
    # subprocess pipeline has not executed
    except exceptions.InvalidOperationException:
        ctx = {'result': True, 'data': {'state': states.CREATED}}
    except Exception as e:
        message = 'taskflow[id=%s] get status error: %s' % (instance_id, e)
        logger.error(message)
        ctx = {'result': False, 'message': message}
    return JsonResponse(ctx)
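Note: every view above returns the same JSON envelope, {'result': bool, 'data': ..., 'message': ...}. Below is a hedged caller-side sketch of consuming that envelope; the URL pattern and the requests-based client are illustrative assumptions, since routing is not part of these examples.

import requests


def fetch_task_status(base_url, project_id, instance_id, subprocess_id=None):
    # Hypothetical client: the URL pattern below is made up for illustration.
    params = {"instance_id": instance_id}
    if subprocess_id:
        params["subprocess_id"] = subprocess_id
    resp = requests.get(
        "{}/taskflow/api/status/{}/".format(base_url, project_id), params=params)
    payload = resp.json()
    if not payload["result"]:
        raise RuntimeError(payload.get("message", "unknown error"))
    return payload["data"]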
Example #5
def pipeline_post_save_handler(sender, instance, created, **kwargs):
    # the task must have finished; this is triggered by celery
    if not created and instance.is_finished:
        # get the execution tree of the task instance
        status_tree = get_status_tree(instance.instance_id, 99)
        # delete existing atom execution data
        ComponentExecuteData.objects.filter(instance_id=instance.id).delete()
        # get the execution data of the task instance
        data = instance.execution_data
        component_list = []
        with transaction.atomic():
            try:
                # act_id: node id, act: atom data
                for act_id, act in data[PE.activities].items():
                    is_retry = False
                    if act['type'] == PE.ServiceActivity:
                        # the atom was retried
                        if status_tree["children"][act_id]["retry"] > 0:
                            # need to read from the execution histories
                            history_list = get_activity_histories(act_id)
                            for history in history_list:
                                start_time = history["started_time"]
                                archived_time = history["archived_time"]
                                elapsed_time = history["elapsed_time"]
                                is_retry = True
                        else:
                            # the atom was not retried:
                            # take the fields from the status tree
                            start_time = status_tree["started_time"]
                            archived_time = status_tree["archived_time"]
                            elapsed_time = status_tree["elapsed_time"]
                        status = status_tree["state"] == "FINISHED"
                        # create the object
                        component = ComponentExecuteData(
                            component_code=act['component']['code'],
                            instance_id=instance.instance_id,
                            node_id=act_id,
                            started_time=start_time,
                            archived_time=archived_time,
                            elapsed_time=elapsed_time,
                            status=status,
                            is_skip=status_tree["skip"],
                            is_retry=is_retry)
                        component_list.append(component)
                    else:
                        # pass the subprocess data down
                        children_tree_dict = status_tree["children"][act_id]["children"]
                        component_list = recursive_subprocess_tree(
                            children_tree_dict, act_id, instance.instance_id,
                            component_list, act[PE.pipeline][PE.activities],
                            None)
                ComponentExecuteData.objects.bulk_create(component_list)
            except Exception as e:
                logger.exception(
                    u"instance_post_save_handler raise error: %s" % e)
Example #6
def get_state(node_id):
    tree = api.get_status_tree(node_id, max_depth=100)

    res = _map(tree)

    # collect all atom
    descendants = {}
    _collect_descendants(tree, descendants)
    res['children'] = descendants

    # return
    return res
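Note: example #6 depends on two helpers, _map and _collect_descendants, that are not shown here. The snippet below is only a guess at a descendant collector over a get_status_tree result: it flattens every child node into a single dict keyed by node id.

def collect_descendants_sketch(tree, descendants):
    # Hypothetical sketch: the real _collect_descendants is not shown in these examples.
    for node_id, child in tree.get('children', {}).items():
        descendants[node_id] = child
        collect_descendants_sketch(child, descendants)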
Example #7
    def get_status(self):
        if not self.pipeline_instance.is_started:
            return {
                "start_time": None,
                "state": "CREATED",
                "retry": 0,
                "skip": 0,
                "finish_time": None,
                "children": {}
            }
        status_tree = pipeline_api.get_status_tree(
            self.pipeline_instance.instance_id, max_depth=99)
        TaskFlowInstance.format_pipeline_status(status_tree)
        return status_tree
Example #8
    def get_node_detail_v1(
        self,
        username: str,
        subprocess_stack: List[str],
        component_code: Optional[str] = None,
        loop: Optional[int] = None,
        **kwargs,
    ) -> dict:
        act_start = True
        detail = {}
        # first fetch the latest execution detail
        try:
            detail = pipeline_api.get_status_tree(self.node_id)
        except pipeline_exceptions.InvalidOperationException:
            act_start = False

        if not act_start:
            pipeline_instance = kwargs["pipeline_instance"]
            node = self._get_node_info(
                node_id=self.node_id, pipeline=pipeline_instance.execution_data, subprocess_stack=subprocess_stack
            )
            detail.update(
                {
                    "name": node["name"],
                    "error_ignorable": node.get("error_ignorable", False),
                    "state": pipeline_states.READY,
                }
            )
        else:
            format_pipeline_status(detail)
            # by default, only the result of the latest loop is requested
            if loop is None or int(loop) >= detail["loop"]:
                loop = detail["loop"]
                detail["history_id"] = -1
                detail["histories"] = pipeline_api.get_activity_histories(node_id=self.node_id, loop=loop)
            # if the caller passed a loop smaller than the node's current loop count detail['loop'], read the result from history
            else:
                histories = pipeline_api.get_activity_histories(node_id=self.node_id, loop=loop)
                self._assemble_histroy_detail(detail=detail, histories=histories)
                detail["history_id"] = histories[-1]["history_id"]

            for hist in detail["histories"]:
                # a retry record always means the previous attempt failed
                hist.setdefault("state", pipeline_states.FAILED)
                format_pipeline_status(hist)

        if "error_ignorable" in detail:
            detail["error_ignored"] = detail["error_ignorable"]
        return {"result": True, "data": detail, "message": "", "code": err_code.SUCCESS.code}
Example #9
    def get_node_detail(self, node_id, component_code=None, subprocess_stack=None):
        try:
            detail = pipeline_api.get_status_tree(node_id)
        except exceptions.InvalidOperationException as e:
            return {'result': False, 'message': e.message}
        TaskFlowInstance.format_pipeline_status(detail)
        data = self.get_node_data(node_id, component_code, subprocess_stack)
        if not data['result']:
            return data
        detail['histories'] = pipeline_api.get_activity_histories(node_id)
        for his in detail['histories']:
            his.setdefault('state', 'FAILED')
            TaskFlowInstance.format_pipeline_status(his)
        detail.update(data['data'])
        return {'result': True, 'data': detail}
Example #10
    def get_node_detail(self,
                        node_id,
                        component_code=None,
                        subprocess_stack=None):
        if not self.has_node(node_id):
            message = 'node[node_id={node_id}] not found in task[task_id={task_id}]'.format(
                node_id=node_id, task_id=self.id)
            return {'result': False, 'message': message, 'data': {}}

        ret_data = self.get_node_data(node_id, component_code,
                                      subprocess_stack)
        try:
            detail = pipeline_api.get_status_tree(node_id)
        except exceptions.InvalidOperationException as e:
            return {'result': False, 'message': e.message, 'data': {}}
        TaskFlowInstance.format_pipeline_status(detail)
        detail['histories'] = pipeline_api.get_activity_histories(node_id)
        for his in detail['histories']:
            his.setdefault('state', 'FAILED')
            TaskFlowInstance.format_pipeline_status(his)
        detail.update(ret_data['data'])
        return {'result': True, 'data': detail, 'message': ''}
Example #11
def pipeline_post_save_handler(sender, instance, created, **kwargs):
    # the task must have finished; this is triggered by celery
    if not created and instance.is_finished:
        # get the execution tree of the task instance
        status_tree = get_status_tree(instance.instance_id, 99)
        # delete existing standard plugin execution data
        ComponentExecuteData.objects.filter(
            instance_id=instance.instance_id).delete()
        # get the execution data of the task instance
        data = instance.execution_data
        component_list = []
        with transaction.atomic():
            try:
                # act_id: node id, act: standard plugin data
                for act_id, act in data[PE.activities].items():
                    is_retry = False
                    if act['type'] == PE.ServiceActivity:
                        # the standard plugin was retried
                        status_act = status_tree["children"].get(act_id)
                        if status_act is None:
                            continue
                        if status_act["retry"] > 0:
                            # need to read from the execution histories
                            history_list = get_activity_histories(act_id)
                            for history in history_list:
                                start_time = history["started_time"]
                                archived_time = history["archived_time"]
                                elapsed_time = history["elapsed_time"]
                                is_retry = True
                        else:
                            # the standard plugin was not retried:
                            # take the fields from the status tree
                            start_time = status_tree["started_time"]
                            archived_time = status_tree["archived_time"]
                            elapsed_time = status_tree["elapsed_time"]
                        status = status_tree["state"] == "FINISHED"
                        # create the object
                        component = ComponentExecuteData(
                            component_code=act['component']['code'],
                            instance_id=instance.instance_id,
                            node_id=act_id,
                            started_time=start_time,
                            archived_time=archived_time,
                            elapsed_time=elapsed_time,
                            status=status,
                            is_skip=status_tree["skip"],
                            is_retry=is_retry)
                        component_list.append(component)
                    else:
                        # pass the subprocess data down
                        children_tree_dict = status_tree["children"][act_id]["children"]
                        component_list = recursive_subprocess_tree(
                            children_tree_dict, act_id, instance.instance_id,
                            component_list, act[PE.pipeline][PE.activities],
                            None)
                ComponentExecuteData.objects.bulk_create(component_list)
            except Exception as e:
                logger.exception(
                    u"instance_post_save_handler raise error: %s" % e)
            # count the standard plugins, subprocesses and gateways in the pipeline
            instance_id = instance.instance_id
            # get the pipeline_tree
            pipeline_tree = instance.execution_data
            # initialize counters
            atom_total = 0
            subprocess_total = 0
            # get the activities from the pipeline_tree
            tree_activities = pipeline_tree["activities"]
            # get the number of gateways
            gateways_total = len(pipeline_tree["gateways"])
            # iterate over the activity nodes
            for activity in tree_activities:
                activity_type = tree_activities[activity]["type"]
                if activity_type == "ServiceActivity":
                    atom_total += 1
                elif activity_type == "SubProcess":
                    subprocess_total += 1
            try:
                InstanceInPipeline.objects.create(
                    instance_id=instance_id,
                    atom_total=atom_total,
                    subprocess_total=subprocess_total,
                    gateways_total=gateways_total)
            except Exception as e:
                logger.exception(
                    u"instance_post_save_handler raise error: %s" % e)
Example #12
File: test_api.py  Project: manlucas/atom
    def test_status_tree(self):
        s1 = Status.objects.create(id=uniqid(),
                                   name='s1',
                                   state=states.FINISHED,
                                   started_time=timezone.now(),
                                   archived_time=timezone.now() +
                                   timedelta(seconds=3))
        s2 = Status.objects.create(id=uniqid(),
                                   name='s2',
                                   state=states.FINISHED,
                                   started_time=timezone.now(),
                                   archived_time=timezone.now() +
                                   timedelta(seconds=3))
        s3 = Status.objects.create(id=uniqid(),
                                   name='s3',
                                   state=states.FINISHED,
                                   started_time=timezone.now(),
                                   archived_time=timezone.now() +
                                   timedelta(seconds=3))
        s4 = Status.objects.create(id=uniqid(),
                                   name='s4',
                                   state=states.FINISHED,
                                   started_time=timezone.now(),
                                   archived_time=timezone.now() +
                                   timedelta(seconds=3))
        s5 = Status.objects.create(id=uniqid(),
                                   name='s5',
                                   state=states.FINISHED,
                                   started_time=timezone.now(),
                                   archived_time=timezone.now() +
                                   timedelta(seconds=3))
        s6 = Status.objects.create(id=uniqid(),
                                   name='s6',
                                   state=states.FINISHED,
                                   started_time=timezone.now(),
                                   archived_time=timezone.now() +
                                   timedelta(seconds=3))

        NodeRelationship.objects.build_relationship(s1.id, s1.id)
        NodeRelationship.objects.build_relationship(s2.id, s2.id)
        NodeRelationship.objects.build_relationship(s3.id, s3.id)
        NodeRelationship.objects.build_relationship(s4.id, s4.id)
        NodeRelationship.objects.build_relationship(s5.id, s5.id)
        NodeRelationship.objects.build_relationship(s6.id, s6.id)

        NodeRelationship.objects.build_relationship(s1.id, s2.id)
        NodeRelationship.objects.build_relationship(s1.id, s3.id)
        NodeRelationship.objects.build_relationship(s2.id, s4.id)
        NodeRelationship.objects.build_relationship(s4.id, s5.id)
        NodeRelationship.objects.build_relationship(s4.id, s6.id)

        # refresh from db, sync datetime
        s1.refresh_from_db()
        s2.refresh_from_db()
        s3.refresh_from_db()
        s4.refresh_from_db()
        s5.refresh_from_db()
        s6.refresh_from_db()

        def get_status_dict_with_children(s, children):
            return {
                'archived_time': s.archived_time,
                'created_time': s.created_time,
                'elapsed_time': calculate_elapsed_time(s.started_time, s.archived_time),
                'error_ignorable': s.error_ignorable,
                'id': s.id,
                'loop': s.loop,
                'name': s.name,
                'retry': s.retry,
                'skip': s.skip,
                'started_time': s.started_time,
                'state': s.state,
                'version': s.version,
                'children': children
            }

        tree_depth_1 = get_status_dict_with_children(
            s1,
            children={
                s2.id: get_status_dict_with_children(s2, children={}),
                s3.id: get_status_dict_with_children(s3, children={})
            })

        tree = api.get_status_tree(s1.id, 1)
        self.assertDictEqual(tree, tree_depth_1)

        tree_depth_2 = get_status_dict_with_children(
            s1,
            children={
                s2.id: get_status_dict_with_children(
                    s2, children={s4.id: get_status_dict_with_children(s4, {})}),
                s3.id: get_status_dict_with_children(s3, {})
            })

        tree = api.get_status_tree(s1.id, 2)
        self.assertDictEqual(tree, tree_depth_2)

        tree_depth_3 = get_status_dict_with_children(
            s1,
            children={
                s2.id: get_status_dict_with_children(
                    s2,
                    children={
                        s4.id: get_status_dict_with_children(
                            s4,
                            children={
                                s5.id: get_status_dict_with_children(s5, {}),
                                s6.id: get_status_dict_with_children(s6, {})
                            })
                    }),
                s3.id: get_status_dict_with_children(s3, children={})
            })

        tree = api.get_status_tree(s1.id, 3)
        self.assertDictEqual(tree, tree_depth_3)

        tree = api.get_status_tree(s1.id, 4)
        self.assertDictEqual(tree, tree_depth_3)
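Note: for readability, the NodeRelationship.objects.build_relationship calls in example #12 describe the following ancestry (restated here, no new information):

# Relationships registered above (parent -> children):
#
#   s1
#   +-- s2
#   |   +-- s4
#   |       +-- s5
#   |       +-- s6
#   +-- s3
#
# So api.get_status_tree(s1.id, 1) stops at {s2, s3}, depth 2 adds s4, and
# depth 3 or more also includes s5 and s6, which is why the depth-4 call is
# asserted against the same tree_depth_3 structure.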
Example #13
    def get_task_status_v1(self, subprocess_id: Optional[str],
                           with_ex_data: bool) -> dict:
        if self.pipeline_instance.is_expired:
            return {
                "result": True,
                "data": {
                    "state": "EXPIRED"
                },
                "message": "",
                "code": err_code.SUCCESS.code
            }
        if not self.pipeline_instance.is_started:
            return {
                "result": True,
                "data": self.CREATED_STATUS,
                "message": "",
                "code": err_code.SUCCESS.code,
            }
        if not subprocess_id:
            try:
                task_status = pipeline_api.get_status_tree(
                    self.pipeline_instance.instance_id, max_depth=99)
                format_pipeline_status(task_status)
                return {
                    "result": True,
                    "data": task_status,
                    "message": "",
                    "code": err_code.SUCCESS.code,
                }
            except pipeline_exceptions.InvalidOperationException as e:
                logger.error(f"node relationship does not exist: {e}")
                task_status = self.CREATED_STATUS
            except Exception:
                logger.exception("task.get_status fail")
                return {
                    "result": False,
                    "message": "task.get_status fail",
                    "data": {},
                    "code": err_code.UNKNOWN_ERROR.code,
                }
        else:
            try:
                task_status = pipeline_api.get_status_tree(subprocess_id,
                                                           max_depth=99)
                format_pipeline_status(task_status)
            except pipeline_exceptions.InvalidOperationException:
                # do not raise error when subprocess not exist or has not been executed
                task_status = self.CREATED_STATUS
            except Exception:
                message = "pipeline_api.get_status_tree(subprocess_id:{}) fail".format(
                    subprocess_id)
                logger.exception(message)
                return {
                    "result": False,
                    "message": message,
                    "data": {},
                    "code": err_code.UNKNOWN_ERROR.code,
                }

        # return failed nodes and their debug information
        if with_ex_data and task_status["state"] == bamboo_engine_states.FAILED:
            failed_nodes = self._collect_fail_nodes(task_status)
            task_status["ex_data"] = {}
            failed_nodes_outputs = pipeline_api.get_batch_outputs(failed_nodes)
            for node_id in failed_nodes:
                task_status["ex_data"][node_id] = failed_nodes_outputs[
                    node_id]["ex_data"]

        return {
            "result": True,
            "data": task_status,
            "code": err_code.SUCCESS.code,
            "message": ""
        }
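Note: get_task_status_v1 gathers failing nodes with self._collect_fail_nodes, which is not reproduced here. A minimal hypothetical version, assuming it walks the formatted status tree for nodes whose state is FAILED, could look like this:

def collect_fail_nodes_sketch(task_status):
    # Hypothetical sketch: return the ids of all FAILED nodes in a status tree.
    failed = []

    def _walk(node_id, node):
        if node.get("state") == "FAILED":
            failed.append(node_id)
        for child_id, child in node.get("children", {}).items():
            _walk(child_id, child)

    for child_id, child in task_status.get("children", {}).items():
        _walk(child_id, child)
    return failed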
Example #14
    def get_node_data_v1(
        self,
        username: str,
        subprocess_stack: List[str],
        component_code: Optional[str] = None,
        loop: Optional[int] = None,
        **kwargs,
    ) -> dict:
        node_started = True
        inputs = {}
        outputs = {}
        try:
            detail = pipeline_api.get_status_tree(self.node_id)
        except pipeline_exceptions.InvalidOperationException:
            node_started = False
        else:
            # latest loop: fetch the execution record directly from the api
            if loop is None or int(loop) >= detail["loop"]:
                try:
                    inputs = pipeline_api.get_inputs(self.node_id)
                except pipeline_engine_models.Data.DoesNotExist:
                    logger.exception("shield DoesNotExist in pipeline engine layer")
                    inputs = {}

                try:
                    outputs = pipeline_api.get_outputs(self.node_id)
                except pipeline_engine_models.Data.DoesNotExist:
                    logger.exception("shield DoesNotExist in pipeline engine layer")
                    outputs = {}
            # historical loop: read from histories and take the latest operation's data (e.g. params re-filled on manual retry)
            else:
                his_data = pipeline_api.get_activity_histories(node_id=self.node_id, loop=loop)
                inputs = his_data[-1]["inputs"]
                outputs = {"outputs": his_data[-1]["outputs"], "ex_data": his_data[-1]["ex_data"]}

        pipeline_instance = kwargs["pipeline_instance"]
        if not node_started:
            node_info = self._get_node_info(
                node_id=self.node_id, pipeline=pipeline_instance.execution_data, subprocess_stack=subprocess_stack
            )
            if node_info["type"] != "ServiceActivity":
                return {
                    "result": True,
                    "data": {"inputs": {}, "outputs": [], "ex_data": ""},
                    "message": "",
                    "code": err_code.SUCCESS.code,
                }

            success, err, inputs, outputs = self._prerender_node_data(
                pipeline_instance=pipeline_instance, subprocess_stack=subprocess_stack, username=username
            )
            if not success:
                return {
                    "result": False,
                    "data": {},
                    "message": err,
                    "code": err_code.UNKNOWN_ERROR.code,
                }

        # format the outputs according to the given component_code
        success, err, outputs_table = self._format_outputs(
            outputs=outputs,
            component_code=component_code,
            pipeline_instance=pipeline_instance,
            subprocess_stack=subprocess_stack,
        )
        if not success:
            return {
                "result": False,
                "data": {},
                "message": err,
                "code": err_code.UNKNOWN_ERROR.code,
            }

        data = {"inputs": inputs, "outputs": outputs_table, "ex_data": outputs.pop("ex_data", "")}
        return {"result": True, "data": data, "message": "", "code": err_code.SUCCESS.code}