def get_node_detail_v1(
    self,
    username: str,
    subprocess_stack: List[str],
    component_code: Optional[str] = None,
    loop: Optional[int] = None,
    **kwargs,
) -> dict:
    """Return execution detail of this node under pipeline engine v1.

    :param username: operator requesting the detail (unused here, kept for interface parity)
    :param subprocess_stack: subprocess id stack used to locate the node in the pipeline tree
    :param component_code: component code of the node (unused here, kept for interface parity)
    :param loop: loop number to query; None or >= current loop means "latest"
    :param kwargs: must contain "pipeline_instance" (used when the node has not started)
    :return: {"result": True, "data": detail, "message": "", "code": ...}
    """
    act_start = True
    detail = {}
    # First, fetch the latest execution detail of the node from the engine;
    # InvalidOperationException means the node has not been executed yet.
    try:
        detail = pipeline_api.get_status_tree(self.node_id)
    except pipeline_exceptions.InvalidOperationException:
        act_start = False
    if not act_start:
        # Node never started: synthesize a READY-state detail from the pipeline tree.
        pipeline_instance = kwargs["pipeline_instance"]
        node = self._get_node_info(
            node_id=self.node_id, pipeline=pipeline_instance.execution_data, subprocess_stack=subprocess_stack
        )
        detail.update(
            {
                "name": node["name"],
                "error_ignorable": node.get("error_ignorable", False),
                "state": pipeline_states.READY,
            }
        )
    else:
        format_pipeline_status(detail)
        # By default only the result of the last loop is requested.
        if loop is None or int(loop) >= detail["loop"]:
            loop = detail["loop"]
            detail["history_id"] = -1
            detail["histories"] = pipeline_api.get_activity_histories(node_id=self.node_id, loop=loop)
        # If the caller passed a loop smaller than the node's current loop count
        # (detail['loop']), the result is assembled from historical data instead.
        else:
            histories = pipeline_api.get_activity_histories(node_id=self.node_id, loop=loop)
            # NOTE(review): helper name contains a typo ("histroy"); it is defined
            # elsewhere in the class, so it is left as-is here.
            self._assemble_histroy_detail(detail=detail, histories=histories)
            detail["history_id"] = histories[-1]["history_id"]
        for hist in detail["histories"]:
            # A retry record only exists because the previous attempt failed.
            hist.setdefault("state", pipeline_states.FAILED)
            format_pipeline_status(hist)
    if "error_ignorable" in detail:
        detail["error_ignored"] = detail["error_ignorable"]
    return {"result": True, "data": detail, "message": "", "code": err_code.SUCCESS.code}
def pipeline_post_save_handler(sender, instance, created, **kwargs):
    """post_save handler: rebuild per-node execution statistics for a task.

    Runs only when an already-existing task instance is saved as finished
    (the save is triggered by celery). It walks the engine's status tree and
    the instance's execution data, and recreates the ComponentExecuteData
    rows for the instance.
    """
    # The task must be an existing, finished instance — not a fresh create.
    if not created and instance.is_finished:
        # Execution status tree of the whole task instance.
        status_tree = get_status_tree(instance.instance_id, 99)
        # Drop previously collected component data for this instance.
        # BUGFIX: filter on instance.instance_id — the key the rows are created
        # with below — instead of instance.id. The old filter never matched,
        # so stale rows accumulated on every re-save.
        ComponentExecuteData.objects.filter(
            instance_id=instance.instance_id).delete()
        # Execution data (pipeline tree) of the task instance.
        data = instance.execution_data
        component_list = []
        with transaction.atomic():
            try:
                # act_id: node id, act: the node's definition in the tree.
                for act_id, act in data[PE.activities].items():
                    is_retry = False
                    if act['type'] == PE.ServiceActivity:
                        # Guard (consistent with the newer handler version):
                        # the status tree may lack nodes that never ran.
                        status_act = status_tree["children"].get(act_id)
                        if status_act is None:
                            continue
                        if status_act["retry"] > 0:
                            # Node was retried: take times from its history
                            # (the last history entry wins).
                            history_list = get_activity_histories(act_id)
                            for history in history_list:
                                start_time = history["started_time"]
                                archived_time = history["archived_time"]
                                elapsed_time = history["elapsed_time"]
                                is_retry = True
                        else:
                            # No retry: take times from the status tree.
                            # NOTE(review): these are the root tree's times,
                            # not the node's own — confirm this is intended.
                            start_time = status_tree["started_time"]
                            archived_time = status_tree["archived_time"]
                            elapsed_time = status_tree["elapsed_time"]
                        status = status_tree["state"] == "FINISHED"
                        component = ComponentExecuteData(
                            component_code=act['component']['code'],
                            instance_id=instance.instance_id,
                            node_id=act_id,
                            started_time=start_time,
                            archived_time=archived_time,
                            elapsed_time=elapsed_time,
                            status=status,
                            is_skip=status_tree["skip"],
                            is_retry=is_retry)
                        component_list.append(component)
                    else:
                        # SubProcess node: recurse into its children subtree.
                        children_tree_dict = status_tree["children"][act_id][
                            "children"]
                        component_list = recursive_subprocess_tree(
                            children_tree_dict, act_id, instance.instance_id,
                            component_list, act[PE.pipeline][PE.activities],
                            None)
                ComponentExecuteData.objects.bulk_create(component_list)
            except Exception as e:
                # Best-effort statistics: log and never break the save itself.
                logger.exception(
                    u"instance_post_save_handler raise error: %s" % e)
def get_node_detail(self, node_id, component_code=None, subprocess_stack=None):
    """Collect the status detail, retry histories and node data for one node.

    Returns {'result': True, 'data': ...} on success, or
    {'result': False, 'message': ...} when the engine rejects the query
    or the node data lookup fails.
    """
    try:
        status_detail = pipeline_api.get_status_tree(node_id)
    except exceptions.InvalidOperationException as exc:
        return {'result': False, 'message': exc.message}
    TaskFlowInstance.format_pipeline_status(status_detail)

    node_data = self.get_node_data(node_id, component_code, subprocess_stack)
    if not node_data['result']:
        return node_data

    history_records = pipeline_api.get_activity_histories(node_id)
    for record in history_records:
        # A history record exists only because a prior attempt failed.
        record.setdefault('state', 'FAILED')
        TaskFlowInstance.format_pipeline_status(record)
    status_detail['histories'] = history_records

    status_detail.update(node_data['data'])
    return {'result': True, 'data': status_detail}
def get_node_detail(self, node_id, component_code=None, subprocess_stack=None):
    """Collect status detail plus node data for one node of this task.

    Fails fast when the node does not belong to this task; otherwise merges
    the engine status tree, the retry histories and the node data payload.
    """
    # Guard: the node must belong to this task.
    if not self.has_node(node_id):
        message = 'node[node_id={node_id}] not found in task[task_id={task_id}]'.format(
            node_id=node_id, task_id=self.id)
        return {'result': False, 'message': message, 'data': {}}

    node_data = self.get_node_data(node_id, component_code, subprocess_stack)

    try:
        status_detail = pipeline_api.get_status_tree(node_id)
    except exceptions.InvalidOperationException as exc:
        return {'result': False, 'message': exc.message, 'data': {}}
    TaskFlowInstance.format_pipeline_status(status_detail)

    history_records = pipeline_api.get_activity_histories(node_id)
    for record in history_records:
        # A history record exists only because a prior attempt failed.
        record.setdefault('state', 'FAILED')
        TaskFlowInstance.format_pipeline_status(record)
    status_detail['histories'] = history_records

    status_detail.update(node_data['data'])
    return {'result': True, 'data': status_detail, 'message': ''}
def recursive_subprocess_tree(children_tree_dict, act_id, instance_id, component_list, activities=None, stack=None):
    """
    @summary Recursively walk a subprocess tree, collecting component rows.
    :param children_tree_dict: the "children" node of the execution status tree
    :param act_id: id of the parent (enclosing) process node
    :param instance_id: task instance id
    :param activities: standard-plugin info from the subprocess template
    :param stack: subprocess call-stack info
    :param component_list: accumulator of executed standard-plugin rows, used for bulk insert
    :return: component_list with this subtree's rows appended
    """
    if stack is None:
        stack = []
    # Copy to avoid sharing the stack between sibling recursions.
    other_stack = stack[:]
    # Push the enclosing template's id onto the stack.
    other_stack.insert(0, act_id)
    # NOTE(review): the loop variable shadows the act_id parameter from here on.
    for act_id, act in activities.items():
        is_skip = False
        is_retry = False
        # Standard-plugin node.
        if act["type"] == PE.ServiceActivity:
            # Plugin was retried: take times from the execution history.
            if children_tree_dict[act_id]["retry"] > 0:
                history_list = get_activity_histories(act_id)
                # NOTE(review): only the last history entry's times survive this
                # loop — a single row is created below; confirm this is intended.
                for history in history_list:
                    started_time = history["started_time"]
                    archived_time = history["archived_time"]
                    elapsed_time = history["elapsed_time"]
                    is_retry = True
            # Plugin was not retried: take times from the status tree node.
            else:
                started_time = children_tree_dict[act_id]["started_time"]
                archived_time = children_tree_dict[act_id]["archived_time"]
                elapsed_time = children_tree_dict[act_id]["elapsed_time"]
                is_skip = children_tree_dict[act_id]["skip"]
            status = True if children_tree_dict[act_id][
                "state"] == "FINISHED" else False
            # Build the row object.
            component = ComponentExecuteData(
                component_code=act['component']['code'],
                instance_id=instance_id,
                node_id=act_id,
                is_sub=True,
                subprocess_stack=json.dumps(other_stack),
                started_time=started_time,
                archived_time=archived_time,
                elapsed_time=elapsed_time,
                status=status,
                is_skip=is_skip,
                is_retry=is_retry)
            component_list.append(component)
        # Subprocess node: descend into its execution subtree,
        # carrying the updated call stack.
        elif act["type"] == PE.SubProcess:
            # Fetch the "children" info of the nested subprocess.
            other_children_tree_dict = children_tree_dict[act_id]["children"]
            # Recurse into the subprocess tree.
            component_list = recursive_subprocess_tree(
                other_children_tree_dict, act_id, instance_id, component_list,
                act[PE.pipeline][PE.activities], other_stack)
    return component_list
def pipeline_post_save_handler(sender, instance, created, **kwargs):
    """post_save handler: collect per-node execution data and pipeline-size
    statistics for a finished task instance.
    """
    # The task must be an existing, finished instance (save triggered by celery).
    if not created and instance.is_finished:
        # Execution status tree of the whole task instance.
        status_tree = get_status_tree(instance.instance_id, 99)
        # Drop previously collected standard-plugin data for this instance.
        ComponentExecuteData.objects.filter(
            instance_id=instance.instance_id).delete()
        # Execution data (pipeline tree) of the task instance.
        data = instance.execution_data
        component_list = []
        with transaction.atomic():
            try:
                # act_id: node id, act: the node's standard-plugin data.
                for act_id, act in data[PE.activities].items():
                    is_retry = False
                    if act['type'] == PE.ServiceActivity:
                        # Guard: the status tree may lack nodes that never ran.
                        status_act = status_tree["children"].get(act_id)
                        if status_act is None:
                            continue
                        # Plugin was retried: take times from the history
                        # (last entry wins).
                        if status_act["retry"] > 0:
                            history_list = get_activity_histories(act_id)
                            for history in history_list:
                                start_time = history["started_time"]
                                archived_time = history["archived_time"]
                                elapsed_time = history["elapsed_time"]
                                is_retry = True
                        else:
                            # Plugin was not retried: take times from the tree.
                            # NOTE(review): these are the root tree's times, not
                            # the node's own — confirm this is intended.
                            start_time = status_tree["started_time"]
                            archived_time = status_tree["archived_time"]
                            elapsed_time = status_tree["elapsed_time"]
                        status = True if status_tree[
                            "state"] == "FINISHED" else False
                        # Build the row object.
                        component = ComponentExecuteData(
                            component_code=act['component']['code'],
                            instance_id=instance.instance_id,
                            node_id=act_id,
                            started_time=start_time,
                            archived_time=archived_time,
                            elapsed_time=elapsed_time,
                            status=status,
                            is_skip=status_tree["skip"],
                            is_retry=is_retry)
                        component_list.append(component)
                    else:
                        # Subprocess node: hand its subtree to the recursion.
                        children_tree_dict = status_tree["children"][act_id][
                            "children"]
                        component_list = recursive_subprocess_tree(
                            children_tree_dict, act_id, instance.instance_id,
                            component_list, act[PE.pipeline][PE.activities],
                            None)
                ComponentExecuteData.objects.bulk_create(component_list)
            except Exception as e:
                # Best-effort: log and never break the save itself.
                logger.exception(
                    u"instance_post_save_handler raise error: %s" % e)
        # Count standard plugins, subprocesses and gateways in the pipeline.
        instance_id = instance.instance_id
        # The pipeline_tree of the instance.
        pipeline_tree = instance.execution_data
        # Initialize the counters.
        atom_total = 0
        subprocess_total = 0
        # Activities section of the pipeline_tree.
        tree_activities = pipeline_tree["activities"]
        # Number of gateways.
        gateways_total = len(pipeline_tree["gateways"])
        # Walk the activities nodes, counting by type.
        for activity in tree_activities:
            activity_type = tree_activities[activity]["type"]
            if activity_type == "ServiceActivity":
                atom_total += 1
            elif activity_type == "SubProcess":
                subprocess_total += 1
        try:
            InstanceInPipeline.objects.create(
                instance_id=instance_id,
                atom_total=atom_total,
                subprocess_total=subprocess_total,
                gateways_total=gateways_total)
        except Exception as e:
            # Best-effort: log and never break the save itself.
            logger.exception(
                u"instance_post_save_handler raise error: %s" % e)
def get_activity_histories(act_id):
    """Fetch a node's retry histories with normalized timestamp fields.

    Each record's 'started_time' is normalized in place, and its
    'archived_time' key is renamed to 'finished_time' (also normalized).
    """
    records = api.get_activity_histories(act_id)
    for record in records:
        record['started_time'] = _better_time_or_none(record['started_time'])
        archived = record.pop('archived_time')
        record['finished_time'] = _better_time_or_none(archived)
    return records
def recursive_collect_components_execution(activities, status_tree, task_instance, engine_ver=1, stack=None):
    """
    @summary Recursively walk the pipeline tree and collect
             TaskflowExecutedNodeStatistics objects for every finished
             (successful or failed) plugin node.
    @param activities: activity nodes of the current (sub)pipeline tree
    @param status_tree: execution status of the current (sub)pipeline tree
    @param task_instance: root TaskFlowInstance
    @param stack: subprocess call stack (None at the root)
    @param engine_ver: pipeline engine version (1 or 2)
    """
    instance = task_instance.pipeline_instance
    task_instance_id = task_instance.id
    task_template = TaskTemplate.objects.get(
        pipeline_template=instance.template)
    # stack is None exactly at the root call; any recursion is a subprocess.
    if stack is None:
        stack = []
        is_sub = False
    else:
        is_sub = True
    component_list = []
    for act_id, act in activities.items():
        # Only nodes that appear in the status tree have run.
        if act_id in status_tree:
            exec_act = status_tree[act_id]
            # Standard-plugin node.
            if act[PE.type] == PE.ServiceActivity:
                # Finished, failed or revoked nodes only.
                if exec_act["state"] in states.ARCHIVED_STATES:
                    component_code = act["component"]["code"]
                    component_version = act["component"].get(
                        "version", LEGACY_PLUGINS_VERSION)
                    is_remote = False
                    # Remote plugins carry the real code/version in their data.
                    if component_code == "remote_plugin":
                        component_code = act["component"]["data"][
                            "plugin_code"]["value"]
                        component_version = act["component"]["data"][
                            "plugin_version"]["value"]
                        is_remote = True
                    component_kwargs = {
                        "component_code": component_code,
                        "instance_id": instance.id,
                        "task_instance_id": task_instance_id,
                        "is_sub": is_sub,
                        "node_id": act_id,
                        "subprocess_stack": json.dumps(stack),
                        "started_time": format_date_time(exec_act["start_time"]),
                        "archived_time": format_date_time(exec_act["finish_time"]),
                        # Fall back to computing elapsed time when the tree
                        # does not carry it.
                        "elapsed_time": exec_act.get(
                            "elapsed_time",
                            calculate_elapsed_time(
                                format_date_time(exec_act["start_time"]),
                                format_date_time(exec_act["finish_time"])),
                        ),
                        "is_skip": exec_act["skip"],
                        "is_retry": False,
                        "status": exec_act["state"] == "FINISHED",
                        "version": component_version,
                        "template_id": instance.template.id,
                        "task_template_id": task_template.id,
                        "project_id": task_template.project.id,
                        "instance_create_time": instance.create_time,
                        "instance_start_time": instance.start_time,
                        "instance_finish_time": instance.finish_time,
                        "is_remote": is_remote,
                    }
                    component_list.append(
                        TaskflowExecutedNodeStatistics(**component_kwargs))
                    # Node has retry records: add one extra (failed) row per
                    # history entry, fetched from the matching engine API.
                    if exec_act["retry"] > 0:
                        if engine_ver == 1:
                            history_list = pipeline_api.get_activity_histories(
                                act_id)
                        else:
                            history_list_result = bamboo_engine_api.get_node_short_histories(
                                runtime=BambooDjangoRuntime(), node_id=act_id)
                            history_list = history_list_result.data if history_list_result.result else []
                        for history in history_list:
                            component_kwargs.update({
                                "started_time": history["started_time"],
                                "archived_time": history["archived_time"],
                                "elapsed_time": history.get(
                                    "elapsed_time",
                                    calculate_elapsed_time(
                                        history["started_time"],
                                        history["archived_time"]),
                                ),
                                "is_retry": True,
                                "is_skip": False,
                                "status": False,
                            })
                            component_list.append(
                                TaskflowExecutedNodeStatistics(
                                    **component_kwargs))
            # Subprocess node: recurse into its execution subtree.
            elif act[PE.type] == PE.SubProcess:
                sub_activities = act[PE.pipeline][PE.activities]
                # Deep-copy to avoid sharing the stack between siblings.
                copied_stack = deepcopy(stack)
                copied_stack.insert(0, act_id)
                component_list += recursive_collect_components_execution(
                    activities=sub_activities,
                    status_tree=exec_act["children"],
                    task_instance=task_instance,
                    stack=copied_stack,
                    engine_ver=engine_ver,
                )
    return component_list
def test_get_activity_histories(self):
    """api.get_activity_histories should pass through the engine's history data."""
    mocked_get_history = MagicMock(return_value=self.dummy_return)
    with patch(PIPELINE_HISTORY_GET_HISTORY, mocked_get_history):
        result = api.get_activity_histories(self.node_id)
        self.assertEqual(result, self.dummy_return)
def get_node_data_v1(
    self,
    username: str,
    subprocess_stack: List[str],
    component_code: Optional[str] = None,
    loop: Optional[int] = None,
    **kwargs,
) -> dict:
    """Return the inputs/outputs data of this node under pipeline engine v1.

    :param username: operator, used when pre-rendering data of a not-yet-started node
    :param subprocess_stack: subprocess id stack used to locate the node
    :param component_code: component code used to format the outputs table
    :param loop: loop number to query; None or >= current loop means "latest"
    :param kwargs: must contain "pipeline_instance"
    :return: {"result": ..., "data": {"inputs", "outputs", "ex_data"}, ...}
    """
    node_started = True
    inputs = {}
    outputs = {}
    # InvalidOperationException means the node has not been executed yet.
    try:
        detail = pipeline_api.get_status_tree(self.node_id)
    except pipeline_exceptions.InvalidOperationException:
        node_started = False
    else:
        # Latest loop: read inputs/outputs directly from the engine.
        if loop is None or int(loop) >= detail["loop"]:
            try:
                inputs = pipeline_api.get_inputs(self.node_id)
            except pipeline_engine_models.Data.DoesNotExist:
                logger.exception("shield DoesNotExist in pipeline engine layer")
                inputs = {}
            try:
                outputs = pipeline_api.get_outputs(self.node_id)
            except pipeline_engine_models.Data.DoesNotExist:
                logger.exception("shield DoesNotExist in pipeline engine layer")
                outputs = {}
        # Historical loop: read from histories, taking the latest operation's
        # data (e.g. re-filled parameters from a manual retry).
        else:
            his_data = pipeline_api.get_activity_histories(node_id=self.node_id, loop=loop)
            inputs = his_data[-1]["inputs"]
            outputs = {"outputs": his_data[-1]["outputs"], "ex_data": his_data[-1]["ex_data"]}
    pipeline_instance = kwargs["pipeline_instance"]
    if not node_started:
        # Node never ran: pre-render its data from the pipeline tree instead.
        node_info = self._get_node_info(
            node_id=self.node_id, pipeline=pipeline_instance.execution_data, subprocess_stack=subprocess_stack
        )
        # Only standard-plugin nodes have renderable data.
        if node_info["type"] != "ServiceActivity":
            return {
                "result": True,
                "data": {"inputs": {}, "outputs": [], "ex_data": ""},
                "message": "",
                "code": err_code.SUCCESS.code,
            }
        success, err, inputs, outputs = self._prerender_node_data(
            pipeline_instance=pipeline_instance, subprocess_stack=subprocess_stack, username=username
        )
        if not success:
            return {
                "result": False,
                "data": {},
                "message": err,
                "code": err_code.UNKNOWN_ERROR.code,
            }
    # Format the outputs according to the supplied component_code.
    success, err, outputs_table = self._format_outputs(
        outputs=outputs,
        component_code=component_code,
        pipeline_instance=pipeline_instance,
        subprocess_stack=subprocess_stack,
    )
    if not success:
        return {
            "result": False,
            "data": {},
            "message": err,
            "code": err_code.UNKNOWN_ERROR.code,
        }
    data = {"inputs": inputs, "outputs": outputs_table, "ex_data": outputs.pop("ex_data", "")}
    return {"result": True, "data": data, "message": "", "code": err_code.SUCCESS.code}