def init_column_for_table_api_testcase_info():
    """Back-fill the setup_case_list column of table api_testcase_info.

    For every testcase whose setup_case_list is still empty, derive the value
    from the legacy 'setup_cases' entries inside its include JSON (each case id
    prefixed with '1-'), then write the rows back one by one with a progress
    printout.
    """
    to_update_rows = []
    for obj in atim.get_testcases():
        if obj.setup_case_list:
            # Already initialized -- the script is safe to re-run.
            continue
        include_str = obj.include
        if not include_str:
            continue
        for include in json_loads(include_str):
            # Only non-empty list-typed 'setup_cases' entries are migrated.
            if 'setup_cases' in include and include['setup_cases'] and isinstance(include['setup_cases'], list):
                setup_case_list = ['1-' + case_id_str for case_id_str in include['setup_cases']]
                to_update_rows.append({
                    'id': obj.id,
                    'setup_case_list': json_dumps(setup_case_list)
                })
    print(json_dumps(to_update_rows))
    print(len(to_update_rows))
    x_len = len(to_update_rows)
    if not x_len:
        # BUG FIX: the original divided by x_len unconditionally and raised
        # ZeroDivisionError when there was nothing to update.
        return
    for index, row in enumerate(to_update_rows, start=1):
        atim.update_testcase(id_=row['id'], setup_case_list=row['setup_case_list'])
        print(index * 100.0 / x_len)
def init_column_main_list_for_table_api_testcase_sub():
    """Back-fill api_testcase_sub.main_list; safe to run repeatedly.

    For every main testcase, append its id to the main_list JSON array of each
    sub testcase referenced by its sub_list. Ids already present are skipped,
    which is what makes re-runs idempotent.
    """
    main_objs = ApiTestcaseMainManager.get_testcase_mains()
    len_main_objs = len(main_objs)
    # NOTE(review): raises ZeroDivisionError below if there are no main
    # testcases at all -- presumably acceptable for a one-off script.
    print('总计:{}'.format(len_main_objs))
    index = 0
    for main_obj in main_objs:
        index += 1
        process = index * 100.0 / len_main_objs
        print('进度:{:.2f}%'.format(process))
        sub_list = json_loads(main_obj.sub_list)
        for sub_id in sub_list:
            sub_obj = ApiTestcaseSubManager.get_testcase_sub(id=sub_id)
            if not sub_obj:
                # Dangling reference: stop processing this main case's subs.
                print('sub_obj not found, main_obj.id:{0}, sub_id:{1}'.format(main_obj.id, sub_id))
                break
            old_main_list_str = sub_obj.main_list
            if old_main_list_str:
                old_main_list = json_loads(old_main_list_str)
                if old_main_list:
                    if main_obj.id not in old_main_list:
                        old_main_list.append(main_obj.id)
                        main_list_str = json_dumps(old_main_list)
                    else:
                        # Already linked -- nothing to write for this sub.
                        continue
                else:
                    main_list_str = '[{}]'.format(main_obj.id)
            else:
                main_list_str = '[{}]'.format(main_obj.id)
            print(main_list_str)
            ApiTestcaseSubManager.update_testcase_sub(sub_id, main_list=main_list_str)
def download_xmind_api():
    """Build a nested dict of the API case tree and export it as an XMind file.

    The query returns rows whose columns (row[0]..row[6]) are nested level by
    level into xmind_dic; level 5 groups cases under a tag ('异常场景' if that
    tag is attached, else '正常场景'), with the expected result as the leaf.
    NOTE(review): hard-coded to project_id = 1.
    """
    from atp.api.mysql_manager import ApiCompanyInfoManager as acim, ApiTestcaseTagRelationManager as attrm
    project_id = 1
    xmind_dic = {}
    res = acim.query_api_subtree_for_xmind(project_id)
    for row in res:
        print(row)
        # Create each nested level on first sight; empty/None values stop the
        # descent for that row.
        if row[0] not in xmind_dic:
            xmind_dic[row[0]] = {}
        if row[1] and row[1] not in xmind_dic[row[0]]:
            xmind_dic[row[0]][row[1]] = {}
        if row[2] and row[2] not in xmind_dic[row[0]][row[1]]:
            xmind_dic[row[0]][row[1]][row[2]] = {}
        if row[3] and row[3] not in xmind_dic[row[0]][row[1]][row[2]]:
            xmind_dic[row[0]][row[1]][row[2]][row[3]] = {}
        if row[4]:
            # row[4] is the testcase id used for the tag lookup; classify the
            # case under the exception-scenario tag if present.
            tag_objs = attrm.query_tag_info_by_testcase(row[4])
            tag_name_list = [t_obj[1] for t_obj in tag_objs] if tag_objs else []
            tag = '异常场景' if '异常场景' in tag_name_list else '正常场景'
            if tag not in xmind_dic[row[0]][row[1]][row[2]][row[3]]:
                xmind_dic[row[0]][row[1]][row[2]][row[3]].update({tag: {}})
            expect = row[6] if row[6] else ''
            xmind_dic[row[0]][row[1]][row[2]][row[3]][tag].update(
                {row[5]: {
                    '预期': {
                        expect: {}
                    }
                }})
    print(json_dumps(xmind_dic))
    from atp.api.xmind_parser import export_xmind_api
    filename = export_xmind_api(xmind_dic)
    def synchronize_baseline(self):
        """Synchronize baseline business cases immediately.

        Rejects the request if the previous sync finished less than 300 s ago,
        then runs the git-pull shell script and parses the pulled baseline.
        Returns a make_response payload in every branch.
        """
        obj = bjm.get_last_record()
        if obj:
            last_time = format(obj.last_time)
            last_timestamp = str_time_to_timestamp(last_time)
            now_timestamp = get_current_timestamp()
            if last_timestamp + 300 > now_timestamp:
                # Throttle: at most one sync per 5 minutes.
                return make_response({
                    "code": "200",
                    "desc": "距离上次同步不到5分钟,请稍后再试"
                })
        # git pull via the deployment script; non-zero exit means the pull failed.
        code, output = subprocess.getstatusoutput(
            'sh /usr/local/src/git_folder/git_pull_baseline.sh')
        if code != 0:
            return make_response({"code": "300", "desc": "git pull动作失败"})
        # parse the freshly pulled baseline files.
        summary_info = parse_main()
        return make_response({
            "code": "000",
            "data": "业务用例同步已完成, 详情:{}".format(json_dumps(summary_info))
        })
    def delete_testcase_main(self):
        """Delete a main testcase and unlink it from its sub testcases.

        Removes this main case id from each sub case's main_list JSON array
        (for case_type 2 all subs; otherwise only subs on the same interface),
        deletes the main record, then deletes its tag relations.
        """
        try:
            testcase_id = int(self.data.pop('testcaseId'))
        except KeyError:
            return make_response({"code": "100", "desc": CODE_DESC_MAP["100"]})
        tm_obj = self.atmm.get_testcase_main(id=testcase_id)
        if not tm_obj:
            return make_response({
                "code": "200",
                "desc": "用例id\"{}\"不存在, 请刷新后重试".format(testcase_id)
            })
        sub_list = json_loads(tm_obj.sub_list)
        if tm_obj.case_type == 2:
            for sub_id in sub_list:
                sub_obj = self.atsm.get_testcase_sub(id=sub_id)
                main_list = json_loads(
                    sub_obj.main_list) if sub_obj.main_list else []
                if testcase_id in main_list:
                    main_list.remove(testcase_id)
                    self.atsm.update_testcase_sub(
                        sub_id, main_list=json_dumps(main_list))
                    # self.atsm.delete_testcase_sub(sub_id)
        else:
            for sub_id in sub_list:
                sub_obj = self.atsm.get_testcase_sub(id=sub_id)
                # Only unlink subs belonging to the same interface as the main case.
                if sub_obj.api_intf_id == tm_obj.api_intf_id:
                    main_list = json_loads(
                        sub_obj.main_list) if sub_obj.main_list else []
                    if testcase_id in main_list:
                        main_list.remove(testcase_id)
                        self.atsm.update_testcase_sub(
                            sub_id, main_list=json_dumps(main_list))
                        # self.atsm.delete_testcase_sub(sub_id)
        self.atmm.delete_testcase_main(testcase_id)
        # Delete the tag relations of this testcase.
        relation_objs = self.atmtrm.get_relations(api_testcase_id=testcase_id)
        for relation_obj in relation_objs:
            self.atmtrm.delete_relation(relation_obj.id)
        return make_response({"code": "000", "desc": "测试用例删除成功"})
def migrate_base(step):
    """Migrate baseline testcase data.

    Precondition: export base_testcase_info and base_module_info to their
    backup tables first, then empty base_module_info. The steps must be run
    in order; do not continue after an error mid-way.

    :param step: 1 migrates testcase detail JSON; 2 rebuilds the module tree
        from the backup table.
    """
    if step == 1:
        # migrate testcase: rewrite each testcase's detail JSON.
        bt_objs = btm.get_all_testcase()
        process_len = len(bt_objs)
        process_id = 0
        for bt_obj in bt_objs:
            process_id += 1
            print('{:.1f}%'.format(process_id * 100.0 / process_len))
            id_ = bt_obj.id
            print(id_)
            try:
                detail_dic = json_loads(bt_obj.detail)
            except Exception:
                # Unparseable detail is treated as empty and rewritten.
                detail_dic = {}
            if isinstance(detail_dic, list):
                # List-shaped details are assumed already migrated -- skip.
                continue
            new_detail_dic = _change_base_detail(detail_dic)
            btm.update_base_testcase(id_, detail=json_dumps(new_detail_dic))
    if step == 2:
        # migrate module: rebuild base_module_info from the backup table.
        bm_bak_objs = bmm_bak.get_modules()
        process_len = len(bm_bak_objs)
        process_id = 0
        for bm_bak_obj in bm_bak_objs:
            process_id += 1
            print('{:.1f}%'.format(process_id * 100.0 / process_len))
            if bm_bak_obj.system_id:
                # Top-level module (attached to a system): recreate it as-is.
                bmm.insert_base_module(id=bm_bak_obj.id, module_name=bm_bak_obj.module_name,
                                       system_id=bm_bak_obj.system_id)
                second_bm_objs = bmm_bak.get_modules(parent_module_id=bm_bak_obj.id)
                if not second_bm_objs:
                    # Leaf top-level module with testcases: insert a synthetic
                    # second-level module and move the cases under it.
                    new_module_name = bm_bak_obj.module_name
                    bt_objs = btm.get_all_testcase(module_id=bm_bak_obj.id)
                    if bt_objs:
                        bmm.insert_base_module(module_name=new_module_name, parent_module_id=bm_bak_obj.id)
                        second_bm_obj = bmm.get_module(module_name=new_module_name, parent_module_id=bm_bak_obj.id)
                        new_module_id = second_bm_obj.id
                        for bt_obj in bt_objs:
                            btm.update_base_testcase(bt_obj.id, module_id=new_module_id)
            else:
                sub_bm_bak_objs = bmm_bak.get_modules(parent_module_id=bm_bak_obj.id)
                if sub_bm_bak_objs:
                    # Deeper-than-two-level modules are handled elsewhere -- skip.
                    continue
                else:
                    new_module_name, new_parent_id = _change_name_and_parent_id(bm_bak_obj.id)
                    bmm.insert_base_module(id=bm_bak_obj.id, module_name=new_module_name,
                                           parent_module_id=new_parent_id)
def httprunner_request_update():
    """Deprecated -- do not execute.

    Walked every legacy testcase, re-parsed its request JSON through
    handle_step_dic and printed the running count plus the converted request.
    Kept for reference only; the original docstring already marks it obsolete.
    """
    from atp.api.mysql_manager import TestcaseInfoManager
    tc_objs = TestcaseInfoManager.get_all_testcases()
    count = 0
    for tc_obj in tc_objs:
        if tc_obj:
            if tc_obj.request:
                request = json_loads(tc_obj.request)
                count += 1
                new_request = handle_step_dic(request)
                print(count)
                # NOTE(review): results are only printed, never written back --
                # consistent with the function being deprecated.
                print(json_dumps(new_request))
    def add_intf(self):
        """Create a new interface record (HTTP / DUBBO / MQ).

        Validates the payload, derives the canonical interface name per type,
        rejects duplicates (company-wide for HTTP/DUBBO, system-wide for MQ),
        then inserts the interface and its default request record.
        """
        try:
            system_id = self.data.pop('systemId')
            intf_desc = self.data.pop('intfNameInChinese')
            intf_type = self.data.pop('type')
            intf_info = self.data.pop('info')
            request_dic = self.data.pop('request', {})
            request_detail_dic = self.data.pop('requestDetail', [])
            intf_relation = self.data.pop('intfRelation')
        except KeyError:
            return make_response({"code": "100", "desc": "入参校验失败"})
        intf_desc = str(intf_desc).strip()
        if not self.asim.get_system(id=system_id):
            return make_response({"code": "201", "desc": "工程id\"{}\"不存在".format(system_id)})
        if intf_type != 'MQ':
            # HTTP/DUBBO: the interface name is derived from URL or
            # service.method and must be unique across the whole company.
            if intf_type == 'HTTP':
                intf_info['apiUrl'] = intf_info['apiUrl'].strip()
                intf_name = intf_info['apiUrl']
            elif intf_type == 'DUBBO':
                intf_info['dubboService'] = intf_info['dubboService'].strip()
                intf_info['dubboMethod'] = intf_info['dubboMethod'].strip()
                intf_name = '{0}.{1}'.format(intf_info['dubboService'], intf_info['dubboMethod'])
            company_id = self.asim.get_system(id=system_id).api_company_id
            system_id_list = [row.id for row in self.asim.get_systems(api_company_id=company_id)]
            intf_name_list = [row.intf_name for row in self.aiim.get_intfs_in_system_id_list(system_id_list)]
            if intf_name in intf_name_list:
                company_name = self.acim.get_company(id=company_id).company_name
                return make_response({"code": "201", "desc": "\"{0}\"公司下存在相同接口\"{1}\", 请使用已存在的接口".format(
                    company_name, intf_name)})
        else:
            # MQ: name is topic.tag, unique only within the system.
            intf_info['topic'] = intf_info['topic'].strip()
            intf_info['tag'] = intf_info['tag'].strip()
            intf_name = '{0}.{1}'.format(intf_info['topic'], intf_info['tag'])
            obj = self.aiim.get_intf(intf_name=intf_name, api_system_id=system_id)
            if obj:
                return make_response({"code": "201", "desc": "工程下存在相同MQ接口\"{}\", 请使用已存在的MQ接口".format(intf_name)})
        # Optional list of dependent interfaces (second element of each pair).
        if intf_relation:
            intf_relation = [i[1] for i in intf_relation]
            self.aiim.insert_intf(intf_name=intf_name, intf_desc=intf_desc, api_system_id=system_id,
                                  intf_type=intf_type, intf_info=json_dumps(intf_info), creator=self.username,
                                  intf_relation=json_dumps(intf_relation))
        else:
            self.aiim.insert_intf(intf_name=intf_name, intf_desc=intf_desc, api_system_id=system_id,
                                  intf_type=intf_type, intf_info=json_dumps(intf_info), creator=self.username)
        # Re-read the inserted row to obtain its id for the request record.
        intf_obj = self.aiim.get_intf(intf_name=intf_name, intf_desc=intf_desc, api_system_id=system_id,
                                      intf_type=intf_type, intf_info=json_dumps(intf_info), creator=self.username)
        self.aidrm.insert_request(api_intf_id=intf_obj.id, request=json_dumps(request_dic),
                                  request_detail=json_dumps(request_detail_dic))
        return make_response({"code": "000", "desc": "接口\"{}\"增加成功".format(intf_name)})
def celery_collect_results(summary_path_list):
    """Collect per-worker summary files and persist the aggregated results.

    Any exception raised by the collector is logged (message + traceback) and
    swallowed so the surrounding celery workflow always finishes.
    """
    print("分步任务完成({}),开始收集结果任务...".format(len(summary_path_list)))
    try:
        TaskResultCollector(summary_path_list).save_task_results()
    except Exception as err:
        summary_list_str = json_dumps(summary_path_list)
        print("收集结果任务出现异常, summary_list:{}:".format(summary_list_str))
        print('\n'.join([str(err), traceback.format_exc()]))
    print("收集结果任务完成")
    def get_changes_by_seq_no(self):
        """Fetch and persist change content by git-diff sequence number.

        Looks up the git diff record for seq_no, stores the callback's change
        list on it, and forwards the changes to update_task_info.
        """
        try:
            seq_no = self.data.pop('seq_no')
            application = self.data.pop('application')
        except KeyError:
            # NOTE(review): this branch uses key "mes" while the others use
            # "desc"/"msg", and codes mix "100"/"0000" -- looks inconsistent;
            # confirm against API consumers before changing.
            return make_response({"code": "100", "mes": CODE_DESC_MAP["100"]})
        git_diffs = self.gdvm.get_git_diff_versions(seq_no=seq_no)
        if not git_diffs:
            return make_response({"code": "100", "desc": "seq_no不存在"})
        elif len(git_diffs) > 1:
            return make_response({"code": "100", "desc": "seq_no不唯一"})
        else:
            changes = application['changes']
            api_task_id = git_diffs[0].api_task_id
            self.gdvm.update_git_diff_version_by_seq_no(seq_no_=seq_no, detail=json_dumps(changes))
            self.update_task_info(api_task_id, changes, seq_no)
            return make_response({"code": "0000", "msg": "成功"})
def merge_sub_case():
    """Merge duplicate sub cases (same request body); safe to run repeatedly.

    For each redundant request the first sub case id becomes the canonical
    target; every main case's sub_list is rewritten to point at it. Only the
    affected main rows are updated at the end.
    """
    to_update_main_sub_list_dic = {}
    testcase_main_objs = ApiTestcaseMainManager.get_testcase_mains()
    # In-memory map main_id -> sub_list, kept up to date as merges happen so
    # later duplicates see earlier replacements.
    main_sub_list_dic = {}
    for testcase_main_obj in testcase_main_objs:
        main_sub_list_dic[testcase_main_obj.id] = json_loads(testcase_main_obj.sub_list)
    redundant_requests = ApiTestcaseRequestQllManager.get_redundant_requests()
    len_redundant_requests = len(redundant_requests)
    print('总计:{}'.format(len_redundant_requests))
    index = 0
    for row in redundant_requests:
        index += 1
        process = index * 100.0 / len_redundant_requests
        print('进度:{:.2f}%'.format(process))
        print('开始处理:{}'.format(row[0]))
        request = row[0]
        request_objs = ApiTestcaseRequestQllManager.get_requests(request=request)
        print('重复个数:{}'.format(len(request_objs)))
        # The first duplicate is kept as the canonical sub case.
        target_sub_id = request_objs[0].api_testcase_id
        for request_obj in request_objs:
            print(request_obj.api_testcase_id)
            to_update_sub_id = request_obj.api_testcase_id
            if to_update_sub_id == target_sub_id:
                continue
            else:
                for main_id in main_sub_list_dic:
                    if to_update_sub_id in main_sub_list_dic[main_id]:
                        # Replace every occurrence of the duplicate id.
                        new_sub_list = [
                            target_sub_id if i == to_update_sub_id else i
                            for i in main_sub_list_dic[main_id]
                        ]
                        main_sub_list_dic[main_id] = copy.deepcopy(new_sub_list)
                        print(new_sub_list)
                        to_update_main_sub_list_dic[main_id] = new_sub_list
    for main_id, sub_list in to_update_main_sub_list_dic.items():
        ApiTestcaseMainManager.update_testcase_main(main_id, sub_list=json_dumps(sub_list))
def repair_sub_case_public_var_error():
    """Repair wrong public-variable id lists stored in sub cases.

    Re-derives each sub case's public_variables from the $variable names
    actually referenced in its request, and rewrites the include JSON when the
    stored list differs.
    """
    import re
    # Matches $name references inside the stored request text.
    variable_regexp = r"\$([\w_]+)"
    sub_objs = ApiTestcaseSubManager.get_testcase_subs()
    repair_count = 0
    for sub_obj in sub_objs:
        if not sub_obj.include or sub_obj.include in ['[]', '[{"public_variables": []}]']:
            continue
        include_list = json_loads(sub_obj.include)
        pv_id_list = include_list[0]['public_variables']
        # Map stored public-variable id -> variable name.
        pv_dic = {}
        for public_v_id in pv_id_list:
            pv_obj = ApiPublicVariableInfoManager.get_variable(id=public_v_id)
            pv_dic[public_v_id] = pv_obj.variable_name
        request_obj = ApiTestcaseRequestQllManager.get_request(api_testcase_id=sub_obj.id)
        new_pv_id_list = []
        if request_obj and request_obj.request:
            variables = re.findall(variable_regexp, str(request_obj.request))
            variables = list(set(variables))
            for variable_name in variables:
                for pv_id, pv_name in pv_dic.items():
                    if pv_name == variable_name:
                        new_pv_id_list.append(pv_id)
                        break
        if set(pv_id_list) != set(new_pv_id_list):
            # NOTE(review): 137 appears to be a special always-allowed public
            # variable id -- confirm its meaning before touching this logic.
            new_pv_id_list.append(137)
            if set(pv_id_list) != set(new_pv_id_list):
                if 137 not in pv_id_list:
                    new_pv_id_list.remove(137)
                print('sub_obj.id:{2}, old:{0}, new:{1}'.format(pv_id_list, new_pv_id_list, sub_obj.id))
                new_include = [{"public_variables": new_pv_id_list}]
                include_str = json_dumps(new_include)
                print(include_str)
                ApiTestcaseSubManager.update_testcase_sub(id_=sub_obj.id, include=include_str)
                repair_count += 1
    print(repair_count)
    def _load_testcase_mixed(self, testcase_id):
        """Load a testcase (plus its setup chain) into a testset.

        The chain returned by get_testcase_chain mixes interface cases and
        full-link cases; it is reversed so prerequisites load first, then each
        entry is dispatched to the matching loader. Returns the built testset.
        """
        case_chain_list = get_testcase_chain(testcase_id, case_type=1)
        """
        case_chain_list EXAMPLE:
        [
            {
                'preSystemName': '',
                'customFlowId': 1,
                'preCaseType': '全链路用例',
                'preCaseName': '新户提交必填信息2',
                'preCaseId': 32,
                'preIntfName': ''
                'customFlowName': '32flow1'
            },
            {
                'preCaseType': '接口用例',
                'preCaseName': '随机获取身份证134佛挡杀佛水电费',
                'preCaseId': 5141
            }
        ]
        """
        print(json_dumps(case_chain_list))
        # Prerequisites come last in the chain; reverse to execute them first.
        case_chain_list.reverse()
        testset = None
        for case_info in case_chain_list:
            if get_case_type_by_desc(case_info["preCaseType"]) == 1:
                # Interface case.
                testset = self._load_api_testcase_itself(testcase_id=case_info["preCaseId"], testset=testset)
            elif get_case_type_by_desc(case_info["preCaseType"]) == 2:
                # Full-link (main) case; reset the teardown-extract flag first.
                self.has_extract_variable_in_main_teardown = False
                testset = self._load_main_testcase(testcase_main_id=case_info["preCaseId"], testset=testset,
                                                   custom_flow_id=case_info["customFlowId"])
        return testset
def identify_errors(summary):
    """Classify errors; stamp case_id and error_type onto every record.

    error_map maps category -> {sub-error label -> keyword list}; a keyword
    entry that is itself a list means ALL its keywords must match. Attachment
    text is matched first; otherwise failed validators are inspected. Also
    fills per-detail and overall 'stat' counters. Returns the mutated summary.
    """
    error_map = {
        '环境问题': {
            'Http 404': [
                'Failed to establish a new connection: [Errno 111] Connection refused'
            ],
            'Http 502': [
                '502 Bad Gateway'
            ],
            'Dubbo请求应用前报错': [
                ['远程服务返回失败', '"remoteResponseCode": "101"']
            ],
            'Dubbo请求应用后报错': [
                ['应用服务返回失败', '"remoteResponseCode": "201"']
            ]
        },
        '用例问题': {
            '提取变量错误': [
                'httprunner.exceptions.ExtractFailure: Failed to extract'
            ],
            '变量未找到': [
                'httprunner.exceptions.VariableNotFound:'
            ],
        },
        '框架报错': {},
        '断言失败': {
            '断言报错': [
                'atp.httprunner.exceptions.ValidationFailure'
            ],
        },
        '前后置失败': {
            '前置步骤报错': [
                'atp.httprunner.exceptions.SetupHooksFailure'
            ],
            '后置步骤报错': [
                'atp.httprunner.exceptions.TeardownHooksFailure'
            ],
        }
    }
    details = summary['details']
    summary['stat'] = {'testsRun': 0, 'failures': 0, "errors": 0, 'skipped': 0, 'successes': 0}
    for detail_dic in details:
        detail_dic['stat'] = {'testsRun': 1, 'failures': 0, "errors": 0, 'skipped': 0, 'successes': 0}
        case_success = True
        chain_list = detail_dic['chain_list']
        records = detail_dic['records']
        for i in range(len(records)):  # each sub case
            records[i].update(
                {
                    'case_id': chain_list[i],
                    'error_type': ''
                }
            )
            step_success = True
            # Handle errors carried in the attachment text.
            if records[i]['attachment']:
                step_success = False
                records[i]['status'] = 'fail'
                # Known attachment errors are classified to a concrete type.
                found_error = False
                for error_type, sub_error_dic in error_map.items():
                    if found_error:
                        break
                    for sub_error, error_kw_list in sub_error_dic.items():
                        if found_error:
                            break
                        for error_kw in error_kw_list:
                            if isinstance(error_kw, list):
                                # List entry: every keyword must be present.
                                all_found = True
                                for kw in error_kw:
                                    if kw not in records[i]['attachment']:
                                        all_found = False
                                        break
                                if all_found:
                                    records[i]['error_type'] = error_type
                                    records[i]['attachment'] = sub_error + ' -- ' + records[i]['attachment']
                                    found_error = True
                                    break
                            else:
                                if error_kw in records[i]['attachment']:
                                    records[i]['error_type'] = error_type
                                    records[i]['attachment'] = sub_error + ' -- ' + records[i]['attachment']
                                    found_error = True
                                    break
                # Unknown attachment errors default to framework error.
                if not records[i]['error_type']:
                    records[i]['error_type'] = '框架报错'
            # Handle assertion (validator) errors.
            else:
                for validator_dic in records[i]['meta_data']['validators']:
                    if validator_dic['check_result'] != 'pass':
                        step_success = False
                        records[i]['status'] = 'fail'
                        if isinstance(validator_dic['check_value'], dict):
                            actual_str = json_dumps(validator_dic['check_value'])
                        else:
                            actual_str = str(validator_dic['check_value'])
                        found_error = False
                        # Known assertion errors are classified to a concrete type.
                        for error_type, sub_error_dic in error_map.items():
                            if found_error:
                                break
                            for sub_error, error_kw_list in sub_error_dic.items():
                                if found_error:
                                    break
                                for error_kw in error_kw_list:
                                    if isinstance(error_kw, list):
                                        all_found = True
                                        for kw in error_kw:
                                            if kw not in actual_str:
                                                all_found = False
                                                break
                                        if all_found:
                                            records[i]['error_type'] = error_type
                                            records[i]['attachment'] = sub_error + ' -- ' + actual_str
                                            found_error = True
                                            break
                                    else:
                                        if error_kw in actual_str:
                                            records[i]['error_type'] = error_type
                                            records[i]['attachment'] = sub_error + ' -- ' + actual_str
                                            found_error = True
                                            break
                                if found_error:
                                    break
                        # Unknown assertion errors default to assertion failure
                        # with a human-readable expected/actual description.
                        if not records[i]['error_type']:
                            records[i]['error_type'] = '断言失败'
                            records[i]['attachment'] = '断言失败 -- 校验方法: {0}, 校验内容: {1}, 预期结果: {2}, 实际结果: {3}'.format(
                                get_validator_desc(validator_dic['comparator']),
                                validator_dic['check'],
                                validator_dic['expect'],
                                actual_str
                            )
                        # Only the first failing validator is reported.
                        break
            # One failed sub case fails the whole case.
            if not step_success:
                case_success = False
        # Accumulate per-case success/failure into the overall stats.
        if case_success:
            detail_dic['stat']['successes'] = 1
            summary['stat']['testsRun'] += 1
            summary['stat']['successes'] += 1
        else:
            detail_dic['stat']['failures'] = 1
            summary['stat']['testsRun'] += 1
            summary['stat']['failures'] += 1
    return summary
    def handle_ui_testcase(self, action, **kwargs):
        """Create or update a UI testcase.

        Resolves page names for steps and validations, assembles the ui_request
        payload, appends setup cases to the include list, then inserts or
        updates the record depending on *action* ('add' or 'edit').

        :param action: 'add' or 'edit'
        :param kwargs: UI testcase payload (base, setupInfo, variableInfo,
            validateInfo, include, steps)
        """
        base = kwargs.pop("base")
        module_id = base.pop("moduleId")
        system_id = base.pop("systemId")
        testcase_name = base.pop("testcaseName")
        simple_desc = base.pop("testcaseDesc")
        setup_info = kwargs.pop("setupInfo")
        variable_info = kwargs.pop("variableInfo")
        validate_Info = kwargs.pop("validateInfo")
        include = kwargs.pop("include")
        steps = kwargs.pop("steps")
        setup_case_list = []
        # Collect setup-case references; DB-operation setups are not handled yet.
        for setup in setup_info:
            if setup["setup_type"] == 'setupcase':
                setup_case_list.append(setup["setup_args"])
            elif setup["setup_type"] == 'setup_db_operation':
                # sql = setup["args"]["sql"]
                pass
        # Resolve each step's page name from its page id.
        for step in steps:
            '''根据页面id返回page名称'''
            if step["page_id"]:
                page_id = step["page_id"]
                obj = UICasePageInfoManager.query_ui_page(id=page_id)
                page_name = obj.page_name
                step["page_name"] = page_name
        # Resolve page names for result validations as well.
        if validate_Info:
            for validate in validate_Info:
                page_id = validate["page_id"]
                obj = UICasePageInfoManager.query_ui_page(id=page_id)
                page_name = obj.page_name
                validate["page_name"] = page_name
        ui_request = {
            "systemId": system_id,
            "testcases": [{
                "name": testcase_name,
                "teststeps": steps,
                "variables": variable_info,
                "validates": validate_Info,
            }]
        }
        # Public variables: normalize include, then append the setup cases.
        if not isinstance(include, list):
            include = [{"public_variables": []}]
        include.append({"setup_cases": setup_case_list})
        # NOTE(review): keyword 'inlude' looks like a typo for 'include' but
        # presumably matches the manager's signature -- confirm before renaming.
        if action == 'add':
            UITestCaseInfoManage.insert_ui_testcase(
                testcase_name=testcase_name,
                simple_desc=simple_desc,
                request=json_dumps(ui_request),
                inlude=json_dumps(include),
                module_id=module_id)
        elif action == 'edit':
            testcase_id = base.pop("id", None)
            UITestCaseInfoManage.update_ui_testcase(
                id_=testcase_id,
                testcase_name=testcase_name,
                inlude=json_dumps(include),
                request=json_dumps(ui_request),
                simple_desc=simple_desc,
                module_id=module_id)
    def save_task_results(self):
        """Persist results to api_run_task_result and api_testcase_reuse_record.

        Reads each worker's summary file, aggregates totals, updates the reuse
        records and the run-result row, and tracks the callback celery task
        status (RUNNING -> SUCCESS, or ERROR on any exception, re-raised).
        """
        summary_list = []
        for summary_path in self.summary_path_list:
            # Workers with nothing to run report a sentinel path -- skip them.
            if not summary_path or 'worker_summary_path is None' == summary_path:
                continue
            with open(summary_path, 'r') as f:
                summary_str = f.readline()
                summary_dict = json_loads(summary_str)
                summary_list.append(summary_dict)
                # Take run_task_result_id / log_dir from the first summary that has them.
                if not self.run_task_result_id:
                    self.run_task_result_id = summary_dict['run_task_result_id'] if 'run_task_result_id' in summary_dict else None
                if not self.log_dir:
                    self.log_dir = summary_dict['log_dir'] if 'log_dir' in summary_dict else None
        # Persist the aggregated summary_list under the server's run_task_logs dir.
        with open('{0}task_run_{1}_summary.log'.format(self.log_dir, self.run_task_result_id), 'w') as f:
            f.write(json_dumps(summary_list))
        callback_task_obj = CeleryTaskRecordManager.get_callback_celery(api_run_task_result_id=self.run_task_result_id)
        try:
            # Mark the callback celery task as RUNNING while results are saved.
            CeleryTaskRecordManager.update_celery(callback_task_obj.id, celery_task_status='RUNNING')
            total_cases = 0
            for summary in summary_list:
                total_cases += summary.pop('total_cases')
            res_obj = ApiRunTaskResultManager.get_result(id=self.run_task_result_id)
            task_obj = ApiTaskInfoManager.get_task(id=res_obj.api_task_id)
            # Full interface id list of the task: from case_tree for task types
            # 1 and 3, otherwise from effect_intf_id_list.
            if task_obj.task_type in (1, 3):
                task_intf_id_list = json_loads(task_obj.case_tree)['intf_id_list']
            else:
                task_intf_id_list = json_loads(task_obj.effect_intf_id_list)
            # Update the reuse-record table; returns
            # [covered_intf_id_set, run_cases, success_cases].
            res_list = save_testcase_reuse_record(summary_list)
            # Interface ids covered by this run.
            covered_intf_id_set = res_list[0]
            # Number of cases executed in this run.
            run_cases = res_list[1]
            # Number of successful cases in this run.
            success_cases = res_list[2]
            # Interfaces NOT covered this run (symmetric difference).
            uncovered_intf_id_list = list(set(task_intf_id_list) ^ covered_intf_id_set)
            # Failed cases this run.
            fail_cases = run_cases - success_cases
            # Cases that never ran.
            not_run_cases = total_cases - run_cases
            # Write the aggregated numbers back to api_run_task_result.
            ApiRunTaskResultManager.update_result(
                self.run_task_result_id,
                total_cases=total_cases,
                not_run_cases=not_run_cases,
                run_cases=run_cases,
                success_cases=success_cases,
                fail_cases=fail_cases,
                end_time=datetime.now(),
                covered_intf_id_list=json_dumps(list(covered_intf_id_set)),
                uncovered_intf_id_list=json_dumps(uncovered_intf_id_list)
            )
            # Mark the callback celery task as SUCCESS.
            CeleryTaskRecordManager.update_celery(callback_task_obj.id, celery_task_status='SUCCESS')
        except Exception as err:
            # Best effort: stamp the end time and flag the celery task as ERROR,
            # then re-raise so the failure is visible upstream.
            ApiRunTaskResultManager.update_result(self.run_task_result_id, end_time=datetime.now())
            CeleryTaskRecordManager.update_celery(callback_task_obj.id, celery_task_status='ERROR')
            print('\n'.join([str(err), traceback.format_exc()]))
            raise Exception(err)
def update_task_info(self, api_task_id, changes, seq_no): """ 解析回调接口返回的变更内容 :param api_task_id: :param changes: :param seq_no: :return: """ affect_http = set() affect_dubbo = set() affect_mq = set() affect_elasticJob = set() for affects in [change['affects'] for change in changes]: for methods in [affect['methods'] for affect in affects]: for method in methods: if method.get('http'): method_http = method.get('http').get('url') affect_http.add(method_http) if method.get('dubbo'): role = method.get('dubbo').get('role') method_dubbo = method.get('dubbo').get('service').get('interface') + '.' + method.get('method') \ if role == 'service' else method.get('dubbo').get('reference').get( 'interface') + '.' + method.get('method') affect_dubbo.add(method_dubbo) if method.get('mq'): role = method.get('mq').get('role') method_mq = method.get('mq').get('producer').get('topic') \ if role == 'producer' else method.get('mq').get('consumer').get( 'topic') + '.' + method.get('mq').get('consumer').get( 'tag') affect_mq.add(method_mq) if method.get('elasticJob'): method_elasticJob = method.get('elasticJob').get('class') + '.' 
+ method.get('method') affect_elasticJob.add(method_elasticJob) affects = { "http": list(affect_http), "dubbo": list(affect_dubbo), "mq": list(affect_mq), "elasticJob": list(affect_elasticJob)} # 查询atp平台是否存在对应接口,如有则将接口id填入effect_intf_id_list,如无则将接口数据填入uncovered_info task_info = self.atim.get_task(id=api_task_id) effect_intf_id_list = set(json_loads(task_info.effect_intf_id_list)) if task_info.effect_intf_id_list else set() uncovered_info = json_loads(task_info.uncovered_info) if task_info.uncovered_info else {} uncovered_info_http = set(uncovered_info.get('http')) if uncovered_info.get('http') else set() uncovered_info_dubbo = set(uncovered_info.get('dubbo')) if uncovered_info.get('dubbo') else set() uncovered_info_mq = set(uncovered_info.get('mq')) if uncovered_info.get('mq') else set() uncovered_info_elasticJob = set(uncovered_info.get('elasticJob')) if uncovered_info.get('elasticJob') else set() for intf_name in affects['http']: if intf_name: intf_info = self.aiif.get_intf(intf_name=intf_name) if intf_info: testcase_info = self.atcm.get_testcase(api_intf_id=intf_info.id) if testcase_info: affects['http'] = affects['http'].remove(intf_name) effect_intf_id_list.add(intf_info.id) else: uncovered_info_http.add(intf_name) else: uncovered_info_http.add(intf_name) for intf_name in affects['dubbo']: if intf_name: intf_info = self.aiif.get_intf(intf_name=intf_name) if intf_info: testcase_info = self.atcm.get_testcase(api_intf_id=intf_info.id) if testcase_info: affects['dubbo'] = affects['dubbo'].remove(intf_name) effect_intf_id_list.add(intf_info.id) else: uncovered_info_dubbo.add(intf_name) else: uncovered_info_dubbo.add(intf_name) for intf_name in affects['mq']: if intf_name: intf_info = self.aiif.get_intf(intf_name=intf_name) if intf_info: testcase_info = self.atcm.get_testcase(api_intf_id=intf_info.id) if testcase_info: affects['mq'] = affects['mq'].remove(intf_name) effect_intf_id_list.add(intf_info.id) else: uncovered_info_mq.add(intf_name) else: 
uncovered_info_mq.add(intf_name) for intf_name in affects['elasticJob']: if intf_name: intf_info = self.aiif.get_intf(intf_name=intf_name) if intf_info: testcase_info = self.atcm.get_testcase(api_intf_id=intf_info.id) if testcase_info: affects['elasticJob'] = affects['elasticJob'].remove(intf_name) effect_intf_id_list.add(intf_info.id) else: uncovered_info_elasticJob.add(intf_name) else: uncovered_info_elasticJob.add(intf_name) effect_intf_id_list = list(effect_intf_id_list) uncovered_info['http'] = list(uncovered_info_http) uncovered_info['dubbo'] = list(uncovered_info_dubbo) uncovered_info['mq'] = list(uncovered_info_mq) uncovered_info['elasticJob'] = list(uncovered_info_elasticJob) # 判断当前是否是同一个api_task下的最后一个回调,如果是则更新api_task_info表里面对应记录的task_status为1(启动) git_diffs = self.gdvm.get_git_diff_versions_special(seq_no, api_task_id) if not git_diffs: self.atim.update_task(api_task_id, effect_intf_id_list=json_dumps(effect_intf_id_list), uncovered_info=json_dumps(uncovered_info), task_status=1) else: flag = 0 for row in git_diffs: if not row.detail: flag = 1 break if flag == 1: self.atim.update_task(api_task_id, effect_intf_id_list=json_dumps(effect_intf_id_list), uncovered_info=json_dumps(uncovered_info)) if flag == 0: self.atim.update_task(api_task_id, effect_intf_id_list=json_dumps(effect_intf_id_list), uncovered_info=json_dumps(uncovered_info), task_status=1)
def http_runner_run(**kwargs):
    """Run a testset through HttpRunner and return the processed summary.

    Builds a per-run log path (by interface, single main case, or main-case
    list), executes the runner with failfast, strips file objects from the raw
    summary, then enriches it (perfect_summary, add_memo, identify_errors).

    :return: dict with keys 'summary', 'run_task_result_id', 'log_dir'
    """
    log_dir = kwargs.pop('log_dir')
    env_id = kwargs.pop('env_id')
    testset = kwargs.pop('testset')
    test_meta_list = kwargs.pop('test_meta_list')
    run_task_result_id = kwargs.pop('run_task_result_id')
    intf_id = kwargs.pop('intf_id', None)
    main_case_id = kwargs.pop('main_case_id', None)
    main_case_id_list = kwargs.pop('main_case_id_list', None)
    # Log file name encodes what granularity this run covers.
    if intf_id:
        log_path = '{0}task_run_{1}_intf_{2}.log'.format(log_dir, run_task_result_id, intf_id)
    elif main_case_id:
        log_path = '{0}task_run_{1}_main_case_{2}.log'.format(log_dir, run_task_result_id, main_case_id)
    else:
        log_path = '{0}task_run_{1}_main_case_list_{2}.log'.format(log_dir, run_task_result_id, main_case_id_list)
    # Initialize the HttpRunner instance.
    hr_kwargs = {
        "failfast": True,
        "log_path": log_path
    }
    hr_runner = HttpRunner(**hr_kwargs)
    # NOTE(review): start_time and testset_json are computed but never used below.
    start_time = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
    hr_logger.log_warning("【START】测试开始! (ง •_•)ง")
    hr_logger.log_warning("【环境】: {}".format(env_id))
    try:
        testset_json = json_dumps(testset)
    except Exception:
        testset_json = testset
    # Execute the tests; wrap any failure with the full traceback text.
    try:
        hr_runner.run(testset)
        hr_logger.log_info("【结束调用HttpRunner】")
    except Exception:
        raise Exception(traceback.format_exc())
    # Drop uploaded file handles from each request record.
    for detail in hr_runner.summary["details"]:
        for record in detail["records"]:
            record["meta_data"]["request"].pop("files", None)
    # Remove remaining file objects anywhere in the summary.
    summary_remove_file_obj(hr_runner.summary)
    # Work on a deep copy so the runner's own summary stays untouched.
    summary = deepcopy(hr_runner.summary)
    perfect_summary(summary, test_meta_list)
    summary = add_memo(summary)
    # Classify errors (case_id / error_type per record).
    summary = identify_errors(summary)
    # Round-trip through JSON to guarantee a plain-serializable structure.
    return {"summary": json_loads(json_dumps(summary)), "run_task_result_id": run_task_result_id, 'log_dir': log_dir}
def celery_run_single_intf_or_single_main_case(self, run_task_result_id, env_id, intf_id_or_main_case_id,
                                               is_intf, log_dir, testcase_id_list=None):
    """Celery sub-task: run one interface's cases or one full-link main case.

    Loads the testset, runs it through http_runner_run, and writes the result
    dict to a per-worker .summary file whose path is returned (or the sentinel
    string 'worker_summary_path is None' when nothing could be prepared).
    Celery task status is tracked via CeleryTaskRecordManager.
    """
    # with app.app_context():
    if True:  # placeholder for the app-context block kept above
        print("开始分步任务: {0}, 接口id/全链路用例id: {1}, 是否接口:{2}".format(
            self.request.id, intf_id_or_main_case_id, is_intf))
        CeleryTaskRecordManager.insert_celery(
            celery_task_no=self.request.id, celery_task_status='_loading',
            api_run_task_result_id=run_task_result_id)
        total_cases = 0
        kwargs = None
        intf_id = None
        main_case_id = None
        result_dic = {
            "run_task_result_id": run_task_result_id,
            "log_dir": log_dir
        }
        worker_summary_path = None
        try:
            if is_intf:
                intf_id = intf_id_or_main_case_id
                # testcase_objs = ApiTestcaseInfoManager.get_testcases(api_intf_id=intf_id, case_status=0)
                # testcase_id_list = [testcase_obj.id for testcase_obj in testcase_objs]
                if testcase_id_list:
                    total_cases += len(testcase_id_list)
                    kwargs = {
                        "env_id": env_id,
                        "testcase_id_list": testcase_id_list,
                    }
                    worker_summary_path = '{0}task_run_{1}_intf_{2}.summary'.format(
                        log_dir, run_task_result_id, intf_id)
            else:
                main_case_id = intf_id_or_main_case_id
                # valid_main_case_obj = ApiTestcaseMainManager.get_testcase_main(id=main_case_id, case_status=0)
                if main_case_id:
                    total_cases += 1
                    kwargs = {
                        "env_id": env_id,
                        "testcase_main_id_list": [main_case_id],
                    }
                    worker_summary_path = '{0}task_run_{1}_main_case_{2}.summary'.format(
                        log_dir, run_task_result_id, main_case_id)
            if kwargs:
                # Load the testset and run it.
                api_loader = ApiTestLoader(**kwargs)
                testset = api_loader.get_testset_list()
                test_meta_list = api_loader.get_test_meta_list()
                CeleryTaskRecordManager.update_celery_by_task_no(
                    celery_task_no=self.request.id, celery_task_status='_running')
                runner_kwargs = {
                    'log_dir': log_dir,
                    'env_id': env_id,
                    'testset': testset,
                    'test_meta_list': test_meta_list,
                    'run_task_result_id': run_task_result_id,
                    'intf_id': intf_id,
                    'main_case_id': main_case_id,
                }
                result_dic = api_runner.http_runner_run(**runner_kwargs)
                result_dic['total_cases'] = total_cases
                CeleryTaskRecordManager.update_celery_by_task_no(
                    celery_task_no=self.request.id, celery_task_status='_success')
                print("结束分步任务: {0}, 接口id/全链路用例id: {1}, 是否接口:{2}".format(
                    self.request.id, intf_id_or_main_case_id, is_intf))
                # return result_dic
            # No valid testcases to run.
            else:
                result_dic.update({
                    'not_run_intf_id_or_main_case_id': intf_id_or_main_case_id,
                    'total_cases': total_cases
                })
                CeleryTaskRecordManager.update_celery_by_task_no(
                    celery_task_no=self.request.id, celery_task_status='_success')
                print("结束分步任务, 未找到可执行用例: {0}, 接口id/全链路用例id: {1}, 是否接口:{2}".
                      format(self.request.id, intf_id_or_main_case_id, is_intf))
                # return result_dic
        except Exception as err:
            print("分步任务异常退出: {0}, 接口id/全链路用例id: {1}, 是否接口:{2}, 错误信息:".format(
                self.request.id, intf_id_or_main_case_id, is_intf, traceback))
            print('\n'.join([str(err), traceback.format_exc()]))
            result_dic.update({
                'total_cases': total_cases,
                'not_run_intf_id_or_main_case_id': intf_id_or_main_case_id
            })
            CeleryTaskRecordManager.update_celery_by_task_no(
                celery_task_no=self.request.id, celery_task_status='_error')
        finally:
            # NOTE(review): returning from finally suppresses any in-flight
            # exception -- here all exceptions are already caught above, so the
            # effect is only that the summary path is always returned.
            if worker_summary_path:
                with open(worker_summary_path, 'w') as f:
                    f.write(json_dumps(result_dic))
                return worker_summary_path
            else:
                return 'worker_summary_path is None'
def sort_objs_by_id_list(objs, id_list):
    """Order database objects to match ``id_list``.

    Objects whose id does not appear in ``id_list`` are dropped; ids with
    no matching object are skipped. When several objects share an id, the
    first one in ``objs`` wins — same semantics as the original nested
    scan, but in O(len(objs) + len(id_list)) instead of O(n*m).

    Args:
        objs: iterable of objects exposing an ``id`` attribute (must be
            hashable, which DB integer primary keys are).
        id_list: desired id order.

    Returns:
        New list of objects ordered per ``id_list``.
    """
    index = {}
    for obj in objs:
        # setdefault keeps the FIRST object seen for a given id, matching
        # the original inner loop's `break` on first match.
        index.setdefault(obj.id, obj)
    return [index[id_] for id_ in id_list if id_ in index]


if __name__ == '__main__':
    # Ad-hoc manual check: load one testcase and dump the generated testset.
    kwargs_ = {
        "testcase_id_list": [6393],  # http demo
        # "testcase_main_id_list": [183],  # http demo
        # "env_name": "MOCK",
        # "test_tree": {"5": {"66": ["140", "142"]}, "68": {"71": ["141"]}}
        "env_id": "1",
        # "testsuite_id_list": ["1"],
    }
    from atp.app import create_app
    app = create_app()
    with app.app_context():
        loader = ApiTestLoader(**kwargs_)
        print(json_dumps(loader.get_testset_list()))
        print(json_dumps(loader.get_test_meta_list()))
def celery_run_main_case_list(self, run_task_result_id, env_id, main_case_id_list, log_dir):
    """Celery step-task: run a batch of main (end-to-end) cases.

    Args:
        run_task_result_id: id of the parent run-task result row.
        env_id: environment id the cases run against.
        main_case_id_list: ids of the main cases to execute.
        log_dir: log directory; assumed to end with a path separator, since
            the summary path is built by plain concatenation.

    Returns:
        Path of the summary file written for this worker, or the literal
        string 'worker_summary_path is None' (unreachable in practice, as
        the path is always built here; kept for symmetry with the sibling
        single-intf task).
    """
    print("开始分步任务: {0}, 全链路用例id列表: {1}".format(self.request.id, main_case_id_list))
    try:
        CeleryTaskRecordManager.insert_celery(
            celery_task_no=self.request.id, celery_task_status='_loading',
            api_run_task_result_id=run_task_result_id)
    except Exception as err:
        # BUG FIX: the original also passed the `traceback` module as a 3rd
        # positional argument to .format(); str.format silently ignores
        # surplus arguments. Dropped.
        print("开始分步任务时存在错误: {0}, 全链路用例id列表: {1}, 错误信息:".format(
            self.request.id, main_case_id_list))
        print('\n'.join([str(err), traceback.format_exc()]))
        # Best-effort retry of the status insert (behavior kept from the
        # original; a second failure propagates to celery).
        CeleryTaskRecordManager.insert_celery(
            celery_task_no=self.request.id, celery_task_status='_loading',
            api_run_task_result_id=run_task_result_id)
    total_cases = 0
    result_dic = {
        "run_task_result_id": run_task_result_id,
        "log_dir": log_dir
    }
    kwargs = {"env_id": env_id, "testcase_main_id_list": []}
    # NOTE: the list's repr is embedded in the file name — confirm the ids
    # list stays short enough for the filesystem.
    worker_summary_path = '{0}task_run_{1}_main_case_list_{2}.summary'.format(
        log_dir, run_task_result_id, main_case_id_list)
    try:
        # All ids are taken at face value; per-id status validation was
        # deliberately dropped from an earlier revision.
        total_cases = len(main_case_id_list)
        kwargs["testcase_main_id_list"] = main_case_id_list
        if kwargs["testcase_main_id_list"]:
            api_loader = ApiTestLoader(**kwargs)
            testset = api_loader.get_testset_list()
            test_meta_list = api_loader.get_test_meta_list()
            CeleryTaskRecordManager.update_celery_by_task_no(
                celery_task_no=self.request.id, celery_task_status='_running')
            runner_kwargs = {
                'log_dir': log_dir,
                'env_id': env_id,
                'testset': testset,
                'test_meta_list': test_meta_list,
                'run_task_result_id': run_task_result_id,
                'main_case_id_list': main_case_id_list,
            }
            result_dic = api_runner.http_runner_run(**runner_kwargs)
            result_dic['total_cases'] = total_cases
            CeleryTaskRecordManager.update_celery_by_task_no(
                celery_task_no=self.request.id, celery_task_status='_success')
            print("结束分步任务: {0}, 全链路用例id列表: {1}".format(
                self.request.id, main_case_id_list))
        else:
            # Empty id list: mark the task successful but record nothing ran.
            result_dic.update({
                'main_case_id_list': main_case_id_list,
                'total_cases': total_cases
            })
            CeleryTaskRecordManager.update_celery_by_task_no(
                celery_task_no=self.request.id, celery_task_status='_success')
            print("结束分步任务, 未找到可执行用例: {0}, 全链路用例id列表: {1}".format(
                self.request.id, main_case_id_list))
    except Exception as err:
        # BUG FIX: surplus `traceback` positional argument dropped (ignored
        # by str.format in the original).
        print("分步任务异常退出: {0}, 全链路用例id列表: {1}, 错误信息:".format(
            self.request.id, main_case_id_list))
        print('\n'.join([str(err), traceback.format_exc()]))
        result_dic.update({
            'total_cases': total_cases,
            'main_case_id_list': main_case_id_list
        })
        CeleryTaskRecordManager.update_celery_by_task_no(
            celery_task_no=self.request.id, celery_task_status='_error')
    finally:
        # Returning from `finally` is intentional: the summary path is the
        # task result even when the except branch handled an error.
        if worker_summary_path:
            with open(worker_summary_path, 'w') as f:
                f.write(json_dumps(result_dic))
            return worker_summary_path
        else:
            return 'worker_summary_path is None'
if width >= 8 and row[6]: mapped_dic[row[0]][row[2]][row[4]][row[6]] = { 'name': row[7] } else: if width >= 6 and row[4] not in mapped_dic[row[0]][row[2]]: mapped_dic[row[0]][row[2]][row[4]] = {'name': row[5]} if width >= 8 and row[6]: mapped_dic[row[0]][row[2]][row[4]][row[6]] = { 'name': row[7] } else: if width >= 8 and row[6] not in mapped_dic[row[0]][row[2]][ row[4]]: mapped_dic[row[0]][row[2]][row[4]][row[6]] = { 'name': row[7] } for row in patch_res: if row[0] in mapped_dic: if row[2] and row[2] not in mapped_dic[row[0]]: mapped_dic[row[0]][row[2]] = {'name': row[3]} # print(json_dumps(mapped_dic)) # print(mapped_dic) return mapped_dic if __name__ == '__main__': print(json_dumps(read_custom()))
def copy_testcase(self):
    """Copy a main testcase N times.

    Reads from ``self.data``:
        id       -- id of the source main testcase.
        copyNum  -- number of copies to create.
        copyType -- 1 (default): copy the main case and *reference* its
                    existing sub-cases; 2: copy the main case *and* clone
                    its sub-cases.

    Returns:
        make_response dict: "000" on success, "100" on bad input,
        "101" on an unknown copyType.
    """
    try:
        testcase_id = self.data.pop('id')
        copy_num = int(self.data.pop('copyNum'))
        # 1: copy main case, reference sub-cases; 2: copy main and sub-cases
        copy_type = int(self.data.pop('copyType', 1))
    except (KeyError, ValueError):
        return make_response({"code": "100", "desc": "入参校验失败"})
    # NOTE(review): tm_obj is not checked for None -- an unknown id raises
    # AttributeError below; confirm callers always pass a valid id.
    tm_obj = self.atmm.get_testcase_main(id=testcase_id)
    # case_type 2 cases hang off a product line, the rest off an interface;
    # new copies are appended after the last index within that group.
    if tm_obj.case_type == 2:
        product_line_id = tm_obj.api_product_line_id
        pre_obj = self.atmm.get_last_obj_by_product_line(product_line_id)
    else:
        intf_id = tm_obj.api_intf_id
        pre_obj = self.atmm.get_last_obj_by_intf(intf_id)
    index = pre_obj.index + 1 if pre_obj else 0
    # NOTE(review): primary-key ids are pre-computed from the current last
    # row, which can collide under concurrent copies -- presumably accepted.
    table_last_obj = self.atmm.get_last_obj()
    insert_id = table_last_obj.id + 1 if table_last_obj else 1
    if copy_type == 2:
        # Deep copy: clone every sub-case so each main-case copy owns its
        # own, independent sub-cases.
        from_sub_list = json_loads(tm_obj.sub_list)
        sub_info_list = []
        for from_sub_id in from_sub_list:
            t_sub_obj = self.atsm.get_testcase_sub(id=from_sub_id)
            tr_obj = self.atrqm.get_request(api_testcase_id=from_sub_id)
            sub_info_list.append({
                'request': tr_obj.request,
                'sub_name': t_sub_obj.sub_name,
                'request_type': t_sub_obj.request_type,
                'include': t_sub_obj.include,
                'simple_desc': t_sub_obj.simple_desc,
                'case_type': t_sub_obj.case_type,
                'api_intf_id': t_sub_obj.api_intf_id,
                'creator': self.username,
                'expect_result': t_sub_obj.expect_result,
            })
        testcase_insert_list = []
        testcase_id_list = []
        for i in range(copy_num):
            # deepcopy so each iteration persists a fresh set of sub rows.
            update_list = copy.deepcopy(sub_info_list)
            # Persists the cloned sub-cases; presumably returns the new
            # sub-id list -- verify against ApiTestcaseSubManager.
            to_sub_list = ApiTestcaseSubManager.batch_update_testcase_sub(
                update_list)
            case_name = tm_obj.testcase_name + '_copy_{0}_{1}'.format(
                testcase_id, i + 1)
            testcase_insert_list.append({
                'id': insert_id + i,
                'testcase_name': case_name,
                'simple_desc': tm_obj.simple_desc,
                'case_type': tm_obj.case_type,
                'case_status': tm_obj.case_status,
                'api_intf_id': tm_obj.api_intf_id,
                'api_product_line_id': tm_obj.api_product_line_id,
                'sub_list': json_dumps(to_sub_list),
                'creator': self.username,
                'expect_result': tm_obj.expect_result,
                'index': index + i,
                'setup_flow_list': tm_obj.setup_flow_list,
                'main_teardown_hooks': tm_obj.main_teardown_hooks,
            })
            testcase_id_list.append(insert_id + i)
        self.atmm.batch_insert_testcase_main(testcase_insert_list)
        # Copy tag relations: every copy gets the source case's tags.
        tag_relation_objs = self.atmtrm.get_relations(
            api_testcase_id=testcase_id)
        tag_id_list = [str(obj.tag_id) for obj in tag_relation_objs]
        tag_relation_insert_list = []
        for i in range(copy_num):
            for tag_id in tag_id_list:
                tag_relation_insert_list.append({
                    'api_testcase_id': testcase_id_list[i],
                    'tag_id': tag_id
                })
        self.atmtrm.batch_insert_relation(tag_relation_insert_list)
    elif copy_type == 1:
        # Shallow copy: new main cases reference the SAME sub-case ids.
        to_sub_list = json_loads(tm_obj.sub_list)
        testcase_insert_list = []
        testcase_id_list = []
        for i in range(copy_num):
            case_name = tm_obj.testcase_name + '_copy_{0}_{1}'.format(
                testcase_id, i + 1)
            testcase_insert_list.append({
                'id': insert_id + i,
                'testcase_name': case_name,
                'simple_desc': tm_obj.simple_desc,
                'case_type': tm_obj.case_type,
                'case_status': tm_obj.case_status,
                'api_intf_id': tm_obj.api_intf_id,
                'api_product_line_id': tm_obj.api_product_line_id,
                'sub_list': json_dumps(to_sub_list),
                'creator': self.username,
                'expect_result': tm_obj.expect_result,
                'index': index + i,
                'setup_flow_list': tm_obj.setup_flow_list,
                'main_teardown_hooks': tm_obj.main_teardown_hooks,
            })
            testcase_id_list.append(insert_id + i)
        self.atmm.batch_insert_testcase_main(testcase_insert_list)
        # Copy tag relations: every copy gets the source case's tags.
        tag_relation_objs = self.atmtrm.get_relations(
            api_testcase_id=testcase_id)
        tag_id_list = [str(obj.tag_id) for obj in tag_relation_objs]
        tag_relation_insert_list = []
        for i in range(copy_num):
            for tag_id in tag_id_list:
                tag_relation_insert_list.append({
                    'api_testcase_id': testcase_id_list[i],
                    'tag_id': tag_id
                })
        self.atmtrm.batch_insert_relation(tag_relation_insert_list)
    else:
        return make_response({
            "code": "101",
            "desc": "错误的copy_type:{0}".format(copy_type)
        })
    return make_response({
        "code": "000",
        "desc": "用例{0}复制成功, 数量{1}".format(testcase_id, copy_num)
    })
def init_table_api_intf_default_request():
    """One-off migration: populate the api_intf_default_request table.

    For every interface that has no default request yet, merges the request
    bodies of all of its testcases into one representative body and stores
    it together with a parsed field-by-field detail structure. Interfaces
    that already have a default request are skipped, so the migration is
    safe to re-run.
    """
    intf_objs = aiim.get_intfs()
    process_len = len(intf_objs)
    process_id = 0
    for intf_obj in intf_objs:
        print('intf_id: {}'.format(intf_obj.id))
        t1 = time.time()
        process_id += 1
        # Progress indicator in percent.
        print('{:.1f}%'.format(process_id*100.0/process_len))
        # Already initialized -- skip (makes the migration idempotent).
        if aidrm.get_request(api_intf_id=intf_obj.id):
            continue
        tc_objs = atim.get_testcases(api_intf_id=intf_obj.id)
        final_body = None
        for tc_obj in tc_objs:
            tc_request_obj = atrm.get_request(api_testcase_id=tc_obj.id)
            if not tc_request_obj or not tc_request_obj.request:
                continue
            request_dic = json_loads(tc_request_obj.request)
            try:
                # tc_obj.type selects where the body lives in the request
                # dict: 1 -> 'json', 2 -> 'json'.'args' (merged as a list),
                # 3 -> 'json'.'msg' (a JSON-encoded string).
                # NOTE(review): presumably 1=HTTP, 2=DUBBO, 3=MQ -- confirm.
                if tc_obj.type == 1:
                    body = request_dic['teststeps'][0]['request']['json']
                    if not final_body:
                        final_body = body
                    else:
                        final_body = merge_request_body(final_body, body)
                elif tc_obj.type == 2:
                    body = request_dic['teststeps'][0]['request']['json']['args']
                    if not final_body:
                        final_body = body
                    else:
                        final_body = merge_request_body(final_body, body, is_list=True)
                elif tc_obj.type == 3:
                    body_str = request_dic['teststeps'][0]['request']['json']['msg']
                    body = json_loads(body_str)
                    if not final_body:
                        final_body = body
                    else:
                        final_body = merge_request_body(final_body, body)
                else:
                    continue
            except KeyError:
                # Request dict does not have the expected shape; skip it.
                print('Error!!')
                continue
        t3 = time.time()
        d_time = t3 - t1
        # Per-interface elapsed time.
        print("==== Finish t3, run {:.3}s ====\n".format(d_time))
        if final_body is not None:
            body = remove_var_mark(final_body)
            # Parse the merged body into the stored detail structure.
            p = ParseBodyToDetail(body)
            p.parse_main()
            detail_str = json_dumps(p.detail)
            body_str = json_dumps(body)
            aidrm.insert_request(api_intf_id=intf_obj.id, request=body_str, request_detail=detail_str)
def api_run_test(**kwargs):
    """Assemble and execute a set of API test cases with HttpRunner.

    Pops control keys from ``kwargs`` (report_id, plan_name, project_id,
    failfast); everything remaining is forwarded to ApiTestLoader. The
    presence of ``testcase_main_id_list`` switches "main case" mode, which
    only changes how results are persisted (``is_main``).

    Returns:
        The report URL string (currently the fixed text '不生成报告' —
        HTML report generation is disabled).

    Raises:
        LoadCaseError: when the testset cannot be assembled or is empty.
        RunCaseError: when execution or summary post-processing fails.
    """
    report_id = kwargs.pop('report_id', None)
    plan_name = kwargs.pop('plan_name', None)
    project_id = kwargs.pop('project_id', None)
    # get (not pop): the loader itself also consumes testcase_main_id_list.
    testcase_main_id_list = kwargs.get('testcase_main_id_list', None)
    failfast = kwargs.pop('failfast', False)
    if testcase_main_id_list:
        is_main = True
    else:
        is_main = False
    try:
        logger.debug(
            '=============================={dir}run_{report_id}.log'.format(
                dir=run_case_log_dir, report_id=report_id))
        hr_kwargs = {
            "failfast": failfast,
            "log_path": '{dir}run_{report_id}.log'.format(dir=run_case_log_dir,
                                                          report_id=report_id)
        }
        runner = HttpRunner(**hr_kwargs)
        loader = ApiTestLoader(**kwargs)
        testset = loader.get_testset_list()
        test_meta_list = loader.get_test_meta_list()
        if not testset:
            raise LoadCaseError('没有可执行的用例')
        logger.debug("{1} testset:{0}".format(testset, type(testset)))
    except Exception as err:
        # Loading failed: persist an empty report, log, and bail out.
        save_report(report_path=None,
                    runner_summary=None,
                    project_id=project_id,
                    report_id=report_id)
        hr_logger.log_error("【ERROR】组装用例出错!")
        hr_logger.log_error('\n'.join([str(err), traceback.format_exc()]))
        hr_logger.log_info("【END】测试结束!")
        # NOTE(review): if HttpRunner(**hr_kwargs) itself raised, `runner`
        # is unbound here and this line raises UnboundLocalError -- confirm.
        hr_logger.remove_handler(runner.handler)
        raise LoadCaseError
    try:
        # NOTE(review): start_time is computed but never used.
        start_time = time.strftime('%Y-%m-%d %H-%M-%S',
                                   time.localtime(time.time()))
        hr_logger.log_info("【START】测试开始! (ง •_•)ง")
        hr_logger.log_info("【环境】: {}".format(kwargs.get('env_name', None)))
        # The testset may hold non-JSON-serializable objects; log raw then.
        try:
            testset_json = json_dumps(testset)
        except Exception:
            testset_json = testset
        hr_logger.log_debug("【调用HttpRunner】: {0}".format(testset_json))
        runner.run(testset)
        hr_logger.log_info("【结束调用HttpRunner】")
        perfect_summary(runner.summary, test_meta_list)
        """记录用例复用记录"""
        # Build a JSON-safe deep copy of the summary for the reuse-record
        # table, annotate it, then discard it.
        summary_remove_file_obj(runner.summary)
        summary_for_reuse = copy.deepcopy(runner.summary)
        summary_for_reuse = add_memo(summary_for_reuse)
        # Classify failures before persisting reuse records.
        summary_for_reuse = identify_errors(summary_for_reuse)
        # Updates the api_testcase_reuse_record table; the json round-trip
        # strips any remaining non-JSON types from the copy.
        save_testcase_reuse_record([{
            "summary": json_loads(json_dumps(summary_for_reuse))
        }])
        del summary_for_reuse
        '''报告优化:1、汉化(包括日志里面的字段) 2、开始时间和持续时间合并成一行 3、增加一个字段“错误类型”,如果用例错误,显示该字段,并说明期望与预期值; 否则该字段不显示 4.log里面去掉一些数据重复和不重要的;行和字段(请求headers,返回体的headers,reason,url,“”ok”) 5.将请求体和返回值数据缩进,且字典里面的key颜色加粗 6.新增接口请求类型字段,http、dubbo、mq'''
        for detail in runner.summary["details"]:
            for record in detail["records"]:
                '''增加用例类型:test_meta_list["intf_type"]'''
                # NOTE(review): every record receives the intf_type of the
                # FIRST meta entry -- confirm this holds for mixed testsets.
                record["intf_type"] = test_meta_list[0]["intf_type"]
                '''删除报告一些无需关注的字段'''
                request_keys = ["json", "start_timestamp"]
                response_keys = [
                    "elapsed_ms", "encoding", 'ok', 'url', 'reason', 'cookies'
                ]
                for request_key in request_keys:
                    if request_key in record["meta_data"]["request"]:
                        del record["meta_data"]["request"][request_key]
                for respones_key in response_keys:
                    if respones_key in record["meta_data"]["response"]:
                        del record["meta_data"]["response"][respones_key]
                '''record.status出现error, 抛出错误信息'''
                if record['status'] == 'error':
                    error_msg = record['attachment']
                    raise Exception(error_msg)
                # (removed: a long commented-out experiment that decoded
                # request/response byte bodies into dicts and stripped the
                # `files` field from the report.)
                '''报告增加一列:错误类型:'''
                # Re-check the custom comparators; on failure flip the
                # record status, adjust both stat counters, and attach a
                # human-readable error_log entry for the report.
                for validate in record["meta_data"]["validators"]:
                    if validate["comparator"] == "json_contains":
                        check_value = validate["check_value"]
                        expect_value = validate["expect"]
                        if json_contains(check_value, expect_value) is not True:
                            validate["check_result"] = "fail"
                            record["status"] = "failure"
                            detail["stat"]["failures"] += 1
                            detail["stat"]["successes"] -= 1
                            runner.summary["stat"]["failures"] += 1
                            runner.summary["stat"]["successes"] -= 1
                            error_log = ("预期:{}未在返回报文内".format(expect_value))
                            validate["error_log"] = {
                                "json_contains": error_log
                            }
                    elif validate["comparator"] == "db_validate":
                        check_value = validate["check_value"]
                        expect_value = validate["expect"]
                        if db_validate(check_value, expect_value) is not True:
                            validate["check_result"] = "fail"
                            record["status"] = "failure"
                            detail["stat"]["failures"] += 1
                            detail["stat"]["successes"] -= 1
                            runner.summary["stat"]["failures"] += 1
                            runner.summary["stat"]["successes"] -= 1
                            error_log = ("预期:{0},实际是:{1}".format(
                                expect_value, check_value))
                            validate["error_log"] = {"db_validate": error_log}
                    elif validate["comparator"] == "db_json_validate":
                        check_value = validate["check_value"]
                        expect_value = validate["expect"]
                        if not db_json_validate(check_value, expect_value):
                            validate["check_result"] = "fail"
                            record["status"] = "failure"
                            detail["stat"]["failures"] += 1
                            detail["stat"]["successes"] -= 1
                            runner.summary["stat"]["failures"] += 1
                            runner.summary["stat"]["successes"] -= 1
                            error_log = ("预期:{0},实际是:{1}".format(
                                expect_value,
                                json.dumps(check_value).encode('utf-8').decode(
                                    'unicode_escape')))
                            validate["error_log"] = {
                                "db_json_validate": error_log
                            }
        hr_logger.log_info("【runner.summary】: {}".format(
            json_dumps(runner.summary)))
        runner_summary = copy.deepcopy(runner.summary)
        """把每条用例执行成功与否记录到testcase_info.last_run"""
        try:
            save_last_run(runner_summary, is_main=is_main)
        except Exception as e:
            # Best effort: failing to record last_run must not fail the run.
            logger.error('\n'.join([str(e), traceback.format_exc()]))
        # (removed: commented-out HTML report generation; reports are
        # currently disabled and a placeholder URL is stored instead.)
        report_url = '不生成报告'
        save_report(report_url,
                    runner_summary,
                    project_id,
                    report_id=report_id,
                    is_main=is_main)
    except Exception as err:
        save_report(report_path=None,
                    runner_summary=runner.summary,
                    project_id=project_id,
                    report_id=report_id)
        hr_logger.log_error("【ERROR】运行用例出错!")
        hr_logger.log_error('\n'.join([str(err), traceback.format_exc()]))
        raise RunCaseError
    finally:
        hr_logger.log_info("【END】测试结束!")
        hr_logger.remove_handler(runner.handler)
    return report_url
def edit_intf(self):
    """Edit an existing interface definition.

    Expects in ``self.data``: intfId, intfNameInChinese, type ('HTTP' /
    'DUBBO' / 'MQ'), info, intfRelation, and optionally request /
    requestDetail. Rebuilds the unique interface name from ``info``,
    checks for duplicates, updates the intf row and its default request,
    and propagates any public variables referenced in HTTP headers to all
    testcases under the interface.

    Returns:
        make_response dict: "000" on success; "100" (bad input), "201"
        (duplicate name), "202" (unknown id) on validation failures.
    """
    try:
        intf_id = self.data.pop('intfId')
        intf_desc = self.data.pop('intfNameInChinese')
        intf_type = self.data.pop('type')
        intf_info = self.data.pop('info')
        request_dic = self.data.pop('request', {})
        request_detail_dic = self.data.pop('requestDetail', [])
        intf_relation = self.data.pop('intfRelation')
    except KeyError:
        return make_response({"code": "100", "desc": "入参校验失败"})
    intf_obj = self.aiim.get_intf(id=intf_id)
    if not intf_obj:
        return make_response({"code": "202", "desc": "接口id\"{}\"不存在, 请刷新后重试".format(intf_id)})
    if intf_type != 'MQ':
        header_variables = []
        if intf_type == 'HTTP':
            # The interface name of an HTTP intf is its (trimmed) URL.
            intf_info['apiUrl'] = intf_info['apiUrl'].strip()
            intf_name = intf_info['apiUrl']
            header = intf_info['headers']
            # Collect $var references in the headers for later propagation.
            variable_regexp = r"\$([\w_]+)"
            header_variables = re.findall(variable_regexp, header)
        elif intf_type == 'DUBBO':
            # DUBBO interface name is "<service>.<method>".
            intf_info['dubboService'] = intf_info['dubboService'].strip()
            intf_info['dubboMethod'] = intf_info['dubboMethod'].strip()
            intf_name = '{0}.{1}'.format(intf_info['dubboService'], intf_info['dubboMethod'])
        # Duplicate-name check across every system of the same company.
        company_id = self.asim.get_system(id=intf_obj.api_system_id).api_company_id
        system_id_list = [row.id for row in self.asim.get_systems(api_company_id=company_id)]
        for obj in self.aiim.get_intfs_in_system_id_list(system_id_list):
            if obj.intf_name == intf_name and int(obj.id) != int(intf_id):
                company_name = self.acim.get_company(id=company_id).company_name
                return make_response({"code": "201",
                                      "desc": "\"{0}\"公司下存在相同接口\"{1}\", 无法将当前接口修改为\"{1}\"".format(
                                          company_name, intf_name)})
    else:
        # MQ interface name is "<topic>.<tag>"; uniqueness is per system.
        intf_info['topic'] = intf_info['topic'].strip()
        intf_info['tag'] = intf_info['tag'].strip()
        intf_name = '{0}.{1}'.format(intf_info['topic'], intf_info['tag'])
        obj = self.aiim.get_intf(intf_name=intf_name, api_system_id=intf_obj.api_system_id)
        # BUG FIX: compare ids as ints, as the non-MQ branch above already
        # does. intf_id comes from the request payload and may be a string,
        # so the original `obj.id != intf_id` could flag the interface as a
        # duplicate of itself when its own name was unchanged.
        if obj and int(obj.id) != int(intf_id):
            return make_response({"code": "201", "desc": "工程下存在相同MQ接口\"{}\", 请使用已存在的MQ接口".format(intf_name)})
    if intf_relation:
        # Front end sends pairs; keep only the second element of each.
        intf_relation = [i[1] for i in intf_relation]
        self.aiim.update_intf(intf_id,
                              intf_name=intf_name,
                              intf_desc=intf_desc,
                              intf_type=intf_type,
                              intf_info=json_dumps(intf_info),
                              last_modifier=self.username,
                              intf_relation=json_dumps(intf_relation))
    else:
        self.aiim.update_intf(intf_id,
                              intf_name=intf_name,
                              intf_desc=intf_desc,
                              intf_type=intf_type,
                              intf_info=json_dumps(intf_info),
                              last_modifier=self.username)
    self.aidrm.update_request_by_intf_id(intf_id,
                                         request=json_dumps(request_dic),
                                         request_detail=json_dumps(request_detail_dic))
    # Propagate public variables referenced in the interface headers to
    # every testcase (classic and sub) under this interface.
    if intf_type == 'HTTP' and header_variables:
        to_add_pv_id_list = []
        pv_objs = ApiPublicVariableInfoManager.get_variables(api_company_id=company_id)
        for pv_obj in pv_objs:
            for header_variable in header_variables:
                if header_variable == pv_obj.variable_name:
                    to_add_pv_id_list.append(pv_obj.id)
                    break
        # Only touch testcases when at least one referenced variable exists.
        if to_add_pv_id_list:
            tc_objs = ApiTestcaseInfoManager.get_testcases(api_intf_id=intf_id)
            for tc_obj in tc_objs:
                try:
                    pv_id_list = json_loads(tc_obj.include)[0]['public_variables']
                except (json.decoder.JSONDecodeError, IndexError, KeyError):
                    # Malformed/empty include: start from no variables.
                    pv_id_list = []
                merge_pv_id_list = pv_id_list + to_add_pv_id_list
                merge_pv_id_list = list(set(merge_pv_id_list))
                # Write back only when the variable set actually changed.
                if set(merge_pv_id_list) != set(pv_id_list):
                    include = json_dumps([{"public_variables": merge_pv_id_list}])
                    ApiTestcaseInfoManager.update_testcase(id_=tc_obj.id, include=include)
            ts_objs = ApiTestcaseSubManager.get_testcase_subs(api_intf_id=intf_id)
            for ts_obj in ts_objs:
                try:
                    pv_id_list = json_loads(ts_obj.include)[0]['public_variables']
                except (json.decoder.JSONDecodeError, IndexError, KeyError):
                    pv_id_list = []
                merge_pv_id_list = pv_id_list + to_add_pv_id_list
                merge_pv_id_list = list(set(merge_pv_id_list))
                if set(merge_pv_id_list) != set(pv_id_list):
                    include = json_dumps([{"public_variables": merge_pv_id_list}])
                    ApiTestcaseSubManager.update_testcase_sub(id_=ts_obj.id, include=include)
    return make_response({"code": "000", "desc": "接口\"{}\"修改成功".format(intf_name)})