def wrapper(self, *args, **kw):
    who = 'Nobody'
    if hasattr(self, 'username'):
        who = self.username
    logger.info(
        '<==================== {1} Begin call [{0}] ===================='.
        format(__get_full_class(self), who))
    start_time = time.time()
    try:
        c = func(self, *args, **kw)
    except Exception as err:
        text = '\n'.join([
            'an error occurred on {}'.format(get_host_port()),
            str(err),
            traceback.format_exc()
        ])
        logger.error('ATP: 接口发现未知错误 \n {traceback}'.format(traceback=text))
        # subject = 'ATP: 系统发现未知错误'
        # try:
        #     from atp.api.send_email import intf_send_mail
        #     from atp.config.default import get_config
        #     config = get_config()
        #     email_to = config.EMAIL_TO
        #     intf_send_mail(email_to, subject, text)
        #     logger.info("send mail {} {} {}".format(email_to, subject, text))
        # except Exception as e:
        #     logger.error("cannot send email: {} {} {}".format(str(e), subject, text))
        c = jsonify({"code": "999", "desc": "system error"})
    end_time = time.time()
    d_time = end_time - start_time
    logger.info(
        "==================== End call [{0}], run {1:.3}s ====================>\n"
        .format(__get_full_class(self), d_time))
    return c
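# --- Illustrative sketch (not taken from the source): `wrapper` above is clearly
# the inner closure of a method decorator; `func` and `__get_full_class` come from
# the enclosing scope. A minimal outline of how such a decorator is typically
# structured, reusing the module's existing `logger` and `time`, with a simplified
# body (no caller lookup and no error-to-JSON handling):
import functools


def log_call_sketch(func):
    @functools.wraps(func)  # preserve the wrapped method's name and docstring
    def wrapper(self, *args, **kw):
        start_time = time.time()
        try:
            return func(self, *args, **kw)
        finally:
            logger.info("End call [%s], run %.3fs",
                        type(self).__name__, time.time() - start_time)
    return wrapper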
def __get_table_columns(db_connect, library_name, table_name):
    """Return all column names of table `table_name` as a list."""
    # Replace the database name in db_connect with library_name
    config_name = db_connect.split('?')[0].split('/')[-1]
    db_connect = db_connect.replace(config_name, library_name)
    engine = create_engine(db_connect, echo=False, poolclass=NullPool)
    columns_list = None
    with engine.connect() as conn:
        try:
            metadata = MetaData(engine, reflect=True)
            table_name = table_name.strip('`')
            table_name_upper = table_name.upper()
            table_name_lower = table_name.lower()
            if table_name_upper in metadata.tables:
                ex_table = metadata.tables[table_name_upper]
            elif table_name_lower in metadata.tables:
                ex_table = metadata.tables[table_name_lower]
            else:
                ex_table = metadata.tables[table_name]
            columns_list = ex_table.columns.keys()
        except Exception as err:
            hr_logger.log_error("数据库操作失败 \n {0}".format(err))
            logger.error(traceback.format_exc())
        finally:
            conn.close()
    return columns_list
def get_mq_log(self, topic, tag):
    """Fetch MQ messages from the system log."""
    run_sh = 'grep -A 5 "cn.m*****mq" %s' % (PurePosixPath(self.sys_log_path))
    # Connect to the server and run the shell command
    with SSHClient(self.server_info) as sh:
        all_mq_log = sh.exec_cmd(run_sh)
    if not all_mq_log:
        return None
    try:
        # Parse the returned content
        for i in all_mq_log.split('\n--\n'):
            clean_data = i.split('\n')
            # Extract the log timestamp
            time_str = clean_data.pop(0)
            mat = re.search(
                r"(\d{4}-\d{1,2}-\d{1,2}\s\d{1,2}:\d{1,2}:\d{1,2})",
                time_str)
            log_time = int(
                time.mktime(
                    time.strptime(mat.group(0), "%Y-%m-%d %H:%M:%S"))) * 1000
            # Only keep logs from the last 10 minutes (within [start_time, end_time])
            if self.start_time < log_time < self.end_time:
                if topic in str(clean_data) and tag in str(clean_data):
                    yield clean_data[4].split('=')[1].strip()
    except Exception as err:
        logger.error(traceback.format_exc())
        raise err
def exec_cmd(self, cmd):
    stdin, stdout, stderr = self.client.exec_command(cmd)
    data = stdout.read().decode()
    if len(data) > 0:
        logger.info(data.strip())  # log stdout on success
        return data
    err = stderr.read().decode()
    if len(err) > 0:
        logger.error(err.strip())  # log stderr on failure
        return err
def __enter__(self):
    try:
        logger.info(
            '开始连接服务器,服务器连接信息:hostname-{0},port-{1}, username-{2}, password-{3}'.format(
                self.hostname, self.port, self.username, self.password))
        self.client.connect(hostname=self.hostname,
                            port=self.port,
                            username=self.username,
                            password=self.password,
                            timeout=self.timeout)
        return self
    except Exception as e:
        logger.error(traceback.format_exc())
        raise e
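# --- Illustrative sketch (assumption, not the project's actual code): `__enter__`
# implies this SSH client is used as a context manager, so a matching `__exit__`
# that closes the connection presumably exists elsewhere in the class. Roughly:
#
# def __exit__(self, exc_type, exc_val, exc_tb):
#     # Close the paramiko SSHClient when leaving the with-block.
#     self.client.close()
#     # Returning None (falsy) re-raises any exception from the block.
#
# Usage mirrors get_mq_log above:
#     with SSHClient(self.server_info) as sh:
#         output = sh.exec_cmd('grep "keyword" /path/to/sys.log')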
def db_operation_to_json_cycle(sql, db_connect, expect_value, wait_time=30):
    """Poll the database and validate the result against an expected JSON value."""
    step_time = 5
    t = 0
    return_value = None
    dict_expect = transfer_json_string_to_dict(expect_value)
    # Fail immediately if the expected value is not valid JSON
    if not isinstance(dict_expect, dict):
        raise Exception('结果校验中期望结果非json格式,期望结果内容为{0}({1})'.format(
            dict_expect, type(dict_expect).__name__))
    dict_expect_lower_key = {key.lower(): dict_expect[key] for key in dict_expect}
    while t <= wait_time:
        try:
            return_value = db_operation_to_json(sql, db_connect)
            dict_check = transfer_json_string_to_dict(return_value)
            dict_check_lower_key = {key.lower(): dict_check[key] for key in dict_check}
            if not dict_check_lower_key:
                hr_logger.log_info("【轮询SQL】: {0} 表中无数据,等待5秒后重试".format(
                    sql.replace('\n', '')))
            else:
                res = is_json_contains(dict_check_lower_key, dict_expect_lower_key)
                if res is True:
                    hr_logger.log_info('【轮询SQL】: {0} 结果为 {1}'.format(
                        sql.replace('\n', ''), dict_check))
                    return return_value
                else:
                    hr_logger.log_info('【轮询SQL】: {0} 结果为 {1},等待5秒后重试'.format(
                        sql.replace('\n', ''), dict_check))
            time.sleep(step_time)
            t += step_time
        except Exception as err:
            logger.error(traceback.format_exc())
            raise Exception("【轮询SQL】: 数据库操作失败, {0}".format(err))
    hr_logger.log_info('【轮询SQL】: 超过{0}秒表中仍无预期数据'.format(wait_time))
    return return_value
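# --- Illustrative sketch (not the project's helper): `is_json_contains` is not
# defined in this section. From how it is called here and in the report
# post-processing below, it evidently checks whether the expected JSON is a
# subset of the actual JSON and returns either True or a (False, reason) pair.
# A minimal recursive version of that idea, for orientation only:
def is_json_contains_sketch(actual, expect, path='$'):
    """Return True if every key/value in `expect` is also present in `actual`."""
    if isinstance(expect, dict):
        if not isinstance(actual, dict):
            return False, '{0}: expected object, got {1}'.format(path, type(actual).__name__)
        for key, value in expect.items():
            if key not in actual:
                return False, '{0}.{1}: key missing'.format(path, key)
            res = is_json_contains_sketch(actual[key], value, '{0}.{1}'.format(path, key))
            if res is not True:
                return res
        return True
    if isinstance(expect, list):
        if not isinstance(actual, list):
            return False, '{0}: expected list'.format(path)
        for item in expect:
            if not any(is_json_contains_sketch(a, item, path) is True for a in actual):
                return False, '{0}: no element matches {1!r}'.format(path, item)
        return True
    if actual == expect:
        return True
    return False, '{0}: {1!r} != {2!r}'.format(path, actual, expect)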
def db_operation_to_json(sql, db_connect, return_info=None, multi=False):
    """
    Run a query SQL and save the result as a single JSON object.
    :multi: if True, the result contains multiple records and return_info holds the elements of each record.
    """
    try:
        if not return_info:
            # Run the SQL and fetch the result
            return_info = sql_execute(sql, db_connect=db_connect)
        if multi:
            value_list = list(return_info) if return_info else []
        else:
            value_list = list(return_info[0]) if return_info else []
        sql_lower = sql.lower()
        # Build the list of result values
        for i in range(len(value_list)):
            # Convert datetime/date/Decimal column values to plain Python types
            value_list[i] = convert_mysql_datatype_to_py(value_list[i])
        between_select_from = sql[sql_lower.find('select') + len('select'):sql_lower.find('from')]
        # Build the list of column names
        if '*' in between_select_from:
            columns_list = get_table_columns(sql, db_connect)
            # database_name = sql[sql_lower.find('from') + len('from'):sql_lower.find('where')].strip()
            # table_name = database_name.split('.')[1]
            # library_name = database_name.split('.')[0]
            # columns_list = __get_table_columns(db_connect, library_name, table_name)
            # for i in range(len(columns_list)):
            #     columns_list[i] = columns_list[i].lower()
        else:
            columns_list = get_sql_columns(sql)
            # columns_list = sql_lower[sql_lower.find('select') + len('select'):sql_lower.find('from')].split(',')
            # for i in range(len(columns_list)):
            #     if '.' in columns_list[i]:
            #         columns_list[i] = columns_list[i].split('.', 1)[1]
            #     columns_list[i] = columns_list[i].strip()
        return dict(zip(columns_list, value_list))
    except Exception as err:
        # print("数据库操作失败, {0}".format(err))
        logger.error(traceback.format_exc())
        hr_logger.log_error("数据库操作失败, {0}".format(err))
        return None
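# --- Illustrative sketch: `get_sql_columns` is not defined in this section, but the
# commented-out fallback above shows the intended behaviour — take the column list
# between SELECT and FROM, drop table/alias prefixes, and strip whitespace. A
# standalone version of that logic (the real helper may also handle aliases and
# SQL functions):
def get_sql_columns_sketch(sql):
    """Extract column names from a simple 'SELECT a, t.b FROM ...' statement."""
    sql_lower = sql.lower()
    select_part = sql[sql_lower.find('select') + len('select'):sql_lower.find('from')]
    columns = []
    for col in select_part.split(','):
        col = col.strip()
        if '.' in col:
            col = col.split('.', 1)[1]  # drop the table/alias prefix
        columns.append(col)
    return columns

# get_sql_columns_sketch("SELECT id, u.user_name FROM db.users u WHERE id=1")
# -> ['id', 'user_name']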
def xml_to_dict(self):
    """
    Parse the XML file and return an ordered dict.
    :return:
    """
    tree = ET.parse(self.xml_file)
    root = tree.getroot()
    try:
        sheet = root.find(self.xmlns + 'sheet')
        topic = sheet.find(self.xmlns + 'topic')
        first_topic = topic.find(self.xmlns + 'title').text
    except AttributeError:
        logger.error("Incorrect XML")
        return {}
    orderd_dic = OrderedDict()
    orderd_dic[first_topic] = OrderedDict()
    self._parse_topic(orderd_dic[first_topic], topic)
    return orderd_dic
def wrapper(*args, **kw):
    start_time = time.time()
    try:
        c = func(*args, **kw)
    except Exception as err:
        text = '\n'.join([
            'an error occurred on {}'.format(get_host_port()),
            str(err),
            traceback.format_exc()
        ])
        logger.error(
            'ATP: 自定义方法发现未知错误 \n {traceback}'.format(traceback=text))
        c = "自定义方法调用失败"  # note: never returned, the re-raise below propagates the error
        raise err
    end_time = time.time()
    d_time = end_time - start_time
    logger.info(
        "==================== custom_func[{0}], run {1:.3}s ====================>\n"
        .format(func.__name__, d_time))
    return c
def get_from_log_content(app_name, ssh_cmd, start_with, end_with, server_app_map, server_default_user):
    """
    Log - fetch from log : app name, grep command, start marker, end marker
    :desc: Fetch content from the log by the given conditions and return it.
    :param app_name: application name, e.g. loan-web; must match the name in the IP-application map of the environment config
    :param ssh_cmd: filter command; use `grep xxx` to pick matching log lines, e.g. grep "成功发送通知消息" "/usr/local/src/logs/ups-service-cell01-node01/sys.log"
    :param start_with: start marker; content after this string is extracted from the log
    :param end_with: end marker; content before this string is extracted from the log
    :param server_app_map: server IP to application map (JSON string)
    :param server_default_user: server default user credentials (JSON string)
    :return:
    """
    server_app_map = json.loads(server_app_map)
    server_default_user = json.loads(server_default_user)
    app_server_ip = ""
    for k, v in server_app_map.items():
        if app_name in v:
            app_server_ip = k
            break
    if not app_server_ip:
        raise Exception("根据应用名找不到匹配的服务器IP")
    if not ssh_cmd.startswith("grep"):
        raise Exception("为了安全考虑,目前暂只支持grep命令")
    ssh_server_info = [app_server_ip, "22", server_default_user['user'], server_default_user['password']]
    try:
        with SSHClient(ssh_server_info) as sc:
            logs = sc.exec_cmd(ssh_cmd)
    except Exception as e:
        logger.error(traceback.format_exc())
        raise Exception('出现未知错误: {0}'.format(repr(e)))
    if not logs:
        raise Exception("日志中未找到匹配的内容")
    des_logs = logs.split("\n")[:-1]
    if len(des_logs) > 1:
        raise Exception("日志中找到超过一条匹配记录,请增加过滤条件!")
    glc = GetLogContent(des_logs[0], start_with, end_with)
    actual_value = glc.get_log_content()
    return actual_value
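# --- Illustrative sketch (not the project's class): `GetLogContent` is used here and
# in teardown_compare_log_content but not defined in this section. From the parameter
# docs ("content after the start marker, before the end marker") it is presumably a
# simple substring extractor; a minimal version of that behaviour (the real class may
# handle missing markers differently):
class GetLogContentSketch:
    """Extract the substring of a log line between start_with and end_with."""

    def __init__(self, log_line, start_with, end_with):
        self.log_line = log_line
        self.start_with = start_with
        self.end_with = end_with

    def get_log_content(self):
        content = self.log_line
        if self.start_with and self.start_with in content:
            content = content.split(self.start_with, 1)[1]
        if self.end_with and self.end_with in content:
            content = content.split(self.end_with, 1)[0]
        return content

# GetLogContentSketch('... resp={"code":"000"} cost=3ms', 'resp=', ' cost').get_log_content()
# -> '{"code":"000"}'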
def setup_zx_base_file_move(base_file_ori_path, base_file_des_path, server_app_map, server_default_user):
    """
    Server - move ZX (credit report) base test files : source path, destination path
    :param base_file_ori_path: source path of the base test files, ending with /
    :param base_file_des_path: destination path of the base test files, ending with /
    :param server_app_map: server IP to application map
    :param server_default_user: server default user credentials
    :return:
    """
    if not base_file_ori_path or not base_file_des_path:
        raise Exception('报文源路径或报文目标路径不能为空')
    if base_file_ori_path[-1] != '/' or base_file_des_path[-1] != '/':
        raise Exception('报文源路径或报文目标路径格式错误')
    base_file_ori_path = base_file_ori_path[:-1]
    base_file_des_path = base_file_des_path[:-1]
    if isinstance(server_app_map, str):
        server_app_map = json.loads(server_app_map)
    if isinstance(server_default_user, str):
        server_default_user = json.loads(server_default_user)
    app_server_ip = ""
    for k, v in server_app_map.items():
        if 'bds-core' in v:
            app_server_ip = k
            break
    if not app_server_ip:
        raise Exception("根据应用名 bds-core 找不到匹配的服务器IP")
    ssh_server_info = [app_server_ip, "22", server_default_user['user'], server_default_user['password']]
    import os
    base_file_des_path_backup = os.path.join(os.path.dirname(base_file_des_path),
                                             os.path.basename(base_file_des_path) + '_bak')
    try:
        with SSHClient(ssh_server_info) as sc:
            backup_cmd = 'sudo rm -fr %s/*&&sudo mv %s %s&&sudo mkdir -p %s' % (
                base_file_des_path_backup, base_file_des_path, base_file_des_path_backup, base_file_des_path)
            sc.run_cmd(backup_cmd)
            move_cmd = 'sudo cp -rf %s/. %s' % (base_file_ori_path, base_file_des_path)
            sc.run_cmd(move_cmd)
            return True
    except Exception as e:
        logger.error(traceback.format_exc())
        raise Exception('出现未知错误: {0}'.format(repr(e)))
def setup_zx_test_file_move(test_file_ori_path, test_file_des_path, server_app_map, server_default_user):
    """
    Server - move ZX (credit report) scenario test files : source path, destination path
    :param test_file_ori_path: source path of the test files, full path ending with /
    :param test_file_des_path: destination path of the test files, full path ending with /
    :param server_app_map: server IP to application map
    :param server_default_user: server default user credentials
    :return:
    """
    if not test_file_ori_path or not test_file_des_path:
        raise Exception('报文源路径或测试报文目标路径不能为空')
    if test_file_des_path[-1] != '/':
        raise Exception('报文目标路径格式错误')
    test_file_des_path = test_file_des_path[:-1]
    if isinstance(server_app_map, str):
        server_app_map = json.loads(server_app_map)
    if isinstance(server_default_user, str):
        server_default_user = json.loads(server_default_user)
    app_server_ip = ""
    for k, v in server_app_map.items():
        if 'bds-core' in v:
            app_server_ip = k
            break
    if not app_server_ip:
        raise Exception("根据应用名找不到匹配的服务器IP")
    ssh_server_info = [app_server_ip, "22", server_default_user['user'], server_default_user['password']]
    try:
        with SSHClient(ssh_server_info) as sc:
            import os
            if test_file_ori_path[-1] != '/':
                move_cmd = 'sudo cp -rf %s %s' % (test_file_ori_path, test_file_des_path)
            else:
                move_cmd = 'sudo cp -rf %s/. %s' % (test_file_ori_path, test_file_des_path)
            sc.run_cmd(move_cmd)
            return True
    except Exception as e:
        logger.error(traceback.format_exc())
        raise Exception('出现未知错误: {0}'.format(repr(e)))
# -*- coding:utf-8 -*-
import os

from atp.api.comm_log import logger

try:
    import paramiko
except ImportError:
    logger.error("ImportError: No module named 'paramiko'")


def server_upload_file(ssh_connect, local_path, remote_path):
    """
    Upload files to the given server.
    :param ssh_connect:
    :param local_path:
    :param remote_path:
    :return:
    """
    if isinstance(ssh_connect, str):
        ssh_info = eval(ssh_connect)
    else:
        ssh_info = ssh_connect
    # Instantiate a Transport; paramiko expects a (host, port) tuple
    ssh = paramiko.Transport((ssh_info[0], ssh_info[1]))
    # Open the connection
    ssh.connect(username=ssh_info[2], password=ssh_info[3])
    # Instantiate an SFTP client on top of the transport
    transport = paramiko.SFTPClient.from_transport(ssh)
    try:
        if os.path.isdir(local_path):  # local_path may be a directory or a single file
            local_path_list = os.listdir(local_path)
            for f in local_path_list:  # walk the local directory
def teardown_compare_log_content(app_name, ssh_cmd, start_with, end_with, expect_kwargs, server_app_map, server_default_user):
    """
    Log - check whether the log contains the expected content : app name, grep command, start marker, end marker, expected content
    :desc: Retrieve log content by the given conditions and compare it with the expected content.
    :param app_name: application name, e.g. loan-web; must match the name in the IP-application map of the environment config
    :param ssh_cmd: filter command; use `grep xxx` to pick matching log lines, e.g. grep "成功发送通知消息" "/usr/local/src/logs/ups-service-cell01-node01/sys.log"
    :param start_with: start marker; content after this string is extracted from the log
    :param end_with: end marker; content before this string is extracted from the log
    :param expect_kwargs: expected content in JSON format, compared against the extracted log content with a JSON-contains check; returns True if contained, otherwise False
    :return:
    """
    server_app_map = json.loads(server_app_map)
    server_default_user = json.loads(server_default_user)
    app_server_ip = ""
    for k, v in server_app_map.items():
        if app_name in v:
            app_server_ip = k
            break
    if not app_server_ip:
        raise Exception("根据应用名找不到匹配的服务器IP")
    if not ssh_cmd.startswith("grep"):
        raise Exception("为了安全考虑,目前暂只支持grep命令")
    ssh_server_info = [app_server_ip, "22", server_default_user['user'], server_default_user['password']]
    try:
        with SSHClient(ssh_server_info) as sc:
            logs = sc.exec_cmd(ssh_cmd)
    except Exception as e:
        logger.error(traceback.format_exc())
        raise Exception('出现未知错误: {0}'.format(repr(e)))
    if not logs:
        raise Exception("日志中未找到匹配的内容")
    des_logs = logs.split("\n")[:-1]
    if len(des_logs) > 1:
        raise Exception("日志中找到超过一条匹配记录,请增加过滤条件!")
    glc = GetLogContent(des_logs[0], start_with, end_with)
    actual_value = glc.get_log_content()
    if not actual_value and not expect_kwargs:
        return True
    elif actual_value and expect_kwargs:
        try:
            if isinstance(actual_value, bytes):
                # bytes -> str
                str_content = actual_value.decode('utf-8')
                # str -> dict
                dict_check = json.loads(str_content)
            elif isinstance(actual_value, str):
                dict_check = json.loads(actual_value)
            else:
                # already a dict
                dict_check = actual_value
        except json.decoder.JSONDecodeError:
            dict_check = actual_value
        try:
            dict_expect = json.loads(expect_kwargs)
        except json.decoder.JSONDecodeError:
            # If json.loads fails, replace Python booleans and retry: False => false, True => true
            expect_value = expect_kwargs.replace('False', 'false').replace('True', 'true')
            try:
                dict_expect = json.loads(expect_value)
            except json.decoder.JSONDecodeError:
                dict_expect = expect_value
        except TypeError:
            dict_expect = expect_kwargs
        # dict_check and dict_expect may be plain strings/numbers
        if isinstance(dict_expect, (str, int)) and isinstance(dict_check, (str, int)):
            if str(dict_expect) == str(dict_check):
                return True
            else:
                return False, "实际日志内容同期望不匹配,实际日志内容为 %s" % (str(actual_value))
        try:
            res = is_json_contains(dict_check, dict_expect)
            return res
        except Exception as e:
            return False, "json比对出现未知异常,%s" % traceback.format_exc()
    else:
        return False, "实际日志内容同期望不匹配,实际日志内容为 %s" % (str(actual_value))
def sql_execute_cycle(sql, db_connect, expect_value, wait_time=30):
    """
    Poll a query SQL on a schedule, for scenarios such as checking whether loan data
    has appeared in the loan table. Checks every 5 seconds and stops after `wait_time` seconds.
    :param expect_value:
    :param db_connect:
    :param sql: the query SQL to execute
    :param wait_time: timeout in seconds; polling stops once exceeded (default 30)
    :return: the query result
    """
    expect_is_json = False
    step_time = 5
    t = 0
    return_value = None
    dict_expect = transfer_json_string_to_dict(expect_value)
    if isinstance(dict_expect, dict):
        expect_is_json = True
    while t <= wait_time:
        try:
            return_info = sql_execute(sql, db_connect=db_connect)
            if expect_is_json:
                return_value = return_info[0][0] if return_info else None
                dict_check = transfer_json_string_to_dict(return_value)
                # dict_check_lower_key = {key.lower(): dict_check[key] for key in dict_check}
                if not dict_check:
                    hr_logger.log_info("【轮询SQL】: {0} 表中无数据,等待5秒后重试".format(
                        sql.replace('\n', '')))
                else:
                    res = is_json_contains(dict_check, dict_expect)
                    if res is True:
                        hr_logger.log_info('【轮询SQL】: {0} 结果为 {1}'.format(
                            sql.replace('\n', ''), dict_check))
                        return return_value
                    else:
                        hr_logger.log_info(
                            '【轮询SQL】: {0} 结果为 {1},{2},等待5秒后重试'.format(
                                sql.replace('\n', ''), dict_check, res[1]))
            else:
                if not return_info:
                    hr_logger.log_info("【轮询SQL】: {0} 表中无数据,等待5秒后重试".format(
                        sql.replace('\n', '')))
                else:
                    return_value = str(return_info[0][0])
                    if return_value == str(expect_value):
                        hr_logger.log_info('【轮询SQL】: {0} 结果为 {1}'.format(
                            sql.replace('\n', ''), return_value))
                        return return_info[0][0]
                    else:
                        hr_logger.log_info(
                            '【轮询SQL】: {0} 结果为 {1},等待5秒后重试'.format(
                                sql.replace('\n', ''), return_value))
            time.sleep(step_time)
            t += step_time
        except Exception as err:
            logger.error(traceback.format_exc())
            raise Exception("【轮询SQL】: 数据库操作失败, {0}".format(err))
    hr_logger.log_info('【轮询SQL】: 超过{0}秒表中仍无预期数据'.format(wait_time))
    return return_value
def api_run_test(**kwargs):
    report_id = kwargs.pop('report_id', None)
    plan_name = kwargs.pop('plan_name', None)
    project_id = kwargs.pop('project_id', None)
    testcase_main_id_list = kwargs.get('testcase_main_id_list', None)
    failfast = kwargs.pop('failfast', False)
    if testcase_main_id_list:
        is_main = True
    else:
        is_main = False
    try:
        logger.debug(
            '=============================={dir}run_{report_id}.log'.format(
                dir=run_case_log_dir, report_id=report_id))
        hr_kwargs = {
            "failfast": failfast,
            "log_path": '{dir}run_{report_id}.log'.format(dir=run_case_log_dir,
                                                          report_id=report_id)
        }
        runner = HttpRunner(**hr_kwargs)
        # res = load_test(**kwargs)
        # testset = res[0]
        # test_meta_list = res[1]
        # project_id = res[2]
        loader = ApiTestLoader(**kwargs)
        testset = loader.get_testset_list()
        test_meta_list = loader.get_test_meta_list()
        if not testset:
            raise LoadCaseError('没有可执行的用例')
        logger.debug("{1} testset:{0}".format(testset, type(testset)))
    except Exception as err:
        save_report(report_path=None, runner_summary=None, project_id=project_id,
                    report_id=report_id)
        hr_logger.log_error("【ERROR】组装用例出错!")
        hr_logger.log_error('\n'.join([str(err), traceback.format_exc()]))
        hr_logger.log_info("【END】测试结束!")
        hr_logger.remove_handler(runner.handler)
        raise LoadCaseError
    try:
        # summary = run(testset, report_name='testMock')
        start_time = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
        hr_logger.log_info("【START】测试开始! (ง •_•)ง")
        hr_logger.log_info("【环境】: {}".format(kwargs.get('env_name', None)))
        # time.sleep(3)
        try:
            testset_json = json_dumps(testset)
        except Exception:
            testset_json = testset
        hr_logger.log_debug("【调用HttpRunner】: {0}".format(testset_json))
        runner.run(testset)
        hr_logger.log_info("【结束调用HttpRunner】")
        # raise RunCaseError
        perfect_summary(runner.summary, test_meta_list)

        # Record testcase reuse
        summary_remove_file_obj(runner.summary)
        summary_for_reuse = copy.deepcopy(runner.summary)
        summary_for_reuse = add_memo(summary_for_reuse)
        # Identify errors
        summary_for_reuse = identify_errors(summary_for_reuse)
        # Update the api_testcase_reuse_record table and obtain covered_intf_id_set, run_cases, success_cases
        save_testcase_reuse_record([{
            "summary": json_loads(json_dumps(summary_for_reuse))
        }])
        del summary_for_reuse

        # hr_logger.log_info("【runner.summary】: {}".format(runner.summary))
        # Report refinements:
        # 1. Localize the report (including fields inside the log)
        # 2. Merge start time and duration into one line
        # 3. Add an "error type" field, shown with expected vs. actual values when a case fails, hidden otherwise
        # 4. Drop duplicated / unimportant lines and fields from the log (request headers, response headers, reason, url, "ok")
        # 5. Indent request/response bodies and bold the dict keys
        # 6. Add an interface request type field: http, dubbo, mq
        for detail in runner.summary["details"]:
            for record in detail["records"]:
                # Add the interface type from test_meta_list
                record["intf_type"] = test_meta_list[0]["intf_type"]
                # Remove report fields that need no attention
                request_keys = ["json", "start_timestamp"]
                response_keys = [
                    "elapsed_ms", "encoding", 'ok', 'url', 'reason', 'cookies'
                ]
                for request_key in request_keys:
                    if request_key in record["meta_data"]["request"]:
                        del record["meta_data"]["request"][request_key]
                for respones_key in response_keys:
                    if respones_key in record["meta_data"]["response"]:
                        del record["meta_data"]["response"][respones_key]
                # If record.status is 'error', raise the attached error message
                if record['status'] == 'error':
                    error_msg = record['attachment']
                    raise Exception(error_msg)
                # '''Convert body and content from bytes to dict'''
                # if "body" in record["meta_data"]["request"].keys() and "content" in record["meta_data"]["response"].keys():
                #     request_body = record["meta_data"]["request"].pop("body")
                #     response_content = record["meta_data"]["response"].pop("content")
                #     if not request_body:
                #         request_body_dic = {}
                #     else:
                #         try:
                #             request_body_dic = json.loads(request_body)
                #         except TypeError:
                #             request_body_dic = json.loads(request_body.decode('utf-8'))
                #         # extra exception handling
                #         except UnicodeDecodeError:
                #             if isinstance(request_body, bytes):
                #                 request_body_dic = {}
                #                 # request_body_dic = request_body.decode('utf-8', 'ignore')
                #             else:
                #                 request_body_dic = {}
                #
                #     if not response_content:
                #         response_content_dic = {}
                #     else:
                #         try:
                #             response_content_dic = json.loads(response_content)
                #         except TypeError:
                #             response_content_dic = json.loads(response_content.decode('utf-8'))
                #         except json.decoder.JSONDecodeError:
                #             response_content_dic = {}
                #
                #     record["meta_data"]["request"]["body"] = request_body_dic
                #     record["meta_data"]["response"]["content"] = response_content_dic
                #
                # '''Strip files to keep the report from growing too long'''
                # if "files" in record["meta_data"]["request"].keys():
                #     record["meta_data"]["request"].pop("files")

                # Add a report column: error type
                for validate in record["meta_data"]["validators"]:
                    if validate["comparator"] == "json_contains":
                        check_value = validate["check_value"]
                        expect_value = validate["expect"]
                        if json_contains(check_value, expect_value) is not True:
                            validate["check_result"] = "fail"
                            record["status"] = "failure"
                            detail["stat"]["failures"] += 1
                            detail["stat"]["successes"] -= 1
                            runner.summary["stat"]["failures"] += 1
                            runner.summary["stat"]["successes"] -= 1
                            error_log = ("预期:{}未在返回报文内".format(expect_value))
                            validate["error_log"] = {
                                "json_contains": error_log
                            }
                    elif validate["comparator"] == "db_validate":
                        check_value = validate["check_value"]
                        expect_value = validate["expect"]
                        if db_validate(check_value, expect_value) is not True:
                            validate["check_result"] = "fail"
                            record["status"] = "failure"
                            detail["stat"]["failures"] += 1
                            detail["stat"]["successes"] -= 1
                            runner.summary["stat"]["failures"] += 1
                            runner.summary["stat"]["successes"] -= 1
                            error_log = ("预期:{0},实际是:{1}".format(
                                expect_value, check_value))
                            validate["error_log"] = {"db_validate": error_log}
                    elif validate["comparator"] == "db_json_validate":
                        check_value = validate["check_value"]
                        expect_value = validate["expect"]
                        if not db_json_validate(check_value, expect_value):
                            validate["check_result"] = "fail"
                            record["status"] = "failure"
                            detail["stat"]["failures"] += 1
                            detail["stat"]["successes"] -= 1
                            runner.summary["stat"]["failures"] += 1
                            runner.summary["stat"]["successes"] -= 1
                            error_log = ("预期:{0},实际是:{1}".format(
                                expect_value,
                                json.dumps(check_value).encode('utf-8').decode(
                                    'unicode_escape')))
                            validate["error_log"] = {
                                "db_json_validate": error_log
                            }
        hr_logger.log_info("【runner.summary】: {}".format(
            json_dumps(runner.summary)))
        runner_summary = copy.deepcopy(runner.summary)
        # Record each case's pass/fail result in testcase_info.last_run
        try:
            save_last_run(runner_summary, is_main=is_main)
        except Exception as e:
            logger.error('\n'.join([str(e), traceback.format_exc()]))
            # hr_logger.log_error("【ERROR】运行用例出错!")
            # hr_logger.log_error('\n'.join([str(e), traceback.format_exc()]))
        # logger.debug("runner_summary_list{}".format(runner.summary))
        # report_path = runner.gen_html_report(
        #     html_report_name=plan_name if plan_name else 'default',
        #     html_report_template=config.REPORT_TEMPLATE_PATH,
        #     html_report_dir=config.REPORT_DIR
        # )
        # logger.debug('report_path:{}'.format(report_path))
        # report_path = report_path.split('reports')[1]
        # report_url = get_host() + r':8899/reports' + report_path
        # # logger.debug('AC report_path:{}'.format(report_path))
        report_url = '不生成报告'
        save_report(report_url, runner_summary, project_id, report_id=report_id,
                    is_main=is_main)
    except Exception as err:
        save_report(report_path=None, runner_summary=runner.summary, project_id=project_id,
                    report_id=report_id)
        hr_logger.log_error("【ERROR】运行用例出错!")
        hr_logger.log_error('\n'.join([str(err), traceback.format_exc()]))
        raise RunCaseError
    finally:
        hr_logger.log_info("【END】测试结束!")
        hr_logger.remove_handler(runner.handler)
    return report_url
def _add_intf_info(self, testset, intf_obj, is_first=False):
    """Add basic interface information to each teststep."""
    intf_type = intf_obj.intf_type
    intf_info_dic = json_loads(intf_obj.intf_info)
    # teststep = testset["teststeps"][0]
    for teststep in testset["teststeps"]:
        if intf_type == "HTTP":
            base_url = self.env_info.base_host
            api_url = intf_info_dic["apiUrl"].strip()
            if absolute_http_url_regexp.match(api_url):
                teststep["request"]["url"] = api_url
            elif api_url.startswith('/'):
                teststep["request"]["url"] = base_url + api_url
            else:
                teststep["request"]["url"] = base_url + '/' + api_url
            teststep["request"]["method"] = intf_info_dic["method"]
            teststep["request"]["headers"] = json_loads(intf_info_dic["headers"].replace("'", "\""))
            teststep["request"]["allow_redirects"] = False
        elif intf_type == "DUBBO":
            base_url = self.env_info.remote_host
            teststep["request"]["url"] = base_url + "/invokeDubbo"
            teststep["request"]["method"] = "POST"
            teststep["variables"].append({"DUBBO_METHOD": intf_info_dic["dubboMethod"]})
            teststep["variables"].append({"DUBBO_INTERFACE": intf_info_dic["dubboService"]})
            teststep["variables"].append({"DUBBO_VERSION": intf_info_dic["version"]})
            teststep["request"]["json"]["version"] = "$DUBBO_VERSION"
            teststep["request"]["json"]["interfaceName"] = "$DUBBO_INTERFACE"
            teststep["request"]["json"]["zkUrl"] = "$DUBBO_ZOOKEEPER"
            teststep["request"]["json"]["methodName"] = "$DUBBO_METHOD"
            teststep["request"]["json"]["parameterTypes"] = []
            parameter_type_count = 0
            try:
                parameter_type_list = json_loads(intf_info_dic["parameterTypes"])
            except json.decoder.JSONDecodeError:
                logger.error('ERROR parameterTypes: intf_id {}'.format(intf_obj.id))
                parameter_type_list = []
            for parameter_type in parameter_type_list:
                parameter_type_count += 1
                teststep["variables"].append({
                    "DUBBO_PARAMETER_TYPE_{no}".format(no=parameter_type_count): parameter_type
                })
                teststep["request"]["json"]["parameterTypes"].append(
                    "$DUBBO_PARAMETER_TYPE_{no}".format(no=parameter_type_count)
                )
        elif intf_type == "MQ":
            if self.env_info.env_name == "SIT":
                default_env = self.env_info.env_name
            else:
                default_env = "ALIUAT"  # MQ env defaults to the fixed value 'ALIUAT'
            base_url = self.env_info.remote_host
            teststep["request"]["url"] = base_url + "/sendMQ"
            teststep["request"]["method"] = "POST"
            teststep["variables"].append({"MQ_TOPIC": intf_info_dic["topic"]})
            teststep["variables"].append({"MQ_TAG": intf_info_dic["tag"]})
            teststep["variables"].append({"MQ_PID": "PID_{mid}_{env}".format(
                mid=intf_info_dic["topic"][3:], env=default_env)})
            teststep["request"]["json"]["env"] = default_env
            teststep["request"]["json"]["topic"] = "$MQ_TOPIC"
            teststep["request"]["json"]["tag"] = "$MQ_TAG"
            teststep["request"]["json"]["pid"] = "$MQ_PID"
            teststep["request"]["json"]["onsSecretKey"] = "{{\"{env}\":\"$MQ_SK\"}}".format(env=default_env)
            teststep["request"]["json"]["onsAccessKey"] = "{{\"{env}\":\"$MQ_AK\"}}".format(env=default_env)
            if "appid" in intf_info_dic and intf_info_dic["appid"]:
                teststep["request"]["json"]["appid"] = intf_info_dic["appid"]
        # When not loading interface info for the first time, only handle testset["teststeps"][0]
        if not is_first:
            break
    return testset
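# --- Illustrative example (placeholder values, not from the source): roughly the
# shape of a DUBBO teststep after _add_intf_info has filled it in. The surrounding
# testset structure comes from the loader and is only partially visible here;
# interface and type names below are invented for illustration.
dubbo_teststep_example = {
    "variables": [
        {"DUBBO_METHOD": "queryUser"},
        {"DUBBO_INTERFACE": "com.example.UserService"},
        {"DUBBO_VERSION": "1.0.0"},
        {"DUBBO_PARAMETER_TYPE_1": "java.lang.String"},
    ],
    "request": {
        "url": "http://remote-host/invokeDubbo",
        "method": "POST",
        "json": {
            "version": "$DUBBO_VERSION",
            "interfaceName": "$DUBBO_INTERFACE",
            "zkUrl": "$DUBBO_ZOOKEEPER",
            "methodName": "$DUBBO_METHOD",
            "parameterTypes": ["$DUBBO_PARAMETER_TYPE_1"],
        },
    },
}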