def get_mapping_variable(variable_name, variables_mapping):
    """Resolve a variable from variables_mapping.

    A dotted name (e.g. "resp.data.id") uses the part before the first
    dot as the mapping key; the remainder is applied as a JSON query
    against that value (which is first coerced from a JSON string to a
    dict if necessary).

    Args:
        variable_name (str): variable name, optionally dotted.
        variables_mapping (dict): variables mapping.

    Returns:
        mapping variable value.

    Raises:
        exceptions.VariableNotFound: variable is not found.
    """
    try:
        if '.' not in variable_name:
            return variables_mapping[variable_name]

        top_query, sub_query = variable_name.split('.', 1)
        json_content = transfer_json_string_to_dict(variables_mapping[top_query])
        return query_json(json_content=json_content, query=sub_query, delimiter='.')
    except (KeyError, exceptions.ExtractFailure):
        logger.log_error("【自定义变量未找到】变量名{}".format(variable_name))
        raise exceptions.VariableNotFound(
            "{} is not found.".format(variable_name))
def validate(self, validators, resp_obj):
    """Run every validator against the response object.

    Evaluation or validation failures never abort the loop: each failure
    flips validate_pass to False and the remaining validators still run,
    so the report can show all results.

    Args:
        validators (list): raw validator items from the teststep.
        resp_obj: ResponseObject wrapping the HTTP response.

    Returns:
        tuple: (evaluated_validators, validate_pass)
            evaluated_validators (list): validators whose check/expect
                values were successfully evaluated (each carries a
                "check_result" set by _do_validation).
            validate_pass (bool): False if any validator failed to
                evaluate or failed its check.
    """
    validate_pass = True
    evaluated_validators = []
    if not validators:
        return evaluated_validators, validate_pass

    for validator in validators:
        # evaluate validators with context variable mapping.
        try:
            evaluated_validator = self.__eval_check_item(
                parser.parse_validator(validator), resp_obj)
        except Exception as err:
            # evaluation error (e.g. extraction failed): mark run failed,
            # skip this validator and keep going
            logger.log_error('【处理校验数据出错】{}'.format(repr(err)))
            validate_pass = False
            continue

        try:
            self._do_validation(evaluated_validator)
        except exceptions.ValidationFailure:
            # record the failure but continue with the other validators
            validate_pass = False

        # conversion when check_value is a generator (kept for reference):
        # from inspect import isgenerator
        # if isgenerator(evaluated_validator['check_value']):
        #     evaluated_validator['check_value'] = list(evaluated_validator['check_value'])
        evaluated_validators.append(evaluated_validator)

    # if not validate_pass:
    #     raise exceptions.ValidationFailure
    return evaluated_validators, validate_pass
def __get_table_columns(db_connect, library_name, table_name): """ 根据表名table_name获取该表的所有列名,list格式 """ #替换db_connect的表名 config_name = db_connect.split('?')[0].split('/')[-1] db_connect = db_connect.replace(config_name, library_name) engine = create_engine(db_connect, echo=False, poolclass=NullPool) columns_list = None with engine.connect() as conn: try: metadata = MetaData(engine, reflect=True) table_name = table_name.strip('`') table_name_upper = table_name.upper() table_name_lower = table_name.lower() if table_name_upper in metadata.tables: ex_table = metadata.tables[table_name_upper] elif table_name_lower in metadata.tables: ex_table = metadata.tables[table_name_lower] else: ex_table = metadata.tables[table_name] columns_list = ex_table.columns.keys() except Exception as err: hr_logger.log_error("数据库操作失败 \n {0}".format(err)) logger.error(traceback.format_exc()) finally: conn.close() return columns_list
def get_mapping_function(function_name, functions_mapping):
    """ get function from functions_mapping,
        if not found, then try to check if builtin function.

    Args:
        function_name (str): function name
        functions_mapping (dict): functions mapping

    Returns:
        mapping function object.

    Raises:
        exceptions.FunctionNotFound: function is neither defined in
            debugtalk.py nor builtin.

    """
    if function_name in functions_mapping:
        return functions_mapping[function_name]

    try:
        # check if builtin function
        # NOTE: eval is only fed the function *name* coming from the
        # testcase author; still, it trusts that input.
        item_func = eval(function_name)
        if callable(item_func):
            # is builtin function
            return item_func
    except (NameError, TypeError):
        # is not builtin function; fall through to the error below
        pass

    # FIX: the original silently returned None when the name resolved to a
    # non-callable object (e.g. a module-level constant); now every
    # unresolvable case raises FunctionNotFound.
    logger.log_error("【自定义函数未找到】函数名{}".format(function_name))
    raise exceptions.FunctionNotFound(
        "{} is not found.".format(function_name))
def load_testcases(path): """ load testcases from file path, extend and merge with api/testcase definitions. Args: path (str): testcase file/foler path. path could be in several types: - absolute/relative file path - absolute/relative folder path - list/set container with file(s) and/or folder(s) Returns: list: testcases list, each testcase is corresponding to a file [ testcase_dict_1, testcase_dict_2 ] """ if isinstance(path, (list, set)): testcases_list = [] for file_path in set(path): testcases = load_testcases(file_path) if not testcases: continue testcases_list.extend(testcases) return testcases_list if not os.path.isabs(path): path = os.path.join(os.getcwd(), path) if path in testcases_cache_mapping: return testcases_cache_mapping[path] if os.path.isdir(path): load_project_tests(path) files_list = load_folder_files(path) testcases_list = load_testcases(files_list) elif os.path.isfile(path): try: load_project_tests(path) testcase = _load_test_file(path) if testcase["teststeps"]: testcases_list = [testcase] else: testcases_list = [] except exceptions.FileFormatError: testcases_list = [] else: err_msg = "path not exist: {}".format(path) logger.log_error(err_msg) raise exceptions.FileNotFound(err_msg) testcases_cache_mapping[path] = testcases_list return testcases_list
def __getattr__(self, key): try: if key == "json": value = self.resp_obj.json() else: value = getattr(self.resp_obj, key) self.__dict__[key] = value return value except AttributeError: err_msg = "ResponseObject does not have attribute: {}".format(key) logger.log_error(err_msg) raise exceptions.ParamsError(err_msg)
def load_json_file(json_file): """ load json file and check file content format """ with io.open(json_file, encoding='utf-8') as data_file: try: json_content = json.load(data_file) except exceptions.JSONDecodeError: err_msg = u"JSONDecodeError: JSON file format error: {}".format( json_file) logger.log_error(err_msg) raise exceptions.FileFormatError(err_msg) _check_format(json_file, json_content) return json_content
def _check_format(file_path, content): """ check testcase format if valid """ # TODO: replace with JSON schema validation if not content: # testcase file content is empty err_msg = u"Testcase file content is empty: {}".format(file_path) logger.log_error(err_msg) raise exceptions.FileFormatError(err_msg) elif not isinstance(content, (list, dict)): # testcase file content does not match testcase format err_msg = u"Testcase file content format invalid: {}".format(file_path) logger.log_error(err_msg) raise exceptions.FileFormatError(err_msg)
def query_json(json_content, query, delimiter='.'):
    """ Do an xpath-like query with json_content.

    @param (dict/list/string) json_content
        json_content = {
            "ids": [1, 2, 3, 4],
            "person": {
                "name": {"first_name": "Leo", "last_name": "Lee"},
                "age": 29,
                "cities": ["Guangzhou", "Shenzhen"]
            }
        }
    @param (str) query
        "person.name.first_name"    =>  "Leo"
        "person.name.first_name.0"  =>  "L"
        "person.cities.0"           =>  "Guangzhou"
    @return queried result

    Raises:
        exceptions.ExtractFailure: any key/index along the path is invalid.
    """
    raise_flag = False
    response_body = u"response body: {}\n".format(json_content)
    try:
        for key in query.split(delimiter):
            if isinstance(json_content, (list, basestring)):
                # sequences (and strings) are indexed numerically
                json_content = json_content[int(key)]
            elif isinstance(json_content, dict):
                if key not in json_content:
                    # dict keys may actually be ints; retry with int key
                    try:
                        key = int(key)
                    except ValueError:
                        pass
                json_content = json_content[key]
            else:
                # unsupported node type: flag the failure but keep looping
                # (the flag is checked once after the loop)
                logger.log_error("invalid type value: {}({})".format(
                    json_content, type(json_content)))
                raise_flag = True
    except (KeyError, ValueError, IndexError):
        raise_flag = True

    if raise_flag:
        err_msg = u"Failed to extract! => {}\n".format(query)
        err_msg += response_body
        logger.log_error(err_msg)
        raise exceptions.ExtractFailure(err_msg)

    return json_content
def db_operation_to_json(sql, db_connect, return_info=None, multi=False):
    """Execute a SELECT (or use pre-fetched rows) and pack one record into
    a {column_name: value} dict.

    :param sql: the SELECT statement; also parsed textually to derive the
        column-name list.
    :param db_connect: database connection URL.
    :param return_info: pre-fetched result; when None, sql is executed here.
    :param multi: True means return_info is a single record taken out of a
        multi-row result set; False means the first row of the result set
        is used.
    :return: dict of column name -> converted value, or None on any error
        (best-effort: the exception is logged, not raised).
    """
    try:
        if not return_info:
            # fetch the sql query result
            return_info = sql_execute(sql, db_connect=db_connect)
        if multi:
            value_list = list(return_info) if return_info else []
        else:
            value_list = list(return_info[0]) if return_info else []
        sql_lower = sql.lower()
        # build the value list
        for i in range(len(value_list)):
            # normalize datetime/date/Decimal values to plain python types
            value_list[i] = convert_mysql_datatype_to_py(value_list[i])
        between_select_from = sql[sql_lower.find('select') +
                                  len('select'):sql_lower.find('from')]
        # build the column-name list
        if '*' in between_select_from:
            # SELECT * : column names come from table metadata
            columns_list = get_table_columns(sql, db_connect)
        else:
            # explicit column list: parse it out of the sql text
            columns_list = get_sql_columns(sql)
        return dict(zip(columns_list, value_list))
    except Exception as err:
        # best-effort: log the failure and return None
        logger.error(traceback.format_exc())
        hr_logger.log_error("数据库操作失败, {0}".format(err))
        return None
def sql_execute_with_params(sql_list, param_list, env_name=None, db_connect=None):
    """Execute a batch of SQL statements with bound parameters.

    Args:
        sql_list (list): SQL statements (sqlalchemy text() style, named params).
        param_list (list): one params mapping per statement; lengths must match.
        env_name (str): environment name used to look up the DB connection.
        db_connect (str): explicit connection URL, used when env_name is absent.

    Returns:
        rows fetched by the last SELECT, or the affected rowcount of the
        last write statement.

    Raises:
        Exception: length mismatch, unknown environment, or a non-INSERT
            statement without a WHERE clause (safety rule).
        exc.SQLAlchemyError: database error, re-raised after logging.
    """
    if len(sql_list) != len(param_list):
        raise Exception("sql个数同参数个数不匹配")
    if env_name:
        try:
            obj = EnvInfo.query.filter_by(env_name=env_name).first()
            if not obj:
                raise Exception("传入的环境不存在")
            db_info = obj.db_connect
        except Exception as err:
            raise Exception(err)
    elif db_connect:
        db_info = db_connect
    # NOTE(review): if neither env_name nor db_connect is supplied, db_info
    # stays unbound and create_engine below raises NameError — confirm
    # callers always pass one of the two.
    engine = create_engine(db_info, echo=False, poolclass=NullPool)
    return_info = None
    with engine.connect() as conn:
        try:
            for s, p in zip(sql_list, param_list):
                # safety rule: anything but INSERT must carry a WHERE clause
                if 'insert' not in s.lower() and 'where' not in s.lower():
                    raise Exception('更新和删除操作不符合安全规范,必须要带上where条件')
                if s.lower().strip().startswith('select'):
                    return_info = conn.execute(text(s), p).fetchall()
                    hr_logger.log_info("【执行SQL】: {0} 返回数据: {1}".format(
                        s.replace('\n', ''), return_info))
                else:
                    return_info = conn.execute(text(s), p).rowcount
                    hr_logger.log_info("【执行SQL】: {0} 受影响的行: {1}".format(
                        s.replace('\n', ''), return_info))
        except exc.SQLAlchemyError as err:
            hr_logger.log_error("数据库操作失败, {0}".format(err))
            raise err
        finally:
            conn.close()
    return return_info
def _extract_field_with_regex(self, field): """ extract field from response content with regex. requests.Response body could be json or html text. @param (str) field should only be regex string that matched r".*\(.*\).*" e.g. self.text: "LB123abcRB789" field: "LB[\d]*(.*)RB[\d]*" return: abc """ matched = re.search(field, self.text) if not matched: err_msg = u"Failed to extract data with regex! => {}\n".format( field) err_msg += u"response body: {}\n".format(self.text) logger.log_error(err_msg) raise exceptions.ExtractFailure(err_msg) return matched.group(1)
def extract_field(self, field, key=None, context_obj=None):
    """ extract value from requests.Response.

    Args:
        field (str): extractor expression; may end with a slice suffix
            like "[0:4]" which is applied to the extracted value.
        key (str): variable name the value is saved as (log text only).
        context_obj: context used to resolve $variables inside the field.

    Returns:
        extracted value (utf-8 encoded on py2 when it is unicode).

    Raises:
        exceptions.ParamsError: field is not a string.
        exceptions.ExtractFailure: extraction failed.
    """
    if not isinstance(field, basestring):
        err_msg = u"Invalid extractor! => {}\n".format(field)
        logger.log_error(err_msg)
        raise exceptions.ParamsError(err_msg)

    if key:
        msg = "【提取变量】: {}".format(field)
    else:
        msg = "【提取】: {}".format(field)
    sub_str_exp = None
    original_value = None
    # regex-based extraction (kept for reference):
    # if text_extractor_regexp_compile.match(field):
    #     value = self._extract_field_with_regex(field)
    # else:
    #     original_value, sub_str_exp = self._extract_field_with_delimiter(field, context_obj=context_obj)
    try:
        original_value, sub_str_exp = self._extract_field_with_delimiter(
            field, context_obj=context_obj)
    except Exception as err:
        raise exceptions.ExtractFailure(err)

    if sub_str_exp:
        # apply the slice suffix, e.g. original_value[0:4]
        # NOTE(review): eval trusts sub_str_exp from the testcase author
        value = eval('original_value' + sub_str_exp)
    else:
        value = original_value

    if is_py2 and isinstance(value, unicode):
        value = value.encode("utf-8")

    if key:
        if sub_str_exp:
            msg += " ==> {0} ==> {1} 保存为变量 {2}".format(
                original_value + sub_str_exp, value, key)
        else:
            msg += " ==> {0} 保存为变量 {1}".format(value, key)
    else:
        msg += " ==> {0}".format(value)
    logger.log_info(msg)
    return value
def sql_execute(sql, env_name=None, db_connect=None): """ """ # env = env.upper() if env_name: try: obj = EnvInfo.query.filter_by(env_name=env_name).first() if not obj: raise Exception("传入的环境不存在") db_info = obj.db_connect # db_info = db_connects[env][db_type] except Exception as err: raise Exception(err) elif db_connect: db_info = db_connect engine = create_engine(db_info, echo=False, poolclass=NullPool) return_info = None with engine.connect() as conn: try: sql = sql.replace('%', '%%') if re.match('select', sql.lower().strip()): return_info = conn.execute(sql).fetchall() elif re.match('exec', sql.lower().strip()): sql_new = DDL(sql) conn.execute(sql_new) else: for s in str(sql).strip().strip(';').split(';'): if 'insert' not in s.lower() and 'where' not in s.lower(): raise Exception('更新和删除操作不符合安全规范,必须要带上where条件') return_info = conn.execute(s).rowcount # logger.info("受影响的行: {0}".format(return_info)) hr_logger.log_info("【执行SQL】: {0} 受影响的行: {1}".format( s.replace('\n', ''), return_info)) except exc.SQLAlchemyError as err: hr_logger.log_error("数据库操作失败, {0}".format(err)) raise err # hr_logger.log_error(err.args[0]) finally: conn.close() return return_info
def _get_block_by_name(ref_call, ref_type):
    """ get test content by reference name.

    Args:
        ref_call (str): call function.
            e.g. api_v1_Account_Login_POST($UserName, $Password)
        ref_type (enum): "def-api" or "def-testcase"

    Returns:
        dict: api/testcase definition, with call arguments substituted
        into the defined argument names when they differ.

    Raises:
        exceptions.ParamsError: call args number is not equal to defined
            args number.
    """
    function_meta = parser.parse_function(ref_call)
    func_name = function_meta["func_name"]
    call_args = function_meta["args"]
    block = _get_test_definition(func_name, ref_type)
    def_args = block.get("function_meta", {}).get("args", [])

    if len(call_args) != len(def_args):
        err_msg = "{}: call args number is not equal to defined args number!\n".format(
            func_name)
        err_msg += "defined args: {}\n".format(def_args)
        err_msg += "reference args: {}".format(call_args)
        logger.log_error(err_msg)
        raise exceptions.ParamsError(err_msg)

    # map defined arg names to the values actually passed, skipping the
    # ones passed through unchanged
    args_mapping = {
        def_arg: call_arg
        for def_arg, call_arg in zip(def_args, call_args)
        if call_arg != def_arg
    }

    if args_mapping:
        block = parser.substitute_variables(block, args_mapping)
    return block
def mq_validate(check_value, expect_value): """ mq-消息内容包含校验 : topic+tag+system_name, 预期结果 :desc: 说明: 根据topic+tag+system_name查询消息,同预期结果匹配 :param check_value:待校验内容: 需要查询的消息topic和tag,以json格式,如{"topic": "TP_MIME_UNION_FINANCE", "tag": "TAG_capital-mgmt-core_createCashLoan", "system_name": "user-core"},支持使用变量 :param expect_value:期望值: 例如 "abc" 或 {"a": 123, "b": "xxx"},支持使用变量 :return: """ # 返回ture和flase,关联测试报告success和fail if not check_value: return False, '根据topic和tag获取不到最近10分钟的MQ消息' # hr_logger.log_info(str(len(check_value))) for index, item in enumerate(check_value): hr_logger.log_info('消息({0})内容:{1}'.format(str(index+1), item)) try: dict_check = json.loads(item) # hr_logger.log_info(type(dict_check).__name__) if not isinstance(expect_value, dict): return False, '预期结果和实际结果类型不一致,预期结果是"{0}",期望结果是"{1}"'.format(type(dict_check).__name__, type(expect_value).__name__) is_ok = is_json_contains(dict_check, expect_value) # hr_logger.log_info(type(is_ok).__name__) if is_ok is True: return True else: continue except Exception as err: hr_logger.log_error(traceback.format_exc()) dict_check = item if dict_check == expect_value: return True elif str(dict_check) == str(expect_value): return True else: continue return False, '找不到同预期结果匹配的MQ消息'
def _extract_field_with_delimiter(self, field, context_obj=None):
    """ response content could be json or html text.

    Dispatches on the first path segment of `field` and returns a
    (value, sub_str_exp) pair, where sub_str_exp is an optional slice
    suffix like "[0:4]" that the caller applies afterwards.

    @param (str) field should be string joined by delimiter.
        e.g.
        "status_code"
        "headers"
        "cookies"
        "content"
        "headers.content-type"
        "content.person.name.first_name"
        with an in-case variable: "123$phoneNo"
        a query SQL: "SELECT NEXT_VALUE FROM user_db.sequence WHERE SEQ_NAME='$MEMBER_ID';"
    """
    # detect and strip a trailing slice suffix, e.g. "content.ids[0:2]"
    sub_str_exp = None
    if field.endswith(']') and '[' in field and ':' in field.split(
            '[')[-1]:
        sub_str_exp = '[' + field.split('[')[-1]
        # NOTE(review): str.strip() removes a *character set*, not a
        # suffix — this works only while field doesn't start/end with
        # characters that appear in sub_str_exp; confirm.
        field = field.strip(sub_str_exp)

    # extract step may contain a query SQL whose result becomes the value
    if str(field).lower().startswith("select "):
        db_connect_content = '$DB_CONNECT'
        parsed_db_connect = context_obj.eval_content(db_connect_content)
        if parser.extract_variables(field):
            # resolve $variables embedded in the SQL first
            sql = context_obj.eval_content(field)
        else:
            sql = field
        from atp.api.mysql_sql_executor import sql_execute, db_operation_to_json
        from atp.utils.tools import convert_mysql_datatype_to_py
        try:
            res = sql_execute(sql, db_connect=parsed_db_connect)
        except Exception as err:
            raise
        if res:
            # support result sets with one or many rows/columns:
            # 1 row x 1 col -> scalar; 1 row x n cols -> dict;
            # n rows -> list of scalars/dicts
            if len(res) == 1:
                if len(res[0]) == 1:
                    res_value = convert_mysql_datatype_to_py(res[0][0])
                else:
                    res_value = db_operation_to_json(
                        sql, db_connect=parsed_db_connect, return_info=res)
            else:
                res_value = []
                for res_item in res:
                    if len(res_item) == 1:
                        res_value.append(
                            convert_mysql_datatype_to_py(res_item[0]))
                    else:
                        res_value.append(
                            db_operation_to_json(
                                sql, db_connect=parsed_db_connect,
                                return_info=res_item, multi=True))
        else:
            res_value = 'variable sql return no result!'
        return res_value, sub_str_exp

    # string.split(sep=None, maxsplit=-1) -> list of strings
    # e.g. "content.person.name" => ["content", "person.name"]
    try:
        top_query, sub_query = field.split('.', 1)
    except ValueError:
        top_query = field
        sub_query = None

    # status_code / scalar response attributes
    if top_query in ["status_code", "encoding", "ok", "reason", "url"]:
        if sub_query:
            # status_code.XX
            err_msg = u"Failed to extract: {}\n".format(field)
            logger.log_error(err_msg)
            raise exceptions.ParamsError(err_msg)
        return getattr(self, top_query), sub_str_exp

    # cookies
    elif top_query == "cookies":
        cookies = self.cookies.get_dict()
        if not sub_query:
            # extract cookies
            return cookies, sub_str_exp
        try:
            return cookies[sub_query], sub_str_exp
        except KeyError:
            err_msg = u"Failed to extract cookie! => {}\n".format(field)
            err_msg += u"response cookies: {}\n".format(cookies)
            logger.log_error(err_msg)
            raise exceptions.ExtractFailure(err_msg)

    # elapsed
    elif top_query == "elapsed":
        available_attributes = u"available attributes: days, seconds, microseconds, total_seconds"
        if not sub_query:
            err_msg = u"elapsed is datetime.timedelta instance, attribute should also be specified!\n"
            err_msg += available_attributes
            logger.log_error(err_msg)
            raise exceptions.ParamsError(err_msg)
        elif sub_query in ["days", "seconds", "microseconds"]:
            return getattr(self.elapsed, sub_query), sub_str_exp
        elif sub_query == "total_seconds":
            return self.elapsed.total_seconds(), sub_str_exp
        else:
            err_msg = "{} is not valid datetime.timedelta attribute.\n".format(
                sub_query)
            err_msg += available_attributes
            logger.log_error(err_msg)
            raise exceptions.ParamsError(err_msg)

    # headers
    elif top_query == "headers":
        headers = self.headers
        if not sub_query:
            # extract headers
            return headers, sub_str_exp
        try:
            return headers[sub_query], sub_str_exp
        except KeyError:
            err_msg = u"Failed to extract header! => {}\n".format(field)
            err_msg += u"response headers: {}\n".format(headers)
            logger.log_error(err_msg)
            raise exceptions.ExtractFailure(err_msg)

    # response body
    elif top_query in ["content", "text", "json"]:
        try:
            body = self.json
        except exceptions.JSONDecodeError:
            body = self.text

        if not sub_query:
            # extract response body
            return body, sub_str_exp

        if isinstance(body, dict):
            # content = {"xxx": 123}, content.xxx
            '''如果body中content是字符串类型'content': "{'headImageUrl':'','isRegister':0,'nickName':''}"
            转换成字典,然后'extract': [{'headImageUrl':"content.content.isRegister"}]可提取
            '''
            # when body["content"] is itself a JSON-ish string, coerce it
            # to a dict so "content.content.xxx" paths keep working
            # (guarded: body["content"] may be None / non-str)
            if "content" in body.keys() and body["content"] and isinstance(
                    body["content"], str):
                try:
                    body_content_dict = json.loads(body["content"].replace(
                        ' style="text-align: center;text-indent: 0;"',
                        '').replace("'", "\""))
                    body["content"] = body_content_dict
                except (TypeError, json.decoder.JSONDecodeError) as e:
                    # NOTE(review): joining the exception object itself
                    # raises TypeError — probably meant str(e); confirm.
                    logger.log_error('\n'.join([e, traceback.format_exc()]))
            return utils.query_json(body, sub_query), sub_str_exp
        elif sub_query.isdigit():
            # content = "abcdefg", content.3 => d
            return utils.query_json(body, sub_query), sub_str_exp
        else:
            # content = "<html>abcdefg</html>", content.xxx
            err_msg = u"Failed to extract attribute from response body! => {}\n".format(
                field)
            err_msg += u"response body: {}\n".format(body)
            logger.log_error(err_msg)
            raise exceptions.ExtractFailure(err_msg)

    # new set response attributes in teardown_hooks
    elif top_query in self.__dict__:
        attributes = self.__dict__[top_query]

        if not sub_query:
            # extract response attributes
            return attributes, sub_str_exp

        if isinstance(attributes, (dict, list)):
            # attributes = {"xxx": 123}, content.xxx
            return utils.query_json(attributes, sub_query), sub_str_exp
        elif sub_query.isdigit():
            # attributes = "abcdefg", attributes.3 => d
            return utils.query_json(attributes, sub_query), sub_str_exp
        else:
            # content = "attributes.new_attribute_not_exist"
            err_msg = u"Failed to extract cumstom set attribute from teardown hooks! => {}\n".format(
                field)
            err_msg += u"response set attributes: {}\n".format(attributes)
            logger.log_error(err_msg)
            raise exceptions.TeardownHooksFailure(err_msg)

    elif context_obj and parser.extract_variables(top_query):
        # expression containing known variables is resolved and returned,
        # e.g. ha$phone => ha18551602992
        return context_obj.eval_content(top_query), sub_str_exp

    # others
    else:
        err_msg = u"Failed to extract attribute from response! => {}\n".format(
            field)
        err_msg += u"available response attributes: status_code, cookies, elapsed, headers, content, text, json, encoding, ok, reason, url.\n\n"
        err_msg += u"If you want to set attribute in teardown_hooks, take the following example as reference:\n"
        err_msg += u"response.new_attribute = 'new_attribute_value'\n"
        logger.log_error(err_msg)
        raise exceptions.ParamsError(err_msg)
def run_test(self, teststep_dict): """ run single teststep. Args: teststep_dict (dict): teststep info { "name": "teststep description", "skip": "skip this test unconditionally", "times": 3, "variables": [], # optional, override "request": { "url": "http://127.0.0.1:5000/api/users/1000", "method": "POST", "headers": { "Content-Type": "application/json", "authorization": "$authorization", "random": "$random" }, "body": '{"name": "user", "password": "******"}' }, "extract": [], # optional "validate": [], # optional "setup_hooks": [], # optional "teardown_hooks": [] # optional } Raises: exceptions.ParamsError exceptions.ValidationFailure exceptions.ExtractFailure """ self.teststep_dict = teststep_dict is_last = teststep_dict.get("is_last", None) case_name = teststep_dict.get("name", None) case_id = teststep_dict.get("case_id", None) logger.log_info("【开始执行用例】: ID_{0}, {1}".format(case_id, case_name)) self.step_teardown_executed = False # check skip self._handle_skip_feature(teststep_dict) # prepare logger.log_info("-" * 12 + "【变量替换-开始】" + "-" * 12) extractors = teststep_dict.get("extract", []) or teststep_dict.get( "extractors", []) validators = teststep_dict.get("validate", []) or teststep_dict.get( "validators", []) self.step_parse_variable_pass = True self.running_hook = 'step_parse_variable' parsed_request = self.init_config(teststep_dict, level="teststep") self.context.update_teststep_variables_mapping("request", parsed_request) logger.log_info("-" * 12 + "【变量替换-结束】" + "-" * 12) if self.variable_not_found: self.handle_teardown(fail_type='变量替换') raise exceptions.VariableNotFound if not self.step_parse_variable_pass: self.handle_teardown(fail_type='变量替换') raise exceptions.CustomFuncRunError # setup hooks setup_hooks = teststep_dict.get("setup_hooks", []) setup_hooks.insert(0, "${setup_hook_prepare_kwargs($request)}") logger.log_info("-" * 12 + "【请求前置-开始】" + "-" * 12) self.step_setup_pass = True self.running_hook = 'step_setup' self.do_setup_hook_actions(setup_hooks) 
logger.log_info("-" * 12 + "【请求前置-结束】" + "-" * 12) if not self.step_setup_pass: self.handle_teardown(fail_type='前置动作') raise exceptions.SetupHooksFailure try: url = parsed_request.pop('url') method = parsed_request.pop('method') group_name = parsed_request.pop("group", None) except KeyError: raise exceptions.ParamsError("URL or METHOD missed!") # TODO: move method validation to json schema valid_methods = [ "GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS" ] if method.upper() not in valid_methods: err_msg = u"Invalid HTTP method! => {}\n".format(method) err_msg += "Available HTTP methods: {}".format( "/".join(valid_methods)) logger.log_error(err_msg) self.handle_teardown(fail_type='校验发送方式') raise exceptions.ParamsError(err_msg) # logger.log_info("{method} {url}".format(method=method, url=url)) # logger.log_debug("request kwargs(raw): {kwargs}".format(kwargs=parsed_request)) # request try: resp = self.http_client_session.request(method, url, name=group_name, **parsed_request) except Exception as e: self.handle_teardown(fail_type='接口请求') raise exceptions.RequestFailure resp_obj = response.ResponseObject(resp) # # teardown hooks # teardown_hooks = teststep_dict.get("teardown_hooks", []) # if teardown_hooks: # # logger.log_info("start to run teardown hooks") # logger.log_info("【开始后置动作】...") # self.context.update_teststep_variables_mapping("response", resp_obj) # self.do_hook_actions(teardown_hooks) # logger.log_info("【结束后置动作】") # request teardown hooks 新增请求后置 request_teardown_hooks = teststep_dict.get("request_teardown_hooks", []) if request_teardown_hooks: # logger.log_info("start to run teardown hooks") logger.log_info("-" * 12 + "【请求后置-开始】" + "-" * 12) self.step_request_teardown_pass = True self.running_hook = 'step_request_teardown' self.context.update_teststep_variables_mapping( "response", resp_obj) self.do_request_teardown_hook_actions(request_teardown_hooks) logger.log_info("-" * 12 + "【请求后置-结束】" + "-" * 12) if not self.step_request_teardown_pass: 
self.handle_teardown(fail_type='请求后置动作') raise exceptions.TeardownHooksFailure # extract logger.log_info("-" * 12 + "【提取变量-开始】" + "-" * 12) try: extracted_variables_mapping = resp_obj.extract_response( extractors, self.context) self.context.update_testcase_runtime_variables_mapping( extracted_variables_mapping) except Exception as err: logger.log_error('提取变量失败:{0}'.format(err.args[0])) self.handle_teardown(fail_type='提取变量') raise exceptions.ExtractFailure logger.log_info("-" * 12 + "【提取变量-结束】" + "-" * 12) # validate try: logger.log_info("-" * 12 + "【结果校验-开始】" + "-" * 12) self.evaluated_validators, validate_pass = self.context.validate( validators, resp_obj) logger.log_info("-" * 12 + "【结果校验-结束】" + "-" * 12) if not validate_pass: # self.handle_teardown(fail_type='结果校验') raise exceptions.ValidationFailure except (exceptions.ParamsError, exceptions.ValidationFailure, exceptions.ExtractFailure, exceptions.VariableNotFound) as err: # log request # err_req_msg = "request: \n" # err_req_msg += "headers: {}\n".format(parsed_request.pop("headers", {})) # for k, v in parsed_request.items(): # err_req_msg += "{}: {}\n".format(k, repr(v)) # logger.log_error(err_req_msg) # # # log response # err_resp_msg = "response: \n" # err_resp_msg += "status_code: {}\n".format(resp_obj.status_code) # err_resp_msg += "headers: {}\n".format(resp_obj.headers) # err_resp_msg += "body: {}\n".format(repr(resp_obj.text)) # logger.log_error(err_resp_msg) logger.log_error('结果校验失败') self.handle_teardown(fail_type='结果校验') raise exceptions.ValidationFailure # teardown hooks teardown_hooks = teststep_dict.get("teardown_hooks", []) self.step_teardown_executed = True if teardown_hooks: # logger.log_info("start to run teardown hooks") logger.log_info("-" * 12 + "【用例后置-开始】" + "-" * 12) self.step_teardown_pass = True self.running_hook = 'step_teardown' self.context.update_teststep_variables_mapping( "response", resp_obj) self.do_teardown_hook_actions(teardown_hooks) logger.log_info("-" * 12 + "【用例后置-结束】" + 
"-" * 12) if not self.step_teardown_pass: self.handle_teardown(fail_type='后置动作') raise exceptions.TeardownHooksFailure # total teardown hooks if is_last: if self.testcase_teardown_hooks and not self.testcase_teardown_hooks_executed: logger.log_info("-" * 12 + "【全局后置-开始】" + "-" * 12) self.testcase_teardown_hooks_executed = True self.do_teardown_hook_actions(self.testcase_teardown_hooks) logger.log_info("-" * 12 + "【全局后置-结束】" + "-" * 12) logger.log_info("【结束执行用例】: ID_{0}, {1}".format(case_id, case_name))
def _do_validation(self, validator_dict):
    """ validate with functions

    Args:
        validator_dict (dict): validator dict
            {
                "check": "status_code",
                "check_value": 200,
                "expect": 201,
                "comparator": "eq"
            }

    Side effects:
        sets validator_dict["check_result"] to "pass" or "fail".

    Raises:
        exceptions.FunctionNotFound: unknown comparator.
        exceptions.ParamsError: a None value compared with a comparator
            not on the None-tolerant whitelist.
        exceptions.ValidationFailure: the check did not pass.
    """
    # TODO: move comparator uniform to init_test_suites
    comparator = utils.get_uniform_comparator(validator_dict["comparator"])
    validate_func = self.TESTCASE_SHARED_FUNCTIONS_MAPPING.get(comparator)

    if not validate_func:
        raise exceptions.FunctionNotFound(
            "comparator not found: {}".format(comparator))

    check_item = validator_dict["check"]
    check_value = validator_dict["check_value"]
    expect_value = validator_dict["expect"]

    # None values are only allowed with comparators able to handle them
    if (check_value is None or expect_value is None) \
        and comparator not in [
            "is", "eq", "equals", "==", "json_contains", "json_same",
            "field_special_check", "db_validate", "db_validate_cycle",
            "field_check_empty_list", "field_check_not_empty_list",
            "field_check_not_in_list", "field_check_empty_json",
            "field_check_not_empty_json", "redis_validate", "mq_validate"]:
        raise exceptions.ParamsError(
            "Null value can only be compared with comparator: eq/equals/=="
        )

    validate_msg = "【验证点】: 校验方法:{}, 待校验内容:{}, 期望结果:{}({})".format(
        comparator, check_item, expect_value, type(expect_value).__name__)

    try:
        validator_dict["check_result"] = "pass"
        # comparator protocol: True on pass, otherwise False or a
        # (False, reason) tuple (reason at index 1)
        is_ok = validate_func(check_value, expect_value)
        if is_ok is True:
            validate_msg += "\t ....................PASS"
            logger.log_info(validate_msg)
        else:
            validate_msg += "\t ....................FAIL"
            if is_ok is not False:
                validate_msg += "......原因: {}".format(is_ok[1])
            logger.log_error(validate_msg)
            validator_dict["check_result"] = "fail"
            raise exceptions.ValidationFailure(validate_msg)
    except (AssertionError, TypeError) as err:
        validate_msg += "\t ....................FAIL"
        validate_msg += "\t{}({}), {}, {}({})".format(
            check_value, type(check_value).__name__, comparator,
            expect_value, type(expect_value).__name__)
        # NOTE(review): err.args[0] raises IndexError when the exception
        # carries no args — confirm comparators always supply a message.
        validate_msg += "\t......原因: {}".format(err.args[0])
        logger.log_error(validate_msg)
        validator_dict["check_result"] = "fail"
        raise exceptions.ValidationFailure(validate_msg)
def parse_string_functions(content, variables_mapping, functions_mapping, runner=None):
    """ parse string content with functions mapping.

    Args:
        content (str): string content to be parsed.
        variables_mapping (dict): variables mapping.
        functions_mapping (dict): functions mapping.
        runner (object): optional running context; when a called function
            fails, the ``step_*_pass`` flag matching ``runner.running_hook``
            is cleared on it.

    Returns:
        str: parsed string content. When the whole content is a single
            function call, the function's raw return value is returned.

    Examples:
        >>> content = "abc${add_one(3)}def"
        >>> functions_mapping = {"add_one": lambda x: x + 1}
        >>> parse_string_functions(content, functions_mapping)
            "abc4def"

    """
    functions_list = extract_functions(content)
    for func_content in functions_list:
        function_meta = parse_function(func_content)
        func_name = function_meta["func_name"]
        logger.log_info("【识别函数】: {}".format(func_name))
        args = function_meta.get("args", [])
        kwargs = function_meta.get("kwargs", {})
        args = parse_data(args, variables_mapping, functions_mapping)
        logger.log_info("【函数{0}参数列表】: {1}".format(func_name, args))
        kwargs = parse_data(kwargs, variables_mapping, functions_mapping)

        if func_name in ["parameterize", "P"]:
            from httprunner import loader
            eval_value = loader.load_csv_file(*args, **kwargs)
        else:
            func = get_mapping_function(func_name, functions_mapping)
            try:
                eval_value = func(*args, **kwargs)
                if eval_value is False:
                    logger.log_error("【函数{0}异常返回】: {1}".format(
                        func_name, eval_value))
                else:
                    logger.log_info("【函数{0}返回】: {1}".format(
                        func_name, eval_value))
            except Exception as err:
                # fix: err.args may be empty (e.g. `raise KeyError()`);
                # fall back to the exception itself to avoid IndexError here.
                logger.log_error("【函数{0}异常返回】: {1}".format(
                    func_name, err.args[0] if err.args else err))
                eval_value = '自定义方法调用失败'

        if eval_value is False or eval_value == '自定义方法调用失败':
            # fix: runner defaults to None; without this guard a failing
            # function raised AttributeError on runner.running_hook instead
            # of simply marking the current hook as failed.
            if runner is not None:
                if runner.running_hook == 'step_setup':
                    runner.step_setup_pass = False
                elif runner.running_hook == 'step_teardown':
                    runner.step_teardown_pass = False
                elif runner.running_hook == 'step_request_teardown':
                    runner.step_request_teardown_pass = False
                elif runner.running_hook == 'step_parse_variable':
                    runner.step_parse_variable_pass = False

        func_content = "${" + func_content + "}"
        if func_content == content:
            # content is a function, e.g. "${add_one(3)}"
            content = eval_value
        else:
            # content contains one or many functions,
            # e.g. "abc${add_one(3)}def"
            content = content.replace(func_content, str(eval_value), 1)

    return content
def api_run_test(**kwargs):
    """Load a testset from ``kwargs``, run it with HttpRunner and persist the report.

    Pops ``report_id`` / ``plan_name`` / ``project_id`` / ``failfast`` from
    kwargs, builds the testset via ``ApiTestLoader``, executes it, post-processes
    ``runner.summary`` (adds intf_type, trims noisy request/response fields,
    re-checks json/db validators and fixes up the pass/fail stats), records
    reuse info and last-run status, and saves the report.

    Returns:
        str: report URL placeholder (report generation is currently disabled).

    Raises:
        LoadCaseError: assembling the testset failed or it is empty.
        RunCaseError: executing the testset failed.
    """
    report_id = kwargs.pop('report_id', None)
    plan_name = kwargs.pop('plan_name', None)
    project_id = kwargs.pop('project_id', None)
    testcase_main_id_list = kwargs.get('testcase_main_id_list', None)
    failfast = kwargs.pop('failfast', False)
    # Running a main-case list vs. an ordinary testcase list.
    is_main = bool(testcase_main_id_list)

    # fix: define runner before the try block — if HttpRunner() itself raises,
    # the except handler below used to hit NameError on `runner.handler`,
    # masking the original load error.
    runner = None
    try:
        logger.debug(
            '=============================={dir}run_{report_id}.log'.format(
                dir=run_case_log_dir, report_id=report_id))
        hr_kwargs = {
            "failfast": failfast,
            "log_path": '{dir}run_{report_id}.log'.format(
                dir=run_case_log_dir, report_id=report_id)
        }
        runner = HttpRunner(**hr_kwargs)
        loader = ApiTestLoader(**kwargs)
        testset = loader.get_testset_list()
        test_meta_list = loader.get_test_meta_list()
        if not testset:
            raise LoadCaseError('没有可执行的用例')
        logger.debug("{1} testset:{0}".format(testset, type(testset)))
    except Exception as err:
        save_report(report_path=None, runner_summary=None,
                    project_id=project_id, report_id=report_id)
        hr_logger.log_error("【ERROR】组装用例出错!")
        hr_logger.log_error('\n'.join([str(err), traceback.format_exc()]))
        hr_logger.log_info("【END】测试结束!")
        if runner is not None:
            hr_logger.remove_handler(runner.handler)
        raise LoadCaseError

    def _mark_validator_failed(detail, record, validate, key, error_log):
        # Flip one validator to "fail" and fix up per-detail and global stats.
        validate["check_result"] = "fail"
        record["status"] = "failure"
        detail["stat"]["failures"] += 1
        detail["stat"]["successes"] -= 1
        runner.summary["stat"]["failures"] += 1
        runner.summary["stat"]["successes"] -= 1
        validate["error_log"] = {key: error_log}

    try:
        hr_logger.log_info("【START】测试开始! (ง •_•)ง")
        hr_logger.log_info("【环境】: {}".format(kwargs.get('env_name', None)))
        try:
            testset_json = json_dumps(testset)
        except Exception:
            # Best effort: fall back to the raw object if it isn't serializable.
            testset_json = testset
        hr_logger.log_debug("【调用HttpRunner】: {0}".format(testset_json))
        runner.run(testset)
        hr_logger.log_info("【结束调用HttpRunner】")
        perfect_summary(runner.summary, test_meta_list)

        # Record testcase reuse info (updates api_testcase_reuse_record).
        summary_remove_file_obj(runner.summary)
        summary_for_reuse = copy.deepcopy(runner.summary)
        summary_for_reuse = add_memo(summary_for_reuse)
        summary_for_reuse = identify_errors(summary_for_reuse)
        save_testcase_reuse_record([{
            "summary": json_loads(json_dumps(summary_for_reuse))
        }])
        del summary_for_reuse

        for detail in runner.summary["details"]:
            for record in detail["records"]:
                # Tag the record with its interface type.
                record["intf_type"] = test_meta_list[0]["intf_type"]
                # Drop request/response fields nobody looks at in the report.
                request_keys = ["json", "start_timestamp"]
                response_keys = [
                    "elapsed_ms", "encoding", 'ok', 'url', 'reason', 'cookies'
                ]
                for request_key in request_keys:
                    if request_key in record["meta_data"]["request"]:
                        del record["meta_data"]["request"][request_key]
                for response_key in response_keys:
                    if response_key in record["meta_data"]["response"]:
                        del record["meta_data"]["response"][response_key]
                # A record in "error" state aborts the run with its attachment.
                if record['status'] == 'error':
                    error_msg = record['attachment']
                    raise Exception(error_msg)

                # Re-check special comparators and add an "error type" column.
                for validate in record["meta_data"]["validators"]:
                    if validate["comparator"] == "json_contains":
                        check_value = validate["check_value"]
                        expect_value = validate["expect"]
                        if json_contains(check_value, expect_value) is not True:
                            _mark_validator_failed(
                                detail, record, validate, "json_contains",
                                "预期:{}未在返回报文内".format(expect_value))
                    elif validate["comparator"] == "db_validate":
                        check_value = validate["check_value"]
                        expect_value = validate["expect"]
                        if db_validate(check_value, expect_value) is not True:
                            _mark_validator_failed(
                                detail, record, validate, "db_validate",
                                "预期:{0},实际是:{1}".format(
                                    expect_value, check_value))
                    elif validate["comparator"] == "db_json_validate":
                        check_value = validate["check_value"]
                        expect_value = validate["expect"]
                        if not db_json_validate(check_value, expect_value):
                            _mark_validator_failed(
                                detail, record, validate, "db_json_validate",
                                "预期:{0},实际是:{1}".format(
                                    expect_value,
                                    json.dumps(check_value).encode(
                                        'utf-8').decode('unicode_escape')))

        hr_logger.log_info("【runner.summary】: {}".format(
            json_dumps(runner.summary)))
        runner_summary = copy.deepcopy(runner.summary)

        # Persist per-case pass/fail into testcase_info.last_run (best effort).
        try:
            save_last_run(runner_summary, is_main=is_main)
        except Exception as e:
            logger.error('\n'.join([str(e), traceback.format_exc()]))

        # HTML report generation is disabled; keep the placeholder URL.
        report_url = '不生成报告'
        save_report(report_url, runner_summary, project_id,
                    report_id=report_id, is_main=is_main)
    except Exception as err:
        save_report(report_path=None, runner_summary=runner.summary,
                    project_id=project_id, report_id=report_id)
        hr_logger.log_error("【ERROR】运行用例出错!")
        hr_logger.log_error('\n'.join([str(err), traceback.format_exc()]))
        raise RunCaseError
    finally:
        hr_logger.log_info("【END】测试结束!")
        hr_logger.remove_handler(runner.handler)
    return report_url
def main_locust():
    """Performance test entry point for locust.

    Rewrites ``sys.argv`` into a locust command line: translates the ``-f``
    testcase file into a locustfile, and (optionally) fans the load out over
    ``--processes N`` worker processes. Exits the process on bad arguments.
    """
    logger.setup_logger("INFO")

    try:
        from atp.httprunner import locusts
    except ImportError:
        msg = "Locust is not installed, install first and try again.\n"
        msg += "install command: pip install locustio"
        logger.log_warning(msg)
        # fix: builtin exit() comes from the site module and is meant for
        # interactive use only; sys.exit() is the correct programmatic call.
        sys.exit(1)

    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        sys.argv.extend(["-h"])

    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        locusts.main()
        sys.exit(0)

    try:
        testcase_index = sys.argv.index('-f') + 1
        assert testcase_index < len(sys.argv)
    except (ValueError, AssertionError):
        logger.log_error("Testcase file is not specified, exit.")
        sys.exit(1)

    testcase_file_path = sys.argv[testcase_index]
    sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)

    if "--processes" in sys.argv:
        """ locusts -f locustfile.py --processes 4 """
        if "--no-web" in sys.argv:
            logger.log_error(
                "conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)

        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1

        if processes_count_index >= len(sys.argv):
            """ do not specify processes count explicitly
                locusts -f locustfile.py --processes
            """
            processes_count = multiprocessing.cpu_count()
            logger.log_warning(
                "processes count not specified, use {} by default.".format(
                    processes_count))
        else:
            try:
                """ locusts -f locustfile.py --processes 4 """
                processes_count = int(sys.argv[processes_count_index])
                sys.argv.pop(processes_count_index)
            except ValueError:
                """ locusts -f locustfile.py --processes -P 8888 """
                processes_count = multiprocessing.cpu_count()
                logger.log_warning(
                    "processes count not specified, use {} by default.".format(
                        processes_count))

        sys.argv.pop(processes_index)
        locusts.run_locusts_with_processes(sys.argv, processes_count)
    else:
        locusts.main()