def prettify_json_file(file_list):
    """Rewrite each JSON testset in *file_list* as an indented ``*.pretty.json`` copy.

    Non-JSON paths are skipped with a warning; a file that fails to parse
    aborts the run via SystemExit.
    """
    for path in set(file_list):
        # Only .json files can be prettified; warn and move on for anything else.
        if not path.endswith(".json"):
            logger.log_warning("Only JSON file format can be prettified, skip: {}".format(path))
            continue

        logger.color_print("Start to prettify JSON file: {}".format(path), "GREEN")

        folder = os.path.dirname(path)
        base_name, _suffix = os.path.splitext(os.path.basename(path))
        pretty_path = os.path.join(folder, "{}.pretty.json".format(base_name))

        with io.open(path, 'r', encoding='utf-8') as src:
            try:
                content = json.load(src)
            except ValueError as err:
                # Malformed JSON is fatal for a prettify run.
                raise SystemExit(err)

        with io.open(pretty_path, 'w', encoding='utf-8') as dst:
            json.dump(content, dst, indent=4, separators=(',', ': '))
            dst.write('\n')

        print("success: {}".format(pretty_path))
def create_scaffold(project_name):
    """ create scaffold with specified project name.

    Lays out the standard skeleton (api/, testcases/, testsuites/, reports/,
    debugtalk.py, .env). If the folder already exists, a warning is logged
    and nothing is created.
    """
    if os.path.isdir(project_name):
        logger.log_warning(
            u"Folder {} exists, please specify a new folder name.".format(
                project_name))
        return

    logger.color_print("Start to create new project: {}".format(project_name), "GREEN")
    logger.color_print("CWD: {}\n".format(os.getcwd()), "BLUE")

    def create_path(path, ptype):
        # Create a folder or an empty file, then log what was made.
        if ptype == "folder":
            os.makedirs(path)
        elif ptype == "file":
            open(path, 'w').close()
        msg = "created {}: {}".format(ptype, path)
        logger.color_print(msg, "BLUE")

    path_list = [
        (project_name, "folder"),
        (os.path.join(project_name, "api"), "folder"),
        (os.path.join(project_name, "testcases"), "folder"),
        (os.path.join(project_name, "testsuites"), "folder"),
        (os.path.join(project_name, "reports"), "folder"),
        (os.path.join(project_name, "debugtalk.py"), "file"),
        (os.path.join(project_name, ".env"), "file"),
    ]
    # Fix: use a plain loop for side effects instead of a throwaway
    # list comprehension, and unpack the tuples by name.
    for path, ptype in path_list:
        create_path(path, ptype)
def create_scaffold(project_path):
    """Create a fresh project skeleton rooted at *project_path*.

    Skips creation (with a warning) when the target folder already exists.
    """
    if os.path.isdir(project_path):
        folder_name = os.path.basename(project_path)
        logger.log_warning(u"Folder {} exists, please specify a new folder name.".format(folder_name))
        return

    logger.color_print("Start to create new project: {}\n".format(project_path), "GREEN")

    def create_path(path, ptype):
        # Make a folder or an empty file; return a one-line report.
        if ptype == "folder":
            os.makedirs(path)
        elif ptype == "file":
            open(path, 'w').close()
        return "created {}: {}\n".format(ptype, path)

    layout = [
        (project_path, "folder"),
        (os.path.join(project_path, "tests"), "folder"),
        (os.path.join(project_path, "tests", "api"), "folder"),
        (os.path.join(project_path, "tests", "suite"), "folder"),
        (os.path.join(project_path, "tests", "testcases"), "folder"),
        (os.path.join(project_path, "tests", "debugtalk.py"), "file"),
    ]
    report_lines = []
    for path, ptype in layout:
        report_lines.append(create_path(path, ptype))
    logger.color_print("".join(report_lines), "BLUE")
def create_scaffold(project_path):
    """Build the scaffold directory tree for a new project at *project_path*."""
    if os.path.isdir(project_path):
        folder_name = os.path.basename(project_path)
        logger.log_warning(
            u"Folder {} exists, please specify a new folder name.".format(
                folder_name))
        return

    logger.color_print(
        "Start to create new project: {}\n".format(project_path), "GREEN")

    tests_root = os.path.join(project_path, "tests")
    # Folders are created first (root before children), then the single file.
    folders = [
        project_path,
        tests_root,
        os.path.join(tests_root, "api"),
        os.path.join(tests_root, "suite"),
        os.path.join(tests_root, "testcases"),
    ]
    files = [os.path.join(tests_root, "debugtalk.py")]

    messages = []
    for folder in folders:
        os.makedirs(folder)
        messages.append("created folder: {}\n".format(folder))
    for file_path in files:
        open(file_path, 'w').close()
        messages.append("created file: {}\n".format(file_path))

    logger.color_print("".join(messages), "BLUE")
def load_api_folder(api_folder_path):
    """ load api definitions from api folder.

    Args:
        api_folder_path (str): api files folder.
            api file should be in the following format:
            [
                {"api": {"def": "api_login", "request": {}, "validate": []}},
                {"api": {"def": "api_logout", "request": {}, "validate": []}}
            ]

    Returns:
        dict: api definition mapping keyed by the defined function name:
            {
                "api_login": {"function_meta": {...}, "request": {}},
                "api_logout": {"function_meta": {...}, "request": {}}
            }

    """
    api_definition_mapping = {}

    folder_content = load_folder_content(api_folder_path)
    for _file_path, entries in folder_content.items():
        # TODO: add JSON schema validation
        for entry in entries:
            _key, definition = entry.popitem()
            meta = parser.parse_function(definition.pop("def"))
            func_name = meta["func_name"]

            # Later definitions silently win; only a warning is emitted.
            if func_name in api_definition_mapping:
                logger.log_warning(
                    "API definition duplicated: {}".format(func_name))

            definition["function_meta"] = meta
            api_definition_mapping[func_name] = definition

    return api_definition_mapping
def prettify_json_file(file_list):
    """Write a pretty-printed ``<name>.pretty.json`` next to every JSON testcase file.

    Non-JSON entries are skipped with a warning; a file that fails to parse
    aborts the whole run via SystemExit.
    """
    for json_file in set(file_list):
        if not json_file.endswith(".json"):
            logger.log_warning(
                "Only JSON file format can be prettified, skip: {}".format(
                    json_file))
            continue

        logger.color_print("Start to prettify JSON file: {}".format(json_file), "GREEN")

        base, _ = os.path.splitext(os.path.basename(json_file))
        outfile = os.path.join(os.path.dirname(json_file), "{}.pretty.json".format(base))

        # Parse first so an invalid file never produces a partial output file.
        with io.open(json_file, 'r', encoding='utf-8') as stream:
            try:
                parsed = json.load(stream)
            except ValueError as e:
                raise SystemExit(e)

        with io.open(outfile, 'w', encoding='utf-8') as out:
            json.dump(parsed, out, indent=4, separators=(',', ': '))
            out.write('\n')

        print("success: {}".format(outfile))
def load_test_file(file_path):
    """ load testset file, get testset data structure.

    @param file_path: absolute valid testset file path
    @return testset dict
        {
            "name": "desc1",
            "config": {},
            "api": {},
            "testcases": [testcase11, testcase12]
        }
    """
    testset = {
        "name": "",
        "config": {"path": file_path},
        "api": {},
        "testcases": []
    }

    for item in load_file(file_path):
        for key, block in item.items():
            if key == "config":
                testset["config"].update(block)
                testset["name"] = block.get("name", "")
            elif key == "test":
                if "api" in block:
                    # expand the referenced api definition into the test block
                    extend_test_api(block)
                    testset["testcases"].append(block)
                elif "suite" in block:
                    # inline every testcase from the referenced suite
                    suite_info = get_testinfo_by_reference(block["suite"], "suite")
                    testset["testcases"].extend(suite_info["testcases"])
                else:
                    testset["testcases"].append(block)
            elif key == "api":
                meta = parse_function(block.pop("def"))
                func_name = meta["func_name"]
                if func_name in testset["api"]:
                    logger.log_warning(
                        "api definition duplicated: {}".format(func_name))
                api_info = {"function_meta": meta}
                api_info.update(block)
                testset["api"][func_name] = api_info
            else:
                logger.log_warning(
                    "unexpected block: {}. block should only be 'config', 'test' or 'api'."
                    .format(key))

    return testset
def load_test_file(edit_dict_case):
    """ load testset data structure from an in-memory testcase list.

    @param edit_dict_case: list of single-key dicts, each either
        {"config": {...}} or {"test": {...}}  (fix: the previous docstring
        documented a non-existent ``file_path`` parameter)
    @return testset dict
        {
            "name": "desc1",
            "config": {},
            "api": {},
            "testcases": [testcase11, testcase12]
        }
    @raise exception.FileFormatError: when an item is not a single-key dict,
        or its value is not a dict
    """
    testset = {
        "name": "",
        "config": {
            # default project path; "output" collects extracted variable names
            "path": os.getcwd() + "/debugtalk",
            "output": []
        },
        "api": {},
        "testcases": []
    }

    tests_list = edit_dict_case
    for item in tests_list:
        if not isinstance(item, dict) or len(item) != 1:
            raise exception.FileFormatError(
                "Testcase format error: {}".format(item))

        key, test_block = item.popitem()
        if not isinstance(test_block, dict):
            raise exception.FileFormatError(
                "Testcase format error: {}".format(item))

        if key == "config":
            testset["config"].update(test_block)
            testset["name"] = test_block.get("name", "")
        elif key == "test":
            if "api" in test_block:
                # resolve the referenced api definition and merge overrides
                ref_call = test_block["api"]
                def_block = TestcaseLoader._get_block_by_name(ref_call, "api")
                TestcaseLoader._override_block(def_block, test_block)
                testset["testcases"].append(test_block)
            elif "suite" in test_block:
                ref_call = test_block["suite"]
                block = TestcaseLoader._get_block_by_name(ref_call, "suite")
                testset["testcases"].extend(block["testcases"])
            else:
                testset["testcases"].append(test_block)

            # expose every extracted variable as a testset output
            if "extract" in test_block:
                for extractor in test_block.get("extract"):
                    testset["config"]["output"].extend(extractor.keys())
        else:
            logger.log_warning(
                "unexpected block key: {}. block key should only be 'config' or 'test'."
                .format(key))

    return testset
def _load_testcase(raw_testcase, project_mapping):
    """ load testcase/testsuite with api/testcase references

    Args:
        raw_testcase (list): raw testcase content loaded from JSON/YAML file —
            one {"config": {...}} item plus any number of {"test": {...}} items.
        project_mapping (dict): project_mapping

    Returns:
        dict: loaded testcase content
            {
                "config": {},
                "teststeps": [teststep11, teststep12]
            }

    Raises:
        exceptions.FileFormatError: when an item is not a single-key dict
            whose value is a dict.
    """
    loaded_testcase = {"config": {}, "teststeps": []}

    for entry in raw_testcase:
        # TODO: add json schema validation
        if not isinstance(entry, dict) or len(entry) != 1:
            raise exceptions.FileFormatError(
                "Testcase format error: {}".format(entry))

        key, block = entry.popitem()
        if not isinstance(block, dict):
            raise exceptions.FileFormatError(
                "Testcase format error: {}".format(entry))

        if key == "config":
            loaded_testcase["config"].update(block)
        elif key == "test":
            loaded_testcase["teststeps"].extend(
                _load_teststeps(block, project_mapping))
        else:
            logger.log_warning(
                "unexpected block key: {}. block key should only be 'config' or 'test'."
                .format(key))

    return loaded_testcase
def __init__(self, testset, variables_mapping=None, http_client_session=None):
    """Build a test suite from a *testset* dict.

    Expands config-level and testcase-level parameterized variables into the
    cartesian product of (runner, testcase) combinations, evaluates each
    testcase name against the bound variables, and registers every
    combination as a test.

    NOTE(review): the runner class is taken from testset["runner"], so a
    custom runner can be plugged in (see inline comments dated 20180608).
    """
    super(TestSuite, self).__init__()
    self.test_runner_list = []

    # config-level settings
    config_dict = testset.get("config", {})
    self.output_variables_list = config_dict.get("output", [])
    self.testset_file_path = config_dict.get("path")
    config_dict_parameters = config_dict.get("parameters", [])

    # merge config variables with externally supplied overrides
    config_dict_variables = config_dict.get("variables", [])
    variables_mapping = variables_mapping or {}
    config_dict_variables = utils.override_variables_binds(
        config_dict_variables, variables_mapping)

    #zhengchun 20180608
    self.testcase_parser = testcase.TestcaseParser()
    config_parametered_variables_list = self._get_parametered_variables(
        config_dict_variables, config_dict_parameters, self.testcase_parser)

    testcases = testset.get("testcases", [])
    for config_variables in config_parametered_variables_list:
        # config level: one runner per config parameter set
        config_dict["variables"] = config_variables
        # update by zhengchun, can use myRunner, 20180608
        test_runner = testset["runner"](config_dict, http_client_session)

        for testcase_dict in testcases:
            # shallow copy so each parameter set mutates its own dict
            testcase_dict = copy.copy(testcase_dict)
            # testcase level parameterization
            testcase_parametered_variables_list = self._get_parametered_variables(
                testcase_dict.get("variables", []),
                testcase_dict.get("parameters", []),
                test_runner.context.testcase_parser)
            for testcase_variables in testcase_parametered_variables_list:
                testcase_dict["variables"] = testcase_variables

                # eval testcase name with bind variables; fall back to the
                # raw name when evaluation fails
                variables = utils.override_variables_binds(
                    config_variables, testcase_variables)
                self.testcase_parser.update_binded_variables(variables)
                try:
                    testcase_name = self.testcase_parser.eval_content_with_bindings(
                        testcase_dict["name"])
                except (AssertionError, exception.ParamsError):
                    logger.log_warning(
                        "failed to eval testcase name: {}".format(
                            testcase_dict["name"]))
                    testcase_name = testcase_dict["name"]

                self.test_runner_list.append((test_runner, variables))
                self._add_test_to_suite(testcase_name, test_runner, testcase_dict)
def _merge_extractor(def_extrators, ref_extractors): """ merge def_extrators with ref_extractors Args: def_extrators (list): [{"var1": "val1"}, {"var2": "val2"}] ref_extractors (list): [{"var1": "val111"}, {"var3": "val3"}] Returns: list: merged extractors Examples: >>> def_extrators = [{"var1": "val1"}, {"var2": "val2"}] >>> ref_extractors = [{"var1": "val111"}, {"var3": "val3"}] >>> _merge_extractor(def_extrators, ref_extractors) [ {"var1": "val111"}, {"var2": "val2"}, {"var3": "val3"} ] """ if not def_extrators: return ref_extractors elif not ref_extractors: return def_extrators else: extractor_dict = OrderedDict() for api_extrator in def_extrators: if len(api_extrator) != 1: logger.log_warning( "incorrect extractor: {}".format(api_extrator)) continue var_name = list(api_extrator.keys())[0] extractor_dict[var_name] = api_extrator[var_name] for test_extrator in ref_extractors: if len(test_extrator) != 1: logger.log_warning( "incorrect extractor: {}".format(test_extrator)) continue var_name = list(test_extrator.keys())[0] extractor_dict[var_name] = test_extrator[var_name] extractor_list = [] for key, value in extractor_dict.items(): extractor_list.append({key: value}) return extractor_list
def load_api_folder(api_folder_path):
    ''' load api definitions from api folder.

    Args:
        api_folder_path (str): api files folder, each file containing items like
            {
                'api':{
                    'def':'api_login',
                    'request':{},
                    'validate':[]
                }
            }

    Returns:
        dict: api definition mapping keyed by function name.
            {
                'api_login':{
                    'function_meta':{'func_name':'api_login','args':[],'kwargs':{}},
                    'request':{}
                },
                ...
            }
        Also stored in the module-level project_mapping under 'def-api'.
    '''
    api_definition_mapping = {}

    for _file_path, entries in load_folder_content(api_folder_path).items():
        for entry in entries:
            _key, api_dict = entry.popitem()
            function_meta = parser.parse_function(api_dict.pop('def'))
            func_name = function_meta['func_name']

            if func_name in api_definition_mapping:
                logger.log_warning(f'API definition duplicated: {func_name}')

            api_dict['function_meta'] = function_meta
            api_definition_mapping[func_name] = api_dict

    # register into the shared project mapping as well
    project_mapping['def-api'] = api_definition_mapping
    return api_definition_mapping
def __load_file_content(path):
    """Load one test file and file it into tests_mapping by its "type".

    Files with an invalid format are logged and skipped; unknown types are
    silently ignored.
    """
    # Fix: initialize before the try — previously `loaded_content` was
    # unbound (NameError) whenever FileFormatError was raised.
    loaded_content = None
    try:
        loaded_content = load_test_file(path)
    except exceptions.FileFormatError:
        logger.log_warning("Invalid test file format: {}".format(path))

    if not loaded_content:
        pass
    elif loaded_content["type"] == "testsuite":
        tests_mapping.setdefault("testsuites", []).append(loaded_content)
    elif loaded_content["type"] == "testcase":
        tests_mapping.setdefault("testcases", []).append(loaded_content)
    elif loaded_content["type"] == "api":
        tests_mapping.setdefault("apis", []).append(loaded_content)
def run_tests(self, tests_mapping):
    """ run testcase/testsuite data

    Pipeline: parse -> add to suite -> run -> aggregate -> stringify report.
    self.exception_stage records which phase any escaping exception came from.

    Args:
        tests_mapping (dict): loaded tests, optionally carrying
            "project_mapping" (with "PWD").

    Returns:
        dict: aggregated summary of the whole run.

    Raises:
        exceptions.ParseTestsFailure: when every test file failed to parse.
    """
    capture_message("start to run tests")
    project_mapping = tests_mapping.get("project_mapping", {})
    self.project_working_directory = project_mapping.get(
        "PWD", os.getcwd())

    if self.save_tests:
        utils.dump_logs(tests_mapping, project_mapping, "loaded")

    # parse tests
    self.exception_stage = "parse tests"
    parsed_testcases = parser.parse_tests(tests_mapping)

    # record files that failed to parse; abort only if nothing parsed at all
    parse_failed_testfiles = parser.get_parse_failed_testfiles()
    if parse_failed_testfiles:
        logger.log_warning("parse failures occurred ...")
        utils.dump_logs(parse_failed_testfiles, project_mapping, "parse_failed")

    if len(parsed_testcases) == 0:
        logger.log_error("failed to parse all cases, abort.")
        raise exceptions.ParseTestsFailure

    if self.save_tests:
        utils.dump_logs(parsed_testcases, project_mapping, "parsed")

    # add tests to test suite
    self.exception_stage = "add tests to test suite"
    test_suite = self._add_tests(parsed_testcases)

    # run test suite
    self.exception_stage = "run test suite"
    results = self._run_suite(test_suite)

    # aggregate results
    self.exception_stage = "aggregate results"
    self._summary = self._aggregate(results)

    # generate html report
    self.exception_stage = "generate html report"
    report.stringify_summary(self._summary)

    if self.save_tests:
        utils.dump_logs(self._summary, project_mapping, "summary")
        # save variables and export data
        vars_out = self.get_vars_out()
        utils.dump_logs(vars_out, project_mapping, "io")

    return self._summary
def __init__(self, testset, variables_mapping=None, http_client_session=None):
    """Build a test suite from a *testset* dict.

    Expands config-level and testcase-level parameterized variables into the
    cartesian product of (runner, testcase) combinations, evaluates each
    testcase name with the bound variables, and registers every combination
    as a test via _add_test_to_suite.
    """
    super(TestSuite, self).__init__()
    self.test_runner_list = []

    # config-level settings
    config_dict = testset.get("config", {})
    self.output_variables_list = config_dict.get("output", [])
    self.testset_file_path = config_dict.get("path")
    config_dict_parameters = config_dict.get("parameters", [])

    # merge config variables with externally supplied overrides
    config_dict_variables = config_dict.get("variables", [])
    variables_mapping = variables_mapping or {}
    config_dict_variables = utils.override_variables_binds(config_dict_variables, variables_mapping)

    config_parametered_variables_list = self._get_parametered_variables(
        config_dict_variables,
        config_dict_parameters
    )
    self.testcase_parser = testcase.TestcaseParser()
    testcases = testset.get("testcases", [])
    for config_variables in config_parametered_variables_list:
        # config level: one Runner per config parameter set
        config_dict["variables"] = config_variables
        test_runner = runner.Runner(config_dict, http_client_session)

        for testcase_dict in testcases:
            # shallow copy so each parameter set mutates its own dict
            testcase_dict = copy.copy(testcase_dict)
            # testcase level parameterization
            testcase_parametered_variables_list = self._get_parametered_variables(
                testcase_dict.get("variables", []),
                testcase_dict.get("parameters", [])
            )
            for testcase_variables in testcase_parametered_variables_list:
                testcase_dict["variables"] = testcase_variables

                # eval testcase name with bind variables; fall back to the
                # raw name when evaluation fails
                variables = utils.override_variables_binds(
                    config_variables,
                    testcase_variables
                )
                self.testcase_parser.update_binded_variables(variables)
                try:
                    testcase_name = self.testcase_parser.eval_content_with_bindings(testcase_dict["name"])
                except (AssertionError, exception.ParamsError):
                    logger.log_warning("failed to eval testcase name: {}".format(testcase_dict["name"]))
                    testcase_name = testcase_dict["name"]

                self.test_runner_list.append((test_runner, variables))
                self._add_test_to_suite(testcase_name, test_runner, testcase_dict)
def load_file(file_path):
    """Load a data file by suffix (.json / .yaml / .yml / .csv).

    Raises exception.FileNotFoundError when the path does not exist.
    Unsupported suffixes log a warning and yield an empty list.
    """
    if not os.path.isfile(file_path):
        raise exception.FileNotFoundError("{} does not exist.".format(file_path))

    file_suffix = os.path.splitext(file_path)[1].lower()
    loaders = {
        '.json': FileUtils._load_json_file,
        '.yaml': FileUtils._load_yaml_file,
        '.yml': FileUtils._load_yaml_file,
        '.csv': FileUtils._load_csv_file,
    }
    loader = loaders.get(file_suffix)
    if loader is None:
        # '' or any other suffix is unsupported
        err_msg = u"Unsupported file format: {}".format(file_path)
        logger.log_warning(err_msg)
        return []
    return loader(file_path)
def load_file(file_path):
    """Load a data file by suffix (.json / .yaml / .yml / .csv).

    Raises exceptions.FileNotFound when the path does not exist.
    Unsupported suffixes log a warning and yield an empty list.
    """
    if not os.path.isfile(file_path):
        raise exceptions.FileNotFound("{} does not exist.".format(file_path))

    suffix = os.path.splitext(file_path)[1].lower()
    if suffix == '.json':
        return load_json_file(file_path)
    if suffix in ('.yaml', '.yml'):
        return load_yaml_file(file_path)
    if suffix == '.csv':
        return load_csv_file(file_path)

    # '' or any other suffix is unsupported
    logger.log_warning(u"Unsupported file format: {}".format(file_path))
    return []
def load_testcase(raw_testcase):
    """Load a v1-format testcase, resolving api/testcase references.

    Args:
        raw_testcase (list): raw content from a JSON/YAML file — one
            {"config": {...}} item plus any number of {"test": {...}} items.

    Returns:
        dict: {"config": {...}, "teststeps": [loaded teststeps]}
    """
    JsonSchemaChecker.validate_testcase_v1_format(raw_testcase)

    config = {}
    tests = []
    for entry in raw_testcase:
        key, block = entry.popitem()
        if key == "config":
            config.update(block)
        elif key == "test":
            tests.append(load_teststep(block))
        else:
            logger.log_warning(
                "unexpected block key: {}. block key should only be 'config' or 'test'."
                .format(key))

    return {"config": config, "teststeps": tests}
def validate_json_file(file_list):
    """ validate JSON testcase format

    Non-JSON paths are skipped with a warning; a file that fails to parse
    aborts the run via SystemExit.
    """
    for json_file in set(file_list):
        if not json_file.endswith(".json"):
            logger.log_warning("Only JSON file format can be validated, skip: {}".format(json_file))
            continue

        logger.color_print("Start to validate JSON file: {}".format(json_file), "GREEN")

        # Fix: read as UTF-8 explicitly — the platform default encoding may
        # differ (prettify_json_file already opens files this way).
        with io.open(json_file, encoding='utf-8') as stream:
            try:
                json.load(stream)
            except ValueError as e:
                raise SystemExit(e)

        print("OK")
def get_all_dep_paths_with_separator(dep_graph, separator='/'):
    """Get all dependent paths of the dep_graph.

    Example:
        graph = {'a': ['b'], 'd': ['e'], 'c': ['k','f','z'], 'b': ['y','c'],
                 'y': ['z'], 'z': [''], 'k': [''], 'f': [''], 'e': ['a','b','c']}
        dep_paths = get_all_dep_paths(graph)
        print(dep_paths)
        >> ['/a/b/y/z', '/a/b/c/k', ...]

    @param dep_graph Dict. Key-value mapping of each node; imagine it as a graph.
    @param separator String. Separator showing the dependency between nodes.
    @return List. All dependent paths of dep_graph as separator-joined strings.
    """
    validate_dependency_debug(convert_to_std_graph(dep_graph))

    # DFS paths starting from every node, e.g.
    # [[['a', 'b', 'y', 'z', ''], ['a', 'b', 'c', 'k', '']], ...]
    raw_paths = [_dfs(dep_graph, [node], []) for node in dep_graph.keys()]

    # Flatten node sequences into '/a/b/y/z'-style strings (separator
    # configurable), trimming the trailing separator left by '' leaf nodes.
    all_dep_path_list = []
    for paths in raw_paths:
        for node_seq in paths:
            joined = separator + separator.join(node_seq)
            all_dep_path_list.append(joined.rstrip(separator))

    unique_count = len(set(all_dep_path_list))
    total_count = len(all_dep_path_list)
    if unique_count < total_count:
        logger.log_warning(
            'Found duplicates!! len: set->{}, list->{}'.format(unique_count, total_count))
    # Report each duplicated path (only warns when a count exceeds 1).
    count_dict = collections.Counter(all_dep_path_list)
    for key, value in count_dict.items():
        if value > 1:
            logger.log_warning('Duplicate:length of ( {} )is {}'.format(
                key, value))

    return all_dep_path_list
def extract_output(self, output_variables_list):
    """ extract output variables

    Returns a dict of the requested variables taken from the current
    testcase variables mapping; missing names are skipped with a warning.
    """
    variables_mapping = self.context.testcase_variables_mapping

    output = {}
    for variable in output_variables_list:
        if variable in variables_mapping:
            output[variable] = variables_mapping[variable]
        else:
            logger.log_warning(
                "variable '{}' can not be found in variables mapping, failed to output!"
                .format(variable)
            )
    return output
def validate_json_file(file_list):
    """ validate JSON testset format

    Non-JSON paths are skipped with a warning; a file that fails to parse
    aborts the run via SystemExit.
    """
    for json_file in set(file_list):
        if not json_file.endswith(".json"):
            logger.log_warning("Only JSON file format can be validated, skip: {}".format(json_file))
            continue

        logger.color_print("Start to validate JSON file: {}".format(json_file), "GREEN")

        # Fix: read as UTF-8 explicitly — the platform default encoding may
        # differ (prettify_json_file already opens files this way).
        with io.open(json_file, encoding='utf-8') as stream:
            try:
                json.load(stream)
            except ValueError as e:
                raise SystemExit(e)

        print("OK")
def export_variables(self, output_variables_list):
    """Export the requested variables from the current session mapping.

    Names missing from the session mapping are skipped with a warning.
    The collected mapping is printed via utils.print_info and returned.
    """
    variables_mapping = self.session_context.session_variables_mapping

    output = {}
    for variable in output_variables_list:
        if variable in variables_mapping:
            output[variable] = variables_mapping[variable]
        else:
            logger.log_warning(
                "variable '{}' can not be found in variables mapping, "
                "failed to export!".format(variable))

    utils.print_info(output)
    return output
def load_api_file(file_path):
    """ load api definition from file and store in overall_def_dict["api"]

    api file should be in format below:
        [
            {"api": {"def": "api_login", "request": {}, "validate": []}},
            {"api": {"def": "api_logout", "request": {}, "validate": []}}
        ]

    Raises exception.FileFormatError when the file is not a list of
    single-key {"api": {...}} dicts each carrying a "def".
    """
    api_items = FileUtils.load_file(file_path)
    if not isinstance(api_items, list):
        raise exception.FileFormatError("API format error: {}".format(file_path))

    for api_item in api_items:
        if not isinstance(api_item, dict) or len(api_item) != 1:
            raise exception.FileFormatError("API format error: {}".format(file_path))

        key, api_dict = api_item.popitem()
        if key != "api" or not isinstance(api_dict, dict) or "def" not in api_dict:
            raise exception.FileFormatError("API format error: {}".format(file_path))

        function_meta = parse_function(api_dict.pop("def"))
        func_name = function_meta["func_name"]
        # Later definitions overwrite earlier ones; only a warning is emitted.
        if func_name in TestcaseLoader.overall_def_dict["api"]:
            logger.log_warning("API definition duplicated: {}".format(func_name))

        api_dict["function_meta"] = function_meta
        TestcaseLoader.overall_def_dict["api"][func_name] = api_dict
def _merge_extractor(def_extractors, ref_extractors): ''' merge def_extractors with ref_extractors Args: def_extractors (lsit): [{'var1':'val1'},{'var2':'val2'}] ref_extractors (list): [{'var1':'val111'},{'var3':'val3'}] Returns: list: merged extractors Examples: >>> def_extractors = [{'var1':'val1'},{'var2':'val2'}] >>> ref_extractors = [{'var1':'val111'},{'var3':'val3'}] >>> _merge_extractor(def_extractors,ref_extractors) [ {'var1':'val111'}, {'var2':'val2'}, {'var3':'val3'} ] ''' if not def_extractors: return ref_extractors elif not ref_extractors: return def_extractors else: extractor_dict = collections.OrderedDict() for api_extractor in def_extractors: if len(api_extractor) != 1: logger.log_warning(f'incorrect extractor: {api_extractor}') continue var_name = list(api_extractor.keys())[0] extractor_dict[var_name] = api_extractor[var_name] for test_extractor in ref_extractors: if len(test_extractor) != 1: logger.log_warning(f'incorrect extractor: {test_extractor}') continue var_name = list(test_extractor.keys())[0] extractor_dict[var_name] = test_extractor[var_name] extractor_list = [] for key, value in extractor_dict.items(): extractor_list.append({key: value}) return extractor_list
def extract_output(self, output_variables_list):
    """ extract output variables

    Collects the requested variables from the session variables mapping,
    prints them via utils.print_info, and returns them. Missing names are
    skipped with a warning.
    """
    variables_mapping = self.session_context.session_variables_mapping

    output = {}
    for variable in output_variables_list:
        if variable in variables_mapping:
            output[variable] = variables_mapping[variable]
        else:
            logger.log_warning(
                "variable '{}' can not be found in variables mapping, failed to output!"
                .format(variable)
            )

    utils.print_info(output)
    return output
def run_tests(self, tests_mapping):
    """ run testcase/testsuite data

    Pipeline: parse -> add to suite -> run -> aggregate -> stringify report.
    self.exception_stage records which phase any escaping exception came from.

    NOTE(review): unlike the sibling run_tests variant, this one does NOT
    abort when zero cases parse, and it prints the parsed testcases for
    debugging.

    Args:
        tests_mapping (dict): loaded tests, optionally carrying
            "project_mapping" (with "PWD").

    Returns:
        dict: aggregated summary of the whole run.
    """
    capture_message("start to run tests")
    project_mapping = tests_mapping.get("project_mapping", {})
    self.project_working_directory = project_mapping.get(
        "PWD", os.getcwd())

    if self.save_tests:
        utils.dump_logs(tests_mapping, project_mapping, "loaded")

    # parse tests
    self.exception_stage = "parse tests"
    parsed_testcases = parser.parse_tests(tests_mapping)
    # debug print of the parsed testcase collection
    print("解析后的测试用例数据集合{}".format(parsed_testcases))

    # record any files that failed to parse
    parse_failed_testfiles = parser.get_parse_failed_testfiles()
    if parse_failed_testfiles:
        logger.log_warning("parse failures occurred ...")
        utils.dump_logs(parse_failed_testfiles, project_mapping, "parse_failed")

    if self.save_tests:
        utils.dump_logs(parsed_testcases, project_mapping, "parsed")

    # add tests to test suite
    self.exception_stage = "add tests to test suite"
    test_suite = self._add_tests(parsed_testcases)

    # run test suite
    self.exception_stage = "run test suite"
    results = self._run_suite(test_suite)

    # aggregate results
    self.exception_stage = "aggregate results"
    self._summary = self._aggregate(results)

    # generate html report
    self.exception_stage = "generate html report"
    report.stringify_summary(self._summary)

    if self.save_tests:
        utils.dump_logs(self._summary, project_mapping, "summary")

    return self._summary
def _merge_extractor(def_extrators, current_extractors): """ merge def_extrators with current_extractors @params: def_extrators: [{"var1": "val1"}, {"var2": "val2"}] current_extractors: [{"var1": "val111"}, {"var3": "val3"}] @return: [ {"var1": "val111"}, {"var2": "val2"}, {"var3": "val3"} ] """ if not def_extrators: return current_extractors elif not current_extractors: return def_extrators else: extractor_dict = OrderedDict() for api_extrator in def_extrators: if len(api_extrator) != 1: logger.log_warning( "incorrect extractor: {}".format(api_extrator)) continue var_name = list(api_extrator.keys())[0] extractor_dict[var_name] = api_extrator[var_name] for test_extrator in current_extractors: if len(test_extrator) != 1: logger.log_warning( "incorrect extractor: {}".format(test_extrator)) continue var_name = list(test_extrator.keys())[0] extractor_dict[var_name] = test_extrator[var_name] extractor_list = [] for key, value in extractor_dict.items(): extractor_list.append({key: value}) return extractor_list
def load_test_dependency_map_by_path(path):
    """Get test_dependency_map by the specified path(Relative or Absolute path).

    Example:
        dict1, dict2 = load_test_dependency_map_by_path('../my_test')
        print('{},{}'.format(dict1, dict2))
        >> {'nested_para': [''], 'nested_5': [''], 'nested_1': ['nested_3']}, {'nested':'/Users/..../xxx.yml'}

    @param path String. Relative or Absolute path is accepted.
    @return Two dicts: (testcase name -> dependency list,
                        testcase name -> testset file path).
    """
    all_result = TestcaseLoader.load_testsets_by_path(path)
    logger.log_debug('{}'.format(all_result))

    testcase_dep_dict = collections.defaultdict(list)
    testcase_path_dict = collections.defaultdict(list)

    for result in all_result:
        first_case = result['testcases'][0]
        testcase_name = first_case.get('name', '')
        raw_testcase_dep = first_case.get('dependent', '')
        testcase_path = result['config'].get('path', '')

        if testcase_dep_dict[testcase_name]:
            # Duplicate testcase name: keep the first dependency list, just warn.
            logger.log_warning("Duplicate testcase found!! Please check-->"\
                "testcase_name:{},testcase_path:{}".format(testcase_name,testcase_path))
            # raise Exception("Duplicate testcase found!!")  # optionally raise here if duplicates must be fatal
        else:
            if isinstance(raw_testcase_dep, (list, set)):
                testcase_dep_dict[testcase_name].extend(raw_testcase_dep)
            else:
                # a comma-separated string of dependency names
                testcase_dep_dict[testcase_name].extend(raw_testcase_dep.split(','))

        testcase_path_dict[testcase_name] = testcase_path

    return testcase_dep_dict, testcase_path_dict
def main_hrun():
    """ API test: parse command line options and run commands.

    Exits early (code 0) for --version / --validate / --prettify /
    --startproject; otherwise runs the given testsets, optionally generates
    the HTML report, and returns 0 on success, 1 on failure.
    """
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testset_paths', nargs='*',
        help="testset file path")
    parser.add_argument(
        '--no-html-report', action='store_true', default=False,
        help="do not generate html report.")
    parser.add_argument(
        '--html-report-name',
        help="specify html report name, only effective when generating html report.")
    parser.add_argument(
        '--html-report-template',
        help="specify html report template path.")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        '--dot-env-path',
        help="Specify .env file path, which is useful for keeping production credentials.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")
    parser.add_argument(
        '--validate', nargs='*',
        help="Validate JSON testset format.")
    parser.add_argument(
        '--prettify', nargs='*',
        help="Prettify JSON testset format.")

    args = parser.parse_args()
    logger.setup_logger(args.log_level, args.log_file)

    # warn when running on Python 2 (end-of-life interpreter)
    if is_py2:
        logger.log_warning(get_python2_retire_msg())

    # utility sub-commands: each one exits the process after finishing
    if args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)

    if args.validate:
        validate_json_file(args.validate)
        exit(0)
    if args.prettify:
        prettify_json_file(args.prettify)
        exit(0)

    project_name = args.startproject
    if project_name:
        project_path = os.path.join(os.getcwd(), project_name)
        create_scaffold(project_path)
        exit(0)

    # run the testsets; the object returned by run() exposes
    # gen_html_report() and .summary below
    runner = HttpRunner(
        failfast=args.failfast,
        dot_env_path=args.dot_env_path).run(args.testset_paths)

    if not args.no_html_report:
        runner.gen_html_report(
            html_report_name=args.html_report_name,
            html_report_template=args.html_report_template
        )

    summary = runner.summary
    print_output(summary["output"])
    return 0 if summary["success"] else 1
def create_scaffold(project_name):
    """ create scaffold with specified project name.

    Creates the standard project layout (api/, testcases/, testsuites/,
    reports/) populated with demo YAML files, a debugtalk.py, a .env and
    a .gitignore. Aborts with a warning if the folder already exists.

    Args:
        project_name (str): path of the new project folder.
    """
    if os.path.isdir(project_name):
        logger.log_warning(
            u"Folder {} exists, please specify a new folder name.".format(
                project_name))
        return

    logger.color_print("Start to create new project: {}".format(project_name), "GREEN")
    logger.color_print("CWD: {}\n".format(os.getcwd()), "BLUE")

    def create_folder(path):
        # create one folder and log it
        os.makedirs(path)
        msg = "created folder: {}".format(path)
        logger.color_print(msg, "BLUE")

    def create_file(path, file_content=""):
        # create one file with the given content and log it
        with open(path, 'w') as f:
            f.write(file_content)
        msg = "created file: {}".format(path)
        logger.color_print(msg, "BLUE")

    # NOTE: a third helper (create_path) was previously defined here but
    # never called; it has been removed as dead code.

    demo_api_content = """name: demo api
variables:
    var1: value1
    var2: value2
request:
    url: /api/path/$var1
    method: POST
    headers:
        Content-Type: "application/json"
    json:
        key: $var2
validate:
    - eq: ["status_code", 200]
"""
    demo_testcase_content = """config:
    name: "demo testcase"
    variables:
        device_sn: "ABC"
        username: ${ENV(USERNAME)}
        password: ${ENV(PASSWORD)}
    base_url: "http://127.0.0.1:5000"

teststeps:
-
    name: demo step 1
    api: path/to/api1.yml
    variables:
        user_agent: 'iOS/10.3'
        device_sn: $device_sn
    extract:
        - token: content.token
    validate:
        - eq: ["status_code", 200]
-
    name: demo step 2
    api: path/to/api2.yml
    variables:
        token: $token
"""
    demo_testsuite_content = """config:
    name: "demo testsuite"
    variables:
        device_sn: "XYZ"
    base_url: "http://127.0.0.1:5000"

testcases:
-
    name: call demo_testcase with data 1
    testcase: path/to/demo_testcase.yml
    variables:
        device_sn: $device_sn
-
    name: call demo_testcase with data 2
    testcase: path/to/demo_testcase.yml
    variables:
        device_sn: $device_sn
"""
    ignore_content = "\n".join([
        ".env",
        "reports/*",
        "__pycache__/*",
        "*.pyc",
        ".python-version",
        "logs/*"
    ])
    demo_debugtalk_content = """import time

def sleep(n_secs):
    time.sleep(n_secs)
"""
    demo_env_content = "\n".join(["USERNAME=leolee", "PASSWORD=123456"])

    create_folder(project_name)
    create_folder(os.path.join(project_name, "api"))
    create_folder(os.path.join(project_name, "testcases"))
    create_folder(os.path.join(project_name, "testsuites"))
    create_folder(os.path.join(project_name, "reports"))
    create_file(os.path.join(project_name, "api", "demo_api.yml"), demo_api_content)
    create_file(os.path.join(project_name, "testcases", "demo_testcase.yml"), demo_testcase_content)
    create_file(os.path.join(project_name, "testsuites", "demo_testsuite.yml"), demo_testsuite_content)
    create_file(os.path.join(project_name, "debugtalk.py"), demo_debugtalk_content)
    create_file(os.path.join(project_name, ".env"), demo_env_content)
    create_file(os.path.join(project_name, ".gitignore"), ignore_content)
def load_test_file(file_path):
    """ Load a testcase/suite file and normalize it into a testset dict.

    Each item in the file is a single-key mapping whose key is either
    "config" (merged into the testset config, and its "name" copied to the
    testset name) or "test" (a testcase which may reference a predefined
    api or suite block); any other key only produces a warning.

    Args:
        file_path (str): absolute valid file path.

    Returns:
        dict: testset dict
            {
                "name": "desc1",
                "config": {},
                "testcases": [testcase11, testcase12]
            }
    """
    testset = {
        "name": "",
        "config": {
            "path": file_path
        },
        "testcases": []  # TODO: rename to tests
    }

    for raw_item in FileUtils.load_file(file_path):
        if not isinstance(raw_item, dict) or len(raw_item) != 1:
            raise exception.FileFormatError(
                "Testcase format error: {}".format(file_path))

        block_type, block = raw_item.popitem()
        if not isinstance(block, dict):
            raise exception.FileFormatError(
                "Testcase format error: {}".format(file_path))

        if block_type == "config":
            testset["config"].update(block)
            testset["name"] = block.get("name", "")
            continue

        if block_type != "test":
            logger.log_warning(
                "unexpected block key: {}. block key should only be 'config' or 'test'."
                .format(block_type))
            continue

        if "api" in block:
            # merge the referenced api definition into this test block
            def_block = TestcaseLoader._get_block_by_name(block["api"], "api")
            TestcaseLoader._override_block(def_block, block)
            testset["testcases"].append(block)
        elif "suite" in block:
            # inline every testcase of the referenced suite
            suite_block = TestcaseLoader._get_block_by_name(block["suite"], "suite")
            testset["testcases"].extend(suite_block["testcases"])
        else:
            testset["testcases"].append(block)

    return testset
def load_test_folder(test_folder_path):
    """ load testcases definitions from folder.

    Args:
        test_folder_path (str): testcases files folder.
            testcase file should be in the following format:
            [
                {"config": {"def": "create_and_check", "request": {}, "validate": []}},
                {"test": {"api": "get_user", "validate": []}}
            ]

    Returns:
        dict: testcases definition mapping, keyed by the parsed "def"
            function name when the config declares one, otherwise by the
            testcase file path.
            {
                "create_and_check": [{"config": {}}, {"test": {}}, {"test": {}}],
                "tests/testcases/create_and_get.yml": [{"config": {}}, {"test": {}}, {"test": {}}]
            }
    """
    test_definition_mapping = {}

    test_items_mapping = load_folder_content(test_folder_path)

    for test_file_path, items in test_items_mapping.items():
        # TODO: add JSON schema validation
        testcase = {"config": {}, "teststeps": []}
        for item in items:
            key, block = item.popitem()
            if key == "config":
                testcase["config"].update(block)

                if "def" not in block:
                    # no "def" declared: register under the file path
                    test_definition_mapping[test_file_path] = testcase
                    continue

                testcase_def = block.pop("def")
                function_meta = parser.parse_function(testcase_def)
                func_name = function_meta["func_name"]

                if func_name in test_definition_mapping:
                    # fixed message: this loader registers testcase
                    # definitions, not api definitions
                    logger.log_warning(
                        "testcase definition duplicated: {}".format(func_name))

                testcase["function_meta"] = function_meta
                test_definition_mapping[func_name] = testcase
            else:
                # key == "test"
                testcase["teststeps"].append(block)

    # also expose the mapping globally under "def-testcase"
    project_mapping["def-testcase"] = test_definition_mapping
    return test_definition_mapping
def _load_test_file(file_path):
    """ load testcase file or testsuite file

    Resolves every "api" and "suite" reference in the file's test blocks
    against the global definition mappings ("def-api" / "def-testcase")
    and returns a flattened testcase dict.

    Args:
        file_path (str): absolute valid file path. file_path should be in
            the following format:
            [
                {"config": {"name": "", "def": "suite_order()", "request": {}}},
                {"test": {"name": "add product to cart", "api": "api_add_cart()", "validate": []}},
                {"test": {"name": "add product to cart", "suite": "create_and_check()", "validate": []}},
                {"test": {"name": "checkout cart", "request": {}, "validate": []}}
            ]

    Returns:
        dict: testcase dict
            {
                "config": {},
                "teststeps": [teststep11, teststep12]
            }

    Raises:
        exceptions.FileFormatError: if any item is not a single-key dict
            whose value is a dict.
    """
    testcase = {"config": {}, "teststeps": []}

    for item in load_file(file_path):
        # TODO: add json schema validation
        # each item must be a single-key dict: {"config": {...}} or {"test": {...}}
        if not isinstance(item, dict) or len(item) != 1:
            raise exceptions.FileFormatError(
                "Testcase format error: {}".format(file_path))

        key, test_block = item.popitem()
        if not isinstance(test_block, dict):
            raise exceptions.FileFormatError(
                "Testcase format error: {}".format(file_path))

        if key == "config":
            testcase["config"].update(test_block)
        elif key == "test":
            def extend_api_definition(block):
                # merge the referenced api definition into block, in place
                ref_call = block["api"]
                def_block = _get_block_by_name(ref_call, "def-api")
                _extend_block(block, def_block)

            # reference api
            if "api" in test_block:
                extend_api_definition(test_block)
                testcase["teststeps"].append(test_block)
            # reference testcase
            elif "suite" in test_block:  # TODO: replace suite with testcase
                ref_call = test_block["suite"]
                block = _get_block_by_name(ref_call, "def-testcase")
                # TODO: bugfix lost block config variables
                for teststep in block["teststeps"]:
                    if "api" in teststep:
                        extend_api_definition(teststep)
                    # NOTE(review): teststep dicts are shared with the
                    # definition mapping (no copy) — mutations propagate
                    testcase["teststeps"].append(teststep)
            # define directly
            else:
                testcase["teststeps"].append(test_block)
        else:
            logger.log_warning(
                "unexpected block key: {}. block key should only be 'config' or 'test'."
                .format(key))

    return testcase
def main_locust():
    """ Performance test with locust: parse command line options and run commands.

    Rewrites sys.argv into a locust invocation: substitutes the parsed
    locustfile for the testcase file given with -f, and handles the extra
    --processes option by spawning multiple locust processes.
    """
    logger.setup_logger("INFO")

    try:
        from httprunner import locusts
    except ImportError:
        msg = "Locust is not installed, install first and try again.\n"
        msg += "install command: pip install locustio"
        logger.log_warning(msg)
        # sys.exit for consistency with the rest of this function; the
        # builtin exit() is meant for interactive sessions only.
        sys.exit(1)

    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        sys.argv.extend(["-h"])

    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        locusts.main()
        sys.exit(0)

    # locate the testcase file argument (the value following -f)
    try:
        testcase_index = sys.argv.index('-f') + 1
        assert testcase_index < len(sys.argv)
    except (ValueError, AssertionError):
        logger.log_error("Testcase file is not specified, exit.")
        sys.exit(1)

    testcase_file_path = sys.argv[testcase_index]
    sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)

    if "--processes" in sys.argv:
        # e.g. locusts -f locustfile.py --processes 4
        # (no-op string literals previously used as comments replaced
        # with real comments)
        if "--no-web" in sys.argv:
            logger.log_error(
                "conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)

        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1

        if processes_count_index >= len(sys.argv):
            # count omitted: locusts -f locustfile.py --processes
            processes_count = multiprocessing.cpu_count()
            logger.log_warning(
                "processes count not specified, use {} by default.".format(
                    processes_count))
        else:
            try:
                # explicit count: locusts -f locustfile.py --processes 4
                processes_count = int(sys.argv[processes_count_index])
                sys.argv.pop(processes_count_index)
            except ValueError:
                # next token is another option:
                # locusts -f locustfile.py --processes -P 8888
                processes_count = multiprocessing.cpu_count()
                logger.log_warning(
                    "processes count not specified, use {} by default.".format(
                        processes_count))

        # the count (if any) was popped above, so this index is still valid
        sys.argv.pop(processes_index)
        locusts.run_locusts_with_processes(sys.argv, processes_count)
    else:
        locusts.main()
def main_hrun():
    """ Entry point for API testing: read command line options, then either
    perform a one-shot action (version / validate / prettify / scaffold)
    or run the given testsets and optionally render an HTML report.
    """
    parser = argparse.ArgumentParser(description=__description__)

    # Declarative option table: (flags, keyword settings) pairs.
    option_specs = [
        (('-V', '--version'),
         dict(dest='version', action='store_true', help="show version")),
        (('testset_paths',),
         dict(nargs='*', help="testset file path")),
        (('--no-html-report',),
         dict(action='store_true', default=False,
              help="do not generate html report.")),
        (('--html-report-name',),
         dict(help="specify html report name, only effective when generating html report.")),
        (('--html-report-template',),
         dict(help="specify html report template path.")),
        (('--log-level',),
         dict(default='INFO', help="Specify logging level, default is INFO.")),
        (('--log-file',),
         dict(help="Write logs to specified file path.")),
        (('--dot-env-path',),
         dict(help="Specify .env file path, which is useful for keeping production credentials.")),
        (('--failfast',),
         dict(action='store_true', default=False,
              help="Stop the test run on the first error or failure.")),
        (('--startproject',),
         dict(help="Specify new project name.")),
        (('--validate',),
         dict(nargs='*', help="Validate JSON testset format.")),
        (('--prettify',),
         dict(nargs='*', help="Prettify JSON testset format.")),
    ]
    for flags, settings in option_specs:
        parser.add_argument(*flags, **settings)

    cli_args = parser.parse_args()
    logger.setup_logger(cli_args.log_level, cli_args.log_file)

    if is_py2:
        logger.log_warning(get_python2_retire_msg())

    if cli_args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)

    if cli_args.validate:
        validate_json_file(cli_args.validate)
        exit(0)

    if cli_args.prettify:
        prettify_json_file(cli_args.prettify)
        exit(0)

    new_project = cli_args.startproject
    if new_project:
        create_scaffold(os.path.join(os.getcwd(), new_project))
        exit(0)

    test_runner = HttpRunner(
        failfast=cli_args.failfast,
        dot_env_path=cli_args.dot_env_path
    ).run(cli_args.testset_paths)

    if not cli_args.no_html_report:
        test_runner.gen_html_report(
            html_report_name=cli_args.html_report_name,
            html_report_template=cli_args.html_report_template
        )

    result = test_runner.summary
    print_output(result["output"])
    return 0 if result["success"] else 1
def main_locust():
    """ Performance test with locust: parse command line options and run commands. """
    logger.setup_logger("INFO")

    try:
        from httprunner import locusts
    except ImportError:
        warn_msg = "Locust is not installed, install first and try again.\n"
        warn_msg += "install command: pip install locustio"
        logger.log_warning(warn_msg)
        exit(1)

    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        sys.argv.extend(["-h"])

    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        locusts.main()
        sys.exit(0)

    # locate the locustfile argument (the value after -f)
    try:
        file_arg_index = sys.argv.index('-f') + 1
        assert file_arg_index < len(sys.argv)
    except (ValueError, AssertionError):
        logger.log_error("Testcase file is not specified, exit.")
        sys.exit(1)

    locustfile = sys.argv[file_arg_index]
    sys.argv[file_arg_index] = locusts.parse_locustfile(locustfile)

    if "--processes" not in sys.argv:
        locusts.main()
        return

    # e.g. locusts -f locustfile.py --processes 4
    if "--no-web" in sys.argv:
        logger.log_error("conflict parameter args: --processes & --no-web. \nexit.")
        sys.exit(1)

    flag_index = sys.argv.index('--processes')
    count_index = flag_index + 1

    if count_index >= len(sys.argv):
        # count omitted: locusts -f locustfile.py --processes
        worker_count = multiprocessing.cpu_count()
        logger.log_warning("processes count not specified, use {} by default.".format(worker_count))
    else:
        try:
            # explicit count: locusts -f locustfile.py --processes 4
            worker_count = int(sys.argv[count_index])
            sys.argv.pop(count_index)
        except ValueError:
            # next token is another option: locusts -f locustfile.py --processes -P 8888
            worker_count = multiprocessing.cpu_count()
            logger.log_warning("processes count not specified, use {} by default.".format(worker_count))

    sys.argv.pop(flag_index)
    locusts.run_locusts_with_processes(sys.argv, worker_count)