def create_scaffold(project_path):
    if os.path.isdir(project_path):
        folder_name = os.path.basename(project_path)
        logger.log_warning(
            u"Folder {} exists, please specify a new folder name.".format(folder_name))
        return

    logger.color_print(
        "Start to create new project: {}\n".format(project_path), "GREEN")

    def create_path(path, ptype):
        if ptype == "folder":
            os.makedirs(path)
        elif ptype == "file":
            open(path, 'w').close()

        return "created {}: {}\n".format(ptype, path)

    path_list = [
        (project_path, "folder"),
        (os.path.join(project_path, "api"), "folder"),
        (os.path.join(project_path, "testcases"), "folder"),
        (os.path.join(project_path, "testsuites"), "folder"),
        (os.path.join(project_path, "reports"), "folder"),
        (os.path.join(project_path, "debugtalk.py"), "file"),
        (os.path.join(project_path, ".env"), "file")
    ]
    msg = ""
    for p in path_list:
        msg += create_path(p[0], p[1])

    logger.color_print(msg, "BLUE")

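# Usage sketch for create_scaffold (the project path below is illustrative only).
# Running it against a fresh directory creates the standard layout: api/,
# testcases/, testsuites/, reports/, debugtalk.py and .env.
#
#     create_scaffold(os.path.join(os.getcwd(), "demo_project"))
#
# Calling it twice with the same path is safe: the isdir() guard logs a warning
# and returns without touching the existing folder.
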
def prettify_json_file(file_list):
    """ prettify JSON testset format """
    for json_file in set(file_list):
        if not json_file.endswith(".json"):
            logger.log_warning(
                "Only JSON file format can be prettified, skip: {}".format(json_file))
            continue

        logger.color_print("Start to prettify JSON file: {}".format(json_file), "GREEN")

        dir_path = os.path.dirname(json_file)
        file_name, file_suffix = os.path.splitext(os.path.basename(json_file))
        outfile = os.path.join(dir_path, "{}.pretty.json".format(file_name))

        with io.open(json_file, 'r', encoding='utf-8') as stream:
            try:
                obj = json.load(stream)
            except ValueError as e:
                raise SystemExit(e)

        with io.open(outfile, 'w', encoding='utf-8') as out:
            json.dump(obj, out, indent=4, separators=(',', ': '))
            out.write('\n')

        print("success: {}".format(outfile))

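# Example (a sketch; the file names are hypothetical): prettifying a mixed list
# only processes the .json entries and writes "<name>.pretty.json" siblings,
# leaving the originals untouched.
#
#     prettify_json_file(["testcases/login.json", "testcases/login.yml"])
#     # -> success: testcases/login.pretty.json  (login.yml is skipped with a warning)
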
def _merge_extractor(def_extractors, ref_extractors):
    """ merge def_extractors with ref_extractors

    Args:
        def_extractors (list): [{"var1": "val1"}, {"var2": "val2"}]
        ref_extractors (list): [{"var1": "val111"}, {"var3": "val3"}]

    Returns:
        list: merged extractors

    Examples:
        >>> def_extractors = [{"var1": "val1"}, {"var2": "val2"}]
        >>> ref_extractors = [{"var1": "val111"}, {"var3": "val3"}]
        >>> _merge_extractor(def_extractors, ref_extractors)
        [
            {"var1": "val111"},
            {"var2": "val2"},
            {"var3": "val3"}
        ]

    """
    if not def_extractors:
        return ref_extractors
    elif not ref_extractors:
        return def_extractors
    else:
        extractor_dict = OrderedDict()
        for api_extractor in def_extractors:
            if len(api_extractor) != 1:
                logger.log_warning("incorrect extractor: {}".format(api_extractor))
                continue

            var_name = list(api_extractor.keys())[0]
            extractor_dict[var_name] = api_extractor[var_name]

        for test_extractor in ref_extractors:
            if len(test_extractor) != 1:
                logger.log_warning("incorrect extractor: {}".format(test_extractor))
                continue

            var_name = list(test_extractor.keys())[0]
            extractor_dict[var_name] = test_extractor[var_name]

        extractor_list = []
        for key, value in extractor_dict.items():
            extractor_list.append({key: value})

        return extractor_list

def handle_teardown(self, fail_type):
    logger.log_warning("Aborting test due to [{}] failure".format(fail_type))
    if not self.step_teardown_executed:
        teardown_hooks = self.teststep_dict.get("teardown_hooks", [])
        self.step_teardown_executed = True
        if teardown_hooks:
            logger.log_info("-" * 12 + "[case teardown - start]" + "-" * 12)
            self.do_teardown_hook_actions(teardown_hooks)
            logger.log_info("-" * 12 + "[case teardown - end]" + "-" * 12)
    if self.testcase_teardown_hooks and not self.testcase_teardown_hooks_executed:
        logger.log_info("-" * 12 + "[global teardown - start]" + "-" * 12)
        self.testcase_teardown_hooks_executed = True
        self.do_teardown_hook_actions(self.testcase_teardown_hooks)
        logger.log_info("-" * 12 + "[global teardown - end]" + "-" * 12)

def load_file(file_path):
    if not os.path.isfile(file_path):
        raise exceptions.FileNotFound("{} does not exist.".format(file_path))

    file_suffix = os.path.splitext(file_path)[1].lower()
    if file_suffix == '.json':
        return load_json_file(file_path)
    elif file_suffix in ['.yaml', '.yml']:
        return load_yaml_file(file_path)
    elif file_suffix == ".csv":
        return load_csv_file(file_path)
    else:
        # '' or other suffix
        err_msg = u"Unsupported file format: {}".format(file_path)
        logger.log_warning(err_msg)
        return []

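# Dispatch example (a sketch; the paths are hypothetical). The loader picks the
# parser by suffix, case-insensitively, and degrades to an empty list rather
# than raising for unknown formats:
#
#     load_file("data/accounts.csv")    # -> load_csv_file
#     load_file("testcases/login.YML")  # -> load_yaml_file (suffix is lowercased)
#     load_file("notes.txt")            # -> [] plus an "Unsupported file format" warning
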
def extract_output(self, output_variables_list):
    """ extract output variables """
    variables_mapping = self.context.teststep_variables_mapping

    output = {}
    for variable in output_variables_list:
        if variable not in variables_mapping:
            logger.log_warning(
                "variable '{}' can not be found in variables mapping, "
                "failed to output!".format(variable)
            )
            continue

        output[variable] = variables_mapping[variable]

    return output

def validate_json_file(file_list):
    """ validate JSON testset format """
    for json_file in set(file_list):
        if not json_file.endswith(".json"):
            logger.log_warning(
                "Only JSON file format can be validated, skip: {}".format(json_file))
            continue

        logger.color_print("Start to validate JSON file: {}".format(json_file), "GREEN")

        with io.open(json_file) as stream:
            try:
                json.load(stream)
            except ValueError as e:
                raise SystemExit(e)

        print("OK")

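# Example (sketch, hypothetical path): validation only parses each .json file.
# A syntax error aborts the run via SystemExit, carrying json's ValueError
# message with the offending line and column.
#
#     validate_json_file(["testcases/login.json"])   # prints "OK" per valid file
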
def load_test_folder(test_folder_path):
    """ load testcases definitions from folder.

    Args:
        test_folder_path (str): testcases files folder.

            testcase file should be in the following format:
            [
                {
                    "config": {
                        "def": "create_and_check",
                        "request": {},
                        "validate": []
                    }
                },
                {
                    "test": {
                        "api": "get_user",
                        "validate": []
                    }
                }
            ]

    Returns:
        dict: testcases definition mapping.

            {
                "create_and_check": [
                    {"config": {}},
                    {"test": {}},
                    {"test": {}}
                ],
                "tests/testcases/create_and_get.yml": [
                    {"config": {}},
                    {"test": {}},
                    {"test": {}}
                ]
            }

    """
    test_definition_mapping = {}

    test_items_mapping = load_folder_content(test_folder_path)

    for test_file_path, items in test_items_mapping.items():
        # TODO: add JSON schema validation
        testcase = {
            "config": {},
            "teststeps": []
        }
        for item in items:
            key, block = item.popitem()

            if key == "config":
                testcase["config"].update(block)

                if "def" not in block:
                    test_definition_mapping[test_file_path] = testcase
                    continue

                testcase_def = block.pop("def")
                function_meta = parser.parse_function(testcase_def)
                func_name = function_meta["func_name"]

                if func_name in test_definition_mapping:
                    logger.log_warning("testcase definition duplicated: {}".format(func_name))

                testcase["function_meta"] = function_meta
                test_definition_mapping[func_name] = testcase
            else:
                # key == "test"
                testcase["teststeps"].append(block)

    project_mapping["def-testcase"] = test_definition_mapping
    return test_definition_mapping

def load_api_folder(api_folder_path):
    """ load api definitions from api folder.

    Args:
        api_folder_path (str): api files folder.

            api file should be in the following format:
            [
                {
                    "api": {
                        "def": "api_login",
                        "request": {},
                        "validate": []
                    }
                },
                {
                    "api": {
                        "def": "api_logout",
                        "request": {},
                        "validate": []
                    }
                }
            ]

    Returns:
        dict: api definition mapping.

            {
                "api_login": {
                    "function_meta": {"func_name": "api_login", "args": [], "kwargs": {}},
                    "request": {}
                },
                "api_logout": {
                    "function_meta": {"func_name": "api_logout", "args": [], "kwargs": {}},
                    "request": {}
                }
            }

    """
    api_definition_mapping = {}

    api_items_mapping = load_folder_content(api_folder_path)

    for api_file_path, api_items in api_items_mapping.items():
        # TODO: add JSON schema validation
        for api_item in api_items:
            key, api_dict = api_item.popitem()

            api_def = api_dict.pop("def")
            function_meta = parser.parse_function(api_def)
            func_name = function_meta["func_name"]

            if func_name in api_definition_mapping:
                logger.log_warning("API definition duplicated: {}".format(func_name))

            api_dict["function_meta"] = function_meta
            api_definition_mapping[func_name] = api_dict

    project_mapping["def-api"] = api_definition_mapping
    return api_definition_mapping

def _load_test_file(file_path):
    """ load testcase file or testsuite file

    Args:
        file_path (str): absolute valid file path. file_path should be in the following format:

            [
                {
                    "config": {
                        "name": "",
                        "def": "suite_order()",
                        "request": {}
                    }
                },
                {
                    "test": {
                        "name": "add product to cart",
                        "api": "api_add_cart()",
                        "validate": []
                    }
                },
                {
                    "test": {
                        "name": "add product to cart",
                        "suite": "create_and_check()",
                        "validate": []
                    }
                },
                {
                    "test": {
                        "name": "checkout cart",
                        "request": {},
                        "validate": []
                    }
                }
            ]

    Returns:
        dict: testcase dict
            {
                "config": {},
                "teststeps": [teststep11, teststep12]
            }

    """
    testcase = {
        "config": {},
        "teststeps": []
    }

    for item in load_file(file_path):
        # TODO: add json schema validation
        if not isinstance(item, dict) or len(item) != 1:
            raise exceptions.FileFormatError("Testcase format error: {}".format(file_path))

        key, test_block = item.popitem()
        if not isinstance(test_block, dict):
            raise exceptions.FileFormatError("Testcase format error: {}".format(file_path))

        if key == "config":
            testcase["config"].update(test_block)

        elif key == "test":

            def extend_api_definition(block):
                ref_call = block["api"]
                def_block = _get_block_by_name(ref_call, "def-api")
                _extend_block(block, def_block)

            # reference api
            if "api" in test_block:
                extend_api_definition(test_block)
                testcase["teststeps"].append(test_block)

            # reference testcase
            elif "suite" in test_block:  # TODO: replace suite with testcase
                ref_call = test_block["suite"]
                block = _get_block_by_name(ref_call, "def-testcase")
                # TODO: bugfix lost block config variables
                for teststep in block["teststeps"]:
                    if "api" in teststep:
                        extend_api_definition(teststep)
                    testcase["teststeps"].append(teststep)

            # define directly
            else:
                testcase["teststeps"].append(test_block)

        else:
            logger.log_warning(
                "unexpected block key: {}. block key should only be 'config' or 'test'.".format(key))

    return testcase

def http_runner_run(**kwargs):
    """Run tests via HttpRunner"""
    log_dir = kwargs.pop('log_dir')
    env_id = kwargs.pop('env_id')
    testset = kwargs.pop('testset')
    test_meta_list = kwargs.pop('test_meta_list')
    run_task_result_id = kwargs.pop('run_task_result_id')
    intf_id = kwargs.pop('intf_id', None)
    main_case_id = kwargs.pop('main_case_id', None)
    main_case_id_list = kwargs.pop('main_case_id_list', None)

    if intf_id:
        log_path = '{0}task_run_{1}_intf_{2}.log'.format(log_dir, run_task_result_id, intf_id)
    elif main_case_id:
        log_path = '{0}task_run_{1}_main_case_{2}.log'.format(log_dir, run_task_result_id, main_case_id)
    else:
        log_path = '{0}task_run_{1}_main_case_list_{2}.log'.format(log_dir, run_task_result_id, main_case_id_list)

    # initialize hr_runner
    hr_kwargs = {
        "failfast": True,
        "log_path": log_path
    }
    hr_runner = HttpRunner(**hr_kwargs)

    start_time = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
    hr_logger.log_warning("[START] test started! (ง •_•)ง")
    hr_logger.log_warning("[ENV]: {}".format(env_id))

    try:
        testset_json = json_dumps(testset)
    except Exception:
        testset_json = testset

    # run the tests
    try:
        # hr_logger.log_warning("[calling HttpRunner]: {0}".format(testset_json))
        hr_runner.run(testset)
        hr_logger.log_info("[finished calling HttpRunner]")
    except Exception:
        raise Exception(traceback.format_exc())

    for detail in hr_runner.summary["details"]:
        for record in detail["records"]:
            record["meta_data"]["request"].pop("files", None)

    # remove file objects from summary
    summary_remove_file_obj(hr_runner.summary)

    # refine summary
    summary = deepcopy(hr_runner.summary)
    perfect_summary(summary, test_meta_list)
    summary = add_memo(summary)

    # identify errors
    summary = identify_errors(summary)

    return {
        "summary": json_loads(json_dumps(summary)),
        "run_task_result_id": run_task_result_id,
        'log_dir': log_dir
    }

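# Invocation sketch (hedged: the argument values are illustrative, and
# perfect_summary/add_memo/identify_errors are project-local helpers, not
# HttpRunner APIs):
#
#     result = http_runner_run(
#         log_dir='/var/log/atp/', env_id=3, testset=testset_dict,
#         test_meta_list=meta_list, run_task_result_id=42, intf_id=7)
#     # -> {"summary": {...}, "run_task_result_id": 42, "log_dir": "/var/log/atp/"}
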
def main_locust():
    """ Performance test with locust: parse command line options and run commands. """
    logger.setup_logger("INFO")

    try:
        from atp.httprunner import locusts
    except ImportError:
        msg = "Locust is not installed, install first and try again.\n"
        msg += "install command: pip install locustio"
        logger.log_warning(msg)
        exit(1)

    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        sys.argv.extend(["-h"])

    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        locusts.main()
        sys.exit(0)

    try:
        testcase_index = sys.argv.index('-f') + 1
        assert testcase_index < len(sys.argv)
    except (ValueError, AssertionError):
        logger.log_error("Testcase file is not specified, exit.")
        sys.exit(1)

    testcase_file_path = sys.argv[testcase_index]
    sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)

    if "--processes" in sys.argv:
        # locusts -f locustfile.py --processes 4
        if "--no-web" in sys.argv:
            logger.log_error("conflict parameter args: --processes & --no-web.\nexit.")
            sys.exit(1)

        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1

        if processes_count_index >= len(sys.argv):
            # processes count not specified explicitly:
            # locusts -f locustfile.py --processes
            processes_count = multiprocessing.cpu_count()
            logger.log_warning(
                "processes count not specified, use {} by default.".format(processes_count))
        else:
            try:
                # locusts -f locustfile.py --processes 4
                processes_count = int(sys.argv[processes_count_index])
                sys.argv.pop(processes_count_index)
            except ValueError:
                # locusts -f locustfile.py --processes -P 8888
                processes_count = multiprocessing.cpu_count()
                logger.log_warning(
                    "processes count not specified, use {} by default.".format(processes_count))

        sys.argv.pop(processes_index)
        locusts.run_locusts_with_processes(sys.argv, processes_count)
    else:
        locusts.main()

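# CLI sketch (the locustfile path is hypothetical). The wrapper rewrites
# sys.argv and delegates to locust, optionally fanning out to one worker
# process per CPU core:
#
#     locusts -f locustfile.py                    # plain locust run
#     locusts -f locustfile.py --processes 4      # master + 4 worker processes
#     locusts -f locustfile.py --processes        # worker count defaults to cpu_count()
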
def main_hrun():
    """ API test: parse command line options and run commands. """
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testset_paths', nargs='*',
        help="testset file path")
    parser.add_argument(
        '--no-html-report', action='store_true', default=False,
        help="do not generate html report.")
    parser.add_argument(
        '--html-report-name',
        help="specify html report name, only effective when generating html report.")
    parser.add_argument(
        '--html-report-template',
        help="specify html report template path.")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")
    parser.add_argument(
        '--validate', nargs='*',
        help="Validate JSON testset format.")
    parser.add_argument(
        '--prettify', nargs='*',
        help="Prettify JSON testset format.")

    args = parser.parse_args()
    logger.setup_logger(args.log_level, args.log_file)

    if is_py2:
        logger.log_warning(get_python2_retire_msg())

    if args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)

    if args.validate:
        validate_json_file(args.validate)
        exit(0)

    if args.prettify:
        prettify_json_file(args.prettify)
        exit(0)

    project_name = args.startproject
    if project_name:
        project_path = os.path.join(os.getcwd(), project_name)
        create_scaffold(project_path)
        exit(0)

    runner = HttpRunner(failfast=args.failfast).run(args.testset_paths)

    if not args.no_html_report:
        runner.gen_html_report(
            html_report_name=args.html_report_name,
            html_report_template=args.html_report_template)

    summary = runner.summary
    return 0 if summary["success"] else 1

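# CLI sketch (the paths are hypothetical). Each utility flag exits before any
# test run; otherwise the exit code mirrors summary["success"]:
#
#     hrun --startproject demo        # scaffold only, then exit(0)
#     hrun --validate a.json b.json   # JSON sanity check, then exit(0)
#     hrun testcases/login.yml --failfast --log-level DEBUG
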