def test_create_scaffold(self):
    """Scaffold creation builds the expected directory tree and debugtalk.py."""
    project_path = os.path.join(os.getcwd(), "projectABC")
    utils.create_scaffold(project_path)

    tests_root = os.path.join(project_path, "tests")
    self.assertTrue(os.path.isdir(tests_root))
    # Every expected sub-directory must exist after scaffolding.
    for sub_dir in ("api", "suite", "testcases"):
        self.assertTrue(os.path.isdir(os.path.join(tests_root, sub_dir)))
    # The driver module must be generated as a regular file.
    self.assertTrue(os.path.isfile(os.path.join(tests_root, "debugtalk.py")))

    # Clean up the scaffold so repeated runs start fresh.
    shutil.rmtree(project_path)
def test_create_scaffold(self):
    """Verify create_scaffold generates the standard project layout."""
    project_path = os.path.join(os.getcwd(), "projectABC")
    utils.create_scaffold(project_path)

    def tests_path(*parts):
        # Helper: build a path under the generated "tests" folder.
        return os.path.join(project_path, "tests", *parts)

    self.assertTrue(os.path.isdir(tests_path()))
    self.assertTrue(os.path.isdir(tests_path("api")))
    self.assertTrue(os.path.isdir(tests_path("suite")))
    self.assertTrue(os.path.isdir(tests_path("testcases")))
    self.assertTrue(os.path.isfile(tests_path("debugtalk.py")))

    # Remove the generated project so the test leaves no residue.
    shutil.rmtree(project_path)
def main_hrun():
    """ API test: parse command line options and run commands.

    Returns:
        int: 0 if the whole test run succeeded, 1 otherwise.
    """
    parser = argparse.ArgumentParser(
        description='HTTP test runner, not just about api test and load test.')
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testset_paths', nargs='*',
        help="testset file path")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")

    args = parser.parse_args()
    logger.setup_logger(args.log_level)

    # BUG FIX: removed the debugging leftover `args.testset_paths = 'E:\\case'`
    # which unconditionally overwrote the positional command-line arguments
    # with a hard-coded local path, making the CLI unusable.

    if args.version:
        logger.color_print("HttpRunner version: {}".format(hrun_version), "GREEN")
        logger.color_print("PyUnitReport version: {}".format(pyu_version), "GREEN")
        exit(0)

    project_name = args.startproject
    if project_name:
        # Create a new project scaffold and exit immediately.
        project_path = os.path.join(os.getcwd(), project_name)
        create_scaffold(project_path)
        exit(0)

    kwargs = {
        "output": os.path.join(os.getcwd(), "reports"),
        "failfast": args.failfast
    }
    test_runner = HTMLTestRunner(**kwargs)
    result = run_suite_path(args.testset_paths, {}, test_runner)
    print_output(result.output)

    return 0 if result.success else 1
def run_api_tree(request):
    """Run an api tree and return its run summary as a DRF Response.

    Expects request.data to contain: relation, project, config, async.
    """
    run_test_path = settings.RUN_TEST_PATH
    # Timestamped working directory so concurrent/repeated runs stay isolated.
    timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
    project_path = os.path.join(run_test_path, timedir)
    create_scaffold(project_path)

    # Idiom fix: `'debugtalk' in sys.modules.keys()` + conditional `del`
    # replaced with a single pop(); drops any previously imported debugtalk
    # so the freshly generated module is re-imported for this run.
    sys.modules.pop('debugtalk', None)

    api_tree = RunTree(
        type="apiTree",
        relation=request.data['relation'],
        project=request.data['project'],
        projectPath=project_path,
        config=request.data['config'],
        isAsync=request.data['async'])
    api_tree.serializeTestCase()
    api_tree.serializeTestSuite()
    api_tree.serializeDebugtalk()
    api_tree.generateMapping()
    api_tree.run()
    return Response(api_tree.summary)
def run_DebugCaseStep(request):
    """ run casestep by body

    Expects request.data to contain: config, project, apiId (plus the
    api body itself).
    """
    run_test_path = settings.RUN_TEST_PATH
    # Timestamped working directory so each debug run is isolated.
    timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
    project_path = os.path.join(run_test_path, timedir)
    create_scaffold(project_path)

    # Idiom fix: replaced `if 'debugtalk' in sys.modules.keys(): del ...`
    # with pop(); ensures the regenerated debugtalk module is re-imported.
    sys.modules.pop('debugtalk', None)

    debug_api = RunSingleApiInStep(
        config=request.data['config'],
        project=request.data['project'],
        apiId=request.data['apiId'],
        apiBody=request.data,
        projectPath=project_path)
    debug_api.serializeApi()
    debug_api.serializeDebugtalk()
    debug_api.generateMapping()
    debug_api.serializeTestCase()
    debug_api.run()
    return Response(debug_api.summary)
def run_api_pk(request, **kwargs):
    """run api by pk

    Expects the api primary key in kwargs['pk'] and the config name in
    request.query_params['config'].
    """
    run_test_path = settings.RUN_TEST_PATH
    # Timestamped working directory so each run is isolated.
    timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
    project_path = os.path.join(run_test_path, timedir)
    create_scaffold(project_path)

    # Idiom fix: replaced `if 'debugtalk' in sys.modules.keys(): del ...`
    # with pop(); forces a re-import of the freshly generated debugtalk.
    sys.modules.pop('debugtalk', None)

    debug_api = RunSingleApi(
        projectPath=project_path,
        config=request.query_params['config'],
        apiId=kwargs['pk'],
        type="singleapi")
    debug_api.serializeTestCase()
    debug_api.serializeTestSuite()
    debug_api.serializeDebugtalk()
    debug_api.generateMapping()
    debug_api.run()
    return Response(debug_api.summary)
def run_api(request):
    """ run api by body

    Parses the posted api body via Format, scaffolds a temporary project,
    runs the single api and returns the summary.
    """
    api = Format(request.data)
    api.parse()

    run_test_path = settings.RUN_TEST_PATH
    # Timestamped working directory so each debug run is isolated.
    timedir = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
    project_path = os.path.join(run_test_path, timedir)

    # Idiom fix: replaced `if 'debugtalk' in sys.modules.keys(): del ...`
    # with pop(); original performed this before scaffolding — order kept.
    sys.modules.pop('debugtalk', None)
    create_scaffold(project_path)

    debug_api = RunSingleApi(
        project=api.project,
        projectPath=project_path,
        config=request.data['config'],
        apiBody=api.testcase,
        type="debugapi")
    debug_api.serializeTestCase()
    debug_api.serializeTestSuite()
    debug_api.serializeDebugtalk()
    debug_api.generateMapping()
    debug_api.run()
    return Response(debug_api.summary)
def main_hrun():
    """ API test: parse command line options and run commands. """
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testset_paths', nargs='*',
        help="testset file path")
    parser.add_argument(
        '--no-html-report', action='store_true', default=False,
        help="do not generate html report.")
    parser.add_argument(
        '--html-report-name',
        help="specify html report name, only effective when generating html report.")
    parser.add_argument(
        '--html-report-template',
        help="specify html report template path.")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        '--dot-env-path',
        help="Specify .env file path, which is useful for keeping production credentials.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")
    parser.add_argument(
        '--validate', nargs='*',
        help="Validate JSON testset format.")
    parser.add_argument(
        '--prettify', nargs='*',
        help="Prettify JSON testset format.")

    args = parser.parse_args()
    logger.setup_logger(args.log_level, args.log_file)

    if is_py2:
        # Warn on Python 2 — support is being retired.
        logger.log_warning(get_python2_retire_msg())

    # Utility sub-commands: each one short-circuits the test run.
    if args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)

    if args.validate:
        validate_json_file(args.validate)
        exit(0)

    if args.prettify:
        prettify_json_file(args.prettify)
        exit(0)

    if args.startproject:
        # Scaffold a fresh project and stop.
        create_scaffold(os.path.join(os.getcwd(), args.startproject))
        exit(0)

    runner = HttpRunner(
        failfast=args.failfast,
        dot_env_path=args.dot_env_path
    ).run(args.testset_paths)

    if not args.no_html_report:
        runner.gen_html_report(
            html_report_name=args.html_report_name,
            html_report_template=args.html_report_template)

    summary = runner.summary
    print_output(summary["output"])
    return 0 if summary["success"] else 1
def main_ate():
    """ API test: parse command line options and run commands. """
    parser = argparse.ArgumentParser(
        description='HTTP test runner, not just about api test and load test.')
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testset_paths', nargs='*',
        help="testset file path")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--report-name',
        help="Specify report name, default is generated time.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")

    args = parser.parse_args()

    if args.version:
        print("HttpRunner version: {}".format(ate_version))
        print("PyUnitReport version: {}".format(pyu_version))
        exit(0)

    logging.basicConfig(level=getattr(logging, args.log_level.upper()))

    if args.startproject:
        # Scaffold a fresh project and stop.
        create_scaffold(os.path.join(os.getcwd(), args.startproject))
        exit(0)

    report_name = args.report_name
    if report_name and len(args.testset_paths) > 1:
        # A single report name cannot cover several testsets.
        report_name = None
        logging.warning("More than one testset paths specified, \
report name is ignored, use generated time instead.")

    results = {}
    success = True
    for testset_path in set(args.testset_paths):
        testset_path = testset_path.rstrip('/')
        try:
            task_suite = TaskSuite(testset_path)
        except exception.TestcaseNotFound:
            success = False
            continue

        output_folder_name = os.path.basename(os.path.splitext(testset_path)[0])
        result = HTMLTestRunner(
            output=output_folder_name,
            report_name=report_name,
            failfast=args.failfast
        ).run(task_suite)

        results[testset_path] = OrderedDict({
            "total": result.testsRun,
            "successes": len(result.successes),
            "failures": len(result.failures),
            "errors": len(result.errors),
            "skipped": len(result.skipped)
        })

        if len(result.successes) != result.testsRun:
            success = False

        for task in task_suite.tasks:
            task.print_output()

    return 0 if success is True else 1
def main():
    """ API test: parse command line options and run commands. """
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testfile_paths', nargs='*',
        help="Specify api/testcases/testsuites file paths to run.")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        '--dot-env-path',
        help="Specify .env file path, which is useful for keeping sensitive data.")
    parser.add_argument(
        '--report-template',
        help="Specify report template path.")
    parser.add_argument(
        '--report-dir',
        help="Specify report save directory.")
    parser.add_argument(
        '--report-file',
        help="Specify report file path, this has higher priority than specifying report dir.")
    parser.add_argument(
        '--save-tests', action='store_true', default=False,
        help="Save loaded/parsed/vars_out/summary json data to JSON files.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")

    args = parser.parse_args()

    if len(sys.argv) == 1:
        # no argument passed
        parser.print_help()
        sys.exit(0)

    if args.version:
        print(f"{__version__}")
        sys.exit(0)

    if args.startproject:
        # Scaffold a fresh project and stop.
        create_scaffold(args.startproject)
        sys.exit(0)

    runner = HttpRunner(
        failfast=args.failfast,
        save_tests=args.save_tests,
        log_level=args.log_level,
        log_file=args.log_file
    )

    err_code = 0
    try:
        for path in args.testfile_paths:
            summary = runner.run(path, dot_env_path=args.dot_env_path)
            # One HTML report per testfile path.
            report_dir = args.report_dir or os.path.join(os.getcwd(), "reports")
            gen_html_report(
                summary,
                report_template=args.report_template,
                report_dir=report_dir,
                report_file=args.report_file
            )
            err_code |= (0 if summary and summary["success"] else 1)
    except Exception as ex:
        logger.error(f"!!!!!!!!!! exception stage: {runner.exception_stage} !!!!!!!!!!\n{str(ex)}")
        sentry_sdk.capture_exception(ex)
        err_code = 1

    sys.exit(err_code)
def main_hrun():
    """ API test: parse command line options and run commands. """
    # Imports are kept function-local, as in the original, so the heavy
    # httprunner machinery is only loaded when this entry point runs.
    import sys
    import argparse
    from httprunner.logger import color_print
    from httprunner import __description__, __version__
    from httprunner.api import HttpRunner
    from httprunner.compat import is_py2
    from httprunner.validator import validate_json_file
    from httprunner.utils import (create_scaffold, get_python2_retire_msg,
                                  prettify_json_file)

    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testcase_paths', nargs='*',
        help="testcase file path")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        '--dot-env-path',
        help="Specify .env file path, which is useful for keeping sensitive data.")
    parser.add_argument(
        '--report-template',
        help="specify report template path.")
    parser.add_argument(
        '--report-dir',
        help="specify report save directory.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--save-tests', action='store_true', default=False,
        help="Save loaded tests and parsed tests to JSON file.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")
    parser.add_argument(
        '--validate', nargs='*',
        help="Validate JSON testcase format.")
    parser.add_argument(
        '--prettify', nargs='*',
        help="Prettify JSON testcase format.")

    args = parser.parse_args()

    if is_py2:
        # Python 2 retirement notice.
        color_print(get_python2_retire_msg(), "YELLOW")

    # Utility sub-commands short-circuit the test run.
    if args.version:
        color_print("{}".format(__version__), "GREEN")
        exit(0)

    if args.validate:
        validate_json_file(args.validate)
        exit(0)

    if args.prettify:
        prettify_json_file(args.prettify)
        exit(0)

    if args.startproject:
        create_scaffold(args.startproject)
        exit(0)

    runner = HttpRunner(
        failfast=args.failfast,
        save_tests=args.save_tests,
        report_template=args.report_template,
        report_dir=args.report_dir,
        log_level=args.log_level,
        log_file=args.log_file)

    try:
        for path in args.testcase_paths:
            runner.run(path, dot_env_path=args.dot_env_path)
    except Exception:
        # Surface which internal stage blew up, then propagate.
        color_print(
            "!!!!!!!!!! exception stage: {} !!!!!!!!!!".format(
                runner.exception_stage), "YELLOW")
        raise

    sys.exit(0 if runner.summary and runner.summary["success"] else 1)
def main():
    """ API test: parse command line options and run commands. """
    if is_py2:
        # Python 2 retirement notice.
        color_print(get_python2_retire_msg(), "YELLOW")

    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testfile_paths', nargs='*',
        help="Specify api/testcase/testsuite file paths to run.")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        '--dot-env-path',
        help="Specify .env file path, which is useful for keeping sensitive data.")
    parser.add_argument(
        '--report-template',
        help="Specify report template path.")
    parser.add_argument(
        '--report-dir',
        help="Specify report save directory.")
    parser.add_argument(
        '--report-file',
        help="Specify report file path, this has higher priority than specifying report dir.")
    parser.add_argument(
        '--save-tests', action='store_true', default=False,
        help="Save loaded/parsed/summary json data to JSON files.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")
    parser.add_argument(
        '--validate', nargs='*',
        help="Validate JSON testcase format.")
    parser.add_argument(
        '--prettify', nargs='*',
        help="Prettify JSON testcase format.")

    args = parser.parse_args()

    if len(sys.argv) == 1:
        # no argument passed
        parser.print_help()
        sys.exit(0)

    # Utility sub-commands short-circuit the test run.
    if args.version:
        color_print("{}".format(__version__), "GREEN")
        sys.exit(0)

    if args.validate:
        validate_json_file(args.validate)
        sys.exit(0)

    if args.prettify:
        prettify_json_file(args.prettify)
        sys.exit(0)

    if args.startproject:
        create_scaffold(args.startproject)
        sys.exit(0)

    runner = HttpRunner(
        failfast=args.failfast,
        save_tests=args.save_tests,
        log_level=args.log_level,
        log_file=args.log_file
    )

    err_code = 0
    try:
        for path in args.testfile_paths:
            summary = runner.run(path, dot_env_path=args.dot_env_path)
            # One HTML report per testfile path, rooted in the project dir.
            report_dir = args.report_dir or os.path.join(
                runner.project_working_directory, "reports")
            gen_html_report(
                summary,
                report_template=args.report_template,
                report_dir=report_dir,
                report_file=args.report_file
            )
            err_code |= (0 if summary and summary["success"] else 1)
    except Exception as ex:
        color_print("!!!!!!!!!! exception stage: {} !!!!!!!!!!".format(runner.exception_stage), "YELLOW")
        color_print(str(ex), "RED")
        capture_exception(ex)
        err_code = 1

    sys.exit(err_code)
def main_hrun():
    """ API test: parse command line options and run commands.

    Returns:
        int: 0 if the whole test run succeeded, 1 otherwise.
    """
    parser = argparse.ArgumentParser(
        description='HTTP test runner, not just about api test and load test.')
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testset_paths', nargs='*',
        help="testset file path")
    parser.add_argument(
        '--html-report-name',
        help="specify html report name, only effective when generating html report.")
    parser.add_argument(
        '--html-report-template',
        help="specify html report template path.")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--dot-env-path',
        help="Specify .env file path, which is useful for keeping production credentials.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")
    parser.add_argument(
        '--validate', nargs='*',
        help="Validate JSON testset format.")
    parser.add_argument(
        '--prettify', nargs='*',
        help="Prettify JSON testset format.")

    args = parser.parse_args()
    logger.setup_logger(args.log_level)

    if args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)

    if args.validate:
        validate_json_file(args.validate)
        exit(0)

    if args.prettify:
        prettify_json_file(args.prettify)
        exit(0)

    # DEAD CODE FIX: the fallback `os.path.join(os.getcwd(), ".env")` is
    # always a non-empty string, so the original `if dot_env_path:` guard
    # could never be false — load unconditionally.
    dot_env_path = args.dot_env_path or os.path.join(os.getcwd(), ".env")
    load_dot_env_file(dot_env_path)

    project_name = args.startproject
    if project_name:
        # Create a new project scaffold and exit immediately.
        project_path = os.path.join(os.getcwd(), project_name)
        create_scaffold(project_path)
        exit(0)

    result = HttpRunner(args.testset_paths, failfast=args.failfast).run(
        html_report_name=args.html_report_name,
        html_report_template=args.html_report_template)

    print_output(result["output"])
    return 0 if result["success"] else 1
def main_ate():
    """ API test: parse command line options and run commands. """
    parser = argparse.ArgumentParser(
        description='HTTP test runner, not just about api test and load test.')
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testset_paths', nargs='*',
        help="testset file path")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--report-name',
        help="Specify report name, default is generated time.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")

    args = parser.parse_args()

    if args.version:
        print("HttpRunner version: {}".format(ate_version))
        print("PyUnitReport version: {}".format(pyu_version))
        exit(0)

    log_level = getattr(logging, args.log_level.upper())
    logging.basicConfig(level=log_level)

    project_name = args.startproject
    if project_name:
        # Scaffold a fresh project and stop.
        project_path = os.path.join(os.getcwd(), project_name)
        create_scaffold(project_path)
        exit(0)

    report_name = args.report_name
    if report_name and len(args.testset_paths) > 1:
        # One shared report name cannot cover several testsets.
        report_name = None
        logging.warning("More than one testset paths specified, \
report name is ignored, use generated time instead.")

    results = {}
    success = True
    for raw_path in set(args.testset_paths):
        path = raw_path.rstrip('/')
        try:
            suite = TaskSuite(path)
        except exception.TestcaseNotFound:
            success = False
            continue

        folder = os.path.basename(os.path.splitext(path)[0])
        runner_kwargs = {
            "output": folder,
            "report_name": report_name,
            "failfast": args.failfast
        }
        run_result = HTMLTestRunner(**runner_kwargs).run(suite)

        results[path] = OrderedDict({
            "total": run_result.testsRun,
            "successes": len(run_result.successes),
            "failures": len(run_result.failures),
            "errors": len(run_result.errors),
            "skipped": len(run_result.skipped)
        })

        if len(run_result.successes) != run_result.testsRun:
            success = False

        for task in suite.tasks:
            task.print_output()

    return 0 if success is True else 1
def main_hrun():
    """ API test: parse command line options and run commands. """
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testset_paths', nargs='*',
        help="testset file path")
    parser.add_argument(
        '--no-html-report', action='store_true', default=False,
        help="do not generate html report.")
    parser.add_argument(
        '--html-report-name',
        help="specify html report name, only effective when generating html report.")
    parser.add_argument(
        '--html-report-template',
        help="specify html report template path.")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        '--dot-env-path',
        help="Specify .env file path, which is useful for keeping production credentials.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")
    parser.add_argument(
        '--validate', nargs='*',
        help="Validate JSON testset format.")
    parser.add_argument(
        '--prettify', nargs='*',
        help="Prettify JSON testset format.")

    args = parser.parse_args()
    logger.setup_logger(args.log_level, args.log_file)

    if is_py2:
        # Python 2 retirement notice.
        logger.log_warning(get_python2_retire_msg())

    # Utility sub-commands: each one short-circuits the actual test run.
    if args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)

    if args.validate:
        validate_json_file(args.validate)
        exit(0)

    if args.prettify:
        prettify_json_file(args.prettify)
        exit(0)

    project_name = args.startproject
    if project_name:
        # Scaffold a fresh project under the current directory and stop.
        create_scaffold(os.path.join(os.getcwd(), project_name))
        exit(0)

    runner = HttpRunner(
        failfast=args.failfast,
        dot_env_path=args.dot_env_path
    ).run(args.testset_paths)

    if not args.no_html_report:
        runner.gen_html_report(
            html_report_name=args.html_report_name,
            html_report_template=args.html_report_template
        )

    summary = runner.summary
    print_output(summary["output"])
    return 0 if summary["success"] else 1