def asyn_execute_case(data=None):
    """Asynchronously execute a service's test cases and record a report.

    Args:
        data (dict): expects keys ``service_name``, ``service_version``,
            ``execute_source``, ``report_name``, ``service_config_id``,
            ``id`` and optionally ``base_url``.

    Returns:
        the report runner produced by ``run_by_service``.
    """
    logger.setup_logger('INFO')
    service_name = data.get('service_name')
    service_version = data.get('service_version')
    execute_source = data.get('execute_source')
    report_name = data.get('report_name')
    service_config_id = data.get('service_config_id')
    execute_id = data.get('id')  # renamed locally: ``id`` shadows the builtin
    base_url = data.get('base_url') or ''
    if not service_config_id:
        # Fall back to the first config matching service name/version.
        # Index the queryset directly instead of materializing every row
        # with list(); still raises IndexError when no config exists.
        service_configs = ServiceConfig.objects.filter(
            service_name=service_name,
            service_version=service_version)
        service_config_id = service_configs[0].id
    report_runner = run_by_service(service_config_id, base_url)
    report_data = {
        'execute_service': service_name,
        'execute_source': execute_source,
        'execute_id': execute_id
    }
    if not report_name:
        report_name = '禅道执行' + service_name + '服务'
    add_test_reports(report_runner,
                     report_name=report_name,
                     report_type='service',
                     executor='system',
                     report_data=report_data)
    return report_runner
def single_run(testset):
    """API test: parse command line options and run the given testset.

    Args:
        testset: testset path/dict that is actually executed (note: the CLI
            positional ``testset_paths`` is parsed but this function runs
            ``testset``).

    Returns:
        int: 0 on success, 1 on failure (suitable as a process exit code).
    """
    parser = argparse.ArgumentParser(
        description='HTTP test runner, not just about api test and load test.')
    parser.add_argument('-V', '--version', dest='version', action='store_true',
                        help="show version")
    parser.add_argument('testset_paths', nargs='*', help="testset file path")
    parser.add_argument('--log-level', default='INFO',
                        help="Specify logging level, default is INFO.")
    parser.add_argument('--failfast', action='store_true', default=False,
                        help="Stop the test run on the first error or failure.")
    parser.add_argument('--startproject', help="Specify new project name.")
    args = parser.parse_args()
    logger.setup_logger(args.log_level)
    # Removed dead debug leftover: args.testset_paths was overwritten with a
    # hard-coded local path ('E:\case') and then never read — this function
    # runs ``testset``, not args.testset_paths.
    kwargs = {
        "output": os.path.join(os.getcwd(), "reports"),
        "failfast": args.failfast
    }
    test_runner = HTMLTestRunner(**kwargs)
    result = run_suite_path(testset, {}, test_runner)
    print_output(result.output)
    return 0 if result.success else 1
def __init__(self, failfast=False, save_tests=False, report_template=None,
             report_dir=None, log_level="INFO", log_file=None):
    """Set up an HttpRunner instance.

    Args:
        failfast (bool): abort the run at the first error or failure.
        save_tests (bool): dump loaded/parsed tests to a JSON file.
        report_template (str): Jinja2 template used for the HTML report.
        report_dir (str): directory the HTML report is written to.
        log_level (str): logging level name.
        log_file (str): log file path; logging is only configured when given.
    """
    # Stage label used when reporting where an exception occurred.
    self.exception_stage = "initialize HttpRunner()"
    runner_kwargs = {"failfast": failfast, "resultclass": report.HtmlTestResult}
    self.unittest_runner = unittest.TextTestRunner(**runner_kwargs)
    self.test_loader = unittest.TestLoader()
    self.save_tests = save_tests
    self.report_template = report_template
    self.report_dir = report_dir
    self._summary = None
    if log_file:
        logger.setup_logger(log_level, log_file)
def __init__(self, failfast=False, save_tests=False, report_template=None,
             report_dir=None, log_level="INFO", log_file=''):
    """Initialize HttpRunner.

    Args:
        failfast (bool): stop the test run on the first error or failure.
        save_tests (bool): save loaded/parsed tests to a JSON file.
        report_template (str): report template file path, Jinja2 format.
        report_dir (str): html report save directory.
        log_level (str): logging level.
        log_file (str): log file name prefix.
    """
    # Daily log file: logs/<prefix>YYYY-MM-DD.log under the working directory.
    log_name_timestamp = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d')
    log_path = os.path.join(os.getcwd(), 'logs')
    # makedirs(exist_ok=True) replaces the former check-then-mkdir pair,
    # which raced when two workers started at once, and also tolerates
    # nested paths.
    os.makedirs(log_path, exist_ok=True)
    log_file_path = os.path.join(log_path, (log_file + log_name_timestamp + ".log"))
    logger.setup_logger(log_level, log_file_path)  # configure level + file sink
    logger.log_info("HttpRunner version: {}".format(__version__))
    self.exception_stage = "initialize HttpRunner()"
    kwargs = {
        "failfast": failfast,
        "resultclass": report.HtmlTestResult
    }
    self.unittest_runner = unittest.TextTestRunner(**kwargs)
    self.test_loader = unittest.TestLoader()
    self.save_tests = save_tests
    self.report_template = report_template
    self.report_dir = report_dir
    self._summary = None
def debug_api(api, pk):
    """Debug-run a single api / teststep set.

    Args:
        api (dict or list): httprunner scripts or teststeps.
        pk (int): Config primary key; its stored body is used as run config.

    Returns:
        dict: the runner summary.
    """
    import ast  # stdlib-only local import; module imports left untouched

    body = None
    if pk:
        config = models.Config.objects.get(id=pk)
        # SECURITY: the stored body used to be eval()'d, which executes
        # arbitrary code from the database; literal_eval only parses
        # Python literals (dict/list/str/num/bool/None).
        body = ast.literal_eval(config.body)
    if isinstance(api, dict):
        # A single teststep — wrap it so parse_tests sees a list.
        api = [api]
    testcase_list = [parse_tests(api, config=body)]
    logger.setup_logger('DEBUG')
    runner = HttpRunner(failfast=False)
    runner.run(testcase_list)
    return runner.summary
def suite_hrun(env_name, base_url, suite, receiver):
    """Asynchronously run a collection of suites and mail the HTML report.

    :param env_name: str: report name (environment label)
    :param base_url: str: base url the cases run against
    :param suite: iterable of (suite_id, ...) rows
    :param receiver: str: mail receivers; empty string disables mailing
    :return: error string when a suite is missing, otherwise None
    """
    logger.setup_logger('INFO')
    runner = HttpRunner(failfast=False)
    suite = list(suite)
    case_dir = os.path.join(os.getcwd(), "suite", get_time_stamp())
    try:
        for row in suite:
            run_by_suite(row[0], base_url, case_dir)
    except ObjectDoesNotExist:
        return '找不到Suite信息'
    runner.run(case_dir)
    shutil.rmtree(case_dir)
    runner.summary = timestamp_to_datetime(runner.summary)
    report_path = add_test_reports(runner, report_name=env_name)
    if receiver != '':
        send_email_reports(receiver, report_path)
    # The on-disk report is transient; its content is already persisted.
    os.remove(report_path)
def project_hrun(name, base_url, project, receiver):
    """Asynchronously run every case of a project and optionally mail the report.

    :param name: str: report name
    :param base_url: str: environment base url
    :param project: str: project name
    :param receiver: str: mail receivers; empty string disables mailing
    :return: runner summary dict
    """
    logger.setup_logger('INFO')
    runner = HttpRunner(failfast=False)
    project_id = ProjectInfo.objects.get(project_name=project).id
    case_dir = os.path.join(os.getcwd(), "suite", get_time_stamp())
    run_by_project(project_id, base_url, case_dir)
    runner.run(case_dir)
    shutil.rmtree(case_dir)
    runner.summary = timestamp_to_datetime(runner.summary)
    add_test_reports(report_name=name, **runner.summary)
    if receiver != '':
        send_html_reports(receiver, runner)
    return runner.summary
def module_hrun(name, base_url, module, receiver):
    """Asynchronously run the cases of one or more modules.

    :param name: str: report name
    :param base_url: str: environment base url
    :param module: iterable of (module_id, ...) rows
    :param receiver: str: mail receivers; empty string disables mailing
    :return: runner summary, or an error string when a module is missing
    """
    logger.setup_logger('INFO')
    runner = HttpRunner(failfast=False)
    testcase_lists = []
    try:
        for row in list(module):
            testcase_lists.extend(run_by_module(row[0], base_url))
    except ObjectDoesNotExist:
        return '找不到模块信息'
    run_time = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
    runner.run(testcase_lists)
    add_test_reports(run_time, report_name=name, **runner.summary)
    if receiver != '':
        send_html_reports(receiver, runner)
    return runner.summary
def suite_hrun(name, base_url, suite, receiver):
    """Asynchronously run suites, record a timestamped report, optionally mail it.

    :param name: str: report name
    :param base_url: str: environment base url
    :param suite: iterable of (suite_id, ...) rows
    :param receiver: str: mail receivers; empty string disables mailing
    :return: runner summary, or an error string when a suite is missing
    """
    logger.setup_logger('INFO')
    runner = HttpRunner(failfast=False)
    case_dir = os.path.join(os.getcwd(), "suite", get_time_stamp())
    try:
        for row in list(suite):
            run_by_suite(row[0], base_url, case_dir)
    except ObjectDoesNotExist:
        return '找不到Suite信息'
    run_time = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
    runner.run(case_dir)
    shutil.rmtree(case_dir)
    add_test_reports(run_time, report_name=name, **runner.summary)
    if receiver != '':
        send_html_reports(receiver, runner)
    return runner.summary
def module_hrun(name, base_url, module, receiver):
    """Asynchronously run modules from generated case files, then report.

    :param name: str: report name
    :param base_url: str: environment base url
    :param module: iterable of (module_id, ...) rows
    :param receiver: str: mail receivers; empty string disables mailing
    :return: runner summary, or an error string when a module is missing
    """
    logger.setup_logger('INFO')
    runner = HttpRunner(failfast=False)
    case_dir = os.path.join(os.getcwd(), "suite", get_time_stamp())
    try:
        for row in list(module):
            run_by_module(row[0], base_url, case_dir)
    except ObjectDoesNotExist:
        return '找不到模块信息'
    runner.run(case_dir)
    shutil.rmtree(case_dir)
    runner.summary = timestamp_to_datetime(runner.summary)
    add_test_reports(report_name=name, **runner.summary)
    if receiver != '':
        send_html_reports(receiver, runner)
    return runner.summary
def __init__(self, failfast=False, save_tests=False, log_level="WARNING", log_file=None):
    """Initialize HttpRunner.

    Args:
        failfast (bool): stop the test run on the first error or failure.
        save_tests (bool): save loaded/parsed tests to a JSON file.
        log_level (str): logging level.
        log_file (str): log file path.
    """
    logger.setup_logger(log_level, log_file)
    # Stage label used when reporting where an exception occurred.
    self.exception_stage = "initialize HttpRunner()"
    self.unittest_runner = unittest.TextTestRunner(
        failfast=failfast, resultclass=report.HtmlTestResult)
    self.test_loader = unittest.TestLoader()
    self.save_tests = save_tests
    self._summary = None
    # Project working directory; resolved later during test loading.
    self.project_working_directory = None
def main_hrun(testset_path):
    """Run the given testset, persist a report, and return the summary."""
    logger.setup_logger('DEBUG')
    runner = HttpRunner(failfast=False)
    runner.run(testset_path)
    add_test_reports(**runner.summary)
    return runner.summary
def main_hrun():
    """API test: parse command line options and run commands.

    Returns:
        int: 0 on success, 1 on failure (suitable as a process exit code).
    """
    parser = argparse.ArgumentParser(
        description='HTTP test runner, not just about api test and load test.')
    parser.add_argument('-V', '--version', dest='version', action='store_true',
                        help="show version")
    parser.add_argument('testset_paths', nargs='*', help="testset file path")
    parser.add_argument('--html-report-name',
                        help="specify html report name, only effective when generating html report.")
    parser.add_argument('--html-report-template',
                        help="specify html report template path.")
    parser.add_argument('--log-level', default='INFO',
                        help="Specify logging level, default is INFO.")
    parser.add_argument('--dot-env-path',
                        help="Specify .env file path, which is useful for keeping production credentials.")
    parser.add_argument('--failfast', action='store_true', default=False,
                        help="Stop the test run on the first error or failure.")
    parser.add_argument('--startproject', help="Specify new project name.")
    args = parser.parse_args()
    logger.setup_logger(args.log_level)
    if args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)
    # Fall back to ./.env when no path is given. The former
    # ``if dot_env_path:`` guard was dead code (this expression is always
    # truthy), so the file is loaded unconditionally.
    dot_env_path = args.dot_env_path or os.path.join(os.getcwd(), ".env")
    load_dot_env_file(dot_env_path)
    project_name = args.startproject
    if project_name:
        project_path = os.path.join(os.getcwd(), project_name)
        create_scaffold(project_path)
        exit(0)
    result = HttpRunner(
        args.testset_paths,
        failfast=args.failfast).run(html_report_name=args.html_report_name)
    print_output(result["output"])
    return 0 if result["success"] else 1
def main_hrun(testset_path, report_name):
    """Run the testset and record a timestamped report.

    :param testset_path: dict or list
    :param report_name: str
    :return: runner summary
    """
    logger.setup_logger('DEBUG')
    runner = HttpRunner(failfast=False)
    run_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    runner.run(testset_path)
    add_test_reports(run_time, report_name=report_name, **runner.summary)
    return runner.summary
def main_hrun(testset_path, executor=None, request_data=None):
    """Run test cases and persist a report, or an error report on failure.

    :param testset_path: dict or list — generated testcase path
    :param executor: str: name of the user triggering the run
    :param request_data: dict with keys 'id', 'env_name', 'type' and
        optionally 'report_name'
    """
    logger.setup_logger('INFO')
    runner = HttpRunner(failfast=False)
    error_info = {
        'start_time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    }
    report_type = 'test'
    report_name = '执行异常'
    try:
        run_id = request_data.pop('id')      # renamed: ``id`` shadows builtin
        base_url = request_data.pop('env_name')
        run_type = request_data.pop('type')  # renamed: ``type`` shadows builtin
        report_name = request_data.get('report_name', None)
        if run_type:
            report_type = run_type
        # run_test_by_type also produces a default report name.
        default_report_name = run_test_by_type(run_id, base_url, testset_path, run_type)
        if not report_name:
            report_name = default_report_name
        runner.run2(testset_path)
        shutil.rmtree(testset_path)
        runner.summary = timestamp_to_datetime(runner.summary)
        report_path = add_test_reports(runner,
                                       report_name=report_name,
                                       report_type=report_type,
                                       executor=executor)
    except BaseException as e:
        # The original had two byte-identical handlers for Exception and
        # BaseException; a single BaseException handler covers both
        # (including SystemExit/KeyboardInterrupt, as before).
        logger.log_info("出现异常: {0}".format(e))
        error_info['error_msg'] = "出现异常: {0}".format(e)
        add_error_reports(error_info,
                          report_name=report_name,
                          report_type=report_type,
                          executor=executor)
def project_hrun(env_name, project):
    """Run every case of ``project`` against ``env_name`` and record a report."""
    logger.setup_logger('DEBUG')
    runner = HttpRunner(failfast=False)
    project_id = ProjectInfo.objects.get(project_name=project).id
    testcases_dict = run_by_project(project_id, env_name)
    run_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    runner.run(testcases_dict)
    add_test_reports(run_time, report_name=project, **runner.summary)
    return runner.summary
def main_hrun():
    """API test: parse command line options and run commands.

    Returns:
        int: 0 on success, 1 on failure (suitable as a process exit code).
    """
    parser = argparse.ArgumentParser(
        description='HTTP test runner, not just about api test and load test.')
    parser.add_argument('-V', '--version', dest='version', action='store_true',
                        help="show version")
    parser.add_argument('testset_paths', nargs='*', help="testset file path")
    parser.add_argument('--log-level', default='INFO',
                        help="Specify logging level, default is INFO.")
    parser.add_argument('--failfast', action='store_true', default=False,
                        help="Stop the test run on the first error or failure.")
    parser.add_argument('--startproject', help="Specify new project name.")
    args = parser.parse_args()
    logger.setup_logger(args.log_level)
    # Removed debug leftover: args.testset_paths was overwritten with a
    # hard-coded local path ('E:\case'), discarding the CLI argument.
    if args.version:
        logger.color_print("HttpRunner version: {}".format(hrun_version), "GREEN")
        logger.color_print("PyUnitReport version: {}".format(pyu_version), "GREEN")
        exit(0)
    project_name = args.startproject
    if project_name:
        project_path = os.path.join(os.getcwd(), project_name)
        create_scaffold(project_path)
        exit(0)
    kwargs = {
        "output": os.path.join(os.getcwd(), "reports"),
        "failfast": args.failfast
    }
    test_runner = HTMLTestRunner(**kwargs)
    result = run_suite_path(args.testset_paths, {}, test_runner)
    print_output(result.output)
    return 0 if result.success else 1
def main_hrun(testset_path, report_name):
    """Run the cases under ``testset_path`` and record a timestamped report.

    :param testset_path: dict or list
    :param report_name: str
    :return: runner summary
    """
    logger.setup_logger('INFO')
    runner = HttpRunner(failfast=False)
    run_time = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
    runner.run(testset_path)
    add_test_reports(run_time, report_name=report_name, **runner.summary)
    return runner.summary
def main_hrun(testset_path, report_name):
    """Run cases, remove the generated case directory, and save a report.

    :param testset_path: dict or list
    :param report_name: str
    :return: runner summary
    """
    logger.setup_logger('INFO')
    runner = HttpRunner(failfast=False)
    runner.run(testset_path)
    shutil.rmtree(testset_path)
    runner.summary = timestamp_to_datetime(runner.summary)
    add_test_reports(report_name=report_name, **runner.summary)
    return runner.summary
def project_hrun(base_url, project, config):
    """Asynchronously run a whole project with the given config.

    :param base_url: str: environment base url
    :param project: str: project name (also used as report name)
    :param config: run configuration passed through to run_by_project
    :return: runner summary
    """
    logger.setup_logger('DEBUG')
    runner = HttpRunner(failfast=False)
    project_id = ProjectInfo.objects.get(project_name=project).id
    testcases_dict = run_by_project(project_id, base_url, config)
    run_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    runner.run(testcases_dict)
    add_test_reports(run_time, report_name=project, **runner.summary)
    return runner.summary
def module_hrun(env_name, project, module):
    """Run the comma-separated modules of a project against ``env_name``.

    :return: runner summary, or an error string when a module is missing
    """
    logger.setup_logger('DEBUG')
    runner = HttpRunner(failfast=False)
    testcase_lists = []
    try:
        for module_name in module.split(','):
            module_id = ModuleInfo.objects.get(
                module_name__exact=module_name,
                belong_project__project_name=project).id
            testcase_lists.extend(run_by_module(module_id, env_name))
    except ObjectDoesNotExist:
        return '找不到模块信息'
    run_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    runner.run(testcase_lists)
    add_test_reports(run_time, report_name=project, **runner.summary)
    return runner.summary
def main_hrun(testset_path, report_name, envId):
    """Run cases, convert summary timestamps for ``envId``, save the report.

    :param testset_path: dict or list
    :param report_name: str
    :param envId: environment id forwarded to timestamp_to_datetime
    """
    logger.setup_logger('INFO')
    runner = HttpRunner(failfast=False)
    runner.run(testset_path)
    shutil.rmtree(testset_path)
    runner.summary = timestamp_to_datetime(runner.summary, env_id=envId)
    report_path = add_test_reports(runner, report_name=report_name)
    # The HTML file is transient: its content is persisted by
    # add_test_reports, so the on-disk copy is removed afterwards.
    os.remove(report_path)
def __init__(self, failfast=False, save_tests=False, log_level="INFO", log_file=None):
    """Initialize HttpRunner.

    Args:
        failfast (bool): stop the test run on the first error or failure.
        save_tests (bool): save loaded/parsed tests to a JSON file.
        log_level (str): logging level.
        log_file (str): log file path.
    """
    logger.setup_logger(log_level, log_file)
    # Stage label used when reporting where an exception occurred.
    self.exception_stage = "initialize HttpRunner()"
    self.unittest_runner = unittest.TextTestRunner(
        failfast=failfast, resultclass=report.HtmlTestResult)
    self.test_loader = unittest.TestLoader()
    self.save_tests = save_tests
    self._summary = None
    # Project working directory; resolved later during test loading.
    self.project_working_directory = None
def project_hrun(name, base_url, project, receiver):
    """Asynchronously run an entire project and optionally mail the HTML report.

    :param name: str: report name
    :param base_url: str: environment base url
    :param project: str: project name
    :param receiver: str: mail receivers; empty string disables mailing
    :return: runner summary
    """
    logger.setup_logger('INFO')
    runner = HttpRunner(failfast=False)
    project_id = ProjectInfo.objects.get(project_name=project).id
    testcases_dict = run_by_project(project_id, base_url)
    run_time = time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time()))
    runner.run(testcases_dict)
    add_test_reports(run_time, report_name=name, **runner.summary)
    if receiver != '':
        send_html_reports(receiver, runner)
    return runner.summary
def module_hrun(base_url, module, project, config):
    """Asynchronously run modules with a shared config and record a report.

    :param base_url: str: environment base url
    :param module: iterable of (module_id, ...) rows
    :param project: str: project name (also used as report name)
    :param config: run configuration passed through to run_by_module
    :return: runner summary, or an error string when a module is missing
    """
    logger.setup_logger('DEBUG')
    runner = HttpRunner(failfast=False)
    testcase_lists = []
    try:
        for row in list(module):
            testcase_lists.extend(run_by_module(row[0], base_url, config))
    except ObjectDoesNotExist:
        return '找不到模块信息'
    run_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    runner.run(testcase_lists)
    add_test_reports(run_time, report_name=project, **runner.summary)
    return runner.summary
def main_locust():
    """ Performance test with locust: parse command line options and run commands. """
    logger.setup_logger("INFO")
    try:
        from httprunner import locusts
    except ImportError:
        msg = "Locust is not installed, install first and try again.\n"
        msg += "install command: pip install locustio"
        logger.log_warning(msg)
        exit(1)
    # Re-brand argv[0] so locust's own help/usage output shows 'locust'.
    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        # No arguments at all: show help.
        sys.argv.extend(["-h"])
    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        # Delegate help/version straight to locust.
        locusts.main()
        sys.exit(0)
    try:
        # The argument right after -f is the testcase file path.
        testcase_index = sys.argv.index('-f') + 1
        assert testcase_index < len(sys.argv)
    except (ValueError, AssertionError):
        logger.log_error("Testcase file is not specified, exit.")
        sys.exit(1)
    testcase_file_path = sys.argv[testcase_index]
    # Replace the YAML/JSON testcase path with a generated locustfile path.
    sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)
    if "--processes" in sys.argv:
        """ locusts -f locustfile.py --processes 4 """
        if "--no-web" in sys.argv:
            logger.log_error("conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)
        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1
        if processes_count_index >= len(sys.argv):
            """ do not specify processes count explicitly
                locusts -f locustfile.py --processes
            """
            processes_count = multiprocessing.cpu_count()
            logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
        else:
            try:
                """ locusts -f locustfile.py --processes 4 """
                processes_count = int(sys.argv[processes_count_index])
                # Remove the count so locust itself never sees it.
                sys.argv.pop(processes_count_index)
            except ValueError:
                """ locusts -f locustfile.py --processes -P 8888 """
                # The token after --processes was another option, not a count.
                processes_count = multiprocessing.cpu_count()
                logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
        # Strip --processes itself before handing argv to locust.
        sys.argv.pop(processes_index)
        locusts.run_locusts_with_processes(sys.argv, processes_count)
    else:
        locusts.main()
# Module imports: stdlib, third-party (requests/yaml/bs4/httprunner), then
# project-local fastrunner modules.
import json
import os
import shutil
import sys
import tempfile
import types
import requests
import yaml
from bs4 import BeautifulSoup
from httprunner import HttpRunner, logger
from requests.cookies import RequestsCookieJar
from fastrunner import models
from fastrunner.utils.parser import Format

# Module-level logging configuration (runs at import time).
logger.setup_logger('INFO')

# Canned API error payload: "the node has no interfaces or test suites".
TEST_NOT_EXISTS = {"code": "0102", "status": False, "msg": "节点下没有接口或者用例集"}


def is_function(tup):
    """ Takes (name, object) tuple, returns True if it is a function. """
    name, item = tup
    return isinstance(item, types.FunctionType)


def is_variable(tup):
    """ Takes (name, object) tuple, returns True if it is a variable. """
    # NOTE(review): this definition appears truncated in the visible source —
    # only the unpacking is present; confirm the remainder against the file.
    name, item = tup
def main():
    """ Performance test with locust: parse command line options and run commands. """
    # Re-brand argv[0] so locust's own help/usage output shows 'locust'.
    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        # No arguments at all: show help.
        sys.argv.extend(["-h"])
    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        # Delegate help/version straight to locust.
        start_locust_main()

    def get_arg_index(*target_args):
        # Return the index of the value following the first matching flag,
        # or None when none of the flags are present.
        for arg in target_args:
            if arg not in sys.argv:
                continue
            return sys.argv.index(arg) + 1
        return None

    # set logging level
    loglevel_index = get_arg_index("-L", "--loglevel")
    if loglevel_index and loglevel_index < len(sys.argv):
        loglevel = sys.argv[loglevel_index]
    else:
        # default
        loglevel = "WARNING"
    logger.setup_logger(loglevel)
    # get testcase file path
    try:
        testcase_index = get_arg_index("-f", "--locustfile")
        assert testcase_index and testcase_index < len(sys.argv)
    except AssertionError:
        print("Testcase file is not specified, exit.")
        sys.exit(1)
    testcase_file_path = sys.argv[testcase_index]
    # Replace the YAML/JSON testcase path with a generated locustfile path.
    sys.argv[testcase_index] = parse_locustfile(testcase_file_path)
    if "--processes" in sys.argv:
        """ locusts -f locustfile.py --processes 4 """
        if "--no-web" in sys.argv:
            logger.log_error(
                "conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)
        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1
        if processes_count_index >= len(sys.argv):
            """ do not specify processes count explicitly
                locusts -f locustfile.py --processes
            """
            processes_count = multiprocessing.cpu_count()
            logger.log_warning(
                "processes count not specified, use {} by default.".format(
                    processes_count))
        else:
            try:
                """ locusts -f locustfile.py --processes 4 """
                processes_count = int(sys.argv[processes_count_index])
                # Remove the count so locust itself never sees it.
                sys.argv.pop(processes_count_index)
            except ValueError:
                """ locusts -f locustfile.py --processes -P 8888 """
                # The token after --processes was another option, not a count.
                processes_count = multiprocessing.cpu_count()
                logger.log_warning(
                    "processes count not specified, use {} by default.".format(
                        processes_count))
        # Strip --processes itself before handing argv to locust.
        sys.argv.pop(processes_index)
        run_locusts_with_processes(sys.argv, processes_count)
    else:
        start_locust_main()
def main_locust():
    """ Performance test with locust: parse command line options and run commands. """
    try:
        # monkey patch ssl at beginning to avoid RecursionError when running locust.
        from gevent import monkey
        monkey.patch_ssl()
        import multiprocessing
        import sys
        from httprunner import logger
        from httprunner import locusts
    except ImportError:
        msg = "Locust is not installed, install first and try again.\n"
        msg += "install command: pip install locustio"
        print(msg)
        exit(1)
    # Re-brand argv[0] so locust's own help/usage output shows 'locust'.
    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        sys.argv.extend(["-h"])
    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        locusts.start_locust_main()
        sys.exit(0)
    # set logging level
    if "-L" in sys.argv:
        loglevel_index = sys.argv.index('-L') + 1
    elif "--loglevel" in sys.argv:
        loglevel_index = sys.argv.index('--loglevel') + 1
    else:
        loglevel_index = None
    if loglevel_index and loglevel_index < len(sys.argv):
        loglevel = sys.argv[loglevel_index]
    else:
        # default
        loglevel = "WARNING"
    logger.setup_logger(loglevel)
    # get testcase file path
    try:
        if "-f" in sys.argv:
            testcase_index = sys.argv.index('-f') + 1
        elif "--locustfile" in sys.argv:
            testcase_index = sys.argv.index('--locustfile') + 1
        else:
            testcase_index = None
        assert testcase_index and testcase_index < len(sys.argv)
    except AssertionError:
        print("Testcase file is not specified, exit.")
        sys.exit(1)
    testcase_file_path = sys.argv[testcase_index]
    # Replace the YAML/JSON testcase path with a generated locustfile path.
    sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)
    if "--processes" in sys.argv:
        """ locusts -f locustfile.py --processes 4 """
        if "--no-web" in sys.argv:
            # FIX: this message literal was garbled by a stray carriage-return
            # entity in the source; restored to match the sibling variants.
            logger.log_error(
                "conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)
        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1
        if processes_count_index >= len(sys.argv):
            """ do not specify processes count explicitly
                locusts -f locustfile.py --processes
            """
            processes_count = multiprocessing.cpu_count()
            logger.log_warning(
                "processes count not specified, use {} by default.".format(
                    processes_count))
        else:
            try:
                """ locusts -f locustfile.py --processes 4 """
                processes_count = int(sys.argv[processes_count_index])
                sys.argv.pop(processes_count_index)
            except ValueError:
                """ locusts -f locustfile.py --processes -P 8888 """
                processes_count = multiprocessing.cpu_count()
                logger.log_warning(
                    "processes count not specified, use {} by default.".format(
                        processes_count))
        # Strip --processes itself before handing argv to locust.
        sys.argv.pop(processes_index)
        locusts.run_locusts_with_processes(sys.argv, processes_count)
    else:
        locusts.start_locust_main()
def _build_hrun_cli_parser():
    """Build the argument parser for the hrun command line."""
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument('-V', '--version', dest='version', action='store_true',
                        help="show version")
    parser.add_argument('testset_paths', nargs='*', help="testset file path")
    parser.add_argument('--no-html-report', action='store_true', default=False,
                        help="do not generate html report.")
    parser.add_argument('--html-report-name',
                        help="specify html report name, only effective when generating html report.")
    parser.add_argument('--html-report-template',
                        help="specify html report template path.")
    parser.add_argument('--log-level', default='INFO',
                        help="Specify logging level, default is INFO.")
    parser.add_argument('--log-file', help="Write logs to specified file path.")
    parser.add_argument('--dot-env-path',
                        help="Specify .env file path, which is useful for keeping production credentials.")
    parser.add_argument('--failfast', action='store_true', default=False,
                        help="Stop the test run on the first error or failure.")
    parser.add_argument('--startproject', help="Specify new project name.")
    parser.add_argument('--validate', nargs='*', help="Validate JSON testset format.")
    parser.add_argument('--prettify', nargs='*', help="Prettify JSON testset format.")
    return parser


def main_hrun():
    """API test: parse command line options and run commands.

    Returns:
        int: 0 on success, 1 on failure (suitable as a process exit code).
    """
    args = _build_hrun_cli_parser().parse_args()
    logger.setup_logger(args.log_level, args.log_file)
    if is_py2:
        logger.log_warning(get_python2_retire_msg())
    if args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)
    if args.validate:
        validate_json_file(args.validate)
        exit(0)
    if args.prettify:
        prettify_json_file(args.prettify)
        exit(0)
    if args.startproject:
        # Scaffold a new project under the current working directory.
        create_scaffold(os.path.join(os.getcwd(), args.startproject))
        exit(0)
    # run() returns the runner itself, so the report helpers chain off it.
    runner = HttpRunner(failfast=args.failfast,
                        dot_env_path=args.dot_env_path).run(args.testset_paths)
    if not args.no_html_report:
        runner.gen_html_report(html_report_name=args.html_report_name,
                               html_report_template=args.html_report_template)
    summary = runner.summary
    print_output(summary["output"])
    return 0 if summary["success"] else 1
def main_locust():
    """ Performance test with locust: parse command line options and run commands. """
    logger.setup_logger("INFO")
    try:
        from httprunner import locusts
    except ImportError:
        msg = "Locust is not installed, install first and try again.\n"
        msg += "install command: pip install locustio"
        logger.log_warning(msg)
        exit(1)
    # Re-brand argv[0] so locust's own help/usage output shows 'locust'.
    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        # No arguments at all: show help.
        sys.argv.extend(["-h"])
    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        # Delegate help/version straight to locust.
        locusts.main()
        sys.exit(0)
    try:
        # The argument right after -f is the testcase file path.
        testcase_index = sys.argv.index('-f') + 1
        assert testcase_index < len(sys.argv)
    except (ValueError, AssertionError):
        logger.log_error("Testcase file is not specified, exit.")
        sys.exit(1)
    testcase_file_path = sys.argv[testcase_index]
    # Replace the YAML/JSON testcase path with a generated locustfile path.
    sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)
    if "--processes" in sys.argv:
        """ locusts -f locustfile.py --processes 4 """
        if "--no-web" in sys.argv:
            logger.log_error(
                "conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)
        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1
        if processes_count_index >= len(sys.argv):
            """ do not specify processes count explicitly
                locusts -f locustfile.py --processes
            """
            processes_count = multiprocessing.cpu_count()
            logger.log_warning(
                "processes count not specified, use {} by default.".format(
                    processes_count))
        else:
            try:
                """ locusts -f locustfile.py --processes 4 """
                processes_count = int(sys.argv[processes_count_index])
                # Remove the count so locust itself never sees it.
                sys.argv.pop(processes_count_index)
            except ValueError:
                """ locusts -f locustfile.py --processes -P 8888 """
                # The token after --processes was another option, not a count.
                processes_count = multiprocessing.cpu_count()
                logger.log_warning(
                    "processes count not specified, use {} by default.".format(
                        processes_count))
        # Strip --processes itself before handing argv to locust.
        sys.argv.pop(processes_index)
        locusts.run_locusts_with_processes(sys.argv, processes_count)
    else:
        locusts.main()
def main_hrun():
    """API test: parse command line options and run commands.

    Returns:
        int: 0 on success, 1 on failure (suitable as a process exit code).
    """
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument('-V', '--version', dest='version',
                        action='store_true', help="show version")
    parser.add_argument('testset_paths', nargs='*', help="testset file path")
    parser.add_argument('--no-html-report', action='store_true',
                        default=False, help="do not generate html report.")
    parser.add_argument('--html-report-name',
                        help="specify html report name, only effective when generating html report.")
    parser.add_argument('--html-report-template',
                        help="specify html report template path.")
    parser.add_argument('--log-level', default='INFO',
                        help="Specify logging level, default is INFO.")
    parser.add_argument('--log-file',
                        help="Write logs to specified file path.")
    parser.add_argument('--dot-env-path',
                        help="Specify .env file path, which is useful for keeping production credentials.")
    parser.add_argument('--failfast', action='store_true', default=False,
                        help="Stop the test run on the first error or failure.")
    parser.add_argument('--startproject', help="Specify new project name.")
    parser.add_argument('--validate', nargs='*',
                        help="Validate JSON testset format.")
    parser.add_argument('--prettify', nargs='*',
                        help="Prettify JSON testset format.")
    cli_args = parser.parse_args()
    logger.setup_logger(cli_args.log_level, cli_args.log_file)
    if is_py2:
        logger.log_warning(get_python2_retire_msg())
    if cli_args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)
    if cli_args.validate:
        validate_json_file(cli_args.validate)
        exit(0)
    if cli_args.prettify:
        prettify_json_file(cli_args.prettify)
        exit(0)
    project_name = cli_args.startproject
    if project_name:
        # Scaffold a new project under the current working directory.
        create_scaffold(os.path.join(os.getcwd(), project_name))
        exit(0)
    # run() returns the runner itself, so the report helpers chain off it.
    runner = HttpRunner(
        failfast=cli_args.failfast,
        dot_env_path=cli_args.dot_env_path).run(cli_args.testset_paths)
    if not cli_args.no_html_report:
        runner.gen_html_report(
            html_report_name=cli_args.html_report_name,
            html_report_template=cli_args.html_report_template)
    summary = runner.summary
    print_output(summary["output"])
    return 0 if summary["success"] else 1