def run(self, path_or_testsets, mapping=None):
    """Run tests with an optional variables mapping.

    Args:
        path_or_testsets: YAML/JSON testset file path or testset list.
            A path may be:
                - an absolute/relative file path
                - an absolute/relative folder path
                - a list/set container with file(s) and/or folder(s)
            Testsets may be:
                - a single testset dict
                - a list of testset dicts
        mapping (dict): when specified, overrides variables in the
            config block.

    Returns:
        self, so that calls can be chained.
    """
    try:
        task_suite = init_task_suite(path_or_testsets, mapping)
    except exception.TestcaseNotFound:
        logger.log_error("Testcases not found in {}".format(path_or_testsets))
        sys.exit(1)

    self.summary = get_summary(self.runner.run(task_suite))
    # flatten every task's output into a single list on the summary
    self.summary["output"] = [
        item for task in task_suite.tasks for item in task.output
    ]
    return self
def __getattr__(self, key):
    """Lazily proxy attribute access to the wrapped requests.Response.

    "json" is special-cased to the parsed body; any other key is read
    straight off the underlying response object. The fetched value is
    cached on the instance so __getattr__ only fires once per key.

    Raises:
        exception.ParamsError: when the wrapped response has no such attribute.
    """
    try:
        fetched = self.resp_obj.json() if key == "json" \
            else getattr(self.resp_obj, key)
        # cache on the instance; subsequent reads bypass __getattr__
        self.__dict__[key] = fetched
        return fetched
    except AttributeError:
        message = "ResponseObject does not have attribute: {}".format(key)
        logger.log_error(message)
        raise exception.ParamsError(message)
def _load_json_file(json_file):
    """Parse *json_file* as UTF-8 JSON, validate it as testcase content,
    and return the parsed data.

    Raises:
        exception.FileFormatError: when the file is not valid JSON, or
            the parsed content fails the testcase format check.
    """
    with io.open(json_file, encoding='utf-8') as fp:
        try:
            parsed = json.load(fp)
        except exception.JSONDecodeError:
            msg = u"JSONDecodeError: JSON file format error: {}".format(json_file)
            logger.log_error(msg)
            raise exception.FileFormatError(msg)

        FileUtils._check_format(json_file, parsed)
        return parsed
def _check_format(file_path, content):
    """Validate that loaded testcase content is a non-empty list or dict.

    Raises:
        exception.FileFormatError: when *content* is empty or is neither
            a list nor a dict.
    """
    if not content:
        # testcase file content is empty
        reason = u"Testcase file content is empty: {}".format(file_path)
    elif not isinstance(content, (list, dict)):
        # testcase file content does not match testcase format
        reason = u"Testcase file content format invalid: {}".format(file_path)
    else:
        return

    logger.log_error(reason)
    raise exception.FileFormatError(reason)
def _extract_field_with_regex(self, field):
    """Extract a value from the response text with a regular expression.

    The response body could be JSON or HTML text.

    Args:
        field (str): regex containing exactly one capture group, i.e. it
            matches r".*\(.*\).*".
            e.g. self.text == "LB123abcRB789",
                 field == "LB[\d]*(.*)RB[\d]*"  =>  "abc"

    Raises:
        exception.ParamsError: when the regex does not match the body.
    """
    found = re.search(field, self.text)
    if found:
        return found.group(1)

    err_msg = u"Failed to extract data with regex!\n"
    err_msg += u"response content: {}\n".format(self.content)
    err_msg += u"regex: {}\n".format(field)
    logger.log_error(err_msg)
    raise exception.ParamsError(err_msg)
def extract_field(self, field):
    """Extract a single value from the wrapped requests.Response.

    ``field`` is either a regex (dispatched to the regex extractor) or a
    delimiter-joined attribute path such as "content.person.name".
    """
    try:
        use_regex = bool(text_extractor_regexp_compile.match(field))
        extractor = (self._extract_field_with_regex if use_regex
                     else self._extract_field_with_delimiter)
        value = extractor(field)
        logger.log_debug("extract field: {}\t=> {}".format(field, value))
    # TODO: unify ParseResponseError type
    except (exception.ParseResponseError, TypeError):
        logger.log_error("failed to extract field: {}".format(field))
        raise

    return value
def debug_api(api, project, name=None, config=None, save=True, user=''):
    """Debug-run one or more api/teststep definitions.

    Args:
        api (dict or list): a single teststep dict (httprunner script) or
            a list of teststep dicts.
        project (int): project id, used to locate the debugtalk module.
        name (str): optional testcase name.
        config (dict): optional config block; its "parameters" entries are
            filtered down to those actually referenced by the teststeps.
        save (bool): persist the run summary when True.
        user (str): user recorded together with the saved summary.

    Returns:
        parsed summary dict, or TEST_NOT_EXISTS when *api* is empty.

    Raises:
        SyntaxError: wraps any exception raised while parsing/running
            (kept for backward compatibility with existing callers).
    """
    if not api:
        return TEST_NOT_EXISTS

    # normalize: a single teststep dict becomes a one-element list
    if isinstance(api, dict):
        api = [api]

    # parameter filtering: only load config parameters that are actually
    # referenced (as "$var") in the teststeps' params/json payloads
    if config and config.get('parameters'):
        api_params = []
        for item in api:
            # bug fix: either key may be absent/None — fall back to {}
            # so .values() cannot raise AttributeError
            payload = item['request'].get('params') \
                or item['request'].get('json') or {}
            for v in payload.values():
                if isinstance(v, list):  # idiom fix: isinstance over type() ==
                    api_params.extend(v)
                else:
                    api_params.append(v)

        parameters = []
        for dic in config['parameters']:
            # a key may be a combined "key1-key2" form, so split on "-";
            # bug fix: append each matching dict only once (the original
            # could append the same dict multiple times)
            referenced = any(
                '$' + part in api_params
                for key in dic.keys()
                for part in key.split('-')
            )
            if referenced:
                parameters.append(dic)
        config['parameters'] = parameters

    debugtalk_content, debugtalk_path = load_debugtalk(project)
    # run from the debugtalk directory so relative references resolve
    os.chdir(os.path.dirname(debugtalk_path))
    try:
        testcase_list = [
            parse_tests(api,
                        debugtalk_content,
                        name=name,
                        config=config,
                        project=project)
        ]

        runner = HttpRunner(failfast=False)
        runner.run(testcase_list)
        summary = parse_summary(runner.summary)

        if save:
            save_summary(name, summary, project, type=1, user=user)

        return summary
    except Exception as e:
        logger.log_error(f"debug_api error: {e}")
        # kept as SyntaxError for backward compatibility with callers
        raise SyntaxError(str(e))
    finally:
        # always restore cwd and clean up the temporary debugtalk dir
        os.chdir(BASE_DIR)
        shutil.rmtree(os.path.dirname(debugtalk_path))
def request(self, method, url, name=None, **kwargs): """ Constructs and sends a :py:class:`requests.Request`. Returns :py:class:`requests.Response` object. :param method: method for the new :class:`Request` object. :param url: URL for the new :class:`Request` object. :param name: (optional) Placeholder, make compatible with Locust's HttpSession :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`. :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`. :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload. :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or \ a (`connect timeout, read timeout <user/advanced.html#timeouts>`_) tuple. :type timeout: float or tuple :param allow_redirects: (optional) Set to True by default. :type allow_redirects: bool :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param stream: (optional) whether to immediately download the response content. Defaults to ``False``. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. 
""" # store detail data of request and response self.meta_data = {} # prepend url with hostname unless it's already an absolute URL url = self._build_url(url) # set up pre_request hook for attaching meta data to the request object self.meta_data["method"] = method kwargs.setdefault("timeout", 120) self.meta_data["request_time"] = time.time() response = self._send_request_safe_mode(method, url, **kwargs) # record the consumed time self.meta_data["response_time_ms"] = round((time.time() - self.meta_data["request_time"]) * 1000, 2) self.meta_data["elapsed_ms"] = response.elapsed.microseconds / 1000.0 self.meta_data["url"] = (response.history and response.history[0] or response)\ .request.url self.meta_data["request_headers"] = response.request.headers self.meta_data["request_body"] = response.request.body self.meta_data["status_code"] = response.status_code self.meta_data["response_headers"] = response.headers try: self.meta_data["response_body"] = response.json() except ValueError: self.meta_data["response_body"] = response.content msg = "response details:\n" msg += "> status_code: {}\n".format(self.meta_data["status_code"]) msg += "> headers: {}\n".format(self.meta_data["response_headers"]) msg += "> body: {}".format(self.meta_data["response_body"]) logger.log_debug(msg) # get the length of the content, but if the argument stream is set to True, we take # the size from the content-length header, in order to not trigger fetching of the body if kwargs.get("stream", False): self.meta_data["content_size"] = int(self.meta_data["response_headers"].get("content-length") or 0) else: self.meta_data["content_size"] = len(response.content or "") try: response.raise_for_status() except RequestException as e: logger.log_error(u"{exception}".format(exception=str(e))) else: logger.log_info( """status_code: {}, response_time(ms): {} ms, response_length: {} bytes""".format( self.meta_data["status_code"], self.meta_data["response_time_ms"], self.meta_data["content_size"] ) ) return 
response
def main():
    """ API test: parse command line options and run commands. """
    # warn users still on retired Python 2
    if is_py2:
        color_print(get_python2_retire_msg(), "YELLOW")

    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument('-V', '--version', dest='version', action='store_true',
                        help="show version")
    parser.add_argument('testfile_paths', nargs='*',
                        help="Specify api/testcase/testsuite file paths to run.")
    parser.add_argument('--log-level', default='INFO',
                        help="Specify logging level, default is INFO.")
    parser.add_argument('--log-file',
                        help="Write logs to specified file path.")
    parser.add_argument('--dot-env-path',
                        help="Specify .env file path, which is useful for keeping sensitive data.")
    parser.add_argument('--report-template',
                        help="Specify report template path.")
    parser.add_argument('--report-dir',
                        help="Specify report save directory.")
    parser.add_argument('--report-file',
                        help="Specify report file path, this has higher priority than specifying report dir.")
    parser.add_argument('--save-tests', action='store_true', default=False,
                        help="Save loaded/parsed/summary json data to JSON files.")
    parser.add_argument('--failfast', action='store_true', default=False,
                        help="Stop the test run on the first error or failure.")
    parser.add_argument('--startproject',
                        help="Specify new project name.")
    parser.add_argument('--validate', nargs='*',
                        help="Validate YAML/JSON api/testcase/testsuite format.")
    parser.add_argument('--prettify', nargs='*',
                        help="Prettify JSON testcase format.")

    args = parser.parse_args()

    if len(sys.argv) == 1:
        # no argument passed
        parser.print_help()
        sys.exit(0)

    if args.version:
        color_print("{}".format(__version__), "GREEN")
        sys.exit(0)

    if args.validate:
        # validate each file; continue on errors so every file is checked
        for validate_path in args.validate:
            try:
                color_print("validate test file: {}".format(validate_path),
                            "GREEN")
                load_cases(validate_path, args.dot_env_path)
            except exceptions.MyBaseError as ex:
                log_error(str(ex))
                continue
        color_print("done!", "BLUE")
        sys.exit(0)

    if args.prettify:
        prettify_json_file(args.prettify)
        sys.exit(0)

    project_name = args.startproject
    if project_name:
        # scaffold a new project directory, then exit
        create_scaffold(project_name)
        sys.exit(0)

    runner = HttpRunner(failfast=args.failfast,
                        save_tests=args.save_tests,
                        log_level=args.log_level,
                        log_file=args.log_file)

    err_code = 0
    try:
        for path in args.testfile_paths:
            summary = runner.run(path, dot_env_path=args.dot_env_path)
            report_dir = args.report_dir or os.path.join(
                runner.project_working_directory, "reports")
            gen_html_report(summary,
                            report_template=args.report_template,
                            report_dir=report_dir,
                            report_file=args.report_file)
            # accumulate failure across paths: any failed run flips exit code to 1
            err_code |= (0 if summary and summary["success"] else 1)
    except Exception as ex:
        color_print(
            "!!!!!!!!!! exception stage: {} !!!!!!!!!!".format(
                runner.exception_stage), "YELLOW")
        color_print(str(ex), "RED")
        sentry_sdk.capture_exception(ex)
        err_code = 1

    sys.exit(err_code)
def _run_test(self, test_dict):
    """ run single teststep.

    Args:
        test_dict (dict): teststep info
            {
                "name": "teststep description",
                "skip": "skip this test unconditionally",
                "times": 3,
                "variables": [],        # optional, override
                "request": {
                    "url": "http://127.0.0.1:5000/api/users/1000",
                    "method": "POST",
                    "headers": {
                        "Content-Type": "application/json",
                        "authorization": "$authorization",
                        "random": "$random"
                    },
                    "json": {"name": "user", "password": "******"}
                },
                "extract": {},          # optional
                "validate": [],         # optional
                "setup_hooks": [],      # optional
                "teardown_hooks": [],   # optional
                "wait": {}              # optional
            }

    Raises:
        exceptions.ParamsError
        exceptions.ValidationFailure
        exceptions.ExtractFailure

    """
    # "wait" makes this step poll: re-run until the wait validators pass
    # or the total timeout elapses
    wait_params = test_dict.get("wait", {})
    validate_pass = None
    failures = []

    if wait_params:
        # bug fix: dict.has_key() was removed in Python 3 — use .get()
        # with the same defaults (300s total, 5s interval)
        wait_total_time = wait_params.get("Total_Time", 300)
        wait_interval = wait_params.get("Interval", 5)
        # validators evaluated on each polling attempt
        wait_validators = wait_params.get("validate", [])

        for item in (wait_total_time, wait_interval):
            if not isinstance(item, int):
                err_msg = u"Invalid wait function parameters! => {}\n".format(
                    item)
                logger.log_error(err_msg)
                raise exceptions.ParamsError(err_msg)

        start_time = time.time()
        # re-check the clock right before each attempt so we never run an
        # extra iteration after the total timeout has already elapsed
        while time.time() - start_time < wait_total_time:
            validate_pass, failures = self._run_test_once(
                test_dict=test_dict, wait_validators=wait_validators)
            if validate_pass:
                break
            time.sleep(wait_interval)
        # ParamsError / ValidationFailure / ExtractFailure raised by
        # _run_test_once propagate to the caller unchanged (the original
        # wrapped the loop in a try/except that only re-raised them)
    else:
        validate_pass, failures = self._run_test_once(test_dict=test_dict)

    if not validate_pass:
        raise exceptions.ValidationFailure("\n".join(failures))
def request(self, method, url, name=None, **kwargs):
    """
    Constructs and sends a :py:class:`requests.Request`.
    Returns :py:class:`requests.Response` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param name: (optional) Placeholder, make compatible with Locust's HttpSession
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'filename': file-like-objects`` for multipart encoding upload.
    :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or \
        a (`connect timeout, read timeout <user/advanced.html#timeouts>`_) tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Set to True by default.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param stream: (optional) whether to immediately download the response content. Defaults to ``False``.
    :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    """
    self.init_meta_data()

    # record test name
    self.meta_data["name"] = name

    # record original request info
    self.meta_data["data"][0]["request"]["method"] = method
    self.meta_data["data"][0]["request"]["url"] = url
    # default timeout is 120 seconds unless the caller overrides it
    kwargs.setdefault("timeout", 120)
    self.meta_data["data"][0]["request"].update(kwargs)

    # prepend url with hostname unless it's already an absolute URL
    url = build_url(self.base_url, url)

    start_timestamp = time.time()
    response = self._send_request_safe_mode(method, url, **kwargs)
    response_time_ms = round((time.time() - start_timestamp) * 1000, 2)

    # get the length of the content, but if the argument stream is set to True, we take
    # the size from the content-length header, in order to not trigger fetching of the body
    if kwargs.get("stream", False):
        content_size = int(dict(response.headers).get("content-length") or 0)
    else:
        content_size = len(response.content or "")

    # record the consumed time
    self.meta_data["stat"] = {
        "response_time_ms": response_time_ms,
        # NOTE(review): elapsed.microseconds is only the microseconds
        # *component* of the timedelta, not the total elapsed time —
        # elapsed.total_seconds() * 1000 may be what was intended; confirm.
        "elapsed_ms": response.elapsed.microseconds / 1000.0,
        "content_size": content_size
    }

    # record request and response histories, include 30X redirection
    response_list = response.history + [response]
    self.meta_data["data"] = [
        self.get_req_resp_record(resp_obj)
        for resp_obj in response_list
    ]

    # 4xx/5xx responses are logged as errors but still returned to the caller
    try:
        response.raise_for_status()
    except RequestException as e:
        logger.log_error(u"{exception}".format(exception=str(e)))
    else:
        logger.log_info(
            """status_code: {}, response_time(ms): {} ms, response_length: {} bytes\n""".format(
                response.status_code,
                response_time_ms,
                content_size
            )
        )

    return response
def _run_test(self, test_dict):
    """ run single teststep.

    Args:
        test_dict (dict): teststep info
            {
                "name": "teststep description",
                "skip": "skip this test unconditionally",
                "times": 3,
                "variables": [],        # optional, override
                "request": {
                    "url": "http://127.0.0.1:5000/api/users/1000",
                    "method": "POST",
                    "headers": {
                        "Content-Type": "application/json",
                        "authorization": "$authorization",
                        "random": "$random"
                    },
                    "json": {"name": "user", "password": "******"}
                },
                "extract": {},          # optional
                "validate": [],         # optional
                "setup_hooks": [],      # optional
                "teardown_hooks": []    # optional
            }

    Raises:
        exceptions.ParamsError
        exceptions.ValidationFailure
        exceptions.ExtractFailure

    """
    # clear meta data first to ensure independence for each test
    self.__clear_test_data()

    # check skip
    self._handle_skip_feature(test_dict)

    # prepare: normalize keys, then seed step-level variables
    test_dict = utils.lower_test_dict_keys(test_dict)
    test_variables = test_dict.get("variables", {})
    self.session_context.init_test_variables(test_variables)

    # teststep name (may contain $variables / ${functions})
    test_name = self.session_context.eval_content(test_dict.get("name", ""))

    # parse test request: evaluate $variables / ${functions} in the raw block
    raw_request = test_dict.get('request', {})
    parsed_test_request = self.session_context.eval_content(raw_request)
    self.session_context.update_test_variables("request", parsed_test_request)

    # setup hooks
    setup_hooks = test_dict.get("setup_hooks", [])
    if setup_hooks:
        self.do_hook_actions(setup_hooks, HookTypeEnum.SETUP)

    # prepend url with base_url unless it's already an absolute URL
    url = parsed_test_request.pop('url')
    base_url = self.session_context.eval_content(test_dict.get("base_url", ""))
    parsed_url = utils.build_url(base_url, url)

    try:
        method = parsed_test_request.pop('method')
        parsed_test_request.setdefault("verify", self.verify)
        group_name = parsed_test_request.pop("group", None)
    except KeyError:
        raise exceptions.ParamsError("URL or METHOD missed!")

    # TODO: move method validation to json schema
    valid_methods = ["GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"]
    if method.upper() not in valid_methods:
        err_msg = u"Invalid HTTP method! => {}\n".format(method)
        err_msg += "Available HTTP methods: {}".format("/".join(valid_methods))
        logger.log_error(err_msg)
        raise exceptions.ParamsError(err_msg)

    logger.log_info("{method} {url}".format(method=method, url=parsed_url))
    logger.log_debug(
        "request kwargs(raw): {kwargs}".format(kwargs=parsed_test_request))

    # request
    resp = self.http_client_session.request(
        method,
        parsed_url,
        name=(group_name or test_name),
        **parsed_test_request
    )
    resp_obj = response.ResponseObject(resp)

    def log_req_resp_details():
        # dump full request/response detail; only invoked on failure paths
        err_msg = "{} DETAILED REQUEST & RESPONSE {}\n".format(
            "*" * 32, "*" * 32)

        # log request
        err_msg += "====== request details ======\n"
        err_msg += "url: {}\n".format(parsed_url)
        err_msg += "method: {}\n".format(method)
        err_msg += "headers: {}\n".format(
            parsed_test_request.pop("headers", {}))
        for k, v in parsed_test_request.items():
            v = utils.omit_long_data(v)
            err_msg += "{}: {}\n".format(k, repr(v))
        err_msg += "\n"

        # log response
        err_msg += "====== response details ======\n"
        err_msg += "status_code: {}\n".format(resp_obj.status_code)
        err_msg += "headers: {}\n".format(resp_obj.headers)
        err_msg += "body: {}\n".format(repr(resp_obj.text))
        logger.log_error(err_msg)

    # teardown hooks; the recorded response is refreshed afterwards since
    # hooks may have modified resp_obj
    teardown_hooks = test_dict.get("teardown_hooks", [])
    if teardown_hooks:
        self.session_context.update_test_variables("response", resp_obj)
        self.do_hook_actions(teardown_hooks, HookTypeEnum.TEARDOWN)
        self.http_client_session.update_last_req_resp_record(resp_obj)

    # extract values from the response into session variables
    extractors = test_dict.get("extract", {})
    try:
        extracted_variables_mapping = resp_obj.extract_response(extractors)
        self.session_context.update_session_variables(
            extracted_variables_mapping)
    except (exceptions.ParamsError, exceptions.ExtractFailure):
        log_req_resp_details()
        raise

    # validate ("validators" accepted as a legacy alias of "validate")
    validators = test_dict.get("validate") or test_dict.get("validators") or []
    validate_script = test_dict.get("validate_script", [])
    if validate_script:
        validators.append({
            "type": "python_script",
            "script": validate_script
        })

    validator = Validator(self.session_context, resp_obj)
    try:
        validator.validate(validators)
    except exceptions.ValidationFailure:
        log_req_resp_details()
        raise
    finally:
        # always publish results, even when validation raised
        self.validation_results = validator.validation_results
def _extract_field_with_delimiter(self, field):
    """ response content could be json or html text.

    Args:
        field (str): string joined by delimiter.
            e.g.
                "status_code"
                "headers"
                "cookies"
                "content"
                "headers.content-type"
                "content.person.name.first_name"

    Returns:
        the extracted value.

    Raises:
        exceptions.ParamsError: when the field refers to no known
            response attribute, or a sub-query is used where none is allowed.
        exceptions.ExtractFailure: when the sub-query cannot be resolved.
        exceptions.TeardownHooksFailure: when a teardown-hook-set attribute
            cannot be sub-queried.
    """
    # string.split(sep=None, maxsplit=1) -> list of strings
    # e.g. "content.person.name" => ["content", "person.name"]
    try:
        top_query, sub_query = field.split('.', 1)
    except ValueError:
        top_query = field
        sub_query = None

    # scalar response attributes: no sub-query allowed
    if top_query in ["status_code", "encoding", "ok", "reason", "url"]:
        if sub_query:
            # e.g. status_code.XX
            err_msg = u"Failed to extract: {}\n".format(field)
            logger.log_error(err_msg)
            raise exceptions.ParamsError(err_msg)

        return getattr(self, top_query)

    # cookies
    elif top_query == "cookies":
        cookies = self.cookies
        if not sub_query:
            # extract cookies
            return cookies

        try:
            return cookies[sub_query]
        except KeyError:
            err_msg = u"Failed to extract cookie! => {}\n".format(field)
            err_msg += u"response cookies: {}\n".format(cookies)
            logger.log_error(err_msg)
            raise exceptions.ExtractFailure(err_msg)

    # elapsed (datetime.timedelta) — an attribute must be specified
    elif top_query == "elapsed":
        available_attributes = u"available attributes: days, seconds, microseconds, total_seconds"
        if not sub_query:
            err_msg = u"elapsed is datetime.timedelta instance, attribute should also be specified!\n"
            err_msg += available_attributes
            logger.log_error(err_msg)
            raise exceptions.ParamsError(err_msg)
        elif sub_query in ["days", "seconds", "microseconds"]:
            return getattr(self.elapsed, sub_query)
        elif sub_query == "total_seconds":
            return self.elapsed.total_seconds()
        else:
            err_msg = "{} is not valid datetime.timedelta attribute.\n".format(
                sub_query)
            err_msg += available_attributes
            logger.log_error(err_msg)
            raise exceptions.ParamsError(err_msg)

    # headers
    elif top_query == "headers":
        headers = self.headers
        if not sub_query:
            # extract headers
            return headers

        try:
            return headers[sub_query]
        except KeyError:
            err_msg = u"Failed to extract header! => {}\n".format(field)
            err_msg += u"response headers: {}\n".format(headers)
            logger.log_error(err_msg)
            raise exceptions.ExtractFailure(err_msg)

    # response body
    elif top_query in ["content", "text", "json"]:
        try:
            body = self.json
        except exceptions.JSONDecodeError:
            body = self.text

        if not sub_query:
            # extract response body
            return body

        # dict/list bodies support key paths; string bodies support
        # numeric indexing (e.g. content = "abcdefg", content.3 => d)
        if isinstance(body, (dict, list)) or sub_query.isdigit():
            return utils.query_json(body, sub_query)
        else:
            # content = "<html>abcdefg</html>", content.xxx
            err_msg = u"Failed to extract attribute from response body! => {}\n".format(
                field)
            err_msg += u"response body: {}\n".format(body)
            logger.log_error(err_msg)
            raise exceptions.ExtractFailure(err_msg)

    # new set response attributes in teardown_hooks
    elif top_query in self.__dict__:
        attributes = self.__dict__[top_query]

        if not sub_query:
            # extract response attributes
            return attributes

        if isinstance(attributes, (dict, list)) or sub_query.isdigit():
            return utils.query_json(attributes, sub_query)
        else:
            # content = "attributes.new_attribute_not_exist"
            # (message typo fixed: "cumstom" -> "custom")
            err_msg = u"Failed to extract custom set attribute from teardown hooks! => {}\n".format(
                field)
            err_msg += u"response set attributes: {}\n".format(attributes)
            logger.log_error(err_msg)
            raise exceptions.TeardownHooksFailure(err_msg)

    # others
    else:
        err_msg = u"Failed to extract attribute from response! => {}\n".format(
            field)
        err_msg += u"available response attributes: status_code, cookies, elapsed, headers, content, text, json, encoding, ok, reason, url.\n\n"
        err_msg += u"If you want to set attribute in teardown_hooks, take the following example as reference:\n"
        err_msg += u"response.new_attribute = 'new_attribute_value'\n"
        logger.log_error(err_msg)
        raise exceptions.ParamsError(err_msg)
def main():
    """ Performance test with locust: parse command line options and run commands. """
    print("HttpRunner version: {}".format(__version__))

    sys.argv[0] = "locust"
    if len(sys.argv) == 1:
        sys.argv.extend(["-h"])

    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        start_locust_main()
        # consistency fix: exit after help/version, matching main_locust
        sys.exit(0)

    def get_arg_index(*target_args):
        # return the index right after the first flag found in argv, or None
        for arg in target_args:
            if arg not in sys.argv:
                continue
            return sys.argv.index(arg) + 1
        return None

    # set logging level
    loglevel_index = get_arg_index("-L", "--loglevel")
    if loglevel_index and loglevel_index < len(sys.argv):
        # bug fix: the original assigned back to loglevel_index instead of
        # loglevel, leaving loglevel undefined (NameError) below
        loglevel = sys.argv[loglevel_index]
    else:
        loglevel = "WARNING"
    logger.setup_logger(loglevel)

    # get testcase file path
    try:
        testcase_index = get_arg_index("-f", "--locustfile")
        assert testcase_index and testcase_index < len(sys.argv)
    except AssertionError:
        # message punctuation aligned with the main_locust sibling
        print("Testcase file is not specified, exit.")
        sys.exit(1)

    testcase_file_path = sys.argv[testcase_index]
    # replace the testcase path with the generated locustfile path
    sys.argv[testcase_index] = parse_locustfile(testcase_file_path)

    if "--processes" in sys.argv:
        # e.g. locusts -f locustfile.py --processes 4
        if "--no-web" in sys.argv:
            logger.log_error("conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)

        processes_index = sys.argv.index("--processes")
        processes_count_index = processes_index + 1

        if processes_count_index >= len(sys.argv):
            # processes count not given explicitly, e.g.
            # locusts -f locustfile.py --processes
            processes_count = multiprocessing.cpu_count()
            # bug fix: logger.log_warnig -> logger.log_warning (AttributeError);
            # message made consistent with the branch below
            logger.log_warning("processes count not specified, use {} by default".format(processes_count))
        else:
            try:
                # locusts -f locustfile.py --processes 4
                processes_count = int(sys.argv[processes_count_index])
                sys.argv.pop(processes_count_index)
            except ValueError:
                # locusts -f locustfile.py --processes -P 8888
                processes_count = multiprocessing.cpu_count()
                logger.log_warning("processes count not specified, use {} by default".format(processes_count))

        sys.argv.pop(processes_index)
        run_locusts_with_processes(sys.argv, processes_count)
    else:
        start_locust_main()
def main_locust():
    """ Performance test with locust: parse command line options and run commands. """
    # monkey patch ssl at beginning to avoid RecursionError when running locust.
    from gevent import monkey
    monkey.patch_ssl()

    import multiprocessing
    import sys
    from httprunner import logger

    try:
        from httprunner import locusts
    except ImportError:
        msg = "Locust is not installed, install first and try again.\n"
        msg += "install command: pip install locustio"
        print(msg)
        exit(1)

    # masquerade as the locust CLI; no args means show locust's help
    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        sys.argv.extend(["-h"])

    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        locusts.start_locust_main()
        sys.exit(0)

    # set logging level
    if "-L" in sys.argv:
        loglevel_index = sys.argv.index('-L') + 1
    elif "--loglevel" in sys.argv:
        loglevel_index = sys.argv.index('--loglevel') + 1
    else:
        loglevel_index = None

    if loglevel_index and loglevel_index < len(sys.argv):
        loglevel = sys.argv[loglevel_index]
    else:
        # default
        loglevel = "WARNING"

    logger.setup_logger(loglevel)

    # get testcase file path
    try:
        if "-f" in sys.argv:
            testcase_index = sys.argv.index('-f') + 1
        elif "--locustfile" in sys.argv:
            testcase_index = sys.argv.index('--locustfile') + 1
        else:
            testcase_index = None

        assert testcase_index and testcase_index < len(sys.argv)
    except AssertionError:
        print("Testcase file is not specified, exit.")
        sys.exit(1)

    testcase_file_path = sys.argv[testcase_index]
    # replace the testcase path in argv with the result of parse_locustfile
    sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)

    if "--processes" in sys.argv:
        """ locusts -f locustfile.py --processes 4 """
        # --processes spawns workers itself, which conflicts with --no-web
        if "--no-web" in sys.argv:
            logger.log_error(
                "conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)

        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1

        if processes_count_index >= len(sys.argv):
            """ do not specify processes count explicitly
                locusts -f locustfile.py --processes
            """
            processes_count = multiprocessing.cpu_count()
            logger.log_warning(
                "processes count not specified, use {} by default.".format(
                    processes_count))
        else:
            try:
                """ locusts -f locustfile.py --processes 4 """
                processes_count = int(sys.argv[processes_count_index])
                sys.argv.pop(processes_count_index)
            except ValueError:
                """ locusts -f locustfile.py --processes -P 8888 """
                processes_count = multiprocessing.cpu_count()
                logger.log_warning(
                    "processes count not specified, use {} by default.".format(
                        processes_count))

        # strip the --processes flag before handing argv to locust
        sys.argv.pop(processes_index)
        locusts.run_locusts_with_processes(sys.argv, processes_count)
    else:
        locusts.start_locust_main()
def main_hrun():
    """ API test: parse command line options and run commands. """
    import argparse
    from httprunner import logger
    from httprunner.__about__ import __description__, __version__
    from httprunner.api import HttpRunner
    from httprunner.compat import is_py2
    from httprunner.validator import validate_json_file
    from httprunner.utils import (create_scaffold, get_python2_retire_msg,
                                  prettify_json_file)

    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument('-V', '--version', dest='version', action='store_true',
                        help="show version")
    parser.add_argument('testcase_paths', nargs='*',
                        help="testcase file path")
    parser.add_argument('--log-level', default='INFO',
                        help="Specify logging level, default is INFO.")
    parser.add_argument('--log-file',
                        help="Write logs to specified file path.")
    parser.add_argument('--dot-env-path',
                        help="Specify .env file path, which is useful for keeping sensitive data.")
    parser.add_argument('--report-template',
                        help="specify report template path.")
    parser.add_argument('--report-dir',
                        help="specify report save directory.")
    parser.add_argument('--failfast', action='store_true', default=False,
                        help="Stop the test run on the first error or failure.")
    parser.add_argument('--save-tests', action='store_true', default=False,
                        help="Save loaded tests and parsed tests to JSON file.")
    parser.add_argument('--startproject',
                        help="Specify new project name.")
    parser.add_argument('--validate', nargs='*',
                        help="Validate JSON testcase format.")
    parser.add_argument('--prettify', nargs='*',
                        help="Prettify JSON testcase format.")

    args = parser.parse_args()
    logger.setup_logger(args.log_level, args.log_file)

    # warn users still on retired Python 2
    if is_py2:
        logger.log_warning(get_python2_retire_msg())

    if args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)

    if args.validate:
        validate_json_file(args.validate)
        exit(0)

    if args.prettify:
        prettify_json_file(args.prettify)
        exit(0)

    project_name = args.startproject
    if project_name:
        # scaffold a new project directory, then exit
        create_scaffold(project_name)
        exit(0)

    runner = HttpRunner(failfast=args.failfast,
                        save_tests=args.save_tests,
                        report_template=args.report_template,
                        report_dir=args.report_dir)

    try:
        for path in args.testcase_paths:
            runner.run(path, dot_env_path=args.dot_env_path)
    except Exception:
        logger.log_error("!!!!!!!!!! exception stage: {} !!!!!!!!!!".format(
            runner.exception_stage))
        raise

    return 0
def retrieve_data(self):
    """Main entry: fetch the swagger api-docs for this project and write
    testsuite/testcase JSON files derived from them.

    Returns:
        'error' on request/parse failure, otherwise None.
    """
    try:
        r = requests.get(self.url + '/v2/api-docs?group=sign-api').json()
        write_data(r, 'data.json')
        # r = get_json('D:\HttpRunner_framework\\testcases\data.json')
    except Exception as e:
        logger.log_error('请求swagger url 发生错误. 详情原因: {}'.format(e))
        return 'error'

    self.data = r['paths']  # interface (path) data
    # NOTE(review): the scheme is hard-coded to https regardless of the
    # original self.url — confirm the swagger host is always served via https.
    self.url = 'https://' + r['host']
    self.title = r['info']['title']
    self.http_suite['config']['name'] = self.title
    self.http_suite['config']['base_url'] = self.url
    self.definitions = r['definitions']  # request-body parameter definitions

    for tag_dict in r['tags']:
        self.tags_list.append(tag_dict['name'])

    # one testsuite entry per swagger tag
    i = 0
    for tag in self.tags_list:
        self.http_suite['testcases'].append({
            "name": "",
            "testcase": "",
            "variables": {}
        })
        self.http_suite['testcases'][i]['name'] = tag
        self.http_suite['testcases'][i][
            'testcase'] = 'testcases/' + tag + '.json'
        i += 1

    # write the suite file into the sibling "testsuites" directory
    suite_path = os.path.join(
        os.path.abspath(
            os.path.join(os.path.dirname("__file__"), os.path.pardir)),
        'testsuites')
    testcase_path = os.path.join(suite_path, 'demo_testsuite.json')

    write_data(self.http_suite, testcase_path)

    if isinstance(self.data, dict):
        # build one testcase file per tag from the matching interfaces
        for tag in self.tags_list:
            self.http_case = {
                "config": {
                    "name": "",
                    "base_url": "",
                    "variables": {}
                },
                "teststeps": []
            }
            for key, value in self.data.items():
                for method in list(value.keys()):
                    params = value[method]
                    if not params['deprecated']:  # skip deprecated interfaces
                        if params['tags'][0] == tag:
                            self.http_case['config']['name'] = params[
                                'tags'][0]
                            self.http_case['config']['base_url'] = self.url
                            case = self.retrieve_params(
                                params, key, method, tag)
                            self.http_case['teststeps'].append(case)
                    else:
                        logger.log_info(
                            'interface path: {}, if name: {}, is deprecated.'
                            .format(key, params['description']))
                        # NOTE(review): this break abandons the remaining
                        # methods of the current path once one deprecated
                        # method is seen — confirm this is intended.
                        break

            api_path = os.path.join(
                os.path.abspath(
                    os.path.join(os.path.dirname("__file__"),
                                 os.path.pardir)), 'testcases')
            testcase_path = os.path.join(api_path, tag + '.json')
            write_data(self.http_case, testcase_path)
    else:
        logger.log_error('解析接口数据异常!url 返回值 paths 中不是字典.')
        return 'error'
def delete_performance(path):
    """Recursively remove the performance log directory at *path*.

    Cleanup is best-effort: any OS-level failure (missing path,
    permission denied, file in use) is logged and swallowed so the
    caller is never aborted by log cleanup.
    """
    try:
        shutil.rmtree(path)
    except OSError:
        # FIX: only PermissionError was caught before, so a nonexistent
        # path (FileNotFoundError) or any other OSError crashed the caller.
        logger.log_error('权限错误,删除日志文件失败!{}'.format(path))
def run_test(self, testcase_dict):
    """Run a single testcase: prepare request, fire hooks, send, extract,
    validate.

    @param (dict) testcase_dict
        {
            "name": "testcase description",
            "skip": "skip this test unconditionally",
            "times": 3,
            "requires": [],         # optional, override
            "function_binds": {},   # optional, override
            "variables": [],        # optional, override
            "request": {
                "url": "http://127.0.0.1:5000/api/users/1000",
                "method": "POST",
                "headers": {
                    "Content-Type": "application/json",
                    "authorization": "$authorization",
                    "random": "$random"
                },
                "body": '{"name": "user", "password": "******"}'
            },
            "extract": [],              # optional
            "validate": [],             # optional
            "setup_hooks": [],          # optional
            "teardown_hooks": []        # optional
        }
    @return True or raise exception during test
    """
    # check skip
    self._handle_skip_feature(testcase_dict)

    # prepare: parse the request template and expose it to hook functions
    parsed_request = self.init_config(testcase_dict, level="testcase")
    self.context.bind_variables({"request": parsed_request}, level="testcase")

    try:
        url = parsed_request.pop('url')
        method = parsed_request.pop('method')
        group_name = parsed_request.pop("group", None)
    except KeyError:
        raise exception.ParamsError("URL or METHOD missed!")

    logger.log_info("{method} {url}".format(method=method, url=url))
    logger.log_debug(
        "request kwargs(raw): {kwargs}".format(kwargs=parsed_request))

    # setup hooks
    # FIX: copy the list before inserting; the previous in-place insert
    # mutated testcase_dict["setup_hooks"], so re-running the same dict
    # (e.g. "times": 3) accumulated one prepare-kwargs hook per run.
    setup_hooks = list(testcase_dict.get("setup_hooks", []))
    setup_hooks.insert(0, "${setup_hook_prepare_kwargs($request)}")
    self.do_hook_actions(setup_hooks)

    # request
    resp = self.http_client_session.request(
        method, url, name=group_name, **parsed_request)

    # teardown hooks (response is only bound when hooks actually exist)
    teardown_hooks = testcase_dict.get("teardown_hooks", [])
    if teardown_hooks:
        self.context.bind_variables({"response": resp}, level="testcase")
        self.do_hook_actions(teardown_hooks)

    # extract — "extractors" kept as a legacy alias of "extract"
    extractors = testcase_dict.get("extract", []) or testcase_dict.get(
        "extractors", [])
    resp_obj = response.ResponseObject(resp)
    extracted_variables_mapping = resp_obj.extract_response(extractors)
    self.context.bind_extracted_variables(extracted_variables_mapping)

    # validate — "validators" kept as a legacy alias of "validate"
    validators = testcase_dict.get("validate", []) or testcase_dict.get(
        "validators", [])
    try:
        self.context.validate(validators, resp_obj)
    except (exception.ParamsError, exception.ResponseError,
            exception.ValidationError, exception.ParseResponseError):
        # dump the full request for debugging (headers first, then the rest)
        err_req_msg = "request: \n"
        err_req_msg += "headers: {}\n".format(
            parsed_request.pop("headers", {}))
        for k, v in parsed_request.items():
            err_req_msg += "{}: {}\n".format(k, v)
        logger.log_error(err_req_msg)

        # dump the full response for debugging
        err_resp_msg = "response: \n"
        err_resp_msg += "status_code: {}\n".format(resp.status_code)
        err_resp_msg += "headers: {}\n".format(resp.headers)
        err_resp_msg += "body: {}\n".format(resp.text)
        logger.log_error(err_resp_msg)

        raise
def run3(self, path_or_testsets, mapping=None):
    """Run test suites sequentially, threading variables extracted from one
    suite's first response into the next suite's variable mapping.

    @param path_or_testsets: YAML/JSON testset file path or testset list.
    @param (dict) mapping: optional variables overriding the config block;
        merged on top of the cross-suite extracted variables.
    @return: self, with ``self.summary`` populated.
    """
    try:
        test_suite_list = init_test_suites2(path_or_testsets)
    except exceptions.TestcaseNotFound:
        logger.log_error(
            "Testcases not found in {}".format(path_or_testsets))
        sys.exit(1)

    self.summary = {
        "success": True,
        "stat": {},
        "time": {},
        "platform": get_platform(),
        "details": []
    }
    mapping = mapping or {}

    def accumulate_stat(origin_stat, new_stat):
        """ accumulate new_stat to origin_stat """
        for key in new_stat:
            if key not in origin_stat:
                origin_stat[key] = new_stat[key]
            elif key == "start_at":
                # start datetime: keep the earliest across suites
                origin_stat[key] = min(origin_stat[key], new_stat[key])
            else:
                origin_stat[key] += new_stat[key]

    # variables extracted from each testcase, carried over between suites
    extract_parameter = {}
    for test_suite in test_suite_list:
        # NOTE(review): only the FIRST testcase's "extract" spec is read;
        # the {} default would raise on [0] if "testcases" were missing —
        # presumably init_test_suites2 guarantees at least one entry; verify.
        extract_list = test_suite.get('testcases', {})[0].get('extract')
        # # merge variables extracted from previous testcases
        # variables = test_suite.testcase_parser.variables
        # test_suite.testcase_parser.update_binded_variables(dict(variables, **extract_parameter))
        try:
            # mapping wins over extracted variables on key collision
            test_suite = TestSuite_ext(test_suite,
                                       dict(extract_parameter, **mapping))
        except exceptions.ParamsError as e:
            raise Exception("出现异常,参数错误: {0}".format(e))
        except exceptions.VariableNotFound as e:
            raise Exception("出现异常,变量不存在: {0}".format(e))
        except BaseException as e:
            raise Exception("出现异常: {0}".format(e))

        result = self.runner.run(test_suite)
        test_suite_summary = get_summary(result)
        name = test_suite.config.get("name")
        test_infos = TestCaseInfo.objects.filter(name=name).all()
        test_infos = list(test_infos)
        if test_infos:
            # clear the cache populated by get_cache_case
            del_case_cache(test_infos[0].id)
        self.summary["success"] &= test_suite_summary["success"]
        test_suite_summary["name"] = name
        test_suite_summary["base_url"] = test_suite.config.get(
            "request", {}).get("base_url", "")
        test_suite_summary["output"] = test_suite.output
        print_output(test_suite_summary["output"])

        accumulate_stat(self.summary["stat"], test_suite_summary["stat"])
        accumulate_stat(self.summary["time"], test_suite_summary["time"])

        # extract variable values from the returned result
        if extract_list is not None and isinstance(extract_list, list):
            # only the first record's JSON response body is inspected
            records = test_suite_summary.get('records')
            data_result = records[0].get('meta_data').get('response').get(
                'json')
            if data_result is not None:
                for extract in extract_list:
                    print(extract)
                    for key, value in extract.items():
                        try:
                            # walk a dotted path like "content.data.0.id":
                            # dict hops use .get, list hops use int indices
                            extract_value = {}
                            extract_value['content'] = data_result
                            fields = str(value).split('.')
                            extract_success = True
                            for field in fields:
                                if isinstance(extract_value, dict):
                                    extract_value = extract_value.get(
                                        field)
                                elif isinstance(extract_value, list):
                                    if extract_value:
                                        extract_value = extract_value[int(
                                            field)]
                                else:
                                    # dead end: path does not resolve
                                    extract_success = False
                                    # raise Exception('提取变量失败,', '路径:', value, '下的结果不存在,请检查')
                            if extract_success:
                                extract_parameter[key] = extract_value
                        except AttributeError as e:
                            print('run运行错误:未提取到变量')
                        except Exception as e:
                            logger.log_error("出现错误.{0}".format(e))

        self.summary["details"].append(test_suite_summary)
    return self
def run(self, path_or_testcases, mapping=None):
    """Run tests loaded from ``path_or_testcases`` and aggregate a summary.

    Args:
        path_or_testcases (str/list/dict): YAML/JSON testcase file path,
            folder path, a list/set of such paths, or already-loaded
            testcase dict(s).
        mapping (dict): if specified, overrides variables in the config
            block of each testcase.

    Returns:
        HttpRunner: ``self``, with ``self.summary`` populated.
    """
    try:
        test_suite_list = init_test_suites(path_or_testcases, mapping)
    except exceptions.TestcaseNotFound:
        logger.log_error("Testcases not found in {}".format(path_or_testcases))
        sys.exit(1)

    summary = {
        "success": True,
        "stat": {},
        "time": {},
        "platform": get_platform(),
        "details": []
    }
    self.summary = summary

    def merge_stat(target, source):
        """Fold one suite's counters into the aggregated mapping."""
        for field, value in source.items():
            if field not in target:
                target[field] = value
            elif field == "start_at":
                # keep the earliest start datetime across suites
                target[field] = min(target[field], value)
            else:
                target[field] += value

    for suite in test_suite_list:
        suite_summary = get_summary(self.runner.run(suite))

        summary["success"] &= suite_summary["success"]
        suite_summary["name"] = suite.config.get("name")
        suite_summary["base_url"] = suite.config.get(
            "request", {}).get("base_url", "")
        suite_summary["output"] = suite.output
        utils.print_output(suite_summary["output"])

        merge_stat(summary["stat"], suite_summary["stat"])
        merge_stat(summary["time"], suite_summary["time"])

        summary["details"].append(suite_summary)

    return self
def run_test(self, testcase_dict):
    """Run a single testcase: parse request, run setup actions, send the
    request, extract variables, validate — with teardown actions always
    executed via ``finally``, even when validation raises.

    @param (dict) testcase_dict
        {
            "name": "testcase description",
            "skip": "skip this test unconditionally",
            "times": 3,
            "requires": [],         # optional, override
            "function_binds": {},   # optional, override
            "variables": [],        # optional, override
            "request": {
                "url": "http://127.0.0.1:5000/api/users/1000",
                "method": "POST",
                "headers": {
                    "Content-Type": "application/json",
                    "authorization": "$authorization",
                    "random": "$random"
                },
                "body": '{"name": "user", "password": "******"}'
            },
            "extract": [],      # optional
            "validate": [],     # optional
            "setup": [],        # optional
            "teardown": []      # optional
        }
    @return True or raise exception during test
    """
    # parse the request template with testcase-level variables
    parsed_request = self.init_config(testcase_dict, level="testcase")

    try:
        url = parsed_request.pop('url')
        method = parsed_request.pop('method')
        group_name = parsed_request.pop("group", None)
    except KeyError:
        raise exception.ParamsError("URL or METHOD missed!")

    extractors = testcase_dict.get("extract", [])
    validators = testcase_dict.get("validate", [])
    setup_actions = testcase_dict.get("setup", [])
    teardown_actions = testcase_dict.get("teardown", [])

    # may raise a skip exception before any action runs
    self._handle_skip_feature(testcase_dict)

    def setup_teardown(actions):
        # each action is a template expression evaluated in the test context
        for action in actions:
            self.context.eval_content(action)

    setup_teardown(setup_actions)

    resp = self.http_client_session.request(
        method,
        url,
        name=group_name,
        **parsed_request
    )
    resp_obj = response.ResponseObject(resp)

    extracted_variables_mapping = resp_obj.extract_response(extractors)
    self.context.bind_extracted_variables(extracted_variables_mapping)

    try:
        self.context.validate(validators, resp_obj)
    except (exception.ParamsError, exception.ResponseError,
            exception.ValidationError):
        # dump the full request for debugging (headers first, then the rest)
        err_req_msg = "request: \n"
        err_req_msg += "headers: {}\n".format(parsed_request.pop("headers", {}))
        for k, v in parsed_request.items():
            err_req_msg += "{}: {}\n".format(k, v)
        logger.log_error(err_req_msg)

        # dump the full response for debugging
        err_resp_msg = "response: \n"
        err_resp_msg += "status_code: {}\n".format(resp.status_code)
        err_resp_msg += "headers: {}\n".format(resp.headers)
        err_resp_msg += "body: {}\n".format(resp.text)
        logger.log_error(err_resp_msg)

        raise
    finally:
        # teardown actions always run, pass or fail
        setup_teardown(teardown_actions)
def request(self, method, url, name=None, **kwargs):
    """Construct and send a :py:class:`requests.Request`, signing it and
    recording full request/response details into ``self.meta_data``.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object; prepended with
        the configured hostname unless it is already absolute.
    :param name: (optional) placeholder, kept for compatibility with
        Locust's HttpSession.
    :param kwargs: any keyword arguments accepted by
        :py:meth:`requests.Session.request` (params, data, json, headers,
        cookies, files, auth, timeout, allow_redirects, proxies, stream,
        verify, cert, ...). ``timeout`` defaults to 120 seconds.
    :return: :py:class:`requests.Response` object.
    """
    def log_print(request_response):
        msg = "\n================== {} details ==================\n".format(
            request_response)
        for key, value in self.meta_data[request_response].items():
            msg += "{:<16} : {}\n".format(key, repr(value))
        logger.log_debug(msg)

    # record original request info
    self.meta_data["request"]["method"] = method
    self.meta_data["request"]["url"] = url
    self.meta_data["request"].update(kwargs)
    self.meta_data["request"]["start_timestamp"] = time.time()

    # prepend url with hostname unless it's already an absolute URL
    url = self._build_url(url)

    # sign the request: append timestamp and signature headers.
    # signature rule:
    #   md5(md5(token).toUpperCase() + timestamp + JSON.stringify(body || {})).toUpperCase()
    # FIX: kwargs.get('headers', {}) returned a throwaway dict when the
    # caller passed no headers, so timestamp/sign were silently dropped;
    # setdefault makes the headers dict part of kwargs in all cases.
    headers = kwargs.setdefault('headers', {})
    now_timestamp = int(round(time.time() * 1000))
    datas = json.dumps(kwargs.get('json', {}))
    token = headers.get('token', '')
    params = self.md5(token) + str(now_timestamp) + datas
    sign = self.md5(params)
    headers['timestamp'] = str(now_timestamp)
    headers['sign'] = sign

    kwargs.setdefault("timeout", 120)
    response = self._send_request_safe_mode(method, url, **kwargs)

    # record the consumed time
    self.meta_data["response"]["response_time_ms"] = \
        round((time.time() - self.meta_data["request"]["start_timestamp"]) * 1000, 2)
    # FIX: elapsed.microseconds is only the sub-second component
    # (0-999999), which under-reported any response slower than 1s;
    # total_seconds() covers the whole duration.
    self.meta_data["response"]["elapsed_ms"] = \
        response.elapsed.total_seconds() * 1000.0

    # record actual request info (after redirects: first request in history)
    self.meta_data["request"]["url"] = (response.history
                                        and response.history[0]
                                        or response).request.url
    self.meta_data["request"]["headers"] = dict(response.request.headers)
    self.meta_data["request"]["body"] = response.request.body

    # log request details in debug mode
    log_print("request")

    # record response info
    self.meta_data["response"]["ok"] = response.ok
    self.meta_data["response"]["url"] = response.url
    self.meta_data["response"]["status_code"] = response.status_code
    self.meta_data["response"]["reason"] = response.reason
    self.meta_data["response"]["headers"] = dict(response.headers)
    self.meta_data["response"]["cookies"] = response.cookies or {}
    self.meta_data["response"]["encoding"] = response.encoding
    self.meta_data["response"]["content"] = response.content
    self.meta_data["response"]["text"] = response.text
    self.meta_data["response"]["content_type"] = response.headers.get(
        "Content-Type", "")

    try:
        self.meta_data["response"]["json"] = response.json()
    except ValueError:
        self.meta_data["response"]["json"] = None

    # get the length of the content, but if the argument stream is set to
    # True, we take the size from the content-length header, in order to
    # not trigger fetching of the body
    if kwargs.get("stream", False):
        self.meta_data["response"]["content_size"] = int(
            self.meta_data["response"]["headers"].get("content-length") or 0)
    else:
        self.meta_data["response"]["content_size"] = len(
            response.content or "")

    # log response details in debug mode
    log_print("response")

    try:
        response.raise_for_status()
    except RequestException as e:
        logger.log_error(u"{exception}".format(exception=str(e)))
    else:
        logger.log_info(
            """status_code: {}, response_time(ms): {} ms, response_length: {} bytes"""
            .format(self.meta_data["response"]["status_code"],
                    self.meta_data["response"]["response_time_ms"],
                    self.meta_data["response"]["content_size"]))

    return response
def run_test(self, teststep_dict):
    """Run a single teststep: prepare, fire setup hooks, validate the HTTP
    method, send the request, fire teardown hooks, extract and validate.

    Args:
        teststep_dict (dict): teststep info
            {
                "name": "teststep description",
                "skip": "skip this test unconditionally",
                "times": 3,
                "variables": [],            # optional, override
                "request": {
                    "url": "http://127.0.0.1:5000/api/users/1000",
                    "method": "POST",
                    "headers": {
                        "Content-Type": "application/json",
                        "authorization": "$authorization",
                        "random": "$random"
                    },
                    "body": '{"name": "user", "password": "******"}'
                },
                "extract": [],              # optional
                "validate": [],             # optional
                "setup_hooks": [],          # optional
                "teardown_hooks": []        # optional
            }

    Raises:
        exceptions.ParamsError
        exceptions.ValidationFailure
        exceptions.ExtractFailure
    """
    # check skip
    self._handle_skip_feature(teststep_dict)

    # prepare — "extractors"/"validators" kept as legacy aliases
    extractors = teststep_dict.get("extract", []) or teststep_dict.get(
        "extractors", [])
    validators = teststep_dict.get("validate", []) or teststep_dict.get(
        "validators", [])
    parsed_request = self.init_config(teststep_dict, level="teststep")
    self.context.update_teststep_variables_mapping("request", parsed_request)

    # setup hooks
    # FIX: copy the list before inserting; the previous in-place insert
    # mutated teststep_dict["setup_hooks"], so re-running the same dict
    # (e.g. "times": 3) accumulated one prepare-kwargs hook per run.
    setup_hooks = list(teststep_dict.get("setup_hooks", []))
    setup_hooks.insert(0, "${setup_hook_prepare_kwargs($request)}")
    self.do_hook_actions(setup_hooks)

    try:
        url = parsed_request.pop('url')
        method = parsed_request.pop('method')
        group_name = parsed_request.pop("group", None)
    except KeyError:
        raise exceptions.ParamsError("URL or METHOD missed!")

    # TODO: move method validation to json schema
    valid_methods = [
        "GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"
    ]
    if method.upper() not in valid_methods:
        err_msg = u"Invalid HTTP method! => {}\n".format(method)
        err_msg += "Available HTTP methods: {}".format(
            "/".join(valid_methods))
        logger.log_error(err_msg)
        raise exceptions.ParamsError(err_msg)

    logger.log_info("{method} {url}".format(method=method, url=url))
    logger.log_debug(
        "request kwargs(raw): {kwargs}".format(kwargs=parsed_request))

    # request
    resp = self.http_client_session.request(
        method, url, name=group_name, **parsed_request)
    resp_obj = response.ResponseObject(resp)

    # teardown hooks (response is only bound when hooks actually exist)
    teardown_hooks = teststep_dict.get("teardown_hooks", [])
    if teardown_hooks:
        logger.log_info("start to run teardown hooks")
        self.context.update_teststep_variables_mapping(
            "response", resp_obj)
        self.do_hook_actions(teardown_hooks)

    # extract
    extracted_variables_mapping = resp_obj.extract_response(extractors)
    self.context.update_testcase_runtime_variables_mapping(
        extracted_variables_mapping)

    # validate
    try:
        self.evaluated_validators = self.context.validate(
            validators, resp_obj)
    except (exceptions.ParamsError,
            exceptions.ValidationFailure, exceptions.ExtractFailure):
        # dump the full request for debugging (headers first, then the rest)
        err_req_msg = "request: \n"
        err_req_msg += "headers: {}\n".format(
            parsed_request.pop("headers", {}))
        for k, v in parsed_request.items():
            err_req_msg += "{}: {}\n".format(k, repr(v))
        logger.log_error(err_req_msg)

        # dump the full response for debugging
        err_resp_msg = "response: \n"
        err_resp_msg += "status_code: {}\n".format(resp_obj.status_code)
        err_resp_msg += "headers: {}\n".format(resp_obj.headers)
        err_resp_msg += "body: {}\n".format(repr(resp_obj.text))
        logger.log_error(err_resp_msg)

        raise
def main():
    """ Performance test with locust: parse command line options and run commands.

    Rewrites sys.argv in place so it can be handed to locust: argv[0]
    becomes 'locust', the locustfile argument is replaced by a parsed
    temporary file, and the custom --processes flag (plus its count) is
    stripped before delegating to locust.
    """
    # make argv look like a direct locust invocation
    sys.argv[0] = 'locust'

    # no arguments at all: show locust's help
    if len(sys.argv) == 1:
        sys.argv.extend(["-h"])

    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        # NOTE(review): assumes start_locust_main() does not return here
        # (otherwise execution would fall through below) — confirm.
        start_locust_main()

    def get_arg_index(*target_args):
        # index of the VALUE following the first matching flag, or None
        for arg in target_args:
            if arg not in sys.argv:
                continue
            return sys.argv.index(arg) + 1
        return None

    # set logging level
    loglevel_index = get_arg_index("-L", "--loglevel")
    if loglevel_index and loglevel_index < len(sys.argv):
        loglevel = sys.argv[loglevel_index]
    else:
        # default
        loglevel = "WARNING"

    logger.setup_logger(loglevel)

    # get testcase file path (mandatory)
    try:
        testcase_index = get_arg_index("-f", "--locustfile")
        assert testcase_index and testcase_index < len(sys.argv)
    except AssertionError:
        print("Testcase file is not specified, exit.")
        sys.exit(1)

    # replace the testcase path with the generated locustfile path
    testcase_file_path = sys.argv[testcase_index]
    sys.argv[testcase_index] = parse_locustfile(testcase_file_path)

    if "--processes" in sys.argv:
        """ locusts -f locustfile.py --processes 4 """
        # --processes spawns slaves, which requires the web UI master
        if "--no-web" in sys.argv:
            logger.log_error(
                "conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)

        processes_index = sys.argv.index('--processes')

        processes_count_index = processes_index + 1

        if processes_count_index >= len(sys.argv):
            """ do not specify processes count explicitly
                locusts -f locustfile.py --processes
            """
            processes_count = multiprocessing.cpu_count()
            logger.log_warning(
                "processes count not specified, use {} by default.".format(
                    processes_count))
        else:
            try:
                """ locusts -f locustfile.py --processes 4 """
                processes_count = int(sys.argv[processes_count_index])
                # remove the count value so locust never sees it
                sys.argv.pop(processes_count_index)
            except ValueError:
                """ locusts -f locustfile.py --processes -P 8888 """
                # next token was another flag, not a count
                processes_count = multiprocessing.cpu_count()
                logger.log_warning(
                    "processes count not specified, use {} by default.".format(
                        processes_count))

        # remove the --processes flag itself before delegating
        sys.argv.pop(processes_index)
        run_locusts_with_processes(sys.argv, processes_count)
    else:
        start_locust_main()
def request(self, method, url, name=None, **kwargs):
    """ Constructs and sends a :py:class:`requests.Request`.
    Returns :py:class:`requests.Response` object.
    Also records request/response details into ``self.meta_data``.

    :param method:
        method for the new :class:`Request` object.
    :param url:
        URL for the new :class:`Request` object; prepended with the
        configured hostname unless it is already absolute.
    :param name: (optional)
        Placeholder, make compatible with Locust's HttpSession
    :param params: (optional)
        Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional)
        Dictionary or bytes to send in the body of the :class:`Request`.
    :param headers: (optional)
        Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional)
        Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional)
        Dictionary of ``'filename': file-like-objects`` for multipart encoding upload.
    :param auth: (optional)
        Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional)
        How long to wait for the server to send data before giving up, as a float,
        or a (`connect timeout, read timeout <user/advanced.html#timeouts>`_) tuple.
        Defaults to 120 seconds.
    :type timeout: float or tuple
    :param allow_redirects: (optional)
        Set to True by default.
    :type allow_redirects: bool
    :param proxies: (optional)
        Dictionary mapping protocol to the URL of the proxy.
    :param stream: (optional)
        whether to immediately download the response content. Defaults to ``False``.
    :param verify: (optional)
        if ``True``, the SSL cert will be verified.
        A CA_BUNDLE path can also be provided.
    :param cert: (optional)
        if String, path to ssl client cert file (.pem).
        If Tuple, ('cert', 'key') pair.
    """
    # store detail data of request and response
    self.meta_data = {}

    # prepend url with hostname unless it's already an absolute URL
    url = self._build_url(url)

    logger.log_info("{method} {url}".format(method=method, url=url))
    logger.log_debug("request kwargs(raw): {kwargs}".format(kwargs=kwargs))

    # set up pre_request hook for attaching meta data to the request object
    self.meta_data["method"] = method

    kwargs.setdefault("timeout", 120)

    # wall-clock send time; used below to compute response_time
    self.meta_data["request_time"] = time.time()
    response = self._send_request_safe_mode(method, url, **kwargs)

    # record the consumed time, in integer milliseconds
    self.meta_data["response_time"] = int(
        (time.time() - self.meta_data["request_time"]) * 1000)
    # server-side elapsed time as reported by requests, in seconds
    self.meta_data["elapsed"] = response.elapsed.total_seconds()

    # actual requested path: first request when redirects occurred
    self.meta_data["url"] = (response.history and response.history[0] or response)\
        .request.path_url

    self.meta_data["request_headers"] = response.request.headers
    self.meta_data["request_body"] = response.request.body
    self.meta_data["status_code"] = response.status_code
    self.meta_data["response_headers"] = response.headers
    self.meta_data["response_body"] = response.text

    logger.log_debug("response status_code: {}".format(self.meta_data["status_code"]))
    logger.log_debug("response headers: {}".format(self.meta_data["response_headers"]))
    logger.log_debug("response body: {}".format(self.meta_data["response_body"]))

    # get the length of the content, but if the argument stream is set to True, we take
    # the size from the content-length header, in order to not trigger fetching of the body
    if kwargs.get("stream", False):
        self.meta_data["content_size"] = int(
            self.meta_data["response_headers"].get("content-length") or 0)
    else:
        self.meta_data["content_size"] = len(response.content or "")

    # HTTP errors are logged, never raised to the caller
    try:
        response.raise_for_status()
    except RequestException as e:
        logger.log_error(u"{exception}".format(exception=str(e)))
    else:
        logger.log_info(
            """status_code: {}, response_time: {} ms, response_length: {} bytes""".format(
                self.meta_data["status_code"],
                self.meta_data["response_time"],
                self.meta_data["content_size"]
            )
        )

    return response
def load_tests(path, dot_env_path=None):
    """Load testcases from *path*, extending and merging them with
    api/testcase definitions.

    Args:
        path (str/list/set): testcase file path, folder path, or a
            list/set container mixing files and folders; folders are
            expanded recursively.
        dot_env_path (str): specified .env file path.

    Returns:
        list: testcase dicts, one per file; each carries its source
        ``path`` and project ``refs`` under its "config" key.

    Raises:
        exceptions.FileNotFound: if *path* does not exist.
    """
    # container input: recurse per unique entry and flatten the results
    if isinstance(path, (list, set)):
        collected = []
        for each_path in set(path):
            sub_testcases = load_tests(each_path, dot_env_path)
            if sub_testcases:
                collected.extend(sub_testcases)
        return collected

    if not os.path.exists(path):
        err_msg = "path not exist: {}".format(path)
        logger.log_error(err_msg)
        raise exceptions.FileNotFound(err_msg)

    if not os.path.isabs(path):
        path = os.path.join(os.getcwd(), path)

    if os.path.isdir(path):
        # folder: expand to its files and recurse
        testcases_list = load_tests(load_folder_files(path), dot_env_path)
    elif os.path.isfile(path):
        try:
            raw_testcase = load_file(path)
            project_mapping = load_project_tests(path, dot_env_path)
            testcase = _load_testcase(raw_testcase, project_mapping)
            testcase["config"]["path"] = path
            testcase["config"]["refs"] = project_mapping
            testcases_list = [testcase]
        except exceptions.FileFormatError:
            # unparsable files are skipped, not fatal
            testcases_list = []

    return testcases_list
def _run_test_once(self, test_dict, wait_validators=None):
    """Run one test attempt: send the request, run hooks/extractors, then
    validate.

    If *wait_validators* is given, they are checked first; a failure there
    raises ValidationFailure immediately (used by a retry/wait caller),
    while the regular "validate" results are returned to the caller.

    Returns:
        tuple: (validate_pass (bool), failures (list)).
    """
    # clear meta data first to ensure independence for each test
    self.__clear_test_data()

    # check skip
    self._handle_skip_feature(test_dict)

    # prepare
    test_dict = utils.lower_test_dict_keys(test_dict)
    test_variables = test_dict.get("variables", {})
    self.session_context.init_test_variables(test_variables)

    # teststep name
    test_name = test_dict.get("name", "")

    # parse test request
    raw_request = test_dict.get('request', {})
    parsed_test_request = self.session_context.eval_content(raw_request)
    self.session_context.update_test_variables("request", parsed_test_request)

    # setup hooks
    setup_hooks = test_dict.get("setup_hooks", [])
    if setup_hooks:
        self.do_hook_actions(setup_hooks, "setup")

    try:
        url = parsed_test_request.pop('url')
        method = parsed_test_request.pop('method')
        # inherit the runner-level SSL verify setting unless overridden
        parsed_test_request.setdefault("verify", self.verify)
        group_name = parsed_test_request.pop("group", None)
    except KeyError:
        raise exceptions.ParamsError("URL or METHOD missed!")

    # TODO: move method validation to json schema
    valid_methods = [
        "GET", "HEAD", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"
    ]
    if method.upper() not in valid_methods:
        err_msg = u"Invalid HTTP method! => {}\n".format(method)
        err_msg += "Available HTTP methods: {}".format(
            "/".join(valid_methods))
        logger.log_error(err_msg)
        raise exceptions.ParamsError(err_msg)

    logger.log_info("{method} {url}".format(method=method, url=url))
    logger.log_debug(
        "request kwargs(raw): {kwargs}".format(kwargs=parsed_test_request))

    # request — fall back to the teststep name for grouping
    resp = self.http_client_session.request(method,
                                            url,
                                            name=(group_name or test_name),
                                            **parsed_test_request)
    resp_obj = response.ResponseObject(resp)

    # teardown hooks (response is only bound when hooks actually exist)
    teardown_hooks = test_dict.get("teardown_hooks", [])
    if teardown_hooks:
        self.session_context.update_test_variables("response", resp_obj)
        self.do_hook_actions(teardown_hooks, "teardown")

    # extract
    extractors = test_dict.get("extract", {})
    extracted_variables_mapping = resp_obj.extract_response(extractors)
    self.session_context.update_session_variables(
        extracted_variables_mapping)

    # validate wait conditions first: their failure aborts immediately
    if wait_validators:
        wait_validate_pass, wait_failures = self.session_context.validate(
            wait_validators, resp_obj)
        if not wait_validate_pass:
            logger.log_error(
                "====== During the wait: the expect conditions can not Meet the requirements! ======"
            )
            failures_string = "\n".join(
                [failure for failure in wait_failures])
            self.validation_results = self.session_context.validation_results
            raise exceptions.ValidationFailure(failures_string)

    # regular validation: results are returned rather than raised here
    validators = test_dict.get("validate", [])
    validate_pass, failures = self.session_context.validate(
        validators, resp_obj)
    self.validation_results = self.session_context.validation_results
    # NOTE(review): earlier versions logged full request/response details
    # and raised on validation failure here (see VCS history); this
    # variant intentionally returns (validate_pass, failures) instead.
    return validate_pass, failures
def _extract_field_with_delimiter(self, field): """ response content could be json or html text. @param (str) field should be string joined by delimiter. e.g. "status_code" "headers" "cookies" "content" "headers.content-type" "content.person.name.first_name" """ try: # string.split(sep=None, maxsplit=-1) -> list of strings # e.g. "content.person.name" => ["content", "person.name"] try: top_query, sub_query = field.split('.', 1) except ValueError: top_query = field sub_query = None if top_query == "cookies": cookies = self.cookies try: return cookies[sub_query] except KeyError: err_msg = u"Failed to extract attribute from cookies!\n" err_msg += u"cookies: {}\n".format(cookies) err_msg += u"attribute: {}".format(sub_query) logger.log_error(err_msg) raise exception.ParamsError(err_msg) try: top_query_content = getattr(self, top_query) except AttributeError: err_msg = u"Failed to extract attribute from response object: resp_obj.{}".format(top_query) logger.log_error(err_msg) raise exception.ParamsError(err_msg) if sub_query: if not isinstance(top_query_content, (dict, CaseInsensitiveDict, list)): try: # TODO: remove compatibility for content, text if isinstance(top_query_content, bytes): top_query_content = top_query_content.decode("utf-8") if isinstance(top_query_content, PreparedRequest): top_query_content = top_query_content.__dict__ else: top_query_content = json.loads(top_query_content) except json.decoder.JSONDecodeError: err_msg = u"Failed to extract data with delimiter!\n" err_msg += u"response content: {}\n".format(self.content) err_msg += u"regex: {}\n".format(field) logger.log_error(err_msg) raise exception.ParamsError(err_msg) # e.g. key: resp_headers_content_type, sub_query = "content-type" return utils.query_json(top_query_content, sub_query) else: # e.g. 
key: resp_status_code, resp_content return top_query_content except AttributeError: err_msg = u"Failed to extract value from response!\n" err_msg += u"response content: {}\n".format(self.content) err_msg += u"extract field: {}\n".format(field) logger.log_error(err_msg) raise exception.ParamsError(err_msg)
def load_tests(path, dot_env_path=None):
    """ Load testcases from a file or folder path, extending and merging
    them with api/testcase definitions.

    Args:
        path (str): testcase/testsuite file or folder path, absolute or
            relative.
        dot_env_path (str): specified .env file path.

    Returns:
        dict: tests mapping containing "project_mapping" plus, when present,
            "testsuites", "testcases" and "apis" lists — one entry per
            successfully loaded file.

    Raises:
        exceptions.FileNotFound: if path does not exist.
    """
    if not os.path.exists(path):
        err_msg = "path not exist: {}".format(path)
        logger.log_error(err_msg)
        raise exceptions.FileNotFound(err_msg)

    if not os.path.isabs(path):
        path = os.path.join(os.getcwd(), path)

    load_project_tests(path, dot_env_path)
    tests_mapping = {"project_mapping": project_mapping}

    # loaded content "type" -> key of the list it is collected into
    buckets = {
        "testsuite": "testsuites",
        "testcase": "testcases",
        "api": "apis"
    }

    def __collect(file_path):
        try:
            content = load_test_file(file_path)
        except exceptions.FileFormatError:
            logger.log_warning("Invalid test file format: {}".format(file_path))
            return

        if not content:
            return

        bucket = buckets.get(content["type"])
        if bucket:
            tests_mapping.setdefault(bucket, []).append(content)

    if os.path.isdir(path):
        for file_path in load_folder_files(path):
            __collect(file_path)
    elif os.path.isfile(path):
        __collect(path)

    return tests_mapping
def request(self, host, port, service, method, params="", name=None, **kwargs):
    """ Send a request to the dubbo server, record request/response meta
    data, and return the response.

    Args:
        host (str): dubbo server ip or url
        port (int): dubbo service port
        service (str): the service to request
        method (str): the method to request
        params: params the method needs; defaults to ""
            (presumably dict or str — TODO confirm against _send_request_safe_mode)
        name (str): test name recorded into meta data
        **kwargs: extra options forwarded to _send_request_safe_mode

    Returns:
        the response object returned by _send_request_safe_mode.
    """
    self.init_meta_data()

    # record test name
    self.meta_data["name"] = name

    # record original request info
    self.meta_data["data"][0]["request"]["service"] = service
    self.meta_data["data"][0]["request"]["method"] = method

    # TODO: design of optional parameters
    # kwargs.setdefault("timeout", 120)
    # self.meta_data["data"][0]["request"].update(kwargs)

    # time the request to compute elapsed milliseconds
    start_timestamp = time.time()
    response = self._send_request_safe_mode(host, port, service, method, params, **kwargs)
    response_time_ms = round((time.time() - start_timestamp) * 1000, 2)

    # response length: string length of the "body" field when the response
    # is a dict; -1 when the length cannot be determined
    if isinstance(response, dict):
        response_length = len(str(response.get("body", "")))
    else:
        response_length = -1

    # TODO: design of optional parameters
    # # get the length of the content, but if the argument stream is set to True, we take
    # # the size from the content-length header, in order to not trigger fetching of the body
    # if kwargs.get("stream", False):
    #     content_size = int(dict(response.headers).get("content-length") or 0)
    # else:
    #     content_size = len(response.content or "")

    # record the consumed time
    self.meta_data["stat"] = {
        "response_time_ms": response_time_ms,
        "elapsed_ms": response_time_ms,
        "content_size": response_length
    }

    # record request and response histories, include 30X redirection
    self.response_list.append(response)
    self.meta_data["data"] = [
        self.get_req_resp_record(resp_obj)
        for resp_obj in self.response_list
    ]

    # TODO
    try:
        if isinstance(response, DubboApiResponse):
            response.raise_for_status()
    except timeout as e:
        # NOTE(review): `timeout` looks like socket.timeout — confirm the
        # import; raise_for_status failures of other types propagate
        logger.log_error(u"{exception}".format(exception=str(e)))
    else:
        logger.log_info(
            """, response_time(ms): {} ms, response_length: {} bytes\n""".
            format(response_time_ms, response_length))

    return response
def _extract_field_with_delimiter(self, field):
    """ Pull a value out of the response via a '.'-joined field expression.

    The response content may be json or html text.

    Supported expressions, e.g.:
        "status_code", "headers", "cookies", "content",
        "headers.content-type", "content.person.name.first_name"
    """
    try:
        # only the first '.' separates the top-level attribute from the
        # remainder, e.g. "content.person.name" => ("content", "person.name")
        if '.' in field:
            top_level, remainder = field.split('.', 1)
        else:
            top_level, remainder = field, None

        if top_level == "cookies":
            cookies = self.cookies
            try:
                return cookies[remainder]
            except KeyError:
                err_msg = u"Failed to extract attribute from cookies!\n"
                err_msg += u"cookies: {}\n".format(cookies)
                err_msg += u"attribute: {}".format(remainder)
                logger.log_error(err_msg)
                raise exception.ParamsError(err_msg)

        try:
            value = getattr(self, top_level)
        except AttributeError:
            err_msg = u"Failed to extract attribute from response object: resp_obj.{}".format(top_level)
            logger.log_error(err_msg)
            raise exception.ParamsError(err_msg)

        if not remainder:
            # e.g. key: resp_status_code, resp_content
            return value

        if not isinstance(value, (dict, CaseInsensitiveDict, list)):
            # not queryable yet: coerce the raw value into a structure
            try:
                # TODO: remove compatibility for content, text
                if isinstance(value, bytes):
                    value = value.decode("utf-8")

                if isinstance(value, PreparedRequest):
                    value = value.__dict__
                else:
                    value = json.loads(value)
            except json.decoder.JSONDecodeError:
                err_msg = u"Failed to extract data with delimiter!\n"
                err_msg += u"response content: {}\n".format(self.content)
                err_msg += u"regex: {}\n".format(field)
                logger.log_error(err_msg)
                raise exception.ParamsError(err_msg)

        # e.g. key: resp_headers_content_type, remainder = "content-type"
        return utils.query_json(value, remainder)

    except AttributeError:
        err_msg = u"Failed to extract value from response!\n"
        err_msg += u"response content: {}\n".format(self.content)
        err_msg += u"extract field: {}\n".format(field)
        logger.log_error(err_msg)
        raise exception.ParamsError(err_msg)
def validate(self, validators):
    """ Make validation with comparators.

    Args:
        validators (list): parsed validators; each item is either a
            parser.LazyFunction comparator or a dict with
            type == "python_script".

    Raises:
        exceptions.ValidationFailure: if any validator fails, or a
            validator has not been parsed into a LazyFunction.
    """
    self.validation_results = {}
    if not validators:
        return

    logger.log_debug("start to validate.")

    validate_pass = True
    failures = []

    for validator in validators:
        if isinstance(validator, dict) and validator.get("type") == "python_script":
            # run inline python-script validator
            validator_dict, ex = self.validate_script(validator["script"])
            if ex:
                validate_pass = False
                failures.append(ex)
            self.validation_results["validate_script"] = validator_dict
            continue

        if "validate_extractor" not in self.validation_results:
            self.validation_results["validate_extractor"] = []

        # validator should be LazyFunction object
        if not isinstance(validator, parser.LazyFunction):
            raise exceptions.ValidationFailure(
                "validator should be parsed first: {}".format(validators))

        # evaluate validator args with context variable mapping.
        validator_args = validator.get_args()
        check_item, expect_item = validator_args
        check_value = self.__eval_validator_check(check_item)
        expect_value = self.__eval_validator_expect(expect_item)
        validator.update_args([check_value, expect_value])

        comparator = validator.func_name
        validator_dict = {
            "comparator": comparator,
            "check": check_item,
            "check_value": check_value,
            "expect": expect_item,
            "expect_value": expect_value
        }
        validate_msg = "\nvalidate: {} {} {}({})".format(
            check_item,
            comparator,
            expect_value,
            type(expect_value).__name__
        )

        try:
            validator.to_value(self.session_context.test_variables_mapping)
            validator_dict["check_result"] = "pass"
            validate_msg += "\t==> pass"
            logger.log_debug(validate_msg)
        except (AssertionError, TypeError):
            validate_pass = False
            validator_dict["check_result"] = "fail"
            validate_msg += "\t==> fail"
            validate_msg += "\n{}({}) {} {}({})".format(
                check_value,
                type(check_value).__name__,
                comparator,
                expect_value,
                type(expect_value).__name__
            )
            logger.log_error(validate_msg)
            failures.append(validate_msg)

        self.validation_results["validate_extractor"].append(validator_dict)

        # restore validator args, in case of running multiple times
        validator.update_args(validator_args)

    if not validate_pass:
        # coerce every failure to str: the validate_script branch appends the
        # raw `ex` object, and str.join raises TypeError on non-str items
        failures_string = "\n".join(str(failure) for failure in failures)
        raise exceptions.ValidationFailure(failures_string)
def main_hrun():
    """ API test: parse command line options and run commands.

    Returns:
        int: 0 if the whole run succeeded, 1 otherwise.
    """
    parser = argparse.ArgumentParser(description=__description__)
    parser.add_argument(
        '-V', '--version', dest='version', action='store_true',
        help="show version")
    parser.add_argument(
        'testset_paths', nargs='*',
        help="testset file path")
    parser.add_argument(
        '--no-html-report', action='store_true', default=False,
        help="do not generate html report.")
    parser.add_argument(
        '--html-report-name',
        help="specify html report name, only effective when generating html report.")
    parser.add_argument(
        '--html-report-template',
        help="specify html report template path.")
    parser.add_argument(
        '--log-level', default='INFO',
        help="Specify logging level, default is INFO.")
    parser.add_argument(
        '--log-file',
        help="Write logs to specified file path.")
    parser.add_argument(
        '--dot-env-path',
        help="Specify .env file path, which is useful for keeping sensitive data.")
    parser.add_argument(
        '--failfast', action='store_true', default=False,
        help="Stop the test run on the first error or failure.")
    parser.add_argument(
        '--startproject',
        help="Specify new project name.")
    parser.add_argument(
        '--validate', nargs='*',
        help="Validate JSON testset format.")
    parser.add_argument(
        '--prettify', nargs='*',
        help="Prettify JSON testset format.")

    args = parser.parse_args()
    logger.setup_logger(args.log_level, args.log_file)

    if is_py2:
        logger.log_warning(get_python2_retire_msg())

    if args.version:
        logger.color_print("{}".format(__version__), "GREEN")
        exit(0)

    if args.validate:
        validate_json_file(args.validate)
        exit(0)

    if args.prettify:
        prettify_json_file(args.prettify)
        exit(0)

    project_name = args.startproject
    if project_name:
        project_path = os.path.join(os.getcwd(), project_name)
        create_scaffold(project_path)
        exit(0)

    # construct the runner OUTSIDE the try block: if construction itself
    # raised inside it, the handler's reference to `runner.exception_stage`
    # would hit a NameError and mask the real error
    runner = HttpRunner(failfast=args.failfast, dot_env_path=args.dot_env_path)
    try:
        runner.run(args.testset_paths)
    except Exception:
        logger.log_error("!!!!!!!!!! exception stage: {} !!!!!!!!!!".format(
            runner.exception_stage))
        raise

    if not args.no_html_report:
        runner.gen_html_report(
            html_report_name=args.html_report_name,
            html_report_template=args.html_report_template)

    summary = runner.summary
    return 0 if summary["success"] else 1
def run_test(self, testcase_dict):
    """ Run a single testcase.

    @param (dict) testcase_dict
        {
            "name": "testcase description",
            "skip": "skip this test unconditionally",
            "times": 3,
            "requires": [],         # optional, override
            "function_binds": {},   # optional, override
            "variables": [],        # optional, override
            "request": {
                "url": "http://127.0.0.1:5000/api/users/1000",
                "method": "POST",
                "headers": {
                    "Content-Type": "application/json",
                    "authorization": "$authorization",
                    "random": "$random"
                },
                "body": '{"name": "user", "password": "******"}'
            },
            "extract": [],          # optional
            "validate": [],         # optional
            "setup_hooks": [],      # optional
            "teardown_hooks": []    # optional
        }
    @return True or raise exception during test
    """
    # check skip
    self._handle_skip_feature(testcase_dict)

    # prepare
    parsed_request = self.init_config(testcase_dict, level="testcase")
    self.context.bind_testcase_variable("request", parsed_request)

    # setup hooks: build a fresh list instead of insert()-ing into the list
    # taken from testcase_dict — mutating it in place would prepend the
    # prepare hook again on every run when the testcase is executed
    # multiple times (e.g. "times": 3)
    setup_hooks = ["${setup_hook_prepare_kwargs($request)}"]
    setup_hooks.extend(testcase_dict.get("setup_hooks", []))
    self.do_hook_actions(setup_hooks)

    try:
        url = parsed_request.pop('url')
        method = parsed_request.pop('method')
        group_name = parsed_request.pop("group", None)
    except KeyError:
        raise exception.ParamsError("URL or METHOD missed!")

    logger.log_info("{method} {url}".format(method=method, url=url))
    logger.log_debug("request kwargs(raw): {kwargs}".format(kwargs=parsed_request))

    # request
    resp = self.http_client_session.request(
        method,
        url,
        name=group_name,
        **parsed_request
    )
    resp_obj = response.ResponseObject(resp)

    # teardown hooks
    teardown_hooks = testcase_dict.get("teardown_hooks", [])
    if teardown_hooks:
        self.context.bind_testcase_variable("response", resp_obj)
        self.do_hook_actions(teardown_hooks)

    # extract
    extractors = testcase_dict.get("extract", []) or testcase_dict.get("extractors", [])
    extracted_variables_mapping = resp_obj.extract_response(extractors)
    self.context.bind_extracted_variables(extracted_variables_mapping)

    # validate
    validators = testcase_dict.get("validate", []) or testcase_dict.get("validators", [])
    try:
        self.context.validate(validators, resp_obj)
    except (exception.ParamsError, exception.ResponseError,
            exception.ValidationError, exception.ParseResponseError):
        # log request detail to ease debugging
        err_req_msg = "request: \n"
        err_req_msg += "headers: {}\n".format(parsed_request.pop("headers", {}))
        for k, v in parsed_request.items():
            err_req_msg += "{}: {}\n".format(k, v)
        logger.log_error(err_req_msg)

        # log response detail
        err_resp_msg = "response: \n"
        err_resp_msg += "status_code: {}\n".format(resp_obj.status_code)
        err_resp_msg += "headers: {}\n".format(resp_obj.headers)
        err_resp_msg += "content: {}\n".format(resp_obj.content)
        logger.log_error(err_resp_msg)

        raise
def main_locust():
    """ Performance test with locust: parse command line options and run commands.

    Rewrites sys.argv so locust receives a generated locustfile, and
    optionally forks multiple processes when --processes is given.
    """
    logger.setup_logger("INFO")

    try:
        from httprunner import locusts
    except ImportError:
        msg = "Locust is not installed, install first and try again.\n"
        msg += "install command: pip install locustio"
        logger.log_warning(msg)
        exit(1)

    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        sys.argv.extend(["-h"])

    if sys.argv[1] in ["-h", "--help", "-V", "--version"]:
        locusts.main()
        sys.exit(0)

    try:
        testcase_index = sys.argv.index('-f') + 1
        # explicit check instead of a bare assert: assertions are stripped
        # when python runs with -O, which would silently skip this validation
        if testcase_index >= len(sys.argv):
            raise ValueError("testcase file path missing after -f")
    except ValueError:
        logger.log_error("Testcase file is not specified, exit.")
        sys.exit(1)

    testcase_file_path = sys.argv[testcase_index]
    sys.argv[testcase_index] = locusts.parse_locustfile(testcase_file_path)

    if "--processes" in sys.argv:
        # locusts -f locustfile.py --processes 4
        if "--no-web" in sys.argv:
            logger.log_error(
                "conflict parameter args: --processes & --no-web. \nexit.")
            sys.exit(1)

        processes_index = sys.argv.index('--processes')
        processes_count_index = processes_index + 1

        if processes_count_index >= len(sys.argv):
            # processes count not given explicitly:
            #   locusts -f locustfile.py --processes
            processes_count = multiprocessing.cpu_count()
            logger.log_warning(
                "processes count not specified, use {} by default.".format(
                    processes_count))
        else:
            try:
                # locusts -f locustfile.py --processes 4
                processes_count = int(sys.argv[processes_count_index])
                sys.argv.pop(processes_count_index)
            except ValueError:
                # next token is another option, not a count:
                #   locusts -f locustfile.py --processes -P 8888
                processes_count = multiprocessing.cpu_count()
                logger.log_warning(
                    "processes count not specified, use {} by default.".format(
                        processes_count))

        sys.argv.pop(processes_index)
        locusts.run_locusts_with_processes(sys.argv, processes_count)
    else:
        locusts.main()
def main_locust():
    """ Performance test with locust: parse command line options and run commands. """
    logger.setup_logger("INFO")

    try:
        from httprunner import locusts
    except ImportError:
        warning = "Locust is not installed, install first and try again.\n"
        warning += "install command: pip install locustio"
        logger.log_warning(warning)
        exit(1)

    sys.argv[0] = 'locust'
    if len(sys.argv) == 1:
        sys.argv.append("-h")

    if sys.argv[1] in ("-h", "--help", "-V", "--version"):
        locusts.main()
        sys.exit(0)

    try:
        file_arg_index = sys.argv.index('-f') + 1
        assert file_arg_index < len(sys.argv)
    except (ValueError, AssertionError):
        logger.log_error("Testcase file is not specified, exit.")
        sys.exit(1)

    # swap the user-given testcase file for the generated locustfile
    sys.argv[file_arg_index] = locusts.parse_locustfile(sys.argv[file_arg_index])

    if "--processes" not in sys.argv:
        locusts.main()
        return

    # locusts -f locustfile.py --processes 4
    if "--no-web" in sys.argv:
        logger.log_error("conflict parameter args: --processes & --no-web. \nexit.")
        sys.exit(1)

    flag_index = sys.argv.index('--processes')
    count_index = flag_index + 1

    # try to read an explicit count after the flag; None means "not given"
    explicit_count = None
    if count_index < len(sys.argv):
        try:
            # locusts -f locustfile.py --processes 4
            explicit_count = int(sys.argv[count_index])
        except ValueError:
            # locusts -f locustfile.py --processes -P 8888
            explicit_count = None

    if explicit_count is None:
        processes_count = multiprocessing.cpu_count()
        logger.log_warning("processes count not specified, use {} by default.".format(processes_count))
    else:
        processes_count = explicit_count
        sys.argv.pop(count_index)

    sys.argv.pop(flag_index)
    locusts.run_locusts_with_processes(sys.argv, processes_count)