def generate_report(self, output):
    """Render the collected run result (``self.report``) into an HTML file.

    :param output: directory the report file is written into; created if absent
    """
    # ensure the output directory exists before opening the file
    make_dir(output)
    report_file = "{}/parrot_{}.html".format(output, now_timestamp())
    # use a context manager so the file handle is closed even if the
    # Report rendering raises (the original leaked the open handle)
    with open(report_file, 'w', encoding='utf-8') as stream:
        Report(stream=stream).generate_report(result=self.report)
    logger.info("You could check the report: {}".format(report_file))
def __do_replace(_dict, _from, _to):
    """Replace one entry of *_dict* according to a single rule.

    Key match takes priority: if *_from* is a key, its value becomes *_to*.
    Otherwise every entry whose stringified value equals *_from* is set
    to *_to*. The dict is modified in place and also returned.
    """
    if _from not in _dict.keys():
        # no key named _from -- fall back to matching on stringified values
        for key, value in _dict.items():
            if format(value) == _from:
                logger.info(
                    "Matched. Replace value of {} from {} to {}.".format(
                        key, _from, _to))
                _dict[key] = _to  # do replace
    else:
        # key hit -- replace directly
        logger.info("Matched. Replace value of {} from {} to {}.".format(
            _from, _dict[_from], _to))
        _dict[_from] = _to  # do replace
    return _dict
def __match_rule(self, _yaml, _dict, rules):
    """Apply replace rules to one parsed yaml dict (suite / case / step).

    :param _yaml: path of the yaml file the dict was loaded from; used for
                  per-api rule filtering and for logging
    :param _dict: parsed suite/case/step dict, modified in place
    :param rules: list of rule strings, ``key=>value`` or ``value1=>value2``,
                  optionally scoped to an api: ``/path/to/api::k=>v``
    """
    _k2v = re.compile(
        r"(.+)=>(.+)")  # key to value, e.g. host=www.example.com
    _pre = re.compile(
        r"(.+)::(.+)"
    )  # only to specified api, e.g. /path/to/api::variable1=1234
    logger.info("Do replace actions in {}.".format(_yaml))
    for rule in rules:
        _match = re.findall(_pre, rule)
        if _match:
            # only to specified api, fuzzy match with file name
            if format(_match[0][0]).strip() not in _yaml:
                continue
            _match = re.findall(_k2v, format(_match[0][1]).strip())
        else:
            _match = re.findall(_k2v, rule)
        if _match:
            # fixed log typo: "To math rule" -> "To match rule"
            logger.debug("To match rule: {}".format(rule))
            _from = format(_match[0][0]).strip()
            _to = format(_match[0][1]).strip()
            # config.variables => request => request.headers => request.cookies
            _dict['config']['variables'] = self.__do_replace(
                _dict=_dict['config']['variables'], _from=_from, _to=_to)
            if 'request' in _dict.keys():
                _dict['request'] = self.__do_replace(
                    _dict=_dict['request'], _from=_from, _to=_to)
                # replace in each request sub-dict; the key-presence guard
                # avoids a KeyError when a recorded step lacks one of them
                for _part in ('params', 'data', 'headers', 'cookies'):
                    if _part in _dict['request']:
                        _dict['request'][_part] = self.__do_replace(
                            _dict=_dict['request'][_part],
                            _from=_from,
                            _to=_to)
            if 'validations' in _dict.keys():
                for _idx, _validate in enumerate(_dict['validations']):
                    for _com, _exp in _validate.items():
                        _dict['validations'][_idx][_com] = self.__do_replace(
                            _dict=_exp, _from=_from, _to=_to)
def load_test_case(self, suite_or_case, environment=None):
    """Load yaml test data and normalize everything into suite dicts.

    :param suite_or_case: file or directory of test suites/cases/steps
    :param environment: environment defined in test data, None - use defined in suites/cases/steps
    :return: items of test suites/cases/steps (a list of suite dicts)
    """
    logger.info(
        "Start to load test suite or case: {}".format(suite_or_case))
    self.environment = environment
    items = []
    if os.path.isdir(suite_or_case):
        files = get_dir_files(suite_or_case)
    else:
        files = [suite_or_case, ]
    for _file in files:
        logger.info("Load case from file: {}".format(_file))
        if not (_file.endswith('yml') or _file.endswith('yaml')):
            logger.warning("Not a yaml file, ignore: {}".format(_file))
            continue
        try:
            _dict = yaml.full_load(stream=self.__read_file(_file))
        except yaml.YAMLError as e:
            # broader than the former ScannerError: also covers
            # ParserError and other yaml loading failures
            logger.warning("Invalid yaml file: {}".format(e))
            continue
        if not isinstance(_dict, dict):
            # an empty file or scalar yaml loads as None/str and would
            # crash the 'in _dict' membership checks below
            logger.warning("Invalid yaml content, ignore: {}".format(_file))
            continue
        logger.debug(" - yaml dict: {}".format(
            json.dumps(_dict, ensure_ascii=False)))
        # every file is wrapped up to a full suite so the runner only
        # ever sees suite dicts
        _tmp_suite = copy.deepcopy(self.suite_tpl)
        _tmp_case = copy.deepcopy(self.case_tpl)
        _tmp_step = copy.deepcopy(self.step_tpl)
        if 'test_cases' in _dict:  # it's a test suite
            _tmp_suite.update(_dict)
            self.__parse_test_suite(the_dict=_tmp_suite,
                                    base_path=get_file_path(_file))
        elif 'test_steps' in _dict:  # it's a test case
            _tmp_case.update(_dict)
            self.__parse_test_case(the_dict=_tmp_case,
                                   base_path=get_file_path(_file))
            _tmp_suite['test_cases'].append(_tmp_case)
        else:  # it's a test step
            _tmp_step.update(_dict)
            self.__parse_test_step(the_dict=_tmp_step,
                                   base_path=get_file_path(_file))
            _tmp_case['test_steps'].append(_tmp_step)
            _tmp_suite['test_cases'].append(_tmp_case)
        logger.debug(" - one suite: {}".format(
            json.dumps(_tmp_suite, ensure_ascii=False)))
        items.append(_tmp_suite)
    logger.info("Done.")
    logger.debug("The test suites are: {}".format(
        json.dumps(items, ensure_ascii=False)))
    return items
def __har_request(self, entry, step_dict, include, exclude, auto_extract=False):
    """Fill ``step_dict['request']`` from one HAR entry.

    :param entry: one item of the har ``entries`` list
    :param step_dict: step template dict, modified in place
    :param include: list, urls not matching are ignored
    :param exclude: list, urls matching are ignored
    :param auto_extract: bool, forwarded to __har_extract for automatic
                         identification of interface dependencies
    :return: True if the request was accepted, False if it was skipped
    """
    if not ('request' in entry.keys() and entry['request']):
        logger.warning(" * There is no request in this entry: {}".format(
            json.dumps(entry, ensure_ascii=False)))
        return False
    _req = entry['request']
    # get method
    step_dict['request']['method'] = _req.get('method', 'GET')
    # get url: protocol, host, url path
    _url = _req.get('url', "")
    # logger.info(" Get a {} request: {}".format(step_dict['request']['method'], _url))
    try:
        # split into (whole-url-without-query, protocol, host, path);
        # the trailing group swallows the query string, which is ignored
        (_whole_url, step_dict['request']['protocol'],
         step_dict['request']['host'],
         step_dict['request']['url'],
         _) = re.findall(r"((http\w*)://([\w.:]+)([^?]+))\??(.*)", _url)[0]
        # the url path doubles as the step name
        step_dict['config']['name'] = step_dict['request']['url']
        logger.debug(" - protocol: {} host: {} url: {}".format(
            step_dict['request']['protocol'], step_dict['request']['host'],
            step_dict['request']['url']))
        logger.info(" Get a {} request: {}".format(
            step_dict['request']['method'], step_dict['request']['url']))
    except IndexError:
        # findall returned an empty list -> the url did not match the pattern
        logger.warning(" * Invalid url: {}".format(_url))
        return False
    # filter with include and exclude options
    logger.debug(" - include: {} exclude: {}".format(include, exclude))
    if not self.__if_include(_whole_url, include) or self.__if_exclude(
            _whole_url, exclude):
        logger.info(" According to include/exclude options, ignore it")
        return False
    # get parameters
    # it may have both queryString and postData in an unusual post request
    step_dict['request']['params'] = {}
    step_dict['request']['data'] = {}
    _param = _req.get('queryString', [])
    _data = _req.get('postData', [])
    if _data:
        # postData carries either a 'params' list of name/value dicts
        # or a raw 'text' body
        if 'params' in _req.get('postData'):
            _data = _req.get('postData').get('params')
        else:
            _data = _req.get('postData').get('text')
        # if 'mimeType' in _req.get('postData') and _req.get('postData').get('mimeType') == 'application/json':
        #     _tmp = json.loads(_data)
        #     _data = []
        #     for _tk, _tv in _tmp.items():
        #         _data.append({'name': _tk, 'value': _tv})
    logger.debug(" - params: {}".format(_param))
    logger.debug(" - data: {}".format(_data))
    # extract all parameter values into variables, and keep {value} in parameters
    if isinstance(_param, (list, tuple, set)):
        for _item in _param:
            self.__har_extract(step_dict, _item['name'], _item['value'],
                               'params', auto_extract)
    else:
        # step_dict['request']['params'] = _param
        self.__har_extract(step_dict, '', _param, 'params', auto_extract)
    if isinstance(_data, (list, tuple, set)):
        for _item in _data:
            self.__har_extract(step_dict, _item['name'], _item['value'],
                               'data', auto_extract)
    else:
        # a raw text body is handed over with an empty name
        # step_dict['request']['data'] = _data
        self.__har_extract(step_dict, '', _data, 'data', auto_extract)
    logger.debug(" - self.variables: {}".format(
        json.dumps(self.variables, ensure_ascii=False)))
    # get headers (filtered by the RECORD_HEADERS whitelist)
    step_dict['request']['headers'] = {}
    self.__har_headers(_req.get('headers'), step_dict['request']['headers'],
                       RECORD_HEADERS, auto_extract)
    logger.debug(" - headers: {}".format(
        json.dumps(step_dict['request']['headers'], ensure_ascii=False)))
    # get cookies
    step_dict['request']['cookies'] = {}
    self.__har_cookies(_req.get('cookies'), step_dict['request']['cookies'],
                       auto_extract)
    logger.debug(" - cookies: {}".format(
        json.dumps(step_dict['request']['cookies'], ensure_ascii=False)))
    return True
def __generate_case(self, suite, target="ParrotProject"):
    """Write a parsed suite out as a tree of yaml files under *target*.

    Layout produced: ``environments/``, ``test_suites/``, ``test_cases/``
    and ``test_steps/``. *suite* is modified in place: nested step/case
    dicts are replaced by relative yaml paths as they are written.

    :param suite: suite dict produced by har parsing
    :param target: output project directory
    """
    logger.info("Start to generate test yamls")
    # generate test data (environment yaml shared by the whole suite)
    _d_path = "{}/environments".format(target)
    _d_yaml = "{}/{}_env.yml".format(_d_path, suite['config']['name'])
    r_d_yaml = "../environments/{}_env.yml".format(suite['config']['name'])
    make_dir(_d_path)
    with open(file=_d_yaml, mode='w', encoding='utf-8') as f:
        yaml.dump(data=self.env_tpl, stream=f, encoding='utf-8',
                  allow_unicode=True)
    logger.debug(" - environments: {}".format(_d_yaml))
    _e_path = "{}/test_suites".format(target)
    _e_yaml = "{}/{}.yml".format(_e_path, suite['config']['name'])
    make_dir(_e_path)
    suite['config']['import'] = r_d_yaml
    _t_sid = 0  # count total steps in a suite
    for _cid, _case in enumerate(suite['test_cases']):
        # generate test steps
        for _sid, _step in enumerate(_case['test_steps']):
            _t_sid += 1
            # remove 'response' for a clear view
            # _step['response'] = {'extract': {}}
            # iterate over a deepcopy so deleting from the live dict is safe
            _s_resp = copy.deepcopy(_step['response'])
            for _k, _v in _s_resp.items():
                if _k == 'extract':
                    for _ex_k, _ex_v in _s_resp['extract'].items():
                        # keep only extractions whose variable is flagged
                        # (identified as an interface dependency)
                        if _ex_k in self.variables.keys(
                        ) and self.variables[_ex_k]['flag']:
                            _step['response']['extract'][_ex_v] = _ex_v
                            del _step['response']['extract'][_ex_k]
                elif _k != 'time.spent':
                    del _step['response'][_k]
            # zero-padded 4-digit step id used as file name prefix
            _s_id = "{}{}".format('0' * (4 - len(str(_t_sid))),
                                  _t_sid)  # step id
            # mirror the url path of the step under test_steps/
            _s_path = "{}/test_steps/{}".format(
                target, '/'.join(_step['config']['name'].split('/')[1:-1]))
            _s_yaml = "{}/{}_{}.yml".format(
                _s_path, _s_id, _step['config']['name'].split('/')[-1])
            r_s_yaml = "../test_steps/{}/{}_{}.yml".format(
                '/'.join(_step['config']['name'].split('/')[1:-1]), _s_id,
                _step['config']['name'].split('/')[-1])
            # relative import path must climb back up to environments/
            # depending on how deep the step file is nested
            if len(_step['config']['name'].split('/')) > 1:
                _step['config'][
                    'import'] = "{}environments/{}_env.yml".format(
                        '../' * (len(_step['config']['name'].split('/')) - 1),
                        suite['config']['name'])
            else:
                _step['config'][
                    'import'] = "../environments/{}_env.yml".format(
                        suite['config']['name'])
            make_dir(_s_path)
            with open(file=_s_yaml, mode='w', encoding='utf-8') as f:
                # use allow_unicode=True to solve Chinese display problem
                yaml.dump(data=_step, stream=f, encoding='utf-8',
                          allow_unicode=True)
            # the case now references the step by its relative path
            _case['test_steps'][_sid] = r_s_yaml
            logger.debug(" - test step: {}".format(_s_yaml))
        # generate test cases
        _c_path = "{}/test_cases".format(target)
        _c_yaml = "{}/{}.yml".format(_c_path, _case['config']['name'])
        r_c_yaml = "../test_cases/{}.yml".format(_case['config']['name'])
        _case['config']['import'] = r_d_yaml
        make_dir(_c_path)
        with open(file=_c_yaml, mode='w', encoding='utf-8') as f:
            yaml.dump(data=_case, stream=f, encoding='utf-8',
                      allow_unicode=True)
        logger.debug(" - test case: {}".format(_c_yaml))
        # the suite now references the case by its relative path
        suite['test_cases'][_cid] = r_c_yaml
    # generate test suite
    with open(file=_e_yaml, mode='w', encoding='utf-8') as f:
        yaml.dump(data=suite, stream=f, encoding='utf-8',
                  allow_unicode=True)
    logger.debug(" - test suite: {}".format(_e_yaml))
    logger.info("Done. You could get them in {}".format(target))
def har_to_case(self, source, target="ParrotProject", include=None,
                exclude=None, validate_include=None, validate_exclude=None,
                auto_extract=False, suite_name=None):
    """parse source har file and generate test cases

    :param source: source file
    :param target: target directory for case output
    :param include: list, not matched url would be ignored in recording
    :param exclude: list, matched url would be ignored in recording
    :param validate_include: list, not matched response would be ignored in validating
    :param validate_exclude: list, matched response would be ignored in validating
    :param auto_extract: bool, for automatic identification of interface dependencies
    :param suite_name: specified suite, new a suite as default
    :return suite dict (or a case dict when suite_name is given),
            False on any input error
    """
    if not (source and os.path.exists(source)):
        logger.error("Source file does not exist: {}".format(source))
        return False
    if not source.lower().endswith('.har'):
        logger.error("The source is not a har file: {}".format(source))
        return False
    logger.info("Start to parse source file: {}".format(source))
    content = self.__read_file(source)
    try:
        har_dict = json.loads(content)['log']['entries']
    except (TypeError, KeyError, json.JSONDecodeError):
        # TypeError: content is not str/bytes; KeyError: 'log'/'entries'
        # missing; JSONDecodeError: file is not valid JSON at all.
        # The original TypeError branch fell through without returning,
        # leaving har_dict unbound and raising NameError below.
        logger.error("HAR file content error: {}".format(source))
        return False
    case_dict = copy.deepcopy(self.case_tpl)
    case_dict['config']['name'] = get_file_name(file=source)
    for entry_dict in har_dict:
        step_dict = copy.deepcopy(self.step_tpl)
        self.__har_times(entry=entry_dict, step_dict=step_dict)
        if not self.__har_request(entry=entry_dict,
                                  step_dict=step_dict,
                                  include=include,
                                  exclude=exclude,
                                  auto_extract=auto_extract):
            continue
        if not self.__har_response(entry=entry_dict,
                                   step_dict=step_dict,
                                   include=validate_include,
                                   exclude=validate_exclude,
                                   auto_extract=auto_extract):
            continue
        logger.debug("test_step: {}".format(
            json.dumps(step_dict, ensure_ascii=False)))
        # add step into case
        case_dict['test_steps'].append(step_dict)
    if suite_name:
        # caller (source_to_case) owns the suite; hand the case back
        return case_dict
    else:
        suite_dict = copy.deepcopy(self.suite_tpl)
        # add case into suite
        suite_dict['test_cases'].append(case_dict)
        suite_dict['config']['name'] = get_file_name(file=source)
        logger.info("Parse finished.")
        self.__generate_case(suite_dict, target)
        return suite_dict
def source_to_case(self, source, target="ParrotProject", include=None,
                   exclude=None, validate_include=None,
                   validate_exclude=None, auto_extract=False):
    """Parse a recording file or directory into one suite of cases.

    :param source: source file or directory
    :param target: target directory for case output
    :param include: list, not matched url would be ignored in recording
    :param exclude: list, matched url would be ignored in recording
    :param validate_include: list, not matched response would be ignored in validating
    :param validate_exclude: list, matched response would be ignored in validating
    :param auto_extract: bool, for automatic identification of interface dependencies
    :return suite dict
    """
    source = format(source).strip()
    if not (source and os.path.exists(source)):
        logger.error(
            "Source file or directory does not exist: {}".format(source))
        sys.exit(-1)
    # a trailing separator means a directory path -> name after the dir
    if source.endswith(("/", "\\")):
        suite_name = get_file_name(get_file_path(source))
    else:
        suite_name = get_file_name(source)
    if os.path.isdir(source):
        files = get_dir_files(source)
    else:
        files = [source, ]
    suite_dict = copy.deepcopy(self.suite_tpl)
    suite_dict['config']['name'] = suite_name
    logger.info(
        "Start to parse cases from source files: {}".format(source))
    for _file in files:
        if _file.lower().endswith('.har'):
            one_case = self.har_to_case(_file, target, include, exclude,
                                        validate_include, validate_exclude,
                                        auto_extract, suite_name)
        # elif _file.lower().endswith('.trace'):
        #     self.charles_trace_to_case()
        # elif _file.lower().endswith('.txt'):
        #     self.fiddler_txt_to_case()
        else:
            logger.warning(
                "Unsupported file extension: {}, ignore".format(_file))
            continue
        if not one_case:
            # har_to_case returns False on a broken har file; do not
            # append the failure flag into the suite as if it were a case
            continue
        # add case into suite
        suite_dict['test_cases'].append(one_case)
    logger.info("Parse finished.")
    self.__generate_case(suite_dict, target)
    return suite_dict
def case_replace(self, suite_or_case, rules, target="ParrotProjectNew"):
    """Load suites/cases/steps, apply replace *rules*, write the result
    into a parallel tree under *target*. Recurses into referenced case,
    step and environment files.

    :param suite_or_case: file or directory of test suites/cases/steps
    :param rules: replace rule list, key=>value or value1=>value2
    :param target: target directory for case output
    :return: suite dict
    """
    logger.info(
        "Start to load test suite or case: {}".format(suite_or_case))
    files = []
    # accept either a single path or a collection of paths (the
    # recursive calls below pass lists)
    if isinstance(suite_or_case, (list, tuple, set)):
        items = suite_or_case
    else:
        items = [format(suite_or_case), ]
    for item in items:
        if not os.path.exists(item):
            logger.warning("File {} does not exist, ignore.".format(item))
            continue
        if os.path.isdir(item):
            files.extend(get_dir_files(item))
        else:
            files.append(item)
    # output path -> replaced dict, all written out at the end
    total = {}
    for _file in files:
        logger.info("Load case from file: {}".format(_file))
        if not (_file.endswith('yml') or _file.endswith('yaml')):
            logger.warning("Not a yaml file, ignore: {}".format(_file))
            continue
        try:
            _dict = yaml.full_load(stream=self.__read_file(_file))
        except ScannerError as e:
            logger.warning("Invalid yaml file: {}".format(e))
            continue
        logger.debug(" - yaml dict: {}".format(
            json.dumps(_dict, ensure_ascii=False)))
        if 'test_cases' in _dict:  # it's a test suite
            _tmp_suite = copy.deepcopy(self.suite_tpl)
            _tmp_suite.update(_dict)
            self.__match_rule(_yaml=_file, _dict=_tmp_suite, rules=rules)
            # preserve the sub-path below test_suites/ in the target tree
            _path = re.findall(r"(.+)test_suites(.+)",
                               get_file_path(_file))
            if _path:
                total["{}/test_suites/{}/{}".format(
                    target, _path[0][1],
                    get_file_name(_file, ext=1))] = _tmp_suite
            else:
                total["{}/test_suites/{}".format(
                    target, get_file_name(_file, ext=1))] = _tmp_suite
            # recurse into the referenced case files (plus the imported
            # environment file, if any)
            _cases = []
            for _case in _tmp_suite['test_cases']:
                _cases.append("{}/{}".format(get_file_path(_file), _case))
            if _tmp_suite['config']['import']:
                _cases.append("{}/{}".format(
                    get_file_path(_file), _tmp_suite['config']['import']))
            if _cases:
                self.case_replace(suite_or_case=_cases,
                                  target=target,
                                  rules=rules)
        elif 'test_steps' in _dict:  # it's a test case
            _tmp_case = copy.deepcopy(self.case_tpl)
            _tmp_case.update(_dict)
            self.__match_rule(_yaml=_file, _dict=_tmp_case, rules=rules)
            _path = re.findall(r"(.+)test_cases(.+)", get_file_path(_file))
            if _path:
                total["{}/test_cases/{}/{}".format(
                    target, _path[0][1],
                    get_file_name(_file, ext=1))] = _tmp_case
            else:
                total["{}/test_cases/{}".format(
                    target, get_file_name(_file, ext=1))] = _tmp_case
            # recurse into the referenced step files (plus the imported
            # environment file, if any)
            _steps = []
            for _step in _tmp_case['test_steps']:
                _steps.append("{}/{}".format(get_file_path(_file), _step))
            if _tmp_case['config']['import']:
                _steps.append("{}/{}".format(
                    get_file_path(_file), _tmp_case['config']['import']))
            if _steps:
                self.case_replace(suite_or_case=_steps,
                                  target=target,
                                  rules=rules)
        elif 'request' in _dict:  # it's a test step
            _tmp_step = copy.deepcopy(self.step_tpl)
            _tmp_step.update(_dict)
            self.__match_rule(_yaml=_file, _dict=_tmp_step, rules=rules)
            _path = re.findall(r"(.+)test_steps(.+)", get_file_path(_file))
            if _path:
                total["{}/test_steps/{}/{}".format(
                    target, _path[0][1],
                    get_file_name(_file, ext=1))] = _tmp_step
            else:
                total["{}/test_steps/{}".format(
                    target, get_file_name(_file, ext=1))] = _tmp_step
            if _tmp_step['config']['import']:
                # NOTE(review): rules=None here — the imported file is
                # expected to be an environment yaml (which skips
                # __match_rule); if it were a suite/case/step instead,
                # iterating None rules would raise. Confirm intended.
                self.case_replace(suite_or_case="{}/{}".format(
                    get_file_path(_file), _tmp_step['config']['import']),
                                  target=target,
                                  rules=None)
        else:  # it's environment file
            # environment files are copied through without replacement
            _path = re.findall(r"(.+)environments(.+)",
                               get_file_path(_file))
            if _path:
                total["{}/environments/{}/{}".format(
                    target, _path[0][1],
                    get_file_name(_file, ext=1))] = _dict
            else:
                total["{}/environments/{}".format(
                    target, get_file_name(_file, ext=1))] = _dict
    # flush everything collected above to disk
    for _k, _v in total.items():
        make_dir(get_file_path(_k))
        with open(file=format(_k), mode='w', encoding='utf-8') as f:
            yaml.dump(data=_v, stream=f, encoding='utf-8',
                      allow_unicode=True)
        logger.info("Write file after replace: {}".format(_k))
    logger.info("Done. You could get them in {}".format(target))
def run_cases(self, suite_or_case, environment=None, interval='ms',
              reset_after_case=False, fail_stop=False, retry_times=0,
              retry_interval=100, output='.'):
    """
    :param suite_or_case: file or directory of test suites / cases / steps
    :param environment: environment flag defined in test data, 'None' - only load 'global' data
    :param interval: interval time(ms) between each step, use the recorded interval as default
    :param reset_after_case: reset runtime environment after each case or not, 'no' as default
    :param fail_stop: stop or not when a test step failed on validation, False as default
    :param retry_times: max retry times when a test step failed on validation, 0 as default
    :param retry_interval: retry interval(ms) when a test step failed on validation, 100 as default
    :param output: output path for report, '.' as default
    :return:
    """
    # sanitize numeric arguments; non-numeric interval keeps the
    # string sentinel 'ms', which means "use the recorded interval"
    try:
        interval = float(interval)
    except ValueError:
        interval = 'ms'
    try:
        retry_times = int(retry_times)
    except ValueError:
        retry_times = 0
    try:
        retry_interval = int(retry_interval)
    except ValueError:
        retry_interval = 100
    # parse specified cases into dict
    items = self.parser.load_test_case(suite_or_case=suite_or_case,
                                       environment=environment)
    self.report['title'] = suite_or_case
    self.report['detail'] = items
    self.report['time']['start'] = now_ms()
    if not items:
        logger.error("Parsed {}, but get nothing.".format(suite_or_case))
        return -1
    for _sid, _suite in enumerate(items):
        self.report['summary']['suite']['total'] += 1
        # per-suite report node, filled in as cases run
        _suite['_report_'] = {
            'id': _sid,
            'name': _suite['config']['name'],
            'status': True,
            'cases': {
                'total': 0,
                'pass': 0,
                'fail': 0
            }
        }
        logger.info("Run test suite: {}".format(
            json.dumps(_suite, ensure_ascii=False)))
        # do hook actions before a suite
        logger.info(" - Do setup hook actions of the suite: {}".format(
            _suite['setup_hooks']))
        self.do_hook_actions(_suite['setup_hooks'])
        for _cid, _case in enumerate(_suite['test_cases']):
            self.report['summary']['case']['total'] += 1
            _suite['_report_']['cases']['total'] += 1
            # per-case report node
            _case['_report_'] = {
                'id': _cid,
                'name': _case['config']['name'],
                'status': True,
                'steps': {
                    'total': 0,
                    'pass': 0,
                    'fail': 0
                }
            }
            logger.info("Run test case: {}".format(
                json.dumps(_case, ensure_ascii=False)))
            # do hook actions before a case
            logger.info(" - Do setup hook actions of the case: {}".format(
                _case['setup_hooks']))
            self.do_hook_actions(_case['setup_hooks'])
            for _tid, _step in enumerate(_case['test_steps']):
                self.report['summary']['step']['total'] += 1
                _case['_report_']['steps']['total'] += 1
                _step['_report_'] = {
                    'id': _tid,
                    'name': _step['config']['name'],
                    'status': True
                }
                logger.info("Run test step: {}".format(
                    json.dumps(_step, ensure_ascii=False)))
                # do hook actions before a request
                logger.info(
                    " - Do setup hook actions of the step: {}".format(
                        _step['setup_hooks']))
                self.do_hook_actions(_step['setup_hooks'])
                # handle variables, priority: suite > case > step
                self.__set_variables(_step['config']['variables'])
                self.__set_variables(_case['config']['variables'])
                self.__set_variables(_suite['config']['variables'])
                logger.info(" - Config variables of the step: {}".format(
                    json.dumps(self.variables, ensure_ascii=False)))
                # handle request interval
                if not isinstance(interval, (int, float)):
                    if 'time.start' in _step['request']:
                        # use the recorded interval: keep the same gap to
                        # the recorded start time as the previous step had
                        _span = now_timestamp_ms() - int(
                            _step['request']['time.start'])
                        if not self.req_span:
                            self.req_span = _span
                        _sleep = self.req_span - _span \
                            if self.req_span > _span else MINOR_INTERVAL_MS
                        # higher than MAX, treat it as request of another batch
                        if _sleep > MAX_INTERVAL_MS:
                            _sleep = MINOR_INTERVAL_MS
                        self.req_span = _span  # reset span
                    else:
                        # no recorded interval, use default
                        _sleep = MINOR_INTERVAL_MS
                else:
                    # use specified interval
                    _sleep = interval
                if _sleep != MINOR_INTERVAL_MS:
                    logger.info(
                        " - Break time, sleep for {} ms.".format(_sleep))
                    time.sleep(_sleep / 1000.0)
                try_flag = True
                while try_flag:
                    # run this request
                    response = self.run_one_request(_step['request'])
                    _step['_report_']['request'] = response['request']
                    _step['_report_']['response'] = response['response']
                    _step['_report_']['time'] = response['time']
                    response['response']['time'] = response['time']
                    # extract specified variables
                    if 'extract' in _step['response'] and _step['response']['extract']:
                        logger.info(" - Extract variables: {}".format(
                            _step['response']['extract']))
                        self.__extract_variable(
                            extract=_step['response']['extract'],
                            response=response['response'])
                        logger.debug(
                            " - Variables after extract: {}".format(
                                json.dumps(self.variables,
                                           ensure_ascii=False)))
                    # do response validation
                    _validate = self.do_validation(
                        response=response['response'],
                        rules=_step['validations'])
                    _step['_report_']['validation'] = _validate
                    if not _validate['status']:  # failed
                        logger.info(" - Test step validation failed")
                        if fail_stop:
                            # try_flag False marks the hard-stop path below
                            try_flag = False
                            break
                        elif retry_times:
                            logger.info(
                                "Sleep {} ms and Run this test step again..".format(
                                    retry_interval))
                            # NOTE(review): retry_times is a shared budget
                            # across ALL steps of the run — once one step
                            # exhausts it, later failing steps get no
                            # retries. Confirm this is intended.
                            retry_times -= 1
                            time.sleep(retry_interval * 1.0 / 1000)
                        else:
                            break
                    else:
                        break
                if _step['_report_']['validation']['status']:  # step pass
                    self.report['summary']['step']['pass'] += 1
                    _case['_report_']['steps']['pass'] += 1
                else:
                    self.report['summary']['step']['fail'] += 1
                    _case['_report_']['steps']['fail'] += 1
                    # a failed step fails its case and suite
                    _suite['_report_']['status'] = _case['_report_'][
                        'status'] = _step['_report_']['status'] = False
                if not try_flag:  # need to stop
                    # fail_stop: close out all counters, finish the
                    # report and bail out of the whole run
                    _suite['_report_']['cases']['fail'] += 1
                    self.report['summary']['case']['fail'] += 1
                    self.report['summary']['case']['pass'] = \
                        self.report['summary']['case']['total'] - \
                        self.report['summary']['case']['fail']
                    self.report['summary']['suite']['fail'] += 1
                    self.report['summary']['suite']['pass'] = \
                        self.report['summary']['suite']['total'] - \
                        self.report['summary']['suite']['fail']
                    self.report['time']['end'] = now_ms()
                    logger.info(
                        "Stop according to your --fail-stop argument")
                    return self.generate_report(output=output)
                # do hook actions after a request
                logger.info(
                    " - Do teardown hook actions of the step: {}".format(
                        _step['teardown_hooks']))
                self.do_hook_actions(_step['teardown_hooks'])
            # do hook actions after a case
            logger.info(
                " - Do teardown hook actions of the case: {}".format(
                    _case['teardown_hooks']))
            self.do_hook_actions(_case['teardown_hooks'])
            if reset_after_case:
                # reset runtime environment after each case
                logger.info("Reset runtime environment after the case")
                self.__reset_env()
            if _case['_report_']['status']:  # case pass
                _suite['_report_']['cases']['pass'] += 1
                self.report['summary']['case']['pass'] += 1
            else:
                _suite['_report_']['cases']['fail'] += 1
                self.report['summary']['case']['fail'] += 1
                _suite['_report_']['status'] = False
        # do hook actions after a suite
        logger.info(
            " - Do teardown hook actions of the suite: {}".format(
                _suite['teardown_hooks']))
        self.do_hook_actions(_suite['teardown_hooks'])
        # reset runtime environment after each suite
        logger.info("Reset runtime environment after the suite")
        self.__reset_env()
        if _suite['_report_']['status']:  # suite pass
            self.report['summary']['suite']['pass'] += 1
        else:
            self.report['summary']['suite']['fail'] += 1
    self.report['time']['end'] = now_ms()
    # generate report
    self.generate_report(output=output)
def do_validation(self, response, rules):
    """Validate a response against a list of rules.

    Each rule is a dict mapping a comparator name to either a
    ``{check: expected}`` dict, a list of such dicts / bare checks,
    or a single bare check (for unary comparators such as exists).

    :param response: response dict (headers/content/...), flattened
                     into key-value pairs before comparison
    :param rules: list of rule dicts as described above
    :return: {'status': bool, 'detail': [per-check records]}
    """
    logger.info(" - Do response validations: {}".format(rules))
    _content = None
    if 'Content-Type' in response['headers'] and \
            response['headers']['Content-Type'].startswith('application/json'):
        # keep a newline-free copy of the raw body so it can be
        # restored after the flattened kv pairs are built
        _content = response['content'].replace("\n", "")
        try:
            response['content'] = json.loads(response['content'])
        except json.decoder.JSONDecodeError:
            # body claimed to be json but is not -- validate it as text
            pass
    _response = get_all_kv_pairs(item=response, mode=0)
    response = {}
    for _k, _v in _response.items():
        if isinstance(_v, str):
            # normalize line breaks so multi-line values survive comparison
            _v = _v.replace("\r\n", '__break_line__').replace(
                "\n", '__break_line__')
        response[_k] = _v
    if _content:
        response['content'] = _content
    result = {'status': True, 'detail': []}
    self.validator.set_response(response)

    def _record_pair(_com, _key, _val):
        # binary comparator: check _key against the expected _val
        _status = self.validator.validate(
            comparator=_com,
            actual=self.__get_variables(_key),
            expected=self.__get_variables(_val))
        result['detail'].append({
            'check': _key,
            'comparator': _com,
            'expect': _val,
            'actual': self.validator.actual,
            'status': _status
        })
        if not _status:
            result['status'] = False

    def _record_single(_com, _item):
        # unary comparator: only the check itself, expectation is implied
        _status = self.validator.validate(
            comparator=_com, actual=self.__get_variables(format(_item)))
        result['detail'].append({
            'check': _item,
            'comparator': _com,
            'expect': "<{}>".format(_com),
            'actual': self.validator.actual,
            'status': _status
        })
        if not _status:
            result['status'] = False

    for _rule in rules:
        for _com, _item in _rule.items():
            if isinstance(_item, dict):
                for _key, _val in _item.items():
                    _record_pair(_com, _key, _val)
            elif isinstance(_item, (list, tuple, set)):
                for _sub in _item:
                    if isinstance(_sub, dict):
                        for _key, _val in _sub.items():
                            _record_pair(_com, _key, _val)
                    else:
                        _record_single(_com, _sub)
            else:
                _record_single(_com, _item)
    logger.info(" - Validation result: {}".format(
        json.dumps(result, ensure_ascii=False)))
    return result