Example #1
    def __extract_variable(self, extract, response):
        if not extract:
            return
        # extract status
        _all_ = {
            'status.code': response['status.code']
        }
        # extract content
        try:
            _all_.update(get_all_kv_pairs(item=json.loads(response['content']), prefix='content', mode=0))
        except json.decoder.JSONDecodeError:
            _all_['content'] = response['content']
        # extract headers
        for _key, _val in response['headers'].items():
            _all_["headers.{}".format(_key)] = _val
        # extract cookies
        for _key, _val in response['cookies'].items():
            _all_["cookies.{}".format(_key)] = _val
        logger.debug(" - All optional variables: {}".format(json.dumps(_all_, ensure_ascii=False)))

        # extract specified element, and set in self.variables
        if isinstance(extract, dict):
            for _key, _val in extract.items():
                self.variables[_key] = _all_[_val] if _val in _all_ else _val
        elif isinstance(extract, (list, set, tuple)):
            for _key in extract:
                self.variables[_key] = _all_[_key] if _key in _all_ else ""
        else:
            self.variables[format(extract)] = _all_[format(extract)] if format(extract) in _all_ else ""
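The `extract` argument handled above may be a dict, a sequence, or a single key. A minimal sketch of the three accepted forms, assuming the flattened keys produced by the method ('status.code', 'content.*', 'headers.*', 'cookies.*'); the concrete key names are illustrative only:

    # dict form: variable name -> flattened response key (the key string itself is kept as a literal fallback)
    extract = {'token': 'content.data.token', 'code': 'status.code'}
    # list/set/tuple form: each entry is both the variable name and the response key ("" if the key is absent)
    extract = ['status.code', 'headers.Set-Cookie']
    # scalar form: a single key, stored under its own name ("" if the key is absent)
    extract = 'content.msg'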
Example #2
 def run_one_request(self, request):
     request = self.__get_variables(request)
     logger.debug("Run request: {}".format(json.dumps(request, ensure_ascii=False)))
     ret = self.session.request(
         url="{}://{}{}".format(request['protocol'], request['host'], request['url']),
         method=request['method'],
         params=request['params'],
         data=request['data'],
         headers=request['headers'],
         cookies=request['cookies']
     )
     logger.debug("Get response: {}".format(json.dumps(ret, ensure_ascii=False)))
     return ret
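run_one_request expects a flat request dict carrying the fields referenced above. A sketch of that shape (the field names come from the code; the values are illustrative only):

    request = {
        'protocol': 'https',                      # joined as "{protocol}://{host}{url}"
        'host': 'api.example.com',
        'url': '/v1/login',
        'method': 'POST',
        'params': {'lang': 'en'},                 # query-string parameters
        'data': {'user': 'demo', 'pwd': '***'},   # request body
        'headers': {'Content-Type': 'application/x-www-form-urlencoded'},
        'cookies': {},
    }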
Example #3
 def __parse_test_suite(self, the_dict, base_path):
     self.__parse_environments(the_dict, base_path)
     for _idx, _case in enumerate(the_dict['test_cases']):
         logger.debug(" - case {} in suite: {}".format(_idx, _case))
         if _case.startswith('..'):
             _case = "{}/{}".format(base_path, _case)
         try:
             the_dict['test_cases'][_idx] = copy.deepcopy(self.case_tpl)
             the_dict['test_cases'][_idx].update(
                 yaml.full_load(self.__read_file(_case)))
             logger.debug(" - case info: {}".format(
                 json.dumps(the_dict['test_cases'][_idx],
                            ensure_ascii=False)))
         except ScannerError as e:
             logger.warning("Invalid yaml file: {}".format(e))
             continue
         self.__parse_test_case(the_dict=the_dict['test_cases'][_idx],
                                base_path=get_file_path(_case))
Example #4
 def __match_rule(self, _yaml, _dict, rules):
     _k2v = re.compile(
         r"(.+)=>(.+)")  # key to value, e.g. host=>www.example.com
     _pre = re.compile(
         r"(.+)::(.+)"
     )  # only to specified api, e.g. /path/to/api::variable1=>1234
     logger.info("Do replace actions in {}.".format(_yaml))
     for rule in rules:
         _match = re.findall(_pre, rule)
         if _match:  # only to specified api, fuzzy match with file name
             if format(_match[0][0]).strip() not in _yaml:
                 continue
             _match = re.findall(_k2v, format(_match[0][1]).strip())
         else:
             _match = re.findall(_k2v, rule)
         if _match:
             logger.debug("To math rule: {}".format(rule))
             _from = format(_match[0][0]).strip()
             _to = format(_match[0][1]).strip()
             # config.variables => request => request.headers => request.cookies
             _dict['config']['variables'] = self.__do_replace(
                 _dict=_dict['config']['variables'], _from=_from, _to=_to)
             if 'request' in _dict.keys():
                 _dict['request'] = self.__do_replace(
                     _dict=_dict['request'], _from=_from, _to=_to)
                 _dict['request']['params'] = self.__do_replace(
                     _dict=_dict['request']['params'], _from=_from, _to=_to)
                 _dict['request']['data'] = self.__do_replace(
                     _dict=_dict['request']['data'], _from=_from, _to=_to)
                 _dict['request']['headers'] = self.__do_replace(
                     _dict=_dict['request']['headers'],
                     _from=_from,
                     _to=_to)
                 _dict['request']['cookies'] = self.__do_replace(
                     _dict=_dict['request']['cookies'],
                     _from=_from,
                     _to=_to)
             if 'validations' in _dict.keys():
                 for _idx, _validate in enumerate(_dict['validations']):
                     for _com, _exp in _validate.items():
                         _dict['validations'][_idx][
                             _com] = self.__do_replace(_dict=_exp,
                                                       _from=_from,
                                                       _to=_to)
Example #5
 def __parse_environments(self, the_dict, base_path):
     if 'import' in the_dict['config'] and the_dict['config']['import']:
         if the_dict['config']['import'].startswith('..'):
             the_dict['config']['import'] = "{}/{}".format(
                 base_path, the_dict['config']['import'])
         try:
             _tmp_env = yaml.full_load(
                 stream=self.__read_file(the_dict['config']['import']))
             the_dict['config']['import'] = _tmp_env
             _variables = copy.deepcopy(
                 _tmp_env['global']) if 'global' in _tmp_env else {}
             # env priority: argument > config
             _env = self.environment if self.environment else the_dict[
                 'config']['environment']
             if _env in _tmp_env:
                 _variables.update(_tmp_env[_env])
             _variables.update(the_dict['config']['variables'])
             the_dict['config']['variables'] = _variables
         except ScannerError as e:
             logger.warning("Invalid yaml file: {}".format(e))
     logger.debug(" - config variables: {}".format(
         json.dumps(the_dict['config']['variables'], ensure_ascii=False)))
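As implied by the lookups above, an imported environment file contains an optional 'global' section plus one section per environment name; 'global' is merged first, then the selected environment, then the case's own variables override both. A sketch of the structure after yaml.full_load (section and key names are illustrative only):

    _tmp_env = {
        'global': {'host': 'api.example.com'},           # always merged first
        'staging': {'host': 'staging.example.com'},       # merged when the selected environment is 'staging'
        'production': {'host': 'www.example.com'},
    }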
Example #6
 def load_test_case(self, suite_or_case, environment=None):
     """
     :param suite_or_case: file or directory of test suites/cases/steps
     :param environment: environment defined in test data; None - use the environment defined in the suites/cases/steps
     :return: items of test suites/cases/steps
     """
     logger.info(
         "Start to load test suite or case: {}".format(suite_or_case))
     self.environment = environment
     items = []
     if os.path.isdir(suite_or_case):
         files = get_dir_files(suite_or_case)
     else:
         files = [
             suite_or_case,
         ]
     for _file in files:
         logger.info("Load case from file: {}".format(_file))
         if not (_file.endswith('yml') or _file.endswith('yaml')):
             logger.warning("Not a yaml file, ignore: {}".format(_file))
             continue
         try:
             _dict = yaml.full_load(stream=self.__read_file(_file))
         except ScannerError as e:
             logger.warning("Invalid yaml file: {}".format(e))
             continue
         logger.debug(" - yaml dict: {}".format(
             json.dumps(_dict, ensure_ascii=False)))
         _tmp_suite = copy.deepcopy(self.suite_tpl)
         _tmp_case = copy.deepcopy(self.case_tpl)
         _tmp_step = copy.deepcopy(self.step_tpl)
         if 'test_cases' in _dict:  # it's a test suite
             _tmp_suite.update(_dict)
             self.__parse_test_suite(the_dict=_tmp_suite,
                                     base_path=get_file_path(_file))
         elif 'test_steps' in _dict:  # it's a test case
             _tmp_case.update(_dict)
             self.__parse_test_case(the_dict=_tmp_case,
                                    base_path=get_file_path(_file))
             _tmp_suite['test_cases'].append(_tmp_case)
         else:  # it's a test step
             _tmp_step.update(_dict)
             self.__parse_test_step(the_dict=_tmp_step,
                                    base_path=get_file_path(_file))
             _tmp_case['test_steps'].append(_tmp_step)
             _tmp_suite['test_cases'].append(_tmp_case)
         logger.debug(" - one suite: {}".format(
             json.dumps(_tmp_suite, ensure_ascii=False)))
         items.append(_tmp_suite)
     logger.info("Done.")
     logger.debug("The test suites are: {}".format(
         json.dumps(items, ensure_ascii=False)))
     return items
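A minimal usage sketch, assuming the method lives on a parser class; the class name `CaseParser` and the file path are assumptions, not taken from the source:

    parser = CaseParser()                     # assumed class holding load_test_case()
    suites = parser.load_test_case('test_suites/login.yml', environment='staging')
    for suite in suites:
        for case in suite['test_cases']:
            print(case['config']['name'], len(case['test_steps']))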
Example #7
    def __har_response(self,
                       entry,
                       step_dict,
                       include,
                       exclude,
                       auto_extract=False):
        if not ('response' in entry.keys() and entry['response']):
            logger.warning(" * There is no response in this entry: {}".format(
                json.dumps(entry, ensure_ascii=False)))
            return False
        _rsp = entry['response']

        # get status
        step_dict['response']['status'] = _rsp.get('status', 200)
        step_dict['validations'].append(
            {"eq": {
                'status.code': step_dict['response']['status']
            }})

        # get headers
        step_dict['response']['headers'] = {}
        self.__har_headers(_rsp.get('headers'),
                           step_dict['response']['headers'], VALIDATE_HEADERS)
        __headers = get_all_kv_pairs(item=step_dict['response']['headers'],
                                     prefix='headers')
        _vin = get_matched_keys(key=include,
                                keys=list(__headers.keys()),
                                fuzzy=1)
        _vex = get_matched_keys(key=exclude,
                                keys=list(__headers.keys()),
                                fuzzy=1) if exclude else []
        for _k, _v in step_dict['response']['headers'].items():
            _k = "headers.{}".format(_k)
            # Extracting temporary variables for automatic identification of interface dependencies
            if auto_extract and isinstance(_v,
                                           str) and len(_v) >= IDENTIFY_LEN:
                if _v not in self.variables.keys():
                    self.variables[_v] = {'key': _k, 'flag': 0}
                    step_dict['response']['extract'][_v] = _k

            if _k in _vin and _k not in _vex:
                step_dict['validations'].append({"eq": {_k: _v}})

        logger.debug(" - self.variables: {}".format(
            json.dumps(self.variables, ensure_ascii=False)))

        # get cookies
        step_dict['response']['cookies'] = {}
        self.__har_cookies(_rsp.get('cookies'),
                           step_dict['response']['cookies'])

        # get content
        try:
            _text = _rsp.get('content').get('text', '')
            _mime = _rsp.get('content').get('mimeType') or ''
            _code = _rsp.get('content').get('encoding')
        except AttributeError:
            logger.warning(" * Invalid response content: {}".format(
                _rsp.get('content')))
            return False
        if _code and _code == 'base64':
            try:
                _text = base64.b64decode(_text).decode('utf-8')
            except UnicodeDecodeError as e:
                logger.warning(" * Decode error: {}".format(e))
        elif _code:
            logger.warning(" * Unsupported encoding method: {}".format(_code))
            return False
        logger.debug(" - mimeType: {}, encoding: {}".format(_mime, _code))
        logger.debug(" - content text: {}".format(_text))
        if _mime.startswith('application/json'):  # json => dict
            try:
                step_dict['response']['content'] = json.loads(_text)
                # extract all content values into validations
                logger.debug(" - validation include: {}, exclude: {}".format(
                    include, exclude))
                _pairs = get_all_kv_pairs(item=json.loads(_text),
                                          prefix='content')
                _vin = get_matched_keys(key=include,
                                        keys=list(_pairs.keys()),
                                        fuzzy=1)
                _vex = get_matched_keys(key=exclude,
                                        keys=list(_pairs.keys()),
                                        fuzzy=1) if exclude else []
                for _k, _v in _pairs.items():
                    if isinstance(_v, str):
                        # _v = "__break_line__".join(_v.split("\n"))
                        _v = _v.replace("\r\n", '__break_line__').replace(
                            "\n", '__break_line__')
                    # Extracting temporary variables for automatic identification of interface dependencies
                    if auto_extract and isinstance(
                            _v, str) and len(_v) >= IDENTIFY_LEN:
                        if _v not in self.variables.keys():
                            self.variables[_v] = {'key': _k, 'flag': 0}
                            step_dict['response']['extract'][_v] = _k
                    if _k in _vin and _k not in _vex:
                        step_dict['validations'].append({"eq": {_k: _v}})

            except json.decoder.JSONDecodeError:
                logger.warning(
                    " * Invalid response content in json: {}".format(_text))
                # sys.exit(-1)
        elif _mime.startswith('text/html'):  # TODO: html => dom tree, xpath
            pass
        else:
            logger.warning(" * Unsupported mimeType: {}".format(_mime))
            # step_dict['validations'].append({"eq": {'content': _text}})
        logger.debug(" - validations: {}".format(
            json.dumps(step_dict['validations'], ensure_ascii=False)))
        logger.debug(" - self.variables: {}".format(
            json.dumps(self.variables, ensure_ascii=False)))

        return True
Example #8
    def __har_request(self,
                      entry,
                      step_dict,
                      include,
                      exclude,
                      auto_extract=False):
        if not ('request' in entry.keys() and entry['request']):
            logger.warning(" * There is no request in this entry: {}".format(
                json.dumps(entry, ensure_ascii=False)))
            return False
        _req = entry['request']

        # get method
        step_dict['request']['method'] = _req.get('method', 'GET')

        # get url: protocol, host, url path
        _url = _req.get('url', "")
        # logger.info(" Get a {} request: {}".format(step_dict['request']['method'], _url))

        try:
            (_whole_url, step_dict['request']['protocol'],
             step_dict['request']['host'], step_dict['request']['url'],
             _) = re.findall(r"((http\w*)://([\w.:]+)([^?]+))\??(.*)", _url)[0]
            step_dict['config']['name'] = step_dict['request']['url']
            logger.debug(" - protocol: {} host: {} url: {}".format(
                step_dict['request']['protocol'], step_dict['request']['host'],
                step_dict['request']['url']))
            logger.info(" Get a {} request: {}".format(
                step_dict['request']['method'], step_dict['request']['url']))
        except IndexError:
            logger.warning(" * Invalid url: {}".format(_url))
            return False

        # filter with include and exclude options
        logger.debug(" - include: {} exclude: {}".format(include, exclude))
        if not self.__if_include(_whole_url, include) or self.__if_exclude(
                _whole_url, exclude):
            logger.info(" According to include/exclude options, ignore it")
            return False

        # get parameters
        # it may have both queryString and postData in an unusual post request
        step_dict['request']['params'] = {}
        step_dict['request']['data'] = {}
        _param = _req.get('queryString', [])
        _data = _req.get('postData', [])
        if _data:
            if 'params' in _req.get('postData'):
                _data = _req.get('postData').get('params')
            else:
                _data = _req.get('postData').get('text')
            # if 'mimeType' in _req.get('postData') and _req.get('postData').get('mimeType') == 'application/json':
            #     _tmp = json.loads(_data)
            #     _data = []
            #     for _tk, _tv in _tmp.items():
            #         _data.append({'name': _tk, 'value': _tv})
        logger.debug(" - params: {}".format(_param))
        logger.debug(" - data: {}".format(_data))

        # extract all parameter values into variables, and keep {value} in parameters
        if isinstance(_param, (list, tuple, set)):
            for _item in _param:
                self.__har_extract(step_dict, _item['name'], _item['value'],
                                   'params', auto_extract)
        else:
            # step_dict['request']['params'] = _param
            self.__har_extract(step_dict, '', _param, 'params', auto_extract)
        if isinstance(_data, (list, tuple, set)):
            for _item in _data:
                self.__har_extract(step_dict, _item['name'], _item['value'],
                                   'data', auto_extract)
        else:
            # step_dict['request']['data'] = _data
            self.__har_extract(step_dict, '', _data, 'data', auto_extract)
        logger.debug(" - self.variables: {}".format(
            json.dumps(self.variables, ensure_ascii=False)))

        # get headers
        step_dict['request']['headers'] = {}
        self.__har_headers(_req.get('headers'),
                           step_dict['request']['headers'], RECORD_HEADERS,
                           auto_extract)
        logger.debug(" - headers: {}".format(
            json.dumps(step_dict['request']['headers'], ensure_ascii=False)))

        # get cookies
        step_dict['request']['cookies'] = {}
        self.__har_cookies(_req.get('cookies'),
                           step_dict['request']['cookies'], auto_extract)
        logger.debug(" - cookies: {}".format(
            json.dumps(step_dict['request']['cookies'], ensure_ascii=False)))

        return True
Example #9
    def __generate_case(self, suite, target="ParrotProject"):
        logger.info("Start to generate test yamls")

        # generate test data
        _d_path = "{}/environments".format(target)
        _d_yaml = "{}/{}_env.yml".format(_d_path, suite['config']['name'])
        r_d_yaml = "../environments/{}_env.yml".format(suite['config']['name'])
        make_dir(_d_path)
        with open(file=_d_yaml, mode='w', encoding='utf-8') as f:
            yaml.dump(data=self.env_tpl,
                      stream=f,
                      encoding='utf-8',
                      allow_unicode=True)
        logger.debug(" - environments: {}".format(_d_yaml))

        _e_path = "{}/test_suites".format(target)
        _e_yaml = "{}/{}.yml".format(_e_path, suite['config']['name'])
        make_dir(_e_path)
        suite['config']['import'] = r_d_yaml
        _t_sid = 0  # count total steps in a suite
        for _cid, _case in enumerate(suite['test_cases']):
            # generate test steps
            for _sid, _step in enumerate(_case['test_steps']):
                _t_sid += 1
                # remove 'response' for a clear view
                # _step['response'] = {'extract': {}}
                _s_resp = copy.deepcopy(_step['response'])
                for _k, _v in _s_resp.items():
                    if _k == 'extract':
                        for _ex_k, _ex_v in _s_resp['extract'].items():
                            if _ex_k in self.variables.keys(
                            ) and self.variables[_ex_k]['flag']:
                                _step['response']['extract'][_ex_v] = _ex_v
                            del _step['response']['extract'][_ex_k]
                    elif _k != 'time.spent':
                        del _step['response'][_k]
                _s_id = "{}{}".format('0' * (4 - len(str(_t_sid))),
                                      _t_sid)  # step id
                _s_path = "{}/test_steps/{}".format(
                    target, '/'.join(_step['config']['name'].split('/')[1:-1]))
                _s_yaml = "{}/{}_{}.yml".format(
                    _s_path, _s_id, _step['config']['name'].split('/')[-1])
                r_s_yaml = "../test_steps/{}/{}_{}.yml".format(
                    '/'.join(_step['config']['name'].split('/')[1:-1]), _s_id,
                    _step['config']['name'].split('/')[-1])
                if len(_step['config']['name'].split('/')) > 1:
                    _step['config'][
                        'import'] = "{}environments/{}_env.yml".format(
                            '../' *
                            (len(_step['config']['name'].split('/')) - 1),
                            suite['config']['name'])
                else:
                    _step['config'][
                        'import'] = "../environments/{}_env.yml".format(
                            suite['config']['name'])
                make_dir(_s_path)
                with open(file=_s_yaml, mode='w', encoding='utf-8') as f:
                    # allow_unicode=True keeps non-ASCII (e.g. Chinese) text readable in the yaml output
                    yaml.dump(data=_step,
                              stream=f,
                              encoding='utf-8',
                              allow_unicode=True)
                _case['test_steps'][_sid] = r_s_yaml
                logger.debug(" - test step: {}".format(_s_yaml))

            # generate test cases
            _c_path = "{}/test_cases".format(target)
            _c_yaml = "{}/{}.yml".format(_c_path, _case['config']['name'])
            r_c_yaml = "../test_cases/{}.yml".format(_case['config']['name'])
            _case['config']['import'] = r_d_yaml
            make_dir(_c_path)
            with open(file=_c_yaml, mode='w', encoding='utf-8') as f:
                yaml.dump(data=_case,
                          stream=f,
                          encoding='utf-8',
                          allow_unicode=True)
            logger.debug(" - test case: {}".format(_c_yaml))
            suite['test_cases'][_cid] = r_c_yaml

        # generate test suite
        with open(file=_e_yaml, mode='w', encoding='utf-8') as f:
            yaml.dump(data=suite,
                      stream=f,
                      encoding='utf-8',
                      allow_unicode=True)
        logger.debug(" - test suite: {}".format(_e_yaml))
        logger.info("Done. You could get them in {}".format(target))
Example #10
    def har_to_case(self,
                    source,
                    target="ParrotProject",
                    include=None,
                    exclude=None,
                    validate_include=None,
                    validate_exclude=None,
                    auto_extract=False,
                    suite_name=None):
        """parse source har file and generate test cases
        :param source: source file
        :param target: target directory for case output
        :param include: list, not matched url would be ignored in recording
        :param exclude: list, matched url would be ignored in recording
        :param validate_include: list, not matched response would be ignored in validating
        :param validate_exclude: list, matched response would be ignored in validating
        :param auto_extract: bool, for automatic identification of interface dependencies
        :param suite_name: if given, only build and return the case for the specified suite; a new suite is created by default
        :return: suite dict (or case dict when suite_name is given)
        """
        if not (source and os.path.exists(source)):
            logger.error("Source file does not exist: {}".format(source))
            return False
        if not source.lower().endswith('.har'):
            logger.error("The source is not a har file: {}".format(source))
            return False
        logger.info("Start to parse source file: {}".format(source))

        content = self.__read_file(source)
        try:
            har_dict = json.loads(content)['log']['entries']
        except (TypeError, KeyError):
            # return early so har_dict is never referenced before assignment
            logger.error("HAR file content error: {}".format(source))
            return False

        case_dict = copy.deepcopy(self.case_tpl)
        case_dict['config']['name'] = get_file_name(file=source)
        for entry_dict in har_dict:
            step_dict = copy.deepcopy(self.step_tpl)
            self.__har_times(entry=entry_dict, step_dict=step_dict)
            if not self.__har_request(entry=entry_dict,
                                      step_dict=step_dict,
                                      include=include,
                                      exclude=exclude,
                                      auto_extract=auto_extract):
                continue
            if not self.__har_response(entry=entry_dict,
                                       step_dict=step_dict,
                                       include=validate_include,
                                       exclude=validate_exclude,
                                       auto_extract=auto_extract):
                continue
            logger.debug("test_step: {}".format(
                json.dumps(step_dict, ensure_ascii=False)))

            # add step into case
            case_dict['test_steps'].append(step_dict)
        if suite_name:
            return case_dict
        else:
            suite_dict = copy.deepcopy(self.suite_tpl)
            # add case into suite
            suite_dict['test_cases'].append(case_dict)
            suite_dict['config']['name'] = get_file_name(file=source)
            logger.info("Parse finished.")

            self.__generate_case(suite_dict, target)
            return suite_dict
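A minimal usage sketch; the class name `CaseParser` is an assumption, and treating the include/exclude entries as URL substrings is also an assumption (the matching happens in __if_include/__if_exclude, which are not shown here):

    parser = CaseParser()                          # assumed class holding har_to_case()
    suite = parser.har_to_case(source='recordings/login.har',
                               target='ParrotProject',
                               include=['/api/'],             # keep only matching urls
                               exclude=['.png', '.css'],      # drop static resources
                               validate_include=['content.code'],
                               auto_extract=True)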
Example #11
    def case_replace(self, suite_or_case, rules, target="ParrotProjectNew"):
        """
        :param suite_or_case: file or directory of test suites/cases/steps
        :param rules: replace rule list, key=>value or value1=>value2
        :param target: target directory for case output
        :return: suite dict
        """
        logger.info(
            "Start to load test suite or case: {}".format(suite_or_case))
        files = []
        if isinstance(suite_or_case, (list, tuple, set)):
            items = suite_or_case
        else:
            items = [
                format(suite_or_case),
            ]
        for item in items:
            if not os.path.exists(item):
                logger.warning("File {} does not exist, ignore.".format(item))
                continue
            if os.path.isdir(item):
                files.extend(get_dir_files(item))
            else:
                files.append(item)
        total = {}
        for _file in files:
            logger.info("Load case from file: {}".format(_file))
            if not (_file.endswith('yml') or _file.endswith('yaml')):
                logger.warning("Not a yaml file, ignore: {}".format(_file))
                continue
            try:
                _dict = yaml.full_load(stream=self.__read_file(_file))
            except ScannerError as e:
                logger.warning("Invalid yaml file: {}".format(e))
                continue
            logger.debug(" - yaml dict: {}".format(
                json.dumps(_dict, ensure_ascii=False)))

            if 'test_cases' in _dict:  # it's a test suite
                _tmp_suite = copy.deepcopy(self.suite_tpl)
                _tmp_suite.update(_dict)
                self.__match_rule(_yaml=_file, _dict=_tmp_suite, rules=rules)
                _path = re.findall(r"(.+)test_suites(.+)",
                                   get_file_path(_file))
                if _path:
                    total["{}/test_suites/{}/{}".format(
                        target, _path[0][1],
                        get_file_name(_file, ext=1))] = _tmp_suite
                else:
                    total["{}/test_suites/{}".format(
                        target, get_file_name(_file, ext=1))] = _tmp_suite
                _cases = []
                for _case in _tmp_suite['test_cases']:
                    _cases.append("{}/{}".format(get_file_path(_file), _case))
                if _tmp_suite['config']['import']:
                    _cases.append("{}/{}".format(
                        get_file_path(_file), _tmp_suite['config']['import']))
                if _cases:
                    self.case_replace(suite_or_case=_cases,
                                      target=target,
                                      rules=rules)
            elif 'test_steps' in _dict:  # it's a test case
                _tmp_case = copy.deepcopy(self.case_tpl)
                _tmp_case.update(_dict)
                self.__match_rule(_yaml=_file, _dict=_tmp_case, rules=rules)
                _path = re.findall(r"(.+)test_cases(.+)", get_file_path(_file))
                if _path:
                    total["{}/test_cases/{}/{}".format(
                        target, _path[0][1], get_file_name(_file,
                                                           ext=1))] = _tmp_case
                else:
                    total["{}/test_cases/{}".format(
                        target, get_file_name(_file, ext=1))] = _tmp_case
                _steps = []
                for _step in _tmp_case['test_steps']:
                    _steps.append("{}/{}".format(get_file_path(_file), _step))
                if _tmp_case['config']['import']:
                    _steps.append("{}/{}".format(
                        get_file_path(_file), _tmp_case['config']['import']))
                if _steps:
                    self.case_replace(suite_or_case=_steps,
                                      target=target,
                                      rules=rules)
            elif 'request' in _dict:  # it's a test step
                _tmp_step = copy.deepcopy(self.step_tpl)
                _tmp_step.update(_dict)
                self.__match_rule(_yaml=_file, _dict=_tmp_step, rules=rules)
                _path = re.findall(r"(.+)test_steps(.+)", get_file_path(_file))
                if _path:
                    total["{}/test_steps/{}/{}".format(
                        target, _path[0][1], get_file_name(_file,
                                                           ext=1))] = _tmp_step
                else:
                    total["{}/test_steps/{}".format(
                        target, get_file_name(_file, ext=1))] = _tmp_step
                if _tmp_step['config']['import']:
                    self.case_replace(suite_or_case="{}/{}".format(
                        get_file_path(_file), _tmp_step['config']['import']),
                                      target=target,
                                      rules=None)
            else:  # it's environment file
                _path = re.findall(r"(.+)environments(.+)",
                                   get_file_path(_file))
                if _path:
                    total["{}/environments/{}/{}".format(
                        target, _path[0][1], get_file_name(_file,
                                                           ext=1))] = _dict
                else:
                    total["{}/environments/{}".format(
                        target, get_file_name(_file, ext=1))] = _dict
        for _k, _v in total.items():
            make_dir(get_file_path(_k))
            with open(file=format(_k), mode='w', encoding='utf-8') as f:
                yaml.dump(data=_v,
                          stream=f,
                          encoding='utf-8',
                          allow_unicode=True)
            logger.info("Write file after replace: {}".format(_k))
        logger.info("Done. You could get them in {}".format(target))
Example #12
 def __parse_test_step(self, the_dict, base_path):
     self.__parse_environments(the_dict, base_path)
     logger.debug(" - test step: {}".format(
         json.dumps(the_dict, ensure_ascii=False)))
Example #13
    def run_cases(self, suite_or_case, environment=None, interval='ms', reset_after_case=False,
                  fail_stop=False, retry_times=0, retry_interval=100, output='.'):
        """
        :param suite_or_case: file or directory of test suites / cases / steps
        :param environment: environment flag defined in test data, 'None' - only load 'global' data
        :param interval: interval time(ms) between each step, use the recorded interval as default
        :param reset_after_case: reset runtime environment after each case or not, 'no' as default
        :param fail_stop: stop or not when a test step failed on validation, False as default
        :param retry_times: max retry times when a test step failed on validation, 0 as default
        :param retry_interval: retry interval(ms) when a test step failed on validation, 100 as default
        :param output: output path for report, '.' as default
        :return:
        """
        try:
            interval = float(interval)
        except ValueError:
            interval = 'ms'
        try:
            retry_times = int(retry_times)
        except ValueError:
            retry_times = 0
        try:
            retry_interval = int(retry_interval)
        except ValueError:
            retry_interval = 100

        # parse specified cases into dict
        items = self.parser.load_test_case(suite_or_case=suite_or_case, environment=environment)
        self.report['title'] = suite_or_case
        self.report['detail'] = items
        self.report['time']['start'] = now_ms()
        if not items:
            logger.error("Parsed {}, but get nothing.".format(suite_or_case))
            return -1
        for _sid, _suite in enumerate(items):
            self.report['summary']['suite']['total'] += 1
            _suite['_report_'] = {
                'id': _sid,
                'name': _suite['config']['name'],
                'status': True,
                'cases': {
                    'total': 0,
                    'pass': 0,
                    'fail': 0
                }
            }
            logger.info("Run test suite: {}".format(json.dumps(_suite, ensure_ascii=False)))

            # do hook actions before a suite
            logger.info(" - Do setup hook actions of the suite: {}".format(_suite['setup_hooks']))
            self.do_hook_actions(_suite['setup_hooks'])

            for _cid, _case in enumerate(_suite['test_cases']):
                self.report['summary']['case']['total'] += 1
                _suite['_report_']['cases']['total'] += 1
                _case['_report_'] = {
                    'id': _cid,
                    'name': _case['config']['name'],
                    'status': True,
                    'steps': {
                        'total': 0,
                        'pass': 0,
                        'fail': 0
                    }
                }
                logger.info("Run test case: {}".format(json.dumps(_case, ensure_ascii=False)))

                # do hook actions before a case
                logger.info(" - Do setup hook actions of the case: {}".format(_case['setup_hooks']))
                self.do_hook_actions(_case['setup_hooks'])

                for _tid, _step in enumerate(_case['test_steps']):
                    self.report['summary']['step']['total'] += 1
                    _case['_report_']['steps']['total'] += 1
                    _step['_report_'] = {
                        'id': _tid,
                        'name': _step['config']['name'],
                        'status': True
                    }
                    logger.info("Run test step: {}".format(json.dumps(_step, ensure_ascii=False)))

                    # do hook actions before a request
                    logger.info(" - Do setup hook actions of the step: {}".format(_step['setup_hooks']))
                    self.do_hook_actions(_step['setup_hooks'])

                    # handle variables, priority: suite > case > step
                    self.__set_variables(_step['config']['variables'])
                    self.__set_variables(_case['config']['variables'])
                    self.__set_variables(_suite['config']['variables'])
                    logger.info(" - Config variables of the step: {}".format(json.dumps(self.variables, ensure_ascii=False)))

                    # handle request interval
                    if not isinstance(interval, (int, float)):
                        if 'time.start' in _step['request']:  # use the recorded interval
                            _span = now_timestamp_ms() - int(_step['request']['time.start'])
                            if not self.req_span:
                                self.req_span = _span
                            _sleep = self.req_span - _span if self.req_span > _span else MINOR_INTERVAL_MS
                            # longer than MAX_INTERVAL_MS: treat it as a request from another batch
                            if _sleep > MAX_INTERVAL_MS:
                                _sleep = MINOR_INTERVAL_MS
                                self.req_span = _span  # reset span
                        else:  # no recorded interval, use default
                            _sleep = MINOR_INTERVAL_MS
                    else:  # use specified interval
                        _sleep = interval
                    if _sleep != MINOR_INTERVAL_MS:
                        logger.info(" - Break time, sleep for {} ms.".format(_sleep))
                    time.sleep(_sleep/1000.0)

                    try_flag = True
                    while try_flag:
                        # run this request
                        response = self.run_one_request(_step['request'])
                        _step['_report_']['request'] = response['request']
                        _step['_report_']['response'] = response['response']
                        _step['_report_']['time'] = response['time']
                        response['response']['time'] = response['time']

                        # extract specified variables
                        if 'extract' in _step['response'] and _step['response']['extract']:
                            logger.info(" - Extract variables: {}".format(_step['response']['extract']))
                            self.__extract_variable(extract=_step['response']['extract'], response=response['response'])
                            logger.debug(" - Variables after extract: {}".format(json.dumps(self.variables, ensure_ascii=False)))

                        # do response validation
                        _validate = self.do_validation(response=response['response'], rules=_step['validations'])
                        _step['_report_']['validation'] = _validate
                        if not _validate['status']:  # failed
                            logger.info(" - Test step validation failed")
                            if fail_stop:
                                try_flag = False
                                break
                            elif retry_times:
                                logger.info("Sleep {} ms and Run this test step again..".format(retry_interval))
                                retry_times -= 1
                                time.sleep(retry_interval*1.0/1000)
                            else:
                                break
                        else:
                            break
                    if _step['_report_']['validation']['status']:  # step pass
                        self.report['summary']['step']['pass'] += 1
                        _case['_report_']['steps']['pass'] += 1
                    else:
                        self.report['summary']['step']['fail'] += 1
                        _case['_report_']['steps']['fail'] += 1
                        _suite['_report_']['status'] = _case['_report_']['status'] = _step['_report_']['status'] = False

                    if not try_flag:  # need to stop
                        _suite['_report_']['cases']['fail'] += 1
                        self.report['summary']['case']['fail'] += 1
                        self.report['summary']['case']['pass'] = self.report['summary']['case']['total'] - \
                                                                 self.report['summary']['case']['fail']
                        self.report['summary']['suite']['fail'] += 1
                        self.report['summary']['suite']['pass'] = self.report['summary']['suite']['total'] - \
                                                                  self.report['summary']['suite']['fail']
                        self.report['time']['end'] = now_ms()
                        logger.info("Stop according to your --fail-stop argument")
                        return self.generate_report(output=output)

                    # do hook actions after a request
                    logger.info(" - Do teardown hook actions of the step: {}".format(_step['teardown_hooks']))
                    self.do_hook_actions(_step['teardown_hooks'])

                # do hook actions after a case
                logger.info(" - Do teardown hook actions of the case: {}".format(_case['teardown_hooks']))
                self.do_hook_actions(_case['teardown_hooks'])

                if reset_after_case:  # reset runtime environment after each case
                    logger.info("Reset runtime environment after the case")
                    self.__reset_env()

                if _case['_report_']['status']:  # case pass
                    _suite['_report_']['cases']['pass'] += 1
                    self.report['summary']['case']['pass'] += 1
                else:
                    _suite['_report_']['cases']['fail'] += 1
                    self.report['summary']['case']['fail'] += 1
                    _suite['_report_']['status'] = False

            # do hook actions after a suite
            logger.info(" - Do teardown hook actions of the suite: {}".format(_suite['teardown_hooks']))
            self.do_hook_actions(_suite['teardown_hooks'])

            # reset runtime environment after each suite
            logger.info("Reset runtime environment after the suite")
            self.__reset_env()

            if _suite['_report_']['status']:  # suite pass
                self.report['summary']['suite']['pass'] += 1
            else:
                self.report['summary']['suite']['fail'] += 1
            self.report['time']['end'] = now_ms()

        # generate report
        self.generate_report(output=output)
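A minimal usage sketch; `CaseRunner` is an assumed class name and the argument values are illustrative only:

    runner = CaseRunner()                          # assumed class holding run_cases()
    runner.run_cases(suite_or_case='ParrotProject/test_suites/login.yml',
                     environment='staging',
                     interval=200,                 # fixed 200 ms pause between steps
                     reset_after_case=True,
                     retry_times=1,
                     retry_interval=500,
                     output='reports')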
Example #14
 def __set_variables(self, variables):
     logger.debug(" - To set variables: {}".format(json.dumps(variables, ensure_ascii=False)))
     self.variables.update(self.__get_variables(variables))
     logger.debug(" - Variables after update: {}".format(json.dumps(self.variables, ensure_ascii=False)))