Пример #1
0
    def __get_real_value(self, variable):
        """Resolve ``${Variable}`` and ``${{Function()}}`` placeholders.

        Substitution repeats until no placeholder remains, so a variable
        whose value itself contains a placeholder is resolved as well.

        :param variable: value possibly containing placeholders
        :return: the resolved value; when the whole input is exactly one
                 placeholder, the substituted value keeps its native type
                 (otherwise it is str-replaced into the surrounding text)
        """
        _rs = re.compile(r"(\${([^{}]+)})")  # to match ${VariableA}
        _rf = re.compile(r"(\${{([^{}]+)}})")  # to match ${{Function(A)}}

        while _rs.search(format(variable)):
            for _pair in _rs.findall(format(variable)):
                _pair = list(_pair)
                if _pair[1] in self.variables:
                    _pair[1] = self.variables[_pair[1]]
                else:
                    logger.warning("Undefined variable: {}, set to ''".format(
                        _pair[1]))
                    _pair[1] = ''
                if variable == _pair[0]:
                    # whole value is a single placeholder: keep native type
                    variable = _pair[1]
                    break
                variable = variable.replace(_pair[0], str(_pair[1]))
        while _rf.search(format(variable)):
            for _pair in _rf.findall(format(variable)):
                _pair = list(_pair)
                try:
                    # NOTE: eval() executes expressions from test data —
                    # only trusted test definitions must reach this loader
                    _pair[1] = eval(_pair[1])
                except Exception as e:
                    # BUGFIX: the original `except SyntaxError or Exception`
                    # evaluated to `except SyntaxError` only, letting any
                    # other eval failure (NameError, TypeError, ...) escape
                    logger.warning(
                        "Invalid function: {}: {}, set to ''".format(
                            _pair[0], e))
                    _pair[1] = ''
                if variable == _pair[0]:
                    variable = _pair[1]
                    break
                variable = variable.replace(_pair[0], str(_pair[1]))
        return variable
Пример #2
0
    def __har_times(entry, step_dict):
        """Fill start time and elapsed time of a step from a HAR entry.

        startedDateTime formats observed in the wild:
          Chrome:  2019-07-24T03:42:07.867Z
          Fiddler: 2019-07-24T18:52:38.4367088+08:00
          Charles: 2019-07-31T14:21:35.033+08:00
          Other:   Wed, 30 Jan 2019 07:56:42

        :return: False when the entry lacks usable timing data
        """
        s_time = entry.get('startedDateTime')
        if not s_time:
            logger.warning(
                " * There is no startedDateTime in this entry: {}".format(
                    json.dumps(entry, ensure_ascii=False)))
            return False

        # some tools write 'time', others 'times'; prefer 'time'
        _elapsed = entry.get('time') or entry.get('times')
        if not _elapsed:
            logger.warning(" * There is no time in this entry: {}".format(
                json.dumps(entry, ensure_ascii=False)))
            return False
        i_time = int(round(_elapsed))

        # convert startedDateTime to timestamp
        step_dict['request']['time.start'] = har_time2timestamp(
            har_time=s_time, ms=1)
        step_dict['response']['time.spent'] = i_time
Пример #3
0
 def load_test_case(self, suite_or_case, environment=None):
     """
     :param suite_or_case: file or directory of test suites/cases/steps
     :param environment: environment defined in test data, None - use defined in suites/cases/steps
     :return: items of test suites/cases/steps
     """
     logger.info(
         "Start to load test suite or case: {}".format(suite_or_case))
     self.environment = environment
     items = []
     # a directory is expanded to all files inside it; a file stands alone
     if os.path.isdir(suite_or_case):
         files = get_dir_files(suite_or_case)
     else:
         files = [
             suite_or_case,
         ]
     for _file in files:
         logger.info("Load case from file: {}".format(_file))
         # test definitions must be YAML; skip anything else
         if not (_file.endswith('yml') or _file.endswith('yaml')):
             logger.warning("Not a yaml file, ignore: {}".format(_file))
             continue
         try:
             _dict = yaml.full_load(stream=self.__read_file(_file))
         except ScannerError as e:
             # broken YAML is logged and skipped, not fatal
             logger.warning("Invalid yaml file: {}".format(e))
             continue
         logger.debug(" - yaml dict: {}".format(
             json.dumps(_dict, ensure_ascii=False)))
         # fresh template copies so every level carries its default keys
         _tmp_suite = copy.deepcopy(self.suite_tpl)
         _tmp_case = copy.deepcopy(self.case_tpl)
         _tmp_step = copy.deepcopy(self.step_tpl)
         # the top-level key decides the document kind; cases and steps
         # are wrapped so the result is always a list of full suites
         if 'test_cases' in _dict:  # it's a test suite
             _tmp_suite.update(_dict)
             self.__parse_test_suite(the_dict=_tmp_suite,
                                     base_path=get_file_path(_file))
         elif 'test_steps' in _dict:  # it's a test case
             _tmp_case.update(_dict)
             self.__parse_test_case(the_dict=_tmp_case,
                                    base_path=get_file_path(_file))
             _tmp_suite['test_cases'].append(_tmp_case)
         else:  # it's a test step
             _tmp_step.update(_dict)
             self.__parse_test_step(the_dict=_tmp_step,
                                    base_path=get_file_path(_file))
             _tmp_case['test_steps'].append(_tmp_step)
             _tmp_suite['test_cases'].append(_tmp_case)
         logger.debug(" - one suite: {}".format(
             json.dumps(_tmp_suite, ensure_ascii=False)))
         items.append(_tmp_suite)
     logger.info("Done.")
     logger.debug("The test suites are: {}".format(
         json.dumps(items, ensure_ascii=False)))
     return items
Пример #4
0
 def __parse_test_suite(self, the_dict, base_path):
     """Resolve every case reference of a suite into a full case dict.

     Environment/variable config is parsed first; then each entry of
     ``the_dict['test_cases']`` (a relative file reference) is replaced
     in place by the loaded case merged over the case template.
     """
     self.__parse_environments(the_dict, base_path)
     for _i, _ref in enumerate(the_dict['test_cases']):
         logger.debug(" - case {} in suite: {}".format(_i, _ref))
         # references like '../cases/x.yml' are relative to the suite file
         if _ref.startswith('..'):
             _ref = "{}/{}".format(base_path, _ref)
         # install a template copy first, so a broken file still leaves
         # a well-formed default case behind
         the_dict['test_cases'][_i] = copy.deepcopy(self.case_tpl)
         try:
             the_dict['test_cases'][_i].update(
                 yaml.full_load(self.__read_file(_ref)))
         except ScannerError as e:
             logger.warning("Invalid yaml file: {}".format(e))
             continue
         logger.debug(" - case info: {}".format(
             json.dumps(the_dict['test_cases'][_i],
                        ensure_ascii=False)))
         self.__parse_test_case(the_dict=the_dict['test_cases'][_i],
                                base_path=get_file_path(_ref))
Пример #5
0
 def __parse_environments(self, the_dict, base_path):
     """Load the imported environment file and merge its variables.

     Variable precedence (low -> high): the env file's 'global'
     section, the selected environment's section, then the variables
     defined directly in the suite/case config.
     """
     _cfg = the_dict['config']
     if _cfg.get('import'):
         # relative import paths are resolved against the current file
         if _cfg['import'].startswith('..'):
             _cfg['import'] = "{}/{}".format(base_path, _cfg['import'])
         try:
             _env_dict = yaml.full_load(
                 stream=self.__read_file(_cfg['import']))
             _cfg['import'] = _env_dict
             _merged = copy.deepcopy(
                 _env_dict['global']) if 'global' in _env_dict else {}
             # env priority: argument > config
             _selected = self.environment if self.environment else _cfg[
                 'environment']
             if _selected in _env_dict:
                 _merged.update(_env_dict[_selected])
             _merged.update(_cfg['variables'])
             _cfg['variables'] = _merged
         except ScannerError as e:
             logger.warning("Invalid yaml file: {}".format(e))
     logger.debug(" - config variables: {}".format(
         json.dumps(the_dict['config']['variables'], ensure_ascii=False)))
Пример #6
0
    def __har_response(self,
                       entry,
                       step_dict,
                       include,
                       exclude,
                       auto_extract=False):
        """Translate the 'response' section of a HAR entry into step_dict.

        Fills status, headers, cookies and content, and appends 'eq'
        validations for keys fuzzy-matched by *include* and not by
        *exclude*.  With *auto_extract*, string values of at least
        IDENTIFY_LEN characters are recorded in self.variables and
        step_dict['response']['extract'] as candidates for inter-step
        dependencies.

        :param entry: one HAR log entry (dict)
        :param step_dict: step template filled in place
        :param include: keys whose values become validations
        :param exclude: keys skipped in validations
        :param auto_extract: register long string values for extraction
        :return: True on success; False when the response is missing or
                 its content cannot be decoded
        """
        if not ('response' in entry.keys() and entry['response']):
            logger.warning(" * There is no response in this entry: {}".format(
                json.dumps(entry, ensure_ascii=False)))
            return False
        _rsp = entry['response']

        # get status; always validated via status.code
        step_dict['response']['status'] = _rsp.get('status', 200)
        step_dict['validations'].append(
            {"eq": {
                'status.code': step_dict['response']['status']
            }})

        # get headers
        step_dict['response']['headers'] = {}
        self.__har_headers(_rsp.get('headers'),
                           step_dict['response']['headers'], VALIDATE_HEADERS)
        # decide which header keys are validated (fuzzy match)
        __headers = get_all_kv_pairs(item=step_dict['response']['headers'],
                                     prefix='headers')
        _vin = get_matched_keys(key=include,
                                keys=list(__headers.keys()),
                                fuzzy=1)
        _vex = get_matched_keys(key=exclude,
                                keys=list(__headers.keys()),
                                fuzzy=1) if exclude else []
        for _k, _v in step_dict['response']['headers'].items():
            _k = "headers.{}".format(_k)
            # Extracting temporary variables for automatic identification of interface dependencies
            if auto_extract and isinstance(_v,
                                           str) and len(_v) >= IDENTIFY_LEN:
                if _v not in self.variables.keys():
                    # keyed by value: later requests carrying this value
                    # can be linked back to this extraction
                    self.variables[_v] = {'key': _k, 'flag': 0}
                    step_dict['response']['extract'][_v] = _k

            if _k in _vin and _k not in _vex:
                step_dict['validations'].append({"eq": {_k: _v}})

        logger.debug(" - self.variables: {}".format(
            json.dumps(self.variables, ensure_ascii=False)))

        # get cookies
        step_dict['response']['cookies'] = {}
        self.__har_cookies(_rsp.get('cookies'),
                           step_dict['response']['cookies'])

        # get content: text body, mime type and transfer encoding
        try:
            _text = _rsp.get('content').get('text', '')
            _mime = _rsp.get('content').get('mimeType') or ''
            _code = _rsp.get('content').get('encoding')
        except AttributeError:
            logger.warning(" * Invalid response content: {}".format(
                _rsp.get('content')))
            return False
        # HAR carries binary/opaque bodies base64-encoded
        if _code and _code == 'base64':
            try:
                _text = base64.b64decode(_text).decode('utf-8')
            except UnicodeDecodeError as e:
                logger.warning(" * Decode error: {}".format(e))
        elif _code:
            logger.warning(" * Unsupported encoding method: {}".format(_code))
            return False
        logger.debug(" - mimeType: {}, encoding: {}".format(_mime, _code))
        logger.debug(" - content text: {}".format(_text))
        if _mime.startswith('application/json'):  # json => dict
            try:
                step_dict['response']['content'] = json.loads(_text)
                # extract all content values into validations
                logger.debug(" - validation include: {}, exclude: {}".format(
                    include, exclude))
                _pairs = get_all_kv_pairs(item=json.loads(_text),
                                          prefix='content')
                _vin = get_matched_keys(key=include,
                                        keys=list(_pairs.keys()),
                                        fuzzy=1)
                _vex = get_matched_keys(key=exclude,
                                        keys=list(_pairs.keys()),
                                        fuzzy=1) if exclude else []
                for _k, _v in _pairs.items():
                    if isinstance(_v, str):
                        # _v = "__break_line__".join(_v.split("\n"))
                        # newlines are replaced with a marker token,
                        # presumably undone when validations run — confirm
                        _v = _v.replace("\r\n", '__break_line__').replace(
                            "\n", '__break_line__')
                    # Extracting temporary variables for automatic identification of interface dependencies
                    if auto_extract and isinstance(
                            _v, str) and len(_v) >= IDENTIFY_LEN:
                        if _v not in self.variables.keys():
                            self.variables[_v] = {'key': _k, 'flag': 0}
                            step_dict['response']['extract'][_v] = _k
                    if _k in _vin and _k not in _vex:
                        step_dict['validations'].append({"eq": {_k: _v}})

            except json.decoder.JSONDecodeError:
                # invalid JSON body: keep the step, just without
                # content validations
                logger.warning(
                    " * Invalid response content in json: {}".format(_text))
                # sys.exit(-1)
        elif _mime.startswith('text/html'):  # TODO: html => dom tree, xpath
            pass
        else:
            logger.warning(" * Unsupported mimeType: {}".format(_mime))
            # step_dict['validations'].append({"eq": {'content': _text}})
        logger.debug(" - validations: {}".format(
            json.dumps(step_dict['validations'], ensure_ascii=False)))
        logger.debug(" - self.variables: {}".format(
            json.dumps(self.variables, ensure_ascii=False)))

        return True
Пример #7
0
    def __har_request(self,
                      entry,
                      step_dict,
                      include,
                      exclude,
                      auto_extract=False):
        """Translate the 'request' section of a HAR entry into step_dict.

        Fills method, protocol/host/url, params or data, headers and
        cookies.  Requests whose URL fails the include/exclude filters
        are dropped.

        :param entry: one HAR log entry (dict)
        :param step_dict: step template filled in place
        :param include: URL substrings that must match to keep the request
        :param exclude: URL substrings that discard the request
        :param auto_extract: register parameter values as variables for
                             interface-dependency identification
        :return: True when the request was parsed and kept, False when it
                 is missing, malformed or filtered out
        """
        if not ('request' in entry.keys() and entry['request']):
            logger.warning(" * There is no request in this entry: {}".format(
                json.dumps(entry, ensure_ascii=False)))
            return False
        _req = entry['request']

        # get method
        step_dict['request']['method'] = _req.get('method', 'GET')

        # get url: protocol, host, url path
        _url = _req.get('url', "")
        # logger.info(" Get a {} request: {}".format(step_dict['request']['method'], _url))

        try:
            # split into (whole-without-query, protocol, host, path);
            # the trailing query string is captured but discarded here
            (_whole_url, step_dict['request']['protocol'],
             step_dict['request']['host'], step_dict['request']['url'],
             _) = re.findall(r"((http\w*)://([\w.:]+)([^?]+))\??(.*)", _url)[0]
            step_dict['config']['name'] = step_dict['request']['url']
            logger.debug(" - protocol: {} host: {} url: {}".format(
                step_dict['request']['protocol'], step_dict['request']['host'],
                step_dict['request']['url']))
            logger.info(" Get a {} request: {}".format(
                step_dict['request']['method'], step_dict['request']['url']))
        except IndexError:
            # findall returned no match: URL does not fit the pattern
            logger.warning(" * Invalid url: {}".format(_url))
            return False

        # filter with include and exclude options
        logger.debug(" - include: {} exclude: {}".format(include, exclude))
        if not self.__if_include(_whole_url, include) or self.__if_exclude(
                _whole_url, exclude):
            logger.info(" According to include/exclude options, ignore it")
            return False

        # get parameters
        # it may have both queryString and postData in an unusual post request
        step_dict['request']['params'] = {}
        step_dict['request']['data'] = {}
        _param = _req.get('queryString', [])
        _data = _req.get('postData', [])
        if _data:
            # postData is either a name/value list ('params') or raw text
            if 'params' in _req.get('postData'):
                _data = _req.get('postData').get('params')
            else:
                _data = _req.get('postData').get('text')
            # if 'mimeType' in _req.get('postData') and _req.get('postData').get('mimeType') == 'application/json':
            #     _tmp = json.loads(_data)
            #     _data = []
            #     for _tk, _tv in _tmp.items():
            #         _data.append({'name': _tk, 'value': _tv})
        logger.debug(" - params: {}".format(_param))
        logger.debug(" - data: {}".format(_data))

        # extract all parameter values into variables, and keep {value} in parameters
        if isinstance(_param, (list, tuple, set)):
            for _item in _param:
                self.__har_extract(step_dict, _item['name'], _item['value'],
                                   'params', auto_extract)
        else:
            # step_dict['request']['params'] = _param
            # non-list payload (e.g. raw text) is passed through whole
            self.__har_extract(step_dict, '', _param, 'params', auto_extract)
        if isinstance(_data, (list, tuple, set)):
            for _item in _data:
                self.__har_extract(step_dict, _item['name'], _item['value'],
                                   'data', auto_extract)
        else:
            # step_dict['request']['data'] = _data
            self.__har_extract(step_dict, '', _data, 'data', auto_extract)
        logger.debug(" - self.variables: {}".format(
            json.dumps(self.variables, ensure_ascii=False)))

        # get headers
        step_dict['request']['headers'] = {}
        self.__har_headers(_req.get('headers'),
                           step_dict['request']['headers'], RECORD_HEADERS,
                           auto_extract)
        logger.debug(" - headers: {}".format(
            json.dumps(step_dict['request']['headers'], ensure_ascii=False)))

        # get cookies
        step_dict['request']['cookies'] = {}
        self.__har_cookies(_req.get('cookies'),
                           step_dict['request']['cookies'], auto_extract)
        logger.debug(" - cookies: {}".format(
            json.dumps(step_dict['request']['cookies'], ensure_ascii=False)))

        return True
Пример #8
0
    def source_to_case(self,
                       source,
                       target="ParrotProject",
                       include=None,
                       exclude=None,
                       validate_include=None,
                       validate_exclude=None,
                       auto_extract=False):
        """
        :param source: source file or directory
        :param target: target directory for case output
        :param include: list, not matched url would be ignored in recording
        :param exclude: list, matched url would be ignored in recording
        :param validate_include: list, not matched response would be ignored in validating
        :param validate_exclude: list, matched response would be ignored in validating
        :param auto_extract: bool, for automatic identification of interface dependencies
        :return suite dict
        """
        source = format(source).strip()
        if not (source and os.path.exists(source)):
            logger.error(
                "Source file or directory does not exist: {}".format(source))
            sys.exit(-1)

        # a trailing separator means the suite name comes from the last
        # directory component instead of the file name
        if source.endswith(("/", "\\")):
            suite_name = get_file_name(get_file_path(source))
        else:
            suite_name = get_file_name(source)

        files = get_dir_files(source) if os.path.isdir(source) else [source]

        suite_dict = copy.deepcopy(self.suite_tpl)
        suite_dict['config']['name'] = suite_name

        logger.info(
            "Start to parse cases from source files: {}".format(source))

        for _src in files:
            # only HAR recordings are supported for now
            if not _src.lower().endswith('.har'):
                logger.warning(
                    "Unsupported file extension: {}, ignore".format(_src))
                continue
            one_case = self.har_to_case(_src, target, include, exclude,
                                        validate_include, validate_exclude,
                                        auto_extract, suite_name)

            # add case into suite
            suite_dict['test_cases'].append(one_case)
            logger.info("Parse finished.")

        self.__generate_case(suite_dict, target)
        return suite_dict
Пример #9
0
    def case_replace(self, suite_or_case, rules, target="ParrotProjectNew"):
        """
        :param suite_or_case: file or directory of test suites/cases/steps
        :param rules: replace rule list, key=>value or value1=>value2
        :param target: target directory for case output
        :return: suite dict

        NOTE(review): no return statement is visible in this body — the
        results are written to files under *target*; confirm whether the
        ':return: suite dict' line is stale.
        """
        logger.info(
            "Start to load test suite or case: {}".format(suite_or_case))
        # normalize the input into a flat list of existing files
        files = []
        if isinstance(suite_or_case, (list, tuple, set)):
            items = suite_or_case
        else:
            items = [
                format(suite_or_case),
            ]
        for item in items:
            if not os.path.exists(item):
                logger.warning("File {} does not exist, ignore.".format(item))
                continue
            if os.path.isdir(item):
                files.extend(get_dir_files(item))
            else:
                files.append(item)
        # total maps output file path (under target) -> replaced dict
        total = {}
        for _file in files:
            logger.info("Load case from file: {}".format(_file))
            if not (_file.endswith('yml') or _file.endswith('yaml')):
                logger.warning("Not a yaml file, ignore: {}".format(_file))
                continue
            try:
                _dict = yaml.full_load(stream=self.__read_file(_file))
            except ScannerError as e:
                logger.warning("Invalid yaml file: {}".format(e))
                continue
            logger.debug(" - yaml dict: {}".format(
                json.dumps(_dict, ensure_ascii=False)))

            if 'test_cases' in _dict:  # it's a test suite
                _tmp_suite = copy.deepcopy(self.suite_tpl)
                _tmp_suite.update(_dict)
                self.__match_rule(_yaml=_file, _dict=_tmp_suite, rules=rules)
                # preserve the sub-path below 'test_suites' in the output
                _path = re.findall(r"(.+)test_suites(.+)",
                                   get_file_path(_file))
                if _path:
                    total["{}/test_suites/{}/{}".format(
                        target, _path[0][1],
                        get_file_name(_file, ext=1))] = _tmp_suite
                else:
                    total["{}/test_suites/{}".format(
                        target, get_file_name(_file, ext=1))] = _tmp_suite
                # recurse into referenced cases and the imported env file
                _cases = []
                for _case in _tmp_suite['test_cases']:
                    _cases.append("{}/{}".format(get_file_path(_file), _case))
                if _tmp_suite['config']['import']:
                    _cases.append("{}/{}".format(
                        get_file_path(_file), _tmp_suite['config']['import']))
                if _cases:
                    self.case_replace(suite_or_case=_cases,
                                      target=target,
                                      rules=rules)
            elif 'test_steps' in _dict:  # it's a test case
                _tmp_case = copy.deepcopy(self.case_tpl)
                _tmp_case.update(_dict)
                self.__match_rule(_yaml=_file, _dict=_tmp_case, rules=rules)
                _path = re.findall(r"(.+)test_cases(.+)", get_file_path(_file))
                if _path:
                    total["{}/test_cases/{}/{}".format(
                        target, _path[0][1], get_file_name(_file,
                                                           ext=1))] = _tmp_case
                else:
                    total["{}/test_cases/{}".format(
                        target, get_file_name(_file, ext=1))] = _tmp_case
                # recurse into referenced steps and the imported env file
                _steps = []
                for _step in _tmp_case['test_steps']:
                    _steps.append("{}/{}".format(get_file_path(_file), _step))
                if _tmp_case['config']['import']:
                    _steps.append("{}/{}".format(
                        get_file_path(_file), _tmp_case['config']['import']))
                if _steps:
                    self.case_replace(suite_or_case=_steps,
                                      target=target,
                                      rules=rules)
            elif 'request' in _dict:  # it's a test step
                _tmp_step = copy.deepcopy(self.step_tpl)
                _tmp_step.update(_dict)
                self.__match_rule(_yaml=_file, _dict=_tmp_step, rules=rules)
                _path = re.findall(r"(.+)test_steps(.+)", get_file_path(_file))
                if _path:
                    total["{}/test_steps/{}/{}".format(
                        target, _path[0][1], get_file_name(_file,
                                                           ext=1))] = _tmp_step
                else:
                    total["{}/test_steps/{}".format(
                        target, get_file_name(_file, ext=1))] = _tmp_step
                if _tmp_step['config']['import']:
                    # NOTE(review): rules=None here, unlike the suite/case
                    # branches which pass rules=rules — the imported env
                    # file is copied without replacement; confirm intended
                    self.case_replace(suite_or_case="{}/{}".format(
                        get_file_path(_file), _tmp_step['config']['import']),
                                      target=target,
                                      rules=None)
            else:  # it's environment file
                # environment files get no rule replacement, only relocation
                _path = re.findall(r"(.+)environments(.+)",
                                   get_file_path(_file))
                if _path:
                    total["{}/environments/{}/{}".format(
                        target, _path[0][1], get_file_name(_file,
                                                           ext=1))] = _dict
                else:
                    total["{}/environments/{}".format(
                        target, get_file_name(_file, ext=1))] = _dict
        # write every produced document into the target tree
        for _k, _v in total.items():
            make_dir(get_file_path(_k))
            with open(file=format(_k), mode='w', encoding='utf-8') as f:
                yaml.dump(data=_v,
                          stream=f,
                          encoding='utf-8',
                          allow_unicode=True)
            logger.info("Write file after replace: {}".format(_k))
        logger.info("Done. You could get them in {}".format(target))