Code Example #1
class RpManager:
    def __init__(self, config, strategy):
        self.url = config.get('rp_endpoint')
        self.uuid = config.get('rp_uuid')
        self.project = config.get('rp_project')
        self.launch_description = config.get('launch_description')
        self.launch_tags = config.get('launch_tags').split()
        self.upload_xunit = config.get('upload_xunit')
        self.update_headers = {
            'Authorization': 'bearer %s' % self.uuid,
            'Accept': 'application/json',
            'Cache-Control': 'no-cache',
            'content-type': 'application/json',
        }
        self.import_headers = {
            'Authorization': 'bearer %s' % self.uuid,
            'Accept': 'application/json',
            'Cache-Control': 'no-cache',
        }
        self.launch_url = "{url}/api/v1/{project_name}/launch/%s".format(
            url=self.url, project_name=self.project)
        self.launch_public_url = "{url}/ui/#{project_name}/launches/all/%s".format(
            url=self.url, project_name=self.project)
        self.launch_id = ''
        self.xunit_feed = config.get('xunit_feed')
        self.launch_name = config.get('launch_name', 'rp_cli-launch')
        self.strategy = strategy
        self.service = ReportPortalServiceAsync(
            endpoint=self.url,
            project=self.project,
            token=self.uuid,
            error_handler=self.strategy.my_error_handler)
        self.test_logs = config.get('test_logs')
        self.zipped = config.get('zipped')
        self.test_owners = config.get('test_owners', {})

    @staticmethod
    def _check_return_code(req):
        if req.status_code != 200:
            logger.error('Something went wrong; status code: %s; MSG: %s',
                         req.status_code,
                         req.json()['message'])
            sys.exit(1)

    def _import_results(self):
        with open(self.upload_xunit, 'rb') as xunit_file:
            files = {'file': xunit_file}
            req = requests.post(self.launch_url % "import",
                                headers=self.import_headers,
                                files=files)

        self._check_return_code(req)
        response = req.json()
        logger.info("Import is done successfully")
        response_msg = response['msg'].encode('ascii', 'ignore')
        logger.info('Status code: %s; %s', req.status_code, response_msg)

        # returning the launch_id
        return response_msg.split()[4]

    def _verify_upload_succeeded(self, launch_id):
        launch_id_url = self.launch_url % launch_id
        req = requests.get(launch_id_url, headers=self.update_headers)
        self._check_return_code(req)
        logger.info('Launch has been created successfully')
        return True

    def _update_launch_description_and_tags(self, launch_id):
        update_url = self.launch_url % launch_id + "/update"

        data = {
            "description": self.launch_description,
            "tags": self.launch_tags
        }

        req = requests.put(url=update_url,
                           headers=self.update_headers,
                           data=json.dumps(data))
        self._check_return_code(req)
        logger.info(
            'Launch description %s and tags %s were updated for launch id %s',
            self.launch_description, self.launch_tags, launch_id)

    def import_results(self):
        self.launch_id = self._import_results()
        self._verify_upload_succeeded(self.launch_id)
        self._update_launch_description_and_tags(self.launch_id)

    def _start_launch(self):
        return self.service.start_launch(name=self.launch_name,
                                         start_time=timestamp(),
                                         description=self.launch_description,
                                         tags=self.launch_tags)

    def _end_launch(self):
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()
        self.launch_id = self.service.rp_client.launch_id

    def _upload_attachment(self, file, name):
        with open(file, "rb") as fh:
            attachment = {
                "name": name,
                "data": fh.read(),
                "mime": guess_type(file)[0]
            }
            self.service.log(timestamp(), name, "INFO", attachment)

    def upload_test_case_attachments(self, path):
        for root, dirs, files in os.walk(path):
            for log_file in files:
                file_name = os.path.join(root, log_file)
                self._upload_attachment(file_name, log_file)

    def upload_zipped_test_case_attachments(self, zip_file_name, path):
        whole_path = os.path.join(self.test_logs, path)
        try:
            ld = os.listdir(whole_path)
        except OSError:
            logger.warning("Path (%s) with log files does not exist!" %
                           (whole_path, ))
            return
        # check if there is something to zip
        if len(ld) > 0:
            zip_file_name = shutil.make_archive(zip_file_name, 'zip',
                                                whole_path)
            self._upload_attachment(zip_file_name,
                                    os.path.basename(zip_file_name))
            os.remove(zip_file_name)

        else:
            logger.warning("There are no logs on the path (%s)!" %
                           (whole_path, ))

    def _log_message_to_rp_console(self, msg, level):
        self.service.log(time=timestamp(), message=msg, level=level)

    def _process_failed_case(self, case):
        msg = self.strategy.extract_failure_msg_from_xunit(case)
        self._log_message_to_rp_console(msg, "ERROR")

    def store_launch_info(self, dest):
        launch_url = self.launch_public_url % self.launch_id
        json_data = {
            "rp_launch_url": launch_url,
            "rp_launch_name": self.launch_name,
            "rp_launch_tags": self.launch_tags,
            "rp_launch_desc": self.launch_description,
            "rp_launch_id": self.launch_id
        }
        with open(dest, "w") as file:
            json.dump(json_data, file)

    def attach_logs_to_failed_case(self, case):
        path_to_logs_per_test = self.strategy.get_logs_per_test_path(case)

        if self.zipped:
            # zip logs per test and upload zip file
            self.upload_zipped_test_case_attachments(
                "{0}".format(case.get('@name')), path_to_logs_per_test)
        else:
            # upload logs per tests one by one and do not zip them
            self.upload_test_case_attachments("{0}/{1}".format(
                self.test_logs, path_to_logs_per_test))

    def _open_new_folder(self, folder_name):
        self.service.start_test_item(
            name=folder_name,
            start_time=timestamp(),
            item_type="SUITE",
        )

    def _close_folder(self):
        self.service.finish_test_item(end_time=timestamp(), status=None)

    def feed_results(self):
        self._start_launch()

        with open(self.xunit_feed) as fd:
            data = xmltodict.parse(fd.read())

        xml = data.get("testsuite").get("testcase")

        # if there is only 1 test case, convert 'xml' from dict to list
        # otherwise, 'xml' is always list
        if not isinstance(xml, list):
            xml = [xml]

        xml = sorted(xml, key=lambda k: k['@classname'])

        for case in xml:
            issue = None
            name = self.strategy.get_testcase_name(case)
            description = self.strategy.get_testcase_description(case)
            tags = self.strategy.get_tags(case, test_owners=self.test_owners)

            if self.strategy.should_create_folders_in_launch():
                open_new_folder, folder_name = self.strategy.create_folder(
                    case)
                if self.strategy.is_first_folder():
                    if open_new_folder:
                        self._open_new_folder(folder_name)
                elif open_new_folder:  # in case a new folder should be open, need to close last one and open new one
                    self._close_folder()
                    self._open_new_folder(folder_name)

            self.service.start_test_item(
                name=name[:255],
                description=description,
                tags=tags,
                start_time=timestamp(),
                item_type="STEP",
            )
            # Create text log message with INFO level.
            if case.get('system_out'):
                self._log_message_to_rp_console(case.get('system_out'), "INFO")

            if 'skipped' in case:
                issue = {
                    "issue_type": "NOT_ISSUE"
                }  # this will cause skipped test to not be "To Investigate"
                status = 'SKIPPED'
                if case.get('skipped'):
                    self._log_message_to_rp_console(
                        case.get('skipped').get('@message'), "DEBUG")
                else:
Code Example #2
class PyTestServiceClass(with_metaclass(Singleton, object)):
    def __init__(self):
        self.RP = None
        self.ignore_errors = True
        self.ignored_tags = []

        self._errors = queue.Queue()
        self._loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')

    def init_service(self, endpoint, project, uuid, log_batch_size,
                     ignore_errors, ignored_tags):
        self._errors = queue.Queue()
        if self.RP is None:
            self.ignore_errors = ignore_errors
            self.ignored_tags = ignored_tags
            logging.debug('ReportPortal - Init service: endpoint=%s, '
                          'project=%s, uuid=%s', endpoint, project, uuid)
            self.RP = ReportPortalServiceAsync(
                endpoint=endpoint,
                project=project,
                token=uuid,
                error_handler=self.async_error_handler,
                log_batch_size=log_batch_size
            )
        else:
            logging.debug('The pytest is already initialized')
        return self.RP

    def async_error_handler(self, exc_info):
        self.terminate_service()
        self.RP = None
        self._errors.put_nowait(exc_info)

    def _stop_if_necessary(self):
        try:
            exc, msg, tb = self._errors.get(False)
            traceback.print_exception(exc, msg, tb)
            sys.stderr.flush()
            if not self.ignore_errors:
                pytest.exit(msg)
        except queue.Empty:
            pass

    def terminate_service(self):
        if self.RP is not None:
            self.RP.terminate()

    def start_launch(
            self, launch_name, mode=None, tags=None, description=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        sl_pt = {
            'name': launch_name,
            'start_time': timestamp(),
            'description': description,
            'mode': mode,
            'tags': tags
        }
        logging.debug('ReportPortal - Start launch: '
                      'request_body=%s', sl_pt)
        req_data = self.RP.start_launch(**sl_pt)
        logging.debug('ReportPortal - Launch started: '
                      'response_body=%s', req_data)

    def start_pytest_item(self, test_item=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        start_rq = {
            'name': self._get_full_name(test_item),
            'description': self._get_description(test_item),
            'tags': self._get_tags(test_item),
            'start_time': timestamp(),
            'item_type': 'STEP'
        }

        logging.debug('ReportPortal - Start TestItem: '
                      'request_body=%s', start_rq)
        self.RP.start_test_item(**start_rq)

    def _get_tags(self, item):
        # Try to extract names of @pytest.mark.* decorators used for test item
        # and exclude those present in the rp_ignore_tags parameter
        markers_list = []
        for k in item.keywords:
            if not item.get_marker(k) or k in self.ignored_tags:
                continue
            # simple MarkDecorator
            if not item.get_marker(k).args and not item.get_marker(k).kwargs:
                markers_list.append(k)
            # parametrized MarkDecorator
            if item.get_marker(k).args:
                for marker_arg in item.get_marker(k).args:
                    markers_list.append("%s(%s)" % (k, marker_arg))
            # parametrized MarkDecorator with kwargs
            if item.get_marker(k).kwargs:
                for mrk_key, mrk_value in item.get_marker(k).kwargs.items():
                    markers_list.append("%s(%s=%s)" % (k, mrk_key, mrk_value))
        return markers_list

    def finish_pytest_item(self, status, issue=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        fta_rq = {
            'end_time': timestamp(),
            'status': status,
            'issue': issue
        }

        logging.debug('ReportPortal - Finish TestItem: '
                      'request_body=%s', fta_rq)
        self.RP.finish_test_item(**fta_rq)

    def finish_launch(self, launch=None, status='rp_launch'):
        self._stop_if_necessary()
        if self.RP is None:
            return

        # To finish the launch session, a status string parameter is needed
        fl_rq = {
            'end_time': timestamp(),
            'status': status
        }
        logging.debug('ReportPortal - Finish launch: request_body=%s', fl_rq)
        self.RP.finish_launch(**fl_rq)

    def post_log(self, message, loglevel='INFO', attachment=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        if loglevel not in self._loglevels:
            logging.warning('Incorrect loglevel = %s. Force set to INFO. '
                            'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            'time': timestamp(),
            'message': message,
            'level': loglevel,
            'attachment': attachment,
        }
        self.RP.log(**sl_rq)

    @staticmethod
    def _get_full_name(test_item):
        return test_item.nodeid

    @staticmethod
    def _get_description(test_item):
        try:
            # for common items
            return test_item.function.__doc__
        except AttributeError:
            # doctest has no `function` attribute
            return test_item.reportinfo()[2]
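
The listing above is the core of a pytest ReportPortal agent: init_service() builds the async client, start_launch()/finish_launch() bracket the run, and start_pytest_item()/finish_pytest_item() bracket each test. Below is a minimal, hypothetical sketch of how it could be driven from a conftest.py, assuming the class above is importable; the endpoint values, tags, and the module-level PyTestService name are placeholders, not part of the source.

import pytest

PyTestService = PyTestServiceClass()  # Singleton metaclass: every call yields the same instance


def pytest_sessionstart(session):
    PyTestService.init_service(
        endpoint='https://rp.example.com',      # placeholder values
        project='my_project',
        uuid='xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
        log_batch_size=20,
        ignore_errors=True,
        ignored_tags=['usefixtures'],
    )
    PyTestService.start_launch('nightly', tags=['smoke'],
                               description='Nightly regression run')


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item, nextitem):
    PyTestService.start_pytest_item(item)
    yield  # the test itself runs here
    # A real agent would derive the status from the test report; 'PASSED' is a placeholder.
    PyTestService.finish_pytest_item('PASSED')


def pytest_sessionfinish(session):
    PyTestService.finish_launch()
    PyTestService.terminate_service()
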
Code Example #3
class PyTestServiceClass(with_metaclass(Singleton, object)):

    def __init__(self):
        self.RP = None
        try:
            pkg_resources.get_distribution('reportportal_client >= 3.2.0')
            self.RP_SUPPORTS_PARAMETERS = True
        except pkg_resources.VersionConflict:
            self.RP_SUPPORTS_PARAMETERS = False

        self.ignore_errors = True
        self.ignored_tags = []

        self._errors = queue.Queue()
        self._loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')
        self._start_stack = []
        self._finish_stack = []

    def init_service(self, endpoint, project, uuid, log_batch_size,
                     ignore_errors, ignored_tags):
        self._errors = queue.Queue()
        if self.RP is None:
            self.ignore_errors = ignore_errors
            if self.RP_SUPPORTS_PARAMETERS:
                self.ignored_tags = list(set(ignored_tags).union({'parametrize'}))
            else:
                self.ignored_tags = ignored_tags
            log.debug('ReportPortal - Init service: endpoint=%s, '
                      'project=%s, uuid=%s', endpoint, project, uuid)
            self.RP = ReportPortalServiceAsync(
                endpoint=endpoint,
                project=project,
                token=uuid,
                error_handler=self.async_error_handler,
                log_batch_size=log_batch_size
            )
        else:
            log.debug('The pytest is already initialized')
        return self.RP

    def async_error_handler(self, exc_info):
        self.terminate_service(nowait=True)
        self.RP = None
        self._errors.put_nowait(exc_info)

    def terminate_service(self, nowait=False):
        if self.RP is not None:
            self.RP.terminate(nowait)
            self.RP = None

    def start_launch(self, launch_name,
                     mode=None,
                     tags=None,
                     description=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        sl_pt = {
            'name': launch_name,
            'start_time': timestamp(),
            'description': description,
            'mode': mode,
            'tags': tags
        }
        log.debug('ReportPortal - Start launch: request_body=%s', sl_pt)
        req_data = self.RP.start_launch(**sl_pt)
        log.debug('ReportPortal - Launch started: response_body=%s', req_data)


    def collect_tests(self, session):
        self._stop_if_necessary()
        if self.RP is None:
            return

        for item in session.items:
            # Start collecting test item parts
            parts_in = []
            parts_out = []
            parts = self._get_item_parts(item)
            # Add all parts in reverse order to parts_out
            parts_out.extend(reversed(parts))
            while parts:
                part = parts.pop(0)
                if part in self._start_stack:
                    # If we've seen this part, skip it
                    continue
                # We haven't seen this part yet. Could be a Class, Module or Function
                # Append it to parts_in
                parts_in.append(part)

            # Update self._start_stack and self._finish_stack
            self._start_stack.extend(parts_in)
            self._finish_stack.extend(parts_out)

    def start_pytest_item(self, test_item=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        while True:
            part = self._start_stack.pop(0)
            if part is test_item:
                break
            payload = {
                'name': self._get_item_name(part),
                'description': self._get_item_description(part),
                'tags': self._get_item_tags(part),
                'start_time': timestamp(),
                'item_type': 'SUITE'
            }
            log.debug('ReportPortal - Start Suite: request_body=%s', payload)
            self.RP.start_test_item(**payload)

        start_rq = {
            'name': self._get_item_name(test_item),
            'description': self._get_item_description(test_item),
            'tags': self._get_item_tags(test_item),
            'start_time': timestamp(),
            'item_type': 'STEP'
        }
        if self.RP_SUPPORTS_PARAMETERS:
            start_rq['parameters'] = self._get_parameters(test_item)

        log.debug('ReportPortal - Start TestItem: request_body=%s', start_rq)
        self.RP.start_test_item(**start_rq)

    def finish_pytest_item(self, status, issue=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        # Remove the test from the finish stack
        self._finish_stack.pop(0)

        fta_rq = {
            'end_time': timestamp(),
            'status': status,
            'issue': issue
        }

        log.debug('ReportPortal - Finish TestItem: request_body=%s', fta_rq)
        self.RP.finish_test_item(**fta_rq)

        while self._finish_stack:
            if isinstance(self._finish_stack[0], Function):
                break
            part = self._finish_stack.pop(0)
            if self._finish_stack.count(part):
                continue
            payload = {
                'end_time': timestamp(),
                'issue': issue,
                'status': 'PASSED'
            }
            log.debug('ReportPortal - End TestSuite: request_body=%s', payload)
            self.RP.finish_test_item(**payload)


    def finish_launch(self, launch=None, status='rp_launch'):
        self._stop_if_necessary()
        if self.RP is None:
            return

        # To finish the launch session, a status string parameter is needed
        fl_rq = {
            'end_time': timestamp(),
            'status': status
        }
        log.debug('ReportPortal - Finish launch: request_body=%s', fl_rq)
        self.RP.finish_launch(**fl_rq)

    def post_log(self, message, loglevel='INFO', attachment=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        if loglevel not in self._loglevels:
            log.warning('Incorrect loglevel = %s. Force set to INFO. '
                        'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            'time': timestamp(),
            'message': message,
            'level': loglevel,
            'attachment': attachment,
        }
        self.RP.log(**sl_rq)

    def _stop_if_necessary(self):
        try:
            exc, msg, tb = self._errors.get(False)
            traceback.print_exception(exc, msg, tb)
            sys.stderr.flush()
            if not self.ignore_errors:
                pytest.exit(msg)
        except queue.Empty:
            pass

    @staticmethod
    def _get_item_parts(item):
        parts = []
        parent = item.parent
        if not isinstance(parent, Instance):
            parts.append(parent)
        while True:
            parent = parent.parent
            if parent is None:
                break
            if isinstance(parent, Instance):
                continue
            if isinstance(parent, Session):
                break
            parts.append(parent)

        parts.reverse()
        parts.append(item)
        return parts

    def _get_item_tags(self, item):
        # Try to extract names of @pytest.mark.* decorators used for test item
        # and exclude those present in the rp_ignore_tags parameter
        return [k for k in item.keywords if item.get_marker(k) is not None
                and k not in self.ignored_tags]

    def _get_parameters(self, item):
        return item.callspec.params if hasattr(item, 'callspec') else {}

    @staticmethod
    def _get_item_name(test_item):
        name = test_item.name
        if len(name) > 256:
            name = name[:256]
            test_item.warn(
                'C1',
                'Test node ID was truncated to "{}" because of name size '
                'constraints on reportportal'.format(name)
            )
        return name

    @staticmethod
    def _get_item_description(test_item):
        if isinstance(test_item, (Class, Function, Module)):
            doc = test_item.obj.__doc__
            if doc is not None:
                return doc.strip()
        if isinstance(test_item, DoctestItem):
            return test_item.reportinfo()[2]
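
Each PyTestServiceClass variant in these listings is declared with with_metaclass(Singleton, object), but the Singleton metaclass itself is never shown. A common implementation, given here only as an assumed sketch (with_metaclass comes from six), looks like this:

from six import with_metaclass


class Singleton(type):
    """Metaclass that hands out one shared instance per class."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class SharedService(with_metaclass(Singleton, object)):
    pass


assert SharedService() is SharedService()  # every instantiation returns the same object
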
Code Example #4
class PyTestServiceClass(with_metaclass(Singleton, object)):
    def __init__(self):
        self.RP = None
        try:
            pkg_resources.get_distribution('reportportal_client >= 3.2.0')
            self.RP_SUPPORTS_PARAMETERS = True
        except pkg_resources.VersionConflict:
            self.RP_SUPPORTS_PARAMETERS = False

        self.ignore_errors = True
        self.ignored_tags = []

        self._errors = queue.Queue()
        self._loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')
        self._hier_parts = {}
        self._item_parts = {}

    def init_service(self,
                     endpoint,
                     project,
                     uuid,
                     log_batch_size,
                     ignore_errors,
                     ignored_tags,
                     verify_ssl=False):
        self._errors = queue.Queue()
        if self.RP is None:
            self.ignore_errors = ignore_errors
            if self.RP_SUPPORTS_PARAMETERS:
                self.ignored_tags = list(
                    set(ignored_tags).union({'parametrize'}))
            else:
                self.ignored_tags = ignored_tags
            log.debug(
                'ReportPortal - Init service: endpoint=%s, '
                'project=%s, uuid=%s', endpoint, project, uuid)
            self.RP = ReportPortalServiceAsync(
                endpoint=endpoint,
                project=project,
                token=uuid,
                error_handler=self.async_error_handler,
                log_batch_size=log_batch_size,
                # verify_ssl=False
            )
            self.project_settings = None  # self.RP.rp_client.get_project_settings() if self.RP else None
            self.issue_types = self.get_issue_types()
        else:
            log.debug('The pytest is already initialized')
        return self.RP

    def async_error_handler(self, exc_info):
        self.terminate_service(nowait=True)
        self.RP = None
        self._errors.put_nowait(exc_info)

    def terminate_service(self, nowait=False):
        if self.RP is not None:
            self.RP.terminate(nowait)
            self.RP = None

    def start_launch(self,
                     launch_name,
                     mode=None,
                     tags=None,
                     description=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        sl_pt = {
            'name': launch_name,
            'start_time': timestamp(),
            'description': description,
            'mode': mode,
            'tags': tags
        }
        log.debug('ReportPortal - Start launch: request_body=%s', sl_pt)
        req_data = self.RP.start_launch(**sl_pt)
        log.debug('ReportPortal - Launch started: response_body=%s', req_data)

    def collect_tests(self, session):
        self._stop_if_necessary()
        if self.RP is None:
            return

        hier_dirs = False
        hier_module = False
        hier_class = False
        hier_param = False

        if not hasattr(session.config, 'slaveinput'):
            hier_dirs = session.config.getini('rp_hierarchy_dirs')
            hier_module = session.config.getini('rp_hierarchy_module')
            hier_class = session.config.getini('rp_hierarchy_class')
            hier_param = session.config.getini('rp_hierarchy_parametrize')

        try:
            hier_dirs_level = int(
                session.config.getini('rp_hierarchy_dirs_level'))
        except ValueError:
            hier_dirs_level = 0

        dirs_parts = {}
        tests_parts = {}

        for item in session.items:
            # Start collecting test item parts
            parts = []

            # Hierarchy for directories
            rp_name = self._add_item_hier_parts_dirs(item, hier_dirs,
                                                     hier_dirs_level, parts,
                                                     dirs_parts)

            # Hierarchy for Module and Class/UnitTestCase
            item_parts = self._get_item_parts(item)
            rp_name = self._add_item_hier_parts_other(item_parts, item, Module,
                                                      hier_module, parts,
                                                      rp_name)
            rp_name = self._add_item_hier_parts_other(item_parts, item, Class,
                                                      hier_class, parts,
                                                      rp_name)
            rp_name = self._add_item_hier_parts_other(item_parts, item,
                                                      UnitTestCase, hier_class,
                                                      parts, rp_name)

            # Hierarchy for parametrized tests
            if hier_param:
                rp_name = self._add_item_hier_parts_parametrize(
                    item, parts, tests_parts, rp_name)

            # Hierarchy for test itself (Function/TestCaseFunction)
            item._rp_name = rp_name + ("::" if rp_name else "") + item.name

            # Result initialization
            for part in parts:
                part._rp_result = "PASSED"

            self._item_parts[item] = parts
            for part in parts:
                if part not in self._hier_parts:
                    self._hier_parts[part] = {
                        "finish_counter": 1,
                        "start_flag": False
                    }
                else:
                    self._hier_parts[part]["finish_counter"] += 1

    def start_pytest_item(self, test_item=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        for part in self._item_parts[test_item]:
            if self._hier_parts[part]["start_flag"]:
                continue
            self._hier_parts[part]["start_flag"] = True

            payload = {
                'name': self._get_item_name(part),
                'description': self._get_item_description(part),
                'tags': self._get_item_tags(part),
                'start_time': timestamp(),
                'item_type': 'SUITE'
            }
            log.debug('ReportPortal - Start Suite: request_body=%s', payload)
            self.RP.start_test_item(**payload)

        start_rq = {
            'name': self._get_item_name(test_item),
            'description': self._get_item_description(test_item),
            'tags': self._get_item_tags(test_item),
            'start_time': timestamp(),
            'item_type': 'STEP'
        }
        if self.RP_SUPPORTS_PARAMETERS:
            start_rq['parameters'] = self._get_parameters(test_item)

        log.debug('ReportPortal - Start TestItem: request_body=%s', start_rq)
        self.RP.start_test_item(**start_rq)

    def finish_pytest_item(self, test_item, status, issue=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        fta_rq = {'end_time': timestamp(), 'status': status, 'issue': issue}

        log.debug('ReportPortal - Finish TestItem: request_body=%s', fta_rq)
        self.RP.finish_test_item(**fta_rq)

        parts = self._item_parts[test_item]
        while len(parts) > 0:
            part = parts.pop()
            if status == "FAILED":
                part._rp_result = status
            self._hier_parts[part]["finish_counter"] -= 1
            if self._hier_parts[part]["finish_counter"] > 0:
                continue
            payload = {
                'end_time': timestamp(),
                'issue': issue,
                'status': part._rp_result
            }
            log.debug('ReportPortal - End TestSuite: request_body=%s', payload)
            self.RP.finish_test_item(**payload)

    def finish_launch(self, launch=None, status='rp_launch'):
        self._stop_if_necessary()
        if self.RP is None:
            return

        # To finish the launch session, a status string parameter is needed
        fl_rq = {'end_time': timestamp(), 'status': status}
        log.debug('ReportPortal - Finish launch: request_body=%s', fl_rq)
        self.RP.finish_launch(**fl_rq)

    def post_log(self, message, loglevel='INFO', attachment=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        if loglevel not in self._loglevels:
            log.warning(
                'Incorrect loglevel = %s. Force set to INFO. '
                'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            'time': timestamp(),
            'message': message,
            'level': loglevel,
            'attachment': attachment,
        }
        self.RP.log(**sl_rq)

    def _stop_if_necessary(self):
        try:
            exc, msg, tb = self._errors.get(False)
            traceback.print_exception(exc, msg, tb)
            sys.stderr.flush()
            if not self.ignore_errors:
                pytest.exit(msg)
        except queue.Empty:
            pass

    def get_issue_types(self):
        issue_types = {}
        if not self.project_settings:
            return issue_types

        for item_type in ("AUTOMATION_BUG", "PRODUCT_BUG", "SYSTEM_ISSUE",
                          "NO_DEFECT", "TO_INVESTIGATE"):
            for item in self.project_settings["subTypes"][item_type]:
                issue_types[item["shortName"]] = item["locator"]

        return issue_types

    @staticmethod
    def _add_item_hier_parts_dirs(item,
                                  hier_flag,
                                  dirs_level,
                                  report_parts,
                                  dirs_parts,
                                  rp_name=""):

        parts_dirs = PyTestServiceClass._get_item_dirs(item)
        dir_path = item.fspath.new(dirname="", basename="", drive="")
        rp_name_path = ""

        for dir_name in parts_dirs[dirs_level:]:
            dir_path = dir_path.join(dir_name)
            path = str(dir_path)

            if hier_flag:
                if path in dirs_parts:
                    item_dir = dirs_parts[path]
                    rp_name = ""
                else:
                    item_dir = File(dir_name,
                                    nodeid=dir_name,
                                    session=item.session,
                                    config=item.session.config)
                    rp_name += dir_name
                    item_dir._rp_name = rp_name
                    dirs_parts[path] = item_dir
                    rp_name = ""

                report_parts.append(item_dir)
            else:
                rp_name_path = path[1:]

        if not hier_flag:
            rp_name += rp_name_path

        return rp_name

    @staticmethod
    def _add_item_hier_parts_parametrize(item,
                                         report_parts,
                                         tests_parts,
                                         rp_name=""):

        for mark in item.own_markers:
            if mark.name == 'parametrize':
                ch_index = item.nodeid.find("[")
                test_fullname = item.nodeid[:ch_index] if ch_index > 0 else item.nodeid
                test_name = item.originalname

                rp_name += ("::" if rp_name else "") + test_name

                if test_fullname in tests_parts:
                    item_test = tests_parts[test_fullname]
                else:
                    item_test = Item(test_fullname,
                                     nodeid=test_fullname,
                                     session=item.session,
                                     config=item.session.config)
                    item_test._rp_name = rp_name
                    item_test.obj = item.obj
                    item_test.keywords = item.keywords
                    item_test.own_markers = item.own_markers
                    item_test.parent = item.parent

                    tests_parts[test_fullname] = item_test

                rp_name = ""
                report_parts.append(item_test)
                break

        return rp_name

    @staticmethod
    def _add_item_hier_parts_other(item_parts,
                                   item,
                                   item_type,
                                   hier_flag,
                                   report_parts,
                                   rp_name=""):

        for part in item_parts:

            if type(part) is item_type:

                if item_type is Module:
                    module_path = str(
                        item.fspath.new(dirname=rp_name,
                                        basename=part.fspath.basename,
                                        drive=""))
                    rp_name = module_path if rp_name else module_path[1:]
                elif item_type in (Class, Function, UnitTestCase,
                                   TestCaseFunction):
                    rp_name += ("::" if rp_name else "") + part.name

                if hier_flag:
                    part._rp_name = rp_name
                    rp_name = ""
                    report_parts.append(part)

        return rp_name

    @staticmethod
    def _get_item_parts(item):
        parts = []
        parent = item.parent
        if not isinstance(parent, Instance):
            parts.append(parent)
        while True:
            parent = parent.parent
            if parent is None:
                break
            if isinstance(parent, Instance):
                continue
            if isinstance(parent, Session):
                break
            parts.append(parent)

        parts.reverse()
        return parts

    @staticmethod
    def _get_item_dirs(item):

        root_path = item.session.config.rootdir.strpath
        dir_path = item.fspath.new(basename="")
        rel_dir = dir_path.new(dirname=dir_path.relto(root_path),
                               basename="",
                               drive="")

        dir_list = []
        for directory in rel_dir.parts(reverse=False):
            dir_name = directory.basename
            if dir_name:
                dir_list.append(dir_name)

        return dir_list

    def _get_item_tags(self, item):
        # Try to extract names of @pytest.mark.* decorators used for test item
        # and exclude those present in the rp_ignore_tags parameter
        def get_marker_value(item, keyword):
            try:
                marker = item.get_closest_marker(keyword)
            except AttributeError:
                # pytest < 3.6
                marker = item.keywords.get(keyword)

            return "{}:{}".format(keyword, marker.args[0]) \
                if marker and marker.args else keyword

        try:
            tags = [
                get_marker_value(item, k) for k in item.keywords
                if item.get_closest_marker(k) is not None
                and k not in self.ignored_tags
            ]
        except AttributeError:
            # pytest < 3.6
            tags = [
                get_marker_value(item, k) for k in item.keywords if
                item.get_marker(k) is not None and k not in self.ignored_tags
            ]

        tags.extend(item.session.config.getini('rp_tests_tags'))

        return tags

    def _get_parameters(self, item):
        return item.callspec.params if hasattr(item, 'callspec') else {}

    @staticmethod
    def _get_item_name(test_item):
        name = test_item._rp_name
        if len(name) > 256:
            name = name[:256]
            test_item.warn(
                'C1',
                'Test node ID was truncated to "{}" because of name size '
                'constraints on reportportal'.format(name))
        return name

    @staticmethod
    def _get_item_description(test_item):
        if isinstance(test_item, (Class, Function, Module, Item)):
            doc = test_item.obj.__doc__
            if doc is not None:
                return trim_docstring(doc)
        if isinstance(test_item, DoctestItem):
            return test_item.reportinfo()[2]
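
collect_tests() above reads its hierarchy switches with session.config.getini() (rp_hierarchy_dirs, rp_hierarchy_dirs_level, rp_hierarchy_module, rp_hierarchy_class, rp_hierarchy_parametrize), and _get_item_tags() reads rp_tests_tags. A hypothetical sketch of how a plugin could register those ini options follows; only the option names come from the listing, while the defaults and help strings are assumptions.

def pytest_addoption(parser):
    # Register the ini options consumed by collect_tests() and _get_item_tags().
    parser.addini('rp_hierarchy_dirs', default=False, type='bool',
                  help='Create a suite level per directory')
    parser.addini('rp_hierarchy_dirs_level', default=0,
                  help='Number of leading path components to skip')
    parser.addini('rp_hierarchy_module', default=True, type='bool',
                  help='Create a suite level per module')
    parser.addini('rp_hierarchy_class', default=True, type='bool',
                  help='Create a suite level per class / unittest TestCase')
    parser.addini('rp_hierarchy_parametrize', default=False, type='bool',
                  help='Group parametrized tests under the original test name')
    parser.addini('rp_tests_tags', type='args', default=[],
                  help='Extra tags attached to every reported test item')
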
Code Example #5
class PyTestServiceClass(with_metaclass(Singleton, object)):

    _loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')

    def __init__(self):
        self.RP = None
        self.ignore_errors = True
        self._errors = queue.Queue()

    def init_service(self, endpoint, project, uuid, log_batch_size,
                     ignore_errors):
        self._errors = queue.Queue()
        if self.RP is None:
            self.ignore_errors = ignore_errors
            logging.debug(
                msg="ReportPortal - Init service: "
                    "endpoint={0}, project={1}, uuid={2}".
                    format(endpoint, project, uuid))
            self.RP = ReportPortalServiceAsync(
                endpoint=endpoint,
                project=project,
                token=uuid,
                error_handler=self.async_error_handler,
                log_batch_size=log_batch_size
            )
        else:
            logging.debug("The pytest is already initialized")
        return self.RP

    def async_error_handler(self, exc_info):
        self.terminate_service()
        self.RP = None
        self._errors.put_nowait(exc_info)

    def _stop_if_necessary(self):
        try:
            exc, msg, tb = self._errors.get(False)
            traceback.print_exception(exc, msg, tb)
            if not self.ignore_errors:
                pytest.exit(msg)
        except queue.Empty:
            pass

    def terminate_service(self):
        if self.RP is not None:
            self.RP.terminate()

    def start_launch(
            self, launch_name, mode=None, tags=None, description=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        sl_pt = {
            "name": launch_name,
            "start_time": timestamp(),
            "description": description,
            "mode": mode,
            "tags": tags
        }
        logging.debug("ReportPortal - Start launch: "
                      "request_body=%s", sl_pt)
        req_data = self.RP.start_launch(**sl_pt)
        logging.debug("ReportPortal - Launch started: "
                      "response_body=%s", req_data)

    def start_pytest_item(self, test_item=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        start_rq = {
            "name": self._get_full_name(test_item),
            "description": self._get_description(test_item),
            "tags": self._get_tags(test_item),
            "start_time": timestamp(),
            "item_type": "STEP"
        }

        logging.debug(
            "ReportPortal - Start TestItem: "
            "request_body=%s", start_rq)

        self.RP.start_test_item(**start_rq)

    def _get_full_name(self, test_item):
        return test_item.nodeid

    def _get_description(self, test_item):
        try:
            # for common items
            return test_item.function.__doc__
        except AttributeError:
            # doctest has no `function` attribute
            return test_item.reportinfo()[2]

    def _get_tags(self, test_item):
        # try to extract names of @pytest.mark.* decorators used for test item
        mark_plugin = test_item.config.pluginmanager.getplugin("mark")
        if mark_plugin:
            keywords = test_item.keywords
            return list(mark_plugin.MarkMapping(keywords)._mymarks)
        else:
            return []

    def finish_pytest_item(self, status, issue=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        fta_rq = {
            "end_time": timestamp(),
            "status": status,
            "issue": issue
        }

        logging.debug(
            "ReportPortal - Finish TestItem:"
            " request_body=%s", fta_rq)
        self.RP.finish_test_item(**fta_rq)

    def finish_launch(self, launch=None, status="rp_launch"):
        self._stop_if_necessary()
        if self.RP is None:
            return

        # To finish the launch session, a status string parameter is needed
        fl_rq = {
            "end_time": timestamp(),
            "status": status
        }
        logging.debug("ReportPortal - Finish launch: "
                      "request_body=%s", fl_rq)
        self.RP.finish_launch(**fl_rq)

    def post_log(self, message, loglevel='INFO', attachment=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        if loglevel not in self._loglevels:
            logging.warning('Incorrect loglevel = %s. Force set to INFO. '
                            'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            "time": timestamp(),
            "message": message,
            "level": loglevel,
            "attachment": attachment,
        }
        self.RP.log(**sl_rq)
Code Example #6
class ReportPortalDataWriter(object):
    """Wrapper around async Report Portal service"""
    def __init__(self,
                 endpoint,
                 token,
                 project,
                 launch_name=None,
                 launch_doc=None,
                 launch_tags=None,
                 verify_ssl=False):
        """
        :param endpoint:
            link to Report Portal
        :param token:
            user token
        :param project:
            Report Portal project name
        :param launch_name:
            Report Portal launch name
        :param launch_doc:
            launch description
        :param launch_tags:
            launch tags
        :param verify_ssl:
            whether to verify SSL certificates
        """
        self.endpoint = endpoint
        self.token = token
        self.project = project
        self.launch_name = launch_name
        self.launch_doc = launch_doc
        self.launch_tags = launch_tags
        self.service = None
        self.test = None
        self.verify_ssl = verify_ssl

    def start_test(self):
        """
        Start new launch in Report Portal
        """
        self.service = ReportPortalServiceAsync(endpoint=self.endpoint,
                                                project=self.project,
                                                token=self.token,
                                                error_handler=my_error_handler,
                                                verify_ssl=self.verify_ssl)
        self.service.start_launch(name=self.launch_name,
                                  start_time=timestamp(),
                                  description=self.launch_doc,
                                  tags=self.launch_tags)

    def finish_test(self):
        """
        Finish started launch in Report Portal
        """
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()
        self.service = None

    def is_test_started(self) -> bool:
        """
        Return True if the launch has been started
        """
        if self.service:
            return True
        return False

    def start_test_item(self, issue, description, tags, parameters):
        """
        Start new test item inside the launch
        :param issue:
        :param description:
        :param tags:
        :param parameters:
        """
        self.test = self.service.start_test_item(issue,
                                                 description=description,
                                                 tags=tags,
                                                 start_time=timestamp(),
                                                 item_type="STEP",
                                                 parameters=parameters)

    def test_item_message(self, message, level="ERROR", attachment=None):
        """
        Add new log message inside test item
        :param message:
        :param level:
        :param attachment:
        """
        self.service.log(time=timestamp(),
                         message=message[:MAX_MESSAGE_LEN],
                         level=level,
                         attachment=attachment)

    def finish_test_item(self, defect_type_info):
        """
        Finish started test item
        :param defect_type_info:
        """
        defect_mapping = {
            'To Investigate': 'TI001',
            'No Defect': 'ND001',
            'Product Bug': 'PB001',
            'System Issue': 'SI001'
        }
        defect_type = defect_type_info['RP Defect Type']
        issue = None
        if defect_type in defect_mapping:
            issue = {
                'issue_type': defect_mapping[defect_type],
                'comment': defect_type_info['RP Comment']
            }
        self.service.finish_test_item(end_time=timestamp(),
                                      status="FAILED",
                                      issue=issue)
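
A minimal, hypothetical usage sketch of the writer above; the endpoint, token, project, and defect values are placeholders, and my_error_handler and MAX_MESSAGE_LEN are assumed to be module-level names, since the listing references them without defining them.

if __name__ == '__main__':
    writer = ReportPortalDataWriter(endpoint='https://rp.example.com',
                                    token='xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
                                    project='security',
                                    launch_name='nightly_scan',
                                    launch_doc='Nightly scanner results',
                                    launch_tags=['scanner'])
    writer.start_test()                              # opens the launch
    writer.start_test_item(issue='finding-42',
                           description='Finding reported by the scanner',
                           tags=['high'],
                           parameters={'target': 'api-gateway'})
    writer.test_item_message('raw scanner output goes here', level='ERROR')
    writer.finish_test_item({'RP Defect Type': 'Product Bug',
                             'RP Comment': 'Confirmed by triage'})
    if writer.is_test_started():
        writer.finish_test()                         # closes the launch and the client
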
Code Example #7
class ReportPortalHandler(AbstractResultHandler):
    """Send tests results and logs to the Report Portal system.

    Attributes:
        main_test (object): the main test instance to be run.
        service (ReportPortalServiceAsync): Endpoint for interacting with
            Report Portal.
        log_handler (ReportPortalLogHandler): A log handler to send every log
            message to the Report Portal system. Logs can be sent only when
            a test is currently running.
    """
    NAME = "reportportal"

    MODE_TO_STRING = {
        MODE_CRITICAL: "Critical",
        MODE_OPTIONAL: "Optional",
        MODE_FINALLY: "Finally"
    }

    EXCEPTION_TYPE_TO_STATUS = {
        TestOutcome.SUCCESS: "PASSED",
        TestOutcome.ERROR: "FAILED",
        TestOutcome.FAILED: "FAILED",
        TestOutcome.SKIPPED: "SKIPPED",
        TestOutcome.EXPECTED_FAILURE: "PASSED",
        TestOutcome.UNEXPECTED_SUCCESS: "FAILED"
    }

    EXCEPTION_TYPE_TO_ISSUE = {
        TestOutcome.ERROR: "AUTOMATION_BUG",
        TestOutcome.FAILED: "PRODUCT_BUG",
        TestOutcome.SKIPPED: "NO_DEFECT"
    }

    def __init__(self, main_test, *args, **kwargs):
        super(ReportPortalHandler, self).__init__(main_test=main_test,
                                                  *args,
                                                  **kwargs)

        configuration = get_configuration()
        self.service = ReportPortalServiceAsync(
            endpoint=configuration.endpoint,
            project=configuration.project,
            token=configuration.token)

        self.log_handler = ReportPortalLogHandler(self.service)
        self.comments = []

    def start_test_run(self):
        """Called once before any tests are executed."""
        run_name = self.main_test.data.run_data.run_name
        mode = "DEFAULT"
        if not run_name:
            run_name = self.main_test.__class__.__name__
            mode = "DEBUG"

        description = self.main_test.__doc__

        self.service.start_launch(name=run_name,
                                  start_time=timestamp(),
                                  description=description,
                                  mode=mode)

    def start_test(self, test):
        """Called when the given test is about to be run.

        Args:
            test (object): test item instance.
        """
        item_type = "STEP"
        description = test.shortDescription()

        if isinstance(test, TestFlow):
            description = test.__doc__

        mode = getattr(test, "mode", None)
        if mode is not None:
            description = "|{}| {}".format(self.MODE_TO_STRING[mode],
                                           description)

        self.service.start_test_item(
            name=test.data.name,
            description=description,
            tags=test.TAGS if hasattr(test, "TAGS") else None,
            start_time=timestamp(),
            item_type=item_type)

        core_log.addHandler(self.log_handler)
        self.service.log(time=timestamp(),
                         level="INFO",
                         message="work dir:\n{0}".format(
                             os.path.abspath(test.work_dir)))

    def start_composite(self, test):
        """Called when the given TestSuite is about to be run.

        Args:
            test (rotest.core.suite.TestSuite): test item instance.
        """
        if test == self.main_test:
            return

        self.service.start_test_item(
            name=test.data.name,
            description=test.__doc__,
            tags=test.TAGS if hasattr(test, "TAGS") else None,
            start_time=timestamp(),
            item_type="Suite")

    def stop_composite(self, test):
        """Called when the given TestSuite has been run.

        Args:
            test (rotest.core.suite.TestSuite): test item instance.
        """
        if test == self.main_test:
            return

        if test.data.success:
            status = "PASSED"
        else:
            status = "FAILED"

        self.service.finish_test_item(end_time=timestamp(), status=status)

    def stop_test_run(self):
        """Called once after all tests are executed."""
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()

    def stop_test(self, test):
        """Called once after a test is finished."""
        core_log.removeHandler(self.log_handler)
        exception_type = test.data.exception_type
        status = self.EXCEPTION_TYPE_TO_STATUS.get(exception_type, "FAILED")

        issue = None
        if exception_type in self.EXCEPTION_TYPE_TO_ISSUE or \
                exception_type is None or exception_type == "":
            issue = {
                "issue_type":
                self.EXCEPTION_TYPE_TO_ISSUE.get(exception_type,
                                                 "TO_INVESTIGATE"),
                "comment":
                "\n".join(self.comments)
            }

        self.service.finish_test_item(end_time=timestamp(),
                                      status=status,
                                      issue=issue)

        self.comments = []

    def add_skip(self, test, reason):
        self.comments.append(reason)

    def add_error(self, test, exception_string):
        reason = [line for line in exception_string.split("\n") if line][-1]
        self.comments.append(reason)

    def add_failure(self, test, exception_string):
        reason = [line for line in exception_string.split("\n") if line][-1]
        self.comments.append(reason)

    def add_unexpected_success(self, test):
        self.service.log(time=timestamp(),
                         message="The test was supposed to fail, but instead "
                         "it passed",
                         level="ERROR")
Code Example #8
class PortalService:
    def __init__(self, portal_launch_name, portal_launch_doc):
        # Report Portal versions below 5.0.0:

        self.endpoint = get_portal_config().get("ENDPOINT")  # Portal service address
        self.project = get_portal_config().get("PROJECT")  # Portal project name
        self.token = get_portal_config().get("TOKEN")  # Portal token

        self.service = ReportPortalServiceAsync(
            endpoint=self.endpoint,
            project=self.project,
            token=self.token,
            error_handler=self.my_error_handler)

        # Start launch.
        self.launch = self.service.start_launch(name=portal_launch_name,
                                                start_time=timestamp(),
                                                description=portal_launch_doc)

        # Start test item Report Portal versions below 5.0.0:
        self.test = self.service.start_test_item(name="Test Case",
                                                 description="First Test Case",
                                                 tags=["Image", "Smoke"],
                                                 start_time=timestamp(),
                                                 item_type="STEP",
                                                 parameters={
                                                     "key1": "val1",
                                                     "key2": "val2"
                                                 })

        self.service.finish_test_item(end_time=timestamp(), status="PASSED")
        # Finish launch.
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()

    @staticmethod
    def my_error_handler(exc_info):
        """
        This callback function will be called by async service client when error occurs.
        Return True if error is not critical and you want to continue work.
        :param exc_info: result of sys.exc_info() -> (type, value, traceback)
        :return:
        """
        print("Error occurred: {}".format(exc_info[1]))
        traceback.print_exception(*exc_info)

    def service_text_message(self):
        # Create text log message with INFO level.
        self.service.log(time=timestamp(),
                         message="Hello World!",
                         level="INFO")

    def service_message_with_attached_text(self):
        # Create log message with attached text output and WARN level.
        self.service.log(time=timestamp(),
                         message="Too high memory usage!",
                         level="WARN",
                         attachment={
                             "name": "free_memory.txt",
                             "data":
                             "subprocess.check_output('free -h'.split())",
                             "mime": "text/plain"
                         })

    def service_message_with_image(self):
        # Create log message with binary file, INFO level and custom mimetype.
        image = "./image.png"
        with open(image, "rb") as fh:
            attachment = {
                "name": os.path.basename(image),
                "data": fh.read(),
                "mime": guess_type(image)[0] or "application/octet-stream"
            }
            self.service.log(timestamp(), "Screen shot of issue.", "INFO",
                             attachment)

    def service_message_with_command_line(self):
        # Create log message supplying only contents
        self.service.log(timestamp(),
                         "running processes",
                         "INFO",
                         attachment=subprocess.check_output("ps aux".split()))
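A minimal usage sketch for PortalService, assuming get_portal_config() (not shown in this excerpt) returns a dict with ENDPOINT, PROJECT and TOKEN keys. Note that the constructor above already starts the launch, reports one test item and terminates the service, so the log helper methods are only usable if that finishing logic is moved out of __init__.

# Illustrative only; the launch name and description are placeholders.
portal = PortalService(
    portal_launch_name="smoke-run",
    portal_launch_doc="Example launch created by PortalService")
# At this point the launch has already been started, populated and finished
# by __init__, so nothing else needs to be called for a minimal report.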
コード例 #9
0
class ReportPortalDataWriter:
    def __init__(self,
                 endpoint,
                 token,
                 project,
                 log_batch_size=100,
                 launch_name=None,
                 tags=None,
                 launch_doc=None,
                 launch_id=None,
                 verify_ssl=False):
        self.endpoint = endpoint
        self.token = token
        self.project = project
        self.log_batch_size = log_batch_size
        self.launch_name = launch_name
        self.tags = tags
        self.launch_doc = launch_doc
        self.service = None
        self.test = None
        self.verify_ssl = verify_ssl
        self.launch_id = launch_id

    def start_service(self):
        self.service = ReportPortalService(endpoint=self.endpoint,
                                           project=self.project,
                                           token=self.token,
                                           log_batch_size=self.log_batch_size,
                                           verify_ssl=self.verify_ssl)
        if self.launch_id:
            self.service.launch_id = self.launch_id

    def start_test(self):
        if not self.service:
            self.start_service()
        return self.service.start_launch(name=self.launch_name,
                                         start_time=timestamp(),
                                         description=self.launch_doc,
                                         tags=self.tags)

    def finish_test(self):
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()
        self.service = None

    def is_test_started(self):
        if self.service:
            return True
        return False

    def start_test_item(self,
                        issue,
                        description,
                        tags,
                        item_type='STEP',
                        parameters=None):
        # Avoid a mutable default argument for parameters.
        self.service.start_test_item(issue,
                                     description=description,
                                     tags=tags,
                                     start_time=timestamp(),
                                     item_type=item_type,
                                     parameters=parameters or {})

    def test_item_message(self, message, level="ERROR", attachment=None):
        if len(message) > constants.MAX_MESSAGE_LEN:
            index = 0
            while index < len(message):
                increment = constants.MAX_MESSAGE_LEN
                if index + increment > len(message):
                    increment = len(message) - index
                self.service.log(time=timestamp(),
                                 message=message[index:index + increment],
                                 level=level,
                                 attachment=attachment)
                index = index + increment
        else:
            self.service.log(time=timestamp(),
                             message=message,
                             level=level,
                             attachment=attachment)

    def finish_test_item(self, status="FAILED"):
        self.service.finish_test_item(end_time=timestamp(), status=status)
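A hedged usage sketch for ReportPortalDataWriter: the endpoint, token, project and item details below are placeholders, and the call order simply mirrors the methods defined above.

# Illustrative only; connection details are placeholders.
writer = ReportPortalDataWriter(endpoint="https://rp.example.com",
                                token="00000000-0000-0000-0000-000000000000",
                                project="demo_project",
                                launch_name="security scan",
                                launch_doc="Example launch")
writer.start_test()                      # starts the service and the launch
writer.start_test_item(issue="Example finding",
                       description="Illustrative test item",
                       tags=["example"])
writer.test_item_message("Messages longer than MAX_MESSAGE_LEN are sent in chunks")
writer.finish_test_item(status="FAILED")
writer.finish_test()                     # finishes the launch and terminates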
コード例 #10
0
    def report_errors(self):
        with self.no_ssl_verification():
            self.create_project()
            service = ReportPortalServiceAsync(
                endpoint=self.rp_url,
                project=self.rp_project,
                token=self.rp_token,
                error_handler=self.my_error_handler)

            errors = self.errors
            errors_len = len(errors)
            if errors_len > 0:
                # Start launch.
                service.start_launch(
                    name=self.rp_launch_name,
                    start_time=self.timestamp(),
                    description='This simulation has {} fails'.format(
                        errors_len))
                for key in errors:
                    # Start test item.
                    item_name = self.get_item_name(errors[key])
                    service.start_test_item(
                        name=item_name,
                        description="This request was failed {} times".format(
                            errors[key]['Error count']),
                        tags=[
                            self.args['type'], errors[key]['Request URL'],
                            'gatling_test'
                        ],
                        start_time=self.timestamp(),
                        item_type="STEP",
                        parameters={
                            "simulation": self.args['simulation'],
                            'duration': (int(self.args['end_time']) / 1000 -
                                         int(self.args['start_time']) / 1000),
                            'test type': self.args['type']
                        })

                    self.log_message(service, 'Request name', errors[key],
                                     'WARN')
                    self.log_message(service, 'Method', errors[key], 'WARN')
                    self.log_message(service, 'Request URL', errors[key],
                                     'WARN')
                    self.log_message(service, 'Request_params', errors[key],
                                     'WARN')
                    self.log_message(service, 'Request headers', errors[key],
                                     'INFO')
                    self.log_message(service, 'Error count', errors[key],
                                     'WARN')
                    self.log_message(service, 'Error code', errors[key],
                                     'WARN')
                    self.log_message(service, 'Error_message', errors[key],
                                     'WARN')
                    self.log_message(service, 'Response code', errors[key],
                                     'WARN')
                    self.log_message(service, 'Response', errors[key], 'WARN')
                    self.log_unique_error_id(service,
                                             errors[key]['Request name'],
                                             errors[key]['Method'],
                                             errors[key]['Response code'],
                                             errors[key]['Error code'])

                    service.finish_test_item(end_time=self.timestamp(),
                                             status="FAILED")
            else:
                service.start_launch(
                    name=self.rp_launch_name,
                    start_time=self.timestamp(),
                    description='This simulation has no fails')

            # Finish launch.
            service.finish_launch(end_time=self.timestamp())

            service.terminate()
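log_message, get_item_name, log_unique_error_id and no_ssl_verification are referenced above but not shown in this excerpt. Purely as an assumption about its shape, log_message is likely a thin wrapper that logs one field of the error dict when it is present:

    # Hypothetical helper; the real implementation is not part of this example.
    def log_message(self, service, key, error, level='WARN'):
        value = error.get(key)
        if value:
            service.log(time=self.timestamp(),
                        message="{}: {}".format(key, value),
                        level=level)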
コード例 #11
0
    def report_test_results(self, errors, performance_degradation_rate, compare_with_baseline, missed_threshold_rate, compare_with_thresholds):
        with self.no_ssl_verification():
            self.create_project()
            service = ReportPortalServiceAsync(endpoint=self.rp_url, project=self.rp_project,
                                               token=self.rp_token, error_handler=self.my_error_handler)

            # Start launch.
            service.start_launch(name=self.rp_launch_name + ": performance testing results",
                                 start_time=self.timestamp(),
                                 description='Test name - {}'.format(self.args['simulation']))
            errors_len = len(errors)

            if errors_len > 0:
                service.start_test_item(name="Functional errors",
                                        start_time=self.timestamp(),
                                        description="This simulation has failed requests",
                                        item_type="SUITE")
                for key in errors:
                    # Start test item.
                    item_name = self.get_item_name(errors[key])
                    service.start_test_item(name=item_name,
                                            description="This request was failed {} times".format(
                                                errors[key]['Error count']),
                                            tags=[errors[key]['Request URL']],
                                            start_time=self.timestamp(),
                                            item_type="STEP",
                                            parameters={"simulation": self.args['simulation'],
                                                        'test type': self.args['type']})

                    self.log_message(service, 'Request name', errors[key], 'WARN')
                    self.log_message(service, 'Method', errors[key], 'WARN')
                    self.log_message(service, 'Request URL', errors[key], 'WARN')
                    self.log_message(service, 'Request_params', errors[key], 'WARN')
                    self.log_message(service, 'Request headers', errors[key], 'INFO')
                    self.log_message(service, 'Error count', errors[key], 'WARN')
                    self.log_message(service, 'Error_message', errors[key], 'WARN')
                    self.log_message(service, 'Response code', errors[key], 'WARN')
                    self.log_message(service, 'Response', errors[key], 'WARN')
                    self.log_unique_error_id(service, errors[key]['Request name'], errors[key]['Method'],
                                             errors[key]['Response code'])

                    service.finish_test_item(end_time=self.timestamp(), status="FAILED")
                service.finish_test_item(end_time=self.timestamp(), status="FAILED")
            else:
                service.start_test_item(name="Functional errors",
                                        start_time=self.timestamp(),
                                        item_type="STEP",
                                        description='This simulation has no functional errors')
                service.finish_test_item(end_time=self.timestamp(), status="PASSED")

            if performance_degradation_rate > self.performance_degradation_rate:
                service.start_test_item(name="Compare to baseline",
                                        start_time=self.timestamp(),
                                        description="Test \"{}\" failed with performance degradation rate {}"
                                        .format(self.args['simulation'], performance_degradation_rate),
                                        item_type="SUITE")

                service.log(time=self.timestamp(),
                            message="The following requests are slower than baseline:",
                            level="{}".format('INFO'))
                for request in compare_with_baseline:
                    service.start_test_item(name="\"{}\" reached {} ms by {}. Baseline {} ms."
                                .format(request['request_name'], request['response_time'],
                                        self.args['comparison_metric'], request['baseline']),
                                            tags=['performance degradation'],
                                            start_time=self.timestamp(),
                                            item_type="STEP",
                                            parameters={'simulation': self.args['simulation'],
                                                        'test type': self.args['type']})

                    service.log(time=self.timestamp(), message="\"{}\" reached {} ms by {}. Baseline {} ms."
                                .format(request['request_name'], request['response_time'],
                                        self.args['comparison_metric'], request['baseline']),
                                level="{}".format('WARN'))
                    service.finish_test_item(end_time=self.timestamp(), status="FAILED")
                service.log(time=self.timestamp(), message=hashlib.sha256(
                    "{} performance degradation".format(self.args['simulation']).strip().encode('utf-8')).hexdigest(),
                            level='ERROR')

                service.finish_test_item(end_time=self.timestamp(), status="FAILED")
            else:
                service.start_test_item(name="Compare to baseline",
                                        start_time=self.timestamp(),
                                        item_type="STEP",
                                        description='Performance degradation rate less than {}'
                                        .format(self.performance_degradation_rate))
                service.finish_test_item(end_time=self.timestamp(), status="PASSED")

            if missed_threshold_rate > self.missed_thresholds_rate:
                service.start_test_item(name="Compare with thresholds",
                                        start_time=self.timestamp(),
                                        description="Test \"{}\" failed with missed thresholds rate {}"
                                                    .format(self.args['simulation'], missed_threshold_rate),
                                        item_type="SUITE")

                for color in ["yellow", "red"]:
                    colored = False
                    for th in compare_with_thresholds:
                        if th['threshold'] == color:
                            service.start_test_item(name="{} threshold for  \"{}\""
                                                    .format(color, th['request_name']),
                                                    tags=['missed thresholds'],
                                                    start_time=self.timestamp(),
                                                    item_type="STEP",
                                                    parameters={'simulation': self.args['simulation'],
                                                                'test type': self.args['type']})
                            if not colored:
                                service.log(time=self.timestamp(),
                                            message=f"The following {color} thresholds were exceeded:", level="INFO")
                            appendage = calculate_appendage(th['target'])
                            service.log(time=self.timestamp(),
                                        message=f"\"{th['request_name']}\" {th['target']}{appendage} with value {th['metric']}{appendage} exceeded threshold of {th[color]}{appendage}",
                                        level="WARN")
                            service.finish_test_item(end_time=self.timestamp(), status="FAILED")
                service.log(time=self.timestamp(), message=hashlib.sha256(
                    "{} missed thresholds".format(self.args['simulation']).strip().encode('utf-8')).hexdigest(),
                            level='ERROR')

                service.finish_test_item(end_time=self.timestamp(), status="FAILED")
            else:
                service.start_test_item(name="Compare with thresholds",
                                        start_time=self.timestamp(),
                                        item_type="STEP",
                                        description='Missed thresholds rate less than {}'
                                        .format(self.missed_thresholds_rate))
                service.finish_test_item(end_time=self.timestamp(), status="PASSED")
            # Finish launch.
            service.finish_launch(end_time=self.timestamp())

            service.terminate()
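calculate_appendage is called above when building the threshold message but is not defined in this excerpt. As an assumption, it probably maps the target metric to its unit suffix, for example milliseconds for response-time targets and a percent sign for error-rate targets:

# Hypothetical helper, inferred from how the threshold log message is built above.
def calculate_appendage(target):
    return "%" if target == "error_rate" else " ms"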
コード例 #12
0
class ReportPortalPlugin:
    """reportportal.io plugin to hook_plug.

    behave propertys:behave.readthedocs.io/en/latest/context_attributes.html
    """
    @staticmethod
    def error(exc_info):
        # exc_info is a (type, value, traceback) tuple, so unpack it into
        # traceback.print_exception; print_exc takes no exception arguments.
        print_exception(*exc_info)

    @staticmethod
    def step_table(step):
        if step.table:
            rows = '|\n|'.join(['|'.join(row) for row in step.table.rows])
            return '|{}|'.format(rows)
        return None

    @staticmethod
    def step_text(step):
        if hasattr(step, 'text'):
            return step.text
        return None

    @staticmethod
    def timestamp():
        return str(int(time() * 1000))

    @staticmethod
    def check_context(context):
        if not hasattr(context, 'config'):
            raise EnvironmentError(
                'Please, check if context is a behave context')
        try:
            context.config.userdata['rp_project']
            context.config.userdata['rp_endpoint']
            context.config.userdata['rp_launch']
            context.config.userdata['rp_token']
        except KeyError:
            raise EnvironmentError('Please, check your behave.ini file')
        return True

    @tag_behavior
    def before_all(self, context):
        """
        TODO: get data from behave.userdata

            endpoint: Report Portal server address
            token: user report portal api token
            project: project name or label
        """
        self._rp = ReportPortalServiceAsync(
            endpoint=context.config.userdata.get('rp_endpoint', None),
            project=context.config.userdata.get('rp_project', None),
            token=context.config.userdata.get('rp_token', None),
            error_handler=self.error,
        )
        self._rp.start_launch(
            name=context.config.userdata.get('rp_launch', None),
            start_time=self.timestamp(),
        )

    @tag_behavior
    def before_feature(self, context, feature):
        self._rp.start_test_item(name=feature.name,
                                 start_time=self.timestamp(),
                                 description=' '.join(feature.description),
                                 tags=feature.tags,
                                 item_type='STORY')

    @tag_behavior
    def before_scenario(self, context, scenario):
        self._rp.start_test_item(name=scenario.name,
                                 start_time=self.timestamp(),
                                 description=' '.join(scenario.description),
                                 tags=scenario.tags,
                                 item_type='Scenario')

    @tag_behavior
    def before_step(self, context, step):
        """NOTE: step doesn't has tag"""
        self._rp.start_test_item(name=step.name,
                                 start_time=self.timestamp(),
                                 description=self.step_table(step)
                                 or self.step_text(step),
                                 tags=None,
                                 item_type='step')

    @tag_behavior
    def after_all(self, context):
        self._rp.finish_launch(end_time=self.timestamp())
        self._rp.terminate()

    @tag_behavior
    def after_feature(self, context, feature):
        self._rp.finish_test_item(
            end_time=self.timestamp(),
            status=feature.status.name,
        )

    @tag_behavior
    def after_scenario(self, context, scenario):
        self._rp.finish_test_item(
            end_time=self.timestamp(),
            status=scenario.status.name,
        )

    @tag_behavior
    def after_step(self, context, step):
        if step.status.name == 'failed':
            self._rp.log(
                time=self.timestamp(),
                message=''.join(format_tb(step.exc_traceback)),
                level='ERROR',
            )
        self._rp.finish_test_item(
            end_time=self.timestamp(),
            status=step.status.name,
        )
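A hedged wiring sketch for the behave plugin above: behave has no plugin registry, so one common approach is to forward the hooks from environment.py. The module layout below and the direct forwarding (ignoring whatever filtering @tag_behavior may apply) are assumptions; the rp_endpoint, rp_project, rp_launch and rp_token keys read by check_context would normally come from the [behave.userdata] section of behave.ini.

# environment.py (illustrative); forwards behave hooks to the plugin above.
_plugin = ReportPortalPlugin()

def before_all(context):
    _plugin.before_all(context)

def before_feature(context, feature):
    _plugin.before_feature(context, feature)

def after_feature(context, feature):
    _plugin.after_feature(context, feature)

def after_all(context):
    _plugin.after_all(context)

# The scenario and step hooks forward to the plugin in the same way.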
コード例 #13
0
class ReportPortalPlugin(Plugin):
    can_configure = True
    score = 200
    status = {}
    enableOpt = None
    name = "reportportal"
    def options(self, parser, env):
        """
        Add options to command line.
        """
        super(ReportPortalPlugin, self).options(parser, env)
        parser.add_option('--rp-config-file',
                          action='store',
                          default=env.get('NOSE_RP_CONFIG_FILE'),
                          dest='rp_config',
                          help='config file path')
        parser.add_option('--rp-launch',
                          action='store',
                          default=None,
                          dest='rp_launch',
                          help='postfix of launch name in report portal')

        parser.add_option('--rp-mode',
                          action='store',
                          default=None,
                          dest='rp_mode',
                          help='level of logging')

    def configure(self, options, conf):
        """
        Configure plugin.
        """
        try:
            self.status.pop('active')
        except KeyError:
            pass
        super(ReportPortalPlugin, self).configure(options, conf)
        if self.enabled:

            self.conf = conf
            self.rp_config = options.rp_config
            config = configparser.ConfigParser()
            config.read(self.rp_config)

            if options.rp_launch:
                slaunch = options.rp_launch
            else:
                slaunch = "(unit tests)"
                if "type=integration" in options.attr:
                    slaunch ="(integration tests)"
                elif "type=component" in options.attr:
                    slaunch = "(component tests)"

            self.rp_mode = options.rp_mode or "DEBUG"
            self.clear = True
            if "base" in config:
                self.rp_uuid = config.get("base", "rp_uuid", fallback="")
                self.rp_endpoint = config.get("base", "rp_endpoint", fallback="")
                self.rp_project = config.get("base", "rp_project", fallback="")
                self.rp_launch = config.get("base", "rp_launch", fallback="{}").format(slaunch)
                self.rp_launch_tags = config.get("base", "rp_launch_tags", fallback="")
                self.rp_launch_description = config.get("base", "rp_launch_description", fallback="")

    def setupLoghandler(self):
        # setup our handler with root logger
        root_logger = logging.getLogger()
        if self.clear:
            if hasattr(root_logger, "handlers"):
                for handler in root_logger.handlers:
                    root_logger.removeHandler(handler)
            for logger in logging.Logger.manager.loggerDict.values():
                if hasattr(logger, "handlers"):
                    for handler in logger.handlers:
                        logger.removeHandler(handler)
        # make sure there isn't one already
        # you can't simply use "if self.handler not in root_logger.handlers"
        # since at least in unit tests this doesn't work --
        # LogCapture() is instantiated for each test case while root_logger
        # is module global
        # so we always add new MyMemoryHandler instance
        for handler in root_logger.handlers[:]:
            if isinstance(handler, RPNoseLogHandler):
                root_logger.handlers.remove(handler)
        root_logger.addHandler(self.handler)
        # Also patch any non-propagating loggers in the tree
        for logger in logging.Logger.manager.loggerDict.values():
            if not getattr(logger, 'propagate', True) and hasattr(logger, "addHandler"):
                for handler in logger.handlers[:]:
                    if isinstance(handler, RPNoseLogHandler):
                        logger.handlers.remove(handler)
                logger.addHandler(self.handler)
        # to make sure everything gets captured
        loglevel = getattr(self, "loglevel", "NOTSET")
        root_logger.setLevel(getattr(logging, loglevel))

    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.
        """
        self.service = ReportPortalServiceAsync(endpoint=self.rp_endpoint,
                                                project=self.rp_project,
                                                token=self.rp_uuid,
                                                error_handler=my_error_handler,
                                                queue_get_timeout=20)

        log.setLevel(logging.DEBUG)

        # Start launch.
        self.launch = self.service.start_launch(name=self.rp_launch,
                                                start_time=timestamp(),
                                                description=self.rp_launch_description,
                                                mode=self.rp_mode)
        self.handler = RPNoseLogHandler(service=self.service,
                                        level=logging.DEBUG,
                                        endpoint=self.rp_endpoint)
        self.setupLoghandler()


    def finalize(self, result):
        """Called after all report output, including output from all
        plugins, has been sent to the stream. Use this to print final
        test results or perform final cleanup. Return None to allow
        other plugins to continue printing, or any other value to stop
        them.

        :param result: test result object

        .. Note:: When tests are run under a test runner other than
           :class:`nose.core.TextTestRunner`, such as
           via ``python setup.py test``, this method may be called
           **before** the default report output is sent.
        """
        # Finish launch.
        self.service.finish_launch(end_time=timestamp())

        # Due to async nature of the service we need to call terminate() method which
        # ensures all pending requests to server are processed.
        # Failure to call terminate() may result in lost data.
        self.service.terminate()


    def startTest(self, test):
        """Prepare or wrap an individual test case. Called before
        execution of the test. The test passed here is a
        nose.case.Test instance; the case to be executed is in the
        test attribute of the passed case. To modify the test to be
        run, you should return a callable that takes one argument (the
        test result object) -- it is recommended that you *do not*
        side-effect the nose.case.Test instance you have been passed.

        Keep in mind that when you replace the test callable you are
        replacing the run() method of the test case -- including the
        exception handling and result calls, etc.

        :param test: the test case
        :type test: :class:`nose.case.Test`
        """

        self.service.start_test_item(name=str(test),
                                     description=test.test._testMethodDoc,
                                     tags=test.test.suites,
                                     start_time=timestamp(),
                                     item_type='TEST',
                                     parameters={})
        self.setupLoghandler()
        self.service.log(timestamp(), str(test), "INFO")


    def addDeprecated(self, test):
        """Called when a deprecated test is seen. DO NOT return a value
        unless you want to stop other plugins from seeing the deprecated
        test.

        .. warning :: DEPRECATED -- check error class in addError instead
        """
        self.service.log(timestamp(), "Deprecated test", "INFO")

    def _sendError(self, test, err):
        etype, value, tb = err
        self.service.log(timestamp(), value, "INFO")
        self.service.log(timestamp(), str(etype.__name__) + ":\n" +
                         "".join(traceback.format_tb(tb)), "ERROR")

    def addError(self, test,  err):
        """Called when a test raises an uncaught exception. DO NOT return a
        value unless you want to stop other plugins from seeing that the
        test has raised an error.

        :param test: the test case
        :type test: :class:`nose.case.Test`
        :param err: sys.exc_info() tuple
        :type err: 3-tuple
        """
        self._sendError(test, err)


    def addFailure(self, test, err):
        """Called when a test fails. DO NOT return a value unless you
        want to stop other plugins from seeing that the test has failed.

        :param test: the test case
        :type test: :class:`nose.case.Test`
        :param err: 3-tuple
        :type err: sys.exc_info() tuple
        """
        self._sendError(test, err)


    def addSkip(self, test):
        """Called when a test is skipped. DO NOT return a value unless
        you want to stop other plugins from seeing the skipped test.

        .. warning:: DEPRECATED -- check error class in addError instead
        """
        self.service.log(timestamp(), "SKIPPED test", "INFO")


    def addSuccess(self, test):
        """Called when a test passes. DO NOT return a value unless you
        want to stop other plugins from seeing the passing test.

        :param test: the test case
        :type test: :class:`nose.case.Test`
        """
        self.service.log(time=timestamp(), message="OK", level="INFO")


    def stopTest(self, test):
        """Called after each test is run. DO NOT return a value unless
        you want to stop other plugins from seeing that the test has stopped.

        :param test: the test case
        :type test: :class:`nose.case.Test`
        """
        if test.capturedOutput:
            self.service.log(timestamp(), str(test.capturedOutput), "INFO")

        if test.test._outcome.skipped:
            self.service.finish_test_item(end_time=timestamp(), status="SKIPPED")
        elif test.test._outcome.success:
            self.service.finish_test_item(end_time=timestamp(), status="PASSED")
        else:
            self.service.finish_test_item(end_time=timestamp(), status="FAILED")
コード例 #14
0
class ReportPortalReportingSession(ReportingSession):
    def __init__(self, url, auth_token, project, launch_name,
                 launch_description, report_dir, report):
        self.service = ReportPortalServiceAsync(
            endpoint=url,
            project=project,
            token=auth_token,
            error_handler=self._handle_rp_error)
        self.launch_name = launch_name
        self.launch_description = launch_description
        self.report_dir = report_dir
        self.report = report
        self._rp_exc_info = None

    def _handle_rp_error(self, exc_info):
        self._rp_exc_info = exc_info
        return False  # stop on error

    def _has_rp_error(self):
        return self._rp_exc_info is not None

    def _show_rp_error(self):
        print(
            "Got the following exception using ReportPortal, "
            "test results have not been properly synced:",
            file=sys.stderr)
        traceback.print_exception(*self._rp_exc_info, file=sys.stderr)

    def _end_current_test_item(self, end_time, status):
        self.service.finish_test_item(end_time=make_time(end_time),
                                      status=status)

    def _start_test_item(self,
                         item_type,
                         start_time,
                         name,
                         description,
                         wrapped=False):
        if wrapped:
            self.service.start_test_item(item_type="SUITE",
                                         start_time=make_time(start_time),
                                         name=name,
                                         description=description)
        self.service.start_test_item(item_type=item_type,
                                     start_time=make_time(start_time),
                                     name=name,
                                     description=description)

    def _end_test_item(self, end_time, is_successful, wrapped=False):
        status = "passed" if is_successful else "failed"
        if wrapped:
            self._end_current_test_item(end_time, status=status)
        self._end_current_test_item(end_time, status=status)

    def on_test_session_start(self, event):
        if self._has_rp_error():
            return

        self.service.start_launch(name=self.launch_name,
                                  description=self.launch_description,
                                  start_time=make_time(event.time))

    def on_test_session_end(self, event):
        if self._has_rp_error():
            self._show_rp_error()
        else:
            self.service.finish_launch(end_time=make_time(event.time))
            self.service.terminate()

            if self._has_rp_error():
                self._show_rp_error()

    def on_test_session_setup_start(self, event):
        if self._has_rp_error():
            return

        self._start_test_item(item_type="BEFORE_CLASS",
                              start_time=event.time,
                              name="session_setup",
                              description="Test Session Setup",
                              wrapped=True)

    def on_test_session_setup_end(self, event):
        if self._has_rp_error():
            return

        self._end_test_item(event.time,
                            not self.report.test_session_setup
                            or self.report.test_session_setup.is_successful(),
                            wrapped=True)

    def on_test_session_teardown_start(self, event):
        if self._has_rp_error():
            return

        self._start_test_item(item_type="AFTER_CLASS",
                              start_time=event.time,
                              name="session_teardown",
                              description="Test Session Teardown",
                              wrapped=True)

    def on_test_session_teardown_end(self, event):
        if self._has_rp_error():
            return

        self._end_test_item(
            event.time,
            not self.report.test_session_teardown
            or self.report.test_session_teardown.is_successful(),
            wrapped=True)

    def on_suite_start(self, event):
        if self._has_rp_error():
            return

        suite = event.suite
        self.service.start_test_item(
            item_type="SUITE",
            start_time=make_time(event.time),
            name=suite.name,
            description=suite.description,
            tags=suite.tags + convert_properties_into_tags(suite.properties) +
            convert_links_into_tags(suite.links),
        )

    def on_suite_end(self, event):
        if self._has_rp_error():
            return

        self._end_current_test_item(event.time, status="passed")

    def on_suite_setup_start(self, event):
        if self._has_rp_error():
            return

        self._start_test_item(item_type="BEFORE_CLASS",
                              start_time=event.time,
                              name="suite_setup",
                              description="Suite Setup",
                              wrapped=len(event.suite.get_suites()) > 0)

    def on_suite_setup_end(self, event):
        if self._has_rp_error():
            return

        suite_data = self.report.get_suite(event.suite)

        self._end_test_item(event.time,
                            not suite_data.suite_setup
                            or suite_data.suite_setup.is_successful(),
                            wrapped=len(event.suite.get_suites()) > 0)

    def on_suite_teardown_start(self, event):
        if self._has_rp_error():
            return

        self._start_test_item(item_type="AFTER_CLASS",
                              start_time=event.time,
                              name="suite_teardown",
                              description="Suite Teardown",
                              wrapped=len(event.suite.get_suites()) > 0)

    def on_suite_teardown_end(self, event):
        if self._has_rp_error():
            return

        suite_data = self.report.get_suite(event.suite)

        self._end_test_item(event.time,
                            not suite_data.suite_teardown
                            or suite_data.suite_teardown.is_successful(),
                            wrapped=len(event.suite.get_suites()) > 0)

    def on_test_start(self, event):
        if self._has_rp_error():
            return

        test = event.test
        self.service.start_test_item(
            item_type="TEST",
            start_time=make_time(event.time),
            name=test.name,
            description=test.description,
            tags=test.tags + convert_properties_into_tags(test.properties) +
            convert_links_into_tags(test.links))

    def on_test_end(self, event):
        if self._has_rp_error():
            return

        test_data = self.report.get_test(event.test)
        self._end_current_test_item(event.time, test_data.status)

    def _bypass_test(self, test, status, time):
        if self._has_rp_error():
            return

        self.service.start_test_item(
            item_type="TEST",
            start_time=make_time(time),
            name=test.name,
            description=test.description,
            tags=test.tags,
        )
        self._end_current_test_item(time, status=status)

    def on_test_skipped(self, event):
        if self._has_rp_error():
            return

        self._bypass_test(event.test, "skipped", event.time)

    def on_disabled_test(self, event):
        # do not log disabled test, moreover it seems that there is not corresponding status in ReportPortal
        pass

    def on_step(self, event):
        if self._has_rp_error():
            return

        self.service.log(make_time(event.time),
                         "--- STEP: %s ---" % event.step_description, "INFO")

    def on_log(self, event):
        if self._has_rp_error():
            return

        self.service.log(make_time(event.time), event.log_message,
                         event.log_level)

    def on_check(self, event):
        if self._has_rp_error():
            return

        message = "%s => %s" % (event.check_description,
                                "OK" if event.check_outcome else "NOT OK")
        if event.check_details is not None:
            message += "\nDetails: %s" % event.check_details
        self.service.log(make_time(event.time), message,
                         "INFO" if event.check_outcome else "ERROR")

    def on_log_attachment(self, event):
        if self._has_rp_error():
            return

        abspath = os.path.join(self.report_dir, event.attachment_path)
        with open(abspath, "rb") as fh:
            self.service.log(make_time(event.time),
                             event.attachment_description,
                             "INFO",
                             attachment={
                                 "name":
                                 osp.basename(event.attachment_path),
                                 "data":
                                 fh.read(),
                                 "mime":
                                 mimetypes.guess_type(abspath)[0]
                                 or "application/octet-stream"
                             })

    def on_log_url(self, event):
        if self._has_rp_error():
            return

        if event.url_description and event.url_description != event.url:
            message = "%s: %s" % (event.url_description, event.url)
        else:
            message = event.url
        self.service.log(make_time(event.time), message, "INFO")
コード例 #15
0
class NoseServiceClass(with_metaclass(Singleton, object)):
    def __init__(self):
        self.RP = None
        try:
            pkg_resources.get_distribution('reportportal_client >= 3.2.0')
            self.RP_SUPPORTS_PARAMETERS = True
        except pkg_resources.VersionConflict:
            self.RP_SUPPORTS_PARAMETERS = False

        self.ignore_errors = True
        self.ignored_tags = []

        self._errors = queue.Queue()
        self._loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')

    def init_service(self,
                     endpoint,
                     project,
                     token,
                     ignore_errors=True,
                     ignored_tags=None,
                     log_batch_size=20,
                     queue_get_timeout=5,
                     retries=0):
        # Avoid a mutable default argument for ignored_tags.
        ignored_tags = ignored_tags or []
        self._errors = queue.Queue()
        if self.RP is None:
            self.ignore_errors = ignore_errors
            if self.RP_SUPPORTS_PARAMETERS:
                self.ignored_tags = list(
                    set(ignored_tags).union({'parametrize'}))
            else:
                self.ignored_tags = ignored_tags
            log.debug(
                'ReportPortal - Init service: endpoint=%s, '
                'project=%s, uuid=%s', endpoint, project, token)
            self.RP = ReportPortalServiceAsync(
                endpoint=endpoint,
                project=project,
                token=token,
                error_handler=self.async_error_handler,
                queue_get_timeout=queue_get_timeout,
                retries=retries,
                log_batch_size=log_batch_size,
                # verify_ssl=verify_ssl
            )
            if self.RP and hasattr(self.RP.rp_client, "get_project_settings"):
                self.project_settings = self.RP.rp_client.get_project_settings()
            else:
                self.project_settings = None
            self.issue_types = self.get_issue_types()
        else:
            log.debug('The Report Portal service is already initialized')
        return self.RP

    def async_error_handler(self, exc_info):
        self.terminate_service(nowait=True)
        self.RP = None
        self._errors.put_nowait(exc_info)

    def terminate_service(self, nowait=False):
        if self.RP is not None:
            self.RP.terminate(nowait)
            self.RP = None

    def start_launch(self, name, mode=None, tags=None, description=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        sl_pt = {
            'name': name,
            'start_time': timestamp(),
            'description': description,
            'mode': mode,
            'tags': tags,
        }
        self.RP.start_launch(**sl_pt)

    def start_nose_item(self, ev, test=None):
        self._stop_if_necessary()
        if self.RP is None:
            return
        tags = []
        try:
            tags = test.test.suites
        except AttributeError:
            pass
        name = str(test)
        start_rq = {
            "name": name,
            "description": ev.describeTest(test),
            "tags": tags,
            "start_time": timestamp(),
            "item_type": "TEST",
            "parameters": {},
        }
        self.RP.start_test_item(**start_rq)
        self.post_log(name)

    def finish_nose_item(self, status, issue=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        self.post_log(status)
        fta_rq = {
            'end_time': timestamp(),
            'status': status,
            'issue': issue,
        }

        self.RP.finish_test_item(**fta_rq)

    def finish_launch(self, launch=None, status='rp_launch'):
        self._stop_if_necessary()
        if self.RP is None:
            return

        # To finish launch session str parameter is needed
        fl_rq = {
            'end_time': timestamp(),
            'status': status,
        }
        self.RP.finish_launch(**fl_rq)

    def post_log(self, message, loglevel='INFO', attachment=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        if loglevel not in self._loglevels:
            log.warning(
                'Incorrect loglevel = %s. Force set to INFO. '
                'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            'time': timestamp(),
            'message': message,
            'level': loglevel,
            'attachment': attachment,
        }
        self.RP.log(**sl_rq)

    def _stop_if_necessary(self):
        try:
            exc, msg, tb = self._errors.get(False)
            traceback.print_exception(exc, msg, tb)
            sys.stderr.flush()
            if not self.ignore_errors:
                sys.exit(msg)
        except queue.Empty:
            pass

    def get_issue_types(self):
        issue_types = {}
        if not self.project_settings:
            return issue_types

        for item_type in ("AUTOMATION_BUG", "PRODUCT_BUG", "SYSTEM_ISSUE",
                          "NO_DEFECT", "TO_INVESTIGATE"):
            for item in self.project_settings["subTypes"][item_type]:
                issue_types[item["shortName"]] = item["locator"]

        return issue_types
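A hedged call-order sketch for the singleton service above; connection values are placeholders, and the per-test calls would normally be issued from the nose plugin hooks that own the ev and test objects.

# Illustrative only; endpoint, token and project are placeholders.
svc = NoseServiceClass()
svc.init_service(endpoint="https://rp.example.com",
                 project="demo_project",
                 token="00000000-0000-0000-0000-000000000000")
svc.start_launch(name="nose run", description="Example launch")
# per test: svc.start_nose_item(ev, test), svc.post_log(...), svc.finish_nose_item(status)
svc.finish_launch(status="PASSED")
svc.terminate_service()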
コード例 #16
0
class ReportPortalManager:
    service: ReportPortalServiceAsync

    valid_batteries = ['smoke', 'full', 'develop']
    endpoint: str
    project: str
    token: str
    launch_name = "[{battery}] {product} {so} "
    launch_doc = "{product} V:{version}{build_version} {browser} {build_url}"

    @staticmethod
    def timestamp() -> str:
        """
        :return:
            str: timestamp convertido em str para uso nos relatorios
        """
        return
        return str(int(time() * 1000))

    @staticmethod
    def error_handler(exc_info):
        """
        Método paradão para gerenciar erros nas chamadas do Report Portal.
        :param exc_info:
            Exception responsavel pelo tratamento do erro.
        """
        return
        print("Error occurred: {}".format(exc_info[1]))
        traceback.print_exception(*exc_info)

    @staticmethod
    def format_traceback(step_traceback) -> str:
        """
        Concatena os erros do step para enviar ao Report Portal.
        :param step_traceback:
            Traceback contendo o erro.
        :return:
            str: Traceback convertida em string.
        """
        return
        val = ''
        for tb in traceback.format_tb(step_traceback):
            val += tb
        return val

    @staticmethod
    def create_attachment(path: str, name: str = None) -> dict:
        """
        Retorna um objeto de anexo pronto para ser enviado ao report portal.
        :param name:
            str: Nome do arquivo
        :param path:
            str: Caminho local ate o arquivo
        :return:
            dict: Objeto pronto para ser enviado ao seridor.
        """
        return
        with open(path, 'rb') as file:
            attachment = {
                "name": basename(path) if not name else name,
                "data": file.read(),
                "mime": guess_type(path)[0] or "application/octet-stream"
            }
        return attachment

    def __init__(self, launch_name: str, launch_doc: str, endpoint: str,
                 token: str, project: str):
        """
        Cria o gerenciador do processo de relatorios.
        :param launch_name:
            str: Nome do Launch
        :param launch_doc:
            str: Documentação do Launch
        :param endpoint:
            str: ReportPortal endpoint
        :param token:
            str: Auth token
        :param project:
            str: Nome do projeto
        """
        self.launch_name = launch_name
        self.launch_doc = launch_doc
        self.endpoint = endpoint
        self.project = project
        self.token = token
        return
        try:
            self.service = ReportPortalServiceAsync(
                endpoint=self.endpoint,
                project=self.project,
                token=self.token,
                error_handler=self.error_handler)
        except:
            print('Report Portal is having issues, please check your server.')

    def start_service(self):
        """
        Inicializa um novo serviço para a bateria de testes no Report Portal.
        """
        return
        try:
            self.service.start_launch(name=self.launch_name,
                                      start_time=self.timestamp(),
                                      description=self.launch_doc)
        except:
            pass

    def start_feature(self, feature: Feature):
        """
        Inicializa um novo teste de feature.
        Itens validos para o test_item (SUITE, STORY, TEST, SCENARIO, STEP,
        BEFORE_CLASS, BEFORE_GROUPS, BEFORE_METHOD, BEFORE_SUITE, BEFORE_TEST,
        AFTER_CLASS, AFTER_GROUPS, AFTER_METHOD, AFTER_SUITE, AFTER_TEST)
        :param feature:
            Objeto da feature utilizada no teste.
        """
        return
        try:
            self.service.start_test_item(name=feature.name,
                                         description=f'{feature.description}',
                                         tags=feature.tags,
                                         start_time=self.timestamp(),
                                         item_type="STORY")
        except:
            pass

    def start_scenario(self, scenario: Scenario):
        """
        Inicializa um novo cenario de testes.
        Itens validos para o test_item (SUITE, STORY, TEST, SCENARIO, STEP,
        BEFORE_CLASS, BEFORE_GROUPS, BEFORE_METHOD, BEFORE_SUITE, BEFORE_TEST,
        AFTER_CLASS, AFTER_GROUPS, AFTER_METHOD, AFTER_SUITE, AFTER_TEST)
        :param scenario:
            Objeto scenario utilizado no teste
        """
        return
        try:
            self.service.start_test_item(name=scenario.name,
                                         description=f'{scenario.description}',
                                         tags=scenario.tags,
                                         start_time=self.timestamp(),
                                         item_type="SCENARIO")
        except:
            print('Report Portal is having issues, please check your server.')

    def start_step(self, step: Step, attachment=None):
        """
        Cria um log relativo ao step realizado.
        :param step:
            Objeto step utilizado no teste.
        :param attachment:
            dict/str: anexo a ser enviado ao servidor.
        """
        return
        try:
            self.service.log(
                time=self.timestamp(),
                message=f"{step.name}[:{step.line}] - Has started...",
                attachment=attachment,
                level="INFO")
        except:
            pass

    def finish_step(self, step: Step, message_extras=None, attachment=None):
        """
        Cria um log de finalização de step. Acusando erro ou sucesso, de acordo
        com seu status.
        Atualmente gera um anexo com o um arquivo e envia ao servidor.
        :param step:
            Objeto step utilizado no teste.
        :param message_extras:
            str: adicionar texto extra na corpo da mensagem.
        :param attachment:
            dict/str: anexo a ser enviado ao servidor.
        """
        return
        try:
            status = step.status if type(
                step.status) == str else step.status.name
            if status == 'failed':
                message = (f'{step.name}[:{step.line}] - Has failed...\n' +
                           self.format_traceback(step.exc_traceback))
                level = 'ERROR'
            else:
                message = f"{step.name}[:{step.line}] - Has finished..."
                level = "INFO"

            message += message_extras if message_extras else ''

            allow_attachment = False
            for battery in self.valid_batteries:
                if battery in self.launch_name:
                    allow_attachment = True
                    break

            # Desabilitando temporariamente o uso dos txts para testes de stress.
            if attachment and '.txt' in attachment.name:
                allow_attachment = False

            self.service.log(
                time=self.timestamp(),
                message=message,
                level=level,
                attachment=attachment if allow_attachment else None)
        except:
            pass

    def finish_scenario(self, scenario: Scenario):
        """
        Finaliza o cenario de testes atual.
        :param scenario:
            Objeto scenario utilizado no teste
        """
        return
        try:
            status = scenario.status if type(
                scenario.status) == str else scenario.status.name
            self.service.finish_test_item(end_time=self.timestamp(),
                                          status=status)
        except:
            pass

    def finish_feature(self, feature: Feature):
        """
        Finaliza a feature de testes atual.
        :param feature:
            Objeto da feature utilizada no teste.
        """
        return
        try:
            status = feature.status if type(
                feature.status) == str else feature.status.name
            self.service.finish_test_item(end_time=self.timestamp(),
                                          status=status)
        except:
            pass

    def finish_service(self):
        """
        Finaliza o serviço, fecha a conexão com o servidor e conclui a
        bateria de testes.
        """
        return
        try:
            self.service.finish_launch(end_time=self.timestamp())
            self.service.terminate()
        except:
            pass
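A hedged environment.py sketch showing how the manager above could be driven from behave hooks; the launch naming loosely follows the launch_name/launch_doc templates declared on the class, and the endpoint, token and project values are placeholders.

# environment.py (illustrative); connection values are placeholders.
manager = ReportPortalManager(launch_name="[smoke] demo-product linux",
                              launch_doc="demo-product V:1.0 chrome",
                              endpoint="https://rp.example.com",
                              token="00000000-0000-0000-0000-000000000000",
                              project="demo_project")

def before_all(context):
    manager.start_service()

def before_feature(context, feature):
    manager.start_feature(feature)

def before_scenario(context, scenario):
    manager.start_scenario(scenario)

def after_scenario(context, scenario):
    manager.finish_scenario(scenario)

def after_feature(context, feature):
    manager.finish_feature(feature)

def after_all(context):
    manager.finish_service()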