Example #1
 def start_service(self):
     self.service = ReportPortalService(endpoint=self.endpoint,
                                        project=self.project,
                                        token=self.token,
                                        verify_ssl=self.verify_ssl)
     if self.launch_id:
         self.service.launch_id = self.launch_id
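Example #1 only constructs the client. For orientation, here is a minimal end-to-end sketch of how such a service object is typically driven, based on the calls that appear in the fuller examples below; it assumes a reportportal-client release that still exposes ReportPortalService, the endpoint/project/token values are placeholders, and the local timestamp() helper stands in for the millisecond-epoch helper the other snippets call.

import time

from reportportal_client import ReportPortalService


def timestamp():
    # Millisecond epoch string, the time format the Report Portal API expects.
    return str(int(time.time() * 1000))


service = ReportPortalService(endpoint="https://rp.example.com",  # placeholder
                              project="demo_project",             # placeholder
                              token="xxxx-xxxx")                  # placeholder
launch_id = service.start_launch(name="Demo launch",
                                 start_time=timestamp(),
                                 description="Minimal lifecycle sketch")
item_id = service.start_test_item(name="demo test",
                                  start_time=timestamp(),
                                  item_type="STEP")
service.log(time=timestamp(), message="hello from the test",
            level="INFO", item_id=item_id)
service.finish_test_item(item_id=item_id, end_time=timestamp(),
                         status="PASSED")
service.finish_launch(end_time=timestamp())
service.terminate()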
Example #2
 def init_service(self,
                  endpoint,
                  project,
                  uuid,
                  log_batch_size,
                  ignore_errors,
                  ignored_attributes,
                  verify_ssl=True,
                  retries=0):
     """Update self.rp with the instance of the ReportPortalService."""
     self._errors = queue.Queue()
     if self.rp is None:
         self.ignore_errors = ignore_errors
         self.ignored_attributes = ignored_attributes
         if self.rp_supports_parameters:
             self.ignored_attributes = list(
                 set(ignored_attributes).union({'parametrize'}))
         log.debug(
             'ReportPortal - Init service: endpoint=%s, '
             'project=%s, uuid=%s', endpoint, project, uuid)
         self.rp = ReportPortalService(endpoint=endpoint,
                                       project=project,
                                       token=uuid,
                                       retries=retries,
                                       verify_ssl=verify_ssl)
         self.project_settings = None
         if self.rp and hasattr(self.rp, "get_project_settings"):
             self.project_settings = self.rp.get_project_settings()
     else:
         log.debug('The pytest is already initialized')
     return self.rp
Example #3
    def init_service(self, endpoint, project, token, ignore_errors=True,
                     ignored_tags=[], log_batch_size=20, queue_get_timeout=5, retries=0):
        if self.rp is None:
            self.ignore_errors = ignore_errors
            if self.rp_supports_parameters:
                self.ignored_tags = list(set(ignored_tags).union({'parametrize'}))
            else:
                self.ignored_tags = ignored_tags
            log.debug('ReportPortal - Init service: endpoint=%s, project=%s, uuid=%s', endpoint, project, token)
            self.rp = ReportPortalService(
                endpoint=endpoint,
                project=project,
                token=token,
                retries=retries,
                log_batch_size=log_batch_size,
                # verify_ssl=verify_ssl
            )

            if self.rp and hasattr(self.rp, "get_project_settings"):
                self.project_settings = self.rp.get_project_settings()
            else:
                self.project_settings = None

            self.issue_types = self.get_issue_types()
        else:
            log.debug('The pytest is already initialized')
        return self.rp
Example #4
    def init_service(self, endpoint, project, uuid):

        if self.RP is None:
            logging.debug(msg="ReportPortal - Init service: "
                          "endpoint={0}, project={1}, uuid={2}".format(
                              endpoint, project, uuid))
            self.RP = ReportPortalService(endpoint=endpoint,
                                          project=project,
                                          token=uuid)
        else:
            logging.debug("The pytest is already initialized")
        return self.RP
Example #5
 def __init__(self,
              rp_endpoint,
              rp_project,
              rp_token,
              rp_launch_name,
              rp_launch_description,
              verify_ssl=False):
     self.rp_endpoint = rp_endpoint
     self.rp_project = rp_project
     self.rp_token = rp_token
     self.rp_launch_name = rp_launch_name
     self.rp_launch_description = rp_launch_description
     self.rp_async_service = ReportPortalService(endpoint=self.rp_endpoint,
                                                 project=self.rp_project,
                                                 token=self.rp_token,
                                                 verify_ssl=verify_ssl)
Example #6
    def __init__(self):
        """Initializes the instance."""
        cfg = get_cephci_config()
        access = cfg.get("report-portal")

        self.client = None
        self._test_id = None

        if access:
            try:
                self.client = ReportPortalService(
                    endpoint=access["endpoint"],
                    project=access["project"],
                    token=access["token"],
                    verify_ssl=False,
                )
            except BaseException:  # noqa
                log.warning("Unable to connect to Report Portal.")
Example #7
 def init_service(endpoint, project, uuid):
     if RobotService.rp is None:
         logging.debug("ReportPortal - Init service: "
                       "endpoint={0}, project={1}, uuid={2}".format(
                           endpoint, project, uuid))
         RobotService.rp = ReportPortalService(endpoint=endpoint,
                                               project=project,
                                               token=uuid)
     else:
         raise Exception("RobotFrameworkService is already initialized")
Example #8
def create_rp_service(cfg):
    """Create instance of ReportPortalService."""
    if cfg.enabled:
        return ReportPortalService(
            endpoint=cfg.endpoint,
            launch_id=cfg.launch_id,
            project=cfg.project,
            token=cfg.token,
            is_skipped_an_issue=cfg.is_skipped_an_issue,
            retries=cfg.retries,
        )
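Because create_rp_service returns None when reporting is disabled, callers have to guard the result. Below is a small sketch of the kind of configuration object the function expects and of that guard; the RPConfig dataclass is hypothetical, only its attribute names are taken from the cfg.* accesses above.

from dataclasses import dataclass
from typing import Optional


@dataclass
class RPConfig:
    # Hypothetical configuration holder; the attribute names mirror the
    # cfg.* fields read by create_rp_service() above.
    enabled: bool
    endpoint: str
    launch_id: Optional[str]
    project: str
    token: str
    is_skipped_an_issue: bool = False
    retries: int = 3


cfg = RPConfig(enabled=True,
               endpoint="https://rp.example.com",  # placeholder
               launch_id=None,
               project="demo_project",             # placeholder
               token="xxxx-xxxx")                  # placeholder
service = create_rp_service(cfg)
if service is None:
    # Reporting is disabled; skip all Report Portal calls.
    pass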
Example #9
    def service(self):
        """get service"""
        # creating service on first call to get service
        if self._service is None:
            self._service = ReportPortalService(endpoint=self.endpoint,
                                                project=self.project,
                                                token=self.api_token)
            self._service.session.verify = False

            # TODO: validate the service works

        return self._service
Example #10
    def init_service(endpoint, project, uuid):
        """Init service for Report Portal.

        Args:
            endpoint: ReportPortal API endpoint.
            project: project name in Report Portal.
            uuid: unique id of user in Report Portal profile.
        """
        if RobotService.rp is None:
            RobotService.rp = ReportPortalService(endpoint=endpoint,
                                                  project=project,
                                                  token=uuid)
        else:
            raise Exception("RobotFrameworkService is already initialized.")
Example #11
def create_report_portal_session():
    """
    Configures and creates a session to the Report Portal instance.

    Returns:
        The session object
    """
    cfg = get_cephci_config()["report-portal"]

    try:
        return ReportPortalService(
            endpoint=cfg["endpoint"],
            project=cfg["project"],
            token=cfg["token"],
            verify_ssl=False,
        )
    except BaseException:  # noqa
        print("Encountered an issue in connecting to report portal.")
Example #12
class PyTestServiceClass(with_metaclass(Singleton, object)):
    """Pytest service class for reporting test results to the Report Portal."""
    def __init__(self):
        """Initialize instance attributes."""
        self._agent_name = 'pytest-reportportal'
        self._errors = queue.Queue()
        self._hier_parts = {}
        self._issue_types = {}
        self._item_parts = {}
        self._loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')
        self.ignore_errors = True
        self.ignored_attributes = []
        self.log_batch_size = 20
        self.log_item_id = None
        self.parent_item_id = None
        self.rp = None
        self.rp_supports_parameters = True
        try:
            pkg_resources.get_distribution('reportportal_client >= 3.2.0')
        except pkg_resources.VersionConflict:
            self.rp_supports_parameters = False

    @property
    def issue_types(self):
        """Issue types for the Report Portal project."""
        if not self._issue_types:
            if not self.project_settings:
                return self._issue_types
            for item_type in ("AUTOMATION_BUG", "PRODUCT_BUG", "SYSTEM_ISSUE",
                              "NO_DEFECT", "TO_INVESTIGATE"):
                for item in self.project_settings["subTypes"][item_type]:
                    self._issue_types[item["shortName"]] = item["locator"]
        return self._issue_types

    def init_service(self,
                     endpoint,
                     project,
                     uuid,
                     log_batch_size,
                     ignore_errors,
                     ignored_attributes,
                     verify_ssl=True,
                     retries=0):
        """Update self.rp with the instance of the ReportPortalService."""
        self._errors = queue.Queue()
        if self.rp is None:
            self.ignore_errors = ignore_errors
            self.ignored_attributes = ignored_attributes
            if self.rp_supports_parameters:
                self.ignored_attributes = list(
                    set(ignored_attributes).union({'parametrize'}))
            log.debug(
                'ReportPortal - Init service: endpoint=%s, '
                'project=%s, uuid=%s', endpoint, project, uuid)
            self.rp = ReportPortalService(endpoint=endpoint,
                                          project=project,
                                          token=uuid,
                                          retries=retries,
                                          verify_ssl=verify_ssl)
            self.project_settings = None
            if self.rp and hasattr(self.rp, "get_project_settings"):
                self.project_settings = self.rp.get_project_settings()
        else:
            log.debug('The pytest is already initialized')
        return self.rp

    def start_launch(self,
                     launch_name,
                     mode=None,
                     description=None,
                     attributes=None,
                     **kwargs):
        self._stop_if_necessary()
        if self.rp is None:
            return

        sl_pt = {
            'attributes': self._get_launch_attributes(attributes),
            'name': launch_name,
            'start_time': timestamp(),
            'description': description,
            'mode': mode,
        }
        log.debug('ReportPortal - Start launch: request_body=%s', sl_pt)
        item_id = self.rp.start_launch(**sl_pt)
        log.debug('ReportPortal - Launch started: id=%s', item_id)
        return item_id

    def collect_tests(self, session):
        self._stop_if_necessary()
        if self.rp is None:
            return

        hier_dirs = False
        hier_module = False
        hier_class = False
        hier_param = False
        display_suite_file_name = True

        if not hasattr(session.config, 'slaveinput'):
            hier_dirs = session.config.getini('rp_hierarchy_dirs')
            hier_module = session.config.getini('rp_hierarchy_module')
            hier_class = session.config.getini('rp_hierarchy_class')
            hier_param = session.config.getini('rp_hierarchy_parametrize')
            display_suite_file_name = session.config.getini(
                'rp_display_suite_test_file')

        try:
            hier_dirs_level = int(
                session.config.getini('rp_hierarchy_dirs_level'))
        except ValueError:
            hier_dirs_level = 0

        dirs_parts = {}
        tests_parts = {}

        for item in session.items:
            # Start collecting test item parts
            parts = []

            # Hierarchy for directories
            rp_name = self._add_item_hier_parts_dirs(item, hier_dirs,
                                                     hier_dirs_level, parts,
                                                     dirs_parts)

            # Hierarchy for Module and Class/UnitTestCase
            item_parts = self._get_item_parts(item)
            rp_name = self._add_item_hier_parts_other(item_parts, item, Module,
                                                      hier_module, parts,
                                                      rp_name)
            rp_name = self._add_item_hier_parts_other(item_parts, item, Class,
                                                      hier_class, parts,
                                                      rp_name)
            rp_name = self._add_item_hier_parts_other(item_parts, item,
                                                      UnitTestCase, hier_class,
                                                      parts, rp_name)

            # Hierarchy for parametrized tests
            if hier_param:
                rp_name = self._add_item_hier_parts_parametrize(
                    item, parts, tests_parts, rp_name)

            # Hierarchy for test itself (Function/TestCaseFunction)
            item._rp_name = rp_name + ("::" if rp_name else "") + item.name

            # Result initialization
            for part in parts:
                part._rp_result = "PASSED"

            self._item_parts[item] = parts
            for part in parts:
                if '_pytest.python.Class' in str(
                        type(part)
                ) and not display_suite_file_name and not hier_module:
                    part._rp_name = part._rp_name.split("::")[-1]
                if part not in self._hier_parts:
                    self._hier_parts[part] = {
                        "finish_counter": 1,
                        "start_flag": False
                    }
                else:
                    self._hier_parts[part]["finish_counter"] += 1

    def start_pytest_item(self, test_item=None):
        self._stop_if_necessary()
        if self.rp is None:
            return

        self.parent_item_id = None
        for part in self._item_parts[test_item]:
            if self._hier_parts[part]["start_flag"]:
                self.parent_item_id = self._hier_parts[part]["item_id"]
                continue
            self._hier_parts[part]["start_flag"] = True

            payload = {
                'name': self._get_item_name(part),
                'description': self._get_item_description(part),
                'start_time': timestamp(),
                'item_type': 'SUITE',
                'parent_item_id': self.parent_item_id
            }
            log.debug('ReportPortal - Start Suite: request_body=%s', payload)
            item_id = self.rp.start_test_item(**payload)
            self.log_item_id = item_id
            self.parent_item_id = item_id
            self._hier_parts[part]["item_id"] = item_id

        start_rq = {
            'attributes': self._get_item_markers(test_item),
            'name': self._get_item_name(test_item),
            'description': self._get_item_description(test_item),
            'start_time': timestamp(),
            # Item type should be sent as "STEP" until we upgrade to RPv6.
            # Details at: https://github.com/reportportal/agent-Python-RobotFramework/issues/56
            'item_type': 'STEP',
            'parent_item_id': self.parent_item_id
        }
        if self.rp_supports_parameters:
            start_rq['parameters'] = self._get_parameters(test_item)

        log.debug('ReportPortal - Start TestItem: request_body=%s', start_rq)
        item_id = self.rp.start_test_item(**start_rq)
        self.log_item_id = item_id
        return item_id

    def finish_pytest_item(self, test_item, item_id, status, issue=None):
        self._stop_if_necessary()
        if self.rp is None:
            return

        fta_rq = {
            'end_time': timestamp(),
            'status': status,
            'issue': issue,
            'item_id': item_id
        }

        log.debug('ReportPortal - Finish TestItem: request_body=%s', fta_rq)

        parts = self._item_parts[test_item]
        self.rp.finish_test_item(**fta_rq)
        while len(parts) > 0:
            part = parts.pop()
            if status == "FAILED":
                part._rp_result = status
            self._hier_parts[part]["finish_counter"] -= 1
            if self._hier_parts[part]["finish_counter"] > 0:
                continue
            payload = {
                'end_time': timestamp(),
                'issue': issue,
                'item_id': self._hier_parts[part]["item_id"],
                'status': part._rp_result
            }
            log.debug('ReportPortal - End TestSuite: request_body=%s', payload)
            self.rp.finish_test_item(**payload)

    def finish_launch(self, status=None, **kwargs):
        self._stop_if_necessary()
        if self.rp is None:
            return

        # A string status parameter is needed to finish the launch
        fl_rq = {'end_time': timestamp(), 'status': status}
        log.debug('ReportPortal - Finish launch: request_body=%s', fl_rq)
        self.rp.finish_launch(**fl_rq)

    def post_log(self, message, loglevel='INFO', attachment=None):
        self._stop_if_necessary()
        if self.rp is None:
            return

        if loglevel not in self._loglevels:
            log.warning(
                'Incorrect loglevel = %s. Force set to INFO. '
                'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            'item_id': self.log_item_id,
            'time': timestamp(),
            'message': message,
            'level': loglevel,
            'attachment': attachment
        }
        self.rp.log(**sl_rq)

    def _stop_if_necessary(self):
        try:
            exc, msg, tb = self._errors.get(False)
            traceback.print_exception(exc, msg, tb)
            sys.stderr.flush()
            if not self.ignore_errors:
                pytest.exit(msg)
        except queue.Empty:
            pass

    @staticmethod
    def _add_item_hier_parts_dirs(item,
                                  hier_flag,
                                  dirs_level,
                                  report_parts,
                                  dirs_parts,
                                  rp_name=""):

        parts_dirs = PyTestServiceClass._get_item_dirs(item)
        dir_path = item.fspath.new(dirname="", basename="", drive="")
        rp_name_path = ""

        for dir_name in parts_dirs[dirs_level:]:
            dir_path = dir_path.join(dir_name)
            path = str(dir_path)

            if hier_flag:
                if path in dirs_parts:
                    item_dir = dirs_parts[path]
                    rp_name = ""
                else:
                    item_dir = File(dir_path,
                                    nodeid=dir_name,
                                    session=item.session,
                                    config=item.session.config)
                    rp_name += dir_name
                    item_dir._rp_name = rp_name
                    dirs_parts[path] = item_dir
                    rp_name = ""

                report_parts.append(item_dir)
            else:
                rp_name_path = path[1:]

        if not hier_flag:
            rp_name += rp_name_path

        return rp_name

    @staticmethod
    def _add_item_hier_parts_parametrize(item,
                                         report_parts,
                                         tests_parts,
                                         rp_name=""):

        for mark in item.own_markers:
            if mark.name == 'parametrize':
                ch_index = item.nodeid.find("[")
                test_fullname = item.nodeid[:ch_index if ch_index > 0 else len(
                    item.nodeid)]
                test_name = item.originalname

                rp_name += ("::" if rp_name else "") + test_name

                if test_fullname in tests_parts:
                    item_test = tests_parts[test_fullname]
                else:
                    item_test = Item(test_fullname,
                                     nodeid=test_fullname,
                                     session=item.session,
                                     config=item.session.config)
                    item_test._rp_name = rp_name
                    item_test.obj = item.obj
                    item_test.keywords = item.keywords
                    item_test.own_markers = item.own_markers
                    item_test.parent = item.parent

                    tests_parts[test_fullname] = item_test

                rp_name = ""
                report_parts.append(item_test)
                break

        return rp_name

    @staticmethod
    def _add_item_hier_parts_other(item_parts,
                                   item,
                                   item_type,
                                   hier_flag,
                                   report_parts,
                                   rp_name=""):

        for part in item_parts:

            if type(part) is item_type:

                if item_type is Module:
                    module_path = str(
                        item.fspath.new(dirname=rp_name,
                                        basename=part.fspath.basename,
                                        drive=""))
                    rp_name = module_path if rp_name else module_path[1:]
                elif item_type in (Class, Function, UnitTestCase,
                                   TestCaseFunction):
                    rp_name += ("::" if rp_name else "") + part.name

                if hier_flag:
                    part._rp_name = rp_name
                    rp_name = ""
                    report_parts.append(part)

        return rp_name

    @staticmethod
    def _get_item_parts(item):
        parts = []
        parent = item.parent
        if not isinstance(parent, Instance):
            parts.append(parent)
        while True:
            parent = parent.parent
            if parent is None:
                break
            if isinstance(parent, Instance):
                continue
            if isinstance(parent, Session):
                break
            parts.append(parent)

        parts.reverse()
        return parts

    @staticmethod
    def _get_item_dirs(item):

        root_path = item.session.config.rootdir.strpath
        dir_path = item.fspath.new(basename="")
        rel_dir = dir_path.new(dirname=dir_path.relto(root_path),
                               basename="",
                               drive="")

        dir_list = []
        for directory in rel_dir.parts(reverse=False):
            dir_name = directory.basename
            if dir_name:
                dir_list.append(dir_name)

        return dir_list

    def _get_launch_attributes(self, ini_attrs):
        """Generate launch attributes in the format supported by the client.

        :param list ini_attrs: List for attributes from the pytest.ini file
        """
        attributes = ini_attrs or []

        system_info = self.rp.get_system_information(self._agent_name)
        system_info['system'] = True
        system_attributes = _dict_to_payload(system_info)

        return attributes + system_attributes

    def _get_item_markers(self, item):
        # Try to extract names of @pytest.mark.* decorators used for the test
        # item and exclude those present in the rp_ignore_attributes parameter
        def get_marker_value(item, keyword):
            try:
                marker = item.get_closest_marker(keyword)
            except AttributeError:
                # pytest < 3.6
                marker = item.keywords.get(keyword)

            return "{}:{}".format(keyword, marker.args[0]) \
                if marker and marker.args else keyword

        try:
            get_marker = getattr(item, "get_closest_marker")
        except AttributeError:
            get_marker = getattr(item, "get_marker")
        attributes = [
            {
                "value": get_marker_value(item, k)
            } for k in item.keywords
            if get_marker(k) is not None and k not in self.ignored_attributes
        ]

        attributes.extend([{
            "value": tag
        } for tag in item.session.config.getini('rp_tests_attributes')])
        return attributes

    def _get_parameters(self, item):
        return item.callspec.params if hasattr(item, 'callspec') else None

    @staticmethod
    def _get_item_name(test_item):
        name = test_item._rp_name
        if len(name) > 256:
            name = name[:256]
            test_item.warn(
                PytestWarning(
                    'Test node ID was truncated to "{}" because of name size '
                    'constraints on reportportal'.format(name)))
        return name

    @staticmethod
    def _get_item_description(test_item):
        if isinstance(test_item, (Class, Function, Module, Item)):
            doc = test_item.obj.__doc__
            if doc is not None:
                return trim_docstring(doc)
        if isinstance(test_item, DoctestItem):
            return test_item.reportinfo()[2]
Example #13
class NoseServiceClass(with_metaclass(Singleton, object)):

    def __init__(self):
        self.rp = None
        try:
            pkg_resources.get_distribution('reportportal_client >= 3.2.0')
            self.rp_supports_parameters = True
        except pkg_resources.VersionConflict:
            self.rp_supports_parameters = False

        self.ignore_errors = True
        self.ignored_tags = []

        self._loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')

    def init_service(self, endpoint, project, token, ignore_errors=True,
                     ignored_tags=[], log_batch_size=20, queue_get_timeout=5, retries=0):
        if self.rp is None:
            self.ignore_errors = ignore_errors
            if self.rp_supports_parameters:
                self.ignored_tags = list(set(ignored_tags).union({'parametrize'}))
            else:
                self.ignored_tags = ignored_tags
            log.debug('ReportPortal - Init service: endpoint=%s, project=%s, uuid=%s', endpoint, project, token)
            self.rp = ReportPortalService(
                endpoint=endpoint,
                project=project,
                token=token,
                retries=retries,
                log_batch_size=log_batch_size,
                # verify_ssl=verify_ssl
            )

            if self.rp and hasattr(self.rp, "get_project_settings"):
                self.project_settings = self.rp.get_project_settings()
            else:
                self.project_settings = None

            self.issue_types = self.get_issue_types()
        else:
            log.debug('The pytest is already initialized')
        return self.rp

    def start_launch(self, name,
                     mode=None,
                     tags=None,
                     description=None):
        if self.rp is None:
            return

        sl_pt = {
            'name': name,
            'start_time': timestamp(),
            'description': description,
            'mode': mode,
            'tags': tags,
        }
        self.rp.start_launch(**sl_pt)

    def start_nose_item(self, ev, test=None):
        if self.rp is None:
            return
        tags = []
        try:
            tags = test.test.suites
        except AttributeError:
            pass
        name = str(test)
        start_rq = {
            "name": name,
            "description": ev.describeTest(test),
            "tags": tags,
            "start_time": timestamp(),
            "item_type": "TEST",
            "parameters": None,
        }
        self.post_log(name)
        return self.rp.start_test_item(**start_rq)

    def finish_nose_item(self, test_item, status, issue=None):
        if self.rp is None:
            return

        self.post_log(status)
        fta_rq = {
            'item_id': test_item,
            'end_time': timestamp(),
            'status': status,
            'issue': issue,
        }

        self.rp.finish_test_item(**fta_rq)

    def finish_launch(self, status=None):
        if self.rp is None:
            return

        # A string status parameter is needed to finish the launch
        fl_rq = {
            'end_time': timestamp(),
            'status': status,
        }
        self.rp.finish_launch(**fl_rq)

    def terminate_service(self, nowait=False):
        if self.rp is not None:
            self.rp.terminate(nowait)
            self.rp = None

    def post_log(self, message, loglevel='INFO', attachment=None):
        if self.rp is None:
            return

        if loglevel not in self._loglevels:
            log.warning('Incorrect loglevel = %s. Force set to INFO. '
                        'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            'time': timestamp(),
            'message': message,
            'level': loglevel,
            'attachment': attachment,
        }
        self.rp.log(**sl_rq)

    def get_issue_types(self):
        issue_types = {}

        if not self.project_settings:
            return issue_types

        for item_type in ("AUTOMATION_BUG", "PRODUCT_BUG", "SYSTEM_ISSUE", "NO_DEFECT", "TO_INVESTIGATE"):
            for item in self.project_settings["subTypes"][item_type]:
                issue_types[item["shortName"]] = item["locator"]

        return issue_types
Example #14
class IntegrationService:
    def __init__(self,
                 rp_endpoint,
                 rp_project,
                 rp_token,
                 rp_launch_name,
                 rp_launch_description,
                 verify_ssl=False):
        self.rp_endpoint = rp_endpoint
        self.rp_project = rp_project
        self.rp_token = rp_token
        self.rp_launch_name = rp_launch_name
        self.rp_launch_description = rp_launch_description
        self.rp_async_service = ReportPortalService(endpoint=self.rp_endpoint,
                                                    project=self.rp_project,
                                                    token=self.rp_token,
                                                    verify_ssl=verify_ssl)

    def start_launcher(self,
                       name,
                       start_time,
                       attributes,
                       description=None,
                       tags=None):
        return self.rp_async_service.start_launch(name=name,
                                                  start_time=start_time,
                                                  description=description,
                                                  attributes=attributes,
                                                  tags=tags)

    def start_feature_test(self, **kwargs):
        return self._start_test(**kwargs)

    def start_scenario_test(self, **kwargs):
        return self._start_test(**kwargs)

    def start_step_test(self, **kwargs):
        return self._start_test(**kwargs)

    def finish_step_test(self, **kwargs):
        return self._finish_test(**kwargs)

    def finish_scenario_test(self, **kwargs):
        return self._finish_test(**kwargs)

    def finish_feature(self, **kwargs):
        return self._finish_test(**kwargs)

    def finish_launcher(self, end_time, launch_id, status=None):
        return self.rp_async_service.finish_launch(end_time=end_time,
                                                   status=status,
                                                   launch_id=launch_id)

    def log_step_result(self,
                        end_time,
                        message,
                        level='INFO',
                        attachment=None,
                        item_id=None):
        self.rp_async_service.log(time=end_time,
                                  message=message,
                                  level=level,
                                  attachment=attachment,
                                  item_id=item_id)

    def terminate_service(self):
        self.rp_async_service.terminate()

    def _start_test(self,
                    name,
                    start_time,
                    item_type,
                    description=None,
                    tags=None,
                    parent_item_id=None):
        """
        item_type can be (SUITE, STORY, TEST, SCENARIO, STEP, BEFORE_CLASS,
        BEFORE_GROUPS, BEFORE_METHOD, BEFORE_SUITE, BEFORE_TEST, AFTER_CLASS,
        AFTER_GROUPS, AFTER_METHOD, AFTER_SUITE, AFTER_TEST)
        Types taken from report_portal/service.py
        Mark item as started
        """
        return self.rp_async_service.start_test_item(
            name=name,
            description=description,
            tags=tags,
            start_time=start_time,
            item_type=item_type,
            parent_item_id=parent_item_id)

    def _finish_test(self, end_time, status, item_id, issue=None):
        """
        Mark the item as completed and set its status accordingly.

        :param end_time: the end time of the execution
        :param status: the status to report
        :param item_id: the id of the execution to mark as complete
        :param issue: an existing issue to associate with the failure
        :return: the response of the finish_test_item call
        """
        return self.rp_async_service.finish_test_item(end_time=end_time,
                                                      status=status,
                                                      issue=issue,
                                                      item_id=item_id)
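A hedged sketch of how this IntegrationService might be driven for one feature with a single step; the connection values are placeholders, the item names are illustrative, and timestamp() is redefined locally as the same millisecond-epoch helper the other snippets use.

import time


def timestamp():
    # Millisecond epoch string, as expected by the Report Portal API.
    return str(int(time.time() * 1000))


svc = IntegrationService(rp_endpoint="https://rp.example.com",  # placeholder
                         rp_project="demo_project",             # placeholder
                         rp_token="xxxx-xxxx",                  # placeholder
                         rp_launch_name="BDD run",
                         rp_launch_description="Demo launch")
launch_id = svc.start_launcher(name=svc.rp_launch_name,
                               start_time=timestamp(),
                               attributes=None,
                               description=svc.rp_launch_description)
feature_id = svc.start_feature_test(name="Login feature",
                                    start_time=timestamp(),
                                    item_type="STORY")
step_id = svc.start_step_test(name="open the login page",
                              start_time=timestamp(),
                              item_type="STEP",
                              parent_item_id=feature_id)
svc.log_step_result(end_time=timestamp(),
                    message="step finished",
                    item_id=step_id)
svc.finish_step_test(end_time=timestamp(), status="PASSED", item_id=step_id)
svc.finish_feature(end_time=timestamp(), status="PASSED", item_id=feature_id)
svc.finish_launcher(end_time=timestamp(), launch_id=launch_id, status="PASSED")
svc.terminate_service()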
Example #15
class PyTestServiceClass(with_metaclass(Singleton, object)):

    _loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')

    def __init__(self):
        self.RP = None
        self.TEST_ITEM_STACK = []
        self.launch_id = None

    def init_service(self, endpoint, project, uuid):

        if self.RP is None:
            logging.debug(msg="ReportPortal - Init service: "
                          "endpoint={0}, project={1}, uuid={2}".format(
                              endpoint, project, uuid))
            self.RP = ReportPortalService(endpoint=endpoint,
                                          project=project,
                                          token=uuid)
        else:
            logging.debug("The pytest is already initialized")
        return self.RP

    def start_launch(self,
                     launch_name=None,
                     mode=None,
                     tags=None,
                     launch=None):
        # In future versions a launch object (suite, test case)
        # could be passed as a parameter
        sl_pt = StartLaunchRQ(name=launch_name,
                              start_time=timestamp(),
                              description='Pytest Launch',
                              mode=mode,
                              tags=tags)
        logging.debug(msg="ReportPortal - Start launch: "
                      "request_body={0}".format(sl_pt.data))
        req_data = self.RP.start_launch(sl_pt)
        logging.debug(msg="ReportPortal - Launch started: "
                      "response_body={0}".format(req_data.raw))
        self.launch_id = req_data.id

        self.TEST_ITEM_STACK.append((None, "SUITE"))
        logging.debug(
            msg="ReportPortal - Stack: {0}".format(self.TEST_ITEM_STACK))

    def start_pytest_item(self, test_item=None):
        try:
            # for common items
            item_description = test_item.function.__doc__
        except AttributeError:
            # doctest  has no `function` attribute
            item_description = test_item.reportinfo()[2]
        start_rq = StartTestItemRQ(name=test_item.name,
                                   description=item_description,
                                   tags=['PyTest Item Tag'],
                                   start_time=timestamp(),
                                   launch_id=self.launch_id,
                                   type="TEST")

        parent_item_id = self._get_top_id_from_stack()

        logging.debug(msg="ReportPortal - Start TestItem: "
                      "request_body={0}, parent_item={1}".format(
                          start_rq.data, parent_item_id))

        req_data = self.RP.start_test_item(parent_item_id=parent_item_id,
                                           start_test_item_rq=start_rq)

        self.TEST_ITEM_STACK.append((req_data.id, "TEST"))
        logging.debug(
            msg="ReportPortal - Stack: {0}".format(self.TEST_ITEM_STACK))

    def finish_pytest_item(self, status, issue=None):
        fta_rq = FinishTestItemRQ(end_time=timestamp(),
                                  status=status,
                                  issue=issue)

        test_item_id = self._get_top_id_from_stack()
        logging.debug(
            msg="ReportPortal - Finish TetsItem:"
            " request_body={0}, test_id={1}".format(fta_rq.data, test_item_id))
        self.RP.finish_test_item(item_id=test_item_id,
                                 finish_test_item_rq=fta_rq)
        self.TEST_ITEM_STACK.pop()
        logging.debug(
            msg="ReportPortal - Stack: {0}".format(self.TEST_ITEM_STACK))

    def finish_launch(self, launch=None, status="rp_launch"):
        # A string status parameter is needed to finish the launch
        fl_rq = FinishExecutionRQ(end_time=timestamp(), status=status)
        launch_id = self.launch_id
        logging.debug(
            msg="ReportPortal - Finish launch: "
            "request_body={0}, launch_id={1}".format(fl_rq.data, launch_id))
        self.RP.finish_launch(launch_id, fl_rq)
        self.TEST_ITEM_STACK.pop()
        logging.debug(
            msg="ReportPortal - Stack: {0}".format(self.TEST_ITEM_STACK))

    def _get_top_id_from_stack(self):
        try:
            return self.TEST_ITEM_STACK[-1][0]
        except IndexError:
            return None

    def post_log(self, message, loglevel='INFO'):
        if loglevel not in self._loglevels:
            logging.warning(
                'Incorrect loglevel = {}. Force set to INFO. Available levels: '
                '{}.'.format(loglevel, self._loglevels))
            loglevel = 'INFO'

        sl_rq = SaveLogRQ(item_id=self._get_top_id_from_stack(),
                          time=timestamp(),
                          message=message,
                          level=loglevel)
        self.RP.log(sl_rq)
Example #16
class ReportPortal:
    """Handles logging to report portal."""
    def __init__(self):
        """Initializes the instance."""
        cfg = get_cephci_config()
        access = cfg.get("report-portal")

        self.client = None
        self._test_id = None

        if access:
            try:
                self.client = ReportPortalService(
                    endpoint=access["endpoint"],
                    project=access["project"],
                    token=access["token"],
                    verify_ssl=False,
                )
            except BaseException:  # noqa
                log.warning("Unable to connect to Report Portal.")

    @rp_deco
    def start_launch(self, name: str, description: str,
                     attributes: dict) -> None:
        """
        Initiates a test execution with the provided details

        Args:
            name (str):         Name of test execution.
            description (str):  Meta data information to be added to the launch.
            attributes (dict):  Meta data information as dict

        Returns:
             None
        """
        self.client.start_launch(name,
                                 start_time=timestamp(),
                                 description=description,
                                 attributes=attributes)

    @rp_deco
    def start_test_item(self, name: str, description: str,
                        item_type: str) -> None:
        """
        Records an entry within the initiated launch.

        Args:
            name (str):         Name to be set for the test step
            description (str):  Meta information to be used.
            item_type (str):    Type of entry to be created.

        Returns:
            None
        """
        self._test_id = self.client.start_test_item(name,
                                                    start_time=timestamp(),
                                                    item_type=item_type,
                                                    description=description)

    @rp_deco
    def finish_test_item(self, status: Optional[str] = "PASSED") -> None:
        """
        Ends a test entry with the given status.

        Args:
            status (str):   The status to set for the item; defaults to "PASSED".
        """
        if not self._test_id:
            return

        self.client.finish_test_item(item_id=self._test_id,
                                     end_time=timestamp(),
                                     status=status)

    @rp_deco
    def finish_launch(self) -> None:
        """Closes the Report Portal execution run."""
        self.client.finish_launch(end_time=timestamp())
        self.client.terminate()

    @rp_deco
    def log(self, message: str) -> None:
        """
        Adds log records to the event.

        Args:
            message (str):  Message to be logged.

        Returns:
            None
        """
        self.client.log(time=timestamp(), message=message, level="INFO")
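Assuming get_cephci_config() returns a populated report-portal section, a typical call sequence for this wrapper could look like the sketch below; the launch and item names are purely illustrative.

rp = ReportPortal()
rp.start_launch(name="ceph smoke run",
                description="Nightly smoke execution",
                attributes={"build": "demo"})
rp.start_test_item(name="deploy cluster",
                   description="bootstrap and health check",
                   item_type="STEP")
rp.log(message="cluster reported HEALTH_OK")
rp.finish_test_item(status="PASSED")
rp.finish_launch()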
Example #17
    def report_test_results(self, errors, performance_degradation_rate,
                            compare_with_baseline, missed_threshold_rate,
                            compare_with_thresholds):
        self.create_project()
        service = ReportPortalService(endpoint=self.rp_url,
                                      project=self.rp_project,
                                      token=self.rp_token,
                                      error_handler=self.my_error_handler,
                                      verify_ssl=self.verify_ssl)

        # Start launch.
        service.start_launch(
            name=self.rp_launch_name + ": performance testing results",
            start_time=self.timestamp(),
            description='Test name - {}'.format(self.args['simulation']))
        errors_len = len(errors)

        if errors_len > 0:
            functional_error_test_item = service.start_test_item(
                name="Functional errors",
                start_time=self.timestamp(),
                description="This simulation has failed requests",
                item_type="SUITE")
            for key in errors:
                # Start test item.
                item_name = self.get_item_name(errors[key])
                item_id = service.start_test_item(
                    name=item_name,
                    parent_item_id=functional_error_test_item,
                    description="This request was failed {} times".format(
                        errors[key]['Error count']),
                    start_time=self.timestamp(),
                    item_type="STEP",
                    parameters={
                        "simulation": self.args['simulation'],
                        'test type': self.args['type']
                    })

                self.log_message(item_id, service, 'Request name', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Method', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Request URL', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Request_params',
                                 errors[key], 'WARN')
                self.log_message(item_id, service, 'Request headers',
                                 errors[key], 'INFO')
                self.log_message(item_id, service, 'Error count', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Error_message',
                                 errors[key], 'WARN')
                self.log_message(item_id, service, 'Response code',
                                 errors[key], 'WARN')
                self.log_message(item_id, service, 'Response', errors[key],
                                 'WARN')
                self.log_unique_error_id(item_id, service,
                                         errors[key]['Request name'],
                                         errors[key]['Method'],
                                         errors[key]['Response code'])

                service.finish_test_item(item_id=item_id,
                                         end_time=self.timestamp(),
                                         status="FAILED")
            service.finish_test_item(item_id=functional_error_test_item,
                                     end_time=self.timestamp(),
                                     status="FAILED")
        else:
            item_id = service.start_test_item(
                name="Functional errors",
                start_time=self.timestamp(),
                item_type="STEP",
                description='This simulation has no functional errors')
            service.finish_test_item(item_id=item_id,
                                     end_time=self.timestamp(),
                                     status="PASSED")

        if performance_degradation_rate > self.performance_degradation_rate:
            baseline_item_id = service.start_test_item(
                name="Compare to baseline",
                start_time=self.timestamp(),
                description="Test \"{}\" failed with performance degradation"
                " rate {}".format(self.args['simulation'],
                                  performance_degradation_rate),
                item_type="SUITE")

            service.log(
                item_id=baseline_item_id,
                time=self.timestamp(),
                message="The following requests are slower than baseline:",
                level="{}".format('INFO'))
            for request in compare_with_baseline:
                item_id = service.start_test_item(
                    name="\"{}\" reached {} ms by {}. Baseline {} ms.".format(
                        request['request_name'], request['response_time'],
                        self.args['comparison_metric'], request['baseline']),
                    parent_item_id=baseline_item_id,
                    start_time=self.timestamp(),
                    item_type="STEP",
                    parameters={
                        'simulation': self.args['simulation'],
                        'test type': self.args['type']
                    })

                service.log(item_id=item_id,
                            time=self.timestamp(),
                            message="\"{}\" reached {} ms by {}."
                            " Baseline {} ms.".format(
                                request['request_name'],
                                request['response_time'],
                                self.args['comparison_metric'],
                                request['baseline']),
                            level="{}".format('WARN'))
                service.finish_test_item(item_id=item_id,
                                         end_time=self.timestamp(),
                                         status="FAILED")
            service.log(time=self.timestamp(),
                        message=hashlib.sha256(
                            "{} performance degradation".format(
                                self.args['simulation']).strip().encode(
                                    'utf-8')).hexdigest(),
                        level='ERROR')

            service.finish_test_item(item_id=baseline_item_id,
                                     end_time=self.timestamp(),
                                     status="FAILED")
        else:
            item_id = service.start_test_item(
                name="Compare to baseline",
                start_time=self.timestamp(),
                item_type="STEP",
                description='Performance degradation rate less than {}'.format(
                    self.performance_degradation_rate))
            service.finish_test_item(item_id=item_id,
                                     end_time=self.timestamp(),
                                     status="PASSED")

        if missed_threshold_rate > self.missed_thresholds_rate:
            thresholds_item_id = service.start_test_item(
                name="Compare with thresholds",
                start_time=self.timestamp(),
                description="Test \"{}\" failed with missed thresholds"
                " rate {}".format(self.args['simulation'],
                                  missed_threshold_rate),
                item_type="SUITE")

            for color in ["yellow", "red"]:
                colored = False
                for th in compare_with_thresholds:
                    if th['threshold'] == color:
                        item_id = service.start_test_item(
                            name="{} threshold for  \"{}\"".format(
                                color, th['request_name']),
                            start_time=self.timestamp(),
                            parent_item_id=thresholds_item_id,
                            item_type="STEP",
                            parameters={
                                'simulation': self.args['simulation'],
                                'test type': self.args['type']
                            })
                        if not colored:
                            service.log(
                                item_id=item_id,
                                time=self.timestamp(),
                                message=
                                f"The following {color} thresholds were exceeded:",
                                level="INFO")
                        appendage = calculate_appendage(th['target'])
                        service.log(
                            item_id=item_id,
                            time=self.timestamp(),
                            message=
                            f"\"{th['request_name']}\" {th['target']}{appendage} with value {th['metric']}{appendage} exceeded threshold of {th[color]}{appendage}",
                            level="WARN")
                        service.finish_test_item(item_id=item_id,
                                                 end_time=self.timestamp(),
                                                 status="FAILED")
            service.log(item_id=item_id,
                        time=self.timestamp(),
                        message=hashlib.sha256("{} missed thresholds".format(
                            self.args['simulation']).strip().encode(
                                'utf-8')).hexdigest(),
                        level='ERROR')

            service.finish_test_item(item_id=thresholds_item_id,
                                     end_time=self.timestamp(),
                                     status="FAILED")
        else:
            item_id = service.start_test_item(
                name="Compare with thresholds",
                start_time=self.timestamp(),
                item_type="STEP",
                description='Missed thresholds rate less than {}'.format(
                    self.missed_thresholds_rate))
            service.finish_test_item(item_id=item_id,
                                     end_time=self.timestamp(),
                                     status="PASSED")
        # Finish launch.
        service.finish_launch(end_time=self.timestamp())

        service.terminate()
Example #18
class ReportPortalDataWriter(object):
    def __init__(self,
                 endpoint,
                 token,
                 project,
                 launch_name=None,
                 launch_doc=None,
                 launch_id=None,
                 verify_ssl=False):
        self.endpoint = endpoint
        self.token = token
        self.project = project
        self.launch_name = launch_name
        self.launch_doc = launch_doc
        self.service = None
        self.test = None
        self.verify_ssl = verify_ssl
        self.launch_id = launch_id

    def start_service(self):
        self.service = ReportPortalService(endpoint=self.endpoint,
                                           project=self.project,
                                           token=self.token,
                                           verify_ssl=self.verify_ssl)
        if self.launch_id:
            self.service.launch_id = self.launch_id

    def start_test(self):
        if not self.service:
            self.start_service()
        return self.service.start_launch(name=self.launch_name,
                                         start_time=timestamp(),
                                         description=self.launch_doc)

    def finish_test(self):
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()
        self.service = None

    def is_test_started(self):
        if self.service:
            return True
        return False

    def start_test_item(self,
                        issue,
                        description,
                        tags,
                        item_type='STEP',
                        parameters={}):
        self.service.start_test_item(issue,
                                     description=description,
                                     tags=tags,
                                     start_time=timestamp(),
                                     item_type=item_type,
                                     parameters=parameters)

    def test_item_message(self, message, level="ERROR", attachment=None):
        if len(message) > constants.MAX_MESSAGE_LEN:
            index = 0
            while index < len(message):
                increment = constants.MAX_MESSAGE_LEN
                if index + increment > len(message):
                    increment = len(message) - index
                self.service.log(time=timestamp(),
                                 message=message[index:index + increment],
                                 level=level,
                                 attachment=attachment)
                index = index + increment
        else:
            self.service.log(time=timestamp(),
                             message=message,
                             level=level,
                             attachment=attachment)

    def finish_test_item(self):
        self.service.finish_test_item(end_time=timestamp(), status="FAILED")
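A short usage sketch for ReportPortalDataWriter with placeholder connection values; note that start_test() creates the service lazily and that finish_test_item() always reports the item as FAILED.

writer = ReportPortalDataWriter(endpoint="https://rp.example.com",  # placeholder
                                token="xxxx-xxxx",                  # placeholder
                                project="demo_project",             # placeholder
                                launch_name="scan results",
                                launch_doc="Demo launch description")
writer.start_test()                      # creates the service lazily as well
writer.start_test_item(issue="finding: insecure configuration",
                       description="details of the finding",
                       tags=["demo"])
writer.test_item_message("long messages are split into chunks automatically")
writer.finish_test_item()                # this writer always reports FAILED
writer.finish_test()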
Example #19
class PyTestServiceClass(with_metaclass(Singleton, object)):
    """Pytest service class for reporting test results to the Report Portal."""
    def __init__(self):
        """Initialize instance attributes."""
        self._errors = queue.Queue()
        self._hier_parts = {}
        self._issue_types = {}
        self._item_parts = {}
        self._loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')
        self._skip_analytics = getenv('ALLURE_NO_ANALYTICS')
        self.agent_name = 'pytest-reportportal'
        self.agent_version = get_package_version(self.agent_name)
        self.ignore_errors = True
        self.ignored_attributes = []
        self.log_batch_size = 20
        self.is_skipped_an_issue = True
        self.log_item_id = None
        self.parent_item_id = None
        self.rp = None
        self.rp_supports_parameters = True
        try:
            pkg_resources.get_distribution('reportportal_client >= 3.2.0')
        except pkg_resources.VersionConflict:
            self.rp_supports_parameters = False

    @property
    def issue_types(self):
        """Issue types for the Report Portal project."""
        if not self._issue_types:
            if not self.project_settings:
                return self._issue_types
            for item_type in ("AUTOMATION_BUG", "PRODUCT_BUG", "SYSTEM_ISSUE",
                              "NO_DEFECT", "TO_INVESTIGATE"):
                for item in self.project_settings["subTypes"][item_type]:
                    self._issue_types[item["shortName"]] = item["locator"]
        return self._issue_types

    def init_service(self,
                     endpoint,
                     project,
                     uuid,
                     log_batch_size,
                     is_skipped_an_issue,
                     ignore_errors,
                     ignored_attributes,
                     verify_ssl=True,
                     custom_launch=None,
                     parent_item_id=None,
                     retries=0):
        """Update self.rp with the instance of the ReportPortalService."""
        self._errors = queue.Queue()
        if self.rp is None:
            self.ignore_errors = ignore_errors
            self.ignored_attributes = ignored_attributes
            self.parent_item_id = parent_item_id
            if self.rp_supports_parameters:
                self.ignored_attributes = list(
                    set(ignored_attributes).union({'parametrize'}))
            self.log_batch_size = log_batch_size
            self.is_skipped_an_issue = is_skipped_an_issue
            log.debug(
                'ReportPortal - Init service: endpoint=%s, '
                'project=%s, uuid=%s', endpoint, project, uuid)
            self.rp = ReportPortalService(
                endpoint=endpoint,
                project=project,
                token=uuid,
                log_batch_size=log_batch_size,
                is_skipped_an_issue=is_skipped_an_issue,
                retries=retries,
                verify_ssl=verify_ssl,
                launch_id=custom_launch,
            )
            self.project_settings = None
            if self.rp and hasattr(self.rp, "get_project_settings"):
                self.project_settings = self.rp.get_project_settings()
        else:
            log.debug('The pytest is already initialized')
        return self.rp

    def start_launch(self,
                     launch_name,
                     mode=None,
                     description=None,
                     attributes=None,
                     rerun=False,
                     rerun_of=None,
                     **kwargs):
        """
        Launch test items.

        :param launch_name: name of the launch
        :param mode:        mode
        :param description: description of launch test
        :param kwargs:      additional params
        :return: item ID
        """
        self._stop_if_necessary()
        if self.rp is None:
            return

        sl_pt = {
            'attributes': self._get_launch_attributes(attributes),
            'name': launch_name,
            'start_time': timestamp(),
            'description': description,
            'mode': mode,
            'rerun': rerun,
            'rerunOf': rerun_of
        }
        log.debug('ReportPortal - Start launch: request_body=%s', sl_pt)
        item_id = self.rp.start_launch(**sl_pt)
        log.debug('ReportPortal - Launch started: id=%s', item_id)
        if not self._skip_analytics:
            send_event(self.agent_name, self.agent_version)
        return item_id
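
    # Hedged usage note: Report Portal launch modes are "DEFAULT" and "DEBUG";
    # a call such as
    #   service.start_launch(launch_name="smoke", mode="DEBUG",
    #                        attributes=["env:staging"])
    # (all values are placeholders) would create a launch shown in the Debug
    # view rather than among regular launches.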

    def collect_tests(self, session):
        """
        Collect all tests.

        :param session: pytest.Session
        :return: None
        """
        self._stop_if_necessary()
        if self.rp is None:
            return

        hier_dirs = False
        hier_module = False
        hier_class = False
        hier_param = False
        display_suite_file_name = True

        if not hasattr(session.config, 'workerinput'):
            hier_dirs = session.config.getini('rp_hierarchy_dirs')
            hier_module = session.config.getini('rp_hierarchy_module')
            hier_class = session.config.getini('rp_hierarchy_class')
            hier_param = session.config.getini('rp_hierarchy_parametrize')
            display_suite_file_name = session.config.getini(
                'rp_display_suite_test_file')

        try:
            hier_dirs_level = int(
                session.config.getini('rp_hierarchy_dirs_level'))
        except ValueError:
            hier_dirs_level = 0

        dirs_parts = {}
        tests_parts = {}

        for item in session.items:
            # Start collecting test item parts
            parts = []

            # Hierarchy for directories
            rp_name = self._add_item_hier_parts_dirs(item, hier_dirs,
                                                     hier_dirs_level, parts,
                                                     dirs_parts)

            # Hierarchy for Module and Class/UnitTestCase
            item_parts = self._get_item_parts(item)
            rp_name = self._add_item_hier_parts_other(item_parts, item, Module,
                                                      hier_module, parts,
                                                      rp_name)
            rp_name = self._add_item_hier_parts_other(item_parts, item, Class,
                                                      hier_class, parts,
                                                      rp_name)
            rp_name = self._add_item_hier_parts_other(item_parts, item,
                                                      UnitTestCase, hier_class,
                                                      parts, rp_name)

            # Hierarchy for parametrized tests
            if hier_param:
                rp_name = self._add_item_hier_parts_parametrize(
                    item, parts, tests_parts, rp_name)

            # Hierarchy for test itself (Function/TestCaseFunction)
            item._rp_name = rp_name + ("::" if rp_name else "") + item.name

            # Result initialization
            for part in parts:
                part._rp_result = "PASSED"

            self._item_parts[item] = parts
            for part in parts:
                if '_pytest.python.Class' in str(type(
                        part)) and not display_suite_file_name and not \
                        hier_module:
                    part._rp_name = part._rp_name.split("::")[-1]
                if part not in self._hier_parts:
                    self._hier_parts[part] = {
                        "finish_counter": 1,
                        "start_flag": False
                    }
                else:
                    self._hier_parts[part]["finish_counter"] += 1

    def start_pytest_item(self, test_item=None):
        """
        Start pytest_item.

        :param test_item: pytest.Item
        :return: item ID
        """
        self._stop_if_necessary()
        if self.rp is None:
            return

        for part in self._item_parts[test_item]:
            if self._hier_parts[part]["start_flag"]:
                self.parent_item_id = self._hier_parts[part]["item_id"]
                continue
            self._hier_parts[part]["start_flag"] = True
            payload = {
                'name': self._get_item_name(part),
                'description': self._get_item_description(part),
                'start_time': timestamp(),
                'item_type': 'SUITE',
                'parent_item_id': self.parent_item_id,
                'code_ref': str(test_item.fspath)
            }
            log.debug('ReportPortal - Start Suite: request_body=%s', payload)
            item_id = self.rp.start_test_item(**payload)
            self.log_item_id = item_id
            self.parent_item_id = item_id
            self._hier_parts[part]["item_id"] = item_id

        # Item type should be sent as "STEP" until we upgrade to RPv6.
        # Details at:
        # https://github.com/reportportal/agent-Python-RobotFramework/issues/56
        start_rq = {
            'attributes': self._get_item_markers(test_item),
            'name': self._get_item_name(test_item),
            'description': self._get_item_description(test_item),
            'start_time': timestamp(),
            'item_type': 'STEP',
            'parent_item_id': self.parent_item_id,
            'code_ref': '{0}:{1}'.format(test_item.fspath, test_item.name)
        }
        if self.rp_supports_parameters:
            start_rq['parameters'] = self._get_parameters(test_item)

        log.debug('ReportPortal - Start TestItem: request_body=%s', start_rq)
        item_id = self.rp.start_test_item(**start_rq)
        self.log_item_id = item_id
        return item_id

    def finish_pytest_item(self, test_item, item_id, status, issue=None):
        """
        Finish pytest_item.

        :param test_item: pytest.Item
        :param item_id:   ID of the test item to finish
        :param status:    an item finish status (PASSED, FAILED, STOPPED,
        SKIPPED, RESETED, CANCELLED, INFO, WARN)
        :param issue:     an external system issue reference
        :return: None
        """
        self._stop_if_necessary()
        if self.rp is None:
            return

        fta_rq = {
            'end_time': timestamp(),
            'status': status,
            'issue': issue,
            'item_id': item_id
        }

        log.debug('ReportPortal - Finish TestItem: request_body=%s', fta_rq)

        parts = self._item_parts[test_item]
        self.rp.finish_test_item(**fta_rq)
        while len(parts) > 0:
            part = parts.pop()
            if status == "FAILED":
                part._rp_result = status
            self._hier_parts[part]["finish_counter"] -= 1
            if self._hier_parts[part]["finish_counter"] > 0:
                continue
            payload = {
                'end_time': timestamp(),
                'issue': issue,
                'item_id': self._hier_parts[part]["item_id"],
                'status': part._rp_result
            }
            log.debug('ReportPortal - End TestSuite: request_body=%s', payload)
            self.rp.finish_test_item(**payload)
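
    # Bookkeeping sketch for the finish_counter logic above (numbers invented):
    # a suite part collected for three tests starts with finish_counter == 3;
    # every finished child decrements the counter, and only the call that
    # drives it to zero sends the finishing request for the suite item itself.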

    def finish_launch(self, status=None, **kwargs):
        """
        Finish tests launch.

        :param status: a launch status (PASSED, FAILED, STOPPED, SKIPPED,
        INTERRUPTED, CANCELLED, INFO, WARN)
        :param kwargs: additional params
        :return: None
        """
        self._stop_if_necessary()
        if self.rp is None:
            return

        # A string status parameter is needed to finish the launch
        fl_rq = {'end_time': timestamp(), 'status': status}
        log.debug('ReportPortal - Finish launch: request_body=%s', fl_rq)
        self.rp.finish_launch(**fl_rq)

    def post_log(self, message, loglevel='INFO', attachment=None):
        """
        Send a log message to the Report Portal.

        :param message:    message in log body
        :param loglevel:   a level of a log entry (ERROR, WARN, INFO, DEBUG,
        TRACE, FATAL, UNKNOWN)
        :param attachment: attachment file
        :return: None
        """
        self._stop_if_necessary()
        if self.rp is None:
            return

        if loglevel not in self._loglevels:
            log.warning(
                'Incorrect loglevel = %s. Force set to INFO. '
                'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            'item_id': self.log_item_id,
            'time': timestamp(),
            'message': message,
            'level': loglevel,
            'attachment': attachment
        }
        self.rp.log(**sl_rq)

    def _stop_if_necessary(self):
        """
        Stop tests if any error occurs.

        :return: None
        """
        try:
            exc, msg, tb = self._errors.get(False)
            traceback.print_exception(exc, msg, tb)
            sys.stderr.flush()
            if not self.ignore_errors:
                pytest.exit(msg)
        except queue.Empty:
            pass
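
    # Assumption for the logic above: the reporting hooks (not shown in this
    # excerpt) push sys.exc_info() tuples onto self._errors, e.g.
    #   self._errors.put(sys.exc_info())
    # inside an except block, which is what the unpacking into (exc, msg, tb)
    # relies on.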

    @staticmethod
    def _add_item_hier_parts_dirs(item,
                                  hier_flag,
                                  dirs_level,
                                  report_parts,
                                  dirs_parts,
                                  rp_name=""):
        """
        Add item to hierarchy of parents located in directory.

        :param item:         pytest.Item
        :param hier_flag:    whether to build a directory hierarchy
        :param dirs_level:   number of leading directory levels to skip
        :param report_parts: list of parent report parts to append to
        :param dirs_parts:   cache of already created directory items
        :param rp_name:      report name
        :return: str rp_name
        """
        parts_dirs = PyTestServiceClass._get_item_dirs(item)
        dir_path = item.fspath.new(dirname="", basename="", drive="")
        rp_name_path = ""

        for dir_name in parts_dirs[dirs_level:]:
            dir_path = dir_path.join(dir_name)
            path = str(dir_path)

            if hier_flag:
                if path in dirs_parts:
                    item_dir = dirs_parts[path]
                    rp_name = ""
                else:
                    item_dir = File(dir_path,
                                    nodeid=dir_name,
                                    session=item.session,
                                    config=item.session.config)
                    rp_name += dir_name
                    item_dir._rp_name = rp_name
                    dirs_parts[path] = item_dir
                    rp_name = ""

                report_parts.append(item_dir)
            else:
                rp_name_path = path[1:]

        if not hier_flag:
            rp_name += rp_name_path

        return rp_name

    @staticmethod
    def _add_item_hier_parts_parametrize(item,
                                         report_parts,
                                         tests_parts,
                                         rp_name=""):
        """
        Add item to hierarchy of parents with params.

        :param item:         pytest.Item
        :param report_parts: Parent reports
        :param tests_parts:  test item parts
        :param rp_name:      name of report
        :return: str rp_name
        """
        for mark in item.own_markers:
            if mark.name == 'parametrize':
                ch_index = item.nodeid.find("[")
                test_fullname = item.nodeid[:ch_index if ch_index > 0 else len(
                    item.nodeid)]
                test_name = item.originalname

                rp_name += ("::" if rp_name else "") + test_name

                if test_fullname in tests_parts:
                    item_test = tests_parts[test_fullname]
                else:
                    item_test = Item(test_fullname,
                                     nodeid=test_fullname,
                                     session=item.session,
                                     config=item.session.config)
                    item_test._rp_name = rp_name
                    item_test.obj = item.obj
                    item_test.keywords = item.keywords
                    item_test.own_markers = item.own_markers
                    item_test.parent = item.parent

                    tests_parts[test_fullname] = item_test

                rp_name = ""
                report_parts.append(item_test)
                break

        return rp_name
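
    # Illustration of the nodeid handling above with an invented nodeid: for
    # "tests/test_math.py::test_add[1-2]" the slice up to the first "[" gives
    # "tests/test_math.py::test_add", so all parametrized variants of the test
    # are grouped under that single parent item.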

    @staticmethod
    def _add_item_hier_parts_other(item_parts,
                                   item,
                                   item_type,
                                   hier_flag,
                                   report_parts,
                                   rp_name=""):
        """
        Add item to hierarchy of parents.

        :param item_parts:  parent items
        :param item:        pytest.Item
        :param item_type:   (SUITE, STORY, TEST, SCENARIO, STEP, BEFORE_CLASS,
         BEFORE_GROUPS, BEFORE_METHOD, BEFORE_SUITE, BEFORE_TEST, AFTER_CLASS,
        AFTER_GROUPS, AFTER_METHOD, AFTER_SUITE, AFTER_TEST)
        :param hier_flag:    bool state
        :param report_parts: list of parent reports
        :param rp_name:      report name
        :return: str rp_name
        """
        for part in item_parts:

            if type(part) is item_type:

                if item_type is Module:
                    module_path = str(
                        item.fspath.new(dirname=rp_name,
                                        basename=part.fspath.basename,
                                        drive=""))
                    rp_name = module_path if rp_name else module_path[1:]
                elif item_type in (Class, Function, UnitTestCase,
                                   TestCaseFunction):
                    rp_name += ("::" if rp_name else "") + part.name

                if hier_flag:
                    part._rp_name = rp_name
                    rp_name = ""
                    report_parts.append(part)

        return rp_name

    @staticmethod
    def _get_item_parts(item):
        """
        Get item of parents.

        :param item: pytest.Item
        :return list of parents
        """
        parts = []
        parent = item.parent
        if not isinstance(parent, Instance):
            parts.append(parent)
        while True:
            parent = parent.parent
            if parent is None:
                break
            if isinstance(parent, Instance):
                continue
            if isinstance(parent, Session):
                break
            parts.append(parent)

        parts.reverse()
        return parts

    @staticmethod
    def _get_item_dirs(item):
        """
        Get directory of item.

        :param item: pytest.Item
        :return: list of dirs
        """
        root_path = item.session.config.rootdir.strpath
        dir_path = item.fspath.new(basename="")
        rel_dir = dir_path.new(dirname=dir_path.relto(root_path),
                               basename="",
                               drive="")

        dir_list = []
        for directory in rel_dir.parts(reverse=False):
            dir_name = directory.basename
            if dir_name:
                dir_list.append(dir_name)

        return dir_list

    def _get_launch_attributes(self, ini_attrs):
        """Generate launch attributes in the format supported by the client.

        :param list ini_attrs: List for attributes from the pytest.ini file
        """
        attributes = ini_attrs or []
        system_attributes = get_launch_sys_attrs()
        system_attributes['agent'] = ('{}-{}'.format(self.agent_name,
                                                     self.agent_version))
        return attributes + _dict_to_payload(system_attributes)

    def _get_item_markers(self, item):
        """
        Get attributes of item.

        :param item: pytest.Item
        :return: list of tags
        """

        # Try to extract names of @pytest.mark.* decorators used for test item
        # and exclude those which present in rp_ignore_attributes parameter
        def get_marker_value(item, keyword):
            try:
                marker = item.get_closest_marker(keyword)
            except AttributeError:
                # pytest < 3.6
                marker = item.keywords.get(keyword)

            marker_values = []
            if marker and marker.args:
                for arg in marker.args:
                    marker_values.append("{}:{}".format(keyword, arg))
            else:
                marker_values.append(keyword)
            # returns a list of strings to accommodate multiple values
            return marker_values

        try:
            get_marker = getattr(item, "get_closest_marker")
        except AttributeError:
            get_marker = getattr(item, "get_marker")

        raw_attrs = []
        for k in item.keywords:
            if get_marker(k) is not None and k not in self.ignored_attributes:
                raw_attrs.extend(get_marker_value(item, k))
        raw_attrs.extend(item.session.config.getini('rp_tests_attributes'))
        return gen_attributes(raw_attrs)
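
    # Illustration of get_marker_value above with invented markers: a test
    # decorated with @pytest.mark.component("api", "db") yields
    # ["component:api", "component:db"], while a bare @pytest.mark.smoke yields
    # ["smoke"]; both lists are then merged with rp_tests_attributes and passed
    # to gen_attributes().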

    def _get_parameters(self, item):
        """
        Get params of item.

        :param item: Pytest.Item
        :return: dict of params or None
        """
        return item.callspec.params if hasattr(item, 'callspec') else None

    @staticmethod
    def _get_item_name(test_item):
        """
        Get name of item.

        :param test_item: pytest.Item
        :return: name
        """
        name = test_item._rp_name
        if len(name) > 256:
            name = name[:256]
            test_item.warn(
                PytestWarning(
                    'Test node ID was truncated to "{}" because of name size '
                    'constraints on Report Portal'.format(name)))
        return name

    @staticmethod
    def _get_item_description(test_item):
        """
        Get description of item.

        :param test_item: pytest.Item
        :return: string description
        """
        if isinstance(test_item, (Class, Function, Module, Item)):
            doc = test_item.obj.__doc__
            if doc is not None:
                return trim_docstring(doc)
        if isinstance(test_item, DoctestItem):
            return test_item.reportinfo()[2]
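
A hedged end-to-end sketch showing how the class above could be driven outside
of the pytest hooks; the endpoint, project and UUID values are placeholders,
and a reachable Report Portal server plus the module's helper imports are
assumed.

service = PyTestServiceClass()
service.init_service(
    endpoint="https://reportportal.example.com",   # placeholder endpoint
    project="default_personal",                    # placeholder project
    uuid="00000000-0000-0000-0000-000000000000",   # placeholder API token
    log_batch_size=20,
    is_skipped_an_issue=True,
    ignore_errors=True,
    ignored_attributes=[],
)
launch_id = service.start_launch(launch_name="manual-smoke",
                                 description="launch created from the sketch")
# collect_tests/start_pytest_item/finish_pytest_item are normally driven by the
# pytest hooks and need real pytest.Item objects, so they are omitted here.
service.finish_launch(status="PASSED")
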
Example #20
def main():

    result = {}

    module_args = dict(url=dict(type='str', required=True),
                       token=dict(type='str', required=True),
                       ssl_verify=dict(type='bool', default=True),
                       threads=dict(type='int', default=8),
                       ignore_skipped_tests=dict(type='bool', default=False),
                       project_name=dict(type='str', required=True),
                       launch_name=dict(type='str', required=True),
                       launch_tags=dict(type='list', required=False),
                       launch_description=dict(type='str', default=''),
                       launch_start_time=dict(type='str', default=None),
                       launch_end_time=dict(type='str', default=None),
                       tests_paths=dict(type='list', required=True),
                       tests_exclude_paths=dict(type='list', required=False),
                       log_last_traceback_only=dict(type='bool',
                                                    default=False),
                       full_log_attachment=dict(type='bool', default=False))

    module = AnsibleModule(argument_spec=module_args,
                           supports_check_mode=False)
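
    # Hedged illustration (comments only) of the task parameters this module
    # expects; every value below is a placeholder, not taken from a real
    # playbook:
    #   url: https://reportportal.example.com
    #   token: 00000000-0000-0000-0000-000000000000
    #   project_name: default_personal
    #   launch_name: deployment-tests
    #   tests_paths: ["/path/to/test/results/*"]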

    service = None
    launch_end_time = None

    try:
        tests_paths = module.params.pop('tests_paths')
        tests_exclude_paths = module.params.pop('tests_exclude_paths')
        ssl_verify = module.params.pop('ssl_verify')
        launch_start_time = module.params.pop('launch_start_time')
        launch_end_time = module.params.pop('launch_end_time')

        expanded_paths = get_expanded_paths(tests_paths)
        expanded_exclude_paths = [] if not tests_exclude_paths else \
            get_expanded_paths(tests_exclude_paths)

        expanded_paths = \
            list(set(expanded_paths) - set(expanded_exclude_paths))

        if not expanded_paths:
            raise IOError("There are no paths to fetch data from")

        missing_paths = []
        for a_path in expanded_paths:
            if not os.path.exists(a_path):
                missing_paths.append(a_path)
        if missing_paths:
            raise FileNotFoundError("Paths do not exist: {missing_paths}".format(
                missing_paths=str(missing_paths)))

        # Get the ReportPortal service instance
        service = ReportPortalService(
            endpoint=module.params.pop('url'),
            project=module.params.pop('project_name'),
            token=module.params.pop('token'),
        )

        service.session.verify = ssl_verify
        if not ssl_verify:
            os.environ.pop('REQUESTS_CA_BUNDLE', None)

        launch_tags = module.params.pop('launch_tags')
        launch_attrs = {}
        for tag in launch_tags:
            tag_attr = tag.split(':', 1)
            if len(tag_attr) == 2:
                if len(tag_attr[0]) > 127:
                    key = tag_attr[0][:127]
                else:
                    key = tag_attr[0]
                if not tag_attr[1]:
                    val = 'N/A'
                elif len(tag_attr[1]) > 127:
                    val = tag_attr[1][:127]
                else:
                    val = tag_attr[1]
                launch_attrs[key] = val

        publisher = ReportPortalPublisher(
            service=service,
            launch_name=module.params.pop('launch_name'),
            launch_attrs=launch_attrs,
            launch_description=module.params.pop('launch_description'),
            ignore_skipped_tests=module.params.pop('ignore_skipped_tests'),
            log_last_traceback_only=module.params.pop(
                'log_last_traceback_only'),
            full_log_attachment=module.params.pop('full_log_attachment'),
            threads=module.params.pop('threads'),
            expanded_paths=expanded_paths
        )

        if launch_start_time is not None:
            # The time in the deployment report may be later than the requested
            # launch_start_time because of rounding, so shift the launch start
            # back by one second to compensate
            fixed_start_time = str(int(launch_start_time) - 1000)
            publisher.launch_start_time = fixed_start_time

        publisher.publish_tests()

        result['expanded_paths'] = expanded_paths
        result['expanded_exclude_paths'] = expanded_exclude_paths
        result['launch_id'] = service.launch_id

        # Set launch ending time
        if launch_end_time is None:
            launch_end_time = str(int(time.time() * 1000))

        # Finish launch.
        service.finish_launch(end_time=launch_end_time)

        module.exit_json(**result)

    except Exception as ex:
        if service is not None and service.launch_id:
            if launch_end_time is None:
                launch_end_time = str(int(time.time() * 1000))
            service.finish_launch(end_time=launch_end_time, status="FAILED")
        result['msg'] = str(ex)
        module.fail_json(**result)
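
A standalone sketch of the launch_tags-to-attributes conversion performed in
main() above; the function name tags_to_attrs and the sample tag list are
invented for illustration.

def tags_to_attrs(launch_tags, max_len=127):
    """Convert 'key:value' tags into an attribute dict, trimming long parts."""
    attrs = {}
    for tag in launch_tags:
        tag_attr = tag.split(':', 1)
        if len(tag_attr) == 2:
            key = tag_attr[0][:max_len]
            attrs[key] = tag_attr[1][:max_len] or 'N/A'
    return attrs


print(tags_to_attrs(["env:staging", "build:1234", "notag", "empty:"]))
# {'env': 'staging', 'build': '1234', 'empty': 'N/A'}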