Code example #1
File: utils.py  Project: rakeshgm/cephci
class ReportPortal:
    """Handles logging to report portal."""
    def __init__(self):
        """Initializes the instance."""
        cfg = get_cephci_config()
        access = cfg.get("report-portal")

        self.client = None
        self._test_id = None

        if access:
            try:
                self.client = ReportPortalService(
                    endpoint=access["endpoint"],
                    project=access["project"],
                    token=access["token"],
                    verify_ssl=False,
                )
            except BaseException:  # noqa
                log.warning("Unable to connect to Report Portal.")

    @rp_deco
    def start_launch(self, name: str, description: str,
                     attributes: dict) -> None:
        """
        Initiates a test execution (launch) with the provided details.

        Args:
            name (str):         Name of test execution.
            description (str):  Metadata to be added to the launch.
            attributes (dict):  Metadata for the launch as key-value pairs.

        Returns:
             None
        """
        self.client.start_launch(name,
                                 start_time=timestamp(),
                                 description=description,
                                 attributes=attributes)

    @rp_deco
    def start_test_item(self, name: str, description: str,
                        item_type: str) -> None:
        """
        Records an entry within the initiated launch.

        Args:
            name (str):         Name to be set for the test step
            description (str):  Meta information to be used.
            item_type (str):    Type of entry to be created.

        Returns:
            None
        """
        self._test_id = self.client.start_test_item(name,
                                                    start_time=timestamp(),
                                                    item_type=item_type,
                                                    description=description)

    @rp_deco
    def finish_test_item(self, status: Optional[str] = "PASSED") -> None:
        """
        Ends a test entry with the given status.

        Args:
            status (str):  Status to set on the test entry. Defaults to "PASSED".
        """
        if not self._test_id:
            return

        self.client.finish_test_item(item_id=self._test_id,
                                     end_time=timestamp(),
                                     status=status)

    @rp_deco
    def finish_launch(self) -> None:
        """Closes the Report Portal execution run."""
        self.client.finish_launch(end_time=timestamp())
        self.client.terminate()

    @rp_deco
    def log(self, message: str) -> None:
        """
        Adds log records to the event.

        Args:
            message (str):  Message to be logged.

        Returns:
            None
        """
        self.client.log(time=timestamp(), message=message, level="INFO")
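
A minimal usage sketch for the wrapper above, assuming a cephci configuration whose "report-portal" section provides endpoint, project, and token. The launch name, description, and attributes below are illustrative placeholders, not values from the project.

# Hypothetical driver code for the ReportPortal wrapper shown above.
rp = ReportPortal()

rp.start_launch(
    name="nightly-smoke",                      # illustrative launch name
    description="Smoke suite triggered by CI",
    attributes={"build": "placeholder"},
)

rp.start_test_item(
    name="deploy-cluster",
    description="Bootstrap a test cluster",
    item_type="STEP",
)
rp.log("cluster bootstrap finished")           # logged at INFO level by the wrapper
rp.finish_test_item(status="PASSED")

rp.finish_launch()                             # also terminates the client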
Code example #2
File: report_portal_writer.py  Project: hunkom/dusty
class ReportPortalDataWriter(object):
    def __init__(self,
                 endpoint,
                 token,
                 project,
                 launch_name=None,
                 launch_doc=None,
                 launch_id=None,
                 verify_ssl=False):
        self.endpoint = endpoint
        self.token = token
        self.project = project
        self.launch_name = launch_name
        self.launch_doc = launch_doc
        self.service = None
        self.test = None
        self.verify_ssl = verify_ssl
        self.launch_id = launch_id

    def start_service(self):
        self.service = ReportPortalService(endpoint=self.endpoint,
                                           project=self.project,
                                           token=self.token,
                                           verify_ssl=self.verify_ssl)
        if self.launch_id:
            self.service.launch_id = self.launch_id

    def start_test(self):
        if not self.service:
            self.start_service()
        return self.service.start_launch(name=self.launch_name,
                                         start_time=timestamp(),
                                         description=self.launch_doc)

    def finish_test(self):
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()
        self.service = None

    def is_test_started(self):
        return self.service is not None

    def start_test_item(self,
                        issue,
                        description,
                        tags,
                        item_type='STEP',
                        parameters=None):
        # Avoid a mutable default argument; fall back to an empty dict.
        self.service.start_test_item(issue,
                                     description=description,
                                     tags=tags,
                                     start_time=timestamp(),
                                     item_type=item_type,
                                     parameters=parameters or {})

    def test_item_message(self, message, level="ERROR", attachment=None):
        if len(message) > constants.MAX_MESSAGE_LEN:
            index = 0
            while index < len(message):
                increment = constants.MAX_MESSAGE_LEN
                if index + increment > len(message):
                    increment = len(message) - index
                self.service.log(time=timestamp(),
                                 message=message[index:index + increment],
                                 level=level,
                                 attachment=attachment)
                index = index + increment
        else:
            self.service.log(time=timestamp(),
                             message=message,
                             level=level,
                             attachment=attachment)

    def finish_test_item(self):
        self.service.finish_test_item(end_time=timestamp(), status="FAILED")
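
A hedged usage sketch for ReportPortalDataWriter. The endpoint, token, project, and finding details are placeholders; constants.MAX_MESSAGE_LEN is assumed to be defined in dusty's constants module, and test_item_message splits any message longer than it into chunks.

# Illustrative values only; replace with a real Report Portal instance.
writer = ReportPortalDataWriter(
    endpoint="https://rp.example.com",
    token="<rp-api-token>",
    project="demo",
    launch_name="dusty scan",
    launch_doc="Security scan results",
)

writer.start_test()                                # starts the service and the launch
writer.start_test_item(
    issue="Example finding",
    description="Finding reported by the scanner",
    tags=["security"],
)
writer.test_item_message("very long report body ...")  # chunked if over MAX_MESSAGE_LEN
writer.finish_test_item()                          # this writer always closes items as FAILED
writer.finish_test()                               # finishes the launch and terminates the service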
class IntegrationService:
    def __init__(self,
                 rp_endpoint,
                 rp_project,
                 rp_token,
                 rp_launch_name,
                 rp_launch_description,
                 verify_ssl=False):
        self.rp_endpoint = rp_endpoint
        self.rp_project = rp_project
        self.rp_token = rp_token
        self.rp_launch_name = rp_launch_name
        self.rp_launch_description = rp_launch_description
        self.rp_async_service = ReportPortalService(endpoint=self.rp_endpoint,
                                                    project=self.rp_project,
                                                    token=self.rp_token,
                                                    verify_ssl=verify_ssl)

    def start_launcher(self,
                       name,
                       start_time,
                       attributes,
                       description=None,
                       tags=None):
        return self.rp_async_service.start_launch(name=name,
                                                  start_time=start_time,
                                                  description=description,
                                                  attributes=attributes,
                                                  tags=tags)

    def start_feature_test(self, **kwargs):
        return self._start_test(**kwargs)

    def start_scenario_test(self, **kwargs):
        return self._start_test(**kwargs)

    def start_step_test(self, **kwargs):
        return self._start_test(**kwargs)

    def finish_step_test(self, **kwargs):
        return self._finish_test(**kwargs)

    def finish_scenario_test(self, **kwargs):
        return self._finish_test(**kwargs)

    def finish_feature(self, **kwargs):
        return self._finish_test(**kwargs)

    def finish_launcher(self, end_time, launch_id, status=None):
        return self.rp_async_service.finish_launch(end_time=end_time,
                                                   status=status,
                                                   launch_id=launch_id)

    def log_step_result(self,
                        end_time,
                        message,
                        level='INFO',
                        attachment=None,
                        item_id=None):
        self.rp_async_service.log(time=end_time,
                                  message=message,
                                  level=level,
                                  attachment=attachment,
                                  item_id=item_id)

    def terminate_service(self):
        self.rp_async_service.terminate()

    def _start_test(self,
                    name,
                    start_time,
                    item_type,
                    description=None,
                    tags=None,
                    parent_item_id=None):
        """
        item_type can be (SUITE, STORY, TEST, SCENARIO, STEP, BEFORE_CLASS,
        BEFORE_GROUPS, BEFORE_METHOD, BEFORE_SUITE, BEFORE_TEST, AFTER_CLASS,
        AFTER_GROUPS, AFTER_METHOD, AFTER_SUITE, AFTER_TEST)
        Types taken from report_portal/service.py
        Mark item as started
        """
        return self.rp_async_service.start_test_item(
            name=name,
            description=description,
            tags=tags,
            start_time=start_time,
            item_type=item_type,
            parent_item_id=parent_item_id)

    def _finish_test(self, end_time, status, item_id, issue=None):
        """
        Mark item as completed and set the status accordingly
        :param end_time: the end time of the execution
        :param status: the status
        :param item_id: the id of the execution to mark as complete
        :param issue: associate existing issue with the failure
        :return: the response of the finish request
        """
        return self.rp_async_service.finish_test_item(end_time=end_time,
                                                      status=status,
                                                      issue=issue,
                                                      item_id=item_id)
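
A sketch of driving IntegrationService's launch and item lifecycle, for example from BDD-style hooks. The endpoint, project, token, and item names are placeholders, and now_ms() is a stand-in for whatever timestamp source the caller uses (Report Portal expects epoch milliseconds).

import time

def now_ms():
    # Report Portal timestamps are epoch milliseconds (string form is accepted).
    return str(int(time.time() * 1000))

svc = IntegrationService(
    rp_endpoint="https://rp.example.com",      # placeholder values
    rp_project="demo",
    rp_token="<rp-api-token>",
    rp_launch_name="acceptance run",
    rp_launch_description="BDD acceptance tests",
)

launch_id = svc.start_launcher(name=svc.rp_launch_name,
                               start_time=now_ms(),
                               attributes={"env": "staging"},
                               description=svc.rp_launch_description)

feature_id = svc.start_feature_test(name="Login feature",
                                    start_time=now_ms(),
                                    item_type="SUITE")
step_id = svc.start_step_test(name="User logs in",
                              start_time=now_ms(),
                              item_type="STEP",
                              parent_item_id=feature_id)

svc.log_step_result(end_time=now_ms(), message="step passed", item_id=step_id)
svc.finish_step_test(end_time=now_ms(), status="PASSED", item_id=step_id)
svc.finish_feature(end_time=now_ms(), status="PASSED", item_id=feature_id)
svc.finish_launcher(end_time=now_ms(), launch_id=launch_id)
svc.terminate_service()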
Code example #4
    def report_test_results(self, errors, performance_degradation_rate,
                            compare_with_baseline, missed_threshold_rate,
                            compare_with_thresholds):
        self.create_project()
        service = ReportPortalService(endpoint=self.rp_url,
                                      project=self.rp_project,
                                      token=self.rp_token,
                                      error_handler=self.my_error_handler,
                                      verify_ssl=self.verify_ssl)

        # Start launch.
        service.start_launch(
            name=self.rp_launch_name + ": performance testing results",
            start_time=self.timestamp(),
            description='Test name - {}'.format(self.args['simulation']))
        errors_len = len(errors)

        if errors_len > 0:
            functional_error_test_item = service.start_test_item(
                name="Functional errors",
                start_time=self.timestamp(),
                description="This simulation has failed requests",
                item_type="SUITE")
            for key in errors:
                # Start test item.
                item_name = self.get_item_name(errors[key])
                item_id = service.start_test_item(
                    name=item_name,
                    parent_item_id=functional_error_test_item,
                    description="This request was failed {} times".format(
                        errors[key]['Error count']),
                    start_time=self.timestamp(),
                    item_type="STEP",
                    parameters={
                        "simulation": self.args['simulation'],
                        'test type': self.args['type']
                    })

                self.log_message(item_id, service, 'Request name', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Method', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Request URL', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Request_params',
                                 errors[key], 'WARN')
                self.log_message(item_id, service, 'Request headers',
                                 errors[key], 'INFO')
                self.log_message(item_id, service, 'Error count', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Error_message',
                                 errors[key], 'WARN')
                self.log_message(item_id, service, 'Response code',
                                 errors[key], 'WARN')
                self.log_message(item_id, service, 'Response', errors[key],
                                 'WARN')
                self.log_unique_error_id(item_id, service,
                                         errors[key]['Request name'],
                                         errors[key]['Method'],
                                         errors[key]['Response code'])

                service.finish_test_item(item_id=item_id,
                                         end_time=self.timestamp(),
                                         status="FAILED")
            service.finish_test_item(item_id=functional_error_test_item,
                                     end_time=self.timestamp(),
                                     status="FAILED")
        else:
            item_id = service.start_test_item(
                name="Functional errors",
                start_time=self.timestamp(),
                item_type="STEP",
                description='This simulation has no functional errors')
            service.finish_test_item(item_id=item_id,
                                     end_time=self.timestamp(),
                                     status="PASSED")

        if performance_degradation_rate > self.performance_degradation_rate:
            baseline_item_id = service.start_test_item(
                name="Compare to baseline",
                start_time=self.timestamp(),
                description="Test \"{}\" failed with performance degradation"
                " rate {}".format(self.args['simulation'],
                                  performance_degradation_rate),
                item_type="SUITE")

            service.log(
                item_id=baseline_item_id,
                time=self.timestamp(),
                message="The following requests are slower than baseline:",
                level="{}".format('INFO'))
            for request in compare_with_baseline:
                item_id = service.start_test_item(
                    name="\"{}\" reached {} ms by {}. Baseline {} ms.".format(
                        request['request_name'], request['response_time'],
                        self.args['comparison_metric'], request['baseline']),
                    parent_item_id=baseline_item_id,
                    start_time=self.timestamp(),
                    item_type="STEP",
                    parameters={
                        'simulation': self.args['simulation'],
                        'test type': self.args['type']
                    })

                service.log(item_id=item_id,
                            time=self.timestamp(),
                            message="\"{}\" reached {} ms by {}."
                            " Baseline {} ms.".format(
                                request['request_name'],
                                request['response_time'],
                                self.args['comparison_metric'],
                                request['baseline']),
                            level="{}".format('WARN'))
                service.finish_test_item(item_id=item_id,
                                         end_time=self.timestamp(),
                                         status="FAILED")
            service.log(time=self.timestamp(),
                        message=hashlib.sha256(
                            "{} performance degradation".format(
                                self.args['simulation']).strip().encode(
                                    'utf-8')).hexdigest(),
                        level='ERROR')

            service.finish_test_item(item_id=baseline_item_id,
                                     end_time=self.timestamp(),
                                     status="FAILED")
        else:
            item_id = service.start_test_item(
                name="Compare to baseline",
                start_time=self.timestamp(),
                item_type="STEP",
                description='Performance degradation rate less than {}'.format(
                    self.performance_degradation_rate))
            service.finish_test_item(item_id=item_id,
                                     end_time=self.timestamp(),
                                     status="PASSED")

        if missed_threshold_rate > self.missed_thresholds_rate:
            thresholds_item_id = service.start_test_item(
                name="Compare with thresholds",
                start_time=self.timestamp(),
                description="Test \"{}\" failed with missed thresholds"
                " rate {}".format(self.args['simulation'],
                                  missed_threshold_rate),
                item_type="SUITE")

            for color in ["yellow", "red"]:
                colored = False
                for th in compare_with_thresholds:
                    if th['threshold'] == color:
                        item_id = service.start_test_item(
                            name="{} threshold for  \"{}\"".format(
                                color, th['request_name']),
                            start_time=self.timestamp(),
                            parent_item_id=thresholds_item_id,
                            item_type="STEP",
                            parameters={
                                'simulation': self.args['simulation'],
                                'test type': self.args['type']
                            })
                        if not colored:
                            service.log(
                                item_id=item_id,
                                time=self.timestamp(),
                                message=
                                f"The following {color} thresholds were exceeded:",
                                level="INFO")
                            colored = True  # log the header only once per color
                        appendage = calculate_appendage(th['target'])
                        service.log(
                            item_id=item_id,
                            time=self.timestamp(),
                            message=
                            f"\"{th['request_name']}\" {th['target']}{appendage} with value {th['metric']}{appendage} exceeded threshold of {th[color]}{appendage}",
                            level="WARN")
                        service.finish_test_item(item_id=item_id,
                                                 end_time=self.timestamp(),
                                                 status="FAILED")
            service.log(item_id=item_id,
                        time=self.timestamp(),
                        message=hashlib.sha256("{} missed thresholds".format(
                            self.args['simulation']).strip().encode(
                                'utf-8')).hexdigest(),
                        level='ERROR')

            service.finish_test_item(item_id=thresholds_item_id,
                                     end_time=self.timestamp(),
                                     status="FAILED")
        else:
            item_id = service.start_test_item(
                name="Compare with thresholds",
                start_time=self.timestamp(),
                item_type="STEP",
                description='Missed thresholds rate less than {}'.format(
                    self.missed_thresholds_rate))
            service.finish_test_item(item_id=item_id,
                                     end_time=self.timestamp(),
                                     status="PASSED")
        # Finish launch.
        service.finish_launch(end_time=self.timestamp())

        service.terminate()
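
The method above relies on helpers that are not shown (self.get_item_name, self.log_message, self.log_unique_error_id, calculate_appendage). Inferring only from the call sites, a hypothetical log_message could look like the following; the actual implementation in the project may differ.

    def log_message(self, item_id, service, key, error, level='WARN'):
        # Hypothetical helper reconstructed from its call sites: logs one field
        # of the error record against the given test item, skipping empty fields.
        value = error.get(key)
        if value:
            service.log(item_id=item_id,
                        time=self.timestamp(),
                        message="{}: {}".format(key, value),
                        level=level)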
Code example #5
class NoseServiceClass(with_metaclass(Singleton, object)):

    def __init__(self):
        self.rp = None
        try:
            pkg_resources.get_distribution('reportportal_client >= 3.2.0')
            self.rp_supports_parameters = True
        except (pkg_resources.VersionConflict, pkg_resources.DistributionNotFound):
            self.rp_supports_parameters = False

        self.ignore_errors = True
        self.ignored_tags = []

        self._loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')

    def init_service(self, endpoint, project, token, ignore_errors=True,
                     ignored_tags=None, log_batch_size=20, queue_get_timeout=5, retries=0):
        # Avoid a mutable default argument for ignored_tags.
        ignored_tags = ignored_tags or []
        if self.rp is None:
            self.ignore_errors = ignore_errors
            if self.rp_supports_parameters:
                self.ignored_tags = list(set(ignored_tags).union({'parametrize'}))
            else:
                self.ignored_tags = ignored_tags
            log.debug('ReportPortal - Init service: endpoint=%s, project=%s, uuid=%s', endpoint, project, token)
            self.rp = ReportPortalService(
                endpoint=endpoint,
                project=project,
                token=token,
                retries=retries,
                log_batch_size=log_batch_size,
                # verify_ssl=verify_ssl
            )

            if self.rp and hasattr(self.rp, "get_project_settings"):
                self.project_settings = self.rp.get_project_settings()
            else:
                self.project_settings = None

            self.issue_types = self.get_issue_types()
        else:
            log.debug('The service is already initialized')
        return self.rp

    def start_launch(self, name,
                     mode=None,
                     tags=None,
                     description=None):
        if self.rp is None:
            return

        sl_pt = {
            'name': name,
            'start_time': timestamp(),
            'description': description,
            'mode': mode,
            'tags': tags,
        }
        self.rp.start_launch(**sl_pt)

    def start_nose_item(self, ev, test=None):
        if self.rp is None:
            return
        tags = []
        try:
            tags = test.test.suites
        except AttributeError:
            pass
        name = str(test)
        start_rq = {
            "name": name,
            "description": ev.describeTest(test),
            "tags": tags,
            "start_time": timestamp(),
            "item_type": "TEST",
            "parameters": None,
        }
        self.post_log(name)
        return self.rp.start_test_item(**start_rq)

    def finish_nose_item(self, test_item, status, issue=None):
        if self.rp is None:
            return

        self.post_log(status)
        fta_rq = {
            'item_id': test_item,
            'end_time': timestamp(),
            'status': status,
            'issue': issue,
        }

        self.rp.finish_test_item(**fta_rq)

    def finish_launch(self, status=None):
        if self.rp is None:
            return

        # A string status parameter is needed to finish the launch session.
        fl_rq = {
            'end_time': timestamp(),
            'status': status,
        }
        self.rp.finish_launch(**fl_rq)

    def terminate_service(self, nowait=False):
        if self.rp is not None:
            self.rp.terminate(nowait)
            self.rp = None

    def post_log(self, message, loglevel='INFO', attachment=None):
        if self.rp is None:
            return

        if loglevel not in self._loglevels:
            log.warning('Incorrect loglevel = %s. Force set to INFO. '
                        'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            'time': timestamp(),
            'message': message,
            'level': loglevel,
            'attachment': attachment,
        }
        self.rp.log(**sl_rq)

    def get_issue_types(self):
        issue_types = {}

        if not self.project_settings:
            return issue_types

        for item_type in ("AUTOMATION_BUG", "PRODUCT_BUG", "SYSTEM_ISSUE", "NO_DEFECT", "TO_INVESTIGATE"):
            for item in self.project_settings["subTypes"][item_type]:
                issue_types[item["shortName"]] = item["locator"]

        return issue_types
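
A usage sketch for NoseServiceClass outside the nose plugin hooks. In the real plugin, start_nose_item and finish_nose_item are driven by nose event handlers that supply the event and test objects; the endpoint, project, and token below are placeholders.

svc = NoseServiceClass()
svc.init_service(
    endpoint="https://rp.example.com",   # placeholder values
    project="demo",
    token="<rp-api-token>",
)

svc.start_launch(name="nose run", description="Triggered manually")
svc.post_log("collecting tests", loglevel='INFO')
# start_nose_item()/finish_nose_item() would be called per test by the nose
# plugin hooks, reusing the item id returned by start_nose_item.
svc.finish_launch(status='PASSED')
svc.terminate_service()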