def __init__(self, portal_launch_name, portal_launch_doc):
        # Report Portal versions below 5.0.0:

        self.endpoint = get_portal_config().get("ENDPOINT")  # Report Portal server address
        self.project = get_portal_config().get("PROJECT")  # Report Portal project name
        self.token = get_portal_config().get("TOKEN")  # Report Portal API token

        self.service = ReportPortalServiceAsync(
            endpoint=self.endpoint,
            project=self.project,
            token=self.token,
            error_handler=self.my_error_handler)

        # Start launch.
        self.launch = self.service.start_launch(name=portal_launch_name,
                                                start_time=timestamp(),
                                                description=portal_launch_doc)

        # Start test item (Report Portal versions below 5.0.0):
        self.test = self.service.start_test_item(name="Test Case",
                                                 description="First Test Case",
                                                 tags=["Image", "Smoke"],
                                                 start_time=timestamp(),
                                                 item_type="STEP",
                                                 parameters={
                                                     "key1": "val1",
                                                     "key2": "val2"
                                                 })

        self.service.finish_test_item(end_time=timestamp(), status="PASSED")
        # Finish launch.
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()
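
Note: get_portal_config() above is a project-local helper that is not shown on this page; from its usage it returns a mapping with ENDPOINT, PROJECT and TOKEN keys. A minimal sketch of what such a helper might look like, assuming a JSON config file (the file name and format here are hypothetical):

import json

def get_portal_config(path="portal_config.json"):
    # Hypothetical sketch: loads {"ENDPOINT": ..., "PROJECT": ..., "TOKEN": ...}
    # from a JSON file. The real project may source these values differently.
    with open(path) as fh:
        return json.load(fh)
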
Example #2
 def init_service(self,
                  endpoint,
                  project,
                  uuid,
                  log_batch_size,
                  ignore_errors,
                  ignored_tags,
                  verify_ssl=False):
     self._errors = queue.Queue()
     if self.RP is None:
         self.ignore_errors = ignore_errors
         if self.RP_SUPPORTS_PARAMETERS:
             self.ignored_tags = list(
                 set(ignored_tags).union({'parametrize'}))
         else:
             self.ignored_tags = ignored_tags
         log.debug(
             'ReportPortal - Init service: endpoint=%s, '
             'project=%s, uuid=%s', endpoint, project, uuid)
         self.RP = ReportPortalServiceAsync(
             endpoint=endpoint,
             project=project,
             token=uuid,
             error_handler=self.async_error_handler,
             log_batch_size=log_batch_size,
             # verify_ssl=False
         )
          self.project_settings = None  # self.RP.rp_client.get_project_settings() if self.RP else None
         self.issue_types = self.get_issue_types()
     else:
          log.debug('pytest is already initialized')
     return self.RP
Example #3
 def __init__(self, config, strategy):
     self.url = config.get('rp_endpoint')
     self.uuid = config.get('rp_uuid')
     self.project = config.get('rp_project')
     self.launch_description = config.get('launch_description')
     self.launch_tags = config.get('launch_tags').split()
     self.upload_xunit = config.get('upload_xunit')
     self.update_headers = {
         'Authorization': 'bearer %s' % self.uuid,
         'Accept': 'application/json',
         'Cache-Control': 'no-cache',
         'content-type': 'application/json',
     }
     self.import_headers = {
         'Authorization': 'bearer %s' % self.uuid,
         'Accept': 'application/json',
         'Cache-Control': 'no-cache',
     }
     self.launch_url = "{url}/api/v1/{project_name}/launch/%s".format(
         url=self.url, project_name=self.project)
     self.launch_public_url = "{url}/ui/#{project_name}/launches/all/%s".format(
         url=self.url, project_name=self.project)
     self.launch_id = ''
     self.xunit_feed = config.get('xunit_feed')
     self.launch_name = config.get('launch_name', 'rp_cli-launch')
     self.strategy = strategy
     self.service = ReportPortalServiceAsync(
         endpoint=self.url,
         project=self.project,
         token=self.uuid,
         error_handler=self.strategy.my_error_handler)
     self.test_logs = config.get('test_logs')
     self.zipped = config.get('zipped')
     self.test_owners = config.get('test_owners', {})
     self.strategy = strategy
 def __init__(self, launch_name: str, launch_doc: str, endpoint: str,
              token: str, project: str):
     """
     Cria o gerenciador do processo de relatorios.
     :param launch_name:
         str: Nome do Launch
     :param launch_doc:
         str: Documentação do Launch
     :param endpoint:
         str: ReportPortal endpoint
     :param token:
         str: Auth token
     :param project:
         str: Nome do projeto
     """
     self.launch_name = launch_name
     self.launch_doc = launch_doc
     self.endpoint = endpoint
     self.project = project
     self.token = token
      try:
          self.service = ReportPortalServiceAsync(
              endpoint=self.endpoint,
              project=self.project,
              token=self.token,
              error_handler=self.error_handler)
      except Exception:
          print('Report Portal is having issues, please check your server.')
 def __init__(self, url, auth_token, project, launch_name,
              launch_description, report_dir, report):
     self.service = ReportPortalServiceAsync(
         endpoint=url,
         project=project,
         token=auth_token,
         error_handler=self._handle_rp_error)
     self.launch_name = launch_name
     self.launch_description = launch_description
     self.report_dir = report_dir
     self.report = report
     self._rp_exc_info = None
    def __init__(self, main_test, *args, **kwargs):
        super(ReportPortalHandler, self).__init__(main_test=main_test,
                                                  *args, **kwargs)

        configuration = get_configuration()
        self.service = ReportPortalServiceAsync(
            endpoint=configuration.endpoint,
            project=configuration.project,
            token=configuration.token)

        self.log_handler = ReportPortalLogHandler(self.service)
        self.comments = []
Example #7
 def start_test(self):
     """
     Start new launch in Report Portal
     """
     self.service = ReportPortalServiceAsync(endpoint=self.endpoint,
                                             project=self.project,
                                             token=self.token,
                                             error_handler=my_error_handler,
                                             verify_ssl=self.verify_ssl)
     self.service.start_launch(name=self.launch_name,
                               start_time=timestamp(),
                               description=self.launch_doc,
                               tags=self.launch_tags)
Example #8
    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.
        """
        self.service = ReportPortalServiceAsync(endpoint=self.rp_endpoint, project=self.rp_project,
                                                token=self.rp_uuid, error_handler=my_error_handler,
                                                queue_get_timeout=20)

        log.setLevel(logging.DEBUG)

        # Start launch.
        self.launch = self.service.start_launch(name=self.rp_launch,
                                                start_time=timestamp(),
                                                description=self.rp_launch_description,
                                                mode=self.rp_mode)
        self.handler = RPNoseLogHandler(service=self.service, level=logging.DEBUG,
                                        endpoint=self.rp_endpoint)
        self.setupLoghandler()
Example #9
 def start_service(self):
     self.service = ReportPortalService(endpoint=self.endpoint,
                                        project=self.project,
                                        token=self.token,
                                        log_batch_size=self.log_batch_size,
                                        verify_ssl=self.verify_ssl)
     if self.launch_id:
         self.service.launch_id = self.launch_id
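
Note: assigning launch_id on the service, as start_service() above does, is how several processes can report into one shared launch: one process starts the launch, publishes its id, and the others attach to it instead of creating their own. A minimal sketch under that assumption (endpoint, project and token values are placeholders; timestamp() is the same epoch-milliseconds helper used throughout this page):

from time import time

from reportportal_client import ReportPortalService

def timestamp():
    return str(int(time() * 1000))

# Process A: create the launch and share its id (e.g. via an env var or a file).
svc_a = ReportPortalService(endpoint="http://rp.example.com", project="demo", token="<token>")
svc_a.start_launch(name="shared-launch", start_time=timestamp())
launch_id = svc_a.launch_id  # the service keeps the id of the launch it started

# Process B: attach to the existing launch instead of starting a new one.
svc_b = ReportPortalService(endpoint="http://rp.example.com", project="demo", token="<token>")
svc_b.launch_id = launch_id
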
 def init_service(self, endpoint, project, uuid, log_batch_size,
                  ignore_errors, ignored_tags):
     self._errors = queue.Queue()
     if self.RP is None:
         self.ignore_errors = ignore_errors
         self.ignored_tags = ignored_tags
         logging.debug('ReportPortal - Init service: endpoint=%s, '
                       'project=%s, uuid=%s', endpoint, project, uuid)
         self.RP = ReportPortalServiceAsync(
             endpoint=endpoint,
             project=project,
             token=uuid,
             error_handler=self.async_error_handler,
             log_batch_size=log_batch_size
         )
     else:
          logging.debug('pytest is already initialized')
     return self.RP
Example #11
    def before_all(self, context):
        """
        TODO: get data from behave.userdata

            endpoint: architecture/OS/Browser
            token: user Report Portal API token
            project: project name or label
        """
        self._rp = ReportPortalServiceAsync(
            endpoint=context.config.userdata.get('rp_endpoint', None),
            project=context.config.userdata.get('rp_project', None),
            token=context.config.userdata.get('rp_token', None),
            error_handler=self.error,
        )
        self._rp.start_launch(
            name=context.config.userdata.get('rp_launch', None),
            start_time=self.timestamp(),
        )
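
Note: the rp_* userdata keys read above are expected to come from behave configuration; check_context in the fuller plugin example below validates exactly these keys. A minimal behave.ini sketch carrying them (all values are placeholders):

[behave.userdata]
rp_endpoint = http://reportportal.example.com:8080
rp_project = my_project
rp_token = <api-token>
rp_launch = my-launch
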
 def init_service(self, endpoint, project, uuid, log_batch_size,
                  ignore_errors):
     self._errors = queue.Queue()
     if self.RP is None:
         self.ignore_errors = ignore_errors
         logging.debug(
             msg="ReportPortal - Init service: "
                 "endpoint={0}, project={1}, uuid={2}".
                 format(endpoint, project, uuid))
         self.RP = ReportPortalServiceAsync(
             endpoint=endpoint,
             project=project,
             token=uuid,
             error_handler=self.async_error_handler,
             log_batch_size=log_batch_size
         )
     else:
         logging.debug("The pytest is already initialized")
     return self.RP
Example #13
def create_report_portal_session():
    """
    Configures and creates a session to the Report Portal instance.

    Returns:
        The session object
    """
    cfg = get_cephci_config()['report-portal']

    return ReportPortalServiceAsync(
        endpoint=cfg['endpoint'],
        project=cfg['project'],
        token=cfg['token'],
        error_handler=error_handler)
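
A usage sketch for the session factory above (the launch name and description are placeholders; timestamp() is the same epoch-milliseconds helper used throughout this page):

from time import time

def timestamp():
    return str(int(time() * 1000))

service = create_report_portal_session()
service.start_launch(name="nightly-run", start_time=timestamp(),
                     description="placeholder run description")
# ... start and finish test items here ...
service.finish_launch(end_time=timestamp())
service.terminate()
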
Example #14
 def init_service(self, endpoint, project, uuid, log_batch_size,
                  ignore_errors, ignored_tags):
     self._errors = queue.Queue()
     if self.RP is None:
         self.ignore_errors = ignore_errors
         if self.RP_SUPPORTS_PARAMETERS:
             self.ignored_tags = list(set(ignored_tags).union({'parametrize'}))
         else:
             self.ignored_tags = ignored_tags
         log.debug('ReportPortal - Init service: endpoint=%s, '
                   'project=%s, uuid=%s', endpoint, project, uuid)
         self.RP = ReportPortalServiceAsync(
             endpoint=endpoint,
             project=project,
             token=uuid,
             error_handler=self.async_error_handler,
             log_batch_size=log_batch_size
         )
     else:
          log.debug('pytest is already initialized')
     return self.RP
Example #15
 def init_service(endpoint, project, uuid, log_batch_size):
     if RobotService.rp is None:
         logging.debug("ReportPortal - Init service: "
                       "endpoint={0}, project={1}, uuid={2}".format(
                           endpoint, project, uuid))
         RobotService.rp = ReportPortalServiceAsync(
             endpoint=endpoint,
             project=project,
             token=uuid,
             error_handler=async_error_handler,
             log_batch_size=log_batch_size)
     else:
         raise Exception("RobotFrameworkService is already initialized")
Example #16
 def init_service(self,
                  endpoint,
                  project,
                  token,
                  ignore_errors=True,
                  ignored_tags=[],
                  log_batch_size=20,
                  queue_get_timeout=5,
                  retries=0):
     self._errors = queue.Queue()
     if self.RP is None:
         self.ignore_errors = ignore_errors
         if self.RP_SUPPORTS_PARAMETERS:
             self.ignored_tags = list(
                 set(ignored_tags).union({'parametrize'}))
         else:
             self.ignored_tags = ignored_tags
         log.debug(
             'ReportPortal - Init service: endpoint=%s, '
             'project=%s, uuid=%s', endpoint, project, token)
         self.RP = ReportPortalServiceAsync(
             endpoint=endpoint,
             project=project,
             token=token,
             error_handler=self.async_error_handler,
             queue_get_timeout=queue_get_timeout,
             retries=retries,
             log_batch_size=log_batch_size,
             # verify_ssl=verify_ssl
         )
          if self.RP and hasattr(self.RP.rp_client, "get_project_settings"):
              self.project_settings = self.RP.rp_client.get_project_settings()
          else:
              self.project_settings = None
         self.issue_types = self.get_issue_types()
     else:
          log.debug('pytest is already initialized')
     return self.RP
 def init_service(self, endpoint, project, uuid, log_batch_size,
                  ignore_errors, ignored_tags):
     self._errors = queue.Queue()
     if self.RP is None:
         self.ignore_errors = ignore_errors
         self.ignored_tags = list(set(ignored_tags).union({'parametrize'}))
         log.debug(
             'ReportPortal - Init service: endpoint=%s, project=%s, uuid=%s',
             endpoint, project, uuid)
         self.RP = ReportPortalServiceAsync(
             endpoint=endpoint,
             project=project,
             token=uuid,
             error_handler=self.async_error_handler,
             log_batch_size=log_batch_size)
          if self.RP and hasattr(self.RP.rp_client, "get_project_settings"):
              self.project_settings = self.RP.rp_client.get_project_settings()
         else:
             self.project_settings = None
         self.issue_types = self.get_issue_types()
     else:
          log.debug('pytest is already initialized')
     return self.RP
Example #18
class RpManager:
    def __init__(self, config, strategy):
        self.url = config.get('rp_endpoint')
        self.uuid = config.get('rp_uuid')
        self.project = config.get('rp_project')
        self.launch_description = config.get('launch_description')
        self.launch_tags = config.get('launch_tags').split()
        self.upload_xunit = config.get('upload_xunit')
        self.update_headers = {
            'Authorization': 'bearer %s' % self.uuid,
            'Accept': 'application/json',
            'Cache-Control': 'no-cache',
            'content-type': 'application/json',
        }
        self.import_headers = {
            'Authorization': 'bearer %s' % self.uuid,
            'Accept': 'application/json',
            'Cache-Control': 'no-cache',
        }
        self.launch_url = "{url}/api/v1/{project_name}/launch/%s".format(
            url=self.url, project_name=self.project)
        self.launch_public_url = "{url}/ui/#{project_name}/launches/all/%s".format(
            url=self.url, project_name=self.project)
        self.launch_id = ''
        self.xunit_feed = config.get('xunit_feed')
        self.launch_name = config.get('launch_name', 'rp_cli-launch')
        self.strategy = strategy
        self.service = ReportPortalServiceAsync(
            endpoint=self.url,
            project=self.project,
            token=self.uuid,
            error_handler=self.strategy.my_error_handler)
        self.test_logs = config.get('test_logs')
        self.zipped = config.get('zipped')
        self.test_owners = config.get('test_owners', {})
        self.strategy = strategy

    @staticmethod
    def _check_return_code(req):
        if req.status_code != 200:
            logger.error('Something went wrong status code is %s; MSG: %s',
                         req.status_code,
                         req.json()['message'])
            sys.exit(1)

    def _import_results(self):
        with open(self.upload_xunit, 'rb') as xunit_file:
            files = {'file': xunit_file}
            req = requests.post(self.launch_url % "import",
                                headers=self.import_headers,
                                files=files)

        response = req.json()
        self._check_return_code(req)
        logger.info("Import is done successfully")
        response_msg = response['msg'].encode('ascii', 'ignore')
        logger.info('Status code: %s; %s', req.status_code, response_msg)

        # returning the launch_id
        return response_msg.split()[4]

    def _verify_upload_succeeded(self, launch_id):
        launch_id_url = self.launch_url % launch_id
        req = requests.get(launch_id_url, headers=self.update_headers)
        self._check_return_code(req)
        logger.info('Launch has been created successfully')
        return True

    def _update_launch_description_and_tags(self, launch_id):
        update_url = self.launch_url % launch_id + "/update"

        data = {
            "description": self.launch_description,
            "tags": self.launch_tags
        }

        req = requests.put(url=update_url,
                           headers=self.update_headers,
                           data=json.dumps(data))
        self._check_return_code(req)
        logger.info(
            'Launch description %s and tags %s were updated for launch id %s',
            self.launch_description, self.launch_tags, launch_id)

    def import_results(self):
        self.launch_id = self._import_results()
        self._verify_upload_succeeded(self.launch_id)
        self._update_launch_description_and_tags(self.launch_id)

    def _start_launch(self):
        return self.service.start_launch(name=self.launch_name,
                                         start_time=timestamp(),
                                         description=self.launch_description,
                                         tags=self.launch_tags)

    def _end_launch(self):
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()
        self.launch_id = self.service.rp_client.launch_id

    def _upload_attachment(self, file, name):
        with open(file, "rb") as fh:
            attachment = {
                "name": name,
                "data": fh.read(),
                "mime": guess_type(file)[0]
            }
            self.service.log(timestamp(), name, "INFO", attachment)

    def upload_test_case_attachments(self, path):
        for root, dirs, files in os.walk(path):
            for log_file in files:
                file_name = os.path.join(root, log_file)
                self._upload_attachment(file_name, log_file)

    def upload_zipped_test_case_attachments(self, zip_file_name, path):
        whole_path = os.path.join(self.test_logs, path)
        try:
            ld = os.listdir(whole_path)
        except OSError:
            logger.warning("Path (%s) with log files does not exist!" %
                           (whole_path, ))
            return
        # check if there is something to zip
        if len(ld) > 0:
            zip_file_name = shutil.make_archive(zip_file_name, 'zip',
                                                whole_path)
            self._upload_attachment(zip_file_name,
                                    os.path.basename(zip_file_name))
            os.remove(zip_file_name)

        else:
            logger.warning("There are no logs on the path (%s)!" %
                           (whole_path, ))

    def _log_message_to_rp_console(self, msg, level):
        self.service.log(time=timestamp(), message=msg, level=level)

    def _process_failed_case(self, case):
        msg = self.strategy.extract_failure_msg_from_xunit(case)
        self._log_message_to_rp_console(msg, "ERROR")

    def store_launch_info(self, dest):
        launch_url = self.launch_public_url % self.launch_id
        json_data = {
            "rp_launch_url": launch_url,
            "rp_launch_name": self.launch_name,
            "rp_launch_tags": self.launch_tags,
            "rp_launch_desc": self.launch_description,
            "rp_launch_id": self.launch_id
        }
        with open(dest, "w") as file:
            json.dump(json_data, file)

    def attach_logs_to_failed_case(self, case):
        path_to_logs_per_test = self.strategy.get_logs_per_test_path(case)

        if self.zipped:
            # zip logs per test and upload zip file
            self.upload_zipped_test_case_attachments(
                "{0}".format(case.get('@name')), path_to_logs_per_test)
        else:
            # upload logs per tests one by one and do not zip them
            self.upload_test_case_attachments("{0}/{1}".format(
                self.test_logs, path_to_logs_per_test))

    def _open_new_folder(self, folder_name):
        self.service.start_test_item(
            name=folder_name,
            start_time=timestamp(),
            item_type="SUITE",
        )

    def _close_folder(self):
        self.service.finish_test_item(end_time=timestamp(), status=None)

    def feed_results(self):
        self._start_launch()

        with open(self.xunit_feed) as fd:
            data = xmltodict.parse(fd.read())

        xml = data.get("testsuite").get("testcase")

        # if there is only 1 test case, convert 'xml' from dict to list
        # otherwise, 'xml' is always list
        if not isinstance(xml, list):
            xml = [xml]

        xml = sorted(xml, key=lambda k: k['@classname'])

        for case in xml:
            issue = None
            name = self.strategy.get_testcase_name(case)
            description = self.strategy.get_testcase_description(case)
            tags = self.strategy.get_tags(case, test_owners=self.test_owners)

            if self.strategy.should_create_folders_in_launch():
                open_new_folder, folder_name = self.strategy.create_folder(
                    case)
                if self.strategy.is_first_folder():
                    if open_new_folder:
                        self._open_new_folder(folder_name)
                elif open_new_folder:  # a new folder should be opened: close the last one and open the new one
                    self._close_folder()
                    self._open_new_folder(folder_name)

            self.service.start_test_item(
                name=name[:255],
                description=description,
                tags=tags,
                start_time=timestamp(),
                item_type="STEP",
            )
            # Create text log message with INFO level.
            if case.get('system_out'):
                self._log_message_to_rp_console(case.get('system_out'), "INFO")

            if 'skipped' in case:
                issue = {
                    "issue_type": "NOT_ISSUE"
                }  # this will cause skipped test to not be "To Investigate"
                status = 'SKIPPED'
                if case.get('skipped'):
                    self._log_message_to_rp_console(
                        case.get('skipped').get('@message'), "DEBUG")
                else:
Example #19
class PyTestServiceClass(with_metaclass(Singleton, object)):

    def __init__(self):
        self.RP = None
        try:
            pkg_resources.get_distribution('reportportal_client >= 3.2.0')
            self.RP_SUPPORTS_PARAMETERS = True
        except pkg_resources.VersionConflict:
            self.RP_SUPPORTS_PARAMETERS = False

        self.ignore_errors = True
        self.ignored_tags = []

        self._errors = queue.Queue()
        self._loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')
        self._start_stack = []
        self._finish_stack = []

    def init_service(self, endpoint, project, uuid, log_batch_size,
                     ignore_errors, ignored_tags):
        self._errors = queue.Queue()
        if self.RP is None:
            self.ignore_errors = ignore_errors
            if self.RP_SUPPORTS_PARAMETERS:
                self.ignored_tags = list(set(ignored_tags).union({'parametrize'}))
            else:
                self.ignored_tags = ignored_tags
            log.debug('ReportPortal - Init service: endpoint=%s, '
                      'project=%s, uuid=%s', endpoint, project, uuid)
            self.RP = ReportPortalServiceAsync(
                endpoint=endpoint,
                project=project,
                token=uuid,
                error_handler=self.async_error_handler,
                log_batch_size=log_batch_size
            )
        else:
            log.debug('pytest is already initialized')
        return self.RP

    def async_error_handler(self, exc_info):
        self.terminate_service(nowait=True)
        self.RP = None
        self._errors.put_nowait(exc_info)

    def terminate_service(self, nowait=False):
        if self.RP is not None:
            self.RP.terminate(nowait)
            self.RP = None

    def start_launch(self, launch_name,
                     mode=None,
                     tags=None,
                     description=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        sl_pt = {
            'name': launch_name,
            'start_time': timestamp(),
            'description': description,
            'mode': mode,
            'tags': tags
        }
        log.debug('ReportPortal - Start launch: request_body=%s', sl_pt)
        req_data = self.RP.start_launch(**sl_pt)
        log.debug('ReportPortal - Launch started: response_body=%s', req_data)


    def collect_tests(self, session):
        self._stop_if_necessary()
        if self.RP is None:
            return

        for item in session.items:
            # Start collecting test item parts
            parts_in = []
            parts_out = []
            parts = self._get_item_parts(item)
            # Add all parts in reverse order to parts_out
            parts_out.extend(reversed(parts))
            while parts:
                part = parts.pop(0)
                if part in self._start_stack:
                    # If we've seen this part, skip it
                    continue
                # We haven't seen this part yet. It could be a Class, Module or Function.
                # Append it to parts_in.
                parts_in.append(part)

            # Update self._start_stack and self._finish_stack
            self._start_stack.extend(parts_in)
            self._finish_stack.extend(parts_out)

    def start_pytest_item(self, test_item=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        while True:
            part = self._start_stack.pop(0)
            if part is test_item:
                break
            payload = {
                'name': self._get_item_name(part),
                'description': self._get_item_description(part),
                'tags': self._get_item_tags(part),
                'start_time': timestamp(),
                'item_type': 'SUITE'
            }
            log.debug('ReportPortal - Start Suite: request_body=%s', payload)
            self.RP.start_test_item(**payload)

        start_rq = {
            'name': self._get_item_name(test_item),
            'description': self._get_item_description(test_item),
            'tags': self._get_item_tags(test_item),
            'start_time': timestamp(),
            'item_type': 'STEP'
        }
        if self.RP_SUPPORTS_PARAMETERS:
            start_rq['parameters'] = self._get_parameters(test_item)

        log.debug('ReportPortal - Start TestItem: request_body=%s', start_rq)
        self.RP.start_test_item(**start_rq)

    def finish_pytest_item(self, status, issue=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        # Remove the test from the finish stack
        self._finish_stack.pop(0)

        fta_rq = {
            'end_time': timestamp(),
            'status': status,
            'issue': issue
        }

        log.debug('ReportPortal - Finish TestItem: request_body=%s', fta_rq)
        self.RP.finish_test_item(**fta_rq)

        while self._finish_stack:
            if isinstance(self._finish_stack[0], Function):
                break
            part = self._finish_stack.pop(0)
            if self._finish_stack.count(part):
                continue
            payload = {
                'end_time': timestamp(),
                'issue': issue,
                'status': 'PASSED'
            }
            log.debug('ReportPortal - End TestSuite: request_body=%s', payload)
            self.RP.finish_test_item(**payload)


    def finish_launch(self, launch=None, status='rp_launch'):
        self._stop_if_necessary()
        if self.RP is None:
            return

        # Finishing a launch requires a status string parameter
        fl_rq = {
            'end_time': timestamp(),
            'status': status
        }
        log.debug('ReportPortal - Finish launch: request_body=%s', fl_rq)
        self.RP.finish_launch(**fl_rq)

    def post_log(self, message, loglevel='INFO', attachment=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        if loglevel not in self._loglevels:
            log.warning('Incorrect loglevel = %s. Force set to INFO. '
                        'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            'time': timestamp(),
            'message': message,
            'level': loglevel,
            'attachment': attachment,
        }
        self.RP.log(**sl_rq)

    def _stop_if_necessary(self):
        try:
            exc, msg, tb = self._errors.get(False)
            traceback.print_exception(exc, msg, tb)
            sys.stderr.flush()
            if not self.ignore_errors:
                pytest.exit(msg)
        except queue.Empty:
            pass

    @staticmethod
    def _get_item_parts(item):
        parts = []
        parent = item.parent
        if not isinstance(parent, Instance):
            parts.append(parent)
        while True:
            parent = parent.parent
            if parent is None:
                break
            if isinstance(parent, Instance):
                continue
            if isinstance(parent, Session):
                break
            parts.append(parent)

        parts.reverse()
        parts.append(item)
        return parts

    def _get_item_tags(self, item):
        # Try to extract names of @pytest.mark.* decorators used for the test item
        # and exclude those that are present in the rp_ignore_tags parameter
        return [k for k in item.keywords if item.get_marker(k) is not None
                and k not in self.ignored_tags]

    def _get_parameters(self, item):
        return item.callspec.params if hasattr(item, 'callspec') else {}

    @staticmethod
    def _get_item_name(test_item):
        name = test_item.name
        if len(name) > 256:
            name = name[:256]
            test_item.warn(
                'C1',
                'Test node ID was truncated to "{}" because of name size '
                'constraints on Report Portal'.format(name)
            )
        return name

    @staticmethod
    def _get_item_description(test_item):
        if isinstance(test_item, (Class, Function, Module)):
            doc = test_item.obj.__doc__
            if doc is not None:
                return doc.strip()
        if isinstance(test_item, DoctestItem):
            return test_item.reportinfo()[2]
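
Note: with_metaclass(Singleton, object) above comes from six; the Singleton metaclass itself is project-local and not shown on this page. A minimal sketch of what such a metaclass usually looks like (an assumption, not necessarily the plugin's actual code):

class Singleton(type):
    """Metaclass that hands back the same instance on every instantiation."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Create the instance once, then reuse it for every later call.
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]
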
Example #20
class ReportPortalPlugin:
    """reportportal.io plugin to hook_plug.

    behave properties: behave.readthedocs.io/en/latest/context_attributes.html
    """
    @staticmethod
    def error(exc_info):
        print_exception(*exc_info)

    @staticmethod
    def step_table(step):
        if step.table:
            rows = '|\n|'.join(['|'.join(row) for row in step.table.rows])
            return '|{}|'.format(rows)
        return None

    @staticmethod
    def step_text(step):
        if hasattr(step, 'text'):
            return step.text
        return None

    @staticmethod
    def timestamp():
        return str(int(time() * 1000))

    @staticmethod
    def check_context(context):
        if not hasattr(context, 'config'):
            raise EnvironmentError(
                'Please, check if context is a behave context')
        try:
            context.config.userdata['rp_project']
            context.config.userdata['rp_endpoint']
            context.config.userdata['rp_launch']
            context.config.userdata['rp_token']
        except KeyError:
            raise EnvironmentError('Please check your behave.ini file')
        return True

    @tag_behavior
    def before_all(self, context):
        """
        TODO: get data from behave.userdata

            endpoint: architecture/OS/Browser
            token: user Report Portal API token
            project: project name or label
        """
        self._rp = ReportPortalServiceAsync(
            endpoint=context.config.userdata.get('rp_endpoint', None),
            project=context.config.userdata.get('rp_project', None),
            token=context.config.userdata.get('rp_token', None),
            error_handler=self.error,
        )
        self._rp.start_launch(
            name=context.config.userdata.get('rp_launch', None),
            start_time=self.timestamp(),
        )

    @tag_behavior
    def before_feature(self, context, feature):
        self._rp.start_test_item(name=feature.name,
                                 start_time=self.timestamp(),
                                 description=' '.join(feature.description),
                                 tags=feature.tags,
                                 item_type='STORY')

    @tag_behavior
    def before_scenario(self, context, scenario):
        self._rp.start_test_item(name=scenario.name,
                                 start_time=self.timestamp(),
                                 description=' '.join(scenario.description),
                                 tags=scenario.tags,
                                 item_type='Scenario')

    @tag_behavior
    def before_step(self, context, step):
        """NOTE: step doesn't has tag"""
        self._rp.start_test_item(name=step.name,
                                 start_time=self.timestamp(),
                                 description=self.step_table(step)
                                 or self.step_text(step),
                                 tags=None,
                                 item_type='step')

    @tag_behavior
    def after_all(self, context):
        self._rp.finish_launch(end_time=self.timestamp())
        self._rp.terminate()

    @tag_behavior
    def after_feature(self, context, feature):
        self._rp.finish_test_item(
            end_time=self.timestamp(),
            status=feature.status.name,
        )

    @tag_behavior
    def after_scenario(self, context, scenario):
        self._rp.finish_test_item(
            end_time=self.timestamp(),
            status=scenario.status.name,
        )

    @tag_behavior
    def after_step(self, context, step):
        if step.status.name == 'failed':
            self._rp.log(
                time=self.timestamp(),
                message=''.join(format_tb(step.exc_traceback)),
                level='ERROR',
            )
        self._rp.finish_test_item(
            end_time=self.timestamp(),
            status=step.status.name,
        )
class PyTestServiceClass(with_metaclass(Singleton, object)):
    def __init__(self):
        self.RP = None
        self.ignore_errors = True
        self.ignored_tags = []

        self._errors = queue.Queue()
        self._loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')

    def init_service(self, endpoint, project, uuid, log_batch_size,
                     ignore_errors, ignored_tags):
        self._errors = queue.Queue()
        if self.RP is None:
            self.ignore_errors = ignore_errors
            self.ignored_tags = ignored_tags
            logging.debug('ReportPortal - Init service: endpoint=%s, '
                          'project=%s, uuid=%s', endpoint, project, uuid)
            self.RP = ReportPortalServiceAsync(
                endpoint=endpoint,
                project=project,
                token=uuid,
                error_handler=self.async_error_handler,
                log_batch_size=log_batch_size
            )
        else:
            logging.debug('pytest is already initialized')
        return self.RP

    def async_error_handler(self, exc_info):
        self.terminate_service()
        self.RP = None
        self._errors.put_nowait(exc_info)

    def _stop_if_necessary(self):
        try:
            exc, msg, tb = self._errors.get(False)
            traceback.print_exception(exc, msg, tb)
            sys.stderr.flush()
            if not self.ignore_errors:
                pytest.exit(msg)
        except queue.Empty:
            pass

    def terminate_service(self):
        if self.RP is not None:
            self.RP.terminate()

    def start_launch(
            self, launch_name, mode=None, tags=None, description=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        sl_pt = {
            'name': launch_name,
            'start_time': timestamp(),
            'description': description,
            'mode': mode,
            'tags': tags
        }
        logging.debug('ReportPortal - Start launch: '
                      'request_body=%s', sl_pt)
        req_data = self.RP.start_launch(**sl_pt)
        logging.debug('ReportPortal - Launch started: '
                      'response_body=%s', req_data)

    def start_pytest_item(self, test_item=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        start_rq = {
            'name': self._get_full_name(test_item),
            'description': self._get_description(test_item),
            'tags': self._get_tags(test_item),
            'start_time': timestamp(),
            'item_type': 'STEP'
        }

        logging.debug('ReportPortal - Start TestItem: '
                      'request_body=%s', start_rq)
        self.RP.start_test_item(**start_rq)

    def _get_tags(self, item):
        # Try to extract names of @pytest.mark.* decorators used for the test item
        # and exclude those that are present in the rp_ignore_tags parameter
        markers_list = []
        for k in item.keywords:
            if not item.get_marker(k) or k in self.ignored_tags:
                continue
            # simple MarkDecorator
            if not item.get_marker(k).args and not item.get_marker(k).kwargs:
                markers_list.append(k)
            # parametrized MarkDecorator
            if item.get_marker(k).args:
                for marker_arg in item.get_marker(k).args:
                    markers_list.append("%s(%s)" % (k, marker_arg))
            # parametrized MarkDecorator with kwargs
            if item.get_marker(k).kwargs:
                for mrk_key, mrk_value in item.get_marker(k).kwargs.items():
                    markers_list.append("%s(%s=%s)" % (k, mrk_key, mrk_value))
        return markers_list

    def finish_pytest_item(self, status, issue=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        fta_rq = {
            'end_time': timestamp(),
            'status': status,
            'issue': issue
        }

        logging.debug('ReportPortal - Finish TestItem: '
                      'request_body=%s', fta_rq)
        self.RP.finish_test_item(**fta_rq)

    def finish_launch(self, launch=None, status='rp_launch'):
        self._stop_if_necessary()
        if self.RP is None:
            return

        # Finishing a launch requires a status string parameter
        fl_rq = {
            'end_time': timestamp(),
            'status': status
        }
        logging.debug('ReportPortal - Finish launch: request_body=%s', fl_rq)
        self.RP.finish_launch(**fl_rq)

    def post_log(self, message, loglevel='INFO', attachment=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        if loglevel not in self._loglevels:
            logging.warning('Incorrect loglevel = %s. Force set to INFO. '
                            'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            'time': timestamp(),
            'message': message,
            'level': loglevel,
            'attachment': attachment,
        }
        self.RP.log(**sl_rq)

    @staticmethod
    def _get_full_name(test_item):
        return test_item.nodeid

    @staticmethod
    def _get_description(test_item):
        try:
            # for common items
            return test_item.function.__doc__
        except AttributeError:
            # doctest has no `function` attribute
            return test_item.reportinfo()[2]
Example #22
    def report_errors(self):
        with self.no_ssl_verification():
            self.create_project()
            service = ReportPortalServiceAsync(
                endpoint=self.rp_url,
                project=self.rp_project,
                token=self.rp_token,
                error_handler=self.my_error_handler)

            errors = self.errors
            errors_len = len(errors)
            if errors_len > 0:
                # Start launch.
                service.start_launch(
                    name=self.rp_launch_name,
                    start_time=self.timestamp(),
                    description='This simulation has {} failures'.format(
                        errors_len))
                for key in errors:
                    # Start test item.
                    item_name = self.get_item_name(errors[key])
                    service.start_test_item(
                        name=item_name,
                        description="This request was failed {} times".format(
                            errors[key]['Error count']),
                        tags=[
                            self.args['type'], errors[key]['Request URL'],
                            'gatling_test'
                        ],
                        start_time=self.timestamp(),
                        item_type="STEP",
                        parameters={
                            "simulation":
                            self.args['simulation'],
                            'duration':
                            int(self.args['end_time']) / 1000 -
                            int(self.args['start_time']) / 1000,
                            'test type':
                            self.args['type']
                        })

                    self.log_message(service, 'Request name', errors[key],
                                     'WARN')
                    self.log_message(service, 'Method', errors[key], 'WARN')
                    self.log_message(service, 'Request URL', errors[key],
                                     'WARN')
                    self.log_message(service, 'Request_params', errors[key],
                                     'WARN')
                    self.log_message(service, 'Request headers', errors[key],
                                     'INFO')
                    self.log_message(service, 'Error count', errors[key],
                                     'WARN')
                    self.log_message(service, 'Error code', errors[key],
                                     'WARN')
                    self.log_message(service, 'Error_message', errors[key],
                                     'WARN')
                    self.log_message(service, 'Response code', errors[key],
                                     'WARN')
                    self.log_message(service, 'Response', errors[key], 'WARN')
                    self.log_unique_error_id(service,
                                             errors[key]['Request name'],
                                             errors[key]['Method'],
                                             errors[key]['Response code'],
                                             errors[key]['Error code'])

                    service.finish_test_item(end_time=self.timestamp(),
                                             status="FAILED")
            else:
                service.start_launch(
                    name=self.rp_launch_name,
                    start_time=self.timestamp(),
                    description='This simulation has no failures')

            # Finish launch.
            service.finish_launch(end_time=self.timestamp())

            service.terminate()
def my_error_handler(exc_info):
    """
    This callback is invoked by the async service client when an error occurs.
    Return True if the error is not critical and you want to continue working.
    :param exc_info: result of sys.exc_info() -> (type, value, traceback)
    :return:
    """
    print("Error occurred: {}".format(exc_info[1]))
    traceback.print_exception(*exc_info)


def timestamp():
    return str(int(time() * 1000))


service = ReportPortalServiceAsync(endpoint=rp_endpoint,
                                   project=rp_project,
                                   token=rp_token,
                                   error_handler=my_error_handler)
# Disables SSL verification
# service.rp_client.session.verify = False


def start_launcher(name, start_time, description=None, tags=None):
    service.start_launch(name=name,
                         start_time=start_time,
                         description=description,
                         tags=tags)


def start_feature_test(**kwargs):
    start_test(**kwargs)
Example #24
    def report_test_results(self, errors, performance_degradation_rate,
                            compare_with_baseline, missed_threshold_rate,
                            compare_with_thresholds):
        with self.no_ssl_verification():
            self.create_project()
            service = ReportPortalServiceAsync(endpoint=self.rp_url, project=self.rp_project,
                                               token=self.rp_token, error_handler=self.my_error_handler)

            # Start launch.
            service.start_launch(name=self.rp_launch_name + ": performance testing results",
                                 start_time=self.timestamp(),
                                 description='Test name - {}'.format(self.args['simulation']))
            errors_len = len(errors)

            if errors_len > 0:
                service.start_test_item(name="Functional errors",
                                        start_time=self.timestamp(),
                                        description="This simulation has failed requests",
                                        item_type="SUITE")
                for key in errors:
                    # Start test item.
                    item_name = self.get_item_name(errors[key])
                    service.start_test_item(name=item_name,
                                            description="This request was failed {} times".format(
                                                errors[key]['Error count']),
                                            tags=[errors[key]['Request URL']],
                                            start_time=self.timestamp(),
                                            item_type="STEP",
                                            parameters={"simulation": self.args['simulation'],
                                                        'test type': self.args['type']})

                    self.log_message(service, 'Request name', errors[key], 'WARN')
                    self.log_message(service, 'Method', errors[key], 'WARN')
                    self.log_message(service, 'Request URL', errors[key], 'WARN')
                    self.log_message(service, 'Request_params', errors[key], 'WARN')
                    self.log_message(service, 'Request headers', errors[key], 'INFO')
                    self.log_message(service, 'Error count', errors[key], 'WARN')
                    self.log_message(service, 'Error_message', errors[key], 'WARN')
                    self.log_message(service, 'Response code', errors[key], 'WARN')
                    self.log_message(service, 'Response', errors[key], 'WARN')
                    self.log_unique_error_id(service, errors[key]['Request name'], errors[key]['Method'],
                                             errors[key]['Response code'])

                    service.finish_test_item(end_time=self.timestamp(), status="FAILED")
                service.finish_test_item(end_time=self.timestamp(), status="FAILED")
            else:
                service.start_test_item(name="Functional errors",
                                        start_time=self.timestamp(),
                                        item_type="STEP",
                                        description='This simulation has no functional errors')
                service.finish_test_item(end_time=self.timestamp(), status="PASSED")

            if performance_degradation_rate > self.performance_degradation_rate:
                service.start_test_item(name="Compare to baseline",
                                        start_time=self.timestamp(),
                                        description="Test \"{}\" failed with performance degradation rate {}"
                                        .format(self.args['simulation'], performance_degradation_rate),
                                        item_type="SUITE")

                service.log(time=self.timestamp(),
                            message="The following requests are slower than baseline:",
                            level='INFO')
                for request in compare_with_baseline:
                    service.start_test_item(name="\"{}\" reached {} ms by {}. Baseline {} ms."
                                .format(request['request_name'], request['response_time'],
                                        self.args['comparison_metric'], request['baseline']),
                                            tags=['performance degradation'],
                                            start_time=self.timestamp(),
                                            item_type="STEP",
                                            parameters={'simulation': self.args['simulation'],
                                                        'test type': self.args['type']})

                    service.log(time=self.timestamp(), message="\"{}\" reached {} ms by {}. Baseline {} ms."
                                .format(request['request_name'], request['response_time'],
                                        self.args['comparison_metric'], request['baseline']),
                                level='WARN')
                    service.finish_test_item(end_time=self.timestamp(), status="FAILED")
                service.log(time=self.timestamp(), message=hashlib.sha256(
                    "{} performance degradation".format(self.args['simulation']).strip().encode('utf-8')).hexdigest(),
                            level='ERROR')

                service.finish_test_item(end_time=self.timestamp(), status="FAILED")
            else:
                service.start_test_item(name="Compare to baseline",
                                        start_time=self.timestamp(),
                                        item_type="STEP",
                                        description='Performance degradation rate less than {}'
                                        .format(self.performance_degradation_rate))
                service.finish_test_item(end_time=self.timestamp(), status="PASSED")

            if missed_threshold_rate > self.missed_thresholds_rate:
                service.start_test_item(name="Compare with thresholds",
                                        start_time=self.timestamp(),
                                        description="Test \"{}\" failed with missed thresholds rate {}"
                                                    .format(self.args['simulation'], missed_threshold_rate),
                                        item_type="SUITE")

                for color in ["yellow", "red"]:
                    colored = False
                    for th in compare_with_thresholds:
                        if th['threshold'] == color:
                            service.start_test_item(name="{} threshold for  \"{}\""
                                                    .format(color, th['request_name']),
                                                    tags=['missed thresholds'],
                                                    start_time=self.timestamp(),
                                                    item_type="STEP",
                                                    parameters={'simulation': self.args['simulation'],
                                                                'test type': self.args['type']})
                            if not colored:
                                service.log(time=self.timestamp(),
                                            message=f"The following {color} thresholds were exceeded:", level="INFO")
                                colored = True
                            appendage = calculate_appendage(th['target'])
                            service.log(time=self.timestamp(),
                                        message=f"\"{th['request_name']}\" {th['target']}{appendage} with value {th['metric']}{appendage} exceeded threshold of {th[color]}{appendage}",
                                        level="WARN")
                            service.finish_test_item(end_time=self.timestamp(), status="FAILED")
                service.log(time=self.timestamp(), message=hashlib.sha256(
                    "{} missed thresholds".format(self.args['simulation']).strip().encode('utf-8')).hexdigest(),
                            level='ERROR')

                service.finish_test_item(end_time=self.timestamp(), status="FAILED")
            else:
                service.start_test_item(name="Compare with thresholds",
                                        start_time=self.timestamp(),
                                        item_type="STEP",
                                        description='Missed thresholds rate less than {}'
                                        .format(self.missed_thresholds_rate))
                service.finish_test_item(end_time=self.timestamp(), status="PASSED")
            # Finish launch.
            service.finish_launch(end_time=self.timestamp())

            service.terminate()
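
Note: both Gatling reporter examples above enter self.no_ssl_verification() before talking to the server, but the context manager itself is not shown on this page. A common implementation pattern for such a helper, sketched here as an assumption rather than the project's actual code (the examples bind it as a method on the reporter class), temporarily patches requests so every call inside the block skips certificate verification:

import contextlib
import warnings

import requests
from urllib3.exceptions import InsecureRequestWarning

@contextlib.contextmanager
def no_ssl_verification():
    # Patch requests.Session.merge_environment_settings so that every request
    # made inside the block runs with verify=False, then restore the original.
    old_merge = requests.Session.merge_environment_settings

    def merge(self, url, proxies, stream, verify, cert):
        settings = old_merge(self, url, proxies, stream, verify, cert)
        settings['verify'] = False
        return settings

    requests.Session.merge_environment_settings = merge
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', InsecureRequestWarning)
        try:
            yield
        finally:
            requests.Session.merge_environment_settings = old_merge
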
Example #25
class PyTestServiceClass(with_metaclass(Singleton, object)):
    def __init__(self):
        self.RP = None
        try:
            pkg_resources.get_distribution('reportportal_client >= 3.2.0')
            self.RP_SUPPORTS_PARAMETERS = True
        except pkg_resources.VersionConflict:
            self.RP_SUPPORTS_PARAMETERS = False

        self.ignore_errors = True
        self.ignored_tags = []

        self._errors = queue.Queue()
        self._loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')
        self._hier_parts = {}
        self._item_parts = {}

    def init_service(self,
                     endpoint,
                     project,
                     uuid,
                     log_batch_size,
                     ignore_errors,
                     ignored_tags,
                     verify_ssl=False):
        self._errors = queue.Queue()
        if self.RP is None:
            self.ignore_errors = ignore_errors
            if self.RP_SUPPORTS_PARAMETERS:
                self.ignored_tags = list(
                    set(ignored_tags).union({'parametrize'}))
            else:
                self.ignored_tags = ignored_tags
            log.debug(
                'ReportPortal - Init service: endpoint=%s, '
                'project=%s, uuid=%s', endpoint, project, uuid)
            self.RP = ReportPortalServiceAsync(
                endpoint=endpoint,
                project=project,
                token=uuid,
                error_handler=self.async_error_handler,
                log_batch_size=log_batch_size,
                # verify_ssl=False
            )
            self.project_settings = None  # self.RP.rp_client.get_project_settings() if self.RP else None
            self.issue_types = self.get_issue_types()
        else:
            log.debug('The ReportPortal service is already initialized')
        return self.RP

    def async_error_handler(self, exc_info):
        self.terminate_service(nowait=True)
        self.RP = None
        self._errors.put_nowait(exc_info)

    def terminate_service(self, nowait=False):
        if self.RP is not None:
            self.RP.terminate(nowait)
            self.RP = None

    def start_launch(self,
                     launch_name,
                     mode=None,
                     tags=None,
                     description=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        sl_pt = {
            'name': launch_name,
            'start_time': timestamp(),
            'description': description,
            'mode': mode,
            'tags': tags
        }
        log.debug('ReportPortal - Start launch: request_body=%s', sl_pt)
        req_data = self.RP.start_launch(**sl_pt)
        log.debug('ReportPortal - Launch started: response_body=%s', req_data)

    def collect_tests(self, session):
        self._stop_if_necessary()
        if self.RP is None:
            return

        hier_dirs = False
        hier_module = False
        hier_class = False
        hier_param = False

        if not hasattr(session.config, 'slaveinput'):
            hier_dirs = session.config.getini('rp_hierarchy_dirs')
            hier_module = session.config.getini('rp_hierarchy_module')
            hier_class = session.config.getini('rp_hierarchy_class')
            hier_param = session.config.getini('rp_hierarchy_parametrize')

        try:
            hier_dirs_level = int(
                session.config.getini('rp_hierarchy_dirs_level'))
        except ValueError:
            hier_dirs_level = 0

        dirs_parts = {}
        tests_parts = {}

        for item in session.items:
            # Start collecting test item parts
            parts = []

            # Hierarchy for directories
            rp_name = self._add_item_hier_parts_dirs(item, hier_dirs,
                                                     hier_dirs_level, parts,
                                                     dirs_parts)

            # Hierarchy for Module and Class/UnitTestCase
            item_parts = self._get_item_parts(item)
            rp_name = self._add_item_hier_parts_other(item_parts, item, Module,
                                                      hier_module, parts,
                                                      rp_name)
            rp_name = self._add_item_hier_parts_other(item_parts, item, Class,
                                                      hier_class, parts,
                                                      rp_name)
            rp_name = self._add_item_hier_parts_other(item_parts, item,
                                                      UnitTestCase, hier_class,
                                                      parts, rp_name)

            # Hierarchy for parametrized tests
            if hier_param:
                rp_name = self._add_item_hier_parts_parametrize(
                    item, parts, tests_parts, rp_name)

            # Hierarchy for test itself (Function/TestCaseFunction)
            item._rp_name = rp_name + ("::" if rp_name else "") + item.name

            # Result initialization
            for part in parts:
                part._rp_result = "PASSED"

            self._item_parts[item] = parts
            for part in parts:
                if part not in self._hier_parts:
                    self._hier_parts[part] = {
                        "finish_counter": 1,
                        "start_flag": False
                    }
                else:
                    self._hier_parts[part]["finish_counter"] += 1

    def start_pytest_item(self, test_item=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        for part in self._item_parts[test_item]:
            if self._hier_parts[part]["start_flag"]:
                continue
            self._hier_parts[part]["start_flag"] = True

            payload = {
                'name': self._get_item_name(part),
                'description': self._get_item_description(part),
                'tags': self._get_item_tags(part),
                'start_time': timestamp(),
                'item_type': 'SUITE'
            }
            log.debug('ReportPortal - Start Suite: request_body=%s', payload)
            self.RP.start_test_item(**payload)

        start_rq = {
            'name': self._get_item_name(test_item),
            'description': self._get_item_description(test_item),
            'tags': self._get_item_tags(test_item),
            'start_time': timestamp(),
            'item_type': 'STEP'
        }
        if self.RP_SUPPORTS_PARAMETERS:
            start_rq['parameters'] = self._get_parameters(test_item)

        log.debug('ReportPortal - Start TestItem: request_body=%s', start_rq)
        self.RP.start_test_item(**start_rq)

    def finish_pytest_item(self, test_item, status, issue=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        fta_rq = {'end_time': timestamp(), 'status': status, 'issue': issue}

        log.debug('ReportPortal - Finish TestItem: request_body=%s', fta_rq)
        self.RP.finish_test_item(**fta_rq)

        parts = self._item_parts[test_item]
        while len(parts) > 0:
            part = parts.pop()
            if status == "FAILED":
                part._rp_result = status
            self._hier_parts[part]["finish_counter"] -= 1
            if self._hier_parts[part]["finish_counter"] > 0:
                continue
            payload = {
                'end_time': timestamp(),
                'issue': issue,
                'status': part._rp_result
            }
            log.debug('ReportPortal - End TestSuite: request_body=%s', payload)
            self.RP.finish_test_item(**payload)

    def finish_launch(self, launch=None, status='rp_launch'):
        self._stop_if_necessary()
        if self.RP is None:
            return

        # To finish launch session str parameter is needed
        fl_rq = {'end_time': timestamp(), 'status': status}
        log.debug('ReportPortal - Finish launch: request_body=%s', fl_rq)
        self.RP.finish_launch(**fl_rq)

    def post_log(self, message, loglevel='INFO', attachment=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        if loglevel not in self._loglevels:
            log.warning(
                'Incorrect loglevel = %s. Force set to INFO. '
                'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            'time': timestamp(),
            'message': message,
            'level': loglevel,
            'attachment': attachment,
        }
        self.RP.log(**sl_rq)

    def _stop_if_necessary(self):
        try:
            exc, msg, tb = self._errors.get(False)
            traceback.print_exception(exc, msg, tb)
            sys.stderr.flush()
            if not self.ignore_errors:
                pytest.exit(msg)
        except queue.Empty:
            pass

    def get_issue_types(self):
        issue_types = {}
        if not self.project_settings:
            return issue_types

        for item_type in ("AUTOMATION_BUG", "PRODUCT_BUG", "SYSTEM_ISSUE",
                          "NO_DEFECT", "TO_INVESTIGATE"):
            for item in self.project_settings["subTypes"][item_type]:
                issue_types[item["shortName"]] = item["locator"]

        return issue_types

    @staticmethod
    def _add_item_hier_parts_dirs(item,
                                  hier_flag,
                                  dirs_level,
                                  report_parts,
                                  dirs_parts,
                                  rp_name=""):

        parts_dirs = PyTestServiceClass._get_item_dirs(item)
        dir_path = item.fspath.new(dirname="", basename="", drive="")
        rp_name_path = ""

        for dir_name in parts_dirs[dirs_level:]:
            dir_path = dir_path.join(dir_name)
            path = str(dir_path)

            if hier_flag:
                if path in dirs_parts:
                    item_dir = dirs_parts[path]
                    rp_name = ""
                else:
                    item_dir = File(dir_name,
                                    nodeid=dir_name,
                                    session=item.session,
                                    config=item.session.config)
                    rp_name += dir_name
                    item_dir._rp_name = rp_name
                    dirs_parts[path] = item_dir
                    rp_name = ""

                report_parts.append(item_dir)
            else:
                rp_name_path = path[1:]

        if not hier_flag:
            rp_name += rp_name_path

        return rp_name

    @staticmethod
    def _add_item_hier_parts_parametrize(item,
                                         report_parts,
                                         tests_parts,
                                         rp_name=""):

        for mark in item.own_markers:
            if mark.name == 'parametrize':
                ch_index = item.nodeid.find("[")
                test_fullname = item.nodeid[:ch_index if ch_index > 0 else len(
                    item.nodeid)]
                test_name = item.originalname

                rp_name += ("::" if rp_name else "") + test_name

                if test_fullname in tests_parts:
                    item_test = tests_parts[test_fullname]
                else:
                    item_test = Item(test_fullname,
                                     nodeid=test_fullname,
                                     session=item.session,
                                     config=item.session.config)
                    item_test._rp_name = rp_name
                    item_test.obj = item.obj
                    item_test.keywords = item.keywords
                    item_test.own_markers = item.own_markers
                    item_test.parent = item.parent

                    tests_parts[test_fullname] = item_test

                rp_name = ""
                report_parts.append(item_test)
                break

        return rp_name

    @staticmethod
    def _add_item_hier_parts_other(item_parts,
                                   item,
                                   item_type,
                                   hier_flag,
                                   report_parts,
                                   rp_name=""):

        for part in item_parts:

            if type(part) is item_type:

                if item_type is Module:
                    module_path = str(
                        item.fspath.new(dirname=rp_name,
                                        basename=part.fspath.basename,
                                        drive=""))
                    rp_name = module_path if rp_name else module_path[1:]
                elif item_type in (Class, Function, UnitTestCase,
                                   TestCaseFunction):
                    rp_name += ("::" if rp_name else "") + part.name

                if hier_flag:
                    part._rp_name = rp_name
                    rp_name = ""
                    report_parts.append(part)

        return rp_name

    @staticmethod
    def _get_item_parts(item):
        parts = []
        parent = item.parent
        if not isinstance(parent, Instance):
            parts.append(parent)
        while True:
            parent = parent.parent
            if parent is None:
                break
            if isinstance(parent, Instance):
                continue
            if isinstance(parent, Session):
                break
            parts.append(parent)

        parts.reverse()
        return parts

    @staticmethod
    def _get_item_dirs(item):

        root_path = item.session.config.rootdir.strpath
        dir_path = item.fspath.new(basename="")
        rel_dir = dir_path.new(dirname=dir_path.relto(root_path),
                               basename="",
                               drive="")

        dir_list = []
        for directory in rel_dir.parts(reverse=False):
            dir_name = directory.basename
            if dir_name:
                dir_list.append(dir_name)

        return dir_list

    def _get_item_tags(self, item):
        # Try to extract names of @pytest.mark.* decorators used for test item
        # and exclude those which present in rp_ignore_tags parameter
        def get_marker_value(item, keyword):
            try:
                marker = item.get_closest_marker(keyword)
            except AttributeError:
                # pytest < 3.6
                marker = item.keywords.get(keyword)

            return "{}:{}".format(keyword, marker.args[0]) \
                if marker and marker.args else keyword

        try:
            tags = [
                get_marker_value(item, k) for k in item.keywords
                if item.get_closest_marker(k) is not None
                and k not in self.ignored_tags
            ]
        except AttributeError:
            # pytest < 3.6
            tags = [
                get_marker_value(item, k) for k in item.keywords if
                item.get_marker(k) is not None and k not in self.ignored_tags
            ]

        tags.extend(item.session.config.getini('rp_tests_tags'))

        return tags

    def _get_parameters(self, item):
        return item.callspec.params if hasattr(item, 'callspec') else {}

    @staticmethod
    def _get_item_name(test_item):
        name = test_item._rp_name
        if len(name) > 256:
            name = name[:256]
            test_item.warn(
                'C1',
                'Test node ID was truncated to "{}" because of name size '
                'constraints on Report Portal'.format(name))
        return name

    @staticmethod
    def _get_item_description(test_item):
        if isinstance(test_item, (Class, Function, Module, Item)):
            doc = test_item.obj.__doc__
            if doc is not None:
                return trim_docstring(doc)
        if isinstance(test_item, DoctestItem):
            return test_item.reportinfo()[2]
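
A minimal driver sketch for the PyTestServiceClass above. In the real plugin these calls are made from pytest hooks with live Session/Item objects; the endpoint, project, and uuid below are placeholders, not working credentials.

service = PyTestServiceClass()
service.init_service(endpoint='http://localhost:8080',
                     project='default_personal',
                     uuid='00000000-0000-0000-0000-000000000000',
                     log_batch_size=20,
                     ignore_errors=True,
                     ignored_tags=[])
service.start_launch('smoke', mode='DEFAULT', tags=['smoke'],
                     description='launch started from a sketch')
# collect_tests/start_pytest_item/finish_pytest_item are driven by pytest
# hooks (collection and runtest protocol) with live Session/Item objects.
service.finish_launch(status='PASSED')
service.terminate_service()
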
Example #26
class ReportPortalHandler(AbstractResultHandler):
    """Send tests results and logs to the Report Portal system.

    Attributes:
        main_test (object): the main test instance to be run.
        service (ReportPortalServiceAsync): Endpoint for interacting with
            Report Portal.
        log_handler (ReportPortalLogHandler): A log handler to send every log
            message to the Report Portal system. Logs can be sent only when
            a test is currently running.
    """
    NAME = "reportportal"

    MODE_TO_STRING = {
        MODE_CRITICAL: "Critical",
        MODE_OPTIONAL: "Optional",
        MODE_FINALLY: "Finally"
    }

    EXCEPTION_TYPE_TO_STATUS = {
        TestOutcome.SUCCESS: "PASSED",
        TestOutcome.ERROR: "FAILED",
        TestOutcome.FAILED: "FAILED",
        TestOutcome.SKIPPED: "SKIPPED",
        TestOutcome.EXPECTED_FAILURE: "PASSED",
        TestOutcome.UNEXPECTED_SUCCESS: "FAILED"
    }

    EXCEPTION_TYPE_TO_ISSUE = {
        TestOutcome.ERROR: "AUTOMATION_BUG",
        TestOutcome.FAILED: "PRODUCT_BUG",
        TestOutcome.SKIPPED: "NO_DEFECT"
    }

    def __init__(self, main_test, *args, **kwargs):
        super(ReportPortalHandler, self).__init__(main_test=main_test,
                                                  *args,
                                                  **kwargs)

        configuration = get_configuration()
        self.service = ReportPortalServiceAsync(
            endpoint=configuration.endpoint,
            project=configuration.project,
            token=configuration.token)

        self.log_handler = ReportPortalLogHandler(self.service)
        self.comments = []

    def start_test_run(self):
        """Called once before any tests are executed."""
        run_name = self.main_test.data.run_data.run_name
        mode = "DEFAULT"
        if not run_name:
            run_name = self.main_test.__class__.__name__
            mode = "DEBUG"

        description = self.main_test.__doc__

        self.service.start_launch(name=run_name,
                                  start_time=timestamp(),
                                  description=description,
                                  mode=mode)

    def start_test(self, test):
        """Called when the given test is about to be run.

        Args:
            test (object): test item instance.
        """
        item_type = "STEP"
        description = test.shortDescription()

        if isinstance(test, TestFlow):
            description = test.__doc__

        mode = getattr(test, "mode", None)
        if mode is not None:
            description = "|{}| {}".format(self.MODE_TO_STRING[mode],
                                           description)

        self.service.start_test_item(
            name=test.data.name,
            description=description,
            tags=test.TAGS if hasattr(test, "TAGS") else None,
            start_time=timestamp(),
            item_type=item_type)

        core_log.addHandler(self.log_handler)
        self.service.log(time=timestamp(),
                         level="INFO",
                         message="work dir:\n{0}".format(
                             os.path.abspath(test.work_dir)))

    def start_composite(self, test):
        """Called when the given TestSuite is about to be run.

        Args:
            test (rotest.core.suite.TestSuite): test item instance.
        """
        if test == self.main_test:
            return

        self.service.start_test_item(
            name=test.data.name,
            description=test.__doc__,
            tags=test.TAGS if hasattr(test, "TAGS") else None,
            start_time=timestamp(),
            item_type="Suite")

    def stop_composite(self, test):
        """Called when the given TestSuite has been run.

        Args:
            test (rotest.core.suite.TestSuite): test item instance.
        """
        if test == self.main_test:
            return

        if test.data.success:
            status = "PASSED"
        else:
            status = "FAILED"

        self.service.finish_test_item(end_time=timestamp(), status=status)

    def stop_test_run(self):
        """Called once after all tests are executed."""
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()

    def stop_test(self, test):
        """Called once after a test is finished."""
        core_log.removeHandler(self.log_handler)
        exception_type = test.data.exception_type
        status = self.EXCEPTION_TYPE_TO_STATUS.get(exception_type, "FAILED")

        issue = None
        if exception_type in self.EXCEPTION_TYPE_TO_ISSUE or \
                exception_type is None or exception_type == "":
            issue = {
                "issue_type":
                self.EXCEPTION_TYPE_TO_ISSUE.get(exception_type,
                                                 "TO_INVESTIGATE"),
                "comment":
                "\n".join(self.comments)
            }

        self.service.finish_test_item(end_time=timestamp(),
                                      status=status,
                                      issue=issue)

        self.comments = []

    def add_skip(self, test, reason):
        self.comments.append(reason)

    def add_error(self, test, exception_string):
        reason = [line for line in exception_string.split("\n") if line][-1]
        self.comments.append(reason)

    def add_failure(self, test, exception_string):
        reason = [line for line in exception_string.split("\n") if line][-1]
        self.comments.append(reason)

    def add_unexpected_success(self, test):
        self.service.log(time=timestamp(),
                         message="The test was supposed to fail, but instead "
                         "it passed",
                         level="ERROR")
Example #27
class PortalService:
    def __init__(self, portal_launch_name, portal_launch_doc):
        # Report Portal versions below 5.0.0:

        self.endpoint = get_portal_config().get("ENDPOINT")  # portal服务地址
        self.project = get_portal_config().get("PROJECT")  # portal项目名称
        self.token = get_portal_config().get("TOKEN")  # portal token

        self.service = ReportPortalServiceAsync(
            endpoint=self.endpoint,
            project=self.project,
            token=self.token,
            error_handler=self.my_error_handler)

        # Start launch.
        self.launch = self.service.start_launch(name=portal_launch_name,
                                                start_time=timestamp(),
                                                description=portal_launch_doc)

        # Start test item Report Portal versions below 5.0.0:
        self.test = self.service.start_test_item(name="Test Case",
                                                 description="First Test Case",
                                                 tags=["Image", "Smoke"],
                                                 start_time=timestamp(),
                                                 item_type="STEP",
                                                 parameters={
                                                     "key1": "val1",
                                                     "key2": "val2"
                                                 })

        self.service.finish_test_item(end_time=timestamp(), status="PASSED")
        # Finish launch.
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()

    @staticmethod
    def my_error_handler(exc_info):
        """
        This callback function will be called by async service client when error occurs.
        Return True if the error is not critical and you want to continue the work.
        :param exc_info: result of sys.exc_info() -> (type, value, traceback)
        :return:
        """
        print("Error occurred: {}".format(exc_info[1]))
        traceback.print_exception(*exc_info)

    def service_text_message(self):
        # Create text log message with INFO level.
        self.service.log(time=timestamp(),
                         message="Hello World!",
                         level="INFO")

    def service_message_with_attached_text(self):
        # Create log message with attached text output and WARN level.
        self.service.log(time=timestamp(),
                         message="Too high memory usage!",
                         level="WARN",
                         attachment={
                             "name": "free_memory.txt",
                             "data":
                             "subprocess.check_output('free -h'.split())",
                             "mime": "text/plain"
                         })

    def service_message_with_image(self):
        # Create log message with binary file, INFO level and custom mimetype.
        image = "./image.png"
        with open(image, "rb") as fh:
            attachment = {
                "name": os.path.basename(image),
                "data": fh.read(),
                "mime": guess_type(image)[0] or "application/octet-stream"
            }
            self.service.log(timestamp(), "Screen shot of issue.", "INFO",
                             attachment)

    def service_message_with_command_line(self):
        # Create log message supplying only contents
        self.service.log(timestamp(),
                         "running processes",
                         "INFO",
                         attachment=subprocess.check_output("ps aux".split()))
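
A note on the PortalService class above: its __init__ starts a launch, finishes it, and terminates the service in one go, so the log helpers that follow could only reach the server if the finish/terminate calls were moved out of the constructor. A hedged usage sketch under that assumption:

# Assumes get_portal_config() resolves ENDPOINT/PROJECT/TOKEN to a reachable
# Report Portal instance, and that finish_launch()/terminate() were moved
# out of __init__ so the launch is still open here.
portal = PortalService('demo launch', 'launch created from a sketch')
portal.service_text_message()                 # "Hello World!" at INFO
portal.service_message_with_attached_text()   # WARN with a text attachment
portal.service.finish_launch(end_time=timestamp())
portal.service.terminate()
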
Example #28
class PyTestServiceClass(with_metaclass(Singleton, object)):

    _loglevels = ('TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR')

    def __init__(self):
        self.RP = None
        self.ignore_errors = True
        self._errors = queue.Queue()

    def init_service(self, endpoint, project, uuid, log_batch_size,
                     ignore_errors):
        self._errors = queue.Queue()
        if self.RP is None:
            self.ignore_errors = ignore_errors
            logging.debug(
                msg="ReportPortal - Init service: "
                    "endpoint={0}, project={1}, uuid={2}".
                    format(endpoint, project, uuid))
            self.RP = ReportPortalServiceAsync(
                endpoint=endpoint,
                project=project,
                token=uuid,
                error_handler=self.async_error_handler,
                log_batch_size=log_batch_size
            )
        else:
            logging.debug("The pytest is already initialized")
        return self.RP

    def async_error_handler(self, exc_info):
        self.terminate_service()
        self.RP = None
        self._errors.put_nowait(exc_info)

    def _stop_if_necessary(self):
        try:
            exc, msg, tb = self._errors.get(False)
            traceback.print_exception(exc, msg, tb)
            if not self.ignore_errors:
                pytest.exit(msg)
        except queue.Empty:
            pass

    def terminate_service(self):
        if self.RP is not None:
            self.RP.terminate()

    def start_launch(
            self, launch_name, mode=None, tags=None, description=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        sl_pt = {
            "name": launch_name,
            "start_time": timestamp(),
            "description": description,
            "mode": mode,
            "tags": tags
        }
        logging.debug("ReportPortal - Start launch: "
                      "request_body=%s", sl_pt)
        req_data = self.RP.start_launch(**sl_pt)
        logging.debug("ReportPortal - Launch started: "
                      "response_body=%s", req_data)

    def start_pytest_item(self, test_item=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        start_rq = {
            "name": self._get_full_name(test_item),
            "description": self._get_description(test_item),
            "tags": self._get_tags(test_item),
            "start_time": timestamp(),
            "item_type": "STEP"
        }

        logging.debug(
            "ReportPortal - Start TestItem: "
            "request_body=%s", start_rq)

        self.RP.start_test_item(**start_rq)

    def _get_full_name(self, test_item):
        return test_item.nodeid

    def _get_description(self, test_item):
        try:
            # for common items
            return test_item.function.__doc__
        except AttributeError:
            # doctest has no `function` attribute
            return test_item.reportinfo()[2]

    def _get_tags(self, test_item):
        # try to extract names of @pytest.mark.* decorators used for test item
        mark_plugin = test_item.config.pluginmanager.getplugin("mark")
        if mark_plugin:
            keywords = test_item.keywords
            return list(mark_plugin.MarkMapping(keywords)._mymarks)
        else:
            return []

    def finish_pytest_item(self, status, issue=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        fta_rq = {
            "end_time": timestamp(),
            "status": status,
            "issue": issue
        }

        logging.debug(
            "ReportPortal - Finish TestItem:"
            " request_body=%s", fta_rq)
        self.RP.finish_test_item(**fta_rq)

    def finish_launch(self, launch=None, status="rp_launch"):
        self._stop_if_necessary()
        if self.RP is None:
            return

        # To finish launch session str parameter is needed
        fl_rq = {
            "end_time": timestamp(),
            "status": status
        }
        logging.debug("ReportPortal - Finish launch: "
                      "request_body=%s", fl_rq)
        self.RP.finish_launch(**fl_rq)

    def post_log(self, message, loglevel='INFO', attachment=None):
        self._stop_if_necessary()
        if self.RP is None:
            return

        if loglevel not in self._loglevels:
            logging.warning('Incorrect loglevel = %s. Force set to INFO. '
                            'Available levels: %s.', loglevel, self._loglevels)
            loglevel = 'INFO'

        sl_rq = {
            "time": timestamp(),
            "message": message,
            "level": loglevel,
            "attachment": attachment,
        }
        self.RP.log(**sl_rq)
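
This variant is a flat, pre-hierarchy version of Example #25: items are named by nodeid and tags come from pytest's old private mark plugin. A sequential usage sketch with placeholder credentials (a live pytest item would be needed for start_pytest_item):

rp = PyTestServiceClass()
rp.init_service('http://localhost:8080', 'default_personal',
                '00000000-0000-0000-0000-000000000000',
                log_batch_size=20, ignore_errors=True)
rp.start_launch('smoke', description='launch started from a sketch')
# rp.start_pytest_item(item)  # needs a live pytest item
rp.post_log('collected 3 items')
rp.finish_pytest_item('PASSED')
rp.finish_launch(status='PASSED')
rp.terminate_service()
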
Example #29
class ReportPortalDataWriter(object):
    """Wrapper around async Report Portal service"""
    def __init__(self,
                 endpoint,
                 token,
                 project,
                 launch_name=None,
                 launch_doc=None,
                 launch_tags=None,
                 verify_ssl=False):
        """
        :param endpoint:
            link to Report Portal
        :param token:
            user token
        :param project:
            Report Portal project name
        :param launch_name:
            Report Portal launch name
        :param launch_doc:
            launch description
        :param launch_tags:
            launch tags
        :param verify_ssl:
            whether to verify SSL certificates (False skips verification)
        """
        self.endpoint = endpoint
        self.token = token
        self.project = project
        self.launch_name = launch_name
        self.launch_doc = launch_doc
        self.launch_tags = launch_tags
        self.service = None
        self.test = None
        self.verify_ssl = verify_ssl

    def start_test(self):
        """
        Start new launch in Report Portal
        """
        self.service = ReportPortalServiceAsync(endpoint=self.endpoint,
                                                project=self.project,
                                                token=self.token,
                                                error_handler=my_error_handler,
                                                verify_ssl=self.verify_ssl)
        self.service.start_launch(name=self.launch_name,
                                  start_time=timestamp(),
                                  description=self.launch_doc,
                                  tags=self.launch_tags)

    def finish_test(self):
        """
        Finish started launch in Report Portal
        """
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()
        self.service = None

    def is_test_started(self) -> bool:
        """
        Return True if the launch was started
        """
        if self.service:
            return True
        return False

    def start_test_item(self, issue, description, tags, parameters):
        """
        Start new test item inside the launch
        :param issue:
        :param description:
        :param tags:
        :param parameters:
        """
        self.test = self.service.start_test_item(issue,
                                                 description=description,
                                                 tags=tags,
                                                 start_time=timestamp(),
                                                 item_type="STEP",
                                                 parameters=parameters)

    def test_item_message(self, message, level="ERROR", attachment=None):
        """
        Add new log message inside test item
        :param message:
        :param level:
        :param attachment:
        """
        self.service.log(time=timestamp(),
                         message=message[:MAX_MESSAGE_LEN],
                         level=level,
                         attachment=attachment)

    def finish_test_item(self, defect_type_info):
        """
        Finish started test item
        :param defect_type_info:
        """
        defect_mapping = {
            'To Investigate': 'TI001',
            'No Defect': 'ND001',
            'Product Bug': 'PB001',
            'System Issue': 'SI001'
        }
        defect_type = defect_type_info['RP Defect Type']
        issue = None
        if defect_type in defect_mapping:
            issue = {
                'issue_type': defect_mapping[defect_type],
                'comment': defect_type_info['RP Comment']
            }
        self.service.finish_test_item(end_time=timestamp(),
                                      status="FAILED",
                                      issue=issue)
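
The defect_type_info argument of finish_test_item above is a plain dict keyed by 'RP Defect Type' and 'RP Comment'. A hedged end-to-end sketch (my_error_handler and MAX_MESSAGE_LEN come from the surrounding module; endpoint and token are placeholders):

writer = ReportPortalDataWriter(endpoint='http://localhost:8080',
                                token='00000000-0000-0000-0000-000000000000',
                                project='default_personal',
                                launch_name='security scan',
                                launch_doc='launch started from a sketch')
writer.start_test()
writer.start_test_item('SQL injection on /login',
                       description='finding details',
                       tags=['zap'],
                       parameters={'target': 'staging'})
writer.test_item_message('raw scanner output')
writer.finish_test_item({'RP Defect Type': 'Product Bug',
                         'RP Comment': 'confirmed manually'})
writer.finish_test()
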
Example #30
class ReportPortalDataWriter:
    def __init__(self,
                 endpoint,
                 token,
                 project,
                 log_batch_size=100,
                 launch_name=None,
                 tags=None,
                 launch_doc=None,
                 launch_id=None,
                 verify_ssl=False):
        self.endpoint = endpoint
        self.token = token
        self.project = project
        self.log_batch_size = log_batch_size
        self.launch_name = launch_name
        self.tags = tags
        self.launch_doc = launch_doc
        self.service = None
        self.test = None
        self.verify_ssl = verify_ssl
        self.launch_id = launch_id

    def start_service(self):
        self.service = ReportPortalService(endpoint=self.endpoint,
                                           project=self.project,
                                           token=self.token,
                                           log_batch_size=self.log_batch_size,
                                           verify_ssl=self.verify_ssl)
        if self.launch_id:
            self.service.launch_id = self.launch_id

    def start_test(self):
        if not self.service:
            self.start_service()
        return self.service.start_launch(name=self.launch_name,
                                         start_time=timestamp(),
                                         description=self.launch_doc,
                                         tags=self.tags)

    def finish_test(self):
        self.service.finish_launch(end_time=timestamp())
        self.service.terminate()
        self.service = None

    def is_test_started(self):
        if self.service:
            return True
        return False

    def start_test_item(self,
                        issue,
                        description,
                        tags,
                        item_type='STEP',
                        parameters=None):
        self.service.start_test_item(issue,
                                     description=description,
                                     tags=tags,
                                     start_time=timestamp(),
                                     item_type=item_type,
                                     parameters=parameters or {})

    def test_item_message(self, message, level="ERROR", attachment=None):
        if len(message) > constants.MAX_MESSAGE_LEN:
            index = 0
            while index < len(message):
                increment = constants.MAX_MESSAGE_LEN
                if index + increment > len(message):
                    increment = len(message) - index
                self.service.log(time=timestamp(),
                                 message=message[index:index + increment],
                                 level=level,
                                 attachment=attachment)
                index = index + increment
        else:
            self.service.log(time=timestamp(),
                             message=message,
                             level=level,
                             attachment=attachment)

    def finish_test_item(self, status="FAILED"):
        self.service.finish_test_item(end_time=timestamp(), status=status)
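
The distinguishing logic in this last variant is the message splitting in test_item_message. The slicing, extracted for a quick standalone check (constants.MAX_MESSAGE_LEN is whatever the surrounding module defines):

def split_message(message, max_len):
    # Mirrors the while-loop above: consecutive slices of at most max_len.
    return [message[i:i + max_len] for i in range(0, len(message), max_len)]

assert split_message('abcdefgh', 3) == ['abc', 'def', 'gh']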