Example 1
def test_utils():
    appendage = calculate_appendage("throughput")
    assert appendage == " RPS"
    appendage = calculate_appendage("response_time")
    assert appendage == " ms"
    appendage = calculate_appendage("error_rate")
    assert appendage == " %"
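
The helper calculate_appendage is called in every example on this page but its source is not shown here. A minimal sketch consistent with the assertions in Example 1 could look like the following; the empty-string fallback for unknown targets is an assumption:

def calculate_appendage(target):
    # Map a comparison metric name to the unit suffix used in report text.
    if target == 'throughput':
        return ' RPS'
    if target == 'response_time':
        return ' ms'
    if target == 'error_rate':
        return ' %'
    return ''  # assumed fallback for targets not covered by the test above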
Example 2
def create_missed_thresholds_description(missed_threshold_rate,
                                         compare_with_thresholds,
                                         issue_hash):
    description = f"Percentage of requests exceeding the threshold was {missed_threshold_rate}%. <br>"
    for color in ['yellow', 'red']:
        colored = False
        for th in compare_with_thresholds:
            if th['threshold'] == color:
                if not colored:
                    description += f"<h2>The following {color} thresholds were exceeded:</h2>"
                    colored = True
                appendage = calculate_appendage(th['target'])
                description += f"\"{th['request_name']}\" {th['target']}{appendage} " \
                               f"with value {th['metric']}{appendage} " \
                               f"exceeded threshold of {th[color]}{appendage}<br>"
    description += "<br><strong>Issue hash: </strong>" + str(issue_hash)
    return description
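
Example 2 renders the summary as HTML. Each entry of compare_with_thresholds is expected to carry the keys read above: request_name, target, metric, threshold (the color that was breached), and the yellow/red limit values. A hypothetical call with illustrative data only:

thresholds = [
    {'request_name': 'login', 'target': 'response_time', 'metric': 870,
     'threshold': 'red', 'yellow': 400, 'red': 500},
    {'request_name': 'search', 'target': 'error_rate', 'metric': 7,
     'threshold': 'yellow', 'yellow': 5, 'red': 10},
]
html = create_missed_thresholds_description(12.5, thresholds, issue_hash='a1b2c3d4')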
Example 3
def create_missed_thresholds_description(missed_threshold_rate,
                                         compare_with_thresholds,
                                         arguments):
    title = "Missed thresholds in test: " + str(arguments['simulation'])
    description = "{panel:title=" + title + \
                  "|borderStyle=solid|borderColor=#ccc|titleBGColor=#23b7c9|bgColor=#d7f0f3} \n"
    description += "{color:red}" + "Percentage of requests exceeding the threshold was {}%." \
        .format(missed_threshold_rate) + "{color} \n"
    for color in ['yellow', 'red']:
        colored = False
        for th in compare_with_thresholds:
            if th['threshold'] == color:
                if not colored:
                    description += f"h3. The following {color} thresholds were exceeded:\n"
                    colored = True
                appendage = calculate_appendage(th['target'])
                description += f"\"{th['request_name']}\" {th['target']}{appendage} " \
                               f"with value {th['metric']}{appendage} " \
                               f"exceeded threshold of {th[color]}{appendage}\n"
    description += "{panel}"
    return description
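
Example 3 builds the same report in Jira wiki markup ({panel}, {color}, h3. headings) rather than HTML, and takes the parsed launch arguments instead of an issue hash; only the 'simulation' key is read. A hypothetical call, reusing the thresholds list from the previous example (the simulation name is illustrative):

jira_text = create_missed_thresholds_description(12.5, thresholds,
                                                 arguments={'simulation': 'checkout_load_test'})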
Example 4
    def report_test_results(self, errors, performance_degradation_rate,
                            compare_with_baseline, missed_threshold_rate,
                            compare_with_thresholds):
        self.create_project()
        service = ReportPortalService(endpoint=self.rp_url,
                                      project=self.rp_project,
                                      token=self.rp_token,
                                      error_handler=self.my_error_handler,
                                      verify_ssl=self.verify_ssl)

        # Start launch.
        service.start_launch(
            name=self.rp_launch_name + ": performance testing results",
            start_time=self.timestamp(),
            description='Test name - {}'.format(self.args['simulation']))
        errors_len = len(errors)

        if errors_len > 0:
            functional_error_test_item = service.start_test_item(
                name="Functional errors",
                start_time=self.timestamp(),
                description="This simulation has failed requests",
                item_type="SUITE")
            for key in errors:
                # Start test item.
                item_name = self.get_item_name(errors[key])
                item_id = service.start_test_item(
                    name=item_name,
                    parent_item_id=functional_error_test_item,
                    description="This request was failed {} times".format(
                        errors[key]['Error count']),
                    start_time=self.timestamp(),
                    item_type="STEP",
                    parameters={
                        "simulation": self.args['simulation'],
                        'test type': self.args['type']
                    })

                self.log_message(item_id, service, 'Request name', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Method', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Request URL', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Request_params',
                                 errors[key], 'WARN')
                self.log_message(item_id, service, 'Request headers',
                                 errors[key], 'INFO')
                self.log_message(item_id, service, 'Error count', errors[key],
                                 'WARN')
                self.log_message(item_id, service, 'Error_message',
                                 errors[key], 'WARN')
                self.log_message(item_id, service, 'Response code',
                                 errors[key], 'WARN')
                self.log_message(item_id, service, 'Response', errors[key],
                                 'WARN')
                self.log_unique_error_id(item_id, service,
                                         errors[key]['Request name'],
                                         errors[key]['Method'],
                                         errors[key]['Response code'])

                service.finish_test_item(item_id=item_id,
                                         end_time=self.timestamp(),
                                         status="FAILED")
            service.finish_test_item(item_id=functional_error_test_item,
                                     end_time=self.timestamp(),
                                     status="FAILED")
        else:
            item_id = service.start_test_item(
                name="Functional errors",
                start_time=self.timestamp(),
                item_type="STEP",
                description='This simulation has no functional errors')
            service.finish_test_item(item_id=item_id,
                                     end_time=self.timestamp(),
                                     status="PASSED")

        if performance_degradation_rate > self.performance_degradation_rate:
            baseline_item_id = service.start_test_item(
                name="Compare to baseline",
                start_time=self.timestamp(),
                description="Test \"{}\" failed with performance degradation"
                " rate {}".format(self.args['simulation'],
                                  performance_degradation_rate),
                item_type="SUITE")

            service.log(
                item_id=baseline_item_id,
                time=self.timestamp(),
                message="The following requests are slower than baseline:",
                level="{}".format('INFO'))
            for request in compare_with_baseline:
                item_id = service.start_test_item(
                    name="\"{}\" reached {} ms by {}. Baseline {} ms.".format(
                        request['request_name'], request['response_time'],
                        self.args['comparison_metric'], request['baseline']),
                    parent_item_id=baseline_item_id,
                    start_time=self.timestamp(),
                    item_type="STEP",
                    parameters={
                        'simulation': self.args['simulation'],
                        'test type': self.args['type']
                    })

                service.log(item_id=item_id,
                            time=self.timestamp(),
                            message="\"{}\" reached {} ms by {}."
                            " Baseline {} ms.".format(
                                request['request_name'],
                                request['response_time'],
                                self.args['comparison_metric'],
                                request['baseline']),
                            level="{}".format('WARN'))
                service.finish_test_item(item_id=item_id,
                                         end_time=self.timestamp(),
                                         status="FAILED")
            service.log(time=self.timestamp(),
                        message=hashlib.sha256(
                            "{} performance degradation".format(
                                self.args['simulation']).strip().encode(
                                    'utf-8')).hexdigest(),
                        level='ERROR')

            service.finish_test_item(item_id=baseline_item_id,
                                     end_time=self.timestamp(),
                                     status="FAILED")
        else:
            item_id = service.start_test_item(
                name="Compare to baseline",
                start_time=self.timestamp(),
                item_type="STEP",
                description='Performance degradation rate less than {}'.format(
                    self.performance_degradation_rate))
            service.finish_test_item(item_id=item_id,
                                     end_time=self.timestamp(),
                                     status="PASSED")

        if missed_threshold_rate > self.missed_thresholds_rate:
            thresholds_item_id = service.start_test_item(
                name="Compare with thresholds",
                start_time=self.timestamp(),
                description="Test \"{}\" failed with missed thresholds"
                " rate {}".format(self.args['simulation'],
                                  missed_threshold_rate),
                item_type="SUITE")

            for color in ["yellow", "red"]:
                colored = False
                for th in compare_with_thresholds:
                    if th['threshold'] == color:
                        item_id = service.start_test_item(
                            name="{} threshold for  \"{}\"".format(
                                color, th['request_name']),
                            start_time=self.timestamp(),
                            parent_item_id=thresholds_item_id,
                            item_type="STEP",
                            parameters={
                                'simulation': self.args['simulation'],
                                'test type': self.args['type']
                            })
                        if not colored:
                            service.log(
                                item_id=item_id,
                                time=self.timestamp(),
                                message=
                                f"The following {color} thresholds were exceeded:",
                                level="INFO")
                        appendage = calculate_appendage(th['target'])
                        service.log(
                            item_id=item_id,
                            time=self.timestamp(),
                            message=
                            f"\"{th['request_name']}\" {th['target']}{appendage} with value {th['metric']}{appendage} exceeded threshold of {th[color]}{appendage}",
                            level="WARN")
                        service.finish_test_item(item_id=item_id,
                                                 end_time=self.timestamp(),
                                                 status="FAILED")
            service.log(item_id=item_id,
                        time=self.timestamp(),
                        message=hashlib.sha256("{} missed thresholds".format(
                            self.args['simulation']).strip().encode(
                                'utf-8')).hexdigest(),
                        level='ERROR')

            service.finish_test_item(item_id=thresholds_item_id,
                                     end_time=self.timestamp(),
                                     status="FAILED")
        else:
            item_id = service.start_test_item(
                name="Compare with thresholds",
                start_time=self.timestamp(),
                item_type="STEP",
                description='Missed thresholds rate less than {}'.format(
                    self.missed_thresholds_rate))
            service.finish_test_item(item_id=item_id,
                                     end_time=self.timestamp(),
                                     status="PASSED")
        # Finish launch.
        service.finish_launch(end_time=self.timestamp())

        service.terminate()
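
Example 4 depends on hashlib, on ReportPortalService from the reportportal-client package, and on several members of the enclosing reporter class that are not shown here (self.timestamp(), self.my_error_handler, self.get_item_name(), self.log_message(), self.log_unique_error_id(), self.args, and the rp_* connection settings). The timestamp and error-handler helpers are commonly written as below; this is an assumed sketch following the usual reportportal-client conventions, not the class's actual implementation:

import traceback
from time import time

def timestamp():
    # ReportPortal expects epoch time in milliseconds, passed as a string.
    return str(int(time() * 1000))

def my_error_handler(exc_info):
    # Log client-side errors instead of aborting the report upload.
    traceback.print_exception(*exc_info)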