Example #1
    def point_from_locust(self, ts, sid, data):
        """
        :type ts: str
        :type sid: str
        :type data: dict
        :rtype: DataPoint
        """
        point = DataPoint(int(ts))
        point[DataPoint.SOURCE_ID] = sid
        overall = KPISet()
        for item in data['stats']:
            if ts not in item['num_reqs_per_sec']:
                continue

            kpiset = KPISet()
            kpiset[KPISet.SAMPLE_COUNT] = item['num_reqs_per_sec'][ts]
            kpiset[KPISet.CONCURRENCY] = data['user_count']
            if item['num_requests']:
                avg_rt = (item['total_response_time'] / 1000.0) / item['num_requests']
                kpiset.sum_rt = item['num_reqs_per_sec'][ts] * avg_rt
            point[DataPoint.CURRENT][item['name']] = kpiset
            overall.merge_kpis(kpiset)

        point[DataPoint.CURRENT][''] = overall
        point.recalculate()
        return point
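For orientation, here is a hedged sketch (not taken from the project) of the slave-report dict the method above reads. The key names come straight from the lookups in the code; the concrete values, the timestamp and the reader object are made up.

    # Illustrative slave report in the shape point_from_locust() expects.
    sample_report = {
        'user_count': 10,                               # -> KPISet.CONCURRENCY
        'stats': [{
            'name': '/index',
            'num_requests': 5,
            'total_response_time': 250,                 # divided by 1000.0 above, so milliseconds
            'num_reqs_per_sec': {'1526559300': 5},      # keyed by the ts argument
        }],
    }
    # point = reader.point_from_locust('1526559300', 'locust-slave-0', sample_report)
    # point[DataPoint.CURRENT]['/index'][KPISet.SAMPLE_COUNT]  ->  5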
Example #2
 def new():
     subj = KPISet(perc_levels=(100.0,))
     subj[KPISet.RESP_TIMES].add(0.1)
     subj[KPISet.RESP_TIMES].add(0.01)
     subj[KPISet.RESP_TIMES].add(0.001)
     subj.recalculate()
     return subj
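A hedged usage sketch for the factory above: with perc_levels=(100.0,), recalculate() fills KPISet.PERCENTILES from the RESP_TIMES counter, so the only level present should be '100.0', i.e. the largest recorded time. The print lines are illustrative and not part of the original test.

    subj = new()
    print(sorted(subj[KPISet.PERCENTILES].keys()))   # expected: ['100.0']
    print(subj[KPISet.PERCENTILES]['100.0'])         # expected: 0.1, the maximum sample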
Example #3
    def point_from_locust(timestamp, sid, data):
        """
        :type timestamp: str
        :type sid: str
        :type data: dict
        :rtype: DataPoint
        """
        point = DataPoint(int(timestamp))
        point[DataPoint.SOURCE_ID] = sid
        overall = KPISet()
        for item in data['stats']:
            if timestamp not in item['num_reqs_per_sec']:
                continue

            kpiset = KPISet()
            kpiset[KPISet.SAMPLE_COUNT] = item['num_reqs_per_sec'][timestamp]
            kpiset[KPISet.CONCURRENCY] = data['user_count']
            kpiset[KPISet.BYTE_COUNT] = item['total_content_length']
            if item['num_requests']:
                avg_rt = (item['total_response_time'] / 1000.0) / item['num_requests']
                kpiset.sum_rt = item['num_reqs_per_sec'][timestamp] * avg_rt

            for err in data['errors'].values():
                if err['name'] == item['name']:
                    new_err = KPISet.error_item_skel(err['error'], None, err['occurences'], KPISet.ERRTYPE_ERROR,
                                                     Counter())
                    KPISet.inc_list(kpiset[KPISet.ERRORS], ("msg", err['error']), new_err)
                    kpiset[KPISet.FAILURES] += err['occurences']

            point[DataPoint.CURRENT][item['name']] = kpiset
            overall.merge_kpis(kpiset)

        point[DataPoint.CURRENT][''] = overall
        point.recalculate()
        return point
Example #4
    def test_prepare_no_filename_in_settings(self):
        obj = JUnitXMLReporter()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict.from_dict({"data-source": "sample-labels"})

        obj.prepare()
        datapoint = DataPoint(0, [])

        cumul_data = KPISet.from_dict({
            KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
            KPISet.RESP_TIMES: Counter({
                0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
                0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
                0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
                0.019: 1, 0.015: 1
            }),
            KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                             'urls': Counter({'http://192.168.25.8/': 7373}), KPISet.RESP_CODES: '403'}],
            KPISet.STDEV_RESP_TIME: 0.04947974228872108,
            KPISet.AVG_LATENCY: 0.0002825639815220692,
            KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
            KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                                 '100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
            KPISet.SUCCESSES: 29658,
            KPISet.SAMPLE_COUNT: 59314,
            KPISet.CONCURRENCY: 0,
            KPISet.AVG_RESP_TIME: 0.0005440536804127192,
            KPISet.FAILURES: 29656})

        datapoint[DataPoint.CUMULATIVE][""] = cumul_data

        obj.aggregated_second(datapoint)
        obj.post_process()

        self.assertTrue(os.path.exists(obj.report_file_path))
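Example #5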
    def test_log_messages_samples_count(self):
        obj = FinalStatus()
        obj.engine = EngineEmul
        obj.parameters = BetterDict()
        obj.log = logger_mock()
        obj.parameters.merge({"failed-labels": False, "percentiles": False, "summary": True, "test-duration": False})

        datapoint = DataPoint(None, None)

        cumul_data = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
                  0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
                  0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
                  0.019: 1, 0.015: 1}),
             KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                              'urls': Counter({'http://192.168.25.8/': 7373}), KPISet.RESP_CODES: '403'}],
             KPISet.STDEV_RESP_TIME: 0.04947974228872108,
             KPISet.AVG_LATENCY: 0.0002825639815220692,
             KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
             KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                                                    '100.0': 0.081, '99.0': 0.003, '50.0': 0.0}),
             KPISet.SUCCESSES: 29658,
             KPISet.SAMPLE_COUNT: 59314,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005440536804127192,
             KPISet.FAILURES: 29656})

        datapoint[DataPoint.CUMULATIVE][""] = cumul_data
        obj.last_sec = datapoint

        obj.post_process()

        self.assertEqual("Samples count: 59314, 50.00% failures\n", obj.log.info_buf.getvalue())
Example #6
    def point_from_locust(timestamp, sid, data):
        """
        :type timestamp: str
        :type sid: str
        :type data: dict
        :rtype: DataPoint
        """
        point = DataPoint(int(timestamp))
        point[DataPoint.SOURCE_ID] = sid
        overall = KPISet()
        for item in data['stats']:
            if timestamp not in item['num_reqs_per_sec']:
                continue

            kpiset = KPISet()
            kpiset[KPISet.SAMPLE_COUNT] = item['num_reqs_per_sec'][timestamp]
            kpiset[KPISet.CONCURRENCY] = data['user_count']
            kpiset[KPISet.BYTE_COUNT] = item['total_content_length']
            if item['num_requests']:
                avg_rt = (item['total_response_time'] / 1000.0) / item['num_requests']
                kpiset.sum_rt = item['num_reqs_per_sec'][timestamp] * avg_rt
            point[DataPoint.CURRENT][item['name']] = kpiset
            overall.merge_kpis(kpiset)

        point[DataPoint.CURRENT][''] = overall
        point.recalculate()
        return point
Example #7
 def __get_kpi_errors(self, errors):
     result = []
     for msg in errors:
         kpi_error = KPISet.error_item_skel(
             error=msg,
             ret_c=errors[msg]['rc'],
             cnt=errors[msg]['count'],
             errtype=KPISet.ERRTYPE_ERROR,  # TODO: what about asserts?
             urls=Counter(), tag=None)
         result.append(kpi_error)
     return result
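The helper above walks a message-keyed mapping; a minimal sketch of that input shape, with made-up messages and counts (the double-underscore method name is mangled outside its class, so the call is only indicated):

    errors = {
        "Connection refused": {"rc": None, "count": 3},
        "HTTP 503 from backend": {"rc": "503", "count": 12},
    }
    # kpi_errors = reader.__get_kpi_errors(errors)
    # -> two dicts built by KPISet.error_item_skel(), ready to be fed to KPISet.inc_list()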
Example #8
    def point_from_locust(timestamp, sid, data):
        """
        :type timestamp: str
        :type sid: str
        :type data: dict
        :rtype: DataPoint
        """
        point = DataPoint(int(timestamp))
        point[DataPoint.SOURCE_ID] = sid
        overall = KPISet()
        for item in data['stats']:
            if timestamp not in item['num_reqs_per_sec']:
                continue

            kpiset = KPISet()
            kpiset[KPISet.SAMPLE_COUNT] = item['num_reqs_per_sec'][timestamp]
            kpiset[KPISet.CONCURRENCY] = data['user_count']
            kpiset[KPISet.BYTE_COUNT] = item['total_content_length']
            if item['num_requests']:
                avg_rt = (item['total_response_time'] /
                          1000.0) / item['num_requests']
                kpiset.sum_rt = item['num_reqs_per_sec'][timestamp] * avg_rt

            for err in data['errors'].values():
                if err['name'] == item['name']:
                    new_err = KPISet.error_item_skel(err['error'], None,
                                                     err['occurences'],
                                                     KPISet.ERRTYPE_ERROR,
                                                     Counter(), None)
                    KPISet.inc_list(kpiset[KPISet.ERRORS],
                                    ("msg", err['error']), new_err)
                    kpiset[KPISet.FAILURES] += err['occurences']

            kpiset[KPISet.SUCCESSES] = kpiset[KPISet.SAMPLE_COUNT] - kpiset[
                KPISet.FAILURES]
            point[DataPoint.CURRENT][item['name']] = kpiset
            overall.merge_kpis(kpiset, sid)

        point[DataPoint.CURRENT][''] = overall
        point.recalculate()
        return point
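Example #9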
 def test_kpiset_merge_many_rtimes(self):
     vals = {
         round(random() * 20 + 0.1,
               int(random() * 3) + 2): int(random() * 3 + 1)
         for _ in range(1000)
     }
     src = KPISet()
     src[KPISet.RESP_TIMES].update(vals)
     dst = KPISet()
     dst.rtimes_len = 100
     for _ in range(100):
         dst.merge_kpis(src)
         dst.compact_times()
         self.assertEqual(100, len(dst[KPISet.RESP_TIMES]))
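A smaller sketch of the same invariant the test above checks: rtimes_len caps how many distinct response-time buckets survive compact_times(). The numbers are illustrative.

    dst = KPISet()
    dst.rtimes_len = 10
    dst[KPISet.RESP_TIMES].update({i / 100.0: 1 for i in range(1000)})   # 1000 distinct times
    dst.compact_times()
    assert len(dst[KPISet.RESP_TIMES]) <= 10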
Example #10
 def __get_kpiset(self, aggr, kpi, label):
     kpiset = KPISet()
     kpiset[KPISet.FAILURES] = kpi['ec']
     kpiset[KPISet.CONCURRENCY] = kpi['na']
     kpiset[KPISet.SAMPLE_COUNT] = kpi['n']
     assert kpi['n'] > 0 and kpi['n'] >= kpi['ec']
     kpiset[KPISet.SUCCESSES] = kpi['n'] - kpi['ec']
     kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0
     kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0
     perc_map = {'90line': 90.0, "95line": 95.0, "99line": 99.0}
     for field, level in iteritems(perc_map):
         kpiset[KPISet.PERCENTILES][str(level)] = aggr[label][field] / 1000.0
     return kpiset
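Since the aggregated payload carries only averages, the helper above reconstructs the running sums that recalculate() works from: with t_avg = 120 ms over n = 50 samples, sum_rt grows by 120 * 50 / 1000.0 = 6.0 seconds of accumulated response time, which can later be divided by SAMPLE_COUNT to recover AVG_RESP_TIME (0.12 s here). The numbers are made up for illustration.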
Example #11
    def __add_err_diff(self, point, err_diff):
        for label in err_diff:
            point_label = '' if label == 'ALL' else label
            if point_label not in point[DataPoint.CURRENT]:
                self.log.warning("Got inconsistent kpi/error data for label: %s", point_label)
                kpiset = KPISet()
                point[DataPoint.CURRENT][point_label] = kpiset
                kpiset[KPISet.SAMPLE_COUNT] = sum([item['count'] for item in err_diff[label].values()])
            else:
                kpiset = point[DataPoint.CURRENT][point_label]

            kpiset[KPISet.ERRORS] = self.__get_kpi_errors(err_diff[label])
            kpiset[KPISet.FAILURES] = sum([x['cnt'] for x in kpiset[KPISet.ERRORS]])
            kpiset[KPISet.SAMPLE_COUNT] = kpiset[KPISet.SUCCESSES] + kpiset[KPISet.FAILURES]
            assert kpiset[KPISet.SAMPLE_COUNT] > 0, point_label
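Example #12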
def random_datapoint(n):
    point = DataPoint(n)
    overall = point[DataPoint.CURRENT].setdefault('', KPISet())
    overall[KPISet.CONCURRENCY] = r(100)
    overall[KPISet.SAMPLE_COUNT] = int(100 * r(1000)) + 1
    overall[KPISet.SUCCESSES] = int(overall[KPISet.SAMPLE_COUNT] * random())
    overall[KPISet.FAILURES] = overall[KPISet.SAMPLE_COUNT] - overall[
        KPISet.SUCCESSES]
    overall[KPISet.BYTE_COUNT] = int(random() * 1000) + 1
    overall[KPISet.PERCENTILES]['25.0'] = r(10)
    overall[KPISet.PERCENTILES]['50.0'] = r(20)
    overall[KPISet.PERCENTILES]['75.0'] = r(30)
    overall[KPISet.PERCENTILES]['90.0'] = r(40)
    overall[KPISet.PERCENTILES]['99.0'] = r(50)
    overall[KPISet.PERCENTILES]['100.0'] = r(100)
    overall[KPISet.RESP_CODES][rc()] = 1

    overall[KPISet.AVG_RESP_TIME] = r(100)
    overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
    overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0

    overall.sum_rt = overall[KPISet.AVG_RESP_TIME] * overall[
        KPISet.SAMPLE_COUNT]
    overall.sum_cn = overall[KPISet.AVG_CONN_TIME] * overall[
        KPISet.SAMPLE_COUNT]
    overall.sum_lt = overall[KPISet.AVG_LATENCY] * overall[KPISet.SAMPLE_COUNT]
    cumul = point[DataPoint.CUMULATIVE].setdefault('', KPISet())
    cumul.merge_kpis(overall)
    cumul.recalculate()

    point.recalculate()

    overall[KPISet.AVG_RESP_TIME] = r(100)
    overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
    overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
    return point
Example #13
    def add_data(self, data):
        """
        Append data

        :type data: bzt.modules.aggregator.DataPoint
        """
        while len(self.body):
            self.body.pop(0)

        self.body.append(Text(("stat-hdr", " Percentiles: "), align=RIGHT))
        overall = data.get(self.key).get('', KPISet())
        for key in sorted(overall.get(KPISet.PERCENTILES).keys(), key=float):
            dat = (float(key), overall[KPISet.PERCENTILES][key])
            self.body.append(
                Text(("stat-txt", "%.1f%%: %.3f" % dat), align=RIGHT))
Example #14
 def __get_datapoint(self, n):
     point = DataPoint(n)
     overall = point[DataPoint.CURRENT].get('', KPISet())
     overall[KPISet.CONCURRENCY] = r(100)
     overall[KPISet.SAMPLE_COUNT] = int(100 * r(1000))
     overall[KPISet.FAILURES] = overall[KPISet.SAMPLE_COUNT] / 2.0
     overall[KPISet.AVG_RESP_TIME] = r(100)
     overall[KPISet.AVG_CONN_TIME] = overall[KPISet.AVG_RESP_TIME] / 3.0
     overall[KPISet.AVG_LATENCY] = 2.0 * overall[KPISet.AVG_RESP_TIME] / 3.0
     overall[KPISet.PERCENTILES]['25'] = r(10)
     overall[KPISet.PERCENTILES]['50'] = r(20)
     overall[KPISet.PERCENTILES]['75'] = r(30)
     overall[KPISet.PERCENTILES]['90'] = r(40)
     overall[KPISet.PERCENTILES]['99'] = r(50)
     overall[KPISet.PERCENTILES]['100'] = r(100)
     overall[KPISet.RESP_CODES][rc()] = 1
     return point
Example #15
    def _calculate_datapoints(self, final_pass=False):
        if self.master_id is None:
            return

        try:
            data = self.client.get_kpis(self.master_id, self.min_ts)
        except URLError as exc:
            self.log.warning(
                "Failed to get result KPIs, will retry in %s seconds...",
                self.client.timeout)
            self.log.debug("Full exception: %s", traceback.format_exc())
            time.sleep(self.client.timeout)
            data = self.client.get_kpis(self.master_id, self.min_ts)
            self.log.info("Succeeded with retry")

        for label in data:
            if label['kpis']:
                label['kpis'].pop(
                    -1)  # never take last second since it could be incomplete

        timestamps = []
        for label in data:
            if label['label'] == 'ALL':
                timestamps.extend([kpi['ts'] for kpi in label['kpis']])

        for tstmp in timestamps:
            point = DataPoint(tstmp)
            for label in data:
                for kpi in label['kpis']:
                    if kpi['ts'] != tstmp:
                        continue

                    kpiset = KPISet()
                    kpiset[KPISet.FAILURES] = kpi['ec']
                    kpiset[KPISet.CONCURRENCY] = kpi['na']
                    kpiset[KPISet.SAMPLE_COUNT] = kpi['n']
                    kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0
                    kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0
                    label_name = '' if label['label'] == 'ALL' else label['label']
                    point[DataPoint.CURRENT][label_name] = kpiset

            point.recalculate()
            self.min_ts = point[DataPoint.TIMESTAMP] + 1
            yield point
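_calculate_datapoints() above is a generator, so its results have to be drained; a hedged sketch of doing that, where 'module' stands in for whatever object defines the method:

    # Illustrative only: iterate the generator and peek at the overall KPISet of each point.
    for point in module._calculate_datapoints():
        overall = point[DataPoint.CURRENT].get('', KPISet())
        print(point[DataPoint.TIMESTAMP], overall[KPISet.SAMPLE_COUNT])

Example #16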
 def __extend_reported_data(dp_list):
     for dp in dp_list:
         for data in dp['cumulative'], dp['current']:
             del data['']
             for key in list(data.keys()):  # list() matters because the dict changes size during the loop
                 sep = key.rindex('-')
                 original_label, state_idx = key[:sep], int(key[sep + 1:])
                 kpi_set = data.pop(key)
                 if original_label not in data:
                     data[original_label] = {}
                 data[original_label][state_idx] = kpi_set
                 if '' not in data:
                     data[''] = dict()
                 if state_idx not in data['']:
                     data[''][state_idx] = KPISet()
                 data[''][state_idx].merge_kpis(kpi_set)
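The helper above assumes labels of the form "<original label>-<state index>" and regroups them by label and state; a hedged sketch of the transformation on made-up input:

    dp_list = [{
        'current':    {'': KPISet(), 'login-0': KPISet(), 'login-1': KPISet()},
        'cumulative': {'': KPISet(), 'login-0': KPISet(), 'login-1': KPISet()},
    }]
    # __extend_reported_data(dp_list)   # mangled name if called from outside its class
    # afterwards: dp_list[0]['current']['login'] == {0: <KPISet>, 1: <KPISet>}
    # and dp_list[0]['current'][''] holds per-state KPISets merged across all labels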
Example #17
    def add_data(self, data):
        """
        Append data point

        :type data: bzt.modules.aggregator.DataPoint
        """
        while len(self.body):
            self.body.pop(0)

        overall = data.get(self.key).get('', KPISet())

        self.body.append(Text(("stat-hdr", " Response Codes: "), align=RIGHT))

        for key in sorted(overall.get(KPISet.RESP_CODES).keys()):
            if overall[KPISet.SAMPLE_COUNT]:
                part = 100 * float(overall[KPISet.RESP_CODES][key]) / overall[
                    KPISet.SAMPLE_COUNT]
            else:
                part = 0

            dat = (
                key,
                part,
                overall[KPISet.RESP_CODES][key],
            )
            if not len(key):
                style = "stat-nonhttp"
            elif key[0] == '2':
                style = 'stat-2xx'
            elif key[0] == '3':
                style = 'stat-3xx'
            elif key[0] == '4':
                style = 'stat-4xx'
            elif key[0] == '5':
                style = 'stat-5xx'
            else:
                style = "stat-nonhttp"
            self.body.append(
                Text((style, "%s:  %.2f%% (%s)" % dat), align=RIGHT))

        dat = (100, overall[KPISet.SAMPLE_COUNT])
        self.body.append(
            Text(('stat-txt', "All: %.2f%% (%s)" % dat), align=RIGHT))
Example #18
    def test_log_messages_percentiles(self):
        obj = FinalStatus()
        obj.engine = EngineEmul
        obj.parameters = BetterDict()
        obj.log = logger_mock()
        obj.parameters.merge({"failed-labels": False, "percentiles": True, "summary": False, "test-duration":False})

        datapoint = DataPoint(None, None)

        cumul_data = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
                  0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
                  0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
                  0.019: 1, 0.015: 1}),
             KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                              'urls': Counter({'http://192.168.25.8/': 7373}), KPISet.RESP_CODES: '403'}],
             KPISet.STDEV_RESP_TIME: 0.04947974228872108,
             KPISet.AVG_LATENCY: 0.0002825639815220692,
             KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
             KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                                                    '100.0': 0.081, '99.0': 0.003, '50.0': 0.0}),
             KPISet.SUCCESSES: 29658,
             KPISet.SAMPLE_COUNT: 59314,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005440536804127192,
             KPISet.FAILURES: 29656})

        datapoint[DataPoint.CUMULATIVE][""] = cumul_data
        obj.last_sec = datapoint

        obj.post_process()
        target_output = ("Average times: total 0.001, latency 0.000, connect 0.000\n"
                         "Percentile 0.0%: 0.000\n"
                         "Percentile 50.0%: 0.000\n"
                         "Percentile 90.0%: 0.001\n"
                         "Percentile 95.0%: 0.001\n"
                         "Percentile 99.0%: 0.003\n"
                         "Percentile 99.9%: 0.008\n"
                         "Percentile 100.0%: 0.081\n"
                         )
        self.assertEqual(target_output, obj.log.info_buf.getvalue())
Example #19
    def add_data(self, data):
        """
        Append data

        :type data: bzt.modules.aggregator.DataPoint
        """
        while len(self.body):
            self.body.pop(0)

        self.body.append(Text(("stat-hdr", " Average Times: "), align=RIGHT))
        overall = data.get(self.key).get('', KPISet())
        self.body.append(
            Text(("stat-txt", "Elapsed: %.3f" % overall[KPISet.AVG_RESP_TIME]),
                 align=RIGHT))
        self.body.append(
            Text(("stat-txt", "Connect: %.3f" % overall[KPISet.AVG_CONN_TIME]),
                 align=RIGHT))
        self.body.append(
            Text(("stat-txt", "Latency: %.3f" % overall[KPISet.AVG_LATENCY]),
                 align=RIGHT))
Example #20
    def test_prepare_filename_in_settings(self):
        # test path parameter from config
        obj = JUnitXMLReporter()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict()

        path_from_config = tempfile.mktemp(suffix='.xml', prefix='junit-xml-path-in-settings',
                                           dir=obj.engine.artifacts_dir)

        obj.parameters.merge({"filename": path_from_config, "data-source": "sample-labels"})

        obj.prepare()

        datapoint = DataPoint(0, [])

        cumul_data = KPISet.from_dict({
            KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
            KPISet.RESP_TIMES: Counter({
                0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
                0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
                0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
                0.019: 1, 0.015: 1
            }),
            KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                             'urls': Counter({'http://192.168.25.8/': 7373}), KPISet.RESP_CODES: '403'}],
            KPISet.STDEV_RESP_TIME: 0.04947974228872108,
            KPISet.AVG_LATENCY: 0.0002825639815220692,
            KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
            KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001, '100.0': 0.081,
                                 '99.0': 0.003, '50.0': 0.0},
            KPISet.SUCCESSES: 29658,
            KPISet.SAMPLE_COUNT: 59314,
            KPISet.CONCURRENCY: 0,
            KPISet.AVG_RESP_TIME: 0.0005440536804127192,
            KPISet.FAILURES: 29656})

        datapoint[DataPoint.CUMULATIVE][""] = cumul_data
        obj.aggregated_second(datapoint)
        obj.post_process()

        self.assertTrue(os.path.exists(obj.report_file_path))
Example #21
    def add_data(self, data):
        """
        New datapoint notification

        :type data: bzt.modules.aggregator.DataPoint
        """
        overall = data[DataPoint.CURRENT].get('', KPISet())
        # self.log.debug("Got data for second: %s", to_json(data))

        active = int(math.floor(overall[KPISet.SAMPLE_COUNT] * overall[
            KPISet.AVG_RESP_TIME]))
        self.graphs.append(overall[KPISet.CONCURRENCY],
                           min(overall[KPISet.CONCURRENCY], active),
                           overall[KPISet.SAMPLE_COUNT],
                           overall[KPISet.FAILURES],
                           overall[KPISet.AVG_RESP_TIME],
                           overall[KPISet.AVG_CONN_TIME],
                           overall[KPISet.AVG_LATENCY], )

        self.latest_stats.add_data(data)
        self.cumulative_stats.add_data(data)
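Example #22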
    def test_merging(self):
        dst = DataPoint(0)
        src = DataPoint(0)
        src[DataPoint.CUMULATIVE].setdefault('', KPISet())
        src[DataPoint.CUMULATIVE][''].sum_rt = 0.5

        src[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 1
        dst.merge_point(src)
        self.assertEquals(0.5, dst[DataPoint.CUMULATIVE][''].sum_rt)
        self.assertEquals(0.5, dst[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])

        src[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 3
        dst.merge_point(src)
        self.assertEquals(4, dst[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT])
        self.assertEquals(1, dst[DataPoint.CUMULATIVE][''].sum_rt)
        self.assertEquals(0.25, dst[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])

        src[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT] = 6
        dst.merge_point(src)
        self.assertEquals(10, dst[DataPoint.CUMULATIVE][''][KPISet.SAMPLE_COUNT])
        self.assertEquals(1.5, dst[DataPoint.CUMULATIVE][''].sum_rt)
        self.assertEquals(0.15, dst[DataPoint.CUMULATIVE][''][KPISet.AVG_RESP_TIME])
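The expected values above follow from merge_point() summing sums and counts, with AVG_RESP_TIME = sum_rt / SAMPLE_COUNT: 0.5 / 1 = 0.5, then 1.0 / 4 = 0.25, then 1.5 / 10 = 0.15.

Example #23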
    def test_log_messages_failed_labels(self):
        obj = FinalStatus()
        obj.engine = EngineEmul
        obj.parameters = BetterDict()
        obj.log = logger_mock()
        obj.parameters.merge({"failed-labels": True, "percentiles": False, "summary": False, "test-duration": False})

        datapoint = DataPoint(None, None)
        cumul_data = datapoint[DataPoint.CUMULATIVE]

        cumul_data[""] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
                  0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
                  0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
                  0.019: 1, 0.015: 1}),
             KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                              'urls': Counter({'http://192.168.1.1/anotherquery': 7373}), KPISet.RESP_CODES: '403'}],
             KPISet.STDEV_RESP_TIME: 0.04947974228872108,
             KPISet.AVG_LATENCY: 0.0002825639815220692,
             KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
             KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                                                    '100.0': 0.081, '99.0': 0.003, '50.0': 0.0}),
             KPISet.SUCCESSES: 29658,
             KPISet.SAMPLE_COUNT: 59314,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005440536804127192,
             KPISet.FAILURES: 29656})

        cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341,
                  0.004: 121,
                  0.005: 66, 0.006: 36, 0.007: 33, 0.008: 18,
                  0.009: 12, 0.011: 6,
                  0.01: 5, 0.013: 2, 0.017: 2, 0.012: 2, 0.079: 1,
                  0.016: 1,
                  0.014: 1, 0.019: 1, 0.04: 1, 0.081: 1}),
             KPISet.ERRORS: [],
             KPISet.STDEV_RESP_TIME: 0.04073402130687656,
             KPISet.AVG_LATENCY: 1.7196034796682178e-06,
             KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
             KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0,
                                                    '99.9': 0.009,
                                                    '90.0': 0.001,
                                                    '100.0': 0.081,
                                                    '99.0': 0.004,
                                                    '50.0': 0.0}),
             KPISet.SUCCESSES: 29658,
             KPISet.SAMPLE_COUNT: 29658,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005164542450603551, KPISet.FAILURES: 0})

        cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 6.1707580253574335e-06,
             KPISet.RESP_TIMES: Counter({0.0: 14941, 0.001: 13673, 0.002: 506,
                                         0.003: 289, 0.004: 103,
                                         0.005: 59, 0.006: 37, 0.008: 14,
                                         0.007: 13, 0.009: 8, 0.01: 3,
                                         0.011: 2, 0.016: 2, 0.014: 2,
                                         0.017: 1, 0.013: 1, 0.015: 1,
                                         0.04: 1}),
             KPISet.ERRORS: [
                 {'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                  'urls': Counter(
                      {'http://192.168.1.1/anotherquery': 7373}),
                  KPISet.RESP_CODES: '403'}],
             KPISet.STDEV_RESP_TIME: 0.032465137860758844,
             KPISet.AVG_LATENCY: 0.0005634272997032645,
             KPISet.RESP_CODES: Counter({'403': 29656}),
             KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0,
                                                    '99.9': 0.008, '90.0': 0.001,
                                                    '100.0': 0.04, '99.0': 0.003,
                                                    '50.0': 0.0}),
             KPISet.SUCCESSES: 0,
             KPISet.SAMPLE_COUNT: 29656,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005716549770704078,
             KPISet.FAILURES: 29656})

        cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 17219, 0.001: 11246, 0.002: 543,
                  0.003: 341, 0.004: 121,
                  0.005: 66, 0.006: 36, 0.007: 33, 0.008: 18,
                  0.009: 12, 0.011: 6,
                  0.01: 5, 0.013: 2, 0.017: 2, 0.012: 2,
                  0.079: 1, 0.016: 1,
                  0.014: 1, 0.019: 1, 0.04: 1, 0.081: 1}),
             KPISet.ERRORS: [],
             KPISet.STDEV_RESP_TIME: 0.04073402130687656,
             KPISet.AVG_LATENCY: 1.7196034796682178e-06,
             KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
             KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0,
                                                    '99.9': 0.009, '90.0': 0.001,
                                                    '100.0': 0.081, '99.0': 0.004,
                                                    '50.0': 0.0}),
             KPISet.SUCCESSES: 29658,
             KPISet.SAMPLE_COUNT: 29658,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005164542450603551,
             KPISet.FAILURES: 0})

        obj.last_sec = datapoint
        obj.post_process()
        self.assertEqual("29656 failed samples: http://192.168.1.1/anotherquery\n", obj.log.info_buf.getvalue())
Example #24
    def test_log_messages_failed_labels(self):
        obj = FinalStatus()
        obj.engine = EngineEmul
        obj.parameters = BetterDict()
        obj.log = logger_mock()
        obj.parameters.merge({
            "failed-labels": True,
            "percentiles": False,
            "summary": False
        })

        datapoint = DataPoint(None, None)
        cumul_data = datapoint[DataPoint.CUMULATIVE]

        cumul_data[""] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            7.890211417203362e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 32160,
                0.001: 24919,
                0.002: 1049,
                0.003: 630,
                0.004: 224,
                0.005: 125,
                0.006: 73,
                0.007: 46,
                0.008: 32,
                0.009: 20,
                0.011: 8,
                0.01: 8,
                0.017: 3,
                0.016: 3,
                0.014: 3,
                0.013: 3,
                0.04: 2,
                0.012: 2,
                0.079: 1,
                0.081: 1,
                0.019: 1,
                0.015: 1
            }),
            KPISet.ERRORS: [{
                'msg':
                'Forbidden',
                'cnt':
                7373,
                'type':
                0,
                'urls':
                Counter({'http://192.168.1.1/anotherquery': 7373}),
                KPISet.RESP_CODES:
                '403'
            }],
            KPISet.STDEV_RESP_TIME:
            0.04947974228872108,
            KPISet.AVG_LATENCY:
            0.0002825639815220692,
            KPISet.RESP_CODES:
            Counter({
                '304': 29656,
                '403': 29656,
                '200': 2
            }),
            KPISet.PERCENTILES:
            defaultdict(
                None, {
                    '95.0': 0.001,
                    '0.0': 0.0,
                    '99.9': 0.008,
                    '90.0': 0.001,
                    '100.0': 0.081,
                    '99.0': 0.003,
                    '50.0': 0.0
                }),
            KPISet.SUCCESSES:
            29658,
            KPISet.SAMPLE_COUNT:
            59314,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005440536804127192,
            KPISet.FAILURES:
            29656
        })

        cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            9.609548856969457e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 17219,
                0.001: 11246,
                0.002: 543,
                0.003: 341,
                0.004: 121,
                0.005: 66,
                0.006: 36,
                0.007: 33,
                0.008: 18,
                0.009: 12,
                0.011: 6,
                0.01: 5,
                0.013: 2,
                0.017: 2,
                0.012: 2,
                0.079: 1,
                0.016: 1,
                0.014: 1,
                0.019: 1,
                0.04: 1,
                0.081: 1
            }),
            KPISet.ERRORS: [],
            KPISet.STDEV_RESP_TIME:
            0.04073402130687656,
            KPISet.AVG_LATENCY:
            1.7196034796682178e-06,
            KPISet.RESP_CODES:
            Counter({
                '304': 29656,
                '200': 2
            }),
            KPISet.PERCENTILES:
            defaultdict(
                None, {
                    '95.0': 0.001,
                    '0.0': 0.0,
                    '99.9': 0.009,
                    '90.0': 0.001,
                    '100.0': 0.081,
                    '99.0': 0.004,
                    '50.0': 0.0
                }),
            KPISet.SUCCESSES:
            29658,
            KPISet.SAMPLE_COUNT:
            29658,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005164542450603551,
            KPISet.FAILURES:
            0
        })

        cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            6.1707580253574335e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 14941,
                0.001: 13673,
                0.002: 506,
                0.003: 289,
                0.004: 103,
                0.005: 59,
                0.006: 37,
                0.008: 14,
                0.007: 13,
                0.009: 8,
                0.01: 3,
                0.011: 2,
                0.016: 2,
                0.014: 2,
                0.017: 1,
                0.013: 1,
                0.015: 1,
                0.04: 1
            }),
            KPISet.ERRORS: [{
                'msg':
                'Forbidden',
                'cnt':
                7373,
                'type':
                0,
                'urls':
                Counter({'http://192.168.1.1/anotherquery': 7373}),
                KPISet.RESP_CODES:
                '403'
            }],
            KPISet.STDEV_RESP_TIME:
            0.032465137860758844,
            KPISet.AVG_LATENCY:
            0.0005634272997032645,
            KPISet.RESP_CODES:
            Counter({'403': 29656}),
            KPISet.PERCENTILES:
            defaultdict(
                None, {
                    '95.0': 0.001,
                    '0.0': 0.0,
                    '99.9': 0.008,
                    '90.0': 0.001,
                    '100.0': 0.04,
                    '99.0': 0.003,
                    '50.0': 0.0
                }),
            KPISet.SUCCESSES:
            0,
            KPISet.SAMPLE_COUNT:
            29656,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005716549770704078,
            KPISet.FAILURES:
            29656
        })

        cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            9.609548856969457e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 17219,
                0.001: 11246,
                0.002: 543,
                0.003: 341,
                0.004: 121,
                0.005: 66,
                0.006: 36,
                0.007: 33,
                0.008: 18,
                0.009: 12,
                0.011: 6,
                0.01: 5,
                0.013: 2,
                0.017: 2,
                0.012: 2,
                0.079: 1,
                0.016: 1,
                0.014: 1,
                0.019: 1,
                0.04: 1,
                0.081: 1
            }),
            KPISet.ERRORS: [],
            KPISet.STDEV_RESP_TIME:
            0.04073402130687656,
            KPISet.AVG_LATENCY:
            1.7196034796682178e-06,
            KPISet.RESP_CODES:
            Counter({
                '304': 29656,
                '200': 2
            }),
            KPISet.PERCENTILES:
            defaultdict(
                None, {
                    '95.0': 0.001,
                    '0.0': 0.0,
                    '99.9': 0.009,
                    '90.0': 0.001,
                    '100.0': 0.081,
                    '99.0': 0.004,
                    '50.0': 0.0
                }),
            KPISet.SUCCESSES:
            29658,
            KPISet.SAMPLE_COUNT:
            29658,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005164542450603551,
            KPISet.FAILURES:
            0
        })

        obj.last_sec = datapoint
        obj.post_process()
        self.assertEqual(
            "29656 failed samples: http://192.168.1.1/anotherquery\n",
            obj.log.info_buf.getvalue())
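Example #25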
 def __get_datapoint(self):
     datapoint = DataPoint(None, None)
     cumul_data = datapoint[DataPoint.CUMULATIVE]
     cumul_data[""] = KPISet.from_dict({
         KPISet.AVG_CONN_TIME:
         7.890211417203362e-06,
         KPISet.RESP_TIMES:
         Counter({
             0.0: 32160,
             0.001: 24919,
             0.002: 1049,
             0.003: 630,
             0.004: 224,
             0.005: 125,
             0.006: 73,
             0.007: 46,
             0.008: 32,
             0.009: 20,
             0.011: 8,
             0.01: 8,
             0.017: 3,
             0.016: 3,
             0.014: 3,
             0.013: 3,
             0.04: 2,
             0.012: 2,
             0.079: 1,
             0.081: 1,
             0.019: 1,
             0.015: 1
         }),
         KPISet.ERRORS: [{
             'msg':
             'Forbidden',
             'cnt':
             7373,
             'type':
             0,
             'urls':
             Counter({'http://192.168.1.1/anotherquery': 7373}),
             KPISet.RESP_CODES:
             '403'
         }],
         KPISet.STDEV_RESP_TIME:
         0.04947974228872108,
         KPISet.AVG_LATENCY:
         0.0002825639815220692,
         KPISet.RESP_CODES:
         Counter({
             '304': 29656,
             '403': 29656,
             '200': 2
         }),
         KPISet.PERCENTILES: {
             '95.0': 0.001,
             '0.0': 0.0,
             '99.9': 0.008,
             '90.0': 0.001,
             '100.0': 0.081,
             '99.0': 0.003,
             '50.0': 0.0
         },
         KPISet.SUCCESSES:
         29658,
         KPISet.SAMPLE_COUNT:
         59314,
         KPISet.CONCURRENCY:
         0,
         KPISet.AVG_RESP_TIME:
         0.0005440536804127192,
         KPISet.FAILURES:
         29656
     })
     cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict({
         KPISet.AVG_CONN_TIME:
         9.609548856969457e-06,
         KPISet.RESP_TIMES:
         Counter({
             0.0: 17219,
             0.001: 11246,
             0.002: 543,
             0.003: 341,
             0.004: 121,
             0.005: 66,
             0.006: 36,
             0.007: 33,
             0.008: 18,
             0.009: 12,
             0.011: 6,
             0.01: 5,
             0.013: 2,
             0.017: 2,
             0.012: 2,
             0.079: 1,
             0.016: 1,
             0.014: 1,
             0.019: 1,
             0.04: 1,
             0.081: 1
         }),
         KPISet.ERRORS: [],
         KPISet.STDEV_RESP_TIME:
         0.04073402130687656,
         KPISet.AVG_LATENCY:
         1.7196034796682178e-06,
         KPISet.RESP_CODES:
         Counter({
             '304': 29656,
             '200': 2
         }),
         KPISet.PERCENTILES: {
             '95.0': 0.001,
             '0.0': 0.0,
             '99.9': 0.009,
             '90.0': 0.001,
             '100.0': 0.081,
             '99.0': 0.004,
             '50.0': 0.0
         },
         KPISet.SUCCESSES:
         29658,
         KPISet.SAMPLE_COUNT:
         29658,
         KPISet.CONCURRENCY:
         0,
         KPISet.AVG_RESP_TIME:
         0.0005164542450603551,
         KPISet.FAILURES:
         0
     })
     cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict({
         KPISet.AVG_CONN_TIME:
         6.1707580253574335e-06,
         KPISet.RESP_TIMES:
         Counter({
             0.0: 14941,
             0.001: 13673,
             0.002: 506,
             0.003: 289,
             0.004: 103,
             0.005: 59,
             0.006: 37,
             0.008: 14,
             0.007: 13,
             0.009: 8,
             0.01: 3,
             0.011: 2,
             0.016: 2,
             0.014: 2,
             0.017: 1,
             0.013: 1,
             0.015: 1,
             0.04: 1
         }),
         KPISet.ERRORS: [{
             'msg':
             'Forbidden',
             'cnt':
             7373,
             'type':
             0,
             'urls':
             Counter({'http://192.168.1.1/anotherquery': 7373}),
             KPISet.RESP_CODES:
             '403'
         }],
         KPISet.STDEV_RESP_TIME:
         0.032465137860758844,
         KPISet.AVG_LATENCY:
         0.0005634272997032645,
         KPISet.RESP_CODES:
         Counter({'403': 29656}),
         KPISet.PERCENTILES: {
             '95.0': 0.001,
             '0.0': 0.0,
             '99.9': 0.008,
             '90.0': 0.001,
             '100.0': 0.04,
             '99.0': 0.003,
             '50.0': 0.0
         },
         KPISet.SUCCESSES:
         0,
         KPISet.SAMPLE_COUNT:
         29656,
         KPISet.CONCURRENCY:
         0,
         KPISet.AVG_RESP_TIME:
         0.0005716549770704078,
         KPISet.FAILURES:
         29656
     })
     cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict({
         KPISet.AVG_CONN_TIME:
         9.609548856969457e-06,
         KPISet.RESP_TIMES:
         Counter({
             0.0: 17219,
             0.001: 11246,
             0.002: 543,
             0.003: 341,
             0.004: 121,
             0.005: 66,
             0.006: 36,
             0.007: 33,
             0.008: 18,
             0.009: 12,
             0.011: 6,
             0.01: 5,
             0.013: 2,
             0.017: 2,
             0.012: 2,
             0.079: 1,
             0.016: 1,
             0.014: 1,
             0.019: 1,
             0.04: 1,
             0.081: 1
         }),
         KPISet.ERRORS: [],
         KPISet.STDEV_RESP_TIME:
         0.04073402130687656,
         KPISet.AVG_LATENCY:
         1.7196034796682178e-06,
         KPISet.RESP_CODES:
         Counter({
             '304': 29656,
             '200': 2
         }),
         KPISet.PERCENTILES: {
             '95.0': 0.001,
             '0.0': 0.0,
             '99.9': 0.009,
             '90.0': 0.001,
             '100.0': 0.081,
             '99.0': 0.004,
             '50.0': 0.0
         },
         KPISet.SUCCESSES:
         29658,
         KPISet.SAMPLE_COUNT:
         29658,
         KPISet.CONCURRENCY:
         0,
         KPISet.AVG_RESP_TIME:
         0.0005164542450603551,
         KPISet.FAILURES:
         0
     })
     return datapoint
Example #26
    def test_xml_format_sample_labels(self):
        # generate xml, compare hash

        obj = JUnitXMLReporter()
        obj.engine = EngineEmul()
        rep = BlazeMeterUploader()
        rep.results_url = "http://report/123"
        obj.engine.reporters.append(rep)
        obj.parameters = BetterDict()

        path_from_config = tempfile.mktemp(suffix='.xml',
                                           prefix='junit-xml-sample-labels',
                                           dir=obj.engine.artifacts_dir)

        # data-source: finalstats by default
        obj.parameters.merge({"filename": path_from_config})

        obj.prepare()

        datapoint = DataPoint(0, [])
        cumul_data = datapoint[DataPoint.CUMULATIVE]

        cumul_data[""] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            7.890211417203362e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 32160,
                0.001: 24919,
                0.002: 1049,
                0.003: 630,
                0.004: 224,
                0.005: 125,
                0.006: 73,
                0.007: 46,
                0.008: 32,
                0.009: 20,
                0.011: 8,
                0.01: 8,
                0.017: 3,
                0.016: 3,
                0.014: 3,
                0.013: 3,
                0.04: 2,
                0.012: 2,
                0.079: 1,
                0.081: 1,
                0.019: 1,
                0.015: 1
            }),
            KPISet.ERRORS: [
                {
                    'msg': 'Forbidden',
                    'cnt': 7300,
                    'type': 0,
                    'urls': Counter({'http://192.168.1.1/anotherquery': 7300}),
                    KPISet.RESP_CODES: '403'
                },
                {
                    'msg': 'Assertion failed: text /smth/ not found',
                    'cnt': 73,
                    'type': 1,
                    'urls': Counter({'http://192.168.1.1/anotherquery': 73}),
                    KPISet.RESP_CODES: '200'
                },
            ],
            KPISet.STDEV_RESP_TIME:
            0.04947974228872108,
            KPISet.AVG_LATENCY:
            0.0002825639815220692,
            KPISet.RESP_CODES:
            Counter({
                '304': 29656,
                '403': 29656,
                '200': 2
            }),
            KPISet.PERCENTILES: {
                '95.0': 0.001,
                '0.0': 0.0,
                '99.9': 0.008,
                '90.0': 0.001,
                '100.0': 0.081,
                '99.0': 0.003,
                '50.0': 0.0
            },
            KPISet.SUCCESSES:
            29658,
            KPISet.SAMPLE_COUNT:
            59314,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005440536804127192,
            KPISet.FAILURES:
            29656
        })

        cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            9.609548856969457e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 17219,
                0.001: 11246,
                0.002: 543,
                0.003: 341,
                0.004: 121,
                0.005: 66,
                0.006: 36,
                0.007: 33,
                0.008: 18,
                0.009: 12,
                0.011: 6,
                0.01: 5,
                0.013: 2,
                0.017: 2,
                0.012: 2,
                0.079: 1,
                0.016: 1,
                0.014: 1,
                0.019: 1,
                0.04: 1,
                0.081: 1
            }),
            KPISet.ERRORS: [],
            KPISet.STDEV_RESP_TIME:
            0.04073402130687656,
            KPISet.AVG_LATENCY:
            1.7196034796682178e-06,
            KPISet.RESP_CODES:
            Counter({
                '304': 29656,
                '200': 2
            }),
            KPISet.PERCENTILES: {
                '95.0': 0.001,
                '0.0': 0.0,
                '99.9': 0.009,
                '90.0': 0.001,
                '100.0': 0.081,
                '99.0': 0.004,
                '50.0': 0.0
            },
            KPISet.SUCCESSES:
            29658,
            KPISet.SAMPLE_COUNT:
            29658,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005164542450603551,
            KPISet.FAILURES:
            0
        })

        cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            6.1707580253574335e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 14941,
                0.001: 13673,
                0.002: 506,
                0.003: 289,
                0.004: 103,
                0.005: 59,
                0.006: 37,
                0.008: 14,
                0.007: 13,
                0.009: 8,
                0.01: 3,
                0.011: 2,
                0.016: 2,
                0.014: 2,
                0.017: 1,
                0.013: 1,
                0.015: 1,
                0.04: 1
            }),
            KPISet.ERRORS: [
                {
                    'msg': 'Forbidden',
                    'cnt': 7300,
                    'type': 0,
                    'urls': Counter({'http://192.168.1.1/anotherquery': 7300}),
                    KPISet.RESP_CODES: '403'
                },
                {
                    'msg': 'Assertion failed: text /smth/ not found',
                    'cnt': 73,
                    'type': 1,
                    'urls': Counter({'http://192.168.1.1/anotherquery': 73}),
                    KPISet.RESP_CODES: '200'
                },
            ],
            KPISet.STDEV_RESP_TIME:
            0.032465137860758844,
            KPISet.AVG_LATENCY:
            0.0005634272997032645,
            KPISet.RESP_CODES:
            Counter({'403': 29656}),
            KPISet.PERCENTILES: {
                '95.0': 0.001,
                '0.0': 0.0,
                '99.9': 0.008,
                '90.0': 0.001,
                '100.0': 0.04,
                '99.0': 0.003,
                '50.0': 0.0
            },
            KPISet.SUCCESSES:
            0,
            KPISet.SAMPLE_COUNT:
            29656,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005716549770704078,
            KPISet.FAILURES:
            29656
        })

        cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict({
            KPISet.AVG_CONN_TIME:
            9.609548856969457e-06,
            KPISet.RESP_TIMES:
            Counter({
                0.0: 17219,
                0.001: 11246,
                0.002: 543,
                0.003: 341,
                0.004: 121,
                0.005: 66,
                0.006: 36,
                0.007: 33,
                0.008: 18,
                0.009: 12,
                0.011: 6,
                0.01: 5,
                0.013: 2,
                0.017: 2,
                0.012: 2,
                0.079: 1,
                0.016: 1,
                0.014: 1,
                0.019: 1,
                0.04: 1,
                0.081: 1
            }),
            KPISet.ERRORS: [],
            KPISet.STDEV_RESP_TIME:
            0.04073402130687656,
            KPISet.AVG_LATENCY:
            1.7196034796682178e-06,
            KPISet.RESP_CODES:
            Counter({
                '304': 29656,
                '200': 2
            }),
            KPISet.PERCENTILES: {
                '95.0': 0.001,
                '0.0': 0.0,
                '99.9': 0.009,
                '90.0': 0.001,
                '100.0': 0.081,
                '99.0': 0.004,
                '50.0': 0.0
            },
            KPISet.SUCCESSES:
            29658,
            KPISet.SAMPLE_COUNT:
            29658,
            KPISet.CONCURRENCY:
            0,
            KPISet.AVG_RESP_TIME:
            0.0005164542450603551,
            KPISet.FAILURES:
            0
        })

        obj.aggregated_second(datapoint)

        obj.post_process()

        with open(obj.report_file_path, 'rb') as fds:
            f_contents = fds.read()

        logging.info("File: %s", f_contents)
        xml_tree = etree.fromstring(f_contents)
        self.assertEqual('testsuites', xml_tree.tag)
        suite = xml_tree.getchildren()[0]
        self.assertEqual('testsuite', suite.tag)
        self.assertListEqual(['sample_labels', "bzt"], suite.values())
        test_cases = suite.getchildren()
        self.assertEqual(3, len(test_cases))
        self.assertEqual('testcase', test_cases[0].tag)
        self.assertEqual('error', test_cases[0].getchildren()[1].tag)
        self.assertEqual('failure', test_cases[0].getchildren()[2].tag)
        self.assertEqual('system-out', test_cases[0].getchildren()[0].tag)
        self.assertIn('BlazeMeter report link: http://report/123',
                      test_cases[0].getchildren()[0].text)
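Example #27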

    def __get_datapoint(self):
        """Build a fixture DataPoint with cumulative KPIs for the overall ('') entry and three labels."""
        datapoint = DataPoint(None, None)
        cumul_data = datapoint[DataPoint.CUMULATIVE]
        cumul_data[""] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
                  0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
                  0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
                  0.019: 1, 0.015: 1}),
             KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                              'urls': Counter({'http://192.168.1.1/anotherquery': 7373}),
                              KPISet.RESP_CODES: '403'}],
             KPISet.STDEV_RESP_TIME: 0.04947974228872108,
             KPISet.AVG_LATENCY: 0.0002825639815220692,
             KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
             KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                                  '100.0': 0.081, '99.0': 0.003, '50.0': 0.0},
             KPISet.SUCCESSES: 29658,
             KPISet.SAMPLE_COUNT: 59314,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005440536804127192,
             KPISet.FAILURES: 29656})
        cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
                  0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
                  0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
                  0.081: 1}),
             KPISet.ERRORS: [],
             KPISet.STDEV_RESP_TIME: 0.04073402130687656,
             KPISet.AVG_LATENCY: 1.7196034796682178e-06,
             KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
             KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001,
                                  '100.0': 0.081, '99.0': 0.004, '50.0': 0.0},
             KPISet.SUCCESSES: 29658,
             KPISet.SAMPLE_COUNT: 29658,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005164542450603551,
             KPISet.FAILURES: 0})
        cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 6.1707580253574335e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 14941, 0.001: 13673, 0.002: 506, 0.003: 289, 0.004: 103, 0.005: 59,
                  0.006: 37, 0.008: 14, 0.007: 13, 0.009: 8, 0.01: 3, 0.011: 2, 0.016: 2,
                  0.014: 2, 0.017: 1, 0.013: 1, 0.015: 1, 0.04: 1}),
             KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                              'urls': Counter({'http://192.168.1.1/anotherquery': 7373}),
                              KPISet.RESP_CODES: '403'}],
             KPISet.STDEV_RESP_TIME: 0.032465137860758844,
             KPISet.AVG_LATENCY: 0.0005634272997032645,
             KPISet.RESP_CODES: Counter({'403': 29656}),
             KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                                  '100.0': 0.04, '99.0': 0.003, '50.0': 0.0},
             KPISet.SUCCESSES: 0,
             KPISet.SAMPLE_COUNT: 29656,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005716549770704078,
             KPISet.FAILURES: 29656})
        cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341, 0.004: 121, 0.005: 66,
                  0.006: 36, 0.007: 33, 0.008: 18, 0.009: 12, 0.011: 6, 0.01: 5, 0.013: 2,
                  0.017: 2, 0.012: 2, 0.079: 1, 0.016: 1, 0.014: 1, 0.019: 1, 0.04: 1,
                  0.081: 1}),
             KPISet.ERRORS: [],
             KPISet.STDEV_RESP_TIME: 0.04073402130687656,
             KPISet.AVG_LATENCY: 1.7196034796682178e-06,
             KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
             KPISet.PERCENTILES: {'95.0': 0.001, '0.0': 0.0, '99.9': 0.009, '90.0': 0.001,
                                  '100.0': 0.081, '99.0': 0.004, '50.0': 0.0},
             KPISet.SUCCESSES: 29658,
             KPISet.SAMPLE_COUNT: 29658,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005164542450603551,
             KPISet.FAILURES: 0})
        return datapoint
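
    # Usage sketch (hypothetical test, not from the original suite): feed the fixture built
    # by __get_datapoint through the reporter, as the surrounding tests do, and check that
    # the produced report at least parses. No structural assertions here, since the exact
    # XML layout depends on the reporter version.
    def test_fixture_roundtrip_sketch(self):
        obj = JUnitXMLReporter()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict()
        obj.prepare()
        obj.aggregated_second(self.__get_datapoint())
        obj.post_process()
        with open(obj.report_file_path, 'rb') as fds:
            self.assertIsNotNone(etree.fromstring(fds.read()))
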
    def test_xml_format_sample_labels(self):
        # generate xml, compare hash

        obj = JUnitXMLReporter()
        obj.engine = EngineEmul()
        obj.parameters = BetterDict()

        path_from_config = tempfile.mktemp(suffix='.xml', prefix='junit-xml-sample-labels',
                                           dir=obj.engine.artifacts_dir)

        # data-source: finalstats by default
        obj.parameters.merge({"filename": path_from_config})

        obj.prepare()

        datapoint = DataPoint(None, None)
        cumul_data = datapoint[DataPoint.CUMULATIVE]

        cumul_data[""] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 7.890211417203362e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 32160, 0.001: 24919, 0.002: 1049, 0.003: 630, 0.004: 224, 0.005: 125,
                  0.006: 73, 0.007: 46, 0.008: 32, 0.009: 20, 0.011: 8, 0.01: 8, 0.017: 3,
                  0.016: 3, 0.014: 3, 0.013: 3, 0.04: 2, 0.012: 2, 0.079: 1, 0.081: 1,
                  0.019: 1, 0.015: 1}),
             KPISet.ERRORS: [{'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                              'urls': Counter({'http://192.168.1.1/anotherquery': 7373}), KPISet.RESP_CODES: '403'}],
             KPISet.STDEV_RESP_TIME: 0.04947974228872108,
             KPISet.AVG_LATENCY: 0.0002825639815220692,
             KPISet.RESP_CODES: Counter({'304': 29656, '403': 29656, '200': 2}),
             KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0, '99.9': 0.008, '90.0': 0.001,
                                                    '100.0': 0.081, '99.0': 0.003, '50.0': 0.0}),
             KPISet.SUCCESSES: 29658,
             KPISet.SAMPLE_COUNT: 59314,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005440536804127192,
             KPISet.FAILURES: 29656})

        cumul_data["http://192.168.1.1/somequery"] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 17219, 0.001: 11246, 0.002: 543, 0.003: 341,
                  0.004: 121,
                  0.005: 66, 0.006: 36, 0.007: 33, 0.008: 18,
                  0.009: 12, 0.011: 6,
                  0.01: 5, 0.013: 2, 0.017: 2, 0.012: 2, 0.079: 1,
                  0.016: 1,
                  0.014: 1, 0.019: 1, 0.04: 1, 0.081: 1}),
             KPISet.ERRORS: [],
             KPISet.STDEV_RESP_TIME: 0.04073402130687656,
             KPISet.AVG_LATENCY: 1.7196034796682178e-06,
             KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
             KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0,
                                                    '99.9': 0.009,
                                                    '90.0': 0.001,
                                                    '100.0': 0.081,
                                                    '99.0': 0.004,
                                                    '50.0': 0.0}),
             KPISet.SUCCESSES: 29658,
             KPISet.SAMPLE_COUNT: 29658,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005164542450603551, KPISet.FAILURES: 0})

        cumul_data["http://192.168.1.1/anotherquery"] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 6.1707580253574335e-06,
             KPISet.RESP_TIMES: Counter({0.0: 14941, 0.001: 13673, 0.002: 506,
                                         0.003: 289, 0.004: 103,
                                         0.005: 59, 0.006: 37, 0.008: 14,
                                         0.007: 13, 0.009: 8, 0.01: 3,
                                         0.011: 2, 0.016: 2, 0.014: 2,
                                         0.017: 1, 0.013: 1, 0.015: 1,
                                         0.04: 1}),
             KPISet.ERRORS: [
                 {'msg': 'Forbidden', 'cnt': 7373, 'type': 0,
                  'urls': Counter(
                      {'http://192.168.1.1/anotherquery': 7373}),
                  KPISet.RESP_CODES: '403'}],
             KPISet.STDEV_RESP_TIME: 0.032465137860758844,
             KPISet.AVG_LATENCY: 0.0005634272997032645,
             KPISet.RESP_CODES: Counter({'403': 29656}),
             KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0,
                                                    '99.9': 0.008, '90.0': 0.001,
                                                    '100.0': 0.04, '99.0': 0.003,
                                                    '50.0': 0.0}),
             KPISet.SUCCESSES: 0,
             KPISet.SAMPLE_COUNT: 29656,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005716549770704078,
             KPISet.FAILURES: 29656})

        cumul_data["http://192.168.100.100/somequery"] = KPISet.from_dict(
            {KPISet.AVG_CONN_TIME: 9.609548856969457e-06,
             KPISet.RESP_TIMES: Counter(
                 {0.0: 17219, 0.001: 11246, 0.002: 543,
                  0.003: 341, 0.004: 121,
                  0.005: 66, 0.006: 36, 0.007: 33, 0.008: 18,
                  0.009: 12, 0.011: 6,
                  0.01: 5, 0.013: 2, 0.017: 2, 0.012: 2,
                  0.079: 1, 0.016: 1,
                  0.014: 1, 0.019: 1, 0.04: 1, 0.081: 1}),
             KPISet.ERRORS: [],
             KPISet.STDEV_RESP_TIME: 0.04073402130687656,
             KPISet.AVG_LATENCY: 1.7196034796682178e-06,
             KPISet.RESP_CODES: Counter({'304': 29656, '200': 2}),
             KPISet.PERCENTILES: defaultdict(None, {'95.0': 0.001, '0.0': 0.0,
                                                    '99.9': 0.009, '90.0': 0.001,
                                                    '100.0': 0.081, '99.0': 0.004,
                                                    '50.0': 0.0}),
             KPISet.SUCCESSES: 29658,
             KPISet.SAMPLE_COUNT: 29658,
             KPISet.CONCURRENCY: 0,
             KPISet.AVG_RESP_TIME: 0.0005164542450603551,
             KPISet.FAILURES: 0})

        obj.aggregated_second(datapoint)

        obj.post_process()

        with open(obj.report_file_path, 'rb') as fds:
            f_contents = fds.read()

        xml_tree = etree.fromstring(f_contents)
        self.assertEqual('testsuite', xml_tree.tag)
        self.assertEqual(4, len(xml_tree.getchildren()))
        self.assertEqual('testcase', xml_tree.getchildren()[0].tag)
        self.assertEqual('error', xml_tree.getchildren()[0].getchildren()[0].tag)
        self.assertEqual('error', xml_tree.getchildren()[1].getchildren()[0].tag)
        self.assertListEqual(['29656', 'taurus_sample-labels', '0', '59314'], xml_tree.values())
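
    # Illustrative cross-check (added, not from the original suite): the summary figures
    # asserted above line up with the overall ('') KPISet of the fixture, where failures
    # plus successes equals the total sample count (29656 + 29658 == 59314). The mapping of
    # XML attribute positions to these KPIs is an assumption.
    def test_summary_figures_consistent_sketch(self):
        overall = self.__get_datapoint()[DataPoint.CUMULATIVE][""]
        self.assertEqual(overall[KPISet.SAMPLE_COUNT],
                         overall[KPISet.SUCCESSES] + overall[KPISet.FAILURES])
        self.assertEqual(['29656', '59314'],
                         [str(overall[KPISet.FAILURES]), str(overall[KPISet.SAMPLE_COUNT])])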