def test_set_rtimes_len(self):
    obj = ConsolidatingAggregator()
    obj.settings['rtimes-len'] = 42
    obj.prepare()
    reader = self.get_fail_reader()
    obj.add_underling(reader)
    listener = MockListener()
    obj.add_listener(listener)
    obj.check()
    for dp in listener.results:
        for kpiset in dp['cumulative'].values():
            self.assertEqual(42, kpiset.rtimes_len)
        for kpiset in dp['current'].values():
            self.assertNotEqual(42, kpiset.rtimes_len)

def test_datapoint_to_json(self):
    obj = ConsolidatingAggregator()
    obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
    obj.prepare()
    obj.add_underling(self.get_success_reader())
    for point in obj.datapoints():
        obj.log.info(to_json(point))

def test_errors_cumulative(self):
    aggregator = ConsolidatingAggregator()
    aggregator.track_percentiles = [50]
    aggregator.prepare()
    reader = self.get_fail_reader()
    aggregator.add_underling(reader)
    aggregator.shutdown()
    aggregator.post_process()
    cum_dict = aggregator.underlings[0].cumulative
    first_err_ids = [id(err) for err in cum_dict['first']['errors']]
    second_err_ids = [id(err) for err in cum_dict['second']['errors']]
    total_err_ids = [id(err) for err in cum_dict['']['errors']]
    all_ids = first_err_ids + second_err_ids + total_err_ids
    self.assertEqual(len(all_ids), len(set(all_ids)))
    for label in cum_dict:
        data = cum_dict[label]
        total_errors_count = sum(err['cnt'] for err in data['errors'])
        self.assertEqual(data['fail'], total_errors_count)

def test_two_executions(self):
    # check consolidator
    obj = ConsolidatingAggregator()
    obj.track_percentiles = [0, 50, 100]
    obj.prepare()
    underling1 = self.get_reader()
    underling2 = self.get_reader()
    obj.add_underling(underling1)
    obj.add_underling(underling2)

    cnt = 0
    for _ in range(1, 10):
        for point in obj.datapoints():
            overall = point[DataPoint.CURRENT]['']
            self.assertEqual(2, overall[KPISet.CONCURRENCY])
            self.assertGreater(overall[KPISet.PERCENTILES]["100.0"], 0)
            self.assertGreater(overall[KPISet.AVG_RESP_TIME], 0)
            cnt += 1

    self.assertEqual(2, cnt)
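
These snippets use helper mocks from the Taurus test suite (MockReader, MockListener, get_success_reader and friends) that the page does not show. A minimal sketch of the two mocks, assuming bzt's ResultsReader/AggregatorListener interfaces (names and signatures here are assumptions, not verified against this exact revision):

from bzt.modules.aggregator import AggregatorListener, ResultsReader

class MockReader(ResultsReader):
    # feeds pre-canned samples into the aggregator; each `data` entry is a tuple:
    # (t_stamp, label, concurrency, r_time, con_time, latency, r_code, error, trname, byte_count)
    def __init__(self):
        super(MockReader, self).__init__()
        self.data = []

    def _read(self, final_pass=False):
        while self.data:
            yield self.data.pop(0)

class MockListener(AggregatorListener):
    # stores every aggregated DataPoint so tests can assert on it later
    def __init__(self):
        super(MockListener, self).__init__()
        self.results = []

    def aggregated_second(self, data):
        self.results.append(data)
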
class TestConsolidatingAggregator(BZTestCase):
    def setUp(self):
        super(TestConsolidatingAggregator, self).setUp()
        self.obj = ConsolidatingAggregator()

    def test_two_executions(self):
        self.obj.track_percentiles = [0, 50, 100]
        self.obj.prepare()
        underling1 = get_success_reader()
        underling2 = get_success_reader()
        self.obj.add_underling(underling1)
        self.obj.add_underling(underling2)

        cnt = 0
        for _ in range(1, 10):
            for point in self.obj.datapoints():
                self.assertEqual(2, len(point[DataPoint.SUBRESULTS]))
                overall = point[DataPoint.CURRENT]['']
                self.assertEqual(2, overall[KPISet.CONCURRENCY])
                self.assertGreater(overall[KPISet.PERCENTILES]["100.0"], 0)
                self.assertGreater(overall[KPISet.AVG_RESP_TIME], 0)
                cnt += 1

        self.assertEqual(2, cnt)

    def test_errors_cumulative(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.underlings[0].cumulative
        first_err_ids = [id(err) for err in cum_dict['first']['errors']]
        second_err_ids = [id(err) for err in cum_dict['second']['errors']]
        total_err_ids = [id(err) for err in cum_dict['']['errors']]
        all_ids = first_err_ids + second_err_ids + total_err_ids
        self.assertEqual(len(all_ids), len(set(all_ids)))
        for label in cum_dict:
            data = cum_dict[label]
            total_errors_count = sum(err['cnt'] for err in data['errors'])
            self.assertEqual(data['fail'], total_errors_count)

    def test_labels_variety(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader1 = get_success_reader()
        reader2 = get_success_reader_alot()
        self.obj.log.info(len(reader1.data) + len(reader2.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader1)
        self.obj.add_underling(reader2)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        len_limit = self.obj.generalize_labels + 1  # due to randomness, it can go a bit higher than the limit
        labels = list(cum_dict.keys())
        self.assertGreaterEqual(len(labels), self.obj.generalize_labels / 2)  # assert that it's at least half full
        self.assertLessEqual(len(labels), len_limit + 1)  # allow +1 label because '' is cumulative

    def test_labels_constant_part(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_success_reader_alot(prefix='http://blazedemo.com/?r=')
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        self.assertGreaterEqual(len(labels), self.obj.generalize_labels / 2)  # assert that it's at least half full
        self.assertLessEqual(len(labels), self.obj.generalize_labels + 1)  # allow +1 label because '' is cumulative

    def test_labels_aggressive_folding(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_success_reader_selected_labels()
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        self.assertEqual(len(labels), 6)

    def test_labels_aggressive_folding_2(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        label_count = 50
        reader = get_success_reader_shrinking_labels(max_label_size=int(label_count * 2), count=label_count)
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = label_count
        self.obj.add_underling(reader)
        last = None
        for point in self.obj.datapoints():
            last = point
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        labels_count = len(labels)
        self.assertLessEqual(labels_count, label_count + 1)  # didn't overflow
        self.assertGreaterEqual(labels_count, label_count * 0.25)  # at least a quarter-filled
        self.assertEqual(1, len(last[DataPoint.SUBRESULTS]))
        self.assertEqual(last, last[DataPoint.SUBRESULTS][0])

    def test_errors_variety(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader1 = get_fail_reader()
        reader2 = get_fail_reader_alot()
        self.obj.max_error_count = 50
        self.obj.add_underling(reader1)
        self.obj.add_underling(reader2)
        self.obj.shutdown()
        self.obj.post_process()
        expected = self.obj.max_error_count  # due to randomness, it can go a bit higher than the limit
        self.assertLessEqual(len(self.obj.known_errors), expected)
        self.assertGreaterEqual(len(self.obj.known_errors), self.obj.max_error_count / 2)  # assert that it's at least half full

    def test_uniq_errors(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.max_error_count = 9
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        self.assertEqual(len(cum_dict['']['errors']), 3)

    def test_set_rtimes_len(self):
        self.obj.settings['histogram-initial'] = 10.0
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.add_underling(reader)
        listener = MockListener()
        self.obj.add_listener(listener)
        self.obj.check()
        for dp in listener.results:
            for kpiset in dp['cumulative'].values():
                self.assertEqual(10000, kpiset[KPISet.RESP_TIMES].high)
            for kpiset in dp['current'].values():
                self.assertEqual(10000, kpiset[KPISet.RESP_TIMES].high)

    def test_inf_values(self):
        self.obj.settings['max-buffer-len'] = "inf"
        self.obj.prepare()
        self.assertEqual(self.obj.max_buffer_len, float("inf"))

    def test_datapoint_to_json(self):
        self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        self.obj.prepare()
        self.obj.add_underling(get_success_reader())
        for point in self.obj.datapoints():
            if point[DataPoint.SUBRESULTS] == [point]:
                del point[DataPoint.SUBRESULTS]
            self.obj.log.info(to_json(point))

    def test_negative_response_time_scaling_crash(self):
        self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        self.obj.prepare()

        self.sniff_log(self.obj.log)

        mock = MockReader()
        mock.data.append((1, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((2, "first", 1, -r(), r(), r(), 200, 'FAILx1', '', 0))
        mock.data.append((5, "first", 1, -r(), r(), r(), 200, None, '', 0))
        mock.data.append((7, "second", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((3, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((6, "second", 1, -r(), r(), r(), 200, 'unique FAIL', '', 0))

        self.obj.add_underling(mock)

        self.obj.check()
        for point in self.obj.datapoints():
            self.obj.log.info(to_json(point))

        self.assertIn("Negative response time reported",
                      self.log_recorder.warn_buff.getvalue())
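
The negative-response-time tests call a helper r() that is not shown on this page; judging by its use (-r() for a negative response time, r() for connect time and latency), it only needs to return a small positive random float. A plausible stand-in (the name r and the exact distribution are assumptions):

import random

def r():
    # hypothetical stand-in for the tests' random-timing helper:
    # a small positive value used as a response time in seconds
    return random.random()
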
Example #6
def setUp(self):
    super(TestSiegeExecutor, self).setUp()
    self.obj.engine.aggregator = ConsolidatingAggregator()
    self.obj.settings.merge({"path": TOOL_PATH})
Example #7
def test_simple(self):
    obj = PBenchExecutor()
    obj.engine = EngineEmul()
    obj.engine.aggregator = ConsolidatingAggregator()
    obj.engine.aggregator.add_listener(DataPointLogger())
    obj.engine.config.merge({"provisioning": "test"})

    if os.path.exists("/home/undera/Sources/phantom"):  # FIXME: not good, get rid of it
        obj.settings.merge({
            "path": "/home/undera/Sources/phantom/bin/phantom",
            "modules-path": "/home/undera/Sources/phantom/lib/phantom",
        })
    else:
        obj.settings.merge({
            "path": os.path.join(os.path.dirname(__file__), '..', "phantom.sh"),
        })

    obj.execution.merge({
        "log-responses": "proto_error",
        # "iterations": 5000000,
        "concurrency": 10,
        "throughput": 1000,
        "ramp-up": "1m",
        # "steps": 5,
        "hold-for": "15",
        "scenario": {
            "timeout": 1,
            "default-address": "http://localhost:33",
            "headers": {"Connection": "close"},
            "requests": [
                # "/",
                {
                    "url": "/api",
                    "method": "POST",
                    "headers": {"Content-Length": 0},
                    "body": {"param": "value"},
                }
            ]
        }
    })
    obj.engine.aggregator.prepare()
    obj.prepare()

    obj.engine.aggregator.startup()
    obj.startup()

    while not obj.check():
        logging.debug("Running...")
        obj.engine.aggregator.check()
        time.sleep(1)

    obj.shutdown()
    obj.engine.aggregator.shutdown()

    obj.post_process()
    obj.engine.aggregator.post_process()
Example #8
def configure(self, config):
    super(TestExternalResultsLoader, self).configure(config)
    self.results_listener = MockReader()
    self.obj.engine.aggregator = ConsolidatingAggregator()
    self.obj.engine.aggregator.engine = self.obj.engine
    self.obj.engine.aggregator.add_listener(self.results_listener)

def setUp(self):
    super(TestConsolidatingAggregator, self).setUp()
    self.obj = ConsolidatingAggregator()
Example #10
def setUp(self):
    super(TestConsolidatingAggregator, self).setUp()
    self.obj = ConsolidatingAggregator()
    self.obj.engine = EngineEmul()

def test_extend_datapoints(self):
    # check reported data format conversion for test state filtering on BM side

    def get_mock(origin_func, store):
        # generate replacement for BlazemeterUploader._dpoint_serializer.get_kpi_body
        def mock_get_kpi_body(data, isfinal):
            store.append(data)  # save received data for verifying
            return origin_func(data, isfinal)  # call original get_kpi_body as well

        return mock_get_kpi_body

    mock = BZMock()
    mock.mock_get.update({
        '1': {"result": []},
        'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {"result": []},
        '3': {"result": []},
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
        'https://a.blazemeter.com/api/v4/tests': {"result": {'id': 1}},
        'https://a.blazemeter.com/api/v4/tests/1/start-external': {"result": {
            'session': {'id': 1, 'userId': 1, 'testId': 1},
            'master': {'id': 1, 'userId': 1},
            'signature': 'sign',
        }},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': [
            {},
            {"result": {'session': {"statusCode": 140, 'status': 'ENDED'}}},
            {},
        ],
        'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': [
            IOError("monitoring push expected fail"),
            {"result": True},
            {"result": True},
            {"result": True},
            {"result": True},
            {"result": True},
            {"result": True},
            {"result": True},
            {"result": True},
        ],
        'https://a.blazemeter.com/api/v4/sessions/1/stop': {},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {
            "result": {'session': {}},
        },
    })

    obj = BlazeMeterUploader()
    sent_data_points = []
    obj._dpoint_serializer.get_kpi_body = get_mock(obj._dpoint_serializer.get_kpi_body, sent_data_points)
    obj.parameters['project'] = 'Proj name'
    obj.settings['token'] = '123'
    obj.settings['browser-open'] = 'none'
    obj.engine = EngineEmul()
    aggregator = ConsolidatingAggregator()
    aggregator.engine = obj.engine
    aggregator.settings['extend-aggregation'] = True
    reader = MockReader()
    watcher = MockReader()

    reader.buffer_scale_idx = '100.0'
    # data format: t_stamp, label, conc, r_time, con_time, latency, r_code, error, trname, byte_count
    reader.data.append((1, "a", 1, 1, 1, 1, 200, None, '', 1))
    reader.data.append((2, "b", 1, 2, 2, 2, 200, 'OK', '', 2))
    reader.data.append((2, "b", 1, 3, 3, 3, 404, "Not Found", '', 3))
    reader.data.append((2, "c", 1, 4, 4, 4, 200, None, '', 4))
    reader.data.append((3, "d", 1, 5, 5, 5, 200, None, '', 5))
    reader.data.append((5, "b", 1, 6, 6, 6, 200, None, '', 6))
    reader.data.append((5, "c", 1, 7, 7, 7, 200, None, '', 7))
    original_labels = list(d[1] for d in reader.data)

    aggregator.add_underling(reader)
    aggregator.add_listener(watcher)
    obj.engine.aggregator = aggregator

    mock.apply(obj._user)
    obj._user.timeout = 0.001

    obj.engine.aggregator.prepare()
    obj.prepare()

    obj.engine.aggregator.startup()
    obj.startup()

    obj.engine.aggregator.check()
    obj.check()

    obj.engine.aggregator.shutdown()
    obj.shutdown()

    obj.engine.aggregator.post_process()
    obj.post_process()

    sent_data_points = sent_data_points[0] + sent_data_points[1]

    state_labels = [0, 1, 2]
    for dp in sent_data_points:
        for data in dp['cumulative'], dp['current']:
            for label in data:
                self.assertIn(label, original_labels + [''])
                self.assertIsInstance(data[label], dict)
                for key in data[label]:
                    self.assertIn(key, state_labels)
Example #12
class TestConsolidatingAggregator(BZTestCase):
    def setUp(self):
        super(TestConsolidatingAggregator, self).setUp()
        self.obj = ConsolidatingAggregator()

    def test_two_executions(self):
        self.obj.track_percentiles = [0, 50, 100]
        self.obj.prepare()
        underling1 = get_success_reader()
        underling2 = get_success_reader()
        self.obj.add_underling(underling1)
        self.obj.add_underling(underling2)

        cnt = 0
        for _ in range(1, 10):
            for point in self.obj.datapoints():
                overall = point[DataPoint.CURRENT]['']
                self.assertEqual(2, overall[KPISet.CONCURRENCY])
                self.assertGreater(overall[KPISet.PERCENTILES]["100.0"], 0)
                self.assertGreater(overall[KPISet.AVG_RESP_TIME], 0)
                cnt += 1

        self.assertEqual(2, cnt)

    def test_errors_cumulative(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.underlings[0].cumulative
        first_err_ids = [id(err) for err in cum_dict['first']['errors']]
        second_err_ids = [id(err) for err in cum_dict['second']['errors']]
        total_err_ids = [id(err) for err in cum_dict['']['errors']]
        all_ids = first_err_ids + second_err_ids + total_err_ids
        self.assertEqual(len(all_ids), len(set(all_ids)))
        for label in cum_dict:
            data = cum_dict[label]
            total_errors_count = sum(err['cnt'] for err in data['errors'])
            self.assertEqual(data['fail'], total_errors_count)

    def test_errors_variety(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader1 = get_fail_reader()
        reader2 = get_fail_reader_alot()
        self.obj.max_error_count = 9
        self.obj.add_underling(reader1)
        self.obj.add_underling(reader2)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        self.assertLessEqual(len(cum_dict['']['errors']), self.obj.max_error_count)

    def test_uniq_errors(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.max_error_count = 9
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        self.assertEqual(len(cum_dict['']['errors']), 3)

    def test_set_rtimes_len(self):
        self.obj.settings['rtimes-len'] = 42
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.add_underling(reader)
        listener = MockListener()
        self.obj.add_listener(listener)
        self.obj.check()
        for dp in listener.results:
            for kpiset in dp['cumulative'].values():
                self.assertEqual(42, kpiset.rtimes_len)
            for kpiset in dp['current'].values():
                self.assertNotEqual(42, kpiset.rtimes_len)

    def test_inf_values(self):
        self.obj.settings['max-buffer-len'] = "inf"
        self.obj.prepare()
        self.assertEqual(self.obj.max_buffer_len, float("inf"))

    def test_datapoint_to_json(self):
        self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        self.obj.prepare()
        self.obj.add_underling(get_success_reader())
        for point in self.obj.datapoints():
            self.obj.log.info(to_json(point))

    def test_negative_response_time_scaling_crash(self):
        self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        self.obj.prepare()

        self.sniff_log(self.obj.log)

        mock = MockReader()
        mock.data.append((1, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((2, "first", 1, -r(), r(), r(), 200, 'FAILx1', '', 0))
        mock.data.append((5, "first", 1, -r(), r(), r(), 200, None, '', 0))
        mock.data.append((7, "second", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((3, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((6, "second", 1, -r(), r(), r(), 200, 'unique FAIL', '', 0))

        self.obj.add_underling(mock)

        self.obj.check()
        for point in self.obj.datapoints():
            self.obj.log.info(to_json(point))

        self.assertIn("Negative response time reported", self.log_recorder.warn_buff.getvalue())
def test_set_rtimes_len(self):
    obj = ConsolidatingAggregator()
    obj.settings['rtimes-len'] = 42
    obj.prepare()
    reader = self.get_fail_reader()
    obj.add_underling(reader)
    listener = MockListener()
    obj.add_listener(listener)
    obj.check()
    for dp in listener.results:
        for kpiset in dp['cumulative'].values():
            self.assertEqual(42, kpiset.rtimes_len)
        for kpiset in dp['current'].values():
            self.assertNotEqual(42, kpiset.rtimes_len)

def test_errors_cumulative(self):
    aggregator = ConsolidatingAggregator()
    aggregator.track_percentiles = [50]
    aggregator.prepare()
    reader = self.get_fail_reader()
    aggregator.add_underling(reader)
    aggregator.shutdown()
    aggregator.post_process()
    cum_dict = aggregator.underlings[0].cumulative
    first_err_ids = [id(err) for err in cum_dict['first']['errors']]
    second_err_ids = [id(err) for err in cum_dict['second']['errors']]
    total_err_ids = [id(err) for err in cum_dict['']['errors']]
    all_ids = first_err_ids + second_err_ids + total_err_ids
    self.assertEqual(len(all_ids), len(set(all_ids)))
    for label in cum_dict:
        data = cum_dict[label]
        total_errors_count = sum(err['cnt'] for err in data['errors'])
        self.assertEqual(data['fail'], total_errors_count)

def test_inf_values(self):
    obj = ConsolidatingAggregator()
    obj.settings['max-buffer-len'] = "inf"
    obj.prepare()
    self.assertEqual(obj.max_buffer_len, float("inf"))

def setUp(self):
    super(TestConsolidatingAggregator, self).setUp()
    self.obj = ConsolidatingAggregator()
Example #17
class TestConsolidatingAggregator(BZTestCase):
    def setUp(self):
        super(TestConsolidatingAggregator, self).setUp()
        self.obj = ConsolidatingAggregator()
        self.obj.engine = EngineEmul()

    def test_two_executions(self):
        self.obj.track_percentiles = [0, 50, 100]
        self.obj.prepare()
        underling1 = get_success_reader()
        underling2 = get_success_reader()
        self.obj.add_underling(underling1)
        self.obj.add_underling(underling2)

        cnt = 0
        for _ in range(1, 10):
            for point in self.obj.datapoints():
                self.assertEqual(2, len(point[DataPoint.SUBRESULTS]))
                overall = point[DataPoint.CURRENT]['']
                self.assertEqual(2, overall[KPISet.CONCURRENCY])
                self.assertGreater(overall[KPISet.PERCENTILES]["100.0"], 0)
                self.assertGreater(overall[KPISet.AVG_RESP_TIME], 0)
                cnt += 1

        self.assertEqual(2, cnt)

    def test_new_aggregator(self):
        # aggregator's config
        self.obj.extend_aggregation = True

        reader = MockReader()
        watcher = MockReader()

        # executor/reporter prepare level
        self.obj.add_underling(reader)
        self.obj.add_listener(watcher)

        # send rules to underlings
        self.obj.startup()

        reader.buffer_scale_idx = '100.0'
        # data format: t_stamp, label, conc, r_time, con_time, latency, r_code, error, trname, byte_count
        reader.data.append((1, "a", 1, 1, 1, 1, 200, None, '', 0))
        reader.data.append((2, "b", 1, 2, 2, 2, 200, 'OK', '', 0))
        reader.data.append((2, "b", 1, 3, 3, 3, 404, "Not Found", '', 0))
        reader.data.append((2, "c", 1, 4, 4, 4, 200, None, '', 0))
        reader.data.append((3, "d", 1, 5, 5, 5, 200, None, '', 0))
        reader.data.append((4, "b", 1, 6, 6, 6, 200, None, '', 0))

        # let's collect data to seconds and send something aggregated to watcher
        self.obj.shutdown()
        self.obj.post_process()

        data_points = watcher.results[-1][DataPoint.CUMULATIVE]
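        # with extend-aggregation enabled, every label is reported once per sample
        # state, hence the '-0'/'-1'/'-2' suffixes plus the '' overall entry below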
        self.assertEqual(7, len(data_points))
        sample_labels = {'a-0', 'b-0', 'b-1', 'b-2', 'c-0', 'd-0', ''}
        self.assertEqual(sample_labels, set(data_points.keys()))

    def test_errors_cumulative(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.underlings[0].cumulative
        first_err_ids = [id(err) for err in cum_dict['first']['errors']]
        second_err_ids = [id(err) for err in cum_dict['second']['errors']]
        total_err_ids = [id(err) for err in cum_dict['']['errors']]
        all_ids = first_err_ids + second_err_ids + total_err_ids
        self.assertEqual(len(all_ids), len(set(all_ids)))
        for label in cum_dict:
            data = cum_dict[label]
            total_errors_count = sum(err['cnt'] for err in data['errors'])
            self.assertEqual(data['fail'], total_errors_count)

    def test_labels_variety(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader1 = get_success_reader()
        reader2 = get_success_reader_alot()
        self.obj.log.info(len(reader1.data) + len(reader2.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader1)
        self.obj.add_underling(reader2)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        len_limit = self.obj.generalize_labels + 1  # due to randomness, it can go a bit higher than the limit
        labels = list(cum_dict.keys())
        self.assertGreaterEqual(len(labels), self.obj.generalize_labels / 2)  # assert that it's at least half full
        self.assertLessEqual(len(labels), len_limit + 1)  # allow +1 label because '' is cumulative

    def test_labels_constant_part(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_success_reader_alot(prefix='http://blazedemo.com/?r=')
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        self.assertGreaterEqual(len(labels), self.obj.generalize_labels / 2)  # assert that it's at least half full
        self.assertLessEqual(len(labels), self.obj.generalize_labels + 1)  # allow +1 label because '' is cumulative

    def test_labels_aggressive_folding(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_success_reader_selected_labels()
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        self.assertEqual(len(labels), 6)

    def test_labels_aggressive_folding_2(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        label_count = 50
        reader = get_success_reader_shrinking_labels(max_label_size=int(label_count * 2), count=label_count)
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = label_count
        self.obj.add_underling(reader)
        last = None
        for point in self.obj.datapoints(True):
            last = point
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        labels_count = len(labels)
        self.assertLessEqual(labels_count, label_count + 1)  # didn't overflow
        self.assertGreaterEqual(labels_count, label_count * 0.25)  # at least a quarter-filled
        self.assertEqual(1, len(last[DataPoint.SUBRESULTS]))
        self.assertEqual(last, last[DataPoint.SUBRESULTS][0])

    def test_errors_variety(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader1 = get_fail_reader()
        reader2 = get_fail_reader_alot()
        self.obj.max_error_count = 50
        self.obj.add_underling(reader1)
        self.obj.add_underling(reader2)
        self.obj.shutdown()
        self.obj.post_process()
        expected = self.obj.max_error_count  # due to randomness, it can go a bit higher than the limit
        self.assertLessEqual(len(self.obj.known_errors), expected)
        self.assertGreaterEqual(len(self.obj.known_errors), self.obj.max_error_count / 2)  # assert that it's at least half full

    def test_uniq_errors(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.max_error_count = 9
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        self.assertEqual(len(cum_dict['']['errors']), 3)

    def test_set_rtimes_len(self):
        self.obj.settings['histogram-initial'] = 10.0
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.add_underling(reader)
        listener = MockListener()
        self.obj.add_listener(listener)
        self.obj.check()
        for dp in listener.results:
            for kpiset in dp['cumulative'].values():
                self.assertEqual(10000, kpiset[KPISet.RESP_TIMES].high)
            for kpiset in dp['current'].values():
                self.assertEqual(10000, kpiset[KPISet.RESP_TIMES].high)

    def test_inf_values(self):
        self.obj.settings['max-buffer-len'] = "inf"
        self.obj.prepare()
        self.assertEqual(self.obj.max_buffer_len, float("inf"))

    def test_datapoint_to_json(self):
        self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        self.obj.prepare()
        self.obj.add_underling(get_success_reader())
        for point in self.obj.datapoints():
            if point[DataPoint.SUBRESULTS] == [point]:
                del point[DataPoint.SUBRESULTS]
            self.obj.log.info(to_json(point))

    def test_negative_response_time_scaling_crash(self):
        self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        self.obj.prepare()

        self.sniff_log(self.obj.log)

        mock = MockReader()
        mock.data.append((1, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((2, "first", 1, -r(), r(), r(), 200, 'FAILx1', '', 0))
        mock.data.append((5, "first", 1, -r(), r(), r(), 200, None, '', 0))
        mock.data.append((7, "second", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((3, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((6, "second", 1, -r(), r(), r(), 200, 'unique FAIL', '', 0))

        self.obj.add_underling(mock)

        self.obj.check()
        for point in self.obj.datapoints():
            self.obj.log.info(to_json(point))

        self.assertIn("Negative response time reported", self.log_recorder.warn_buff.getvalue())

    def test_ramp_up_exclude(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        self.obj.engine.config['settings']['ramp-up-exclude'] = True
        self.obj.engine.config['execution'] = [
            {'scenario': 'first', 'ramp-up': 50},
            {'scenario': 'second', 'ramp-up': '1s'},
            {'scenario': 'third'}
        ]
        self.obj.engine.config['scenarios'] = BetterDict.from_dict({
            'first': {'requests': [{'url': 'first'}]},
            'second': {'requests': [{'url': 'second'}]},
            'third': {'requests': [{'url': 'third'}]}})
        reader = get_success_reader()
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
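        # all samples fall within the executions' ramp-up windows, so with
        # ramp-up-exclude enabled nothing should reach the cumulative stats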
        self.assertEqual(self.obj.cumulative, {})
Example #18
def test_case1(self):
    mock = BZMock()

    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/masters/1/sessions': {"result": {"sessions": []}},
        'https://a.blazemeter.com/api/v4/masters/1/full': {"result": {"sessions": []}},
        'https://a.blazemeter.com/api/v4/masters/1': {"result": {"note": "message"}},
        'https://a.blazemeter.com/api/v4/masters/1/status': [
            {"result": {"id": 1, "status": "CREATE"}},
            {"result": {"id": 1, "status": "ENDED", "progress": 101}},
        ],
    })

    mock.mock_post = {
        'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
        'https://a.blazemeter.com/api/v4/tests': {"result": {"id": 1}},
        'https://a.blazemeter.com/api/v4/tests/1/files': {"result": None},
        'https://a.blazemeter.com/api/v4/tests/1/start': {"result": {"id": 1}},
        'https://a.blazemeter.com/api/v4/masters/1/stop': {"result": None},
        'https://a.blazemeter.com/api/v4/masters/1/public-token': {"result": {"publicToken": "token"}},
    }

    mock.mock_patch = {
        'https://a.blazemeter.com/api/v4/tests/1': {"result": {}}
    }

    prov = CloudProvisioning()
    prov.browser_open = None
    prov.public_report = True
    prov.user.token = "test"
    prov.engine = EngineEmul()
    prov.engine.aggregator = ConsolidatingAggregator()
    # prov.engine.config.merge({"modules": {"blazemeter": {"browser-open": False}}})
    prov.engine.config[ScenarioExecutor.EXEC] = [{
        "executor": "mock",
        "locations": {"aws": 1},
        "files": ModuleMock().get_resource_files()
    }]
    mock.apply(prov.user)

    prov.prepare()
    prov.startup()
    prov.check()
    prov._last_check_time = 0
    prov.check()
    prov.shutdown()
    prov.post_process()
class TestConsolidatingAggregator(BZTestCase):
    def setUp(self):
        super(TestConsolidatingAggregator, self).setUp()
        self.obj = ConsolidatingAggregator()

    def test_two_executions(self):
        self.obj.track_percentiles = [0, 50, 100]
        self.obj.prepare()
        underling1 = get_success_reader()
        underling2 = get_success_reader()
        self.obj.add_underling(underling1)
        self.obj.add_underling(underling2)

        cnt = 0
        for _ in range(1, 10):
            for point in self.obj.datapoints():
                overall = point[DataPoint.CURRENT]['']
                self.assertEqual(2, overall[KPISet.CONCURRENCY])
                self.assertGreater(overall[KPISet.PERCENTILES]["100.0"], 0)
                self.assertGreater(overall[KPISet.AVG_RESP_TIME], 0)
                cnt += 1

        self.assertEqual(2, cnt)

    def test_errors_cumulative(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.underlings[0].cumulative
        first_err_ids = [id(err) for err in cum_dict['first']['errors']]
        second_err_ids = [id(err) for err in cum_dict['second']['errors']]
        total_err_ids = [id(err) for err in cum_dict['']['errors']]
        all_ids = first_err_ids + second_err_ids + total_err_ids
        self.assertEqual(len(all_ids), len(set(all_ids)))
        for label in cum_dict:
            data = cum_dict[label]
            total_errors_count = sum(err['cnt'] for err in data['errors'])
            self.assertEqual(data['fail'], total_errors_count)

    def test_labels_variety(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader1 = get_success_reader()
        reader2 = get_success_reader_alot()
        self.obj.log.info(len(reader1.data) + len(reader2.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader1)
        self.obj.add_underling(reader2)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        len_limit = self.obj.generalize_labels + 1  # due to randomness, it can go a bit higher than the limit
        labels = list(cum_dict.keys())
        self.assertGreaterEqual(len(labels), self.obj.generalize_labels / 2)  # assert that it's at least half full
        self.assertLessEqual(len(labels), len_limit + 1)  # allow +1 label because '' is cumulative

    def test_labels_constant_part(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_success_reader_alot(prefix='http://blazedemo.com/?r=')
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        self.assertGreaterEqual(len(labels), self.obj.generalize_labels / 2)  # assert that it's at least half full
        self.assertLessEqual(len(labels), self.obj.generalize_labels + 1)  # allow +1 label because '' is cumulative

    def test_labels_aggressive_folding(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_success_reader_selected_labels()
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = 25
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        self.assertEqual(len(labels), 6)

    def test_labels_aggressive_folding_2(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        LABEL_COUNT = 50
        reader = get_success_reader_shrinking_labels(max_label_size=int(LABEL_COUNT * 2), count=LABEL_COUNT)
        self.obj.log.info(len(reader.data))
        self.obj.generalize_labels = LABEL_COUNT
        self.obj.add_underling(reader)
        for point in self.obj.datapoints():
            last = point
        cum_dict = self.obj.cumulative
        labels = list(cum_dict.keys())
        labels_count = len(labels)
        self.assertLessEqual(labels_count, LABEL_COUNT + 1)  # didn't overflow
        self.assertGreaterEqual(labels_count, LABEL_COUNT * 0.25)  # at least a quarter-filled

    def test_errors_variety(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader1 = get_fail_reader()
        reader2 = get_fail_reader_alot()
        self.obj.max_error_count = 50
        self.obj.add_underling(reader1)
        self.obj.add_underling(reader2)
        self.obj.shutdown()
        self.obj.post_process()
        expected = self.obj.max_error_count  # due to randomness, it can go a bit higher than the limit
        self.assertLessEqual(len(self.obj.known_errors), expected)
        self.assertGreaterEqual(len(self.obj.known_errors), self.obj.max_error_count / 2)  # assert that it's at least half full

    def test_uniq_errors(self):
        self.obj.track_percentiles = [50]
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.max_error_count = 9
        self.obj.add_underling(reader)
        self.obj.shutdown()
        self.obj.post_process()
        cum_dict = self.obj.cumulative
        self.assertEqual(len(cum_dict['']['errors']), 3)

    def test_set_rtimes_len(self):
        self.obj.settings['histogram-initial'] = 10.0
        self.obj.prepare()
        reader = get_fail_reader()
        self.obj.add_underling(reader)
        listener = MockListener()
        self.obj.add_listener(listener)
        self.obj.check()
        for dp in listener.results:
            for kpiset in dp['cumulative'].values():
                self.assertEqual(10000, kpiset[KPISet.RESP_TIMES].high)
            for kpiset in dp['current'].values():
                self.assertEqual(10000, kpiset[KPISet.RESP_TIMES].high)

    def test_inf_values(self):
        self.obj.settings['max-buffer-len'] = "inf"
        self.obj.prepare()
        self.assertEqual(self.obj.max_buffer_len, float("inf"))

    def test_datapoint_to_json(self):
        self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        self.obj.prepare()
        self.obj.add_underling(get_success_reader())
        for point in self.obj.datapoints():
            self.obj.log.info(to_json(point))

    def test_negative_response_time_scaling_crash(self):
        self.obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        self.obj.prepare()

        self.sniff_log(self.obj.log)

        mock = MockReader()
        mock.data.append((1, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((2, "first", 1, -r(), r(), r(), 200, 'FAILx1', '', 0))
        mock.data.append((5, "first", 1, -r(), r(), r(), 200, None, '', 0))
        mock.data.append((7, "second", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((3, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((6, "second", 1, -r(), r(), r(), 200, 'unique FAIL', '', 0))

        self.obj.add_underling(mock)

        self.obj.check()
        for point in self.obj.datapoints():
            self.obj.log.info(to_json(point))

        self.assertIn("Negative response time reported", self.log_recorder.warn_buff.getvalue())
Example #20
def test_terminate_only(self):
    "test is terminated only when it was started and didn't finish"
    obj = CloudProvisioning()
    obj.engine = EngineEmul()
    obj.engine.config.merge({
        ScenarioExecutor.EXEC: {
            "executor": "mock",
            "concurrency": 5500,
            "locations": {
                "us-east-1": 1,
                "us-west": 1,
            }
        },
        "modules": {
            "mock": ModuleMock.__module__ + "." + ModuleMock.__name__
        },
        "provisioning": "mock"
    })
    obj.parameters = obj.engine.config['execution']
    obj.engine.aggregator = ConsolidatingAggregator()

    obj.settings["token"] = "FakeToken"
    obj.settings["browser-open"] = False
    obj.settings["check-interval"] = "0ms"  # do not skip checks
    obj.settings["use-deprecated-api"] = False
    obj.client = client = BlazeMeterClientEmul(obj.log)
    client.results.append({"result": []})  # find collection
    client.results.append({"result": []})  # find test
    client.results.append(self.__get_user_info())  # user
    client.results.append({})  # upload files
    client.results.append({"result": {"name": "Taurus Collection", "items": []}})  # transform config to collection
    client.results.append({"result": {"id": 42}})  # create collection
    client.results.append({"result": {"id": id(obj)}})  # start
    client.results.append({"result": {
        "id": id(obj),
        "sessions": [
            {"id": "s1", "status": "JMETER_CONSOLE_INIT"},
            {"id": "s2", "status": "JMETER_CONSOLE_INIT"},
        ]
    }})
    client.results.append({"result": []})  # sessions
    client.results.append({"result": {}})  # force start
    client.results.append({"result": {"progress": 120, "status": "ENDED"}})  # status should trigger shutdown
    client.results.append({"result": []})  # sessions

    obj.prepare()
    obj.startup()
    obj.check()  # this one should trigger force start
    self.assertTrue(obj.check())
    obj.shutdown()
    obj.post_process()
    self.assertEqual(client.results, [])

def setUp(self):
    super(TestConsolidatingAggregator, self).setUp()
    self.obj = ConsolidatingAggregator()
    self.obj.engine = EngineEmul()
Example #22
def test_inf_values(self):
    obj = ConsolidatingAggregator()
    obj.settings['max-buffer-len'] = "inf"
    obj.prepare()
    self.assertEqual(obj.max_buffer_len, float("inf"))