    def test_negative_response_time_scaling_crash(self):
        obj = ConsolidatingAggregator()
        obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        obj.prepare()

        self.sniff_log(obj.log)

        mock = MockReader()
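        # queue samples whose response-time field is negative; the aggregator
        # should log the "Negative response time reported" warning asserted below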
        mock.data.append((1, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((2, "first", 1, -r(), r(), r(), 200, 'FAILx1', '', 0))
        mock.data.append((5, "first", 1, -r(), r(), r(), 200, None, '', 0))
        mock.data.append(
            (7, "second", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append((3, "first", 1, -r(), r(), r(), 200, 'FAILx3', '', 0))
        mock.data.append(
            (6, "second", 1, -r(), r(), r(), 200, 'unique FAIL', '', 0))

        obj.add_underling(mock)

        obj.check()
        for point in obj.datapoints():
            obj.log.info(to_json(point))

        self.assertIn("Negative response time reported",
                      self.log_recorder.warn_buff.getvalue())
    def test_datapoint_to_json(self):
        obj = ConsolidatingAggregator()
        obj.track_percentiles = [0.0, 50.0, 95.0, 99.0, 100.0]
        obj.prepare()
        obj.add_underling(self.get_success_reader())
        for point in obj.datapoints():
            # serializing each datapoint to JSON must not raise
            obj.log.info(to_json(point))
    def test_errors_cumulative(self):
        aggregator = ConsolidatingAggregator()
        aggregator.track_percentiles = [50]
        aggregator.prepare()
        reader = self.get_fail_reader()
        aggregator.add_underling(reader)
        aggregator.shutdown()
        aggregator.post_process()
        cum_dict = aggregator.underlings[0].cumulative
        # error entries must not be shared between per-label and overall KPISets
        first_err_ids = [id(err) for err in cum_dict['first']['errors']]
        second_err_ids = [id(err) for err in cum_dict['second']['errors']]
        total_err_ids = [id(err) for err in cum_dict['']['errors']]
        all_ids = first_err_ids + second_err_ids + total_err_ids
        self.assertEqual(len(all_ids), len(set(all_ids)))
        # each label's failure count must equal the sum of its error counters
        for label in cum_dict:
            data = cum_dict[label]
            total_errors_count = sum(err['cnt'] for err in data['errors'])
            self.assertEqual(data['fail'], total_errors_count)
    def test_two_executions(self):
        # check consolidator
        obj = ConsolidatingAggregator()
        obj.prepare()
        obj.track_percentiles = [0, 50, 100]
        underling1 = self.get_reader()
        underling2 = self.get_reader()
        obj.add_underling(underling1)
        obj.add_underling(underling2)

        cnt = 0
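        # two identical underlings should produce exactly two datapoints,
        # each reporting an overall concurrency of 2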
        for _ in range(1, 10):
            for point in obj.datapoints():
                overall = point[DataPoint.CURRENT]['']
                self.assertEqual(2, overall[KPISet.CONCURRENCY])
                self.assertGreater(overall[KPISet.PERCENTILES]["100.0"], 0)
                self.assertGreater(overall[KPISet.AVG_RESP_TIME], 0)
                cnt += 1

        self.assertEqual(2, cnt)
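

# A minimal sketch of the MockReader helper used above, assuming it wraps
# bzt's ResultsReader and simply replays pre-queued sample tuples. The real
# helper (normally kept in the shared test utilities) may differ; the base
# class, constructor call, and _read() signature here are assumptions.
from bzt.modules.aggregator import ResultsReader


class MockReader(ResultsReader):
    def __init__(self):
        super(MockReader, self).__init__()
        self.data = []  # sample tuples appended by the tests above

    def _read(self, final_pass=False):
        # hand queued samples to the aggregator, oldest first
        while self.data:
            yield self.data.pop(0)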