def test_aggregate(self):
        """Check aggregate() with explicit max, min and average reducers."""
        aggregator = RecordAggregator()

        # Populate with throughput values 0..9
        for value in range(10):
            aggregator.insert(PerfThroughput(value))

        def mean(records):
            # Records support '+' and scalar division; start from the
            # first record so sum() needs no neutral element.
            total = records[0]
            for record in records[1:]:
                total = total + record
            return (total * 1.0) / len(records)

        maximum = aggregator.aggregate(record_types=[PerfThroughput],
                                       reduce_func=max)
        minimum = aggregator.aggregate(record_types=[PerfThroughput],
                                       reduce_func=min)
        averaged = aggregator.aggregate(record_types=[PerfThroughput],
                                        reduce_func=mean)

        self.assertEqual(maximum[PerfThroughput],
                         PerfThroughput(9),
                         msg="Aggregation failed with max")
        self.assertEqual(minimum[PerfThroughput],
                         PerfThroughput(0),
                         msg="Aggregation failed with min")
        self.assertEqual(averaged[PerfThroughput],
                         PerfThroughput(4.5),
                         msg="Aggregation failed with average")
# Exemplo n.º 2
# 0
    def test_filter_records_filtered(self):
        """Verify filter_records() input validation and filtering."""
        aggregator = RecordAggregator()

        # Malformed calls must raise: filters without record_types,
        # and a record_types/filters pairing that cannot be applied.
        with self.assertRaises(Exception):
            aggregator.filter_records(filters=[(lambda x: False)])
        with self.assertRaises(Exception):
            aggregator.filter_records(record_types=[None, None],
                                      filters=[(lambda x: False)])

        # Three throughput records
        for value in (5, 1, 10):
            aggregator.insert(PerfThroughput(value))

        # Single-type filter: keep throughputs >= 5
        filtered = aggregator.filter_records(
            record_types=[PerfThroughput],
            filters=[(lambda v: v.value() >= 5)]).get_records()

        throughputs = [record.value() for record in filtered[PerfThroughput]]
        self.assertEqual(len(filtered[PerfThroughput]), 2)
        self.assertIn(5, throughputs)
        self.assertIn(10, throughputs)

        # Two latency records
        aggregator.insert(PerfLatency(3))
        aggregator.insert(PerfLatency(6))

        # Multi-type filter: one predicate per record type, applied pairwise
        filtered = aggregator.filter_records(
            record_types=[PerfLatency, PerfThroughput],
            filters=[(lambda v: v.value() == 3),
                     (lambda v: v.value() < 5)]).get_records()

        values = {
            record_type:
            [record.value() for record in filtered[record_type]]
            for record_type in (PerfLatency, PerfThroughput)
        }

        self.assertEqual(len(filtered[PerfLatency]), 1)
        self.assertIn(3, values[PerfLatency])

        self.assertEqual(len(filtered[PerfThroughput]), 1)
        self.assertIn(1, values[PerfThroughput])
 def _get_next_fake_results(self):
     """Produce one fake Measurement carrying a monotonically
     increasing throughput value."""
     self._fake_throughput += 1
     fake_record = PerfThroughput(self._fake_throughput)
     return [
         Measurement(gpu_data=MagicMock(),
                     non_gpu_data=[fake_record],
                     perf_config=MagicMock())
     ]
# Exemplo n.º 4
# 0
    def test_record_types(self):
        """record_types() reports the type of an inserted record."""
        aggregator = RecordAggregator()
        aggregator.insert(PerfThroughput(5))

        self.assertEqual(aggregator.record_types()[0], PerfThroughput)
# Exemplo n.º 5
# 0
    def _parse_output(self):
        """
        Extract throughput and p99-latency records from the raw
        perf_analyzer output held in self._output.
        """

        self._perf_records = []
        # The last 3 lines are skipped (presumably trailing
        # summary/blank lines in perf_analyzer's report).
        for line in self._output.split('\n')[:-3]:
            if 'Throughput:' in line:
                # The token after 'Throughput:' is the numeric value
                self._perf_records.append(
                    PerfThroughput(value=float(line.split()[1])))
            elif 'p99 latency:' in line:
                # Value is reported in us; convert to ms before storing
                value_part = line.split(' latency: ')[1]
                self._perf_records.append(
                    PerfLatency(value=float(value_part.split()[0]) / 1e3))

        if not self._perf_records:
            raise TritonModelAnalyzerException(
                'perf_analyzer output was not as expected.')
 def _get_next_fake_results(self):
     """Build one fake Measurement from the next synthetic
     throughput value."""
     next_value = self._get_next_perf_throughput_value()
     fake_record = PerfThroughput(next_value)
     return [
         Measurement(gpu_data=MagicMock(),
                     non_gpu_data=[fake_record],
                     perf_config=MagicMock())
     ]
# Exemplo n.º 7
# 0
    def test_filter_records_default(self):
        """filter_records() with no arguments returns everything inserted."""
        aggregator = RecordAggregator()

        inserted = PerfThroughput(5)
        aggregator.insert(inserted)

        # Default filter should hand back the single record unchanged
        fetched = aggregator.filter_records().get_records()[PerfThroughput][0]

        self.assertIsInstance(
            fetched,
            PerfThroughput,
            msg="Record types do not match after filter_records")

        self.assertEqual(fetched.value(),
                         inserted.value(),
                         msg="Values do not match after filter_records")
# Exemplo n.º 8
# 0
    def test_insert(self):
        """insert() increments the aggregator's total record count."""
        aggregator = RecordAggregator()

        self.assertEqual(aggregator.total(), 0)

        aggregator.insert(PerfThroughput(5))

        # One record in, total reflects it
        self.assertEqual(aggregator.total(), 1)
# Exemplo n.º 9
# 0
    def test_aggregate(self):
        """
        Check aggregate() with no reduce_func: each record type's
        default reduction is applied (the expected values imply max
        for PerfThroughput and average for GPUUtilization).
        """

        record_aggregator = RecordAggregator()

        # Insert 10 records of each type, values 0..9
        for i in range(10):
            record_aggregator.insert(PerfThroughput(i))

        for i in range(10):
            record_aggregator.insert(GPUUtilization(i))

        # Aggregate with each type's default reduction
        max_vals = record_aggregator.aggregate(record_types=[PerfThroughput])
        avg_vals = record_aggregator.aggregate(record_types=[GPUUtilization])

        self.assertEqual(max_vals[PerfThroughput],
                         PerfThroughput(9),
                         msg="Aggregation failed with max")

        # Fixed copy-paste bug: this message previously said "max"
        self.assertEqual(avg_vals[GPUUtilization],
                         GPUUtilization(4.5),
                         msg="Aggregation failed with average")
    def _get_next_measurements(self):
        """ Return fake measurements as if the run_configs had been executed """

        value = self._get_next_perf_throughput_value()
        if value is None:
            # No more synthetic values: signal exhaustion to the caller
            return None
        return Measurement(gpu_data=MagicMock(),
                           non_gpu_data=[PerfThroughput(value)],
                           perf_config=MagicMock())
# Exemplo n.º 11
# 0
    def test_groupby(self):
        """
        Group throughput records by timestamp and check one reduced
        record per group (expected values imply the per-group max).
        """

        record_aggregator = RecordAggregator()

        # Three throughputs across two timestamps; timestamp 1 has two
        record_aggregator.insert(PerfThroughput(5, timestamp=0))
        record_aggregator.insert(PerfThroughput(1, timestamp=1))
        record_aggregator.insert(PerfThroughput(10, timestamp=1))

        def groupby_criteria(record):
            return record.timestamp()

        # Fixed copy-paste bug: the groupby call and its assertions were
        # previously duplicated verbatim; run them once.
        records = record_aggregator.groupby([PerfThroughput], groupby_criteria)
        self.assertEqual(list(records[PerfThroughput]), [0, 1])
        self.assertEqual(
            list(records[PerfThroughput].values()),
            [PerfThroughput(5.0), PerfThroughput(10.0)])