Example #1
    def test_to_formatted_string(self):
        table = ResultTable(headers=TEST_HEADERS)
        for row in TEST_ROWS:
            table.insert_row_by_index(row)
        self.assertEqual(table.to_formatted_string(), TEST_TABLE_STR)

        table = ResultTable(headers=TEST_HEADERS)
        for row in TEST_ROWS:
            table.insert_row_by_index(row)
        self.assertEqual(
            table.to_formatted_string(separator=',', ignore_widths=True),
            TEST_CSV_STR)
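
TEST_HEADERS, TEST_ROWS, TEST_TABLE_STR, and TEST_CSV_STR are module-level fixtures defined elsewhere in the test module. A minimal sketch of their likely shape (the 10x10 grid matches the indices used in Example #4, but the exact values are assumptions):

    # Hypothetical fixtures -- the real test module defines its own values.
    TEST_HEADERS = [f"header{j}" for j in range(10)]
    TEST_ROWS = [[f"value {i}{j}" for j in range(10)] for i in range(10)]
    # TEST_TABLE_STR would hold the expected column-padded rendering of this
    # table, and TEST_CSV_STR the expected comma-separated rendering produced
    # by to_formatted_string(separator=',', ignore_widths=True).
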
Example #2
    def _build_summary_table(self, report_key, num_measurements, gpu_name):
        """
        Creates a result table corresponding
        to the best measurements for a particular
        model
        """

        summary_table = ResultTable(headers=[
            'Model Config Name', 'Max Dynamic Batch Size', 'Instance Count',
            'p99 Latency (ms)', 'Throughput (infer/sec)',
            'Max GPU Memory Usage (MB)', 'Average GPU Utilization (%)'
        ],
                                    title="Report Table")

        sorted_measurements = sorted(self._data[report_key],
                                     key=lambda x: x[1])

        # Construct summary sentence using best config
        best_config = sorted_measurements[0][0]
        dynamic_batching_str = best_config.dynamic_batching_string()
        dynamic_batch_phrase = "dynamic batching disabled" if \
            dynamic_batching_str == "Disabled" else \
            f"max dynamic batch size of {dynamic_batching_str}"
        model_config_dict = best_config.get_config()
        platform = (model_config_dict['backend']
                    if 'backend' in model_config_dict
                    else model_config_dict['platform'])
        summary_sentence = (
            f"In {num_measurements} measurement(s), "
            f"{best_config.instance_group_string()} model instance(s) "
            f"with {dynamic_batch_phrase} "
            f"on platform {platform} delivers "
            f"maximum throughput under the given constraints on GPU(s) {gpu_name}."
        )

        # Construct table
        for model_config, measurement in sorted_measurements:
            instance_group_str = model_config.instance_group_string()
            row = [
                model_config.get_field('name'),
                model_config.dynamic_batching_string(),  # each config's own setting
                instance_group_str,
                measurement.get_metric('perf_latency').value(),
                measurement.get_metric('perf_throughput').value(),
                measurement.get_metric('gpu_used_memory').value(),
                round(measurement.get_metric('gpu_utilization').value(), 1)
            ]
            summary_table.insert_row_by_index(row)
        return summary_table, summary_sentence
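
A hedged sketch of how the returned pair might be consumed (the report_manager object and the argument values are assumptions; only ResultTable's own methods come from the examples above):

    summary_table, summary_sentence = report_manager._build_summary_table(
        report_key='my_model', num_measurements=12, gpu_name='Tesla V100')
    print(summary_sentence)
    print(summary_table.to_formatted_string())  # padded, human-readable rendering
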
Example #3
    def test_add_table(self):
        result_table = ResultTable(['header1', 'header2'])
        # Try empty table
        self.report.add_table(table=result_table)
        table_style = "border: 1px solid black;border-collapse: collapse;text-align: center;width: 80%;padding: 5px 10px;font-size: 11pt"
        expected_report_body = ("<html><head><style></style></head><body>"
                                f"<center><table style=\"{table_style}\">"
                                "<tr>"
                                f"<th style=\"{table_style}\">header1</th>"
                                f"<th style=\"{table_style}\">header2</th>"
                                "</tr>"
                                "</table></center>"
                                "</body></html>")
        self.assertEqual(self.report.document(), expected_report_body)

        # Fill table
        for i in range(2):
            result_table.insert_row_by_index([f'value{i}1', f'value{i}2'])

        # Table now has two data rows
        self.report.add_table(table=result_table)
        expected_report_body = ("<html><head><style></style></head><body>"
                                f"<center><table style=\"{table_style}\">"
                                "<tr>"
                                f"<th style=\"{table_style}\">header1</th>"
                                f"<th style=\"{table_style}\">header2</th>"
                                "</tr>"
                                "</table></center>"
                                f"<center><table style=\"{table_style}\">"
                                "<tr>"
                                f"<th style=\"{table_style}\">header1</th>"
                                f"<th style=\"{table_style}\">header2</th>"
                                "</tr>"
                                "<tr>"
                                f"<td style=\"{table_style}\">value01</td>"
                                f"<td style=\"{table_style}\">value02</td>"
                                "</tr>"
                                "<tr>"
                                f"<td style=\"{table_style}\">value11</td>"
                                f"<td style=\"{table_style}\">value12</td>"
                                "</tr>"
                                "</table></center>"
                                "</body></html>")

        self.assertEqual(self.report.document(), expected_report_body)
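
The expected-HTML strings above repeat the same table skeleton; a small hypothetical helper could build them (this helper is not part of the test module, just a sketch of the structure the report emits):

    def table_html(style, headers, rows=()):
        """Build one <center><table>...</table></center> block."""
        ths = "".join(f'<th style="{style}">{h}</th>' for h in headers)
        html = f'<center><table style="{style}"><tr>{ths}</tr>'
        for row in rows:
            tds = "".join(f'<td style="{style}">{v}</td>' for v in row)
            html += f'<tr>{tds}</tr>'
        return html + '</table></center>'
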
Example #4
    def test_remove_methods(self):
        table = ResultTable(headers=TEST_HEADERS)
        # Pick a row
        row_idx = 4

        with self.assertRaises(TritonModelAnalyzerException):
            table.remove_row_by_index(index=row_idx)

        # Fill table
        for row in TEST_ROWS:
            table.insert_row_by_index(row)

        self.assertEqual(table.get_row_by_index(index=row_idx),
                         [f"value 4{j}" for j in range(10)])

        # Remove a row and check that the next one shifts into its place
        table.remove_row_by_index(index=row_idx)
        self.assertEqual(table.get_row_by_index(index=row_idx),
                         [f"value 5{j}" for j in range(10)])

        # Removing every remaining row and then one more must raise
        with self.assertRaises(TritonModelAnalyzerException):
            for i in range(10):
                table.remove_row_by_index(index=0)
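
A plain-list illustration of the shift behavior the test relies on (ordinary Python lists shown for comparison; ResultTable's internals are not assumed):

    # Removing index 4 shifts everything after it up by one,
    # exactly like list.pop(4) on a plain list:
    rows = [f"row{i}" for i in range(10)]
    rows.pop(4)
    assert rows[4] == "row5"  # the former row 5 now occupies index 4
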
Example #5
    def test_add_get_methods(self):
        table = ResultTable(headers=["Column 0", "Column 1"])

        # add/get single result
        table.insert_row_by_index(["value 0,0", "value 0,1"])
        self.assertEqual(table.get_row_by_index(index=0),
                         ["value 0,0", "value 0,1"])
        table.insert_row_by_index(["value -1,0", "value -1,1"], index=0)
        self.assertEqual(table.get_row_by_index(index=0),
                         ["value -1,0", "value -1,1"])
        table.insert_row_by_index(["value 1,0", "value 1,1"], index=2)
        self.assertEqual(table.get_row_by_index(index=2),
                         ["value 1,0", "value 1,1"])
Example #6
    def test_create_headers(self):
        table = ResultTable(headers=["Column 0"])
        self.assertEqual(table.headers(), ["Column 0"])
        self.assertEqual(table.column_widths(),
                         [len("Column 0") + ResultTable.column_padding])
    def _build_detailed_table(self, model_config_name):
        """
        Build the table used in the detailed report
        """

        model_config, measurements = self._detailed_report_data[
            model_config_name]
        sort_by_tag = 'perf_latency_p99' if self._mode == 'online' else 'perf_throughput'
        measurements = sorted(measurements,
                              key=lambda x: x.get_metric_value(sort_by_tag),
                              reverse=True)
        cpu_only = model_config.cpu_only()

        first_column_header = 'Request Concurrency' if self._mode == 'online' else 'Client Batch Size'
        first_column_tag = 'concurrency-range' if self._mode == 'online' else 'batch-size'
        if not cpu_only:
            detailed_table = ResultTable(headers=[
                first_column_header, 'p99 Latency (ms)',
                'Client Response Wait (ms)', 'Server Queue (ms)',
                'Server Compute Input (ms)', 'Server Compute Infer (ms)',
                'Throughput (infer/sec)', 'Max CPU Memory Usage (MB)',
                'Max GPU Memory Usage (MB)', 'Average GPU Utilization (%)'
            ],
                                         title="Detailed Table")
        else:
            detailed_table = ResultTable(headers=[
                first_column_header, 'p99 Latency (ms)',
                'Client Response Wait (ms)', 'Server Queue (ms)',
                'Server Compute Input (ms)', 'Server Compute Infer (ms)',
                'Throughput (infer/sec)', 'Max CPU Memory Usage (MB)'
            ],
                                         title="Detailed Table")

        # Construct table
        if not cpu_only:
            for measurement in measurements:
                row = [
                    measurement.get_parameter(first_column_tag),
                    measurement.get_metric_value('perf_latency_p99'),
                    measurement.get_metric_value('perf_client_response_wait'),
                    measurement.get_metric_value('perf_server_queue'),
                    measurement.get_metric_value('perf_server_compute_input'),
                    measurement.get_metric_value('perf_server_compute_infer'),
                    measurement.get_metric_value('perf_throughput'),
                    measurement.get_metric_value('cpu_used_ram'),
                    measurement.get_metric_value('gpu_used_memory'),
                    round(measurement.get_metric_value('gpu_utilization'), 1)
                ]
                detailed_table.insert_row_by_index(row)
        else:
            for measurement in measurements:
                row = [
                    measurement.get_parameter(first_column_tag),
                    measurement.get_metric_value('perf_latency_p99'),
                    measurement.get_metric_value('perf_client_response_wait'),
                    measurement.get_metric_value('perf_server_queue'),
                    measurement.get_metric_value('perf_server_compute_input'),
                    measurement.get_metric_value('perf_server_compute_infer'),
                    measurement.get_metric_value('perf_throughput'),
                    measurement.get_metric_value('cpu_used_ram')
                ]
                detailed_table.insert_row_by_index(row)
        return detailed_table
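
The two construction loops differ only in the trailing GPU columns; a hedged refactoring sketch of a single shared loop (equivalent behavior, not the repository's actual code):

    for measurement in measurements:
        row = [measurement.get_parameter(first_column_tag)]
        row += [
            measurement.get_metric_value(tag) for tag in (
                'perf_latency_p99', 'perf_client_response_wait',
                'perf_server_queue', 'perf_server_compute_input',
                'perf_server_compute_infer', 'perf_throughput', 'cpu_used_ram')
        ]
        if not cpu_only:
            row.append(measurement.get_metric_value('gpu_used_memory'))
            row.append(round(measurement.get_metric_value('gpu_utilization'), 1))
        detailed_table.insert_row_by_index(row)
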
    def _build_summary_table(self,
                             report_key,
                             num_measurements,
                             gpu_name=None,
                             cpu_only=False):
        """
        Creates a result table corresponding
        to the best measurements for a particular
        model
        """

        if not cpu_only:
            summary_table = ResultTable(headers=[
                'Model Config Name', 'Max Batch Size', 'Dynamic Batching',
                'Instance Count', 'p99 Latency (ms)', 'Throughput (infer/sec)',
                'Max CPU Memory Usage (MB)', 'Max GPU Memory Usage (MB)',
                'Average GPU Utilization (%)'
            ],
                                        title="Report Table")
        else:
            summary_table = ResultTable(headers=[
                'Model Config Name', 'Max Batch Size', 'Dynamic Batching',
                'Instance Count', 'p99 Latency (ms)', 'Throughput (infer/sec)',
                'Max CPU Memory Usage (MB)'
            ],
                                        title="Report Table")

        sorted_measurements = sorted(self._summary_data[report_key],
                                     key=lambda x: x[1])

        # Construct summary sentence using best config
        best_config = sorted_measurements[0][0]
        model_config_dict = best_config.get_config()
        platform = (model_config_dict['backend']
                    if 'backend' in model_config_dict
                    else model_config_dict['platform'])
        max_batch_size_phrase = f"max batch size of {best_config.max_batch_size()}"
        dynamic_batch_phrase = self._get_dynamic_batching_phrase(best_config)

        summary_sentence = (
            f"In {num_measurements} measurement(s), "
            f"config {best_config.get_field('name')} ("
            f"{best_config.instance_group_string()} model instance(s) "
            f"with {max_batch_size_phrase} and {dynamic_batch_phrase}) "
            f"on platform {platform} delivers maximum throughput under "
            f"the given constraints{' on GPU(s) '+gpu_name if not best_config.cpu_only() else ''}."
        )

        # Construct table
        if not cpu_only:
            for model_config, measurement in sorted_measurements:
                instance_group_str = model_config.instance_group_string()
                max_batch_size = model_config.max_batch_size()
                row = [
                    model_config.get_field('name'), max_batch_size,
                    model_config.dynamic_batching_string(), instance_group_str,
                    measurement.get_metric_value('perf_latency_p99'),
                    measurement.get_metric_value('perf_throughput'),
                    measurement.get_metric_value('cpu_used_ram'),
                    measurement.get_metric_value('gpu_used_memory'),
                    round(measurement.get_metric_value('gpu_utilization'), 1)
                ]
                summary_table.insert_row_by_index(row)
        else:
            for model_config, measurement in sorted_measurements:
                instance_group_str = model_config.instance_group_string()
                max_batch_size = model_config.max_batch_size()
                row = [
                    model_config.get_field('name'), max_batch_size,
                    model_config.dynamic_batching_string(), instance_group_str,
                    measurement.get_metric_value('perf_latency_p99'),
                    measurement.get_metric_value('perf_throughput'),
                    measurement.get_metric_value('cpu_used_ram')
                ]
                summary_table.insert_row_by_index(row)
        return summary_table, summary_sentence
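
The same collapse applies to the summary loops, which again differ only in the trailing GPU columns; a hedged sketch of one shared loop (equivalent behavior, not the repository's code):

    for model_config, measurement in sorted_measurements:
        row = [
            model_config.get_field('name'), model_config.max_batch_size(),
            model_config.dynamic_batching_string(),
            model_config.instance_group_string(),
            measurement.get_metric_value('perf_latency_p99'),
            measurement.get_metric_value('perf_throughput'),
            measurement.get_metric_value('cpu_used_ram')
        ]
        if not cpu_only:
            row.append(measurement.get_metric_value('gpu_used_memory'))
            row.append(round(measurement.get_metric_value('gpu_utilization'), 1))
        summary_table.insert_row_by_index(row)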