Example #1
    def _construct_measurement(self, gpu_metric_values, non_gpu_metric_values,
                               comparator):
        """
        Construct a measurement from the given data
        """

        # gpu_data will be a dict whose keys are gpu_ids and values
        # are lists of Records
        gpu_data = {}
        for gpu_id, metrics_values in gpu_metric_values.items():
            gpu_data[gpu_id] = []
            gpu_metric_tags = list(metrics_values.keys())
            for i, gpu_metric in enumerate(
                    MetricsManager.get_metric_types(gpu_metric_tags)):
                gpu_data[gpu_id].append(
                    gpu_metric(value=metrics_values[gpu_metric_tags[i]]))

        # Non-GPU data will be a list of Records
        non_gpu_data = []
        non_gpu_metric_tags = list(non_gpu_metric_values.keys())
        for i, metric in enumerate(
                MetricsManager.get_metric_types(non_gpu_metric_tags)):
            non_gpu_data.append(
                metric(value=non_gpu_metric_values[non_gpu_metric_tags[i]]))

        return Measurement(gpu_data=gpu_data,
                           non_gpu_data=non_gpu_data,
                           perf_config=None,
                           comparator=comparator)
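The loop above relies on MetricsManager.get_metric_types returning Record classes in the same order as the tags passed in, so the index i lines up across both lists. Below is a minimal, self-contained sketch of that tag-to-Record pattern; FakeRecord, its subclasses, the registry, and the stand-in get_metric_types are illustrative assumptions, not model_analyzer APIs.

# Stand-ins for the Record class hierarchy and the metric registry
class FakeRecord:
    def __init__(self, value):
        self.value = value

class GpuUtilization(FakeRecord):
    pass

class GpuUsedMemory(FakeRecord):
    pass

FAKE_METRIC_REGISTRY = {
    'gpu_utilization': GpuUtilization,
    'gpu_used_memory': GpuUsedMemory,
}

def get_metric_types(tags):
    # Preserves tag order, like the real lookup is assumed to do
    return [FAKE_METRIC_REGISTRY[tag] for tag in tags]

gpu_metric_values = {'GPU-5f1a2c3d': {'gpu_utilization': 0.83,
                                      'gpu_used_memory': 4096}}

gpu_data = {}
for gpu_id, metrics_values in gpu_metric_values.items():
    tags = list(metrics_values.keys())
    # zip keeps each tag aligned with its Record class, avoiding the index
    gpu_data[gpu_id] = [
        metric_type(value=metrics_values[tag])
        for tag, metric_type in zip(tags, get_metric_types(tags))
    ]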
Example #2
def construct_measurement(model_name, gpu_metric_values, non_gpu_metric_values,
                          comparator):
    """
    Construct a measurement from the given data

    Parameters
    ----------
    model_name: str
        The name of the model that generated this result
    gpu_metric_values: dict
        Keys are GPU UUIDs; values are dicts mapping GPU metric tags
        to their values
    non_gpu_metric_values: dict
        Keys are non-GPU perf metric tags; values are their values
    comparator: ResultComparator
        The comparator used to compare measurements/results
    
    Returns
    -------
    Measurement
        constructed with all of the above data.
    """

    # gpu_data will be a dict whose keys are GPU UUIDs and values
    # are lists of Records
    gpu_data = {}
    for gpu_uuid, metrics_values in gpu_metric_values.items():
        gpu_data[gpu_uuid] = []
        gpu_metric_tags = list(metrics_values.keys())
        for i, gpu_metric in enumerate(
                MetricsManager.get_metric_types(gpu_metric_tags)):
            gpu_data[gpu_uuid].append(
                gpu_metric(value=metrics_values[gpu_metric_tags[i]]))

    # Non-GPU data will be a list of Records
    non_gpu_data = []
    non_gpu_metric_tags = list(non_gpu_metric_values.keys())
    for i, metric in enumerate(
            MetricsManager.get_metric_types(non_gpu_metric_tags)):
        non_gpu_data.append(
            metric(value=non_gpu_metric_values[non_gpu_metric_tags[i]]))

    # Perf Config needs a protocol
    perf_config = PerfAnalyzerConfig()
    perf_config['model-name'] = model_name
    perf_config['protocol'] = 'http'

    measurement = Measurement(gpu_data=gpu_data,
                              non_gpu_data=non_gpu_data,
                              perf_config=perf_config)
    measurement.set_result_comparator(comparator=comparator)
    return measurement
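A hypothetical call, assuming 'gpu_used_memory' and 'perf_throughput' are tags registered with MetricsManager and that a ResultComparator has already been built:

# Hypothetical usage; the tag names and result_comparator are assumptions
measurement = construct_measurement(
    model_name='my_model',
    gpu_metric_values={'GPU-5f1a2c3d': {'gpu_used_memory': 6000}},
    non_gpu_metric_values={'perf_throughput': 250.0},
    comparator=result_comparator)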
Example #3
    def _construct_result_comparator(self, gpu_metric_tags,
                                     non_gpu_metric_tags, objective_spec):
        """
        Constructs a result comparator from the given
        objective spec dictionary
        """

        gpu_metric_types = MetricsManager.get_metric_types(gpu_metric_tags)
        non_gpu_metric_types = MetricsManager.get_metric_types(
            non_gpu_metric_tags)
        objective_tags = list(objective_spec.keys())
        objective_metrics = MetricsManager.get_metric_types(objective_tags)
        objectives = {
            objective_metrics[i]: objective_spec[objective_tags[i]]
            for i in range(len(objective_tags))
        }

        return ResultComparator(gpu_metric_types=gpu_metric_types,
                                non_gpu_metric_types=non_gpu_metric_types,
                                metric_objectives=objectives)
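The objectives dict pairs each Record type with its weight from the spec. Because get_metric_types preserves tag order, the index-based comprehension is equivalent to zipping the two lists, as this standalone sketch shows; the stand-in get_metric_types below is not the real MetricsManager API.

# Stand-in for MetricsManager.get_metric_types; preserves tag order
def get_metric_types(tags):
    return [f"<RecordType:{tag}>" for tag in tags]

objective_spec = {'perf_throughput': 10, 'perf_latency_p99': 5}
objective_tags = list(objective_spec.keys())
objective_metrics = get_metric_types(objective_tags)

# Equivalent to the index-based comprehension in the method above
objectives = dict(zip(objective_metrics, objective_spec.values()))
# {'<RecordType:perf_throughput>': 10, '<RecordType:perf_latency_p99>': 5}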
Example #4
    def __init__(self, name, title, bar_width=0.5):
        """
        Parameters
        ----------
        name : str
            The name of the file that the plot
            will be saved as
        title : str
            The title of this plot/figure
        bar_width : float
            Width of the latency breakdown bars
        """

        self._name = name
        self._title = title

        self._fig, self._ax_latency = plt.subplots()
        self._ax_latency.set_title(title)
        self._ax_throughput = self._ax_latency.twinx()

        latency_axis_label, throughput_axis_label = [
            metric.header(aggregation_tag='')
            for metric in MetricsManager.get_metric_types(
                ['perf_latency_p99', 'perf_throughput'])
        ]

        self._bar_colors = {
            'perf_client_send_recv': '#ffc372',
            'perf_client_response_wait': '#9daecc',
            'perf_server_queue': '#addc91',
            'perf_server_compute_input': '#7eb7e8',
            'perf_server_compute_infer': '#0072ce',
            'perf_server_compute_output': '#254b87',
            'perf_throughput': '#5E5E5E'
        }

        self._bar_width = bar_width
        self._legend_x = 1.05
        self._legend_y = 1.15
        self._legend_font_size = 10
        self._fig.set_figheight(8)
        self._fig.set_figwidth(12)

        self._ax_latency.set_xlabel('Concurrent Client Requests')
        self._ax_latency.set_ylabel(latency_axis_label)
        self._ax_throughput.set_ylabel(throughput_axis_label)

        self._data = defaultdict(list)
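The dual-axis layout comes from twinx(), which adds a second y-axis sharing the same x-axis. A minimal matplotlib sketch of just that pattern; the axis labels here are placeholders:

import matplotlib.pyplot as plt

# Left axis for the latency bars, right axis for the throughput line,
# both sharing one x-axis (concurrency)
fig, ax_latency = plt.subplots()
ax_throughput = ax_latency.twinx()
ax_latency.set_xlabel('Concurrent Client Requests')
ax_latency.set_ylabel('p99 Latency (ms)')           # placeholder label
ax_throughput.set_ylabel('Throughput (infer/sec)')  # placeholder label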
Example #5
def convert_gpu_metrics_to_data(gpu_metric_values):
    """
    Convert GPU metric values into a dict whose keys are GPU UUIDs
    and whose values are lists of Records

    Parameters
    ----------
    gpu_metric_values: dict
        Keys are GPU UUIDs; values are dicts mapping GPU metric tags
        to their values

    Returns
    -------
    dict
        Keys are GPU UUIDs; values are lists of Records
    """
    gpu_data = {}
    for gpu_uuid, metrics_values in gpu_metric_values.items():
        gpu_data[gpu_uuid] = []
        gpu_metric_tags = list(metrics_values.keys())
        for i, gpu_metric in enumerate(
                MetricsManager.get_metric_types(gpu_metric_tags)):
            gpu_data[gpu_uuid].append(
                gpu_metric(value=metrics_values[gpu_metric_tags[i]]))

    return gpu_data
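A hypothetical call showing the shape of the conversion; the tags are assumed to be registered with MetricsManager, and the Record class names in the comment are illustrative:

gpu_data = convert_gpu_metrics_to_data({
    'GPU-5f1a2c3d': {'gpu_used_memory': 4096, 'gpu_utilization': 0.83},
})
# gpu_data maps each GPU UUID to a list of Record objects, e.g.
# {'GPU-5f1a2c3d': [GPUUsedMemory(value=4096), GPUUtilization(value=0.83)]}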
Example #6
def convert_non_gpu_metrics_to_data(non_gpu_metric_values):
    """
    Convert non-GPU metric values into a list of Records

    Parameters
    ----------
    non_gpu_metric_values: dict
        Keys are non-GPU metric tags; values are their values

    Returns
    -------
    list of Records
    """

    non_gpu_data = []
    non_gpu_metric_tags = list(non_gpu_metric_values.keys())

    for i, metric in enumerate(
            MetricsManager.get_metric_types(non_gpu_metric_tags)):
        non_gpu_data.append(
            metric(value=non_gpu_metric_values[non_gpu_metric_tags[i]]))

    return non_gpu_data
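And the non-GPU counterpart, again with assumed tag names:

non_gpu_data = convert_non_gpu_metrics_to_data({
    'perf_throughput': 250.0,
    'perf_latency_p99': 12.5,
})
# non_gpu_data is a list with one Record per tag, in tag order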
Example #7
    def _build_constraint_strings(self):
        """
        Constructs constraint strings to show the constraints under which
        each model is being run.
        """

        constraint_strs = {}
        for model_name, model_constraints in ConstraintManager.get_constraints_for_all_models(
                self._config).items():
            strs = []
            if model_constraints:
                for metric, constraint in model_constraints.items():
                    metric_header = MetricsManager.get_metric_types(
                        [metric])[0].header(aggregation_tag='')
                    for constraint_type, constraint_val in constraint.items():
                        # String looks like 'Max p99 Latency : 99 ms'
                        metric_header_name = metric_header.rsplit(' ', 1)[0]
                        metric_unit = metric_header.rsplit(' ', 1)[1][1:-1]
                        strs.append(
                            f"{constraint_type.capitalize()} {metric_header_name} : {constraint_val} {metric_unit}"
                        )
                constraint_strs[model_name] = ', '.join(strs)
        return constraint_strs
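The header parsing assumes metric headers end with a parenthesized unit, e.g. 'p99 Latency (ms)'. A standalone sketch of the string construction with placeholder values:

# Assumes a header of the form '<name> (<unit>)'
metric_header = 'p99 Latency (ms)'
constraint_type, constraint_val = 'max', 99

metric_header_name = metric_header.rsplit(' ', 1)[0]  # 'p99 Latency'
metric_unit = metric_header.rsplit(' ', 1)[1][1:-1]   # 'ms', parentheses stripped
print(f"{constraint_type.capitalize()} {metric_header_name} : {constraint_val} {metric_unit}")
# Max p99 Latency : 99 ms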
Example #8
    def __init__(self, name, title, x_axis, y_axis, monotonic=False):
        """
        Parameters
        ----------
        name : str
            The name of the file that the plot
            will be saved as
        title : str
            The title of this plot/figure
        x_axis : str
            The metric tag for the x-axis of this plot
        y_axis : str
            The metric tag for the y-axis of this plot
        monotonic : bool
            Whether or not to prune decreasing points in this
            plot
        """

        self._name = name
        self._title = title
        self._x_axis = x_axis
        self._y_axis = y_axis
        self._monotonic = monotonic

        self._fig, self._ax = plt.subplots()

        self._data = {}

        self._ax.set_title(title)

        self._x_header, self._y_header = [
            metric.header(aggregation_tag='')
            for metric in MetricsManager.get_metric_types([x_axis, y_axis])
        ]

        self._ax.set_xlabel(self._x_header)
        self._ax.set_ylabel(self._y_header)
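A hypothetical instantiation; the enclosing class name is an assumption here, and both axis tags must be known to MetricsManager:

# 'SimplePlot' is an assumed name for the enclosing class
plot = SimplePlot(name='throughput_v_latency',
                  title='Throughput vs. p99 Latency',
                  x_axis='perf_latency_p99',
                  y_axis='perf_throughput',
                  monotonic=True)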
Example #9
    def plot_data(self):
        """
        Calls plotting function
        on this plot's Axes object
        """

        # Sort the data by concurrency
        concurrency_sort_indices = [
            i for i, _ in sorted(enumerate(self._data['concurrency']),
                                 key=lambda x: x[1])
        ]

        sorted_data = {
            key: [data_list[i] for i in concurrency_sort_indices]
            for key, data_list in self._data.items()
        }

        # Plot latency breakdown bars
        labels = dict(
            zip(self.detailed_metrics, [
                metric.header() for metric in MetricsManager.get_metric_types(
                    tags=self.detailed_metrics)
            ]))
        bottoms = None

        sorted_data['concurrency'] = list(map(str, sorted_data['concurrency']))

        # Plot latency breakdown with concurrency cast to strings to make a uniform x-axis
        for metric, label in labels.items():
            self._ax_latency.bar(sorted_data['concurrency'],
                                 sorted_data[metric],
                                 width=self._bar_width,
                                 label=label,
                                 bottom=bottoms,
                                 color=self._bar_colors[metric])
            if not bottoms:
                bottoms = sorted_data[metric]
            else:
                bottoms = list(
                    map(lambda x, y: x + y, bottoms, sorted_data[metric]))

        # Plot the inference line
        inference_line = self._ax_throughput.plot(
            sorted_data['concurrency'],
            sorted_data['perf_throughput'],
            label='Inferences/second',
            marker='o',
            color=self._bar_colors['perf_throughput'])

        # Create legend handles
        handles = [
            mpatches.Patch(color=self._bar_colors[m], label=labels[m])
            for m in self._bar_colors if m != 'perf_throughput'
        ]
        handles.append(inference_line[0])

        self._ax_latency.legend(handles=handles,
                                ncol=(len(self._bar_colors) // 2) + 1,
                                bbox_to_anchor=(self._legend_x,
                                                self._legend_y),
                                prop=dict(size=self._legend_font_size))
        # Annotate inferences
        for x, y in zip(sorted_data['concurrency'],
                        sorted_data['perf_throughput']):
            self._ax_throughput.annotate(
                str(round(y, 2)),
                xy=(x, y),
                textcoords="offset points",  # how to position the text
                xytext=(0, 10),  # distance from text to points (x,y)
                ha='center')

        self._ax_latency.grid()
        self._ax_latency.set_axisbelow(True)
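The bottoms bookkeeping is what stacks the bars: each metric's bar is drawn starting at the running sum of all metrics plotted before it. In isolation, with placeholder data:

# Minimal sketch of the stacked-bar accumulation
series = {'queue': [1.0, 2.0], 'compute': [3.0, 4.0], 'output': [0.5, 1.0]}
bottoms = None
for name, values in series.items():
    # ax.bar(x, values, bottom=bottoms) would be called here
    if not bottoms:
        bottoms = values
    else:
        bottoms = [b + v for b, v in zip(bottoms, values)]
# bottoms now holds the total bar height at each x: [4.5, 7.0]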
Example #10
    def plot_data_and_constraints(self, constraints):
        """
        Calls plotting function
        on this plot's Axes object

        Parameters
        ----------
        constraints: dict
            Keys are metric tags; values are dicts whose keys are
            constraint types (min, max) and whose values are the
            constraint values
        """

        self._ax.set_title(self._title)

        if self._x_axis.replace('_', '-') in PerfAnalyzerConfig.allowed_keys():
            self._x_header = self._x_axis.replace('_', ' ').title()
        else:
            self._x_header = MetricsManager.get_metric_types(
                [self._x_axis])[0].header(aggregation_tag='')

        if self._y_axis.replace('_', '-') in PerfAnalyzerConfig.allowed_keys():
            self._y_header = self._y_axis.replace('_', ' ').title()
        else:
            self._y_header = MetricsManager.get_metric_types(
                [self._y_axis])[0].header(aggregation_tag='')

        self._ax.set_xlabel(self._x_header)
        self._ax.set_ylabel(self._y_header)

        for model_config_name, data in self._data.items():
            # Sort the data by x-axis
            x_data, y_data = (
                list(t)
                for t in zip(*sorted(zip(data['x_data'], data['y_data']))))

            if self._monotonic:
                filtered_x, filtered_y = [x_data[0]], [y_data[0]]
                for i in range(1, len(x_data)):
                    if y_data[i] > filtered_y[-1]:
                        filtered_x.append(x_data[i])
                        filtered_y.append(y_data[i])
                x_data, y_data = filtered_x, filtered_y

            self._ax.plot(x_data, y_data, marker='o', label=model_config_name)

        # Plot constraints
        if constraints:
            if self._x_axis in constraints:
                for _, constraint_val in constraints[self._x_axis].items():
                    constraint_label = f"Target {self._x_header.rsplit(' ',1)[0]}"
                    self._ax.axvline(x=constraint_val,
                                     linestyle='--',
                                     label=constraint_label)
            if self._y_axis in constraints:
                for _, constraint_val in constraints[self._y_axis].items():
                    constraint_label = f"Target {self._y_header.rsplit(' ', 1)[0]}"
                    self._ax.axhline(y=constraint_val,
                                     linestyle='--',
                                     label=constraint_label)
        self._ax.legend()
        self._ax.grid()
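The monotonic pruning keeps a point only when its y value improves on the best seen so far, which smooths out dips in, e.g., a throughput-vs-concurrency curve. A standalone run with placeholder data:

x_data = [1, 2, 4, 8, 16]
y_data = [100, 250, 240, 400, 390]

# Keep a point only if its y value beats the last kept point
filtered_x, filtered_y = [x_data[0]], [y_data[0]]
for i in range(1, len(x_data)):
    if y_data[i] > filtered_y[-1]:
        filtered_x.append(x_data[i])
        filtered_y.append(y_data[i])

# filtered_x == [1, 2, 8]; filtered_y == [100, 250, 400]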