Code example #1
File: vegeta.py  Project: opsani/servox
def _time_series_readings_from_vegeta_reports(
        metrics: Optional[List[str]],
        vegeta_reports: List[VegetaReport]) -> List[servo.TimeSeries]:
    readings = []

    for metric in METRICS:
        if metrics and metric.name not in metrics:
            continue

        if metric.name in (
                "throughput",
                "error_rate",
        ):
            key = metric.name
        elif metric.name.startswith("latency_"):
            key = "latencies" + "." + metric.name.replace("latency_", "")
        else:
            raise NameError(f'Unexpected metric name "{metric.name}"')

        data_points: List[servo.DataPoint] = []
        for report in vegeta_reports:
            value = servo.value_for_key_path(report.dict(by_alias=True), key)
            data_points.append(servo.DataPoint(metric, report.end, value))

        readings.append(servo.TimeSeries(metric, data_points))

    return readings
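
The dotted key paths built above ("latencies.p50" and friends) are resolved against the report dict by servo.value_for_key_path. As a rough standalone sketch (the helper below is a simplified stand-in, not the project's implementation), a dotted lookup can be done like this:

from typing import Any

def value_for_key_path(data: dict, key_path: str) -> Any:
    # Walk nested dictionaries one dotted segment at a time, so that
    # "latencies.p50" resolves to data["latencies"]["p50"].
    value: Any = data
    for segment in key_path.split("."):
        value = value[segment]
    return value

report = {"throughput": 250.0, "latencies": {"p50": 0.004, "p99": 0.012}}
assert value_for_key_path(report, "latencies.p99") == 0.012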
Code example #2
def measurement() -> servo.Measurement:
    return servo.Measurement(readings=[
        servo.DataPoint(
            value=31337,
            time=datetime.datetime.now(),
            metric=servo.Metric(
                name="Some Metric",
                unit=servo.Unit.requests_per_minute,
            ),
        )
    ])
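
As a hedged usage sketch (not taken from the project), the fields of the DataPoint constructed above can be read back by attribute, mirroring the keyword arguments used to build it:

# Attribute names follow the constructor keywords in the example above.
point = measurement().readings[0]
print(point.metric.name, point.value)  # Some Metric 31337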
Code example #3
File: prometheus.py  Project: opsani/servox
    def _time_series_from_vector(self, vector: BaseVector) -> servo.TimeSeries:
        instance = vector.metric.get("instance")
        job = vector.metric.get("job")
        annotation = " ".join(
            map(
                lambda m: "=".join(m),
                sorted(vector.metric.items(), key=operator.itemgetter(0)),
            ))
        return servo.TimeSeries(
            self.metric,
            list(map(lambda v: servo.DataPoint(self.metric, *v), iter(vector))),
            id=f"{{instance={instance},job={job}}}",
            annotation=annotation,
        )
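
The id and annotation strings built above can be shown in isolation. A minimal sketch with a plain dict standing in for vector.metric (the label values here are made up):

import operator

labels = {"job": "envoy", "instance": "10.0.0.4:9901", "le": "0.5"}
annotation = " ".join(
    "=".join(pair) for pair in sorted(labels.items(), key=operator.itemgetter(0)))
series_id = f"{{instance={labels.get('instance')},job={labels.get('job')}}}"
print(annotation)  # instance=10.0.0.4:9901 job=envoy le=0.5
print(series_id)   # {instance=10.0.0.4:9901,job=envoy}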
Code example #4
    async def _query_appd_direct(self, metric: AppdynamicsMetric,
                                 start: datetime,
                                 end: datetime) -> list[servo.TimeSeries]:
        """Queries AppDynamics for measurements that are taken from a metric exactly as defined in the config, e.g.
        from nodes that are not dynamic and remain consistent. Currently not utilized in the main/tuning workflow as
        both of these utilize a dynamic node naming.

        Args:
            metric (AppdynamicsMetric, required): The metric to query for.
            start (datetime, required): Metric start time.
            end (datetime, required): Metric end time.

        Returns:
            Readings: A list of TimeSeries with metric readings.
        """

        data = await self._appd_api(metric=metric, start=start, end=end)

        metric_path = data[0][
            "metricPath"]  # e.g. "Business Transaction Performance|Business Transactions|frontend-service|/payment|Individual Nodes|frontend-tuning|Calls per Minute"
        metric_name = data[0][
            "metricName"]  # e.g. "BTM|BTs|BT:270723|Component:8435|Calls per Minute"

        readings: list[servo.TimeSeries] = []
        data_points: list[servo.DataPoint] = []

        for result_dict in data[0]["metricValues"]:
            self.logger.trace(
                f"Captured {result_dict['value']} at {result_dict['startTimeInMillis']} for {metric}"
            )

            data_points.append(
                servo.DataPoint(
                    metric,
                    result_dict["startTimeInMillis"],
                    float(result_dict["value"]),
                ))

        readings.append(
            servo.TimeSeries(
                metric,
                data_points,
                id=f"{{metric_path={metric_path}, metric_name={metric_name}}}",
            ))

        return readings
Code example #5
    def node_sync_and_transpose(
        self,
        node_readings: list[list[servo.DataPoint]],
    ) -> tuple[list[list[servo.DataPoint]], list[servo.DataPoint]]:
        """Converts a multi-node aggregate response into a uniform reading, inserting substitute zero value metric data
        for times the node (thus pod) did not exist, synchronized to timestamps from the longest living node within the
        measurement cycle. Transposes the nested list from [nodes[readings]] to [readings[nodes]] for operations, and
        additionally returns all data points from the longest lived node to prevent re-querying.

        Args:
            node_readings (list[list[servo.DataPoint]], required): Nested list of node readings.
        Returns:
            (transposed_node_readings, max_length_node_items): A tuple of the synced+converted readings with the
            readings from the longest-lived node.
        """

        self.logger.trace(
            f"Syncing and transposing node data: {node_readings}")

        # The longest-lived node defines the reference timestamps
        max_length_node_items = max(node_readings, key=len)
        max_length_times = [reading.time for reading in max_length_node_items]

        # Pad 0 values for nodes with shorter lives, synced to timestamp of longest-lived node
        for node in node_readings:
            times = [reading.time for reading in node]
            unset_readings = list(set(max_length_times) - set(times))
            for time in unset_readings:
                datapoint = servo.DataPoint(node[0].metric, time, float(0))
                node.append(datapoint)
            node.sort(key=lambda x: x.time)
        transposed_node_readings = list(map(list, zip(*node_readings)))
        self.logger.trace(
            f"Synced and transposed node data to: {transposed_node_readings}")

        return transposed_node_readings, max_length_node_items
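
The pad-and-transpose step is easier to see with plain (time, value) tuples in place of servo.DataPoint. A minimal sketch of the same idea, assuming node_a is the longest-lived node:

node_a = [(1, 10.0), (2, 12.0), (3, 11.0)]  # longest-lived node
node_b = [(2, 9.0), (3, 8.0)]               # came up one interval later

reference_times = [t for t, _ in node_a]
for node in (node_a, node_b):
    have = {t for t, _ in node}
    # Pad zero values at timestamps where this node has no reading
    node.extend((t, 0.0) for t in reference_times if t not in have)
    node.sort(key=lambda pair: pair[0])

# zip(*...) flips [nodes[readings]] into [readings[nodes]]
transposed = list(map(list, zip(*(node_a, node_b))))
assert transposed[0] == [(1, 10.0), (1, 0.0)]  # all nodes' readings at time 1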
Code example #6
File: prometheus.py  Project: opsani/servox
    def results(self) -> Optional[List[servo.Reading]]:
        """Return `DataPoint` or `TimeSeries` representations of the query results.

        Response data containing vector and matrix results are serialized into
        `TimeSeries` objects. Scalar and string results are serialized into `DataPoint`.
        """
        if self.status == Status.error:
            return None
        elif not self.data:
            return []

        results_ = []
        for result in self.data:
            if self.data.is_vector:
                results_.append(self._time_series_from_vector(result))
            elif self.data.is_value:
                results_.append(servo.DataPoint(self.metric, **result))
            else:
                raise TypeError(
                    f"unknown Result type '{result.__class__.name}' encountered"
                )

        return results_
Code example #7
    async def _appd_dynamic_node(
        self,
        node: str,
        metric: AppdynamicsMetric,
        start: datetime,
        end: datetime,
    ) -> list[servo.TimeSeries]:
        """Queries AppDynamics for measurements used directly when a dynamic node is being read that requires the
        metric endpoint to be substituted from the config. Substitutes the metric path with the individual nodes
        endpoint, as well as substitutes reading values of 0 when the response is empty from a metric that does always
        report (calls per minute) to synchronize timestamps and number of readings.

        Args:
            node (str, required): The active node to be queried.
            metric (AppdynamicsMetric, required): The metric to query for.
            start (datetime, required): Metric start time.
            end (datetime, required): Metric end time.

        Returns:
            Readings: A list of TimeSeries with metric readings.
        """

        node_data = await self._appd_api(metric=metric,
                                         node=node,
                                         start=start,
                                         end=end)

        metric_path = node_data[0][
            "metricPath"]  # e.g. "Business Transaction Performance|Business Transactions|frontend-service|/payment|Calls per Minute"
        metric_name = node_data[0][
            "metricName"]  # e.g. "BTM|BTs|BT:270723|Component:8435|Calls per Minute"

        node_readings: list[servo.TimeSeries] = []
        data_points: list[servo.DataPoint] = []

        # If the metric data isn't consistently above 0, sometimes no data is returned
        # This requires a substitution with a working call to sync timestamps and number of readings
        # This function is only called for just-verified active nodes

        if not node_data[0]["metricValues"]:

            node_data = await self._appd_api(metric=metric,
                                             node=node,
                                             start=start,
                                             end=end,
                                             override=True)
            self.logger.trace(
                f"Got substitute data for {metric.query} on node: {node}")

            # Substitute in 0's for the actual metric values
            for result_dict in node_data[0]["metricValues"]:
                data_points.append(
                    servo.DataPoint(metric, result_dict["startTimeInMillis"],
                                    float(0)))

        # Main capture logic
        for result_dict in node_data[0]["metricValues"]:
            self.logger.trace(
                f"Captured {result_dict['value']} at {result_dict['startTimeInMillis']} for {metric.query}"
            )
            data_points.append(
                servo.DataPoint(
                    metric,
                    result_dict["startTimeInMillis"],
                    float(result_dict["value"]),
                ))

        node_readings.append(
            servo.TimeSeries(
                metric,
                data_points,
                id=f"{{metric_path={metric_path}, metric_name={metric_name}}}",
            ))

        return node_readings
Code example #8
    async def _query_instance_count(
        self,
        metric: AppdynamicsMetric,
        start: datetime,
        end: datetime,
        active_nodes: list[str],
    ) -> list[servo.TimeSeries]:
        """Queries AppDynamics for instances count. Individual node responses are gathered via _appd_node_response(),
        transposed to synchronize reading times, and computed via either sum or average.

        Args:
            metric (AppdynamicsMetric, required): The metric to query for.
            start (datetime, required): Metric start time.
            end (datetime, required): Metric end time.
            active_nodes (list[str], required): The list of actively reporting nodes to aggregate on.

        Returns:
            Readings: A list of TimeSeries with metric readings.
        """

        readings: list[servo.TimeSeries] = []
        data_points: list[servo.DataPoint] = []

        if len(active_nodes) > 1:
            node_readings = await asyncio.gather(*list(
                map(
                    lambda m: self._appd_node_response(m, metric, start, end),
                    active_nodes,
                )))
            instance_count_readings: list[int] = []

            # Transpose node readings from [nodes[readings]] to [readings[nodes]] for computed aggregation
            (
                transposed_node_readings,
                max_length_node_items,
            ) = self.node_sync_and_transpose(node_readings)

            for node_reading in transposed_node_readings:
                value = [reading.value for reading in node_reading]
                nonzero_instance_count = list(filter(lambda x: x > 0, value))
                instant_instance_count = len(nonzero_instance_count)
                instance_count_readings.append(instant_instance_count)

                self.logger.trace(
                    f"Found instance count {instant_instance_count} at time {node_reading[0].time}"
                )

            for max_items, aggregate_value in zip(max_length_node_items,
                                                  instance_count_readings):
                self.logger.trace(
                    f"Syncing aggregate metric {metric.name} value {aggregate_value} to time {max_items.time}"
                )
                data_points.append(
                    servo.DataPoint(metric, max_items.time,
                                    float(aggregate_value)))

        # TODO: Cleanup this conditional handling for single instance counting
        elif len(active_nodes) == 1:

            node_readings = await self._appd_node_response(
                active_nodes[0], metric, start, end)
            instance_count_readings = [float(1) for _ in node_readings]

            for reading in node_readings:
                self.logger.trace(
                    f"Found single instance count at time {reading.time}")

            for item, instance_value in zip(node_readings,
                                            instance_count_readings):
                self.logger.trace(
                    f"Syncing instance count {instance_value} to time {item.time}"
                )
                data_points.append(
                    servo.DataPoint(metric, item.time, float(instance_value)))

        readings.append(servo.TimeSeries(
            metric,
            data_points,
        ))

        return readings
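
The per-timestamp instance count in the multi-node branch reduces to counting nonzero readings per transposed row. In isolation (values invented for illustration):

readings_by_time = [
    [10.0, 0.0, 7.0],  # two of three nodes reporting at this timestamp
    [11.0, 5.0, 8.0],  # all three reporting
]
instance_counts = [sum(1 for v in row if v > 0) for row in readings_by_time]
assert instance_counts == [2, 3]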
Code example #9
    async def _appd_node_response(
        self,
        node: str,
        metric: AppdynamicsMetric,
        start: datetime,
        end: datetime,
    ) -> list[servo.DataPoint]:
        """Queries AppDynamics for measurements either used directly or in aggregation when a dynamic node is being read
        that requires the metric endpoint to be substituted from the config. Substitutes the metric path with the
        individual nodes' endpoint, as well as substitutes reading values of 0 when the response is empty from a metric
        that does always report (calls per minute) to synchronize timestamps and number of readings.

        Args:
            node (str, required): The active node to be queried.
            metric (AppdynamicsMetric, required): The metric to query for.
            start (datetime, required): Metric start time.
            end (datetime, required): Metric end time.

        Returns:
            data_points: A list of DataPoint objects with the node's metric readings.
        """

        node_data = await self._appd_api(metric=metric,
                                         node=node,
                                         start=start,
                                         end=end)

        data_points: list[servo.DataPoint] = []

        # If the metric data isn't consistently above 0, sometimes no data is returned
        # This requires a substitution with a working call to sync timestamps and number of readings
        # This function is only called for just-verified active nodes

        if not node_data[0]["metricValues"]:

            substitute_node_data = await self._appd_api(metric=metric,
                                                        node=node,
                                                        start=start,
                                                        end=end,
                                                        override=True)

            self.logger.trace(
                f"Got substitute data for {metric.query} on node: {node}")

            # Substitute in 0's for the actual metric values
            for substitute_result_dict in substitute_node_data[0][
                    "metricValues"]:
                data_point = servo.DataPoint(
                    metric, substitute_result_dict["startTimeInMillis"],
                    float(0))
                data_points.append(data_point)

        # Main capture logic
        else:
            for result_dict in node_data[0]["metricValues"]:
                self.logger.trace(
                    f"Captured {result_dict['value']} at {result_dict['startTimeInMillis']} for {metric.query}"
                )
                data_point = servo.DataPoint(
                    metric,
                    result_dict["startTimeInMillis"],
                    float(result_dict["value"]),
                )
                data_points.append(data_point)

        return data_points
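
The empty-response substitution above boils down to: if the primary query returns no values, re-query an always-reporting metric and keep only its timestamps, recording 0.0 as the value. A minimal sketch (fetch_values and fetch_timestamps are hypothetical stand-ins for the two _appd_api calls):

def collect(fetch_values, fetch_timestamps):
    values = fetch_values()
    if not values:
        # Zero-fill against the timestamps of a metric that always reports
        return [(t, 0.0) for t in fetch_timestamps()]
    return values

assert collect(lambda: [], lambda: [1, 2]) == [(1, 0.0), (2, 0.0)]
assert collect(lambda: [(1, 5.0)], lambda: [1, 2]) == [(1, 5.0)]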
Code example #10
    async def _query_appd_aggregate(
        self,
        metric: AppdynamicsMetric,
        start: datetime,
        end: datetime,
        active_nodes: list[str],
    ) -> list[servo.TimeSeries]:
        """Queries AppDynamics for measurements that need to be aggregated across multiple AppD nodes/K8s pods.
        Individual node responses are gathered via _appd_node_response(), transposed to synchronize reading times,
        and computed via either sum or average.

        Args:
            metric (AppdynamicsMetric, required): The metric to query for.
            start (datetime, required): Metric start time.
            end (datetime, required): Metric end time.
            active_nodes (list[str], required): The list of actively reporting nodes to aggregate on.

        Returns:
            Readings: A list of TimeSeries with metric readings.
        """

        # Begin metric collection and aggregation for active nodes
        node_readings = await asyncio.gather(*list(
            map(
                lambda m: self._appd_node_response(m, metric, start, end),
                active_nodes,
            )))

        aggregate_readings: list[float] = []

        # Transpose node readings from [nodes[readings]] to [readings[nodes]] for computed aggregation
        transposed_node_readings, max_length_node_items = self.node_sync_and_transpose(
            node_readings)

        for time_reading in transposed_node_readings:
            aggregate_data_points: list[Union[int, float]] = []

            for data_points in time_reading:
                aggregate_data_points.append(data_points.value)

            nonzero_aggregate_data_points = list(
                filter(lambda x: x > 0, aggregate_data_points))
            denominator = (len(nonzero_aggregate_data_points)
                           if len(nonzero_aggregate_data_points) > 0 else 1)
            computed_aggregate = sum(
                nonzero_aggregate_data_points) / denominator

            self.logger.trace(
                f"Aggregating nonzero values {nonzero_aggregate_data_points} for {metric.query} ({metric.unit}) via average into {computed_aggregate}"
            )
            aggregate_readings.append(computed_aggregate)

        readings: list[servo.TimeSeries] = []
        data_points: list[servo.DataPoint] = []

        for max_items, aggregate_value in zip(max_length_node_items,
                                              aggregate_readings):
            self.logger.trace(
                f"Syncing aggregate metric {metric.name} value {aggregate_value} to time {max_items.time}"
            )
            data_points.append(
                servo.DataPoint(metric, max_items.time,
                                float(aggregate_value)))

        readings.append(servo.TimeSeries(
            metric,
            data_points,
        ))

        return readings
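
The aggregation rule used above, shown in isolation: average over the nonzero values only, with the denominator guarded so an all-zero row yields 0.0 instead of raising ZeroDivisionError. A minimal sketch:

def nonzero_average(values: list[float]) -> float:
    nonzero = [v for v in values if v > 0]
    return sum(nonzero) / (len(nonzero) or 1)

assert nonzero_average([4.0, 0.0, 2.0]) == 3.0
assert nonzero_average([0.0, 0.0]) == 0.0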