Example #1
    def __init__(self,
                 mean_data,
                 count_data,
                 min_,
                 max_,
                 sum_of_sqd_deviations,
                 counts_per_bucket=None,
                 bounds=None,
                 exemplars=None):
        super(DistributionAggregationData, self).__init__(mean_data)
        self._mean_data = mean_data
        self._count_data = count_data
        self._min = min_
        self._max = max_
        self._sum_of_sqd_deviations = sum_of_sqd_deviations
        if bounds is None:
            bounds = []

        if counts_per_bucket is None:
            counts_per_bucket = [0] * (len(bounds) + 1)
        self._counts_per_bucket = counts_per_bucket
        self._bounds = bucket_boundaries.BucketBoundaries(
                                            boundaries=bounds).boundaries
        # Index of the last (overflow) bucket, i.e. len(bounds)
        bucket = len(self.bounds)

        # If there is no histogram, do not record an exemplar
        self._exemplars = \
            {bucket: exemplars} if len(self._bounds) > 0 else None
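A hedged usage sketch for this variant (not part of the library source): the import path follows opencensus-python, the values are hypothetical, and the counts_per_bucket property is assumed to mirror self._counts_per_bucket.

from opencensus.stats import aggregation_data

# Three boundaries define four buckets:
# (-inf, 2), [2, 4), [4, 8), [8, +inf)
dist_data = aggregation_data.DistributionAggregationData(
    mean_data=5.0,               # running mean of recorded values
    count_data=4,                # number of values recorded so far
    min_=1,                      # smallest recorded value
    max_=9,                      # largest recorded value
    sum_of_sqd_deviations=32.0,  # sum of squared deviations from the mean
    bounds=[2, 4, 8])
print(dist_data.counts_per_bucket)  # [0, 0, 0, 0]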
Example #2
    def __init__(self,
                 mean_data,
                 count_data,
                 min_,
                 max_,
                 sum_of_sqd_deviations,
                 counts_per_bucket=None,
                 bounds=None):
        super(DistributionAggregationData, self).__init__(mean_data)
        self._mean_data = mean_data
        self._count_data = count_data
        self._min = min_
        self._max = max_
        self._sum_of_sqd_deviations = sum_of_sqd_deviations
        if bounds is None:
            bounds = []

        if counts_per_bucket is None:
            counts_per_bucket = [0] * (len(bounds) + 1)
        self._counts_per_bucket = counts_per_bucket
        self._bounds = bucket_boundaries.BucketBoundaries(
                                            boundaries=bounds).boundaries
Example #3
    def __init__(self,
                 mean_data,
                 count_data,
                 sum_of_sqd_deviations,
                 counts_per_bucket=None,
                 bounds=None,
                 exemplars=None):
        if bounds is None and exemplars is not None:
            raise ValueError("exemplars require bounds")
        if exemplars is not None and len(exemplars) != len(bounds) + 1:
            raise ValueError("there must be one exemplar per bucket, "
                             "i.e. len(bounds) + 1 of them")

        self._mean_data = mean_data
        self._count_data = count_data
        self._sum_of_sqd_deviations = sum_of_sqd_deviations

        if bounds is None:
            bounds = []
            self._exemplars = None
        else:
            assert bounds == list(sorted(set(bounds)))
            assert all(bb > 0 for bb in bounds)
            if exemplars is None:
                self._exemplars = {ii: None for ii in range(len(bounds) + 1)}
            else:
                self._exemplars = {ii: ex for ii, ex in enumerate(exemplars)}
        self._bounds = (bucket_boundaries.BucketBoundaries(
            boundaries=bounds).boundaries)

        if counts_per_bucket is None:
            counts_per_bucket = [0 for ii in range(len(bounds) + 1)]
        else:
            assert all(cc >= 0 for cc in counts_per_bucket)
            assert len(counts_per_bucket) == len(bounds) + 1
        self._counts_per_bucket = counts_per_bucket
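A short sketch of the validation above (hypothetical values; this variant takes no min_/max_ arguments): two boundaries define three buckets, so passing a single exemplar raises.

from opencensus.stats import aggregation_data

try:
    aggregation_data.DistributionAggregationData(
        0, 0, 0, bounds=[1, 2], exemplars=[None])
except ValueError:
    print("need len(bounds) + 1 exemplars, one per bucket")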
Example #4
    def __init__(self,
                 boundaries=None,
                 distribution=None,
                 aggregation_type=Type.DISTRIBUTION):
        if boundaries:
            if not all(boundaries[ii] < boundaries[ii + 1]
                       for ii in range(len(boundaries) - 1)):
                raise ValueError("bounds must be sorted in increasing order")
            # Find the index of the first positive boundary; if none is
            # positive, the for/else bumps ii past the last index so that
            # every boundary is dropped below.
            for ii, bb in enumerate(boundaries):
                if bb > 0:
                    break
            else:
                ii += 1
            if ii:
                logger.warning("Dropping %s non-positive bucket boundaries",
                               ii)
            boundaries = boundaries[ii:]

        super(DistributionAggregation,
              self).__init__(buckets=boundaries,
                             aggregation_type=aggregation_type)
        self._boundaries = bucket_boundaries.BucketBoundaries(boundaries)
        self._distribution = distribution or {}
        self.aggregation_data = aggregation_data.DistributionAggregationData(
            0, 0, 0, None, boundaries)
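A hedged sketch of the sanitizing above (import path per opencensus-python; a public boundaries property mirroring self._boundaries is assumed): non-positive boundaries are dropped with a warning rather than rejected.

from opencensus.stats import aggregation

agg = aggregation.DistributionAggregation([-1, 0, 1, 2])
# logs: "Dropping 2 non-positive bucket boundaries"
print(agg.boundaries.boundaries)  # [1, 2]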
Example #5
    def test_is_valid_boundaries(self):
        boundaries = [0, 1/4, 1/2]
        bucket_boundaries = bucket_boundaries_module.BucketBoundaries(
            boundaries=boundaries)
        self.assertTrue(bucket_boundaries.is_valid_boundaries(
            boundaries=boundaries))

        boundaries = [2, 1, 0]
        bucket_boundaries = bucket_boundaries_module.BucketBoundaries(
            boundaries=boundaries)
        self.assertFalse(bucket_boundaries.is_valid_boundaries(
            boundaries=boundaries))

        boundaries = None
        bucket_boundaries = bucket_boundaries_module.BucketBoundaries(
            boundaries=boundaries)
        self.assertFalse(bucket_boundaries.is_valid_boundaries(
            boundaries=boundaries))
Example #6
    def __init__(self,
                 mean_data,
                 count_data,
                 min_,
                 max_,
                 sum_of_sqd_deviations,
                 counts_per_bucket=None,
                 bounds=None,
                 exemplars=None):
        super(DistributionAggregationData, self).__init__(mean_data)
        self._mean_data = mean_data
        self._count_data = count_data
        self._min = min_
        self._max = max_
        self._sum_of_sqd_deviations = sum_of_sqd_deviations
        if bounds is None:
            bounds = []

        if counts_per_bucket is None:
            counts_per_bucket = [0 for ii in range(len(bounds) + 1)]
        elif len(counts_per_bucket) != len(bounds) + 1:
            raise ValueError("counts_per_bucket length does not match bounds "
                             "length")

        self._counts_per_bucket = counts_per_bucket
        self._bounds = bucket_boundaries.BucketBoundaries(
            boundaries=bounds).boundaries
        # Index of the last (overflow) bucket, i.e. len(bounds)
        bucket = len(self.bounds)

        # If there is no histogram, do not record an exemplar
        self._exemplars = \
            {bucket: exemplars} if len(self._bounds) > 0 else None
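A sketch of the length check in this variant (hypothetical values): two boundaries imply three buckets, so two counts are rejected.

from opencensus.stats import aggregation_data

try:
    aggregation_data.DistributionAggregationData(
        0, 0, float('inf'), float('-inf'), 0,
        counts_per_bucket=[0, 0], bounds=[1, 2])
except ValueError as err:
    print(err)  # counts_per_bucket length does not match bounds length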
Example #7
 def __init__(self,
              boundaries=None,
              distribution=None,
              aggregation_type=Type.DISTRIBUTION):
     super(DistributionAggregation,
           self).__init__(buckets=boundaries,
                          aggregation_type=aggregation_type)
     self._boundaries = bucket_boundaries.BucketBoundaries(boundaries)
     self._distribution = distribution or {}
Example #8
 def __init__(self,
              boundaries=None,
              distribution=None,
              aggregation_type=Type.DISTRIBUTION):
     super(DistributionAggregation,
           self).__init__(buckets=boundaries,
                          aggregation_type=aggregation_type)
     self._boundaries = bucket_boundaries.BucketBoundaries(boundaries)
     self._distribution = distribution or {}
     self.aggregation_data = aggregation_data.DistributionAggregationData(
         0, 0, float('inf'), float('-inf'), 0, None, boundaries)
Example #9
 def __init__(self,
              mean_data,
              count_data,
              min_,
              max_,
              sum_of_sqd_deviations,
              counts_per_bucket,
              bounds):
     super(DistributionAggregationData, self).__init__(mean_data)
     self._mean_data = mean_data
     self._count_data = count_data
     self._min = min_
     self._max = max_
     self._sum_of_sqd_deviations = sum_of_sqd_deviations
     self._counts_per_bucket = counts_per_bucket
     self._bounds = bucket_boundaries.BucketBoundaries(
                                         boundaries=bounds).boundaries
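In this variant every argument is required; a hedged construction sketch with hypothetical starting values (the infinities ensure the first recorded value becomes both the min and the max):

from opencensus.stats import aggregation_data

dist_data = aggregation_data.DistributionAggregationData(
    mean_data=0, count_data=0,
    min_=float('inf'), max_=float('-inf'),
    sum_of_sqd_deviations=0,
    counts_per_bucket=[0, 0, 0],  # len(bounds) + 1 empty buckets
    bounds=[1, 2])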
Example #10
    def __init__(self,
                 mean_data,
                 count_data,
                 min_,
                 max_,
                 sum_of_sqd_deviations,
                 counts_per_bucket=None,
                 bounds=None,
                 exemplars=None):
        super(DistributionAggregationData, self).__init__(mean_data)
        self._mean_data = mean_data
        self._count_data = count_data
        self._min = min_
        self._max = max_
        self._sum_of_sqd_deviations = sum_of_sqd_deviations
        if bounds is None:
            bounds = []
        else:
            assert bounds == list(sorted(set(bounds)))

        if counts_per_bucket is None:
            counts_per_bucket = [0 for ii in range(len(bounds) + 1)]
        else:
            assert all(cc >= 0 for cc in counts_per_bucket)
            assert len(counts_per_bucket) == len(bounds) + 1

        # Sorted/unique was already asserted above for caller-supplied
        # bounds; boundaries must also be positive.
        assert all(bb > 0 for bb in bounds)

        self._counts_per_bucket = counts_per_bucket
        self._bounds = bucket_boundaries.BucketBoundaries(
            boundaries=bounds).boundaries
        # Index of the last (overflow) bucket, i.e. len(bounds)
        bucket = len(self.bounds)

        # If there is no histogram, do not record an exemplar
        self._exemplars = \
            {bucket: exemplars} if len(self._bounds) > 0 else None
Example #11
class RPCViewConstants:
    """
    Define variables used by the view constants below
    """
    # Buckets for distributions in default views
    # Common histogram bucket boundaries for bytes
    # received/sets Views (in bytes).
    rpc_bytes_bucket_boundaries = [
        0, 1024, 2048, 4096, 16384, 65536, 262144, 4194304, 16777216, 67108864,
        268435456, 1073741824, 4294967296
    ]

    # Common histogram bucket boundaries for latency and
    # elapsed-time Views (in milliseconds).
    rpc_millis_bucket_boundaries = [
        0.0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0,
        10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0, 50.0, 65.0, 80.0, 100.0,
        130.0, 160.0, 200.0, 250.0, 300.0, 400.0, 500.0, 650.0, 800.0, 1000.0,
        2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0
    ]

    # Common histogram bucket boundaries for request/response
    # count Views (no unit).
    rpc_count_bucket_boundaries = [
        0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0, 1024.0,
        2048.0, 4096.0, 8192.0, 16384.0, 32768.0, 65536.0
    ]

    # Record sum and count stats at the same time.
    count = CountAggregation()
    sum = SumAggregation()

    # Set up aggregation object for rpc_bytes_bucket_boundaries
    bytes_bucket_boundaries = bucket_boundaries.BucketBoundaries(
        rpc_bytes_bucket_boundaries)
    aggregation_with_bytes_histogram = DistributionAggregation(
        bytes_bucket_boundaries.boundaries)

    # Set up aggregation object for rpc_millis_bucket_boundaries
    millis_bucket_boundaries = bucket_boundaries.BucketBoundaries(
        rpc_millis_bucket_boundaries)
    aggregation_with_millis_histogram = DistributionAggregation(
        millis_bucket_boundaries.boundaries)

    # Set up aggregation object for rpc_count_bucket_boundaries
    count_bucket_boundaries = bucket_boundaries.BucketBoundaries(
        rpc_count_bucket_boundaries)
    aggregation_with_count_histogram = DistributionAggregation(
        count_bucket_boundaries.boundaries)

    # Initialize an instance of RPC Measure Constants
    rpc_m_c = rpc_measure_constants.RPCMeasureConstants()
    """
    RPC Client Cumulative Views
    """
    # Default Views
    # The following set of views is considered the minimum
    #  required to monitor client-side performance
    grpc_client_sent_bytes_per_rpc_view = view.View(
        name="grpc.io/client/sent_bytes_per_rpc",
        description="Sent bytes per RPC",
        columns=[rpc_m_c.grpc_client_method],
        measure=rpc_m_c.grpc_client_sent_bytes_per_rpc,
        aggregation=aggregation_with_bytes_histogram)

    grpc_client_received_bytes_per_rpc_view = view.View(
        name="grpc.io/client/received_bytes_per_rpc",
        description="Received bytes per RPC",
        columns=[rpc_m_c.grpc_client_method],
        measure=rpc_m_c.grpc_client_received_bytes_per_rpc,
        aggregation=aggregation_with_bytes_histogram)

    grpc_client_roundtrip_latency_view = view.View(
        name="grpc.io/client/roundtrip_latency",
        description="Latency in msecs",
        columns=[rpc_m_c.grpc_client_method],
        measure=rpc_m_c.grpc_client_roundtrip_latency,
        aggregation=aggregation_with_millis_histogram)

    grpc_client_completed_rpc_view = view.View(
        name="grpc.io/client/completed_rpcs",
        description="Number of completed client RPCs",
        columns=[rpc_m_c.grpc_client_method, rpc_m_c.grpc_client_status],
        measure=rpc_m_c.grpc_client_roundtrip_latency,
        aggregation=count)

    grpc_client_started_rpc_view = view.View(
        name="grpc.io/client/started_rpcs",
        description="Number of started client RPCs",
        columns=[rpc_m_c.grpc_client_method],
        measure=rpc_m_c.grpc_client_started_rpcs,
        aggregation=count)

    # Extra Views
    # The following set of views is considered useful,
    #  but not mandatory, for monitoring client-side performance
    grpc_client_sent_messages_per_rpc_view = view.View(
        name="grpc.io/client/sent_messages_per_rpc",
        description="Number of messages sent in the RPC",
        columns=[rpc_m_c.grpc_client_method],
        measure=rpc_m_c.grpc_client_sent_messages_per_rpc,
        aggregation=aggregation_with_count_histogram)

    grpc_client_received_messages_per_rpc_view = view.View(
        name="grpc.io/client/received_messages_per_rpc",
        description="Number of response messages received per RPC",
        columns=[rpc_m_c.grpc_client_method],
        measure=rpc_m_c.grpc_client_received_messages_per_rpc,
        aggregation=aggregation_with_count_histogram)

    grpc_client_server_latency_view = view.View(
        name="grpc.io/client/server_latency",
        description="Server latency in msecs",
        columns=[rpc_m_c.grpc_client_method],
        measure=rpc_m_c.grpc_client_server_latency,
        aggregation=aggregation_with_millis_histogram)

    grpc_client_sent_messages_per_method_view = view.View(
        name="grpc.io/client/sent_messages_per_method",
        description="Number of messages sent",
        columns=[rpc_m_c.grpc_client_method],
        measure=rpc_m_c.grpc_client_sent_messages_per_method,
        aggregation=count)

    grpc_client_received_messages_per_method_view = view.View(
        name="grpc.io/client/received_messages_per_method",
        description="Number of messages received",
        columns=[rpc_m_c.grpc_client_method],
        measure=rpc_m_c.grpc_client_received_messages_per_method,
        aggregation=count)

    grpc_client_sent_bytes_per_method_view = view.View(
        name="grpc.io/client/sent_bytes_per_method",
        description="Sent bytes per method",
        columns=[rpc_m_c.grpc_client_method],
        measure=rpc_m_c.grpc_client_sent_bytes_per_method,
        aggregation=sum)

    grpc_client_received_bytes_per_method_view = view.View(
        name="grpc.io/client/received_bytes_per_method",
        description="Received bytes per method",
        columns=[rpc_m_c.grpc_client_method],
        measure=rpc_m_c.grpc_client_received_bytes_per_method,
        aggregation=sum)
    """
    RPC Server Cumulative Views
    """
    # Default Views
    # The following set of views is considered the minimum
    #  required to monitor server-side performance
    grpc_server_received_bytes_per_rpc = view.View(
        name="grpc.io/server/received_bytes_per_rpc",
        description="Received bytes per RPC",
        columns=[rpc_m_c.grpc_server_method],
        measure=rpc_m_c.grpc_server_received_bytes_per_rpc,
        aggregation=sum)

    grpc_server_sent_bytes_per_rpc = view.View(
        name="grpc.io/server/sent_bytes_per_rpc",
        description="Sent bytes per RPC",
        columns=[rpc_m_c.grpc_server_method],
        measure=rpc_m_c.grpc_server_sent_bytes_per_method,
        aggregation=sum)

    grpc_server_server_latency = view.View(
        name="grpc.io/server/server_latency",
        description="Latency in msecs",
        columns=[rpc_m_c.grpc_server_method],
        measure=rpc_m_c.grpc_server_server_latency,
        aggregation=aggregation_with_millis_histogram)

    grpc_server_completed_rpcs = view.View(
        name="grpc.io/server/completed_rpcs",
        description="Number of completed server RPCs",
        columns=[rpc_m_c.grpc_server_method, rpc_m_c.grpc_server_status],
        measure=rpc_m_c.grpc_server_server_latency,
        aggregation=count)

    grpc_server_started_rpcs = view.View(
        name="grpc.io/server/started_rpcs",
        description="Number of started server RPCs",
        columns=[rpc_m_c.grpc_server_method],
        measure=rpc_m_c.grpc_server_started_rpcs,
        aggregation=count)

    # Extra Views
    # The following set of views is considered useful,
    #  but not mandatory, for monitoring server-side performance
    grpc_server_received_messages_per_rpc = view.View(
        name="grpc.io/server/received_messages_per_rpc",
        description="Number of response messages received in each RPC",
        columns=[rpc_m_c.grpc_server_method],
        measure=rpc_m_c.grpc_server_received_messages_per_rpc,
        aggregation=aggregation_with_count_histogram)

    grpc_server_sent_messages_per_rpc = view.View(
        name="grpc.io/server/sent_messages_per_rpc",
        description="Number of messages sent in each RPC",
        columns=[rpc_m_c.grpc_server_method],
        measure=rpc_m_c.grpc_server_sent_messages_per_rpc,
        aggregation=aggregation_with_count_histogram)

    grpc_server_sent_messages_per_method = view.View(
        name="grpc.io/server/sent_messages_per_method",
        description="Number of messages sent",
        columns=[rpc_m_c.grpc_server_method],
        measure=rpc_m_c.grpc_server_sent_messages_per_method,
        aggregation=count)

    grpc_server_received_messages_per_method = view.View(
        name="grpc.io/server/received_messages_per_method",
        description="Number of messages received",
        columns=[rpc_m_c.grpc_server_method],
        measure=rpc_m_c.grpc_server_received_messages_per_method,
        aggregation=count)

    grpc_server_sent_bytes_per_method = view.View(
        name="grpc.io/server/sent_bytes_per_method",
        description="Sent bytes per method",
        columns=[rpc_m_c.grpc_server_method],
        measure=rpc_m_c.grpc_server_sent_bytes_per_method,
        aggregation=sum)

    grpc_server_received_bytes_per_method = view.View(
        name="grpc.io/server/received_bytes_per_method",
        description="Received bytes per method",
        columns=[rpc_m_c.grpc_server_method],
        measure=rpc_m_c.grpc_server_received_bytes_per_method,
        aggregation=sum)
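A hedged sketch of registering one of these predefined views (the stats singleton and view_manager follow opencensus-python; exporter setup is omitted):

from opencensus.stats import stats as stats_module

view_manager = stats_module.stats.view_manager
view_manager.register_view(
    RPCViewConstants.grpc_client_roundtrip_latency_view)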
Example #12
 def test_constructor_explicit(self):
     boundaries = [1/4]
     bucket_boundaries = bucket_boundaries_module.BucketBoundaries(
         boundaries=boundaries)
     self.assertTrue(bucket_boundaries.is_valid_boundaries(boundaries))
     self.assertEqual(boundaries, bucket_boundaries.boundaries)
Example #13
 def test_constructor_defaults(self):
     bucket_boundaries = bucket_boundaries_module.BucketBoundaries()
     self.assertEqual([], bucket_boundaries.boundaries)
Example #14
 def __init__(self, boundaries=None, distribution=None):
     super(DistributionAggregation, self).__init__(boundaries)
     self._aggregation_type = "distribution"
     self._boundaries = bucket_boundaries.BucketBoundaries(boundaries)
     self._distribution = distribution if distribution is not None else {}