Example #1
0
 def to_runner_api_monitoring_info(self, name, transform_id):
     """Build an int64 user-counter MonitoringInfo for this counter.

     Args:
       name: Metric name object exposing ``namespace`` and ``name``.
       transform_id: The ptransform/step id used as the PTRANSFORM label.
     """
     from apache_beam.metrics import monitoring_infos

     # The cumulative count must be wrapped in a CounterData payload
     # before being handed to int64_user_counter.
     payload = metrics_pb2.Metric(
         counter_data=metrics_pb2.CounterData(
             int64_value=self.get_cumulative()))
     return monitoring_infos.int64_user_counter(
         name.namespace, name.name, payload, ptransform=transform_id)
Example #2
0
 def to_runner_api_monitoring_info(self):
     """Package this counter's cumulative value as a Metric proto."""
     # TODO(ajamato): Update this code to be consistent with Gauges
     # and Distributions. Since there is no CounterData class this method
     # was added to CounterCell. Consider adding a CounterData class or
     # removing the GaugeData and DistributionData classes.
     counter = metrics_pb2.CounterData(int64_value=self.get_cumulative())
     return metrics_pb2.Metric(counter_data=counter)
def int64_gauge(urn, metric, ptransform=None, tag=None):
    """Return the gauge monitoring info for the URN, metric and labels.

    Args:
      urn: The URN of the monitoring info/metric.
      metric: The metric proto field to use in the monitoring info.
          Or an int value.
      ptransform: The ptransform/step name used as a label.
      tag: The output tag name, used as a label.
    """
    # Promote a bare int to a Metric proto carrying it as counter data.
    if isinstance(metric, int):
        counter = metrics_pb2.CounterData(int64_value=metric)
        metric = metrics_pb2.Metric(counter_data=counter)
    return create_monitoring_info(
        urn, LATEST_INT64_TYPE, metric,
        create_labels(ptransform=ptransform, tag=tag))
def int64_counter(urn, metric, ptransform=None, tag=None):
    # type: (...) -> metrics_pb2.MonitoringInfo
    """Return the counter monitoring info for the specified URN, metric
    and labels.

    Args:
      urn: The URN of the monitoring info/metric.
      metric: The metric proto field to use in the monitoring info.
          Or an int value.
      ptransform: The ptransform/step name used as a label.
      tag: The output tag name, used as a label.
    """
    # Promote a bare int to a Metric proto carrying it as counter data.
    if isinstance(metric, int):
        counter = metrics_pb2.CounterData(int64_value=metric)
        metric = metrics_pb2.Metric(counter_data=counter)
    return create_monitoring_info(
        urn, SUM_INT64_TYPE, metric,
        create_labels(ptransform=ptransform, tag=tag))
Example #5
0
 def to_runner_api_monitoring_info(self):
     """Pack this cell's current value into a Metric proto."""
     data = metrics_pb2.CounterData(int64_value=self.value)
     return metrics_pb2.Metric(counter_data=data)

def distribution_combiner(metric_a, metric_b):
    """Merge two int-distribution Metric protos into one.

    Counts and sums add; min/max take the extreme across both inputs.
    """
    lhs = metric_a.distribution_data.int_distribution_data
    rhs = metric_b.distribution_data.int_distribution_data
    merged = metrics_pb2.IntDistributionData(
        count=lhs.count + rhs.count,
        sum=lhs.sum + rhs.sum,
        min=min(lhs.min, rhs.min),
        max=max(lhs.max, rhs.max))
    return metrics_pb2.Metric(
        distribution_data=metrics_pb2.DistributionData(
            int_distribution_data=merged))


# Combiners for metric types we know how to consolidate: int64 sums add
# their counter values; int64 distributions merge via distribution_combiner.
_KNOWN_COMBINERS = {
    SUM_INT64_TYPE: lambda a, b: metrics_pb2.Metric(
        counter_data=metrics_pb2.CounterData(
            int64_value=a.counter_data.int64_value +
            b.counter_data.int64_value)),
    DISTRIBUTION_INT64_TYPE: distribution_combiner,
}


def max_timestamp(a, b):
    """Return whichever of two protobuf Timestamps is later (b on ties)."""
    return a if a.ToNanoseconds() > b.ToNanoseconds() else b


def consolidate(metrics, key=to_key):
    grouped = collections.defaultdict(list)
    for metric in metrics:
def distribution_combiner(metric_a, metric_b):
  """Combine two int-distribution Metric protos element-wise.

  Counts and sums add; min/max take the extreme across both inputs.
  """
  first = metric_a.distribution_data.int_distribution_data
  second = metric_b.distribution_data.int_distribution_data
  combined = metrics_pb2.IntDistributionData(
      count=first.count + second.count,
      sum=first.sum + second.sum,
      min=min(first.min, second.min),
      max=max(first.max, second.max))
  return metrics_pb2.Metric(
      distribution_data=metrics_pb2.DistributionData(
          int_distribution_data=combined))


# Combiners for metric types we know how to consolidate: int64 sums add
# their counter values; int64 distributions merge via distribution_combiner.
_KNOWN_COMBINERS = {
    SUM_INT64_TYPE: lambda a, b: metrics_pb2.Metric(
        counter_data=metrics_pb2.CounterData(
            int64_value=a.counter_data.int64_value +
            b.counter_data.int64_value)),
    DISTRIBUTION_INT64_TYPE: distribution_combiner,
}


def max_timestamp(a, b):
  """Of two protobuf Timestamps, return the later one (b when equal)."""
  if a.ToNanoseconds() <= b.ToNanoseconds():
    return b
  return a


def consolidate(metrics, key=to_key):
  grouped = collections.defaultdict(list)
  for metric in metrics: