def collect_with_metrics(self, service_metric_map):
        """Puts Spectator metrics into Prometheus client library REGISTRY."""
        service_to_name_to_info = {}
        spectator_client.foreach_metric_in_service_map(
            service_metric_map, self.__collect_instance_info,
            service_to_name_to_info)

        metric_collection = PrometheusMetricsCollection(self.__metalabels)
        for service, name_to_info in service_to_name_to_info.items():
            for name, info in name_to_info.items():
                metric_collection.add_info(service, name, info)

        return metric_collection.metrics
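Every example in this section follows the same shape: spectator_client.foreach_metric_in_service_map walks a service-to-metrics map and calls a per-metric visitor with a caller-supplied accumulator, which the caller then publishes or returns. The sketch below is a simplified stand-in written only to make that visitor/accumulator contract concrete; the nested map layout and the visitor's argument order are assumptions, not the real spectator_client API.

def foreach_metric_in_service_map_sketch(service_map, visitor, context):
    # Simplified stand-in: visit every metric of every service with a shared context.
    for service, metrics in service_map.items():
        for name, metric_metadata in metrics.items():
            visitor(service, name, metric_metadata, context)

def count_metrics(service, name, metric_metadata, context):
    # Trivial visitor that just records which metrics it saw.
    context.append('{0}:{1}'.format(service, name))

seen = []
foreach_metric_in_service_map_sketch(
    {'clouddriver': {'jvm.memory.used': {'values': []}}}, count_metrics, seen)
assert seen == ['clouddriver:jvm.memory.used']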
Example #2
    def publish_metrics(self, service_metrics):
        """Writes time series data to Datadog for a metric snapshot."""
        points = []
        spectator_client.foreach_metric_in_service_map(
            service_metrics, self.__append_timeseries_point, points)

        try:
            self.api.Metric.send(points)
        except IOError as ioerr:
            logging.error('Error sending to datadog: %s', ioerr)
            raise

        return len(points)
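The __append_timeseries_point callback is not shown here, so the sketch below only illustrates one plausible shape for an entry in `points`, assuming api.Metric.send() is being handed a list of per-metric dicts in the style the datadog client accepts; the field choices are illustrative assumptions, not the daemon's actual payload.

import time

def make_datadog_point(service, name, value, host, tags=None):
    # Hypothetical helper: one Datadog series with a single (timestamp, value) sample.
    return {
        'metric': '{0}.{1}'.format(service, name),
        'host': host,
        'points': [(int(time.time()), value)],
        'tags': tags or [],
    }

# e.g. api.Metric.send([make_datadog_point('clouddriver', 'jvm.memory.used',
#                                          123.0, 'spinnaker-host')])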
Example #3
  def publish_metrics(self, service_metrics):
    """Writes time series data to Datadog for a metric snapshot."""
    points = []
    spectator_client.foreach_metric_in_service_map(
        service_metrics, self.__append_timeseries_point, points)

    try:
      response = self.api.Metric.send(points)
    except IOError as ioerr:
      logging.error('Error sending to datadog: %s', ioerr)
      raise

    return len(points)
Example #4
    def collect(self):
        """Implements Prometheus Client interface."""
        service_to_name_to_info = {}

        service_metric_map = self.__spectator.scan_by_service(
            self.__service_endpoints)
        spectator_client.foreach_metric_in_service_map(
            service_metric_map, self.__collect_instance_info,
            service_to_name_to_info)

        all_members = []
        for service, name_to_info in service_to_name_to_info.items():
            for name, info in name_to_info.items():
                family = (CounterMetricFamily if info.kind
                          in ('Counter', 'Timer') else GaugeMetricFamily)

                member_name = '{service}:{name}'.format(service=service,
                                                        name=name.replace(
                                                            '.', ':'))

                tags = list(info.tags)
                all_tags = list(tags)
                if self.__add_metalabels:
                    all_tags.extend(['job', 'instance'])
                member = family(member_name, '', labels=all_tags)
                all_members.append(member)

                for record in info.records:
                    if isinstance(record, dict):
                        # Unexpected record shape; log it so it can be diagnosed.
                        logging.warning('*** RECORD %r', record)
                        logging.warning('*** INFO %r', info)

                    instance = record.data
                    labels = [''] * len(tags)
                    for elem in record.tags:
                        # list.index() raises ValueError for unknown keys; the old
                        # "index >= 0" guard never fired, so check membership instead.
                        if elem['key'] in tags:
                            labels[tags.index(elem['key'])] = elem['value']
                    if self.__add_metalabels:
                        labels.append(record.service)
                        labels.append(record.netloc)

                    # Just use the first value. We aren't controlling the timestamp,
                    # so multiple values would be meaningless anyway.
                    member.add_metric(labels=labels,
                                      value=instance['values'][0]['v'])

        for metric in all_members:
            yield metric
Example #5
    def collect(self):
        """Implements Prometheus Client interface."""
        service_to_name_to_info = {}

        service_metric_map = self.__spectator.scan_by_service(self.__catalog)
        spectator_client.foreach_metric_in_service_map(
            service_metric_map, self.__collect_instance_info,
            service_to_name_to_info)

        all_members = []
        for service, name_to_info in service_to_name_to_info.items():
            for name, info in name_to_info.items():
                family = (CounterMetricFamily if info.kind
                          in ('Counter', 'Timer') else GaugeMetricFamily)

                member_name = '{service}:{name}'.format(service=service,
                                                        name=name.replace(
                                                            '.', ':'))

                tags = list(info.tags)
                all_tags = list(tags)
                if self.__add_metalabels:
                    all_tags.extend(['job', 'instance'])
                member = family(member_name, '', labels=all_tags)
                all_members.append(member)

                # All the Prometheus metrics need to have the same sequence of tags.
                # However, they did not necessarily come this way from Spectator, so we
                # will normalize them. Fortunately it doesn't matter if the tags change
                # from period to period (call to call); they only need to be consistent
                # within the individual collection response.
                for record in info.records:
                    instance = record.data
                    labels = [''] * len(tags)
                    for elem in record.tags:
                        # list.index() raises ValueError for unknown keys; the old
                        # "index >= 0" guard never fired, so check membership instead.
                        if elem['key'] in tags:
                            labels[tags.index(elem['key'])] = elem['value']
                    if self.__add_metalabels:
                        labels.append(record.service)
                        labels.append(record.netloc)

                    # Just use the first value. We aren't controlling the timestamp,
                    # so multiple values would be meaningless anyway.
                    member.add_metric(labels=labels,
                                      value=instance['values'][0]['v'])

        for metric in all_members:
            yield metric
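For either collect() variant to be scraped, the collector has to be registered with the prometheus_client registry and exposed over HTTP. The following is a minimal, self-contained usage sketch using the standard prometheus_client calls; TrivialCollector is a placeholder standing in for whichever class defines collect() above.

from prometheus_client import start_http_server
from prometheus_client.core import REGISTRY, GaugeMetricFamily

class TrivialCollector(object):
    # Placeholder collector; a real one would wrap the collect() implementations above.
    def collect(self):
        gauge = GaugeMetricFamily('spinnaker_example_gauge', 'Illustration only.',
                                  labels=['service'])
        gauge.add_metric(['clouddriver'], 1.0)
        yield gauge

REGISTRY.register(TrivialCollector())  # collect() runs on every scrape of /metrics
start_http_server(8008)                # serve the registry on this port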
Example #6
    def publish_metrics(self, service_metrics):
        metric_list = []
        spectator_client.foreach_metric_in_service_map(
            service_metrics, self.parse_metric, metric_list)

        # Using 1000 as a known-good batch size, not a theoretical maximum.
        chunk_size = 1000
        chunks = [
            metric_list[i:i + chunk_size]
            for i in range(0, len(metric_list), chunk_size)
        ]
        for chunk in chunks:
            response = self.metric_client.send_batch(chunk)
            response.raise_for_status()
        return len(metric_list)
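This fixed-slice batching and the offset loop in the next example express the same bounded-batch idea; a small generic helper like the sketch below could serve either, assuming only that the payload is an indexable list.

def chunked(items, size):
    # Yield successive slices of at most `size` items.
    for start in range(0, len(items), size):
        yield items[start:start + size]

# e.g. for chunk in chunked(metric_list, 1000):
#          self.metric_client.send_batch(chunk)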
Example #7
    def publish_metrics(self, service_metrics):
        """Writes time series data to Datadog for a metric snapshot."""
        points = []
        spectator_client.foreach_metric_in_service_map(
            service_metrics, self.__append_timeseries_point, points)

        offset = 0
        while offset < len(points):
            last = min(offset + self.MAX_BATCH, len(points))
            chunk = points[offset:last]
            try:
                self.api.Metric.send(chunk)
            except IOError as ioerr:
                logging.error('Error sending to datadog: %s', ioerr)
            offset = last
        return len(points)
Example #8
  def publish_metrics(self, service_metrics):
    time_series = []
    spectator_client.foreach_metric_in_service_map(
        service_metrics, self.add_metric_to_timeseries, time_series)
    offset = 0
    method = self.stub.projects().timeSeries().create

    while offset < len(time_series):
      last = min(offset + self.MAX_BATCH, len(time_series))
      chunk = time_series[offset:last]
      try:
        (method(name=self.project_to_resource(self.__project),
                body={'timeSeries': chunk})
         .execute())
      except HttpError as err:
        self.handle_time_series_http_error(err, chunk)
      offset = last
    return len(time_series)
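The add_metric_to_timeseries callback is not shown, so the sketch below only illustrates one plausible entry in `time_series`, following the Monitoring v3 TimeSeries JSON structure that timeSeries.create accepts; the metric type, label, and resource choices are assumptions made for illustration.

def make_time_series_entry(metric_type, value, end_time, project_id):
    # Hypothetical shape of one entry in `time_series`: one point on one series,
    # attached to the 'global' monitored resource.
    return {
        'metric': {'type': metric_type},
        'resource': {'type': 'global', 'labels': {'project_id': project_id}},
        'points': [{'interval': {'endTime': end_time},
                    'value': {'doubleValue': value}}],
    }

# e.g. make_time_series_entry(
#          'custom.googleapis.com/spinnaker/clouddriver/jvm.memory.used',
#          123.0, '2016-01-01T00:00:00Z', 'my-project')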
Example #10
    def publish_metrics(self, service_metrics):
        self._update_monitored_resources(service_metrics)

        operation_map = {}
        spectator_client.foreach_metric_in_service_map(
            service_metrics, self.add_metric_operation, operation_map)

        total_count = 0
        try:
            (self.stub.services().report(
                name=self.project_to_resource(self.project),
                body={
                    # Materialize the view so the request body is JSON-serializable.
                    'operations': list(operation_map.values())
                }).execute())
            for operation in operation_map.values():
                total_count += len(operation.get('metricValueSets', []))
        except HttpError as err:
            logging.error(err)

        return total_count
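The add_metric_operation callback and the API behind self.stub are not shown, so the sketch below only illustrates one plausible entry in operation_map, loosely modeled on a Service Control style report payload in which each operation carries metricValueSets; every field here is an assumption made for illustration.

def make_report_operation(operation_id, metric_name, int_value, start_time, end_time):
    # Hypothetical operation_map value: one operation with a single metricValueSets
    # entry, so len(operation.get('metricValueSets', [])) counts one metric.
    return {
        'operationId': operation_id,
        'startTime': start_time,
        'endTime': end_time,
        'metricValueSets': [{
            'metricName': metric_name,
            'metricValues': [{'int64Value': str(int_value)}],
        }],
    }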
Example #11
  def store(self, service_metrics):
    spectator_client.foreach_metric_in_service_map(service_metrics, self.dump)
    return -1