Exemple #1
0
 def timing(cls, stat, dt):
     """Record a timing value *dt* as a gauge metric named *stat*.

     :param stat: metric name
     :param dt: a ``datetime.timedelta``, or a plain number when the
         caller reports a raw value
     """
     try:
         # BUG FIX: ``dt.microseconds`` is only the fractional-second
         # component (0-999999) of a timedelta and silently drops whole
         # seconds/days; report the total duration in microseconds.
         micros = dt.total_seconds() * 1e6
     except AttributeError:
         # ``dt`` is not a timedelta; record the raw numeric value.
         metric = GaugeMetric.from_value(stat, float(dt))
     else:
         metric = GaugeMetric.from_value(stat,
                                         micros,
                                         tags={"units": "microseconds"})
     cls.recorder().record(metric)
Exemple #2
0
def test_harvest_loop_flush(harvester, client, batch, response_ok):
    """Once the deadline passes, the loop flushes merged metrics to the client."""
    # Force the desired HTTP response status on the stubbed Response.
    Response.ok = response_ok

    # Record the same metric twice; the batch merges them into one item.
    gauge = GaugeMetric("foo", 1000)
    expected_ts = batch._interval_start
    harvester.record(gauge)
    harvester.record(gauge)

    harvester._deadline = time.time() + 600

    drain_queue(harvester)

    harvester._deadline = 0
    before_loop = time.time()
    assert harvester._loop() is False

    # After flushing, the deadline must be pushed well into the future.
    assert harvester._deadline > (before_loop + 5)

    recorded_calls = client.calls
    assert len(recorded_calls) == 1
    sent_items, sent_common = recorded_calls[0]
    assert len(sent_items) == 1
    first_item = sent_items[0]
    assert first_item["name"] == "foo"
    assert first_item["value"] == 1000
    assert sent_common["timestamp"] == expected_ts
    assert "interval.ms" in sent_common
Exemple #3
0
def test_create_identity(tags):
    """create_identity yields a (metric type, name, frozen tags) triple."""
    gauge = GaugeMetric("name", 1000, tags=tags)

    ident = MetricBatch.create_identity(gauge)
    assert len(ident) == 3

    metric_type, metric_name, metric_tags = ident
    assert metric_type is GaugeMetric
    assert metric_name == "name"
    assert metric_tags == (frozenset(tags.items()) if tags else None)
Exemple #4
0
def test_send_batch_failed(harvester, batch, caplog):
    """A 500 response from send_batch is reported at ERROR level."""
    batch.record(GaugeMetric("foo", 1000))

    # The stubbed client is set up to answer with a 500 status.
    drain_queue(harvester)

    expected_record = (
        "newrelic_airflow_plugin.harvester",
        logging.ERROR,
        "New Relic send_batch failed with status code: 500",
    )
    assert expected_record in caplog.record_tuples
Exemple #5
0
def test_send_batch_exception(harvester, caplog):
    """An exception inside send_batch is caught and logged at ERROR level."""
    metric_batch = MetricBatch()
    metric_batch.record(GaugeMetric("foo", 1000))

    # ``object()`` has no send_batch attribute, so the loop must raise
    # internally and swallow the exception.
    harvester._loop(object(), metric_batch)

    expected_record = (
        "newrelic_airflow_plugin.harvester",
        logging.ERROR,
        "New Relic send_batch failed with an exception.",
    )
    assert expected_record in caplog.record_tuples
Exemple #6
0
def test_harvest_loop_unflushed(harvester, batch):
    """Metrics recorded before the deadline stay merged in the batch."""
    gauge = GaugeMetric("foo", 1000)
    harvester.record(gauge)
    harvester.record(gauge)

    # A far-future deadline keeps the loop from flushing.
    harvester._deadline = time.time() + 600

    drain_queue(harvester)

    flushed, _ = batch.flush()
    assert len(flushed) == 1
    first = flushed[0]
    assert first["name"] == "foo"
    assert first["value"] == 1000
 def parse_metric(self, service, metric_name, metric_instance, metric_data,
                  service_data, metric_list):
     """Convert one spectator metric instance into New Relic metrics.

     Appends one GaugeMetric or CountMetric per data point in
     ``metric_instance["values"]`` to *metric_list*.
     """
     primitive_kind = self.spectator_helper.determine_primitive_kind(
         metric_data["kind"])
     is_gauge = primitive_kind == spectator_client.GAUGE_PRIMITIVE_KIND
     for point in metric_instance["values"]:
         # Per-point tags: base tags + instance tags + application identity.
         tags = dict(self.tags)
         tags.update((t["key"], t["value"]) for t in metric_instance["tags"])
         tags["applicationName"] = service_data["applicationName"]
         tags["applicationVersion"] = service_data["applicationVersion"]
         if is_gauge:
             metric_list.append(
                 GaugeMetric(name=metric_name,
                             value=point["v"],
                             tags=tags))
         else:
             # Interval spans from collection start to this point's time.
             interval = point["t"] - service_data["__collectStartTime"]
             metric_list.append(
                 CountMetric(name=metric_name,
                             value=point["v"],
                             interval_ms=interval,
                             tags=tags))
Exemple #8
0
 def gauge(cls, stat, value, rate=1, delta=False):
     """Record *value* for *stat* as a gauge.

     ``rate`` and ``delta`` are accepted for interface compatibility but
     are not used by this implementation.
     """
     cls.recorder().record(GaugeMetric.from_value(stat, value))
Exemple #9
0
@pytest.mark.parametrize("tags", (None, {"foo": "bar"}))
def test_create_identity(tags):
    """Identity is a 3-tuple of (metric type, name, frozen tag set)."""
    gauge = GaugeMetric("name", 1000, tags=tags)

    identity = MetricBatch.create_identity(gauge)

    expected = (GaugeMetric, "name",
                frozenset(tags.items()) if tags else None)
    assert len(identity) == 3
    assert identity[0] is expected[0]
    assert identity[1] == expected[1]
    assert identity[2] == expected[2]


@pytest.mark.parametrize(
    "metric_a, metric_b, expected_value",
    (
        (GaugeMetric("name", 1), GaugeMetric("name", 2), 2),
        (CountMetric("name", 1), CountMetric("name", 2), 3),
        (
            SummaryMetric.from_value("name", 1),
            SummaryMetric.from_value("name", 2),
            {"count": 2, "max": 2, "min": 1, "sum": 3},
        ),
    ),
)
def test_merge_metric(metric_a, metric_b, expected_value):
    # NOTE(review): this snippet appears truncated -- ``expected_value`` is
    # never asserted against below; confirm against the original test file.
    batch = MetricBatch()

    batch.record(metric_a)
    batch.record(metric_b)

    # Recording must stamp a start time on the metric.
    assert metric_a.start_time_ms
    def export_metrics(self, metrics):
        """Immediately send all metric data to the monitoring backend.

        Converts each OpenCensus time series into a New Relic
        SummaryMetric, CountMetric, or GaugeMetric (delta-encoding counts
        and summaries against previously seen values), then sends the
        whole batch via ``self.client.send_batch``.

        :param metrics: list of Metric objects to send to the monitoring
            backend
        :type metrics: :class:`opencensus.metrics.export.metric.Metric`
        :returns: the client response, or ``None`` when there was nothing
            to send or the send raised.
        """
        nr_metrics = []
        for metric in metrics:
            descriptor = metric.descriptor
            name = descriptor.name
            # Look up the view registered for this metric to recover the
            # measure metadata and the aggregation type.
            view = self.views[name]
            measure_name = view.measure.name
            measure_unit = view.measure.unit
            aggregation_type = view.aggregation

            tags = {"measure.name": measure_name, "measure.unit": measure_unit}

            for timeseries in metric.time_series:
                # Only the first point of each series is exported.
                value = timeseries.points[0].value
                if hasattr(value, "value"):
                    # Scalar point (gauge/count style).
                    value = value.value
                elif hasattr(value, "count") and hasattr(value, "sum"):
                    # Distribution-style point: keep count and sum only.
                    value = {"count": value.count, "sum": value.sum}
                else:
                    # NOTE(review): ``break`` skips the REMAINING time
                    # series of this metric, not just this one -- confirm
                    # that is intended (``continue`` would skip only one).
                    _logger.warning("Unable to send metric %s with value: %s",
                                    name, value)
                    break

                # Convert the point's UTC timestamp to epoch milliseconds.
                timestamp = timeseries.points[0].timestamp
                time_tuple = timestamp.utctimetuple()
                epoch_time_secs = calendar.timegm(time_tuple)
                epoch_time_mus = epoch_time_secs * 1e6 + timestamp.microsecond
                end_time_ms = epoch_time_mus // 1000

                # Pair each view column with its label value for this series.
                labels = (
                    (k, l.value)
                    for k, l in zip(view.columns, timeseries.label_values))

                _tags = tags.copy()
                _tags.update(labels)

                if isinstance(value, dict):
                    # Distribution point -> SummaryMetric.
                    identity = MetricBatch.create_identity(
                        name, _tags, "summary")

                    # compute a delta count based on the previous value. if one
                    # does not exist, report the raw count value.
                    if identity in self.merged_values:
                        last = self.merged_values[identity]
                        delta_count = value["count"] - last["count"]
                        delta_sum = value["sum"] - last["sum"]
                    else:
                        delta_count = value["count"]
                        delta_sum = value["sum"]

                    # Remember the cumulative value for the next export.
                    self.merged_values[identity] = value

                    # min/max are not derivable from count+sum deltas.
                    nr_metric = SummaryMetric(
                        name=name,
                        count=delta_count,
                        sum=delta_sum,
                        min=None,
                        max=None,
                        tags=_tags,
                        end_time_ms=end_time_ms,
                        interval_ms=None,
                    )

                elif type(aggregation_type) in COUNT_AGGREGATION_TYPES:
                    identity = MetricBatch.create_identity(
                        name, _tags, "count")

                    # Compute a delta count based on the previous value. If one
                    # does not exist, report the raw count value.
                    delta = value - self.merged_values.get(identity, 0)
                    self.merged_values[identity] = value
                    value = delta

                    nr_metric = CountMetric(
                        name=name,
                        value=value,
                        tags=_tags,
                        end_time_ms=end_time_ms,
                        interval_ms=None,
                    )

                else:
                    # Everything else is reported as a point-in-time gauge.
                    nr_metric = GaugeMetric(name=name,
                                            value=value,
                                            tags=_tags,
                                            end_time_ms=end_time_ms)

                nr_metrics.append(nr_metric)

        # Do not send an empty metrics payload
        if not nr_metrics:
            return

        try:
            response = self.client.send_batch(nr_metrics, common=self._common)
        except Exception:
            _logger.exception(
                "New Relic send_metrics failed with an exception.")
            return

        if not response.ok:
            _logger.error("New Relic send_metrics failed with status code: %r",
                          response.status)
        return response
#!/usr/bin/env python
"""Minimal example: send a single gauge metric to New Relic."""

import os

from newrelic_telemetry_sdk import GaugeMetric, MetricClient

# Raises KeyError early if the insert key is not configured.
metric_client = MetricClient(os.environ['NEW_RELIC_INSERT_KEY'])

# Fix: corrected the misspelled unit tag ("Farenheit" -> "Fahrenheit").
temperature = GaugeMetric("temperature", 78.6, {"units": "Fahrenheit"})

response = metric_client.send(temperature)
    def export_metrics(self, metrics):
        """Immediately send all metric data to the monitoring backend.

        Converts each OpenCensus time series into a New Relic CountMetric
        (delta-encoded against previously seen values) or GaugeMetric and
        sends the whole batch via ``self.client.send_batch``.

        :param metrics: list of Metric objects to send to the monitoring
            backend
        :type metrics: :class:`opencensus.metrics.export.metric.Metric`
        :returns: the client response, or ``None`` when there was nothing
            to send or the send raised.
        """
        nr_metrics = []
        for metric in metrics:
            descriptor = metric.descriptor
            name = descriptor.name
            # Look up the view registered for this metric to recover the
            # measure metadata and the aggregation type.
            view = self.views[name]
            measure_name = view.measure.name
            measure_unit = view.measure.unit
            aggregation_type = view.aggregation

            tags = {"measure.name": measure_name, "measure.unit": measure_unit}

            for timeseries in metric.time_series:
                # In distribution aggregations, the values do not have a value attribute
                # We simply ignore this case for now
                try:
                    value = timeseries.points[0].value.value
                except AttributeError:
                    # NOTE(review): ``break`` skips the REMAINING time
                    # series of this metric as well -- confirm intended.
                    break

                # Convert the point's UTC timestamp to epoch milliseconds.
                timestamp = timeseries.points[0].timestamp
                time_tuple = timestamp.utctimetuple()
                epoch_time_secs = calendar.timegm(time_tuple)
                epoch_time_mus = epoch_time_secs * 1e6 + timestamp.microsecond
                end_time_ms = epoch_time_mus // 1000

                # Pair each view column with its label value for this series.
                labels = (
                    (k, l.value)
                    for k, l in zip(view.columns, timeseries.label_values))

                _tags = tags.copy()
                _tags.update(labels)

                if type(aggregation_type) in COUNT_AGGREGATION_TYPES:
                    identity = create_identity(name, _tags)

                    # Compute a delta count based on the previous value. If one
                    # does not exist, report the raw count value.
                    delta = value - self.count_values.get(identity, 0)
                    self.count_values[identity] = value
                    value = delta

                    nr_metric = CountMetric(name=name,
                                            value=value,
                                            tags=_tags,
                                            end_time_ms=end_time_ms)

                else:
                    # Everything else is reported as a point-in-time gauge.
                    nr_metric = GaugeMetric(name=name,
                                            value=value,
                                            tags=_tags,
                                            end_time_ms=end_time_ms)

                nr_metrics.append(nr_metric)

        # Do not send an empty metrics payload (only DistributionAggregation values)
        if not nr_metrics:
            return

        try:
            response = self.client.send_batch(nr_metrics, common=self._common)
        except Exception:
            _logger.exception(
                "New Relic send_metrics failed with an exception.")
            return

        if not response.ok:
            _logger.error("New Relic send_metrics failed with status code: %r",
                          response.status)
        return response