# Example #1
# 0
    def test_convert_view_without_labels(self):
        """Check conversion of a ViewData whose view has no tag columns."""
        fake_measure = mock.Mock(spec=measure.MeasureFloat)
        fake_aggregation = mock.Mock(spec=aggregation.DistributionAggregation)
        fake_aggregation.aggregation_type = aggregation.Type.DISTRIBUTION

        fake_view_data = mock.Mock(spec=view_data.ViewData)
        fake_view_data.view = view.View(
            name=mock.Mock(),
            description=mock.Mock(),
            columns=[],
            measure=fake_measure,
            aggregation=fake_aggregation)
        fake_view_data.start_time = '2019-04-11T22:33:44.555555Z'

        fake_point = mock.Mock(spec=point.Point)
        fake_point.value = mock.Mock(spec=value.ValueDistribution)

        fake_agg_data = mock.Mock(
            spec=aggregation_data.SumAggregationDataFloat)
        fake_agg_data.to_point.return_value = fake_point

        # With no tag columns the aggregation map has a single empty key.
        fake_view_data.tag_value_aggregation_data_map = {(): fake_agg_data}

        now = '2019-04-11T22:33:55.666666Z'
        metric = metric_utils.view_data_to_metric(fake_view_data, now)

        # Expect no label keys, one series with no label values, and the
        # converted point passed straight through.
        self.assertEqual(metric.descriptor.label_keys, [])
        self.assertEqual(len(metric.time_series), 1)
        [series] = metric.time_series
        self.assertEqual(series.label_values, [])
        self.assertEqual(len(series.points), 1)
        [converted_point] = series.points
        self.assertEqual(converted_point, fake_point)
def generate_metrics(view_data_objects):
    """Convert each ViewData into a Metric stamped with TEST_TIMESTAMP.

    :param view_data_objects: iterable of ViewData instances to convert.
    :return: list of Metrics, one per input, in input order.
    """
    # A comprehension replaces the manual append loop (pure construction).
    return [
        metric_utils.view_data_to_metric(view_data_object, TEST_TIMESTAMP)
        for view_data_object in view_data_objects
    ]
    def test_create_batched_time_series(self, monitor_resource_mock):
        """Check batching of a single time series into one batch of one."""
        client = mock.Mock()
        v_data = view_data_module.ViewData(view=VIDEO_SIZE_VIEW,
                                           start_time=TEST_TIME_STR,
                                           end_time=TEST_TIME_STR)
        v_data.record(context=tag_map_module.TagMap(), value=2, timestamp=None)
        view_data = [v_data]

        option = stackdriver.Options(project_id="project-test")
        exporter = stackdriver.StackdriverStatsExporter(options=option,
                                                        client=client)

        # Convert the recorded ViewData into the Metric the exporter consumes.
        view_data = [metric_utils.view_data_to_metric(view_data[0], TEST_TIME)]

        time_series_batches = exporter.create_batched_time_series(view_data, 1)

        # One series with batch size 1 -> exactly one batch of one series.
        self.assertEqual(len(time_series_batches), 1)
        [time_series_batch] = time_series_batches
        self.assertEqual(len(time_series_batch), 1)
        [time_series] = time_series_batch
        self.assertEqual(
            time_series.metric.type,
            'custom.googleapis.com/opencensus/' + VIDEO_SIZE_VIEW_NAME)
        self.check_labels(time_series.metric.labels, {},
                          include_opencensus=True)
    def test_create_timeseries_multiple_tag_values(self,
                                                   monitoring_resoure_mock):
        """Check that one time series is produced per distinct tag value."""
        view_manager, stats_recorder, exporter = \
            self.setup_create_timeseries_test()

        view_manager.register_view(VIDEO_SIZE_VIEW)

        measure_map = stats_recorder.new_measurement_map()

        # Add first point with one tag value
        tag_map = tag_map_module.TagMap()
        tag_map.insert(FRONTEND_KEY, tag_value_module.TagValue("1200"))
        measure_map.measure_int_put(VIDEO_SIZE_MEASURE, 25 * MiB)
        measure_map.record(tag_map)

        # Add second point with different tag value
        tag_map = tag_map_module.TagMap()
        tag_map.insert(FRONTEND_KEY, tag_value_module.TagValue("1400"))
        measure_map.measure_int_put(VIDEO_SIZE_MEASURE, 12 * MiB)
        measure_map.record(tag_map)

        v_data = measure_map.measure_to_view_map.get_view(
            VIDEO_SIZE_VIEW_NAME, None)

        # Convert the recorded ViewData into the Metric the exporter consumes.
        v_data = metric_utils.view_data_to_metric(v_data, TEST_TIME)

        time_series_list = exporter.create_time_series_list(v_data)

        # One series per tag value, keyed here by the frontend label.
        self.assertEqual(len(time_series_list), 2)
        ts_by_frontend = {
            ts.metric.labels.get(FRONTEND_KEY_CLEAN): ts
            for ts in time_series_list
        }
        self.assertEqual(set(ts_by_frontend.keys()), {"1200", "1400"})
        ts1 = ts_by_frontend["1200"]
        ts2 = ts_by_frontend["1400"]

        # Verify first time series
        self.assertEqual(ts1.resource.type, "global")
        self.assertEqual(
            ts1.metric.type,
            "custom.googleapis.com/opencensus/my.org/views/video_size_test2")
        self.assertIsNotNone(ts1.resource)

        self.assertEqual(len(ts1.points), 1)
        value1 = ts1.points[0].value
        self.assertEqual(value1.distribution_value.count, 1)

        # Verify second time series
        self.assertEqual(ts2.resource.type, "global")
        self.assertEqual(
            ts2.metric.type,
            "custom.googleapis.com/opencensus/my.org/views/video_size_test2")
        self.assertIsNotNone(ts2.resource)

        self.assertEqual(len(ts2.points), 1)
        value2 = ts2.points[0].value
        self.assertEqual(value2.distribution_value.count, 1)
    def do_test_view_data_to_metric(self, aggregation_class,
                                    value_type, metric_descriptor_type):
        """Test that ViewDatas are converted correctly into Metrics.

        This test doesn't check that the various aggregation data `to_point`
        methods handle the point conversion correctly, just that converted
        Point is included in the Metric, and the metric has the expected
        structure, descriptor, and labels.
        """
        start_time = datetime.datetime(2019, 1, 25, 11, 12, 13)
        current_time = datetime.datetime(2019, 1, 25, 12, 13, 14)

        mock_measure = mock.Mock(spec=measure.MeasureFloat)
        mock_aggregation = mock.Mock(spec=aggregation_class)
        mock_aggregation.get_metric_type.return_value = metric_descriptor_type

        # View with two tag columns; the converted metric descriptor should
        # carry the same keys, in order.
        vv = view.View(
            name=mock.Mock(),
            description=mock.Mock(),
            columns=[tag_key.TagKey('k1'), tag_key.TagKey('k2')],
            measure=mock_measure,
            aggregation=mock_aggregation)

        vd = mock.Mock(spec=view_data.ViewData)
        vd.view = vv
        vd.start_time = start_time

        mock_point = mock.Mock(spec=point.Point)
        mock_point.value = mock.Mock(spec=value_type)

        mock_agg = mock.Mock(spec=aggregation_data.SumAggregationData)
        mock_agg.to_point.return_value = mock_point

        # Single entry keyed by one tag value per view column.
        vd.tag_value_aggregation_data_map = {
            (tag_value.TagValue('v1'), tag_value.TagValue('v2')): mock_agg
        }

        metric = metric_utils.view_data_to_metric(vd, current_time)
        # The conversion timestamp must be forwarded to the aggregation data.
        mock_agg.to_point.assert_called_once_with(current_time)

        # Descriptor mirrors the view's name/description/unit/type/keys.
        self.assertEqual(metric.descriptor.name, vv.name)
        self.assertEqual(metric.descriptor.description, vv.description)
        self.assertEqual(metric.descriptor.unit, vv.measure.unit)
        self.assertEqual(metric.descriptor.type, metric_descriptor_type)
        self.assertListEqual(
            [lk.key for lk in metric.descriptor.label_keys],
            ['k1', 'k2'])

        # One time series per aggregation-map entry, with matching labels
        # and exactly the converted point.
        self.assertEqual(len(metric.time_series), 1)
        [ts] = metric.time_series
        self.assertEqual(ts.start_timestamp, start_time)
        self.assertListEqual(
            [lv.value for lv in ts.label_values],
            ['v1', 'v2'])
        self.assertEqual(len(ts.points), 1)
        [pt] = ts.points
        self.assertEqual(pt, mock_point)
    def test_create_timeseries_float_tagvalue(self, monitor_resource_mock):
        """Check the exported point value for a float sum aggregation."""
        client = mock.Mock()

        option = stackdriver.Options(project_id="project-test",
                                     resource="global")
        exporter = stackdriver.StackdriverStatsExporter(options=option,
                                                        client=client)

        stats = stats_module.stats
        view_manager = stats.view_manager
        stats_recorder = stats.stats_recorder

        # Make sure only this test's exporter is registered.
        if len(view_manager.measure_to_view_map.exporters) > 0:
            view_manager.unregister_exporter(
                view_manager.measure_to_view_map.exporters[0])

        view_manager.register_exporter(exporter)

        # Sum aggregation seeded at 2.2; recorded values add to it.
        agg_3 = aggregation_module.SumAggregation(sum=2.2)
        view_name3 = "view-name3"
        new_view3 = view_module.View(view_name3,
                                     "processed video size over time",
                                     [FRONTEND_KEY_FLOAT],
                                     VIDEO_SIZE_MEASURE_FLOAT, agg_3)

        view_manager.register_view(new_view3)

        tag_value_float = tag_value_module.TagValue("1200")
        tag_map = tag_map_module.TagMap()
        tag_map.insert(FRONTEND_KEY_FLOAT, tag_value_float)

        measure_map = stats_recorder.new_measurement_map()
        measure_map.measure_float_put(VIDEO_SIZE_MEASURE_FLOAT, 25 * MiB)
        measure_map.record(tag_map)

        v_data = measure_map.measure_to_view_map.get_view(view_name3, None)

        # Convert the recorded ViewData into the Metric the exporter consumes.
        v_data = metric_utils.view_data_to_metric(v_data, TEST_TIME)

        time_series_list = exporter.create_time_series_list(v_data)
        self.assertEqual(len(time_series_list), 1)
        [time_series] = time_series_list
        self.assertEqual(time_series.metric.type,
                         "custom.googleapis.com/opencensus/view-name3")
        self.check_labels(time_series.metric.labels,
                          {FRONTEND_KEY_FLOAT_CLEAN: "1200"},
                          include_opencensus=True)
        self.assertIsNotNone(time_series.resource)

        self.assertEqual(len(time_series.points), 1)
        # Expect the initial sum plus the single recorded measurement.
        expected_value = monitoring_v3.types.TypedValue()
        expected_value.double_value = 2.2 + 25 * MiB
        self.assertEqual(time_series.points[0].value, expected_value)
    def test_create_batched_time_series_with_many(self, monitor_resource_mock):
        """Check batching of five time series with batch size 2 (3 batches)."""
        client = mock.Mock()

        def build_view_data(view_name, records):
            # Build a LastValue view and record one point per (tag, value).
            test_view = view_module.View(
                view_name, "test description", ['test'],
                VIDEO_SIZE_MEASURE,
                aggregation_module.LastValueAggregation())
            data = view_data_module.ViewData(view=test_view,
                                             start_time=TEST_TIME_STR,
                                             end_time=TEST_TIME_STR)
            for tag, val in records:
                data.record(context=tag_map_module.TagMap({'test': tag}),
                            value=val,
                            timestamp=None)
            return data

        # First view with 3 recorded tag values, second view with 2.
        v_data1 = build_view_data("view-name1",
                                  [('1', 7), ('2', 5), ('3', 3)])
        v_data2 = build_view_data("view-name2", [('1', 7), ('2', 5)])

        view_data = [
            metric_utils.view_data_to_metric(vd, TEST_TIME)
            for vd in (v_data1, v_data2)
        ]

        option = stackdriver.Options(project_id="project-test")
        exporter = stackdriver.StackdriverStatsExporter(options=option,
                                                        client=client)

        time_series_batches = exporter.create_batched_time_series(view_data, 2)

        # 5 series split into batches of at most 2: sizes 2, 2, 1.
        self.assertEqual(len(time_series_batches), 3)
        [tsb1, tsb2, tsb3] = time_series_batches
        self.assertEqual(len(tsb1), 2)
        self.assertEqual(len(tsb2), 2)
        self.assertEqual(len(tsb3), 1)
    def test_create_timeseries_multiple_tags(self):
        """Check that exporter creates timeseries for multiple tag values.

        create_time_series_list should return a time series for each set of
        values in the tag value aggregation map.
        """
        count_agg = aggregation_module.CountAggregation(
            aggregation_type=aggregation_module.Type.COUNT)

        test_view = view_module.View(
            name="example.org/test_view",
            description="example.org/test_view",
            columns=[
                tag_key_module.TagKey('color'),
                tag_key_module.TagKey('shape')
            ],
            measure=mock.Mock(),
            aggregation=count_agg,
        )

        v_data = view_data_module.ViewData(
            view=test_view,
            start_time=TEST_TIME_STR,
            end_time=TEST_TIME_STR,
        )

        # Two distinct tag-value combinations, each with its own count.
        v_data._tag_value_aggregation_data_map = {
            ('red', 'square'):
                aggregation_data_module.CountAggregationData(10),
            ('blue', 'circle'):
                aggregation_data_module.CountAggregationData(20),
        }

        v_data = metric_utils.view_data_to_metric(v_data, TEST_TIME)

        exporter = stackdriver.StackdriverStatsExporter()
        time_series_list = exporter.create_time_series_list(v_data)

        # One series per tag-value combination, each with a single point.
        self.assertEqual(len(time_series_list), 2)
        for series in time_series_list:
            self.assertEqual(len(series.points), 1)

        ts_by_color = {
            series.metric.labels.get('color'): series
            for series in time_series_list
        }
        red_ts = ts_by_color['red']
        blue_ts = ts_by_color['blue']
        self.assertEqual(red_ts.metric.labels.get('shape'), 'square')
        self.assertEqual(blue_ts.metric.labels.get('shape'), 'circle')
        self.assertEqual(red_ts.points[0].value.int64_value, 10)
        self.assertEqual(blue_ts.points[0].value.int64_value, 20)
def test_bad_http_response(stats_exporter, caplog):
    """A 500 response from the metric API is logged as an error."""
    data = to_view_data(VIEWS["last"])
    data.record(None, 100, None)

    exported = metric_utils.view_data_to_metric(data, TEST_TIMESTAMP)
    stats_exporter.export_metrics([exported])

    expected_record = (
        "opencensus_ext_newrelic.stats",
        logging.ERROR,
        "New Relic send_metrics failed with status code: 500",
    )
    assert expected_record in caplog.record_tuples
    def test_create_timeseries(self, monitor_resource_mock):
        """Check time series creation for one recorded distribution point."""
        view_manager, stats_recorder, exporter = \
            self.setup_create_timeseries_test()

        view_manager.register_view(VIDEO_SIZE_VIEW)

        tag_value = tag_value_module.TagValue("1200")
        tag_map = tag_map_module.TagMap()
        tag_map.insert(FRONTEND_KEY, tag_value)

        measure_map = stats_recorder.new_measurement_map()
        measure_map.measure_int_put(VIDEO_SIZE_MEASURE, 25 * MiB)
        measure_map.record(tag_map)

        v_data = measure_map.measure_to_view_map.get_view(
            VIDEO_SIZE_VIEW_NAME, None)

        # Convert the recorded ViewData into the Metric the exporter consumes.
        v_data = metric_utils.view_data_to_metric(v_data, TEST_TIME)

        time_series_list = exporter.create_time_series_list(v_data)

        self.assertEqual(len(time_series_list), 1)
        time_series = time_series_list[0]
        self.assertEqual(time_series.resource.type, "global")
        self.assertEqual(
            time_series_list[0].metric.type,
            "custom.googleapis.com/opencensus/my.org/views/video_size_test2")
        self.check_labels(time_series.metric.labels,
                          {FRONTEND_KEY_CLEAN: "1200"},
                          include_opencensus=True)
        self.assertIsNotNone(time_series.resource)

        self.assertEqual(len(time_series.points), 1)
        value = time_series.points[0].value
        self.assertEqual(value.distribution_value.count, 1)

        # Creating the time series again from the same metric should yield
        # the same single series with the same labels and point count.
        time_series_list = exporter.create_time_series_list(v_data)

        self.assertEqual(len(time_series_list), 1)
        time_series = time_series_list[0]
        self.check_labels(time_series.metric.labels,
                          {FRONTEND_KEY_CLEAN: "1200"},
                          include_opencensus=True)
        self.assertIsNotNone(time_series.resource)

        self.assertEqual(len(time_series.points), 1)
        value = time_series.points[0].value
        self.assertEqual(value.distribution_value.count, 1)
def test_send_metrics_exception(stats_exporter, caplog):
    """An exception during send_metrics is swallowed and logged."""
    # Remove the client object to force an exception when send_metrics is called
    delattr(stats_exporter, "client")
    data = to_view_data(VIEWS["last"])
    data.record(None, 100, None)

    exported = metric_utils.view_data_to_metric(data, TEST_TIMESTAMP)

    assert stats_exporter.export_metrics([exported]) is None

    expected_record = (
        "opencensus_ext_newrelic.stats",
        logging.ERROR,
        "New Relic send_metrics failed with an exception.",
    )
    assert expected_record in caplog.record_tuples
    def test_create_timeseries_from_distribution(self):
        """Check for explicit 0-bound bucket for SD export."""
        agg = aggregation_module.DistributionAggregation(
            aggregation_type=aggregation_module.Type.DISTRIBUTION)

        view = view_module.View(
            name="example.org/test_view",
            description="example.org/test_view",
            columns=['tag_key'],
            measure=mock.Mock(),
            aggregation=agg,
        )

        v_data = view_data_module.ViewData(
            view=view,
            start_time=TEST_TIME_STR,
            end_time=TEST_TIME_STR,
        )

        # Aggregation over (10 * range(10)) for buckets [2, 4, 6, 8]
        dad = aggregation_data_module.DistributionAggregationData(
            mean_data=4.5,
            count_data=100,
            sum_of_sqd_deviations=825,
            counts_per_bucket=[20, 20, 20, 20, 20],
            bounds=[2, 4, 6, 8],
            exemplars={mock.Mock()
                       for ii in range(5)})
        v_data._tag_value_aggregation_data_map = {('tag_value', ): dad}

        v_data = metric_utils.view_data_to_metric(v_data, TEST_TIME)

        exporter = stackdriver.StackdriverStatsExporter()
        time_series_list = exporter.create_time_series_list(v_data)
        self.assertEqual(len(time_series_list), 1)
        [time_series] = time_series_list

        self.check_labels(time_series.metric.labels, {'tag_key': 'tag_value'},
                          include_opencensus=True)
        self.assertEqual(len(time_series.points), 1)
        [point] = time_series.points
        dv = point.value.distribution_value
        self.assertEqual(100, dv.count)
        self.assertEqual(825.0, dv.sum_of_squared_deviation)
        # The exporter prepends an explicit 0 bound and a matching 0-count
        # bucket to the original [2, 4, 6, 8] / [20, 20, 20, 20, 20] data.
        self.assertEqual([0, 20, 20, 20, 20, 20], dv.bucket_counts)
        self.assertEqual([0, 2, 4, 6, 8],
                         dv.bucket_options.explicit_buckets.bounds)
    def test_export_double_point_value(self):
        """A recorded float should be exported as a double_value point."""
        sum_view = view_module.View('', '', [FRONTEND_KEY], VIDEO_SIZE_MEASURE,
                                    aggregation_module.SumAggregation())
        v_data = view_data_module.ViewData(view=sum_view,
                                           start_time=TEST_TIME_STR,
                                           end_time=TEST_TIME_STR)
        v_data.record(context=tag_map_module.TagMap(),
                      value=2.5,
                      timestamp=None)
        metrics = [metric_utils.view_data_to_metric(v_data, TEST_TIME)]

        handler = mock.Mock(spec=ocagent.ExportRpcHandler)
        ocagent.StatsExporter(handler).export_metrics(metrics)

        # The recorded float lands in the point's double_value field.
        sent_request = handler.send.call_args[0][0]
        sent_point = sent_request.metrics[0].timeseries[0].points[0]
        self.assertEqual(sent_point.double_value, 2.5)
    def get_metrics(self, timestamp):
        """Get a Metric for each registered view.

        Convert each registered view's associated `ViewData` into a `Metric` to
        be exported.

        :type timestamp: :class: `datetime.datetime`
        :param timestamp: The timestamp to use for metric conversions, usually
        the current time.

        :rtype: Iterator[:class: `opencensus.metrics.export.metric.Metric`]
        """
        for view_data_list in self._measure_to_view_data_list_map.values():
            for view_datum in view_data_list:
                converted = metric_utils.view_data_to_metric(view_datum,
                                                             timestamp)
                # Skip view data that could not be converted.
                if converted is not None:
                    yield converted
    def test_export_view_data(self):
        """Check the exact protobuf payload sent for exported view data."""
        v_data = view_data_module.ViewData(view=VIDEO_SIZE_VIEW,
                                           start_time=TEST_TIME_STR,
                                           end_time=TEST_TIME_STR)
        v_data.record(context=tag_map_module.TagMap(), value=2, timestamp=None)
        view_data = [v_data]
        view_data = [metric_utils.view_data_to_metric(view_data[0], TEST_TIME)]

        handler = mock.Mock(spec=ocagent.ExportRpcHandler)
        ocagent.StatsExporter(handler).export_metrics(view_data)

        # Descriptor mirrors the view's name, description, unit, type & keys.
        self.assertEqual(
            handler.send.call_args[0][0].metrics[0].metric_descriptor,
            metrics_pb2.MetricDescriptor(
                name=VIDEO_SIZE_VIEW_NAME,
                description='processed video size over time',
                unit='By',
                type=metrics_pb2.MetricDescriptor.CUMULATIVE_DISTRIBUTION,
                label_keys=[metrics_pb2.LabelKey(key=FRONTEND_KEY)]))

        # The single recorded value (2) falls into the first bucket of the
        # [16 MiB, 256 MiB] bounds; the tag key has no value in the recorded
        # (empty) context, hence has_value=False.
        self.assertEqual(
            handler.send.call_args[0][0].metrics[0].timeseries[0],
            metrics_pb2.TimeSeries(
                start_timestamp=timestamp_pb2.Timestamp(seconds=1545699723,
                                                        nanos=4000),
                label_values=[metrics_pb2.LabelValue(has_value=False)],
                points=[
                    metrics_pb2.Point(
                        timestamp=timestamp_pb2.Timestamp(seconds=1545699723,
                                                          nanos=4000),
                        distribution_value=metrics_pb2.DistributionValue(
                            sum=2,
                            count=1,
                            bucket_options=metrics_pb2.DistributionValue.
                            BucketOptions(
                                explicit=metrics_pb2.DistributionValue.
                                BucketOptions.Explicit(
                                    bounds=[16.0 * MiB, 256.0 * MiB])),
                            buckets=[
                                metrics_pb2.DistributionValue.Bucket(count=1),
                                metrics_pb2.DistributionValue.Bucket(),
                                metrics_pb2.DistributionValue.Bucket(),
                            ]))
                ]))
    def test_create_timeseries_disjoint_tags(self, monitoring_resoure_mock):
        """Tags not among the view's columns must not appear as labels."""
        view_manager, stats_recorder, exporter = \
            self.setup_create_timeseries_test()

        # Register view with two tags
        view_name = "view-name"
        view = view_module.View(view_name, "test description",
                                [FRONTEND_KEY, FRONTEND_KEY_FLOAT],
                                VIDEO_SIZE_MEASURE,
                                aggregation_module.SumAggregation())

        view_manager.register_view(view)

        # Add point with one tag in common and one different tag
        # (FRONTEND_KEY_STR is deliberately not one of the view's columns).
        measure_map = stats_recorder.new_measurement_map()
        tag_map = tag_map_module.TagMap()
        tag_map.insert(FRONTEND_KEY, tag_value_module.TagValue("1200"))
        tag_map.insert(FRONTEND_KEY_STR, tag_value_module.TagValue("1800"))
        measure_map.measure_int_put(VIDEO_SIZE_MEASURE, 25 * MiB)
        measure_map.record(tag_map)

        v_data = measure_map.measure_to_view_map.get_view(view_name, None)

        # Convert the recorded ViewData into the Metric the exporter consumes.
        v_data = metric_utils.view_data_to_metric(v_data, TEST_TIME)

        time_series_list = exporter.create_time_series_list(v_data)

        self.assertEqual(len(time_series_list), 1)
        [time_series] = time_series_list

        # Verify first time series: only the common tag shows up as a label.
        self.assertEqual(time_series.resource.type, "global")
        self.assertEqual(time_series.metric.type,
                         "custom.googleapis.com/opencensus/" + view_name)
        self.check_labels(time_series.metric.labels,
                          {FRONTEND_KEY_CLEAN: "1200"},
                          include_opencensus=True)
        self.assertIsNotNone(time_series.resource)

        self.assertEqual(len(time_series.points), 1)
        expected_value = monitoring_v3.types.TypedValue()
        # TODO: #565
        expected_value.double_value = 25.0 * MiB
        self.assertEqual(time_series.points[0].value, expected_value)
    def test_create_timeseries_str_tagvalue(self, monitor_resource_mock):
        """Check exported labels and value when the tag value is a string."""
        view_manager, stats_recorder, exporter = \
            self.setup_create_timeseries_test()

        agg_1 = aggregation_module.LastValueAggregation(value=2)
        view_name1 = "view-name1"
        new_view1 = view_module.View(view_name1,
                                     "processed video size over time",
                                     [FRONTEND_KEY_INT], VIDEO_SIZE_MEASURE_2,
                                     agg_1)

        view_manager.register_view(new_view1)

        # NOTE: the value is the string "Abc" despite the key's "int" name —
        # this test exercises string tag values specifically.
        tag_value_int = tag_value_module.TagValue("Abc")
        tag_map = tag_map_module.TagMap()
        tag_map.insert(FRONTEND_KEY_INT, tag_value_int)

        measure_map = stats_recorder.new_measurement_map()
        measure_map.measure_int_put(VIDEO_SIZE_MEASURE_2, 25 * MiB)
        measure_map.record(tag_map)

        v_data = measure_map.measure_to_view_map.get_view(view_name1, None)

        # Convert the recorded ViewData into the Metric the exporter consumes.
        v_data = metric_utils.view_data_to_metric(v_data, TEST_TIME)

        time_series_list = exporter.create_time_series_list(v_data)
        self.assertEqual(len(time_series_list), 1)
        time_series = time_series_list[0]

        self.check_labels(time_series.metric.labels,
                          {FRONTEND_KEY_INT_CLEAN: "Abc"},
                          include_opencensus=True)
        self.assertIsNotNone(time_series.resource)

        self.assertEqual(len(time_series.points), 1)
        expected_value = monitoring_v3.types.TypedValue()
        # TODO: #565
        expected_value.double_value = 25.0 * MiB
        self.assertEqual(time_series.points[0].value, expected_value)
    def test_create_timeseries_with_resource(self, monitor_resource_mock):
        """Check monitored-resource type/label mapping for several resources.

        The same recorded metric is exported repeatedly while the mocked
        monitored resource changes between gce_instance, gke_container,
        aws_ec2_instance, and an unknown type (mapped to "global").
        """
        client = mock.Mock()
        execution_context.clear()

        option = stackdriver.Options(project_id="project-test", resource="")
        exporter = stackdriver.StackdriverStatsExporter(options=option,
                                                        client=client)

        stats = stats_module.stats
        view_manager = stats.view_manager
        stats_recorder = stats.stats_recorder

        # Make sure only this test's exporter is registered.
        if len(view_manager.measure_to_view_map.exporters) > 0:
            view_manager.unregister_exporter(
                view_manager.measure_to_view_map.exporters[0])

        view_manager.register_exporter(exporter)
        view_manager.register_view(VIDEO_SIZE_VIEW)

        tag_value = tag_value_module.TagValue("1200")
        tag_map = tag_map_module.TagMap()
        tag_map.insert(FRONTEND_KEY, tag_value)

        measure_map = stats_recorder.new_measurement_map()
        measure_map.measure_int_put(VIDEO_SIZE_MEASURE, 25 * MiB)
        measure_map.record(tag_map)

        v_data = measure_map.measure_to_view_map.get_view(
            VIDEO_SIZE_VIEW_NAME, None)

        # Convert the recorded ViewData once; reused for every resource case.
        v_data = metric_utils.view_data_to_metric(v_data, TEST_TIME)

        # check for gce_instance monitored resource
        mocked_labels = {
            'instance_id': 'my-instance',
            'project_id': 'my-project',
            'zone': 'us-east1',
            'pod_id': 'localhost',
            'namespace_id': 'namespace'
        }

        mock_resource = mock.Mock()
        mock_resource.get_type.return_value = 'gce_instance'
        mock_resource.get_labels.return_value = mocked_labels
        monitor_resource_mock.return_value = mock_resource

        time_series_list = exporter.create_time_series_list(v_data)
        self.assertEqual(len(time_series_list), 1)
        time_series = time_series_list[0]
        self.assertEqual(time_series.resource.type, "gce_instance")
        # Only the labels relevant to gce_instance are kept.
        self.check_labels(
            time_series.resource.labels, {
                'instance_id': 'my-instance',
                'project_id': 'my-project',
                'zone': 'us-east1',
            })
        self.assertEqual(
            time_series.metric.type,
            "custom.googleapis.com/opencensus/my.org/views/video_size_test2")
        self.assertIsNotNone(time_series)

        # Exporting again yields the same metric type.
        time_series_list = exporter.create_time_series_list(v_data)
        self.assertEqual(len(time_series_list), 1)
        time_series = time_series_list[0]

        self.assertEqual(
            time_series.metric.type,
            "custom.googleapis.com/opencensus/my.org/views/video_size_test2")

        # check for gke_container monitored resource
        mocked_labels = {
            'instance_id': 'my-instance',
            'project_id': 'my-project',
            'zone': 'us-east1',
            'pod_id': 'localhost',
            'cluster_name': 'cluster',
            'namespace_id': 'namespace'
        }

        mock_resource = mock.Mock()
        mock_resource.get_type.return_value = 'gke_container'
        mock_resource.get_labels.return_value = mocked_labels
        monitor_resource_mock.return_value = mock_resource

        time_series_list = exporter.create_time_series_list(v_data)
        self.assertEqual(len(time_series_list), 1)
        time_series = time_series_list[0]
        # gke_container is exported under the k8s_container resource type,
        # with zone/pod_id/namespace_id renamed to the k8s label names.
        self.assertEqual(time_series.resource.type, "k8s_container")
        self.check_labels(
            time_series.resource.labels, {
                'project_id': 'my-project',
                'location': 'us-east1',
                'cluster_name': 'cluster',
                'pod_name': 'localhost',
                'namespace_name': 'namespace',
            })
        self.assertEqual(
            time_series.metric.type,
            "custom.googleapis.com/opencensus/my.org/views/video_size_test2")
        self.assertIsNotNone(time_series)

        # check for aws_ec2_instance monitored resource
        mocked_labels = {
            'instance_id': 'my-instance',
            'aws_account': 'my-project',
            'region': 'us-east1',
        }

        mock_resource = mock.Mock()
        mock_resource.get_type.return_value = 'aws_ec2_instance'
        mock_resource.get_labels.return_value = mocked_labels
        monitor_resource_mock.return_value = mock_resource

        time_series_list = exporter.create_time_series_list(v_data)
        self.assertEqual(len(time_series_list), 1)
        time_series = time_series_list[0]
        self.assertEqual(time_series.resource.type, "aws_ec2_instance")
        # The region label gets an "aws:" prefix on export.
        self.check_labels(
            time_series.resource.labels, {
                'instance_id': 'my-instance',
                'aws_account': 'my-project',
                'region': 'aws:us-east1',
            })
        self.assertEqual(
            time_series.metric.type,
            "custom.googleapis.com/opencensus/my.org/views/video_size_test2")
        self.assertIsNotNone(time_series)

        # check for out of box monitored resource
        mock_resource = mock.Mock()
        mock_resource.get_type.return_value = ''
        mock_resource.get_labels.return_value = mock.Mock()
        monitor_resource_mock.return_value = mock_resource

        time_series_list = exporter.create_time_series_list(v_data)
        self.assertEqual(len(time_series_list), 1)
        time_series = time_series_list[0]
        # Unknown resource types fall back to "global" with no labels.
        self.assertEqual(time_series.resource.type, 'global')
        self.check_labels(time_series.resource.labels, {})
        self.assertEqual(
            time_series.metric.type,
            "custom.googleapis.com/opencensus/my.org/views/video_size_test2")
        self.assertIsNotNone(time_series)