예제 #1
0
파일: _utils.py 프로젝트: snaik/pydatalab
class _MonitoringClient(object):
    def __init__(self, context):
        self.project = context.project_id
        client_info = ClientInfo(user_agent='pydatalab/v0')
        self.metrics_client = MetricServiceClient(
            credentials=context.credentials, client_info=client_info)
        self.group_client = GroupServiceClient(credentials=context.credentials,
                                               client_info=client_info)

    def list_metric_descriptors(self, filter_string=None, type_prefix=None):
        filters = []
        if filter_string is not None:
            filters.append(filter_string)

        if type_prefix is not None:
            filters.append('metric.type = starts_with("{prefix}")'.format(
                prefix=type_prefix))

        metric_filter = ' AND '.join(filters)
        metrics = self.metrics_client.list_metric_descriptors(
            self.project, filter_=metric_filter)
        return metrics

    def list_resource_descriptors(self, filter_string=None):
        resources = self.metrics_client.list_monitored_resource_descriptors(
            self.project, filter_=filter_string)
        return resources

    def list_groups(self):
        groups = self.group_client.list_groups(self.project)
        return groups
예제 #2
0
파일: _utils.py 프로젝트: snaik/pydatalab
 def __init__(self, context):
     """Create metric and group service clients for *context*'s project."""
     info = ClientInfo(user_agent='pydatalab/v0')
     self.project = context.project_id
     self.metrics_client = MetricServiceClient(
         credentials=context.credentials, client_info=info)
     self.group_client = GroupServiceClient(
         credentials=context.credentials, client_info=info)
예제 #3
0
    def test_constructor_maximal(self):
        from google.cloud.monitoring_v3 import MetricServiceClient

        days, hours, minutes = 1, 2, 3
        end = datetime.datetime(2016, 4, 7, 2, 30, 30)
        start = end - datetime.timedelta(
            days=days, hours=hours, minutes=minutes)

        client = MetricServiceClient(channel=ChannelStub())
        query = self._make_one(
            client, PROJECT, METRIC_TYPE,
            end_time=end, days=days, hours=hours, minutes=minutes)

        # The query carries the client, filter, and derived interval.
        self.assertEqual(query._client, client)
        self.assertEqual(query._filter.metric_type, METRIC_TYPE)
        self.assertEqual(query._start_time, start)
        self.assertEqual(query._end_time, end)

        # No aggregation has been requested yet.
        self.assertEqual(query._per_series_aligner, 0)
        self.assertEqual(query._alignment_period_seconds, 0)
        self.assertEqual(query._cross_series_reducer, 0)
        self.assertEqual(query._group_by_fields, ())
예제 #4
0
    def test_iteration_headers_only(self):
        from google.cloud.monitoring_v3 import MetricServiceClient
        from google.cloud.monitoring_v3.gapic import enums
        from google.cloud.monitoring_v3.proto import metric_service_pb2

        start = datetime.datetime(2016, 4, 6, 22, 5, 0)
        end = datetime.datetime(2016, 4, 6, 22, 10, 0)

        # Two series headers: same metric/resource types, different labels.
        header1 = {
            "metric": {"type": METRIC_TYPE, "labels": METRIC_LABELS},
            "resource": {"type": RESOURCE_TYPE, "labels": RESOURCE_LABELS},
            "metric_kind": METRIC_KIND,
            "value_type": VALUE_TYPE,
        }
        header2 = {
            "metric": {"type": METRIC_TYPE, "labels": METRIC_LABELS2},
            "resource": {"type": RESOURCE_TYPE, "labels": RESOURCE_LABELS2},
            "metric_kind": METRIC_KIND,
            "value_type": VALUE_TYPE,
        }

        channel = ChannelStub(
            responses=[{"time_series": [header1, header2],
                        "next_page_token": ""}])
        client = MetricServiceClient(channel=channel)
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        query = query.select_interval(start_time=start, end_time=end)
        series_list = list(query.iter(headers_only=True))

        self.assertEqual(len(series_list), 2)
        first, second = series_list

        self.assertEqual(first.metric.labels, METRIC_LABELS)
        self.assertEqual(second.metric.labels, METRIC_LABELS2)
        self.assertEqual(first.resource.labels, RESOURCE_LABELS)
        self.assertEqual(second.resource.labels, RESOURCE_LABELS2)

        # Headers-only series must carry no data points.
        self.assertFalse(len(first.points))
        self.assertFalse(len(second.points))

        expected_request = metric_service_pb2.ListTimeSeriesRequest(
            name="projects/" + PROJECT,
            filter='metric.type = "{type}"'.format(type=METRIC_TYPE),
            interval=self._make_interval(end, start),
            view=enums.ListTimeSeriesRequest.TimeSeriesView.HEADERS,
        )
        self.assertEqual(channel.requests[0][1], expected_request)
예제 #5
0
    def test_export(self):
        # Build an exporter backed by a real gRPC transport to self.address.
        transport = metric_service_grpc_transport.MetricServiceGrpcTransport(
            channel=grpc.insecure_channel(self.address))
        exporter = CloudMonitoringMetricsExporter(
            self.project_id, client=MetricServiceClient(transport=transport))

        meter = metrics.MeterProvider().get_meter(__name__)
        counter = meter.create_metric(
            name="name",
            description="desc",
            unit="1",
            value_type=int,
            metric_type=metrics.Counter,
        )

        # A checkpointed sum whose timestamp is past the write interval.
        aggregator = SumAggregator()
        aggregator.checkpoint = 1
        aggregator.last_update_timestamp = (
            (WRITE_INTERVAL + 2) * NANOS_PER_SECOND)

        record = MetricRecord(counter, labels=(), aggregator=aggregator)
        self.assertEqual(exporter.export([record]),
                         MetricsExportResult.SUCCESS)
예제 #6
0
    def test_metric_type(self):
        from google.cloud.monitoring_v3 import MetricServiceClient

        client = MetricServiceClient(channel=ChannelStub())
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        # The property should echo the constructor argument.
        self.assertEqual(query.metric_type, METRIC_TYPE)
예제 #7
0
    def test_filter(self):
        from google.cloud.monitoring_v3 import MetricServiceClient

        client = MetricServiceClient(channel=ChannelStub())
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        # A fresh query filters on the metric type alone.
        self.assertEqual(
            query.filter,
            'metric.type = "{type}"'.format(type=METRIC_TYPE))
예제 #8
0
    def test_execution_without_interval_illegal(self):
        from google.cloud.monitoring_v3 import MetricServiceClient

        client = MetricServiceClient(channel=ChannelStub())
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        # Iterating a query that has no time interval must be rejected.
        with self.assertRaises(ValueError):
            list(query)
예제 #9
0
    def test_constructor_nonzero_duration_illegal(self):
        from google.cloud.monitoring_v3 import MetricServiceClient

        end = datetime.datetime(2016, 4, 7, 2, 30, 30)
        client = MetricServiceClient(channel=ChannelStub())
        # An end_time without days/hours/minutes is an invalid interval.
        with self.assertRaises(ValueError):
            self._make_one(client, PROJECT, METRIC_TYPE, end_time=end)
예제 #10
0
 def __init__(self, project_id=None, client=None):
     """Set up the exporter's client, project name and descriptor caches.

     Falls back to application-default credentials for the project id
     when none is supplied.
     """
     self.client = client or MetricServiceClient()
     if project_id:
         self.project_id = project_id
     else:
         _, self.project_id = google.auth.default()
     self.project_name = self.client.project_path(self.project_id)
     self._metric_descriptors = {}
     self._last_updated = {}
예제 #11
0
    def test_filter_by_group(self):
        from google.cloud.monitoring_v3 import MetricServiceClient

        group_id = '1234567'
        client = MetricServiceClient(channel=ChannelStub())
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        query = query.select_group(group_id)
        # The group clause is AND-ed onto the base metric-type filter.
        expected = 'metric.type = "{0}" AND group.id = "{1}"'.format(
            METRIC_TYPE, group_id)
        self.assertEqual(query.filter, expected)
예제 #12
0
    def test_filter_by_metrics(self):
        from google.cloud.monitoring_v3 import MetricServiceClient

        instance = 'my-instance'
        client = MetricServiceClient(channel=ChannelStub())
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        query = query.select_metrics(instance_name=instance)
        # Keyword args become metric.label.<name> equality clauses.
        expected = (
            'metric.type = "{0}" AND metric.label.instance_name = "{1}"'
        ).format(METRIC_TYPE, instance)
        self.assertEqual(query.filter, expected)
예제 #13
0
    def test_filter_by_projects(self):
        from google.cloud.monitoring_v3 import MetricServiceClient

        first, second = 'project-1', 'project-2'
        client = MetricServiceClient(channel=ChannelStub())
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        query = query.select_projects(first, second)
        # Multiple projects are OR-ed together after the type clause.
        expected = (
            'metric.type = "{0}" AND project = "{1}" OR project = "{2}"'
        ).format(METRIC_TYPE, first, second)
        self.assertEqual(query.filter, expected)
예제 #14
0
    def test_filter_by_resources(self):
        from google.cloud.monitoring_v3 import MetricServiceClient

        zone_prefix = 'europe-'
        client = MetricServiceClient(channel=ChannelStub())
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        query = query.select_resources(zone_prefix=zone_prefix)
        # A *_prefix kwarg turns into a starts_with() resource clause.
        expected = (
            'metric.type = "{0}"'
            ' AND resource.label.zone = starts_with("{1}")'
        ).format(METRIC_TYPE, zone_prefix)
        self.assertEqual(query.filter, expected)
 def __init__(self,
              project_id=None,
              client=None,
              add_unique_identifier=False):
     """Initialize the exporter, optionally tagging it with a random id.

     When no project id is supplied it is resolved from the
     application-default credentials.
     """
     self.client = client or MetricServiceClient()
     if project_id:
         self.project_id = project_id
     else:
         _, self.project_id = google.auth.default()
     self.project_name = self.client.project_path(self.project_id)
     self._metric_descriptors = {}
     self._last_updated = {}
     # Eight hex digits drawn uniformly at random, if requested.
     self.unique_identifier = (
         "{:08x}".format(random.randint(0, 16**8))
         if add_unique_identifier else None)
예제 #16
0
    def test_export(self):
        transport = metric_service_grpc_transport.MetricServiceGrpcTransport(
            channel=grpc.insecure_channel(self.address)
        )
        # Wrap the real client so its calls can be inspected afterwards.
        client = MagicMock(wraps=MetricServiceClient(transport=transport))
        exporter = CloudMonitoringMetricsExporter(
            self.project_id, client=client
        )

        resource = Resource.create(
            {
                "cloud.account.id": "some_account_id",
                "cloud.provider": "gcp",
                "cloud.zone": "us-east1-b",
                "host.id": 654321,
                "gcp.resource_type": "gce_instance",
            }
        )
        meter = metrics.MeterProvider(resource=resource).get_meter(__name__)
        counter = meter.create_counter(
            # TODO: remove "opentelemetry/" prefix which is a hack
            # https://github.com/GoogleCloudPlatform/opentelemetry-operations-python/issues/84
            name="opentelemetry/name",
            description="desc",
            unit="1",
            value_type=int,
        )
        # The interval is irrelevant: the controller thread is never
        # started; tick() is invoked by hand below.
        controller = PushController(meter, exporter, 10)

        counter.add(10, {"env": "test"})

        with patch(
            "opentelemetry.exporter.cloud_monitoring.logger"
        ) as mock_logger:
            controller.tick()

            # run tox tests with `-- -log-cli-level=0` to see mock calls made
            logger.debug(client.create_time_series.mock_calls)
            mock_logger.warning.assert_not_called()
            mock_logger.error.assert_not_called()
예제 #17
0
    def test_request_parameters_minimal(self):
        from google.cloud.monitoring_v3 import MetricServiceClient
        from google.cloud.monitoring_v3.gapic import enums

        end = datetime.datetime(2016, 4, 7, 2, 30, 0)

        client = MetricServiceClient(channel=ChannelStub())
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        query = query.select_interval(end_time=end)
        actual = query._build_query_params()
        # Only the mandatory request fields should be present.
        self.assertEqual(actual, {
            'name': u'projects/{}'.format(PROJECT),
            'filter_': 'metric.type = "{type}"'.format(type=METRIC_TYPE),
            'interval': self._make_interval(end),
            'view': enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
        })
예제 #18
0
    def test_request_parameters_maximal(self):
        from google.cloud.monitoring_v3 import MetricServiceClient
        from google.cloud.monitoring_v3 import types
        from google.cloud.monitoring_v3.gapic import enums

        start = datetime.datetime(2016, 4, 7, 2, 0, 0)
        end = datetime.datetime(2016, 4, 7, 2, 30, 0)

        aligner = 'ALIGN_DELTA'
        # 1 minute + 30 seconds of alignment = a 90-second period.
        minutes, seconds, period_in_seconds = 1, 30, 90
        reducer = 'REDUCE_MEAN'
        field1, field2 = 'resource.zone', 'metric.instance_name'
        page_size = 100

        client = MetricServiceClient(channel=ChannelStub())
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        query = query.select_interval(start_time=start, end_time=end)
        query = query.align(aligner, minutes=minutes, seconds=seconds)
        query = query.reduce(reducer, field1, field2)
        actual = query._build_query_params(headers_only=True,
                                           page_size=page_size)
        expected = {
            'name': 'projects/%s' % PROJECT,
            'filter_': 'metric.type = "{type}"'.format(type=METRIC_TYPE),
            'interval': self._make_interval(end, start),
            'aggregation': types.Aggregation(
                per_series_aligner=aligner,
                alignment_period={'seconds': period_in_seconds},
                cross_series_reducer=reducer,
                group_by_fields=[field1, field2],
            ),
            'view': enums.ListTimeSeriesRequest.TimeSeriesView.HEADERS,
            'page_size': page_size,
        }
        self.assertEqual(actual, expected)
예제 #19
0
    def qsize(self, sub_list: 'list | None' = None) -> dict:
        """Return the latest queue-size metric value per subscription.

        Args:
            sub_list: Optional list of subscriptions; falls back to
                ``self._sub_list`` when empty or None.
                NOTE(review): after the fallback assignment ``sub_list``
                is never referenced again below — presumably the read
                should be restricted to these subscriptions; confirm
                against ``__read_metric``.

        Returns:
            dict: ``{'gcp': {subscription: value, ...}}``.
        """
        response = {'gcp': {}}
        if not sub_list:
            sub_list = self._sub_list

        # Query the last 2 minutes of data ending now; see the inline
        # note for why a 1-minute window is too short.
        query_results = query.Query(
            client=MetricServiceClient(),
            project=self._project,
            metric_type=self.METRIC_TYPE,
            end_time=datetime.now(),
            minutes=2
            # if set 1 minute, we get nothing
            # while creating the latest metrics.
        )

        for result in self.__read_metric(query_results=query_results):
            response['gcp'][result['subscription']] = result['value']

        return response
예제 #20
0
    def test_constructor_default_end_time(self):
        from google.cloud.monitoring_v3 import MetricServiceClient

        minutes = 5
        now = datetime.datetime(2016, 4, 7, 2, 30, 30)
        # The implicit interval is the 5 minutes ending at "now",
        # truncated to whole minutes.
        expected_start = datetime.datetime(2016, 4, 7, 2, 25, 0)
        expected_end = datetime.datetime(2016, 4, 7, 2, 30, 0)

        client = MetricServiceClient(channel=ChannelStub())
        with mock.patch('google.cloud.monitoring_v3.query._UTCNOW',
                        new=lambda: now):
            query = self._make_one(client,
                                   PROJECT,
                                   METRIC_TYPE,
                                   minutes=minutes)

        self.assertEqual(query._start_time, expected_start)
        self.assertEqual(query._end_time, expected_end)
예제 #21
0
    def __init__(self,
                 project_id=None,
                 client=None,
                 add_unique_identifier=False):
        """Initialize the exporter.

        Resolves the project id from application-default credentials when
        not given, and records the exporter start time for use in
        cumulative metric intervals.
        """
        self.client = client or MetricServiceClient()
        if project_id:
            self.project_id = project_id
        else:
            _, self.project_id = google.auth.default()
        self.project_name = self.client.project_path(self.project_id)
        self._metric_descriptors = {}
        self._last_updated = {}
        # Optional eight-hex-digit tag to disambiguate exporter instances.
        self.unique_identifier = (
            "{:08x}".format(random.randint(0, 16**8))
            if add_unique_identifier else None)

        # Split the start timestamp into whole seconds plus nanoseconds.
        (
            self._exporter_start_time_seconds,
            self._exporter_start_time_nanos,
        ) = divmod(time_ns(), NANOS_PER_SECOND)
예제 #22
0
    def test_constructor_minimal(self):
        from google.cloud.monitoring_v3 import MetricServiceClient

        # Mock the API response
        client = MetricServiceClient(channel=ChannelStub())

        query = self._make_one(client, PROJECT)

        self.assertEqual(query._client, client)
        # Without an explicit type, the class default applies.
        self.assertEqual(query._filter.metric_type,
                         self._get_target_class().DEFAULT_METRIC_TYPE)

        # No interval is configured by default ...
        self.assertIsNone(query._start_time)
        self.assertIsNone(query._end_time)

        # ... and no aggregation either.
        self.assertEqual(query._per_series_aligner, 0)
        self.assertEqual(query._alignment_period_seconds, 0)
        self.assertEqual(query._cross_series_reducer, 0)
        self.assertEqual(query._group_by_fields, ())
예제 #23
0
    def test_iteration_empty(self):
        from google.cloud.monitoring_v3 import MetricServiceClient
        from google.cloud.monitoring_v3.gapic import enums
        from google.cloud.monitoring_v3.proto import metric_service_pb2

        start = datetime.datetime(2016, 4, 6, 22, 5, 0)
        end = datetime.datetime(2016, 4, 6, 22, 10, 0)

        channel = ChannelStub(responses=[{'next_page_token': ''}])
        client = MetricServiceClient(channel=channel)
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        query = query.select_interval(start_time=start, end_time=end)

        # An empty response yields no series at all.
        self.assertEqual(list(query), [])

        expected_request = metric_service_pb2.ListTimeSeriesRequest(
            name='projects/' + PROJECT,
            filter='metric.type = "{type}"'.format(type=METRIC_TYPE),
            interval=self._make_interval(end, start),
            view=enums.ListTimeSeriesRequest.TimeSeriesView.FULL)
        self.assertEqual(channel.requests[0][1], expected_request)
예제 #24
0
    def test_iteration(self):
        from google.cloud.monitoring_v3 import MetricServiceClient
        from google.cloud.monitoring_v3.gapic import enums
        from google.cloud.monitoring_v3.proto import metric_service_pb2

        start = datetime.datetime(2016, 4, 6, 22, 5, 0)
        end = datetime.datetime(2016, 4, 6, 22, 10, 0)

        interval1 = self._make_interval(TS1, TS0)
        interval2 = self._make_interval(TS2, TS1)

        value1 = 60  # seconds
        value2 = 60.001  # seconds

        # Two series with identical structure but distinct labels/values;
        # points arrive newest-first.
        series1 = {
            'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS},
            'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS},
            'metric_kind': METRIC_KIND,
            'value_type': VALUE_TYPE,
            'points': [
                {'interval': interval2, 'value': {'double_value': value1}},
                {'interval': interval1, 'value': {'double_value': value1}},
            ],
        }
        series2 = {
            'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS2},
            'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS2},
            'metric_kind': METRIC_KIND,
            'value_type': VALUE_TYPE,
            'points': [
                {'interval': interval2, 'value': {'double_value': value2}},
                {'interval': interval1, 'value': {'double_value': value2}},
            ],
        }

        channel = ChannelStub(
            responses=[{'time_series': [series1, series2],
                        'next_page_token': ''}])
        client = MetricServiceClient(channel=channel)
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        query = query.select_interval(start_time=start, end_time=end)
        results = list(query)

        self.assertEqual(len(results), 2)
        first, second = results

        self.assertEqual(first.metric.labels, METRIC_LABELS)
        self.assertEqual(second.metric.labels, METRIC_LABELS2)
        self.assertEqual(first.resource.labels, RESOURCE_LABELS)
        self.assertEqual(second.resource.labels, RESOURCE_LABELS2)

        # Values and point ordering must be preserved from the response.
        self.assertEqual([p.value.double_value for p in first.points],
                         [value1, value1])
        self.assertEqual([p.value.double_value for p in second.points],
                         [value2, value2])
        self.assertEqual([p.interval for p in first.points],
                         [interval2, interval1])
        self.assertEqual([p.interval for p in second.points],
                         [interval2, interval1])

        expected_request = metric_service_pb2.ListTimeSeriesRequest(
            name='projects/' + PROJECT,
            filter='metric.type = "{type}"'.format(type=METRIC_TYPE),
            interval=self._make_interval(end, start),
            view=enums.ListTimeSeriesRequest.TimeSeriesView.FULL)
        self.assertEqual(channel.requests[0][1], expected_request)
예제 #25
0
 def _put_metrics(self, ns, metrics):
     """Write the given time series to Cloud Monitoring.

     NOTE(review): ``ns`` is unused in this body. Also,
     ``create_time_series`` is invoked with a single positional argument;
     the monitoring_v3 client conventionally takes the project name first
     (``create_time_series(name, time_series)``) — confirm this call
     matches the installed client version.
     """
     client = MetricServiceClient()
     client.create_time_series(metrics)
예제 #26
0
### Define constants

# Cromwell variables passed to the container
# through environmental variables
WORKFLOW_ID = environ['WORKFLOW_ID']
TASK_CALL_NAME = environ['TASK_CALL_NAME']
TASK_CALL_INDEX = environ['TASK_CALL_INDEX']
TASK_CALL_ATTEMPT = environ['TASK_CALL_ATTEMPT']
DISK_MOUNTS = environ['DISK_MOUNTS'].split()

# GCP instance name, zone and project
# from instance introspection API
INSTANCE = get_metadata('name')
# Zone metadata is a path of four '/'-separated components; the second
# is taken as the project and the fourth as the zone.
# NOTE(review): on GCE this path is "projects/<numeric-id>/zones/<zone>",
# so PROJECT here is presumably a numeric project id — confirm that
# downstream consumers accept that form.
_, PROJECT, _, ZONE = get_metadata('zone').split('/')

client = MetricServiceClient()
PROJECT_NAME = client.project_path(PROJECT)

# Root namespace for the custom metrics reported by this script.
METRIC_ROOT = 'wdl_task'

# Sample once per second; push a report once per minute.
MEASUREMENT_TIME_SEC = 1
REPORT_TIME_SEC = 60
LABEL_DESCRIPTORS = [
  LabelDescriptor(
    key='workflow_id',
    description='Cromwell workflow ID',
  ),
  LabelDescriptor(
    key='task_call_name',
    description='Cromwell task call name',
예제 #27
0
class MetricsClient:
    """Client for Cloud Monitoring Metrics.

    Provides a simpler interface than the original client.

    Args:
        project_id (str): Cloud Monitoring host project id (workspace) to query
            metrics from.
    """
    def __init__(self, project_id):
        # Underlying GAPIC client plus the fully-qualified project path
        # ("projects/<id>") used by every API call below.
        self.client = MetricServiceClient()
        self.project_id = project_id
        self.project = self.client.project_path(project_id)

    def get(self, metric_type):
        """Get a metric descriptor from metric type.
        If the metric is not found, try listing all metrics from project and
        grab the corresponding matches.

        Args:
            metric_type (str): The metric type (e.g: custom.googleapis.com/test)
                or a regex of the metric type.

        Returns:
            iterator: Metric descriptor API response.

        Raises:
            exceptions.NotFound: If neither an exact match nor a regex
                match exists (re-raised from within ``get_approx``).
        """
        try:
            return self.client.get_metric_descriptor(
                f'{self.project}/metricDescriptors/{metric_type}')
        except exceptions.NotFound:
            # Fall back to a regex search; get_approx may also switch the
            # active project to the one containing the match.
            metric_type = self.get_approx(metric_type)
            return self.client.get_metric_descriptor(
                f'{self.project}/metricDescriptors/{metric_type}')

    def get_approx(self, metric_type, interactive=True):
        """Get metric descriptors matching a regex of the metric_type.

        Args:
            metric_type (str): Metric type regex.
            interactive (bool): Interactive mode enabled (default: True).
                When multiple matches exist, prompts the user; otherwise
                the first match is taken.

        Returns:
            str: The metric type chosen by the user through interactive input,
                or inferred by the tool.
        """
        LOGGER.info(f'Metric type "{metric_type}" not found (no exact match). '
                    f'Trying with regex ...')
        results = self.list(pattern=metric_type)
        # Pair each match with the project id embedded in its name
        # ("projects/<id>/...").
        # NOTE(review): `list()` accesses descriptors with attribute
        # syntax (x.type) while this subscripts them (x['type']) —
        # confirm the descriptor objects support both access styles.
        matches = [(x['type'], x['name'].split('/')[1]) for x in list(results)]
        if len(matches) == 0:
            LOGGER.error(
                f'No partial result matched your query "{metric_type}".')
            # Bare `raise` re-raises the exception currently being
            # handled; this relies on being called from `get`'s except
            # block — calling get_approx directly would raise a
            # RuntimeError here instead.
            raise  # re-raise NotFound exception
        if len(matches) == 1:
            metric_type = matches[0][0]
            project_id = matches[0][1]
            LOGGER.info(f'Found exactly one metric "{metric_type}" in project'
                        f'"{project_id}" matching regex.')
        elif interactive:
            LOGGER.info('Found multiple metrics matching regex.')
            for idx, (mtype, project_id) in enumerate(matches):
                print(f'{idx}. {mtype} ({project_id})')
            idx = int(input('Enter your choice: '))
            metric_type = matches[idx][0]
            project_id = matches[idx][1]
        else:  # non-interactive mode, take first match
            metric_type = matches[0][0]
            project_id = matches[0][1]
        # Rebind the client to the project owning the chosen metric.
        self.switch_project(project_id)
        return metric_type

    def create(self,
               metric_type,
               metric_kind='GAUGE',
               value_type='DOUBLE',
               description='N/A'):
        """Create a metric descriptor.

        Args:
            metric_type (str): Metric type. Prefixed with
                ``custom.googleapis.com/`` unless it already is.
            metric_kind (str, optional): Metric kind.
            value_type (str, optional): Value type.
            description (str, optional): Description.

        Returns:
            obj: Metric descriptor.
        """
        descriptor = types.MetricDescriptor()
        if metric_type.startswith('custom.googleapis.com/'):
            descriptor.type = metric_type
        else:
            descriptor.type = 'custom.googleapis.com/%s' % metric_type
        # Translate the string names into their enum values.
        descriptor.metric_kind = (getattr(enums.MetricDescriptor.MetricKind,
                                          metric_kind))
        descriptor.value_type = (getattr(enums.MetricDescriptor.ValueType,
                                         value_type))
        descriptor.description = description
        LOGGER.info(f'Creating metric descriptor "{descriptor.type}" ...')
        return self.client.create_metric_descriptor(self.project, descriptor)

    def delete(self, metric_type):
        """Delete a metric descriptor.

        Args:
            metric_type (str): Metric type to delete.

        Returns:
            obj: Metric descriptor.
        """
        LOGGER.info(f'Deleting metric descriptor "{metric_type}" ...')
        return self.client.delete_metric_descriptor(
            f'{self.project}/metricDescriptors/{metric_type}')

    def list(self, pattern=None):
        """List all metric descriptors in project.

        Args:
            pattern (str, optional): Optional regex pattern to filter on
                specific metric descriptor types (applied client-side).

        Returns:
            list: List of metric descriptors.
        """
        LOGGER.debug(f'Listing metrics in project "{self.project_id}" ...')
        descriptors = list(self.client.list_metric_descriptors(self.project))
        if pattern:
            descriptors = [
                x for x in descriptors if bool(re.search(pattern, x.type))
            ]
        return descriptors

    def delete_unused(self, pattern=None, window=1, interactive=True):
        """Delete unused metric.

        Args:
            pattern (str): Regex pattern to filter on.
            window (int): Window to check for metric data in days. If no
                datapoints were written during this window, add to delete list.
            interactive (bool): Prompt for confirmation before deleting
                (default: True).
        """
        LOGGER.info(
            f'Inspecting metrics to find unused ones in "{self.project_id}". '
            f'The bigger the --window, the longest time this will take ...')
        window_seconds = window * 86400  # in seconds
        descriptors = self.list(pattern=pattern)
        delete_list = []
        keep_list = []
        for descriptor in descriptors:
            # NOTE(review): subscript access on descriptors here, versus
            # attribute access in `list()` — confirm both work.
            metric_type = descriptor['type']
            project_id = descriptor['name'].split('/')[1]
            self.switch_project(project_id)
            results = list(self.inspect(metric_type, window_seconds))
            if not results:
                LOGGER.info(
                    f'{metric_type}: not written for (at least) {window} days')
                delete_list.append({
                    'metric_type': metric_type,
                    'project_id': self.project_id
                })
            else:
                last_written = results[0]['points'][0]['interval']['endTime']
                keep_list.append({
                    'metric_type':
                    metric_type,
                    'message':
                    f'Last datapoint written on {last_written}'
                })
                LOGGER.info(
                    f'{metric_type}: last datapoint written at {last_written}')
        if not delete_list:
            LOGGER.info('No unused metrics. Exiting.')
        elif interactive:
            idx = input('Delete unused metrics (y/n) ?')
            if idx.lower() in ['y', 'yes']:
                for item in delete_list:
                    metric_type = item['metric_type']
                    self.switch_project(item['project_id'])
                    self.delete(metric_type)
            # NOTE(review): this success message is logged even when the
            # user answered "n" and nothing was deleted — confirm intent.
            LOGGER.info('Metrics deleted successfully.')

    def inspect(self, metric_type, window):
        """Inspect a specific metric. Returns timeseries between now and
        `window` seconds before now.

        Args:
            metric_type (str): Metric type.
            window: Window (in seconds).

        Returns:
            list: List of timeseries.
        """
        LOGGER.debug(
            f'Inspecting metric "{metric_type}" in project "{self.project_id}"'
            ' ...')
        metric = list(self.get(metric_type))[0]
        LOGGER.info(metric)
        metric_type = metric['type']
        # Build the [now - window, now] interval with nanosecond precision.
        interval = types.TimeInterval()
        now = time.time()
        interval.end_time.seconds = int(now)
        interval.end_time.nanos = int(
            (now - interval.end_time.seconds) * 10**9)
        interval.start_time.seconds = int(now - window)
        interval.start_time.nanos = interval.end_time.nanos
        results = list(
            self.client.list_time_series(
                self.project, 'metric.type = "%s"' % metric_type, interval,
                enums.ListTimeSeriesRequest.TimeSeriesView.FULL))
        return results

    def switch_project(self, new_project_id):
        """Update working project.

        Args:
            new_project_id (str): New project id.
        """
        self.project_id = new_project_id
        self.project = self.client.project_path(self.project_id)
예제 #28
0
 def __init__(self, project_id):
     """Bind a Cloud Monitoring client to *project_id*.

     Stores both the raw project id and its fully-qualified path.
     """
     self.client = MetricServiceClient()
     self.project_id = project_id
     self.project = self.client.project_path(self.project_id)
예제 #29
0
# Cromwell variables passed to the container
# through environmental variables
WORKFLOW_ID = environ['WORKFLOW_ID']
TASK_CALL_NAME = environ['TASK_CALL_NAME']
TASK_CALL_INDEX = environ['TASK_CALL_INDEX']
TASK_CALL_ATTEMPT = environ['TASK_CALL_ATTEMPT']
DISK_MOUNTS = environ['DISK_MOUNTS'].split()

# Get billing rates
MACHINE = get_machine_info()
PRICELIST = get_pricelist()
# Hourly machine + disk price converted to a per-second rate.
COST_PER_SEC = (get_machine_hour(MACHINE, PRICELIST) +
                get_disk_hour(MACHINE, PRICELIST)) / 3600

client = MetricServiceClient()
PROJECT_NAME = client.project_path(MACHINE['project'])

# Root namespace for the custom metrics reported by this script.
METRIC_ROOT = 'wdl_task'

# Sample once per second.
MEASUREMENT_TIME_SEC = 1

# how frequently to report (e.g., if 60, then send a report every minute)
REPORT_TIME_SEC_MIN = 300
REPORT_TIME_SEC = REPORT_TIME_SEC_MIN

LABEL_DESCRIPTORS = [
    LabelDescriptor(
        key='workflow_id',
        description='Cromwell workflow ID',
    ),
예제 #30
0
def stackdriver():
    """Return a fresh Cloud Monitoring ``MetricServiceClient``."""
    client = MetricServiceClient()
    return client
예제 #31
0
 def _create_client(channel=None):
     """Build a MetricServiceClient over *channel* (a fresh stub when None)."""
     if channel is None:
         channel = ChannelStub()
     return MetricServiceClient(
         transport=MetricServiceGrpcTransport(channel=channel))
예제 #32
0
### Define constants

# Cromwell variables passed to the container
# through environmental variables
WORKFLOW_ID = environ['WORKFLOW_ID']
TASK_CALL_NAME = environ['TASK_CALL_NAME']
TASK_CALL_INDEX = environ['TASK_CALL_INDEX']
TASK_CALL_ATTEMPT = environ['TASK_CALL_ATTEMPT']
DISK_MOUNTS = environ['DISK_MOUNTS'].split()

# GCP instance name, zone and project
# from instance introspection API
INSTANCE = get_metadata('name')
# NOTE(review): on GCE the zone metadata path is
# "projects/<numeric-id>/zones/<zone>", so PROJECT is presumably a
# numeric project id — confirm downstream usage.
_, PROJECT, _, ZONE = get_metadata('zone').split('/')

client = MetricServiceClient()
PROJECT_NAME = client.project_path(PROJECT)

# Root namespace for the custom metrics reported by this script.
METRIC_ROOT = 'wdl_task'

# Sample once per second; push a report once per minute.
MEASUREMENT_TIME_SEC = 1
REPORT_TIME_SEC = 60

LABEL_DESCRIPTORS = [
  LabelDescriptor(
    key='workflow_id',
    description='Cromwell workflow ID',
  ),
  LabelDescriptor(
    key='task_call_name',
    description='Cromwell task call name',