def send_metric(sales_item, sales_num, report_time):
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{PROJECT_ID}"
    series = monitoring_v3.TimeSeries()
    series.metric.type = f"custom.googleapis.com/{CUSTOM_METRIC_NAME}"
    # Available resource types: https://cloud.google.com/monitoring/api/resources
    series.resource.type = "global"
    series.resource.labels["project_id"] = PROJECT_ID
    # If needed, add more labels for filtering and grouping
    series.metric.labels["item"] = sales_item
    epoch = report_time.timestamp()
    seconds = int(epoch)
    interval = monitoring_v3.TimeInterval(
        {"end_time": {"seconds": seconds, "nanos": 0}})
    point = monitoring_v3.Point({
        "interval": interval,
        "value": {"int64_value": sales_num}
    })
    series.points = [point]
    client.create_time_series(request={
        "name": project_name,
        "time_series": [series]
    })
    print("Successfully wrote time series.")
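# A minimal usage sketch for send_metric above. It assumes the module-level
# PROJECT_ID and CUSTOM_METRIC_NAME constants are defined (they are referenced
# but not shown here), and that report_time is a timezone-aware datetime.
from datetime import datetime, timezone

send_metric("widget", 42, datetime.now(timezone.utc))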
def test_list_monitored_resource_descriptors(self):
    # Setup Expected Response
    next_page_token = ""
    resource_descriptors_element = {}
    resource_descriptors = [resource_descriptors_element]
    expected_response = {
        "next_page_token": next_page_token,
        "resource_descriptors": resource_descriptors,
    }
    expected_response = metric_service_pb2.ListMonitoredResourceDescriptorsResponse(
        **expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = monitoring_v3.MetricServiceClient()

    # Setup Request
    name = client.project_path("[PROJECT]")

    paged_list_response = client.list_monitored_resource_descriptors(name)
    resources = list(paged_list_response)
    assert len(resources) == 1
    assert expected_response.resource_descriptors[0] == resources[0]

    assert len(channel.requests) == 1
    expected_request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest(
        name=name)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_get_monitored_resource_descriptor(self):
    # Setup Expected Response
    name_2 = 'name2-1052831874'
    type_ = 'type3575610'
    display_name = 'displayName1615086568'
    description = 'description-1724546052'
    expected_response = {
        'name': name_2,
        'type': type_,
        'display_name': display_name,
        'description': description
    }
    expected_response = monitored_resource_pb2.MonitoredResourceDescriptor(
        **expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    client = monitoring_v3.MetricServiceClient(channel=channel)

    # Setup Request
    name = client.monitored_resource_descriptor_path(
        '[PROJECT]', '[MONITORED_RESOURCE_DESCRIPTOR]')

    response = client.get_monitored_resource_descriptor(name)
    assert expected_response == response

    assert len(channel.requests) == 1
    expected_request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(
        name=name)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def add_metric_point(project_name, metric_name, metric_value):
    """Add a data point to a custom metric for the dashboard.

    inputs
    ------
    project_name: GCP project id.
    metric_name: name of the metric; appended to 'custom.googleapis.com/'
        to form series.metric.type, e.g. 'custom.googleapis.com/test_metric_1105'.
    metric_value: numeric value to be added.

    returns
    ------
    None; the value is written to Cloud Monitoring.
    """
    client = monitoring_v3.MetricServiceClient()
    project_name = client.project_path(project_name)
    series = monitoring_v3.types.TimeSeries()
    series.metric.type = 'custom.googleapis.com/' + metric_name
    series.resource.type = 'global'
    # series.resource.labels['project_id'] = project
    # series.resource.labels['zone'] = 'us-central1-a'
    # series.resource.labels['cluster_name'] = 'heavy-hitters'
    point = series.points.add()
    point.value.double_value = metric_value
    now = time.time()
    point.interval.end_time.seconds = int(now)
    point.interval.end_time.nanos = int(
        (now - point.interval.end_time.seconds) * 10**9)
    client.create_time_series(project_name, [series])
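# Hypothetical invocation of add_metric_point; the project id is a
# placeholder and the metric name echoes the docstring's example.
add_metric_point('my-project', 'test_metric_1105', 3.5)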
def __init__(self, name, desc, service, buckets=None, client=None, valueType=None):
    self.service = service
    self.name = name
    if self.service == "prometheus":
        if buckets:
            self.h = prom.Histogram(name, desc, buckets=buckets)
        else:
            self.h = prom.Histogram(name, desc)
    else:  # STACKDRIVER
        self.client = monitoring_v3.MetricServiceClient()
        self.project_name = self.client.project_path(name)
        descriptor = monitoring_v3.types.MetricDescriptor()
        # The original referenced an undefined `metric_type`; the metric name
        # appears to be the intended value.
        descriptor.type = 'custom.googleapis.com/{}'.format(name)
        # Cumulative kind
        descriptor.metric_kind = (
            monitoring_v3.enums.MetricDescriptor.MetricKind.CUMULATIVE)
        # Distribution value type (a switch for other types may come later)
        descriptor.value_type = (
            monitoring_v3.enums.MetricDescriptor.ValueType.DISTRIBUTION)
        descriptor.description = desc
        # Create the metric descriptor and print a success message
        descriptor = self.client.create_metric_descriptor(self.project_name,
                                                          descriptor)
        print('StackDriver Histogram Created {}.'.format(descriptor.name))
def __init__(self):
    super().__init__()
    # Environment values arrive as strings, so cast the numeric settings.
    self.sample_rate = int(os.environ.get('STACKDRIVER_SAMPLE_RATE', 60))
    self.project_id = os.environ['STACKDRIVER_PROJECT_ID']
    self.cluster_name = os.environ['STACKDRIVER_CLUSTER_NAME']
    self.container_name = os.environ['STACKDRIVER_CONTAINER_NAME']
    self.namespace_id = os.environ['STACKDRIVER_NAMESPACE_UID']
    self.pod_id = os.environ['STACKDRIVER_POD_UID']
    self.buckets = int(os.environ.get('STACKDRIVER_BUCKETS', 40))
    self.growth_factor = float(os.environ.get('STACKDRIVER_GROWTH_FACTOR', 1.4))
    self.scale = float(os.environ.get('STACKDRIVER_SCALE', 1))
    self.instance_id = requests.get(
        "http://metadata.google.internal./computeMetadata/v1/instance/id",
        headers={'Metadata-Flavor': 'Google'}).text
    zone = requests.get(
        "http://metadata.google.internal./computeMetadata/v1/instance/zone",
        headers={'Metadata-Flavor': 'Google'}).text
    self.zone = zone.split('/')[-1]
    self.client = monitoring_v3.MetricServiceClient()
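# Hedged sketch of the environment this collector expects; every value below
# is an illustrative placeholder, not one from the original snippet. The
# numeric settings fall back to their defaults when unset.
os.environ.update({
    'STACKDRIVER_PROJECT_ID': 'my-project',
    'STACKDRIVER_CLUSTER_NAME': 'my-cluster',
    'STACKDRIVER_CONTAINER_NAME': 'my-container',
    'STACKDRIVER_NAMESPACE_UID': 'default',
    'STACKDRIVER_POD_UID': 'pod-1234',
})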
def list_time_series(project_id):
    client = monitoring_v3.MetricServiceClient()
    project_name = client.project_path(project_id)
    interval = monitoring_v3.types.TimeInterval()
    now = time.time()
    interval.end_time.seconds = int(now)
    interval.end_time.nanos = int((now - interval.end_time.seconds) * 10**9)
    interval.start_time.seconds = int(now - 15000)
    interval.start_time.nanos = interval.end_time.nanos
    try:
        results = client.list_time_series(
            project_name,
            'metric.type = "logging.googleapis.com/user/favicons_served"',
            interval,
            monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL)
    except Exception:  # avoid a bare except; fall back to zero on API errors
        return 0

    total = 0
    try:
        for result in results:
            total += 1
            for point in result.points:
                total += point.value.int64_value
                # print(point.value.int64_value)
        return total
    except Exception:
        return 0
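# Example call; per the function above this prints the summed favicon count,
# or 0 on any API error. 'my-project' is a placeholder project id.
print(list_time_series('my-project'))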
def test_list_monitored_resource_descriptors(self):
    # Setup Expected Response
    next_page_token = ''
    resource_descriptors_element = {}
    resource_descriptors = [resource_descriptors_element]
    expected_response = {
        'next_page_token': next_page_token,
        'resource_descriptors': resource_descriptors
    }
    expected_response = metric_service_pb2.ListMonitoredResourceDescriptorsResponse(
        **expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    client = monitoring_v3.MetricServiceClient(channel=channel)

    # Setup Request
    name = client.project_path('[PROJECT]')

    paged_list_response = client.list_monitored_resource_descriptors(name)
    resources = list(paged_list_response)
    assert len(resources) == 1
    assert expected_response.resource_descriptors[0] == resources[0]

    assert len(channel.requests) == 1
    expected_request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest(
        name=name)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def test_list_metric_descriptors(self):
    client = monitoring_v3.MetricServiceClient()
    name_inside = client.project_path(PROJECT_INSIDE)
    delayed_inside = lambda: client.list_metric_descriptors(name_inside)
    name_outside = client.project_path(PROJECT_OUTSIDE)
    delayed_outside = lambda: client.list_metric_descriptors(name_outside)
    TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
def list_time_series_reduce(project_id):
    # [START monitoring_read_timeseries_reduce]
    client = monitoring_v3.MetricServiceClient()
    project_name = client.project_path(project_id)
    interval = monitoring_v3.types.TimeInterval()
    now = time.time()
    interval.end_time.seconds = int(now)
    interval.end_time.nanos = int((now - interval.end_time.seconds) * 10**9)
    interval.start_time.seconds = int(now - 3600)
    interval.start_time.nanos = interval.end_time.nanos

    aggregation = monitoring_v3.types.Aggregation()
    aggregation.alignment_period.seconds = 1200  # 20 minutes
    aggregation.per_series_aligner = (
        monitoring_v3.enums.Aggregation.Aligner.ALIGN_MEAN)
    aggregation.cross_series_reducer = (
        monitoring_v3.enums.Aggregation.Reducer.REDUCE_MEAN)
    aggregation.group_by_fields.append('resource.zone')

    results = client.list_time_series(
        project_name,
        'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
        interval,
        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
        aggregation)
    for result in results:
        print(result)
def list_time_series_reduce(project_id):
    # [START monitoring_read_timeseries_reduce]
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"
    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10 ** 9)
    interval = monitoring_v3.TimeInterval(
        {
            "end_time": {"seconds": seconds, "nanos": nanos},
            "start_time": {"seconds": (seconds - 3600), "nanos": nanos},
        }
    )
    aggregation = monitoring_v3.Aggregation(
        {
            "alignment_period": {"seconds": 1200},  # 20 minutes
            "per_series_aligner": monitoring_v3.Aggregation.Aligner.ALIGN_MEAN,
            "cross_series_reducer": monitoring_v3.Aggregation.Reducer.REDUCE_MEAN,
            "group_by_fields": ["resource.zone"],
        }
    )
    results = client.list_time_series(
        request={
            "name": project_name,
            "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
            "interval": interval,
            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
            "aggregation": aggregation,
        }
    )
    for result in results:
        print(result)
def write_time_series(project_id):
    # [START monitoring_write_timeseries]
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"

    series = monitoring_v3.TimeSeries()
    series.metric.type = "custom.googleapis.com/my_metric" + str(uuid.uuid4())
    series.resource.type = "gce_instance"
    series.resource.labels["instance_id"] = "1234567890123456789"
    series.resource.labels["zone"] = "us-central1-f"
    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10**9)
    interval = monitoring_v3.TimeInterval(
        {"end_time": {"seconds": seconds, "nanos": nanos}})
    point = monitoring_v3.Point({
        "interval": interval,
        "value": {"double_value": 3.14}
    })
    series.points = [point]
    client.create_time_series(name=project_name, time_series=[series])
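# Example invocation; note that the uuid suffix above means every call defines
# a brand-new custom metric type. "my-project" is a placeholder project id.
write_time_series("my-project")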
def list_time_series_aggregate(project_id):
    # [START monitoring_read_timeseries_align]
    client = monitoring_v3.MetricServiceClient()
    project_name = client.project_path(project_id)
    interval = monitoring_v3.types.TimeInterval()
    now = time.time()
    interval.end_time.seconds = int(now)
    interval.end_time.nanos = int((now - interval.end_time.seconds) * 10**9)
    # query_time_int makes the lookback window adjustable.
    interval.start_time.seconds = int(now - query_time_int)
    interval.start_time.nanos = interval.end_time.nanos

    aggregation = monitoring_v3.types.Aggregation()
    # query_time_int also makes the alignment period adjustable.
    aggregation.alignment_period.seconds = query_time_int
    aggregation.per_series_aligner = (
        monitoring_v3.enums.Aggregation.Aligner.ALIGN_MEAN)

    results = client.list_time_series(
        project_name,
        'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
        interval,
        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
        aggregation)
    for result in results:
        cpuUtil = result.points[0].value.double_value
        # cpu_threshold_float is the adjustable CPU threshold to test against.
        if cpuUtil < cpu_threshold_float:
            # Print the instance name.
            print("instance name:", result.metric.labels)
            # instance_id lives under resource.labels, stored as a dict, so it
            # is read via ['KEY_NAME']; it distinguishes VMs with the same name.
            print("instance id:", result.resource.labels['instance_id'])
            # Print the CPU utilization as a percentage.
            print("CPU utilization:", cpuUtil * 100, "% \n")
def main(project=None, gce_regions=None, verbose=False, **kw):
    "Fetch, convert, and write quotas for project and optional regions."
    _configure_logging(verbose=verbose)
    regions = ['global']
    if gce_regions:
        regions += gce_regions.split(',')
    quotas = []
    try:
        compute = googleapiclient.discovery.build('compute', 'v1',
                                                  cache_discovery=False)
        # Fetch quotas for global + defined regions.
        for region in regions:
            _LOGGER.debug('fetching project quota for %s %s', project, region)
            for quota in _fetch_quotas(project, region, compute=compute):
                quotas.append((region, quota))
        # Convert quotas to series, write to Stackdriver using naive batching.
        client, i = monitoring_v3.MetricServiceClient(), 0
        while i < len(quotas):
            series = [
                _quota_to_series(project, *q) for q in quotas[i:i + _BATCH_SIZE]
            ]
            _add_series(project, series, client)
            i += _BATCH_SIZE
    except Error as e:
        _LOGGER.critical(e.message)
def handler(event, context):
    """Triggered from a message on a Cloud Pub/Sub topic.

    Args:
        event (dict): Event payload.
        context (google.cloud.functions.Context): Metadata for the event.
    """
    msg = base64.b64decode(event['data']).decode('utf-8')
    vals = {
        item[0].strip(): item[1].strip()
        for item in (ms.split(':') for ms in msg.split(','))
    }
    print(vals)

    client = monitoring_v3.MetricServiceClient()
    project = 'photon-playground'
    project_name = client.project_path(project)

    for name, val in vals.items():
        series = monitoring_v3.types.TimeSeries()
        series.metric.type = 'custom.googleapis.com/' + name
        series.resource.type = 'global'
        series.resource.labels['project_id'] = project
        point = series.points.add()
        point.value.double_value = float(val)
        now = time.time()
        point.interval.end_time.seconds = int(now)
        point.interval.end_time.nanos = int(
            (now - point.interval.end_time.seconds) * 10**9)
        client.create_time_series(project_name, [series])
    print('Successfully wrote time series.')
def add_new_metric(project_id, metric_type, desc):
    """Add new Metrics for StackDriver.

    Args:
      project_id: (str) GCP project id.
      metric_type: (str) MetricDescriptor type.
      desc: (str) MetricDescriptor description.

    Raises:
      MissingProjectIdError: GCP Project id is not defined.
    """
    if not project_id:
        raise MissingProjectIdError(
            'Set the environment variable GCLOUD_PROJECT to your GCP Project '
            'ID.')
    descriptor = monitoring_v3.types.MetricDescriptor()
    descriptor.type = 'custom.googleapis.com/{type}'.format(type=metric_type)
    descriptor.metric_kind = (
        monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)
    descriptor.value_type = (
        monitoring_v3.enums.MetricDescriptor.ValueType.INT64)
    descriptor.description = desc

    # Create Metric Descriptor.
    client = monitoring_v3.MetricServiceClient()
    project_name = client.project_path(project_id)
    descriptor = client.create_metric_descriptor(project_name, descriptor)
    print('Created {}.'.format(descriptor.name))
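# Example: registering an INT64 gauge before writing points to it. The project
# id and metric name are illustrative placeholders.
add_new_metric('my-project', 'active_sessions', 'Number of active sessions.')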
def test_create_metric_descriptor(self):
    # Setup Expected Response
    name_2 = 'name2-1052831874'
    type_ = 'type3575610'
    unit = 'unit3594628'
    description = 'description-1724546052'
    display_name = 'displayName1615086568'
    expected_response = {
        'name': name_2,
        'type': type_,
        'unit': unit,
        'description': description,
        'display_name': display_name
    }
    expected_response = api_metric_pb2.MetricDescriptor(**expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    client = monitoring_v3.MetricServiceClient(channel=channel)

    # Setup Request
    name = client.project_path('[PROJECT]')
    metric_descriptor = {}

    response = client.create_metric_descriptor(name, metric_descriptor)
    assert expected_response == response

    assert len(channel.requests) == 1
    expected_request = metric_service_pb2.CreateMetricDescriptorRequest(
        name=name, metric_descriptor=metric_descriptor)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def list_monitored_resources(project_id):
    # [START monitoring_list_resources]
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"
    resource_descriptors = client.list_monitored_resource_descriptors(
        name=project_name)
    for descriptor in resource_descriptors:
        print(descriptor.type)
def test_list_time_series(self):
    # Setup Expected Response
    next_page_token = ''
    time_series_element = {}
    time_series = [time_series_element]
    expected_response = {
        'next_page_token': next_page_token,
        'time_series': time_series
    }
    expected_response = metric_service_pb2.ListTimeSeriesResponse(
        **expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    client = monitoring_v3.MetricServiceClient(channel=channel)

    # Setup Request
    name = client.project_path('[PROJECT]')
    filter_ = 'filter-1274492040'
    interval = {}
    view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL

    paged_list_response = client.list_time_series(name, filter_, interval, view)
    resources = list(paged_list_response)
    assert len(resources) == 1
    assert expected_response.time_series[0] == resources[0]

    assert len(channel.requests) == 1
    expected_request = metric_service_pb2.ListTimeSeriesRequest(
        name=name, filter=filter_, interval=interval, view=view)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def get_monitored_resource_descriptor(project_id, resource_type_name):
    # [START monitoring_get_resource]
    client = monitoring_v3.MetricServiceClient()
    resource_path = (
        f"projects/{project_id}/monitoredResourceDescriptors/{resource_type_name}"
    )
    pprint.pprint(client.get_monitored_resource_descriptor(name=resource_path))
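# Example lookup of a standard resource type; "gce_instance" is one of the
# built-in monitored resource types, and "my-project" is a placeholder.
get_monitored_resource_descriptor("my-project", "gce_instance")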
def test_create_time_series(self):
    client = monitoring_v3.MetricServiceClient()
    name_inside = client.project_path(PROJECT_INSIDE)
    delayed_inside = lambda: client.create_time_series(name_inside, {})
    name_outside = client.project_path(PROJECT_OUTSIDE)
    delayed_outside = lambda: client.create_time_series(name_outside, {})
    TestVPCServiceControlV3._do_test(delayed_inside, delayed_outside)
def list_time_series(project_id):
    # [START monitoring_read_timeseries_simple]
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"
    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10 ** 9)
    interval = monitoring_v3.TimeInterval(
        {
            "end_time": {"seconds": seconds, "nanos": nanos},
            "start_time": {"seconds": (seconds - 1200), "nanos": nanos},
        }
    )
    results = client.list_time_series(
        request={
            "name": project_name,
            "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
            "interval": interval,
            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
        }
    )
    for result in results:
        print(result)
def send_metric(product):
    global MONITORING_CLIENT
    if not MONITORING_CLIENT:
        MONITORING_CLIENT = monitoring_v3.MetricServiceClient()
    project_name = MONITORING_CLIENT.project_path(PROJECT_ID)
    series = monitoring_v3.types.TimeSeries()
    series.metric.type = f"custom.googleapis.com/{CUSTOM_METRIC_PREFIX}-{product}"
    # Available resource types: https://cloud.google.com/monitoring/api/resources
    series.resource.type = "generic_task"
    series.resource.labels["project_id"] = PROJECT_ID
    # Adjust the label values as needed
    series.resource.labels["location"] = "global"
    series.resource.labels["namespace"] = "default"
    series.resource.labels["job"] = "app-" + product
    series.resource.labels["task_id"] = str(uuid.uuid4())
    point = series.points.add()
    point.value.int64_value = 1
    now = time.time()
    point.interval.end_time.seconds = int(now)
    point.interval.end_time.nanos = int(
        (now - point.interval.end_time.seconds) * 10**9)
    MONITORING_CLIENT.create_time_series(project_name, [series])
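# Because MONITORING_CLIENT is created lazily at module level, repeated calls
# reuse one client. The product names below are illustrative placeholders;
# PROJECT_ID and CUSTOM_METRIC_PREFIX must be defined as the function assumes.
for product in ("alpha", "beta"):
    send_metric(product)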
def run_quickstart(project=""):
    # [START monitoring_quickstart]
    from google.cloud import monitoring_v3

    import time

    client = monitoring_v3.MetricServiceClient()
    # project = 'my-project'  # TODO: Update to your project ID.
    project_name = f"projects/{project}"

    series = monitoring_v3.TimeSeries()
    series.metric.type = "custom.googleapis.com/my_metric"
    series.resource.type = "gce_instance"
    series.resource.labels["instance_id"] = "1234567890123456789"
    series.resource.labels["zone"] = "us-central1-f"
    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10**9)
    interval = monitoring_v3.TimeInterval(
        {"end_time": {"seconds": seconds, "nanos": nanos}})
    point = monitoring_v3.Point({
        "interval": interval,
        "value": {"double_value": 3.14}
    })
    series.points = [point]
    client.create_time_series(request={
        "name": project_name,
        "time_series": [series]
    })
    print("Successfully wrote time series.")
def hello_pubsub(event, context):
    """Triggered from a message on a Cloud Pub/Sub topic.

    Args:
        event (dict): Event payload.
        context (google.cloud.functions.Context): Metadata for the event.
    """
    # pubsub_message = base64.b64decode(event['data']).decode('utf-8')
    # print(pubsub_message)
    client = monitoring_v3.MetricServiceClient()
    project_name = client.project_path("pe-training")
    interval = monitoring_v3.types.TimeInterval()
    now = time.time()
    interval.end_time.seconds = int(now)
    interval.end_time.nanos = int(
        (now - interval.end_time.seconds) * 10**9)
    interval.start_time.seconds = int(now - (7 * 24 * 60 * 60))  # last 7 days
    interval.start_time.nanos = interval.end_time.nanos

    aggregation = monitoring_v3.types.Aggregation()
    aggregation.alignment_period.seconds = 24 * 60 * 60  # 1 day
    aggregation.per_series_aligner = (
        monitoring_v3.enums.Aggregation.Aligner.ALIGN_MEAN)

    results = client.list_time_series(
        project_name,
        'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
        interval,
        monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL,
        aggregation)
    for result in results:
        print(result)  # print each series rather than the whole iterator
def test_get_monitored_resource_descriptor(self):
    # Setup Expected Response
    name_2 = "name2-1052831874"
    type_ = "type3575610"
    display_name = "displayName1615086568"
    description = "description-1724546052"
    expected_response = {
        "name": name_2,
        "type": type_,
        "display_name": display_name,
        "description": description,
    }
    expected_response = monitored_resource_pb2.MonitoredResourceDescriptor(
        **expected_response)

    # Mock the API response
    channel = ChannelStub(responses=[expected_response])
    patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with patch as create_channel:
        create_channel.return_value = channel
        client = monitoring_v3.MetricServiceClient()

    # Setup Request
    name = client.monitored_resource_descriptor_path(
        "[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]")

    response = client.get_monitored_resource_descriptor(name)
    assert expected_response == response

    assert len(channel.requests) == 1
    expected_request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(
        name=name)
    actual_request = channel.requests[0][1]
    assert expected_request == actual_request
def new_stats_exporter(options=None, interval=None):
    """Get a stats exporter and running transport thread.

    Create a new `StackdriverStatsExporter` with the given options and start
    periodically exporting stats to stackdriver in the background. Fall back
    to default auth if `options` is null. This will raise
    `google.auth.exceptions.DefaultCredentialsError` if default credentials
    aren't configured.

    See `opencensus.metrics.transport.get_exporter_thread` for details on the
    transport thread.

    :type options: :class:`Options`
    :param options: Options to pass to the exporter

    :type interval: int or float
    :param interval: Seconds between export calls.

    :rtype: :class:`StackdriverStatsExporter`
    :return: The newly-created exporter.
    """
    if options is None:
        _, project_id = google.auth.default()
        options = Options(project_id=project_id)
    if str(options.project_id).strip() == "":
        raise ValueError(ERROR_BLANK_PROJECT_ID)

    ci = client_info.ClientInfo(client_library_version=get_user_agent_slug())
    client = monitoring_v3.MetricServiceClient(client_info=ci)
    exporter = StackdriverStatsExporter(client=client, options=options)

    transport.get_exporter_thread([stats.stats], exporter, interval=interval)
    return exporter
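# Minimal usage sketch: start exporting OpenCensus stats every 30 seconds,
# assuming default credentials resolve to a project as described above.
exporter = new_stats_exporter(interval=30)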
def _main(monitoring_project, gce_project=None, gce_region=None, verbose=False,
          keywords=None):
    """Module entry point used by cli and cloud function wrappers."""
    _configure_logging(verbose=verbose)
    gce_projects = gce_project or [monitoring_project]
    gce_regions = gce_region or ['global']
    keywords = set(keywords or [])
    logging.debug('monitoring project %s', monitoring_project)
    logging.debug('projects %s regions %s', gce_projects, gce_regions)
    logging.debug('keywords %s', keywords)
    quotas = []
    compute = googleapiclient.discovery.build('compute', 'v1',
                                              cache_discovery=False)
    for project in gce_projects:
        logging.debug('project %s', project)
        for region in gce_regions:
            logging.debug('region %s', region)
            for quota in _fetch_quotas(project, region, compute=compute):
                if keywords and not any(k in quota['metric'] for k in keywords):
                    # logging.debug('skipping %s', quota)
                    continue
                logging.debug('quota %s', quota)
                quotas.append((project, region, quota))
    client, i = monitoring_v3.MetricServiceClient(), 0
    while i < len(quotas):
        series = [_quota_to_series(*q) for q in quotas[i:i + _BATCH_SIZE]]
        _add_series(monitoring_project, series, client)
        i += _BATCH_SIZE
def _AddCpuUtilization(samples, instance_id):
    """Add cpu utilization to the metadata of relevant metric samples.

    Note that the utilization only covers the run stage.

    Args:
      samples: list of sample.Sample. The expected ordering is: (1) table
        loading metrics, (2) table read/write metrics.
      instance_id: the bigtable instance id.

    Returns:
      a list of updated sample.Sample.
    """
    # Check the pre-requisite
    if (len(samples) < 2 or samples[0].metadata.get('stage') != 'load' or
            samples[-1].metadata.get('stage') != 'run'):
        return None

    # pylint: disable=g-import-not-at-top
    from google.cloud import monitoring_v3
    from google.cloud.monitoring_v3 import query

    # Query the cpu utilization, which are gauged values at each minute in the
    # time window.
    client = monitoring_v3.MetricServiceClient()
    start_timestamp = samples[0].timestamp
    end_timestamp = samples[-1].timestamp
    cpu_query = query.Query(
        client,
        project=(FLAGS.project or _GetDefaultProject()),
        metric_type='bigtable.googleapis.com/cluster/cpu_load',
        end_time=datetime.datetime.utcfromtimestamp(end_timestamp),
        minutes=int((end_timestamp - start_timestamp) / 60))
    cpu_query = cpu_query.select_resources(instance=instance_id)
    time_series = list(cpu_query)
    if not time_series:
        return None

    # Build the dict to be added to samples.
    utilization_data = []
    for cluster_number, cluster_time_series in enumerate(time_series):
        utilization = numpy.array(
            [point.value.double_value for point in cluster_time_series.points])
        for percentile in CPU_UTILIZATION_PERCENTILES:
            utilization_data.append({
                'cluster_number': cluster_number,
                'percentile': percentile,
                'utilization_percentage': (
                    '%.2f' % (numpy.percentile(utilization, percentile) * 100)),
            })
    additional_metadata = {'cpu_utilization': json.dumps(utilization_data)}

    # Update the samples.
    for sample in samples:
        if sample.metadata.get('stage') == 'run':
            sample.metadata.update(additional_metadata)
    return samples
def list_metric_descriptors(project_id):
    # [START monitoring_list_descriptors]
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"
    for descriptor in client.list_metric_descriptors(name=project_name):
        if "custom" in str(descriptor.type):
            print(descriptor.type)
            # Note: despite the function's name, this also deletes every
            # custom metric descriptor it finds.
            client.delete_metric_descriptor(name=descriptor.name)
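# Example invocation against a placeholder project. Use with care: per the
# comment above, this removes all custom metric descriptors in the project.
list_metric_descriptors("my-project")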