def list_time_series_reduce():
    # [START list_time_series_reduce]
    """Print last-hour CPU utilization, aligned to 5-minute means and
    cross-series reduced to one mean per zone."""
    client = monitoring.Client()
    cpu_metric = 'compute.googleapis.com/instance/cpu/utilization'
    query = client.query(cpu_metric, hours=1)
    query = query.align(monitoring.Aligner.ALIGN_MEAN, minutes=5)
    query = query.reduce(monitoring.Reducer.REDUCE_MEAN, 'resource.zone')
    for series in query:
        print(series)
def list_time_series_aggregate():
    # [START list_time_series_aggregate]
    """Print last-hour CPU utilization aligned to 5-minute means."""
    client = monitoring.Client()
    cpu_metric = 'compute.googleapis.com/instance/cpu/utilization'
    aligned = client.query(cpu_metric, hours=1).align(
        monitoring.Aligner.ALIGN_MEAN, minutes=5)
    for series in aligned:
        print(series)
def create_metric_descriptor():
    # [START create_metric_descriptor]
    """Create a custom GAUGE/DOUBLE metric descriptor."""
    client = monitoring.Client()
    gauge_descriptor = client.metric_descriptor(
        'custom.googleapis.com/my_metric',
        metric_kind=monitoring.MetricKind.GAUGE,
        value_type=monitoring.ValueType.DOUBLE,
        description='This is a simple example of a custom metric.',
    )
    gauge_descriptor.create()
def write_time_series():
    """Write a single data point (3.14) for a custom metric on a
    GCE-instance monitored resource."""
    client = monitoring.Client()
    instance = client.resource(
        'gce_instance',
        labels={
            'instance_id': 'instance-1',
            'zone': 'us-central1-c',
        },
    )
    custom_metric = client.metric(
        type_='custom.googleapis.com/my_metric', labels={})
    client.write_point(custom_metric, instance, 3.14)
def delete_metric(metric_name):
    """Delete the custom metric.

    Args:
        metric_name: the name (including prefix) of the custom metric
            to delete.
    """
    client = monitoring.Client()
    client.metric_descriptor(metric_name).delete()
    logging.info('Deleted metric: %s', metric_name)
def test_list_metric_descriptors_filtered(self):
    """Every descriptor returned for a type prefix starts with it."""
    client = monitoring.Client()
    prefix = 'compute.googleapis.com/'
    matches = client.list_metric_descriptors(type_prefix=prefix)
    # There are currently 18 types with this prefix, but that may change.
    self.assertGreater(len(matches), 10)
    for match in matches:
        self.assertTrue(match.type.startswith(prefix))
def main():
    """Open the subscription, let messages flow, and exit once the
    monitored queue reports empty."""
    client = monitoring.Client(project=PROJECT)
    subscriber = pubsub.SubscriberClient()
    sub_path = 'projects/{}/subscriptions/{}'.format(PROJECT, SUBSCRIPTION)
    subscription = subscriber.subscribe(sub_path)
    subscription.open(handle_message)
    time.sleep(60)
    # Busy-wait until the metrics say the queue has drained.
    while not queue_empty(client):
        pass
    subscription.close()
def get_cpu_load():
    """Returns the most recent Bigtable CPU load measurement.

    Returns:
        float: The most recent Bigtable CPU usage metric
    """
    client = monitoring.Client()
    query = client.query('bigtable.googleapis.com/cluster/cpu_load',
                         minutes=5)
    # NOTE(review): raises IndexError if no series was reported in the
    # last 5 minutes — confirm the cluster always emits this metric.
    newest_series = list(query)[0]
    return newest_series.points[0].value
def __init__(self, name, project_id):
    """Bind this metric writer to a project and a custom metric type.

    Args:
        name: fully qualified custom metric type.
        project_id: project id, a full 'projects/<id>' resource name,
            or None to detect the project from GCE metadata.
    """
    self.metricType = name
    self.gce = GoogleComputeEngine()
    if project_id is None:
        resolved = self.gce.projectId()
    else:
        # Accept a full resource name; keep only the trailing id.
        resolved = (project_id.split('/')[-1]
                    if 'projects/' in project_id else project_id)
    self.project_id = resolved
    self.points = []
    self.valueType = None
    self.metricKind = None
    self.client = monitoring.Client(project=self.project_id)
def test_create_group(self):
    """A freshly created group reports that it exists."""
    client = monitoring.Client()
    new_group = client.group(
        display_name=self.DISPLAY_NAME,
        filter_string=self.FILTER,
        is_cluster=self.IS_CLUSTER,
    )
    retry_503(new_group.create)()
    self.to_delete.append(new_group)
    self.assertTrue(new_group.exists())
def test_list_group_members(self):
    """Every member of a new group is a monitored Resource."""
    client = monitoring.Client()
    new_group = client.group(
        display_name=self.DISPLAY_NAME,
        filter_string=self.FILTER,
        is_cluster=self.IS_CLUSTER,
    )
    retry_503(new_group.create)()
    self.to_delete.append(new_group)
    for member in new_group.list_members():
        self.assertIsInstance(member, monitoring.Resource)
def write_time_series():
    """Write one 3.14 sample for a labeled custom metric on a GCE
    instance resource."""
    client = monitoring.Client()
    gce_resource = client.resource(
        'gce_instance',
        labels={
            'instance_id': '1234567890123456789',
            'zone': 'us-central1-f',
        },
    )
    status_metric = client.metric(
        type_='custom.googleapis.com/my_metric',
        labels={
            'status': 'successful',
        },
    )
    client.write_point(status_metric, gce_resource, 3.14)
def submit_metric(resource_id, project, job, job_count):
    """Submit GCP custom metric data.

    Args:
        resource_id: value for the 'resource_id' metric label.
        project: unused here; kept for interface compatibility.
        job: job/queue name, embedded in the metric type.
        job_count: number of waiting jobs to record as the point value.
    """
    metric_ns = 'HySDS'
    metric_name = 'JobsWaiting-%s' % job
    client = monitoring.Client()
    metric = client.metric(
        'custom.googleapis.com/%s/%s' % (metric_ns, metric_name),
        labels={'resource_id': resource_id})
    resource = client.resource('global', {})
    client.write_point(metric, resource, job_count,
                       end_time=datetime.utcnow())
    # Fix: pass %-args lazily instead of pre-formatting with '%', so the
    # string is only built when INFO logging is enabled (same output).
    logging.info("updated job count for %s queue as metric %s:%s: %s",
                 job, metric_ns, metric_name, job_count)
def test_write_point(self):
    """Write one point for a throwaway custom metric and read it back."""
    METRIC_TYPE = ('custom.googleapis.com/tmp/system_test_example' +
                   unique_resource_id())
    METRIC_KIND = monitoring.MetricKind.GAUGE
    VALUE_TYPE = monitoring.ValueType.DOUBLE
    DESCRIPTION = 'System test example -- DELETE ME!'
    VALUE = 3.14
    client = monitoring.Client()
    descriptor = client.metric_descriptor(
        METRIC_TYPE,
        metric_kind=METRIC_KIND,
        value_type=VALUE_TYPE,
        description=DESCRIPTION,
    )
    descriptor.create()
    metric = client.metric(METRIC_TYPE, {})
    resource = client.resource('global', {})
    retry_500(client.write_point)(metric, resource, VALUE)

    def _query_timeseries_with_retries():
        # Retries both empty results and BadRequest errors: a freshly
        # created metric can take a while to become queryable.
        MAX_RETRIES = 6

        def _has_timeseries(result):
            return len(list(result)) > 0

        retry_result = RetryResult(_has_timeseries,
                                   max_tries=MAX_RETRIES,
                                   backoff=3)(client.query)
        return RetryErrors(BadRequest,
                           max_tries=MAX_RETRIES,
                           backoff=3)(retry_result)

    query = _query_timeseries_with_retries()(METRIC_TYPE, minutes=5)
    timeseries_list = list(query)
    self.assertEqual(len(timeseries_list), 1)
    timeseries = timeseries_list[0]
    self.assertEqual(timeseries.metric, metric)
    # project_id label only exists on output.
    del timeseries.resource.labels['project_id']
    self.assertEqual(timeseries.resource, resource)
    descriptor.delete()
    # A second delete must fail: the descriptor is already gone.
    with self.assertRaises(NotFound):
        descriptor.delete()
def _main(module, id, name, state, **kwargs):
    """Reconcile an alert policy to the requested state and exit the module.

    Args:
        module: Ansible-style module object providing params, check_mode
            and exit_json.
        id: optional policy id; set on the policy before the existence check.
        name: policy name passed to the AlertPolicy wrapper.
        state: desired state, compared against PRESENT / ABSENT.
        **kwargs: extra params; only 'enabled' is consulted here.
    """
    client = monitoring.Client()
    changed = False
    policy = AlertPolicy(client, name)
    if id:
        policy.id = id
    exists = policy.name and policy.exists()
    if exists:
        policy.reload()
    # mutate policy object
    param_names = [
        "display_name",
        "user_labels",
        "conditions",
        "combiner",
        "enabled",
        "notification_channels",
    ]
    for param_name in param_names:
        param_value = module.params[param_name]
        if param_value is not None:
            setattr(policy, param_name, param_value)
    documentation = module.params["documentation"]
    if documentation is not None:
        policy.documentation.content = documentation
    if not exists and kwargs["enabled"] is None:
        # New policies default to enabled unless the caller said otherwise.
        policy.enabled = True
    # apply changes to policy object
    if module.check_mode:
        pass  # check mode: report the would-be result without API calls
    else:
        if exists and state == ABSENT:
            policy.delete()
            changed = True
        elif not exists and state == PRESENT:
            policy.create()
            changed = True
        elif exists and state == PRESENT:
            policy.update()
            changed = True
    result = {
        "changed": changed,
        "alert_policy": _policy_repr(policy),
        "raw_json": policy.raw_json,
        "state": state,
    }
    module.exit_json(**result)
def test_reload_group(self):
    """reload() discards local, unsaved modifications to a group."""
    client = monitoring.Client()
    grp = client.group(
        display_name=self.DISPLAY_NAME,
        filter_string=self.FILTER,
        is_cluster=self.IS_CLUSTER,
    )
    retry_503(grp.create)()
    self.to_delete.append(grp)
    # Dirty the local copy, then re-fetch server state.
    grp.filter = 'resource.type = "aws_ec2_instance"'
    grp.display_name = 'locally changed name'
    grp.reload()
    self.assertEqual(grp.filter, self.FILTER)
    self.assertEqual(grp.display_name, self.DISPLAY_NAME)
def test_group_hierarchy(self):
    """Verify parent/children/descendants/ancestors navigation across a
    three-level group hierarchy (root -> middle -> leaf)."""
    client = monitoring.Client()
    root_group = client.group(
        display_name='Testing: Root group',
        filter_string=self.FILTER,
    )
    retry_503(root_group.create)()
    self.to_delete.insert(0, root_group)

    middle_group = client.group(
        display_name='Testing: Middle group',
        filter_string=self.FILTER,
        parent_id=root_group.id,
    )
    retry_503(middle_group.create)()
    self.to_delete.insert(0, middle_group)

    leaf_group = client.group(
        display_name='Testing: Leaf group',
        filter_string=self.FILTER,
        parent_id=middle_group.id,
    )
    retry_503(leaf_group.create)()
    self.to_delete.insert(0, leaf_group)

    # Test for parent.
    # BUG FIX: the original used assertTrue(a, b), where b is only the
    # failure *message* — any non-empty name passed. assertEqual compares.
    actual_parent = middle_group.fetch_parent()
    self.assertEqual(actual_parent.name, root_group.name)

    # Test for children.
    actual_children = middle_group.list_children()
    children_names = [group.name for group in actual_children]
    self.assertEqual(children_names, [leaf_group.name])

    # Test for descendants.
    actual_descendants = root_group.list_descendants()
    descendant_names = {group.name for group in actual_descendants}
    self.assertEqual(descendant_names,
                     set([middle_group.name, leaf_group.name]))

    # Test for ancestors.
    actual_ancestors = leaf_group.list_ancestors()
    ancestor_names = [group.name for group in actual_ancestors]
    self.assertEqual(ancestor_names, [middle_group.name, root_group.name])
def create_metric(metric_name):
    """Create the custom HTTP response by status count metric.

    Args:
        metric_name: the name (including prefix) of the response count
            metric to create.
    """
    client = monitoring.Client()
    status_label = LabelDescriptor(
        'response_code', LabelValueType.INT64,
        description='HTTP status code')
    descriptor = client.metric_descriptor(
        metric_name,
        metric_kind=MetricKind.CUMULATIVE,
        value_type=ValueType.INT64,
        labels=[status_label],
        description='Cumulative count of HTTP responses by status code.')
    descriptor.create()
    logging.info('Created metric: %s', metric_name)
def run_quickstart():
    # [START monitoring_quickstart]
    """Write a single custom-metric point against a GCE instance."""
    from google.cloud import monitoring

    client = monitoring.Client()
    gce_resource = client.resource(
        type_='gce_instance',
        labels={
            'instance_id': '1234567890123456789',
            'zone': 'us-central1-f',
        },
    )
    custom_metric = client.metric(
        type_='custom.googleapis.com/my_metric', labels={})
    # Default arguments use endtime datetime.utcnow()
    client.write_point(custom_metric, gce_resource, 3.14)
    print('Successfully wrote time series.')
def test_list_groups(self):
    """Creating a group adds exactly one name to list_groups()."""
    client = monitoring.Client()
    fresh = client.group(
        display_name=self.DISPLAY_NAME,
        filter_string=self.FILTER,
        is_cluster=self.IS_CLUSTER,
    )
    names_before = {g.name for g in client.list_groups()}
    retry_503(fresh.create)()
    self.to_delete.append(fresh)
    self.assertTrue(fresh.exists())
    names_after = {g.name for g in client.list_groups()}
    self.assertEqual(names_after - names_before, set([fresh.name]))
def test_create_and_delete_metric_descriptor(self):
    """A descriptor can be created and then deleted, with retries on
    transient server errors."""
    metric_type = ('custom.googleapis.com/tmp/system_test_example' +
                   unique_resource_id())
    client = monitoring.Client()
    descriptor = client.metric_descriptor(
        metric_type,
        metric_kind=monitoring.MetricKind.GAUGE,
        value_type=monitoring.ValueType.DOUBLE,
        description='System test example -- DELETE ME!',
    )
    retry_500(descriptor.create)()
    retry_404_500(descriptor.delete)()
def run_quickstart():
    """Write one data point for a custom metric on a GCE instance."""
    client = monitoring.Client()
    instance = client.resource(
        type_='gce_instance',
        labels={
            'instance_id': '1234567890123456789',
            'zone': 'us-central1-f',
        },
    )
    sample_metric = client.metric(
        type_='custom.googleapis.com/my_metric',
        labels={},
    )
    client.write_point(sample_metric, instance, 3.14)
    print('Successfully wrote time series.')
def main():
    """Publish one message, subscribe asynchronously, and exit once the
    monitored queue drains."""
    client = monitoring.Client(project=PROJECT)

    # Publishes the message 'Hello World'
    publisher = pubsub.PublisherClient()
    topic_path = 'projects/{}/topics/{}'.format(PROJECT, TOPIC)
    publisher.publish(topic_path, 'Hello world!')

    # Opens a connection to the message queue asynchronously
    subscriber = pubsub.SubscriberClient()
    sub_path = 'projects/{}/subscriptions/{}'.format(PROJECT, SUBSCRIPTION)
    subscription = subscriber.subscribe(sub_path)
    subscription.open(print_message)

    # Waits until the queue is empty to exit. See queue_empty for more
    # explanation. (Original author marked this sleep as broken.)
    time.sleep(11)
    while not queue_empty(client):
        pass
    subscription.close()
def test_update_group(self):
    """update() persists filter and display-name changes server-side."""
    new_filter = 'resource.type = "aws_ec2_instance"'
    new_display_name = 'updated'
    client = monitoring.Client()
    grp = client.group(
        display_name=self.DISPLAY_NAME,
        filter_string=self.FILTER,
        is_cluster=self.IS_CLUSTER,
    )
    retry_503(grp.create)()
    self.to_delete.append(grp)
    grp.filter = new_filter
    grp.display_name = new_display_name
    grp.update()
    # Re-fetch from the server to confirm the update stuck.
    refetched = client.fetch_group(grp.id)
    self.assertEqual(refetched.filter, new_filter)
    self.assertEqual(refetched.display_name, new_display_name)
def test_fetch_resource_descriptor(self):
    """Fetched resource descriptor has the expected name, type, and
    fully populated labels."""
    resource_type = 'pubsub_topic'
    client = monitoring.Client()
    fetched = client.fetch_resource_descriptor(resource_type)
    expected_name = 'projects/{project}/{what}/{type}'.format(
        project=client.project,
        what='monitoredResourceDescriptors',
        type=resource_type,
    )
    self.assertEqual(fetched.name, expected_name)
    self.assertEqual(fetched.type, resource_type)
    self.assertTrue(fetched.display_name)
    self.assertTrue(fetched.description)
    self.assertTrue(fetched.labels)
    for label in fetched.labels:
        # Each label must be fully described.
        self.assertTrue(label.key)
        self.assertTrue(label.value_type)
        self.assertTrue(label.description)
def test_fetch_metric_descriptor(self):
    """Fetched metric descriptor matches the known kind/type and has
    fully described labels."""
    metric_type = (
        'pubsub.googleapis.com/topic/send_message_operation_count')
    client = monitoring.Client()
    fetched = client.fetch_metric_descriptor(metric_type)
    expected_name = 'projects/{project}/metricDescriptors/{type}'.format(
        project=client.project,
        type=metric_type,
    )
    self.assertEqual(fetched.name, expected_name)
    self.assertEqual(fetched.type, metric_type)
    self.assertEqual(fetched.metric_kind, monitoring.MetricKind.DELTA)
    self.assertEqual(fetched.value_type, monitoring.ValueType.INT64)
    self.assertTrue(fetched.description)
    self.assertTrue(fetched.labels)
    for label in fetched.labels:
        # Each label must be fully described.
        self.assertTrue(label.key)
        self.assertTrue(label.value_type)
        self.assertTrue(label.description)
def pod_uid():
    """Return this Pod's UID from the POD_UID environment variable."""
    # The UID of the Pod is passed in as an environment variable using the
    # Kubernetes Downward API (see: metricspod.yaml)
    # Getting it using the Kubernetes API: Pod.metadata.uid
    return os.environ['POD_UID']


# Print the identifying labels for this container at startup.
print("instance_id:", instance_id())
print("zone: ", zone())
print("namespace_id:", namespace_id())
print("pod_uid:", pod_uid())
print("container_name:", container_name())
print("cluster_name:", cluster_name())

# Build the gke_container monitored resource and the two custom geth
# metrics that later writes will target.
sdclient = monitoring.Client()
resource = sdclient.resource('gke_container', labels={
    'cluster_name': cluster_name(),
    'container_name': container_name(),
    'instance_id': instance_id(),
    'namespace_id': namespace_id(),
    'pod_id': pod_uid(),
    'zone': zone(),
})
blocknumber_metric = sdclient.metric(
    type_='custom.googleapis.com/geth/block_number', labels={})
blocktime_metric = sdclient.metric(
    type_='custom.googleapis.com/geth/block_time', labels={})
def get_monitored_resource_descriptor(resource_type_name):
    # [START get_monitored_resources]
    """Fetch and print one monitored-resource descriptor by type name."""
    client = monitoring.Client()
    descriptor = client.fetch_resource_descriptor(resource_type_name)
    print(descriptor)
#-*-coding: utf-8
"""Ping a host in a loop and parse the latency for custom-metric upload.

Usage (positional argv): metric suffix, location label, ping target,
project id, instance id, instance zone.
"""
from google.cloud import monitoring
import time
import subprocess
import sys

my_own_project = sys.argv[4]
instance_id = sys.argv[5]
instance_zone = sys.argv[6]

client = monitoring.Client(project=my_own_project)
custom_metric_type = 'custom.googleapis.com/' + sys.argv[1]
resource = client.resource(
    'gce_instance',
    labels={'instance_id': instance_id, 'zone': instance_zone, })
metric = client.metric(
    type_=custom_metric_type, labels={'location': sys.argv[2]})
cmd_id = ['ping', '-c', '1', sys.argv[3]]

while 1:
    proc = subprocess.Popen(cmd_id, stdout=subprocess.PIPE, shell=False)
    (ping_result, err) = proc.communicate()
    start_point = ping_result.find('time=')
    if start_point == -1:
        # Fix: converted Python-2-only `print x` statements to the
        # parenthesized form, which behaves identically on Python 2
        # (single argument) and is valid Python 3 syntax.
        print("Timeout")
    else:
        print(ping_result)
        ping_result = ping_result[(start_point + 5):]
        # NOTE(review): `latency` is computed but not used within this
        # visible chunk — presumably written as a point further on.
        latency = ping_result[:ping_result.find(' ms')]
def get_metric_descriptor(metric_type_name):
    # [START get_metric_descriptor]
    """Look up a metric descriptor by type name and print it."""
    client = monitoring.Client()
    descriptor = client.fetch_metric_descriptor(metric_type_name)
    print(descriptor)