def list_time_series(project_id):
    """Print every CPU-utilization time series for *project_id* over the last 20 minutes.

    Args:
        project_id: GCP project ID to query.
    """
    # [START monitoring_read_timeseries_simple]
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"
    # Split the float timestamp into whole seconds and nanoseconds.
    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10 ** 9)
    # Fix: the original built a throwaway TimeInterval() here and immediately
    # overwrote it; the dead construction is removed.
    interval = monitoring_v3.TimeInterval(
        {
            "end_time": {"seconds": seconds, "nanos": nanos},
            "start_time": {"seconds": (seconds - 1200), "nanos": nanos},
        }
    )
    results = client.list_time_series(
        request={
            "name": project_name,
            "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
            "interval": interval,
            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
        }
    )
    for result in results:
        print(result)
def time_series(client: Client, service: Service, filter_by: Optional[ResourceFilter]) -> Iterable[Result]:
    """Yield a Result for each series of every metric on *service*.

    When *filter_by* is given, only series whose resource label matches
    the filter's value are yielded.
    """
    timestamp = time.time()
    whole = int(timestamp)
    frac_nanos = int((timestamp - whole) * 10**9)
    # Query window: the 20 minutes ending now.
    interval = monitoring_v3.TimeInterval({
        "end_time": {"seconds": whole, "nanos": frac_nanos},
        "start_time": {"seconds": whole - 1200, "nanos": frac_nanos},
    })
    for metric in service.metrics:
        request = metric.request(interval, groupby=service.default_groupby, project=client.project)
        try:
            results = client.monitoring().list_time_series(request=request)
        except Exception as e:
            # Surface which metric failed, keeping the original cause chained.
            raise RuntimeError(metric.name) from e
        for ts in results:
            item = Result(ts=ts)
            if filter_by is None or ts.resource.labels[filter_by.label] == filter_by.value:
                yield item
def run_quickstart(project=""):
    # [START monitoring_quickstart]
    from google.cloud import monitoring_v3
    import time

    client = monitoring_v3.MetricServiceClient()
    # project = 'my-project'  # TODO: Update to your project ID.
    project_name = f"projects/{project}"

    # Describe the custom metric and the monitored resource it is recorded on.
    series = monitoring_v3.TimeSeries()
    series.metric.type = "custom.googleapis.com/my_metric"
    series.resource.type = "gce_instance"
    series.resource.labels["instance_id"] = "1234567890123456789"
    series.resource.labels["zone"] = "us-central1-f"

    # A gauge point carries only an end time; split the float timestamp
    # into whole seconds and nanoseconds.
    timestamp = time.time()
    secs = int(timestamp)
    nsecs = int((timestamp - secs) * 10**9)
    interval = monitoring_v3.TimeInterval(
        {"end_time": {"seconds": secs, "nanos": nsecs}}
    )
    datapoint = monitoring_v3.Point(
        {"interval": interval, "value": {"double_value": 3.14}}
    )
    series.points = [datapoint]

    client.create_time_series(
        request={"name": project_name, "time_series": [series]}
    )
    print("Successfully wrote time series.")
def write_time_series(project_id):
    # [START monitoring_write_timeseries]
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"

    # Uniquely-named custom metric attached to a sample GCE instance resource.
    series = monitoring_v3.TimeSeries()
    series.metric.type = "custom.googleapis.com/my_metric" + str(uuid.uuid4())
    series.resource.type = "gce_instance"
    series.resource.labels["instance_id"] = "1234567890123456789"
    series.resource.labels["zone"] = "us-central1-f"

    # Gauge points need only an end time, split into seconds + nanos.
    timestamp = time.time()
    secs = int(timestamp)
    nsecs = int((timestamp - secs) * 10**9)
    interval = monitoring_v3.TimeInterval(
        {"end_time": {"seconds": secs, "nanos": nsecs}}
    )
    datapoint = monitoring_v3.Point(
        {"interval": interval, "value": {"double_value": 3.14}}
    )
    series.points = [datapoint]

    client.create_time_series(name=project_name, time_series=[series])
def time_series(client: Client, service: GCPService) -> Iterable[Result]:
    """Yield one Result per returned time series for each metric of *service*."""
    timestamp = time.time()
    whole = int(timestamp)
    frac_nanos = int((timestamp - whole) * 10**9)
    # Query window: the 20 minutes ending now.
    interval = monitoring_v3.TimeInterval({
        "end_time": {"seconds": whole, "nanos": frac_nanos},
        "start_time": {"seconds": whole - 1200, "nanos": frac_nanos},
    })
    for metric in service.metrics:
        request = {
            "name": f"projects/{client.project}",
            "filter": f'metric.type = "{metric.name}"',
            "interval": interval,
            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
            "aggregation": monitoring_v3.Aggregation(metric.aggregation),
        }
        try:
            results = client.monitoring().list_time_series(request=request)
        except Exception as e:
            # Surface which metric failed, keeping the original cause chained.
            raise RuntimeError(metric.name) from e
        for ts in results:
            yield Result(ts=ts)
def list_time_series_reduce(project_id):
    # [START monitoring_read_timeseries_reduce]
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"

    # Query window: the past hour, ending now.
    timestamp = time.time()
    secs = int(timestamp)
    nsecs = int((timestamp - secs) * 10 ** 9)
    interval = monitoring_v3.TimeInterval(
        {
            "end_time": {"seconds": secs, "nanos": nsecs},
            "start_time": {"seconds": secs - 3600, "nanos": nsecs},
        }
    )

    # Mean-align each series into 20-minute buckets, then reduce across
    # series to a per-zone mean.
    aggregation = monitoring_v3.Aggregation(
        {
            "alignment_period": {"seconds": 1200},  # 20 minutes
            "per_series_aligner": monitoring_v3.Aggregation.Aligner.ALIGN_MEAN,
            "cross_series_reducer": monitoring_v3.Aggregation.Reducer.REDUCE_MEAN,
            "group_by_fields": ["resource.zone"],
        }
    )

    results = client.list_time_series(
        request={
            "name": project_name,
            "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
            "interval": interval,
            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
            "aggregation": aggregation,
        }
    )
    for entry in results:
        print(entry)
def time_series(client: Client, service: GCPService) -> Iterable[Result]:
    """Yield a Result for every time series of every metric on *service*.

    Queries the last 20 minutes of data, aggregated with each metric's own
    aggregation settings.
    """
    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10**9)
    # Query window: the 20 minutes ending now.
    interval = monitoring_v3.TimeInterval({
        "end_time": {
            "seconds": seconds,
            "nanos": nanos
        },
        "start_time": {
            "seconds": (seconds - 1200),
            "nanos": nanos
        },
    })
    for metric in service.metrics:
        # TODO: actually filter by service filter/labels
        filter_rule = f'metric.type = "{metric.name}"'
        # NOTE(review): unlike the sibling helpers, this request dict has no
        # "name" (projects/<id>) entry — confirm that `client.list_time_series`
        # injects the project itself, otherwise the API call will fail.
        results = client.list_time_series(
            request={
                "filter": filter_rule,
                "interval": interval,
                "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
                "aggregation": monitoring_v3.Aggregation(metric.aggregation),
            })
        for ts in results:
            yield Result(ts=ts)
def gauge_int_timeseries(resource_type, resource_labels, metric_type, metric_labels, value):
    """Build GAUGE INT timeseries object."""
    ts = monitoring_v3.TimeSeries()
    ts.metric.type = metric_type
    ts.metric.labels.update(metric_labels)
    ts.resource.type = resource_type
    ts.resource.labels.update(resource_labels)
    ts.metric_kind = 'GAUGE'

    # Gauge points carry only an end time: now, split into seconds/nanos.
    stamp = time.time()
    secs = int(stamp)
    nsecs = int((stamp - secs) * 10**9)
    when = monitoring_v3.TimeInterval(
        {'end_time': {'seconds': secs, 'nanos': nsecs}})

    ts.points = [monitoring_v3.Point({
        'interval': when,
        'value': monitoring_v3.TypedValue(int64_value=value)
    })]
    return ts
def send_metric(sales_item, sales_num, report_time):
    """Record *sales_num* for *sales_item* at *report_time* as one custom-metric point."""
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{PROJECT_ID}"

    series = monitoring_v3.TimeSeries()
    series.metric.type = f"custom.googleapis.com/{CUSTOM_METRIC_NAME}"
    # Available resource types: https://cloud.google.com/monitoring/api/resources
    series.resource.type = "global"
    series.resource.labels["project_id"] = PROJECT_ID
    # If needed, add more labels for filtering and grouping
    series.metric.labels["item"] = sales_item

    # Whole-second end time taken from the caller-supplied report time.
    end_seconds = int(report_time.timestamp())
    interval = monitoring_v3.TimeInterval(
        {"end_time": {"seconds": end_seconds, "nanos": 0}}
    )
    series.points = [
        monitoring_v3.Point(
            {"interval": interval, "value": {"int64_value": sales_num}}
        )
    ]

    client.create_time_series(
        request={"name": project_name, "time_series": [series]}
    )
    print("Successfully wrote time series.")
def P(timestamp, value):
    """Build a Point at RFC 3339 *timestamp* (start == end) holding *value* as a double."""
    moment = datetime_helpers.from_rfc3339(timestamp).replace(tzinfo=None)
    interval = monitoring_v3.TimeInterval()
    interval.start_time = moment
    interval.end_time = moment
    return monitoring_v3.Point(interval=interval, value={"double_value": value})
def _interval() -> monitoring_v3.TimeInterval:
    """Return a fixed 20-minute interval ending at t=100000s (deterministic for tests)."""
    end_secs = 100000
    return monitoring_v3.TimeInterval({
        "end_time": {"seconds": end_secs, "nanos": 0},
        "start_time": {"seconds": end_secs - 1200, "nanos": 0},
    })
def record_metric(project_name, metric):
    """Create TS entry for captured metric.

    Args:
        project_name: monitoring project path ("projects/<id>").
        metric: dict with at least "Benchmark", "Run", "Iteration", "Score";
            any other keys become metric labels (up to GCP_LABEL_LIMIT).
    """
    global logger
    global now
    global monitoring_client
    series = monitoring_v3.TimeSeries()
    series.resource.type = "global"
    series.metric.type = f"custom.googleapis.com/{metric['Benchmark']}"
    # Required to maintain uniqueness of each permutation
    series.metric.labels["Run"] = metric["Run"]
    series.metric.labels["Iteration"] = metric["Iteration"]
    # Populate dynamic labels, GCP limit is 10 labels per descriptor
    for key in list(metric.keys()):
        if key not in ["Benchmark", "Run", "Iteration", "Score"]:
            # Fix: check the limit BEFORE adding — the original added the label
            # first, so the series could end up with limit+1 labels.
            if len(series.metric.labels) >= GCP_LABEL_LIMIT:
                # Fix: logger.warn is a deprecated alias of logger.warning.
                logger.warning(
                    f"Exiting metric label loop, limit of {GCP_LABEL_LIMIT} labels."
                )
                break  # Break out, we have hit limit on labels
            series.metric.labels[key] = metric[key]
    # `now` is a module-level float timestamp shared across metrics.
    seconds = int(now)
    nanos = int((now - seconds) * 10 ** 9)
    interval = monitoring_v3.TimeInterval(
        {"end_time": {"seconds": seconds, "nanos": nanos}}
    )
    point = monitoring_v3.Point(
        {"interval": interval, "value": {"double_value": float(metric["Score"])}}
    )
    series.points = [point]
    logger.info(
        f"Publishing {series.resource.type}/{series.metric.type}: {metric['Score']}"
    )
    try:
        monitoring_client.create_time_series(name=project_name, time_series=[series])
    except InternalServerError:
        # Best-effort: a freshly created descriptor may not be writable yet.
        logger.error(
            f"Failed to publish metric {series.metric.type}, this may be because the metric descriptor has been recently created. Will retry on the next run."
        )
def report_metric(value, type, instance_id, zone, project_id):
    """Write *value* as one double point of a custom metric on a GCE instance.

    Args:
        value: metric value to record (written as a double).
        type: suffix appended to "custom.googleapis.com/".
        instance_id, zone, project_id: gce_instance resource labels.
    """
    series = monitoring_v3.types.TimeSeries()
    series.metric.type = 'custom.googleapis.com/{type}'.format(type=type)
    series.resource.type = 'gce_instance'
    series.resource.labels['instance_id'] = instance_id
    series.resource.labels['zone'] = zone
    series.resource.labels['project_id'] = project_id
    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10 ** 9)
    interval = monitoring_v3.TimeInterval(
        {"end_time": {"seconds": seconds, "nanos": nanos}}
    )
    # Fix: record the caller-supplied value; the original hard-coded 3.14
    # and silently ignored the `value` parameter.
    point = monitoring_v3.Point({"interval": interval, "value": {"double_value": value}})
    series.points = [point]
    # NOTE(review): `client` and `project_name` are resolved as module-level
    # globals here — confirm they exist; otherwise build them from project_id.
    client.create_time_series(name=project_name, time_series=[series])
def write_data_to_metric(config, monitored_project_id, value, metric_name, network_name):
    '''
    Writes a single data point to a Cloud Monitoring custom metric.

      Parameters:
        config (dict): The dict containing config like clients and limits
        monitored_project_id: ID of the project where the resource lives (will be added as a label)
        value (int): Value for the data point of the metric.
        metric_name (string): Name of the metric
        network_name (string): Name of the network (will be added as a label)
      Returns:
        None
    '''
    # Fix: the original docstring claimed this returned (usage, limit); the
    # function writes a point and returns nothing.
    client = monitoring_v3.MetricServiceClient()
    series = monitoring_v3.TimeSeries()
    series.metric.type = f"custom.googleapis.com/{metric_name}"
    series.resource.type = "global"
    series.metric.labels["network_name"] = network_name
    series.metric.labels["project"] = monitored_project_id

    # End time = now, split into whole seconds and nanoseconds.
    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10**9)
    interval = monitoring_v3.TimeInterval(
        {"end_time": {
            "seconds": seconds,
            "nanos": nanos
        }})
    point = monitoring_v3.Point({
        "interval": interval,
        "value": {
            "double_value": value
        }
    })
    series.points = [point]

    # TODO: sometimes this crashes with 'DeadlineExceeded: 504 Deadline expired before operation could complete' error
    # Implement exponential backoff retries?
    try:
        # Deliberately best-effort: a failed write is logged, not raised.
        client.create_time_series(name=config["monitoring_project_link"],
                                  time_series=[series])
    except Exception as e:
        print(e)
def get_avg_cpu_cores(project_id, GKE_project_id, start_time, end_time, alignment_period_seconds):
    """Return the summed per-series mean of GKE node total CPU cores over a window.

    Args:
        project_id: project whose Monitoring workspace is queried.
        GKE_project_id: project the k8s nodes belong to (used in the filter).
        start_time, end_time: bounds formatted as '%Y-%m-%d_%H:%M:%S'.
        alignment_period_seconds: aggregation bucket size in seconds.

    Returns:
        float: sum over returned series of the mean point value.
    """
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"
    start = datetime.datetime.strptime(start_time, '%Y-%m-%d_%H:%M:%S')
    end = datetime.datetime.strptime(end_time, '%Y-%m-%d_%H:%M:%S')
    interval = monitoring_v3.TimeInterval(
        {
            "end_time": {"seconds": int(end.timestamp())},
            "start_time": {"seconds": int(start.timestamp())},
        }
    )
    aggregation = monitoring_v3.Aggregation(
        {
            "alignment_period": {"seconds": alignment_period_seconds},
            "per_series_aligner": monitoring_v3.Aggregation.Aligner.ALIGN_MEAN,
            "cross_series_reducer": monitoring_v3.Aggregation.Reducer.REDUCE_SUM,
        }
    )
    cpu_cores = 0
    with tracer.start_span(name=f"{app_name} get {GKE_project_id}'s metrics") as trace_span:
        results = client.list_time_series(
            request={
                "name": project_name,
                # Fix: monitoring filter values must be quoted; the original
                # emitted `project= <id>` without quotes.
                "filter": 'metric.type = "kubernetes.io/node/cpu/total_cores" AND resource.type="k8s_node" AND project = "' + GKE_project_id + '"',
                "interval": interval,
                "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
                "aggregation": aggregation,
            }
        )
        for result in results:
            logger.log_text(f"data points collected: {len(result.points)}", severity=LOG_SEVERITY_DEBUG)
            # Fix: guard against an empty series (ZeroDivisionError).
            if not result.points:
                continue
            # Fix: reset per series — previously `total` accumulated across
            # series, inflating every subsequent average.
            total = 0.0
            for point in result.points:
                total += point.value.double_value
            cpu_cores += total / len(result.points)
    return cpu_cores
def monitoring_interval():
    '''
    Creates the monitoring interval of 24 hours

      Returns:
        monitoring_v3.TimeInterval: Monitoring time interval of 24h
    '''
    stamp = time.time()
    end_secs = int(stamp)
    end_nanos = int((stamp - end_secs) * 10**9)
    one_day = 24 * 60 * 60
    return monitoring_v3.TimeInterval({
        "end_time": {"seconds": end_secs, "nanos": end_nanos},
        "start_time": {"seconds": end_secs - one_day, "nanos": end_nanos},
    })
def write_time_series(project_id,requested_pets):
    """Record *requested_pets* as one int64 point of the app's custom GAE metric."""
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"

    # Custom metric written against this App Engine instance.
    series = monitoring_v3.TimeSeries()
    series.metric.type = "custom.googleapis.com/" + CUSTOM_METRIC_NAME_PREFIX + PROJECT_ID
    series.resource.type = "gae_instance"
    series.resource.labels["instance_id"] = os.environ["GAE_INSTANCE"]
    series.resource.labels["location"] = "us-east1"
    series.resource.labels["module_id"] = os.environ["GAE_APPLICATION"]
    series.resource.labels["version_id"] = os.environ["GAE_VERSION"]

    # End time = now, split into seconds and nanoseconds.
    stamp = time.time()
    secs = int(stamp)
    nsecs = int((stamp - secs) * 10 ** 9)
    interval = monitoring_v3.TimeInterval(
        {"end_time": {"seconds": secs, "nanos": nsecs}}
    )
    series.points = [
        monitoring_v3.Point(
            {"interval": interval, "value": {"int64_value": requested_pets}}
        )
    ]
    client.create_time_series(name=project_name, time_series=[series])
def build_series(self, prefix, name, labels, details):
    """Assemble a single-point TimeSeries for a generic_task custom metric.

    `details` supplies the typed-value field name ('kind') and the value.
    """
    ts = monitoring_v3.types.TimeSeries()
    ts.metric.type = f'custom.googleapis.com/{self.app_name}/{prefix}/{name}'
    ts.resource.type = 'generic_task'
    ts.resource.labels.update(labels)
    # setup time for submissions
    stamp = time.time()
    secs = int(stamp)
    nsecs = int((stamp - secs) * 10 ** 9)
    when = monitoring_v3.TimeInterval(
        {"end_time": {"seconds": secs, "nanos": nsecs}}
    )
    # create the data point
    ts.points = [monitoring_v3.Point({
        'interval': when,
        'value': {details['kind']: details['value']},
    })]
    return ts
def report_metric(value, metric_type, resource_values):
    """Create time series for report.

    Args:
        value: (int) Report metric value.
        metric_type: (str) Metric type
        resource_values: (dict) Contains resources information
    """
    client = resource_values.get('client')
    project_id = resource_values.get('project_id')
    instance_id = resource_values.get('instance_id')
    zone = resource_values.get('zone')
    project_name = client.common_project_path(project_id)

    # TimeSeries definition: a custom metric on a gce_instance resource.
    series = monitoring_v3.types.TimeSeries()
    series.metric.type = 'custom.googleapis.com/{type}'.format(
        type=metric_type)
    series.resource.type = 'gce_instance'
    series.resource.labels['instance_id'] = instance_id
    series.resource.labels['zone'] = zone
    series.resource.labels['project_id'] = project_id

    # End time = now, split into seconds and nanoseconds.
    stamp = time.time()
    secs = int(stamp)
    nsecs = int((stamp - secs) * 10**9)
    interval = monitoring_v3.TimeInterval(
        {"end_time": {"seconds": secs, "nanos": nsecs}})
    series.points = [
        monitoring_v3.Point(
            {"interval": interval, "value": {"int64_value": value}})
    ]
    client.create_time_series(name=project_name, time_series=[series])
def report_metric(value, t, instance_id, zone, project_id):
    """Publish *value* as an int64 point of custom metric *t* on a GCE instance.

    NOTE: `client` and `project_name` are resolved as module-level globals.
    """
    series = monitoring_v3.types.TimeSeries()
    series.metric.type = 'custom.googleapis.com/{type}'.format(type=t)
    series.resource.type = 'gce_instance'
    series.resource.labels['instance_id'] = instance_id
    series.resource.labels['zone'] = zone
    series.resource.labels['project_id'] = project_id

    # End time = now, split into seconds and nanoseconds.
    stamp = time.time()
    secs = int(stamp)
    nsecs = int((stamp - secs) * 10**9)
    when = monitoring_v3.TimeInterval(
        {'end_time': {'seconds': secs, 'nanos': nsecs}})

    datapoint = monitoring_v3.Point()
    datapoint.value.int64_value = value
    datapoint.interval = when
    series.points.append(datapoint)

    client.create_time_series(request={
        'name': project_name,
        'time_series': [series]
    })
def get(self, metric="tpu_mxu", node_id=None, interval=None, filters=None, raw=False, when=None, full_names=False):
    """Fetch monitoring points for *metric*, keyed by time-series label.

    Returns the raw list_time_series response when raw=True, otherwise a
    dict mapping series label -> [[seconds_ago, value], ...].
    """
    if when is None:
        when = utc()
    # Short aliases (e.g. "tpu_mxu") are expanded via the module-level
    # `metrics` table; anything containing '/' is taken as fully qualified.
    if '/' not in metric:
        metric = metrics[metric]
    if interval is None:
        # Default query window: the 20 minutes ending now.
        now = time.time()
        seconds = int(now)
        nanos = int((now - seconds) * 10**9)
        interval = monitoring_v3.TimeInterval({
            "end_time": {
                "seconds": seconds,
                "nanos": nanos
            },
            "start_time": {
                "seconds": (seconds - 1200),
                "nanos": nanos
            },
        })
    if filters is None:
        filters = []
    # Copy so the appends below do not mutate the caller's list.
    filters = filters[:]
    if node_id is not None:
        filters += [['resource.labels.node_id', node_id]]
    filters += [['metric.type', metric]]
    # Build the monitoring filter string; json.dumps quotes/escapes values.
    filters = ' AND '.join(
        ['{} = {}'.format(k, json.dumps(v)) for k, v in filters])
    results = self.client.list_time_series(
        request={
            "name": "projects/{project_id}".format(
                project_id=self.project_id),
            "filter": filters,
            "interval": interval,
            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
        })
    if raw:
        return results
    points = collections.defaultdict(lambda: [])
    for timeSeries in results:
        key = get_time_series_label(timeSeries, short=not full_names)
        for point in timeSeries.points:
            point_utc = point.interval.start_time.timestamp()
            seconds_ago = int(when - point_utc)
            # NOTE(review): 2 is presumably MetricDescriptor.ValueType.INT64 —
            # comparing against the enum constant would be clearer; confirm.
            if timeSeries.value_type == 2:  # what's the correct way to get INT64 here?
                value = point.value.int64_value
            else:
                value = point.value.double_value
            points[key].append([seconds_ago, value])
    points = dict(points)
    return points
def _make_interval(end_time, start_time=None):
    """Wrap the given endpoints in a TimeInterval; start may be omitted for a point-in-time."""
    return monitoring_v3.TimeInterval(end_time=end_time, start_time=start_time)