Example #1
def time_series(client: Client, service: GCPService) -> Iterable[Result]:
    # Build a TimeInterval covering the last 20 minutes, split into whole
    # seconds and nanoseconds as the protobuf timestamp expects.
    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10**9)
    interval = monitoring_v3.TimeInterval({
        "end_time": {"seconds": seconds, "nanos": nanos},
        "start_time": {"seconds": seconds - 1200, "nanos": nanos},
    })
    for metric in service.metrics:
        filter_rule = f'metric.type = "{metric.name}"'

        request = {
            "name": f"projects/{client.project}",
            "filter": filter_rule,
            "interval": interval,
            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
            "aggregation": monitoring_v3.Aggregation(metric.aggregation),
        }
        try:
            results = client.monitoring().list_time_series(request=request)
        except Exception as e:
            # Re-raise with the failing metric's name for easier debugging.
            raise RuntimeError(metric.name) from e
        for ts in results:
            yield Result(ts=ts)
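The function above is a lazy generator: nothing is fetched until it is
consumed. Assuming a configured Client and GCPService (both defined elsewhere
in the source codebase), a caller would drain it like this (a sketch, not part
of the original sample):

results = list(time_series(client, service))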
Example #2
def list_time_series_reduce(project_id):
    # [START monitoring_read_timeseries_reduce]
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"

    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10**9)
    # Query window: the last hour.
    interval = monitoring_v3.TimeInterval(
        {
            "end_time": {"seconds": seconds, "nanos": nanos},
            "start_time": {"seconds": (seconds - 3600), "nanos": nanos},
        }
    )
    # Align each series to 20-minute means, then average across series,
    # grouping by zone.
    aggregation = monitoring_v3.Aggregation(
        {
            "alignment_period": {"seconds": 1200},  # 20 minutes
            "per_series_aligner": monitoring_v3.Aggregation.Aligner.ALIGN_MEAN,
            "cross_series_reducer": monitoring_v3.Aggregation.Reducer.REDUCE_MEAN,
            "group_by_fields": ["resource.zone"],
        }
    )

    results = client.list_time_series(
        request={
            "name": project_name,
            "filter": 'metric.type = "compute.googleapis.com/instance/cpu/utilization"',
            "interval": interval,
            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
            "aggregation": aggregation,
        }
    )
    for result in results:
        print(result)
    # [END monitoring_read_timeseries_reduce]
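For reference, a minimal invocation of the sample might look as follows (a
sketch assuming Application Default Credentials; the project ID is
hypothetical):

if __name__ == "__main__":
    list_time_series_reduce("my-gcp-project")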
Example #3
def time_series(client: Client, service: GCPService) -> Iterable[Result]:
    # Same 20-minute query window as in Example #1.
    now = time.time()
    seconds = int(now)
    nanos = int((now - seconds) * 10**9)
    interval = monitoring_v3.TimeInterval({
        "end_time": {"seconds": seconds, "nanos": nanos},
        "start_time": {"seconds": seconds - 1200, "nanos": nanos},
    })
    for metric in service.metrics:
        # TODO: actually filter by service filter/labels
        filter_rule = f'metric.type = "{metric.name}"'
        results = client.list_time_series(
            request={
                "filter": filter_rule,
                "interval": interval,
                "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
                "aggregation": monitoring_v3.Aggregation(metric.aggregation),
            })
        for ts in results:
            yield Result(ts=ts)
Example #4
def test_metric_requests(interval: monitoring_v3.TimeInterval):
    metric = agent_gcp.Metric(
        name="compute.googleapis.com/instance/uptime",
        aggregation=agent_gcp.Aggregation(
            per_series_aligner=Aligner.ALIGN_MAX,
            cross_series_reducer=Reducer.REDUCE_NONE,
        ),
    )
    request = metric.request(
        interval=interval, groupby="resource.thisone", project="fun"
    )
    expected = {
        "name": "projects/fun",
        "filter": 'metric.type = "compute.googleapis.com/instance/uptime"',
        "interval": interval,
        "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
        "aggregation": monitoring_v3.Aggregation({
            "alignment_period": {"seconds": 60},
            "group_by_fields": ["resource.thisone"],
            "per_series_aligner": Aligner.ALIGN_MAX,
            "cross_series_reducer": Reducer.REDUCE_NONE,
        }),
    }
    assert request == expected
Example #5
    def to_obj(self, default_groupby: str) -> monitoring_v3.Aggregation:
        groupbyfields = [default_groupby]
        groupbyfields.extend(self.group_by_fields)
        return monitoring_v3.Aggregation({
            "alignment_period": {"seconds": self.alignment_period},
            "group_by_fields": groupbyfields,
            "per_series_aligner": self.per_series_aligner,
            "cross_series_reducer": self.cross_series_reducer,
        })
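Called from a metric definition, the helper above would receive the caller's
default group-by field; a hypothetical call site, following the naming used in
Example #4 (the field name is a placeholder):

aggregation = metric.aggregation.to_obj("resource.project_id")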
Example #6
def get_avg_cpu_cores(project_id, GKE_project_id, start_time, end_time, alignment_period_seconds):
    client = monitoring_v3.MetricServiceClient()
    project_name = f"projects/{project_id}"
    # Timestamps are expected in the form YYYY-MM-DD_HH:MM:SS.
    start = datetime.datetime.strptime(start_time, '%Y-%m-%d_%H:%M:%S')
    end = datetime.datetime.strptime(end_time, '%Y-%m-%d_%H:%M:%S')

    interval = monitoring_v3.TimeInterval(
        {
            "end_time": {"seconds": int(end.timestamp())},
            "start_time": {"seconds": int(start.timestamp())},
        }
    )

    aggregation = monitoring_v3.Aggregation(
        {
            "alignment_period": {"seconds": alignment_period_seconds},
            "per_series_aligner": monitoring_v3.Aggregation.Aligner.ALIGN_MEAN,
            "cross_series_reducer": monitoring_v3.Aggregation.Reducer.REDUCE_SUM,
        }
    )

    cpu_cores = 0.0
    with tracer.start_span(name=f"{app_name} get {GKE_project_id}'s metrics") as trace_span:
        results = client.list_time_series(
            request={
                "name": project_name,
                "filter": 'metric.type = "kubernetes.io/node/cpu/total_cores" AND resource.type="k8s_node" AND project= ' +
                          GKE_project_id,
                "interval": interval,
                "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,
                "aggregation": aggregation,
            }
        )

        for result in results:
            logger.log_text(f"data points collected: {len(result.points)}", severity=LOG_SEVERITY_DEBUG)
            # Average this series' aligned points over the query window.
            total = 0.0
            for point in result.points:
                total += point.value.double_value
            cpu_cores += total / len(result.points)

    return cpu_cores
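A hypothetical invocation, matching the YYYY-MM-DD_HH:MM:SS timestamp format
the function parses (both project IDs are placeholders):

avg_cores = get_avg_cpu_cores(
    "monitoring-project",   # project whose metrics scope is queried
    "gke-project",          # project filter applied to the time series
    "2023-01-01_00:00:00",
    "2023-01-01_01:00:00",
    3600,                   # one-hour alignment period
)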
Example #7
    def test_request_parameters_maximal(self):
        T0 = datetime.datetime(2016, 4, 7, 2, 0, 0)
        T1 = datetime.datetime(2016, 4, 7, 2, 30, 0)

        ALIGNER = "ALIGN_DELTA"
        MINUTES, SECONDS, PERIOD_IN_SECONDS = 1, 30, 90

        REDUCER = "REDUCE_MEAN"
        FIELD1, FIELD2 = "resource.zone", "metric.instance_name"

        PAGE_SIZE = 100

        client = self._create_client()
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        query = query.select_interval(start_time=T0, end_time=T1)
        query = query.align(ALIGNER, minutes=MINUTES, seconds=SECONDS)
        query = query.reduce(REDUCER, FIELD1, FIELD2)
        actual = query._build_query_params(headers_only=True, page_size=PAGE_SIZE)
        expected = {
            "name": "projects/%s" % PROJECT,
            "filter": 'metric.type = "{type}"'.format(type=METRIC_TYPE),
            "interval": self._make_interval(T1, T0),
            "aggregation": monitoring_v3.Aggregation(
                per_series_aligner=ALIGNER,
                alignment_period={"seconds": PERIOD_IN_SECONDS},
                cross_series_reducer=REDUCER,
                group_by_fields=[FIELD1, FIELD2],
            ),
            "view": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.HEADERS,
            "page_size": PAGE_SIZE,
        }
        self.assertEqual(actual, expected)
Example #8
    def test_iteration_headers_only(self):
        T0 = datetime.datetime(2016, 4, 6, 22, 5, 0)
        T1 = datetime.datetime(2016, 4, 6, 22, 10, 0)

        SERIES1 = {
            "metric": {"type": METRIC_TYPE, "labels": METRIC_LABELS},
            "resource": {"type": RESOURCE_TYPE, "labels": RESOURCE_LABELS},
            "metric_kind": METRIC_KIND,
            "value_type": VALUE_TYPE,
        }
        SERIES2 = {
            "metric": {"type": METRIC_TYPE, "labels": METRIC_LABELS2},
            "resource": {"type": RESOURCE_TYPE, "labels": RESOURCE_LABELS2},
            "metric_kind": METRIC_KIND,
            "value_type": VALUE_TYPE,
        }

        RESPONSE = {"time_series": [SERIES1, SERIES2], "next_page_token": ""}

        channel = ChannelStub(responses=[RESPONSE])
        client = self._create_client(channel)
        query = self._make_one(client, PROJECT, METRIC_TYPE)
        query = query.select_interval(start_time=T0, end_time=T1)

        # Add a temporal alignment to check that the "aggregation" query
        # param is processed correctly.
        query = query.align(monitoring_v3.Aggregation.Aligner.ALIGN_MAX,
                            seconds=3600)
        response = list(query.iter(headers_only=True))

        self.assertEqual(len(response), 2)
        series1, series2 = response

        self.assertEqual(series1.metric.labels, METRIC_LABELS)
        self.assertEqual(series2.metric.labels, METRIC_LABELS2)
        self.assertEqual(series1.resource.labels, RESOURCE_LABELS)
        self.assertEqual(series2.resource.labels, RESOURCE_LABELS2)

        self.assertFalse(len(series1.points))
        self.assertFalse(len(series2.points))

        expected_request = monitoring_v3.ListTimeSeriesRequest(
            name="projects/" + PROJECT,
            filter='metric.type = "{type}"'.format(type=METRIC_TYPE),
            interval=self._make_interval(T1, T0),
            view=monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.HEADERS,
            aggregation=monitoring_v3.Aggregation(
                per_series_aligner=monitoring_v3.Aggregation.Aligner.ALIGN_MAX,
                alignment_period={"seconds": 3600},
            ),
        )
        request = channel.requests[0][1]
        self.assertEqual(request, expected_request)
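Both test cases above exercise the query builder that ships with
google-cloud-monitoring. Outside of tests, the equivalent public usage is
roughly the following (a sketch assuming Application Default Credentials and a
hypothetical project ID):

from google.cloud import monitoring_v3
from google.cloud.monitoring_v3 import query

client = monitoring_v3.MetricServiceClient()
q = query.Query(
    client,
    "my-gcp-project",
    metric_type="compute.googleapis.com/instance/cpu/utilization",
    hours=1,  # query window: the last hour
)
q = q.align(monitoring_v3.Aggregation.Aligner.ALIGN_MEAN, minutes=5)
q = q.reduce(monitoring_v3.Aggregation.Reducer.REDUCE_MEAN, "resource.zone")
for ts in q:  # iterating issues the list_time_series call
    print(ts)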