def test_creation_query_params(self, summary):
    """Verify that `labels` and `starting_from` flow into the alert's sample query."""
    name = _utils.generate_default_name()
    alerter = FixedAlerter(comparison.GreaterThan(0.7))
    labels = {"datasource": ["census2010", "census2020"]}
    starting_from = datetime.datetime(2021, 5, 10, tzinfo=time_utils.utc)

    # case 1: neither optional parameter passed
    plain_alert = summary.alerts.create(name, alerter)
    assert plain_alert.summary_sample_query == SummarySampleQuery(
        summary_query=summary.alerts._build_summary_query(),
        created_after=plain_alert.created_at,
    )
    assert plain_alert.labels == {}
    assert plain_alert.starting_from is None

    # case 2: labels only
    labeled_alert = summary.alerts.create(name, alerter, labels=labels)
    assert labeled_alert.summary_sample_query == SummarySampleQuery(
        summary_query=summary.alerts._build_summary_query(),
        labels=labels,
        created_after=labeled_alert.created_at,
    )
    assert labeled_alert.labels == labels
    assert labeled_alert.starting_from is None

    # case 3: labels and starting_from
    windowed_alert = summary.alerts.create(
        name,
        alerter,
        labels=labels,
        starting_from=starting_from,
    )
    assert windowed_alert.summary_sample_query == SummarySampleQuery(
        summary_query=summary.alerts._build_summary_query(),
        labels=labels,
        time_window_start=starting_from,
        created_after=windowed_alert.created_at,
    )
    assert windowed_alert.labels == labels
    assert windowed_alert.starting_from == starting_from
def test_empty_query(self):
    """Round-trip a default SummarySampleQuery through its proto request."""
    query = SummarySampleQuery()
    as_proto = query._to_proto_request()
    round_tripped = SummarySampleQuery._from_proto_request(as_proto)

    assert round_tripped == query
    # the aggregation submessage must remain unset when not provided,
    # which has caused backend errors in the past
    assert not as_proto.HasField("aggregation")
    assert round_tripped.aggregation is None
def test_aggregate_summary_samples(self, class_client, numeric_summary, numeric_samples):
    """A daily-sum aggregation collapses the logged samples into one total."""
    aggregating_query = SummarySampleQuery(aggregation=Aggregation("1d", "sum"))
    matches = numeric_summary.find_samples(aggregating_query)

    assert len(matches) == 1
    # the single aggregated sample holds the sum of all logged values
    assert matches[0].content == data_types.NumericValue(sum(self.values))
def test_creation_datetime(self):
    """Datetime objects and epoch-millis ints produce identical proto fields."""
    start = time_utils.now() - datetime.timedelta(weeks=1)
    end = time_utils.now() - datetime.timedelta(days=1)
    after = time_utils.now() - datetime.timedelta(hours=1)
    start_millis = time_utils.epoch_millis(start)
    end_millis = time_utils.epoch_millis(end)
    after_millis = time_utils.epoch_millis(after)

    # both input forms must serialize to the same millisecond values
    for window_start, window_end, created_after in [
        (start, end, after),  # as datetime
        (start_millis, end_millis, after_millis),  # as millis
    ]:
        sample_query = SummarySampleQuery(
            time_window_start=window_start,
            time_window_end=window_end,
            created_after=created_after,
        )
        query_filter = sample_query._to_proto_request().filter
        assert query_filter.time_window_start_at_millis == start_millis
        assert query_filter.time_window_end_at_millis == end_millis
        assert query_filter.created_at_after_millis == after_millis
def summary_sample_query(self):
    """Return the query for samples this alert should evaluate next."""
    self._refresh_cache()

    # work on a copy so the cached message is left untouched
    base_msg = self._msg.sample_find_base
    query_msg = type(base_msg)()
    query_msg.CopyFrom(base_msg)

    # an alert that has never been evaluated falls back to its creation time
    cutoff_millis = self._msg.last_evaluated_at_millis or self._msg.created_at_millis
    # only fetch samples logged after the last evaluation, so previously-seen
    # samples don't trigger a duplicate alert
    query_msg.filter.created_at_after_millis = cutoff_millis

    return SummarySampleQuery._from_proto_request(query_msg)
def create(
    self,
    name,
    alerter,
    notification_channels=None,
    labels=None,
    starting_from=None,
    _created_at=None,
    _updated_at=None,
    _last_evaluated_at=None,
):
    """
    Create a new alert.

    Parameters
    ----------
    name : str
        A unique name for this alert.
    alerter : :mod:`~verta.monitoring.alert`
        The configuration for this alert.
    notification_channels : list of :class:`~verta.monitoring.notification_channel.entities.NotificationChannel`, optional
        Channels for this alert to propagate notifications to.
    labels : dict of str to list of str, optional
        Alert on samples that have at least one of these labels. A mapping
        between label keys and lists of corresponding label values.
    starting_from : datetime.datetime or int, optional
        Alert on samples associated with periods after this time; useful
        for monitoring samples representing past data. Either a timezone
        aware datetime object or unix epoch milliseconds.

    Returns
    -------
    :class:`Alert`
        Alert.

    Examples
    --------
    .. code-block:: python

        alert = summary.alerts.create(
            name="MSE",
            alerter=alerter,
            notification_channels=[channel],
        )

    """
    # a parent summary is required to scope the alert's sample query
    if self._summary is None:
        raise RuntimeError(
            "this Alert cannot be used to create because it was not"
            " obtained via summary.alerts")

    sample_query = SummarySampleQuery(
        summary_query=self._build_summary_query(),
        labels=labels,
        time_window_start=time_utils.epoch_millis(starting_from),
    )

    channels = [] if notification_channels is None else notification_channels
    # validate eagerly so a bad channel fails before any backend call
    for channel in channels:
        Alert._validate_notification_channel(channel)

    ctx = _Context(self._conn, self._conf)
    return Alert._create(
        self._conn,
        self._conf,
        ctx,
        name=name,
        monitored_entity_id=(
            self._monitored_entity_id or self._summary.monitored_entity_id
        ),
        alerter=alerter,
        summary_sample_query=sample_query,
        notification_channels=channels,
        created_at_millis=time_utils.epoch_millis(_created_at),
        updated_at_millis=time_utils.epoch_millis(_updated_at),
        last_evaluated_at_millis=time_utils.epoch_millis(_last_evaluated_at),
    )
def test_summary_labels(self, client):
    """Exercise summary creation, sample logging, and label-based queries."""
    pytest.importorskip("scipy")
    summaries = client.monitoring.summaries
    monitored_entity = client.monitoring.get_or_create_monitored_entity()

    summary = summaries.create(
        "summary_v2_{}".format(generate_default_name()),
        data_types.DiscreteHistogram,
        monitored_entity,
    )
    assert isinstance(summary, Summary)

    entity_query = SummaryQuery(monitored_entities=[monitored_entity])
    found_summaries = summaries.find(entity_query)
    assert len(found_summaries) > 0
    assert all(isinstance(found, Summary) for found in found_summaries)

    now = time_utils.now()
    yesterday = now - timedelta(days=1)

    logged_sample = summary.log_sample(
        data_types.DiscreteHistogram(
            buckets=["hotdog", "not hotdog"],
            data=[100, 20],
        ),
        labels={"env": "test", "color": "blue"},
        time_window_start=yesterday,
        time_window_end=now,
    )
    assert isinstance(logged_sample, SummarySample)

    # a sample whose data type doesn't match the summary's must be rejected
    with pytest.raises(TypeError):
        summary.log_sample(
            data_types.FloatHistogram(
                bucket_limits=[1, 13, 25, 37, 49, 61],
                data=[15, 53, 91, 34, 7],
            ),
            labels={"env": "test", "color": "red"},
            time_window_start=yesterday,
            time_window_end=now,
        )

    label_client = client.monitoring.labels
    label_keys = label_client.find_keys(summary_query=entity_query)
    assert len(label_keys) > 0
    if label_keys:
        label_values = label_client.find_values(
            summary_query=entity_query,
            keys=label_keys,
        )
        for key in label_keys:
            assert key in label_values

    # only the successfully logged sample is retrievable
    assert len(summary.find_samples()) == 1
    blue_samples = summary.find_samples(
        SummarySampleQuery(labels={"color": ["blue"]}),
    )
    assert len(blue_samples) == 1