def test_history_monotonous(history_with_epoch_set):
    ts = history_with_epoch_set.epoch
    for delta in (Timedelta.from_s(1), Timedelta.from_string("20s")):
        new_ts = ts + delta
        history_with_epoch_set.insert(new_ts, State.OK)
        assert history_with_epoch_set.transitions[-1].time == new_ts
def __init__(
    self,
    name: str,
    metrics: Iterable[str],
    value_constraints: Optional[Dict[str, float]],
    timeout: Optional[str] = None,
    on_timeout: Optional[Coroutine] = None,
):
    """Create value- and timeout-checks for a set of metrics

    :param name: The name of this check
    :param metrics: Iterable of names of metrics to monitor
    :param value_constraints: Dictionary indicating warning and critical
        value ranges, see ValueCheck.  If omitted, this check does not care
        for which values its metrics report.
    :param timeout: If set, and a metric does not deliver values within this
        duration, run the callback on_timeout
    :param on_timeout: Callback to run when metrics do not deliver values in
        time, mandatory if timeout is given.
    """
    self._name = name
    self._metrics: Set[str] = set(metrics)
    self._status_cache = StatusCache(self._metrics)

    self._value_checks: Optional[Dict[str, ValueCheck]] = None
    self._timeout_checks: Optional[Dict[str, TimeoutCheck]] = None
    self._on_timeout_callback: Optional[Coroutine] = None

    if value_constraints is not None:
        self._value_checks: Dict[str, ValueCheck] = {
            metric: ValueCheck(**value_constraints) for metric in self._metrics
        }

    if timeout is not None:
        if on_timeout is None:
            raise ValueError("on_timeout callback is required if timeout is given")
        self._on_timeout_callback = on_timeout
        self._timeout_checks = {
            metric: TimeoutCheck(
                Timedelta.from_string(timeout),
                self._get_on_timeout_callback(metric),
            )
            for metric in self._metrics
        }
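# Hedged usage sketch, not from the source: assumes the constructor above
# belongs to a class named `Check` and that `on_timeout` may be an async
# callback, as the Coroutine annotation suggests.  The metric name and the
# value_constraints keys are made up; the keys stand in for whatever keyword
# arguments ValueCheck actually accepts.
async def _alert_operators():
    ...  # hypothetical notification callback


check = Check(
    name="office-temperature",                # illustrative check name
    metrics=["office.room1.temperature"],     # illustrative metric name
    value_constraints={"warning_above": 30.0, "critical_above": 40.0},
    timeout="5min",                           # parsed via Timedelta.from_string
    on_timeout=_alert_operators,
)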
def convert(
    self,
    value: Union[str, Timedelta],
    param: Optional[Parameter],
    ctx: Optional[Context],
) -> Optional[Timedelta]:
    if value is None:
        return None
    elif isinstance(value, str):
        try:
            return Timedelta.from_string(value)
        except ValueError:
            self.fail(
                'expected a duration: "<value>[<unit>]"',
                param=param,
                ctx=ctx,
            )
    else:
        return value
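# Hedged usage sketch, not from the source: assumes DurationParam is a
# click.ParamType, as the convert(value, param, ctx) signature suggests.
# The option name and default value below are illustrative only.
import click


@click.command()
@click.option("--timeout", type=DurationParam(default=None), default="30s")
def run(timeout):
    # click passes the raw string through DurationParam.convert(),
    # so `timeout` arrives here as a Timedelta (or None).
    click.echo(f"timeout = {timeout}")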
def parse_functions(target_dict):
    for function in target_dict.get("functions", ["avg"]):
        if function == "avg":
            yield AvgFunction()
        elif function == "min":
            yield MinFunction()
        elif function == "max":
            yield MaxFunction()
        elif function == "count":
            yield CountFunction()
        elif function == "sma":
            try:
                yield MovingAverageFunction(
                    Timedelta.from_string(target_dict.get("sma_window"))
                )
            except (TypeError, KeyError):
                pass
        # Cannot instantiate RawFunction - it automatically replaces the
        # aggregates when zooming in
        else:
            raise KeyError(f"Unknown function '{function}' requested")
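# Hedged usage sketch, not from the source: a target_dict of the shape
# parse_functions() expects, inferred only from the keys it reads
# ("functions" and "sma_window").  The window value is illustrative.
target = {
    "functions": ["avg", "min", "max", "sma"],
    "sma_window": "60s",  # parsed via Timedelta.from_string
}
functions = list(parse_functions(target))
# -> [AvgFunction(), MinFunction(), MaxFunction(), MovingAverageFunction(...)]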
def soft_fail_cache():
    return StateCache(
        metrics=["publish.rate"],
        transition_debounce_window=Timedelta.from_string("1 day"),
        transition_postprocessor=SoftFail(max_fail_count=2),
    )
        Timedelta.from_s(-1),
    ],
)
def test_history_non_monotonous(history_with_epoch_set, delta, caplog):
    ts = history_with_epoch_set.epoch + Timedelta.from_s(1)
    history_with_epoch_set.insert(ts, State.OK)
    next_ts = ts + delta

    with caplog.at_level(logging.WARNING):
        history_with_epoch_set.insert(next_ts, State.OK)
        assert "Times of state transitions must be strictly increasing" in caplog.text


@pytest.mark.parametrize(
    "ticker",
    [Timedelta.from_string("30s"), Timedelta(1)],
    indirect=True,
)
@pytest.mark.parametrize("expected_history_items", [0, 1, 3])
def test_history_length(ticker, expected_history_items):
    history = StateTransitionHistory(
        time_window=ticker.delta * (expected_history_items + 1)
    )

    epoch = next(ticker)
    history.insert(epoch, State.OK)
    assert history.epoch == epoch

    for (timestamp, _) in zip(ticker, range(expected_history_items)):
        history.insert(timestamp, State.OK)

    logger.info(f"history={history!r}")
    assert len(history.transitions) == expected_history_items
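# Hedged sketch, not from the source: direct use of StateTransitionHistory as
# exercised by the tests above.  Based on those tests, the first inserted
# entry appears to establish the epoch rather than count as a transition, and
# only entries within time_window are retained; Timestamp.now() is assumed to
# be available.
history = StateTransitionHistory(time_window=Timedelta.from_string("5min"))
epoch = Timestamp.now()
history.insert(epoch, State.OK)          # becomes history.epoch
later = epoch + Timedelta.from_s(60)
history.insert(later, State.OK)
assert history.transitions[-1].time == later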
def test_duration_param():
    value = "30s"
    DURATION = DurationParam(default=None)

    assert DURATION.convert(value, param=None, ctx=None) == Timedelta.from_string(value)
def __init__(self, minimum_duration: Union[Timedelta, str], **_kwargs):
    self._minimum_duration = (
        Timedelta.from_string(minimum_duration)
        if isinstance(minimum_duration, str)
        else minimum_duration
    )
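# Hedged usage sketch, not from the source: the normalization above lets
# callers pass either a duration string or a ready-made Timedelta.
# `SomePostprocessor` is a made-up name standing in for the enclosing class.
a = SomePostprocessor(minimum_duration="30s")
b = SomePostprocessor(minimum_duration=Timedelta.from_string("30s"))
# Both instances end up holding an equivalent Timedelta internally.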
def test_timedelta_from_string(input, expected_ns):
    assert Timedelta.from_string(input) == Timedelta(expected_ns)
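# Hedged examples, not from the source: the kind of (input, expected_ns)
# pairs the parametrized test above covers.  Unit suffixes are taken from
# strings that appear elsewhere in these snippets ("30s", "1h", "1 day"),
# and Timedelta is assumed to count nanoseconds, as the other tests suggest.
# ("30s", 30_000_000_000), ("1h", 3_600_000_000_000), ("1 day", 86_400_000_000_000)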
def test_timedelta_precise_string_roundtrip(values):
    for t in values:
        assert Timedelta.from_string(t.precise_string) == t
    assert isclose(agg.mean_integral, VALUE)
    assert isclose(agg.mean_sum, VALUE)


def test_timeaggregate_from_value_pair_non_monotonic(
    timestamp: Timestamp, time_delta_10s: Timedelta
):
    later = timestamp + time_delta_10s
    with pytest.raises(NonMonotonicTimestamps):
        TimeAggregate.from_value_pair(
            timestamp_before=later, timestamp=timestamp, value=42.0
        )


@pytest.mark.parametrize(
    ("date_string", "expected"),
    [
        # Sanity check
        ("1970-01-01T00:00:00Z", Timestamp(0)),
        # Parser supports sub-second digits
        ("1970-01-01T00:00:00.0Z", Timestamp(0)),
        # Parser drops sub-microsecond digits
        ("1970-01-01T00:00:00.000001337Z", Timestamp(1000)),
        # Timezones other than UTC are supported
        ("1970-01-01T00:00:00-01:00", Timestamp(Timedelta.from_string("1h").ns)),
    ],
)
def test_timestamp_from_iso8601(date_string: str, expected: Timestamp):
    assert Timestamp.from_iso8601(date_string) == expected