Example #1
    def __init__(self,
                 x_ref: Union[np.ndarray, list],
                 ert: float,
                 window_size: int,
                 backend: str = 'tensorflow',
                 preprocess_fn: Optional[Callable] = None,
                 sigma: Optional[np.ndarray] = None,
                 n_bootstraps: int = 1000,
                 n_kernel_centers: Optional[int] = None,
                 lambda_rd_max: float = 0.2,
                 device: Optional[str] = None,
                 verbose: bool = True,
                 input_shape: Optional[tuple] = None,
                 data_type: Optional[str] = None) -> None:
        """
        Online least squares density difference (LSDD) data drift detector using preconfigured thresholds.
        Motivated by Bu et al. (2017): https://ieeexplore.ieee.org/abstract/document/7890493
        However, we have made modifications such that a desired ERT can be accurately targeted.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        ert
            The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
            as the expected run-time from t=0.
        window_size
            The size of the sliding test-window used to compute the test-statistic.
            Smaller windows focus on responding quickly to severe drift, whereas larger windows
            are better suited to detecting slight drift.
        backend
            Backend used for the LSDD implementation and configuration.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        sigma
            Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
            bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
            is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
            between reference samples.
        n_bootstraps
            The number of bootstrap simulations used to configure the thresholds. The larger this is the
            more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
            larger than the ert.
        n_kernel_centers
            The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
            Defaults to 2*window_size.
        lambda_rd_max
            The maximum relative difference between two estimates of LSDD that the regularization parameter
            lambda is allowed to cause. Defaults to 0.2 as in the paper.
        device
            Device type used. The default None tries to use the GPU and falls back on CPU if needed.
            Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
        verbose
            Whether or not to print progress during configuration.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__()

        backend = backend.lower()
        if (backend == 'tensorflow' and not has_tensorflow) or \
                (backend == 'pytorch' and not has_pytorch):
            raise ImportError(
                f'{backend} not installed. Cannot initialize and run the '
                f'LSDDDriftOnline detector with {backend} backend.')
        elif backend not in ['tensorflow', 'pytorch']:
            raise NotImplementedError(
                f'{backend} not implemented. Use tensorflow or pytorch instead.'
            )

        # Gather the remaining keyword arguments to forward to the backend-specific detector.
        kwargs = locals()
        args = [kwargs['x_ref'], kwargs['ert'], kwargs['window_size']]
        pop_kwargs = [
            'self', 'x_ref', 'ert', 'window_size', 'backend', '__class__'
        ]
        for k in pop_kwargs:
            kwargs.pop(k, None)

        if backend == 'tensorflow' and has_tensorflow:
            kwargs.pop('device', None)  # device is only relevant for the pytorch backend
            self._detector = LSDDDriftOnlineTF(*args, **kwargs)  # type: ignore
        else:
            self._detector = LSDDDriftOnlineTorch(*args,
                                                  **kwargs)  # type: ignore
        self.meta = self._detector.meta
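
For orientation, a minimal usage sketch follows. It assumes the public LSDDDriftOnline wrapper exposed by alibi_detect.cd with the 'pytorch' backend; the data shapes, ERT, window size and number of bootstraps are illustrative only, the predict output keys follow the test in Example #2 below, and reset() is assumed to be forwarded by the wrapper to the backend detector (the test calls it on the backend class directly).

import numpy as np
from alibi_detect.cd import LSDDDriftOnline

# Reference data against which the detector configures its thresholds.
x_ref = np.random.randn(1000, 10).astype(np.float32)

# n_bootstraps is kept an order of magnitude above the ERT, as recommended in the docstring.
cd = LSDDDriftOnline(x_ref, ert=150, window_size=20, backend='pytorch', n_bootstraps=2500)

# Feed the stream one instance at a time and reset once drift is flagged.
for x_t in np.random.randn(500, 10).astype(np.float32):
    pred = cd.predict(x_t, return_test_stat=True)
    if pred['data']['is_drift']:
        print(f"Drift detected at t={pred['data']['time']}")
        cd.reset()
        break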
Example #2
def test_lsdd_online(lsdd_online_params):
    n_features, ert, window_size, preprocess, n_bootstraps = lsdd_online_params

    np.random.seed(0)
    torch.manual_seed(0)

    x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(
        np.float32)
    preprocess_fn, preprocess_kwargs = preprocess
    to_list = False
    if hasattr(preprocess_fn,
               '__name__') and preprocess_fn.__name__ == 'preprocess_list':
        to_list = True
        x_ref = [_[None, :] for _ in x_ref]
    elif callable(preprocess_fn) and 'layer' in preprocess_kwargs \
            and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
        model = MyModel(n_features)
        layer = preprocess_kwargs['layer']
        preprocess_fn = partial(preprocess_fn,
                                model=HiddenOutput(model=model, layer=layer))
    else:
        preprocess_fn = None

    cd = LSDDDriftOnlineTorch(x_ref=x_ref,
                              ert=ert,
                              window_size=window_size,
                              preprocess_fn=preprocess_fn,
                              n_bootstraps=n_bootstraps)

    x_h0 = np.random.randn(n * n_features).reshape(n, n_features).astype(
        np.float32)
    detection_times_h0 = []
    test_stats_h0 = []
    for x_t in x_h0:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h0.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h0.append(pred_t['data']['time'])
            cd.reset()
    average_delay_h0 = np.array(detection_times_h0).mean()
    test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
    assert ert / 3 < average_delay_h0 < 3 * ert

    cd.reset()

    x_h1 = 1 + np.random.randn(n * n_features).reshape(n, n_features).astype(
        np.float32)
    detection_times_h1 = []
    test_stats_h1 = []
    for x_t in x_h1:
        if to_list:
            x_t = [x_t]
        pred_t = cd.predict(x_t, return_test_stat=True)
        test_stats_h1.append(pred_t['data']['test_stat'])
        if pred_t['data']['is_drift']:
            detection_times_h1.append(pred_t['data']['time'])
            cd.reset()
    average_delay_h1 = np.array(detection_times_h1).mean()
    test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
    assert np.abs(average_delay_h1) < ert / 2

    assert np.mean(test_stats_h1) > np.mean(test_stats_h0)
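
The test above relies on module-level scaffolding defined elsewhere in the test file: the stream length n, the MyModel torch module used with HiddenOutput, a preprocess_list helper and the parametrized lsdd_online_params fixture. Below is a hedged sketch of plausible definitions so the snippet can be exercised in isolation; the actual values, model architecture and parameter combinations in the real test suite may differ.

from functools import partial

import numpy as np
import pytest
import torch
import torch.nn as nn

from alibi_detect.cd.pytorch import HiddenOutput
from alibi_detect.cd.pytorch.lsdd_online import LSDDDriftOnlineTorch

n = 250  # number of instances per stream (hypothetical value)


class MyModel(nn.Module):
    # Small network used only to exercise the HiddenOutput preprocessing path.
    def __init__(self, n_features: int):
        super().__init__()
        self.dense1 = nn.Linear(n_features, 20)
        self.dense2 = nn.Linear(20, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.dense2(torch.relu(self.dense1(x)))


def preprocess_list(x: list) -> np.ndarray:
    # Stack a list of (1, n_features) arrays back into a single batch.
    return np.concatenate(x, axis=0)


# (n_features, ert, window_size, (preprocess_fn, preprocess_kwargs), n_bootstraps)
@pytest.fixture(params=[(10, 25, 5, (None, None), 250)])
def lsdd_online_params(request):
    return request.param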