Code Example #1
def test_mmd(mmd_params):
    n_features, n_instances = mmd_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    np.random.seed(0)
    x = tf.convert_to_tensor(np.random.random(xshape).astype('float32'))
    y = tf.convert_to_tensor(np.random.random(yshape).astype('float32'))
    mmd_xx = mmd2(x, x, kernel=GaussianRBF(sigma=tf.ones(1)))
    mmd_xy = mmd2(x, y, kernel=GaussianRBF(sigma=tf.ones(1)))
    assert mmd_xy > mmd_xx
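This test checks that the squared MMD between two independent samples exceeds the statistic of a sample compared with itself. As a rough, self-contained sketch of the quantity `mmd2` estimates (not the alibi-detect implementation, whose exact estimator may differ), the biased squared MMD under a Gaussian RBF kernel is mean(k(x, x)) + mean(k(y, y)) - 2 * mean(k(x, y)):

import numpy as np

def rbf(a, b, sigma=1.0):
    # Pairwise Gaussian RBF kernel matrix between the rows of a and b.
    d2 = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
    return np.exp(-d2 / (2 * sigma ** 2))

def mmd2_biased(x, y, sigma=1.0):
    # Biased estimator of the squared maximum mean discrepancy.
    return rbf(x, x, sigma).mean() + rbf(y, y, sigma).mean() - 2 * rbf(x, y, sigma).mean()

rng = np.random.default_rng(0)
x, y = rng.random((100, 5)), rng.random((100, 5))
print(mmd2_biased(x, y))  # small when x and y come from the same distribution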
Code Example #2
File: lsdd_online.py  Project: arnaudvl/alibi-detect
    def _configure_thresholds(self):

        # Each bootstrap sample splits the reference samples into a sub-reference sample (x)
        # and an extended test window (y). The extended test window will be treated as W overlapping
        # test windows of size W (so 2W-1 test samples in total)

        w_size = self.window_size
        etw_size = 2 * w_size - 1  # etw = extended test window
        nkc_size = self.n - self.n_kernel_centers  # nkc = non-kernel-centers
        rw_size = nkc_size - etw_size  # rw = ref-window

        perms = [
            tf.random.shuffle(tf.range(nkc_size))
            for _ in range(self.n_bootstraps)
        ]
        x_inds_all = [perm[:rw_size] for perm in perms]
        y_inds_all = [perm[rw_size:] for perm in perms]

        # For stability in high dimensions we don't divide H by (pi*sigma^2)^(d/2)
        # Results in an alternative test-stat of LSDD*(pi*sigma^2)^(d/2). Same p-vals etc.
        H = GaussianRBF(np.sqrt(2.) * self.kernel.sigma)(self.kernel_centers,
                                                         self.kernel_centers)

        # Compute lsdds for first test-window. We infer regularisation constant lambda here.
        y_inds_all_0 = [y_inds[:w_size] for y_inds in y_inds_all]
        lsdds_0, H_lam_inv = permed_lsdds(
            self.k_xc,
            x_inds_all,
            y_inds_all_0,
            H,
            lam_rd_max=self.lambda_rd_max,
        )

        # Can compute threshold for first window
        thresholds = [quantile(lsdds_0, 1 - self.fpr)]
        # And now to iterate through the other W-1 overlapping windows
        p_bar = tqdm(range(1, w_size),
                     "Computing thresholds") if self.verbose else range(
                         1, w_size)
        for w in p_bar:
            y_inds_all_w = [y_inds[w:(w + w_size)] for y_inds in y_inds_all]
            lsdds_w, _ = permed_lsdds(self.k_xc,
                                      x_inds_all,
                                      y_inds_all_w,
                                      H,
                                      H_lam_inv=H_lam_inv)
            thresholds.append(quantile(lsdds_w, 1 - self.fpr))
            x_inds_all = [
                x_inds_all[i] for i in range(len(x_inds_all))
                if lsdds_w[i] < thresholds[-1]
            ]
            y_inds_all = [
                y_inds_all[i] for i in range(len(y_inds_all))
                if lsdds_w[i] < thresholds[-1]
            ]

        self.thresholds = thresholds
        self.H_lam_inv = H_lam_inv
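The loop above calibrates one threshold per overlapping window position and, after each window, discards the bootstrap samples that would already have signalled drift, so later thresholds are conditional on earlier non-detections. A minimal, self-contained sketch of that rule (a hypothetical helper, not the library code):

import numpy as np

def configure_thresholds_sketch(boot_stats, fpr):
    # boot_stats: array of shape (n_windows, n_bootstraps), statistics simulated under "no drift".
    thresholds = []
    alive = np.ones(boot_stats.shape[1], dtype=bool)  # bootstraps that have not yet exceeded a threshold
    for stats_w in boot_stats:
        thr = np.quantile(stats_w[alive], 1 - fpr)
        thresholds.append(thr)
        alive &= stats_w < thr  # drop bootstraps that would have detected drift at this window
    return thresholds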
Code Example #3
def test_deep_kernel(deep_kernel_params):
    n_features, n_instances, kernel_a, kernel_b, eps = deep_kernel_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    x = tf.convert_to_tensor(np.random.random(xshape).astype('float32'))
    y = tf.convert_to_tensor(np.random.random(yshape).astype('float32'))

    proj = tf.keras.Sequential([Input(shape=(n_features,)), Dense(n_features)])
    kernel_a = GaussianRBF(trainable=True) if kernel_a is None else kernel_a(n_features)
    kernel_b = GaussianRBF(trainable=True) if kernel_b is None else kernel_b(n_features)

    kernel = DeepKernel(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)

    k_xy = kernel(x, y).numpy()
    k_yx = kernel(y, x).numpy()
    k_xx = kernel(x, x).numpy()
    assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
    assert (np.diag(k_xx) > 0.).all()
    np.testing.assert_almost_equal(k_xy, np.transpose(k_yx), decimal=5)
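For context, the deep kernel under test combines a kernel applied to a learned projection of the inputs with a kernel applied to the raw inputs. A rough sketch of that combination (the weighting by `eps` is an assumption here; see the DeepKernel source for the exact form):

import tensorflow as tf

def deep_kernel_sketch(x, y, proj, kernel_a, kernel_b, eps=0.5):
    # Convex combination of a kernel on projected inputs and a kernel on raw inputs.
    return (1. - eps) * kernel_a(proj(x), proj(y)) + eps * kernel_b(x, y)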
Code Example #4
def test_bckm(bckm_params):
    n_features, n_instances, batch_size = bckm_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    np.random.seed(0)
    x = tf.convert_to_tensor(np.random.random(xshape).astype('float32'))
    y = tf.convert_to_tensor(np.random.random(yshape).astype('float32'))

    kernel = GaussianRBF(sigma=tf.constant(1.))
    kernel_mat = kernel(x, y).numpy()
    bc_kernel_mat = batch_compute_kernel_matrix(x, y, kernel, batch_size=batch_size).numpy()
    np.testing.assert_almost_equal(kernel_mat, bc_kernel_mat, decimal=6)
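The test verifies that computing the kernel matrix in batches gives the same result as a single full evaluation. A minimal sketch of the idea (a hypothetical helper, not `batch_compute_kernel_matrix` itself):

import tensorflow as tf

def batched_kernel_matrix(x, y, kernel, batch_size=32):
    # Evaluate the kernel on row-blocks of x to bound peak memory usage.
    rows = [kernel(x[i:i + batch_size], y) for i in range(0, int(x.shape[0]), batch_size)]
    return tf.concat(rows, axis=0)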
Code Example #5
def test_permed_lsdds(permed_lsdds_params):
    n, m, d, B, n_kcs = permed_lsdds_params

    kcs = tf.random.normal((n_kcs, d))
    x_ref = tf.random.normal((n, d))
    x_cur = 10 + 0.2*tf.random.normal((m, d))

    x_full = tf.concat([x_ref, x_cur], axis=0)
    sigma = tf.constant((1.,))
    k_all_c = GaussianRBF(sigma)(x_full, kcs)
    H = GaussianRBF(np.sqrt(2.)*sigma)(kcs, kcs)

    perms = [tf.random.shuffle(tf.range(n+m)) for _ in range(B)]
    x_perms = [perm[:n] for perm in perms]
    y_perms = [perm[n:] for perm in perms]

    lsdd_perms, H_lam_inv, lsdd_unpermed = permed_lsdds(
        k_all_c, x_perms, y_perms, H, return_unpermed=True
    )

    assert int(tf.reduce_sum(tf.cast(lsdd_perms > lsdd_unpermed, float))) == 0
    assert H_lam_inv.shape == (n_kcs, n_kcs)
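With strong drift between `x_ref` and `x_cur`, none of the permuted LSDD statistics should exceed the unpermuted one, which is what the first assertion checks. In a detector those permutation statistics would typically be converted into a p-value; a minimal sketch of that convention (an illustrative helper, not library code):

import numpy as np

def permutation_p_value(perm_stats, obs_stat):
    # Fraction of permutation statistics at least as extreme as the observed one,
    # with a +1 correction so the p-value is never exactly zero.
    perm_stats = np.asarray(perm_stats)
    return (np.sum(perm_stats >= obs_stat) + 1) / (len(perm_stats) + 1)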
Code Example #6
def test_gaussian_kernel(gaussian_kernel_params):
    sigma, n_features, n_instances = gaussian_kernel_params
    xshape, yshape = (n_instances[0], n_features), (n_instances[1], n_features)
    x = tf.convert_to_tensor(np.random.random(xshape).astype('float32'))
    y = tf.convert_to_tensor(np.random.random(yshape).astype('float32'))

    kernel = GaussianRBF(sigma=sigma)
    infer_sigma = True if sigma is None else False
    k_xy = kernel(x, y, infer_sigma=infer_sigma).numpy()
    k_xx = kernel(x, x, infer_sigma=infer_sigma).numpy()

    assert k_xy.shape == n_instances and k_xx.shape == (xshape[0], xshape[0])
    np.testing.assert_almost_equal(k_xx.trace(), xshape[0], decimal=4)
    assert (k_xx > 0.).all() and (k_xy > 0.).all()
Code Example #7
def metric_fn(x, y):
    return mmd2(x, y, kernel=GaussianRBF(sigma=tf.ones(1))).numpy()
Code Example #8
        assert (k_xx > 0.).all() and (k_xy > 0.).all()


class MyKernel(tf.keras.Model):  # TODO: Support then test models using keras functional API
    def __init__(self, n_features: int):
        super().__init__()
        self.dense = Dense(20)

    def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
        return tf.einsum('ji,ki->jk', self.dense(x), self.dense(y))


n_features = [5, 10]
n_instances = [(100, 100), (100, 75)]
kernel_a = [GaussianRBF(trainable=True), MyKernel]
kernel_b = [GaussianRBF(trainable=True), MyKernel, None]
eps = [0.5, 'trainable']
tests_dk = list(product(n_features, n_instances, kernel_a, kernel_b, eps))
n_tests_dk = len(tests_dk)


@pytest.fixture
def deep_kernel_params(request):
    return tests_dk[request.param]


@pytest.mark.parametrize('deep_kernel_params',
                         list(range(n_tests_dk)),
                         indirect=True)
def test_deep_kernel(deep_kernel_params):
Code Example #9
File: lsdd_online.py  Project: arnaudvl/alibi-detect
    def __init__(self,
                 x_ref: Union[np.ndarray, list],
                 ert: float,
                 window_size: int,
                 preprocess_fn: Optional[Callable] = None,
                 sigma: Optional[np.ndarray] = None,
                 n_bootstraps: int = 1000,
                 n_kernel_centers: Optional[int] = None,
                 lambda_rd_max: float = 0.2,
                 verbose: bool = True,
                 input_shape: Optional[tuple] = None,
                 data_type: Optional[str] = None) -> None:
        """
        Online least squares density difference (LSDD) data drift detector using preconfigured thresholds.
        Motivated by Bu et al. (2017): https://ieeexplore.ieee.org/abstract/document/7890493.
        However, modifications are made such that a desired expected run-time (ERT) can be accurately targeted.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        ert
            The expected run-time (ERT) in the absence of drift.
        window_size
            The size of the sliding test-window used to compute the test-statistic.
            Smaller windows focus on responding quickly to severe drift, while larger windows focus on
            the ability to detect slight drift.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        sigma
            Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
            bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
            is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
            between reference samples.
        n_bootstraps
            The number of bootstrap simulations used to configure the thresholds. The larger this is, the
            more accurately the desired ERT will be targeted. It should ideally be at least an order of
            magnitude larger than the ERT.
        n_kernel_centers
            The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
            Defaults to 2*window_size.
        lambda_rd_max
            The maximum relative difference between two estimates of LSDD that the regularization parameter
            lambda is allowed to cause. Defaults to 0.2 as in the paper.
        verbose
            Whether or not to print progress during configuration.
        input_shape
            Shape of input data.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(x_ref=x_ref,
                         ert=ert,
                         window_size=window_size,
                         preprocess_fn=preprocess_fn,
                         n_bootstraps=n_bootstraps,
                         verbose=verbose,
                         input_shape=input_shape,
                         data_type=data_type)
        self.meta.update({'backend': 'tensorflow'})
        self.n_kernel_centers = n_kernel_centers
        self.lambda_rd_max = lambda_rd_max

        self._configure_normalization()

        # initialize kernel
        if sigma is None:
            self.kernel = GaussianRBF()
            _ = self.kernel(self.x_ref, self.x_ref, infer_sigma=True)
        else:
            sigma = tf.convert_to_tensor(sigma)
            self.kernel = GaussianRBF(sigma)

        if self.n_kernel_centers is None:
            self.n_kernel_centers = 2 * window_size

        self._configure_kernel_centers()
        self._configure_thresholds()
        self._initialise()
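When `sigma` is not supplied, the kernel bandwidth is inferred with the median heuristic mentioned in the docstring. A self-contained sketch of one common formulation (an assumption; the library's `infer_sigma` logic may differ in detail):

import numpy as np

def median_heuristic_sigma(x_ref):
    # Median pairwise distance between reference samples (off-diagonal entries only).
    d2 = ((x_ref[:, None, :] - x_ref[None, :, :]) ** 2).sum(-1)
    return np.sqrt(np.median(d2[np.triu_indices_from(d2, k=1)]))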
Code Example #10
    def __init__(self,
                 x_ref: np.ndarray,
                 p_val: float = .05,
                 preprocess_fn: Optional[Callable] = None,
                 kernel: Optional[tf.keras.Model] = None,
                 n_diffs: int = 1,
                 initial_diffs: Optional[np.ndarray] = None,
                 l1_reg: float = 0.01,
                 binarize_preds: bool = False,
                 train_size: Optional[float] = .75,
                 n_folds: Optional[int] = None,
                 retrain_from_scratch: bool = True,
                 seed: int = 0,
                 optimizer: tf.keras.optimizers = tf.keras.optimizers.Adam,
                 learning_rate: float = 1e-3,
                 batch_size: int = 32,
                 preprocess_batch_fn: Optional[Callable] = None,
                 epochs: int = 3,
                 verbose: int = 0,
                 train_kwargs: Optional[dict] = None,
                 dataset: Callable = TFDataset,
                 data_type: Optional[str] = None) -> None:
        """
        Classifier-based drift detector with a classifier of form y = a + b_1*k(x,w_1) + ... + b_J*k(x,w_J),
        where k is a kernel and w_1,...,w_J are learnable test locations. If drift has occurred the test locations
        learn to be more/less (given by sign of b_i) similar to test instances than reference instances.
        The test locations are regularised to be close to the average reference instance such that the **difference**
        is then interpretable as the transformation required for each feature to make the average instance more/less
        like a test instance than a reference instance.

        The classifier is trained on a fraction of the combined reference and test data and drift is detected on
        the remaining data. To use all the data to detect drift, a stratified cross-validation scheme can be chosen.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        p_val
            p-value used for the significance of the test.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        kernel
            Differentiable TensorFlow model used to define similarity between instances, defaults to Gaussian RBF.
        n_diffs
            The number of test locations to use, each corresponding to an interpretable difference.
        initial_diffs
            Array used to initialise the diffs that will be learned. Defaults to Gaussian
            for each feature with equal variance to that of reference data.
        l1_reg
            Strength of l1 regularisation to apply to the differences.
        binarize_preds
            Whether to test for discrepancy on soft (e.g. probs/logits) model predictions directly
            with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
            The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
        n_folds
            Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold instances. This makes it possible to leverage all the reference and test data
            for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
            are specified, `n_folds` is prioritized.
        retrain_from_scratch
            Whether the classifier should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        seed
            Optional random seed for fold selection.
        optimizer
            Optimizer used during training of the classifier.
        learning_rate
            Learning rate used by optimizer.
        batch_size
            Batch size used during training of the classifier.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the model.
        epochs
            Number of training epochs for the classifier for each (optional) fold.
        verbose
            Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar.
        train_kwargs
            Optional additional kwargs when fitting the classifier.
        dataset
            Dataset object used during training.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        if preprocess_fn is not None and preprocess_batch_fn is not None:
            raise ValueError(
                "SpotTheDiffDrift detector only supports preprocess_fn or preprocess_batch_fn, not both."
            )
        if n_folds is not None and n_folds > 1:
            logger.warning(
                "When using multiple folds the returned diffs will correspond to the final fold only."
            )

        if preprocess_fn is not None:
            x_ref_proc = preprocess_fn(x_ref)
        elif preprocess_batch_fn is not None:
            x_ref_proc = predict_batch(x_ref,
                                       lambda x: x,
                                       preprocess_fn=preprocess_batch_fn,
                                       batch_size=batch_size)
        else:
            x_ref_proc = x_ref

        if kernel is None:
            kernel = GaussianRBF(trainable=True)
        if initial_diffs is None:
            initial_diffs = np.random.normal(
                size=(n_diffs, ) + x_ref_proc.shape[1:]) * x_ref_proc.std(0)
        else:
            if len(initial_diffs) != n_diffs:
                raise ValueError(
                    "Should have initial_diffs.shape[0] == n_diffs")

        model = SpotTheDiffDriftTF.InterpretableClf(kernel, x_ref_proc,
                                                    initial_diffs)
        reg_loss_fn = (
            lambda model: tf.reduce_mean(tf.abs(model.diffs)) * l1_reg)

        self._detector = ClassifierDriftTF(
            x_ref=x_ref,
            model=model,
            p_val=p_val,
            preprocess_x_ref=True,
            update_x_ref=None,
            preprocess_fn=preprocess_fn,
            preds_type='logits',
            binarize_preds=binarize_preds,
            reg_loss_fn=reg_loss_fn,
            train_size=train_size,
            n_folds=n_folds,
            retrain_from_scratch=retrain_from_scratch,
            seed=seed,
            optimizer=optimizer,
            learning_rate=learning_rate,
            batch_size=batch_size,
            preprocess_batch_fn=preprocess_batch_fn,
            epochs=epochs,
            verbose=verbose,
            train_kwargs=train_kwargs,
            dataset=dataset,
            data_type=data_type)
        self.meta = self._detector.meta
        self.meta['params']['name'] = 'SpotTheDiffDrift'
        self.meta['params']['n_diffs'] = n_diffs
        self.meta['params']['l1_reg'] = l1_reg
        self.meta['params']['initial_diffs'] = initial_diffs
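For reference, the classifier form described in the docstring, y = a + b_1*k(x, w_1) + ... + b_J*k(x, w_J), with test locations w_j offset from the average reference instance by learnable diffs, can be sketched as follows (an illustrative model only, not the actual `SpotTheDiffDriftTF.InterpretableClf`):

import tensorflow as tf

class InterpretableClfSketch(tf.keras.Model):
    def __init__(self, kernel, x_ref, initial_diffs):
        super().__init__()
        self.kernel = kernel
        self.mean = tf.constant(x_ref.mean(0), dtype=tf.float32)      # average reference instance
        self.diffs = tf.Variable(initial_diffs, dtype=tf.float32)     # (n_diffs, d), learnable
        self.bias = tf.Variable(tf.zeros((1, 1)))                     # a
        self.coeffs = tf.Variable(tf.zeros((len(initial_diffs), 1)))  # b_1, ..., b_J

    def call(self, x):
        # Test locations are the average reference instance plus each learned diff.
        k = self.kernel(x, self.mean[None, :] + self.diffs)           # (batch, n_diffs)
        logits = self.bias + k @ self.coeffs                          # (batch, 1)
        return tf.concat([-logits, logits], axis=-1)                  # two-class logits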