Example No. 1
# model_seq, model_func and model_sub are assumed to be module-level Sequential,
# functional API and subclassed tf.keras test models taking inputs of shape (10,)
def test_clone_model():
    # cloning a Sequential model should yield freshly initialised weights
    model_seq_clone = clone_model(model_seq)
    assert not (model_seq_clone.weights[0]
                == model_seq.weights[0]).numpy().any()
    # same check for a functional API model
    model_func_clone = clone_model(model_func)
    assert not (model_func_clone.weights[0]
                == model_func.weights[0]).numpy().any()
    # subclassed models need a forward pass to build their weights before comparing
    model_sub_clone = clone_model(model_sub)
    _ = model_sub(tf.zeros((1, 10)))
    _ = model_sub_clone(tf.zeros((1, 10)))
    assert not (model_sub_clone.weights[0]
                == model_sub.weights[0]).numpy().any()
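The assertions above rest on the fact that cloning a Keras model rebuilds the architecture with freshly initialised weights, so no weight entry should coincide with the original. A minimal sketch of the same idea using the built-in tf.keras.models.clone_model (the snippet above may rely on a project-specific clone_model that also supports subclassed models):

import tensorflow as tf

model_seq = tf.keras.Sequential([tf.keras.layers.Dense(5, input_shape=(10,))])
model_seq_clone = tf.keras.models.clone_model(model_seq)  # same architecture, new random weights
# the cloned kernel (weights[0]) should differ element-wise from the original
assert not (model_seq_clone.weights[0].numpy() == model_seq.weights[0].numpy()).any()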
Example No. 2
    def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, np.ndarray]:
        """
        Compute the p-value resulting from a permutation test using the maximum mean discrepancy
        as a distance measure between the reference data and the data to be tested. The kernel
        used within the MMD is first trained to maximise an estimate of the resulting test power.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        p-value obtained from the permutation test, the MMD^2 between the reference and test set
        and the MMD^2 values from the permutation test.
        """
        x_ref, x_cur = self.preprocess(x)
        (x_ref_tr, x_cur_tr), (x_ref_te, x_cur_te) = self.get_splits(x_ref, x_cur)
        ds_ref_tr, ds_cur_tr = self.dataset(x_ref_tr), self.dataset(x_cur_tr)

        self.kernel = clone_model(self.original_kernel) if self.retrain_from_scratch else self.kernel
        train_args = [self.j_hat, (ds_ref_tr, ds_cur_tr)]
        LearnedKernelDriftTF.trainer(*train_args, **self.train_kwargs)  # type: ignore

        x_all = np.concatenate([x_ref_te, x_cur_te], axis=0)
        kernel_mat = self.kernel_mat_fn(x_all, x_all, self.kernel)
        kernel_mat = kernel_mat - tf.linalg.diag(tf.linalg.diag_part(kernel_mat))  # zero diagonal
        mmd2 = mmd2_from_kernel_matrix(kernel_mat, len(x_cur_te), permute=False, zero_diag=False).numpy()
        mmd2_permuted = np.array(
            [mmd2_from_kernel_matrix(kernel_mat, len(x_cur_te), permute=True, zero_diag=False).numpy()
                for _ in range(self.n_permutations)]
        )
        p_val = (mmd2 <= mmd2_permuted).mean()
        return p_val, mmd2, mmd2_permuted
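The last two lines of score implement a standard permutation test: the p-value is the fraction of permuted MMD^2 statistics that are at least as large as the observed one. A tiny numpy illustration with hypothetical values:

import numpy as np

mmd2 = 0.12                                          # observed statistic (hypothetical value)
mmd2_permuted = np.array([0.02, 0.15, 0.05, 0.11])   # statistics under random permutations (hypothetical)
p_val = (mmd2 <= mmd2_permuted).mean()               # fraction of permutations matching or beating the observed value
print(p_val)                                         # 0.25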
Example No. 3
    def score(self,
              x: np.ndarray) -> Tuple[float, float, np.ndarray, np.ndarray]:
        """
        Compute the out-of-fold drift metric (such as the accuracy) of a classifier
        trained to distinguish the reference data from the data to be tested.

        Parameters
        ----------
        x
            Batch of instances.

        Returns
        -------
        p-value, a notion of distance between the trained classifier's out-of-fold performance
        and that which we'd expect under the null assumption of no drift,
        and the out-of-fold classifier model prediction probabilities on the reference and test data
        """
        x_ref, x = self.preprocess(x)
        n_ref, n_cur = len(x_ref), len(x)
        x, y, splits = self.get_splits(x_ref, x)

        # iterate over folds: train a new model for each fold and make out-of-fold (oof) predictions
        preds_oof_list, idx_oof_list = [], []
        for idx_tr, idx_te in splits:
            y_tr = np.eye(2)[y[idx_tr]]
            if isinstance(x, np.ndarray):
                x_tr, x_te = x[idx_tr], x[idx_te]
            elif isinstance(x, list):
                x_tr, x_te = [x[_] for _ in idx_tr], [x[_] for _ in idx_te]
            else:
                raise TypeError(
                    f'x needs to be of type np.ndarray or list and not {type(x)}.'
                )
            ds_tr = self.dataset(x_tr, y_tr)
            self.model = clone_model(self.original_model) if self.retrain_from_scratch \
                else self.model
            train_args = [self.model, self.loss_fn, None]
            self.train_kwargs.update({'dataset': ds_tr})
            trainer(*train_args, **self.train_kwargs)  # type: ignore
            preds = self.predict_fn(x_te, self.model)
            preds_oof_list.append(preds)
            idx_oof_list.append(idx_te)
        preds_oof = np.concatenate(preds_oof_list, axis=0)
        probs_oof = softmax(
            preds_oof, axis=-1) if self.preds_type == 'logits' else preds_oof
        idx_oof = np.concatenate(idx_oof_list, axis=0)
        y_oof = y[idx_oof]
        p_val, dist = self.test_probs(y_oof, probs_oof, n_ref, n_cur)
        probs_sort = probs_oof[np.argsort(idx_oof)]
        return p_val, dist, probs_sort[:n_ref, 1], probs_sort[n_ref:, 1]
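The bookkeeping at the end re-aligns the concatenated out-of-fold predictions with the original instance order, so that the first n_ref rows correspond to the reference set. A minimal numpy illustration with hypothetical toy values:

import numpy as np

idx_oof = np.array([2, 0, 3, 1])                 # order in which instances received out-of-fold predictions
probs_oof = np.array([[.4, .6], [.9, .1], [.2, .8], [.7, .3]])
probs_sort = probs_oof[np.argsort(idx_oof)]      # row i now holds the prediction for instance i
# with n_ref reference instances, probs_sort[:n_ref, 1] are the reference probabilities
# and probs_sort[n_ref:, 1] those of the test instances, as returned by score above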
Example No. 4
    def __init__(self,
                 x_ref: Union[np.ndarray, list],
                 kernel: tf.keras.Model,
                 p_val: float = .05,
                 preprocess_x_ref: bool = True,
                 update_x_ref: Optional[Dict[str, int]] = None,
                 preprocess_fn: Optional[Callable] = None,
                 n_permutations: int = 100,
                 var_reg: float = 1e-5,
                 reg_loss_fn: Callable = (lambda kernel: 0),
                 train_size: Optional[float] = .75,
                 retrain_from_scratch: bool = True,
                 optimizer: tf.keras.optimizers = tf.keras.optimizers.Adam,
                 learning_rate: float = 1e-3,
                 batch_size: int = 32,
                 preprocess_batch_fn: Optional[Callable] = None,
                 epochs: int = 3,
                 verbose: int = 0,
                 train_kwargs: Optional[dict] = None,
                 dataset: Callable = TFDataset,
                 data_type: Optional[str] = None) -> None:
        """
        Maximum Mean Discrepancy (MMD) data drift detector where the kernel is trained to maximise an
        estimate of the test power. The kernel is trained on a split of the reference and test instances
        and then the MMD is evaluated on held out instances and a permutation test is performed.

        For details see Liu et al. (2020): Learning Deep Kernels for Non-Parametric Two-Sample Tests
        (https://arxiv.org/abs/2002.09116).

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        kernel
            Trainable TensorFlow model that returns a similarity between two instances.
        p_val
            p-value used for the significance of the test.
        preprocess_x_ref
            Whether to already preprocess and store the reference data.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before applying the kernel.
        n_permutations
            The number of permutations to use in the permutation test once the MMD has been computed.
        var_reg
            Constant added to the estimated variance of the MMD for stability.
        reg_loss_fn
            The regularisation term reg_loss_fn(kernel) is added to the loss function being optimized.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the kernel.
            The drift is detected on `1 - train_size`.
        retrain_from_scratch
            Whether the kernel should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        optimizer
            Optimizer used during training of the kernel.
        learning_rate
            Learning rate used by optimizer.
        batch_size
            Batch size used during training of the kernel.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
            processed by the kernel.
        epochs
            Number of training epochs for the kernel. An epoch corresponds to one pass over the smaller of the
            reference and test sets.
        verbose
            Verbosity level during the training of the kernel. 0 is silent, 1 a progress bar.
        train_kwargs
            Optional additional kwargs when training the kernel.
        dataset
            Dataset object used during training.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(x_ref=x_ref,
                         p_val=p_val,
                         preprocess_x_ref=preprocess_x_ref,
                         update_x_ref=update_x_ref,
                         preprocess_fn=preprocess_fn,
                         n_permutations=n_permutations,
                         train_size=train_size,
                         retrain_from_scratch=retrain_from_scratch,
                         data_type=data_type)
        self.meta.update({'backend': 'tensorflow'})

        # define and compile kernel
        self.original_kernel = kernel
        self.kernel = clone_model(kernel)

        self.dataset = partial(dataset, batch_size=batch_size, shuffle=True)
        self.kernel_mat_fn = partial(batch_compute_kernel_matrix,
                                     preprocess_fn=preprocess_batch_fn,
                                     batch_size=batch_size)
        self.train_kwargs = {
            'optimizer': optimizer,
            'epochs': epochs,
            'learning_rate': learning_rate,
            'reg_loss_fn': reg_loss_fn,
            'preprocess_fn': preprocess_batch_fn,
            'verbose': verbose
        }
        if isinstance(train_kwargs, dict):
            self.train_kwargs.update(train_kwargs)

        self.j_hat = LearnedKernelDriftTF.JHat(self.kernel, var_reg)
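This backend is normally constructed via the public LearnedKernelDrift factory rather than instantiated directly. A hedged usage sketch, assuming the alibi-detect API with a DeepKernel built around a small projection network (DeepKernel and the keyword arguments below are assumptions based on the library's documented interface, not taken from this snippet):

import numpy as np
import tensorflow as tf
from alibi_detect.cd import LearnedKernelDrift         # assumed public entry point
from alibi_detect.utils.tensorflow import DeepKernel   # assumed trainable deep-kernel helper

x_ref = np.random.randn(200, 10).astype(np.float32)    # reference data
x_test = np.random.randn(200, 10).astype(np.float32)   # data to test for drift

# projection network feeding the trainable kernel
proj = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(8)
])
kernel = DeepKernel(proj)

cd = LearnedKernelDrift(x_ref, kernel, backend='tensorflow',
                        p_val=.05, n_permutations=100, epochs=2)
preds = cd.predict(x_test)                              # wraps the score() method shown in Example No. 2
print(preds['data']['is_drift'], preds['data']['p_val'])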
Example No. 5
    def __init__(self,
                 x_ref: np.ndarray,
                 model: tf.keras.Model,
                 p_val: float = .05,
                 preprocess_x_ref: bool = True,
                 update_x_ref: Optional[Dict[str, int]] = None,
                 preprocess_fn: Optional[Callable] = None,
                 preds_type: str = 'preds',
                 binarize_preds: bool = False,
                 reg_loss_fn: Callable = (lambda model: 0),
                 train_size: Optional[float] = .75,
                 n_folds: Optional[int] = None,
                 retrain_from_scratch: bool = True,
                 seed: int = 0,
                 optimizer: tf.keras.optimizers = tf.keras.optimizers.Adam,
                 learning_rate: float = 1e-3,
                 batch_size: int = 32,
                 preprocess_batch_fn: Optional[Callable] = None,
                 epochs: int = 3,
                 verbose: int = 0,
                 train_kwargs: Optional[dict] = None,
                 dataset: Callable = TFDataset,
                 data_type: Optional[str] = None) -> None:
        """
        Classifier-based drift detector. The classifier is trained on a fraction of the combined
        reference and test data and drift is detected on the remaining data. To use all the data
        to detect drift, a stratified cross-validation scheme can be chosen.

        Parameters
        ----------
        x_ref
            Data used as reference distribution.
        model
            TensorFlow classification model used for drift detection.
        p_val
            p-value used for the significance of the test.
        preprocess_x_ref
            Whether to already preprocess and store the reference data.
        update_x_ref
            Reference data can optionally be updated to the last n instances seen by the detector
            or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
            for reservoir sampling {'reservoir_sampling': n} is passed.
        preprocess_fn
            Function to preprocess the data before computing the data drift metrics.
        preds_type
            Whether the model outputs 'probs' or 'logits'.
        binarize_preds
            Whether to test for discrepancy on soft (e.g. prob/log-prob) model predictions directly
            with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
        reg_loss_fn
            The regularisation term reg_loss_fn(model) is added to the loss function being optimized.
        train_size
            Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
            The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
        n_folds
            Optional number of stratified folds used for training. Predictions are then calculated
            on all the out-of-fold instances. This allows all the reference and test data to be leveraged
            for drift detection, at the expense of longer computation. If both `train_size` and `n_folds`
            are specified, `n_folds` is prioritized.
        retrain_from_scratch
            Whether the classifier should be retrained from scratch for each set of test data or whether
            it should instead continue training from where it left off on the previous set.
        seed
            Optional random seed for fold selection.
        optimizer
            Optimizer used during training of the classifier.
        learning_rate
            Learning rate used by optimizer.
        batch_size
            Batch size used during training of the classifier.
        preprocess_batch_fn
            Optional batch preprocessing function. For example to convert a list of objects to a batch which can
            be processed by the model.
        epochs
            Number of training epochs for the classifier for each (optional) fold.
        verbose
            Verbosity level during the training of the classifier.
            0 is silent, 1 a progress bar and 2 prints the statistics after each epoch.
        train_kwargs
            Optional additional kwargs when fitting the classifier.
        dataset
            Dataset object used during training.
        data_type
            Optionally specify the data type (tabular, image or time-series). Added to metadata.
        """
        super().__init__(x_ref=x_ref,
                         p_val=p_val,
                         preprocess_x_ref=preprocess_x_ref,
                         update_x_ref=update_x_ref,
                         preprocess_fn=preprocess_fn,
                         preds_type=preds_type,
                         binarize_preds=binarize_preds,
                         train_size=train_size,
                         n_folds=n_folds,
                         retrain_from_scratch=retrain_from_scratch,
                         seed=seed,
                         data_type=data_type)
        self.meta.update({'backend': 'tensorflow'})

        # define and compile classifier model
        self.original_model = model
        self.model = clone_model(model)
        self.loss_fn = BinaryCrossentropy(
            from_logits=(self.preds_type == 'logits'))
        self.dataset = partial(dataset, batch_size=batch_size, shuffle=True)
        self.predict_fn = partial(predict_batch,
                                  preprocess_fn=preprocess_batch_fn,
                                  batch_size=batch_size)
        self.train_kwargs = {
            'optimizer': optimizer(learning_rate=learning_rate),
            'epochs': epochs,
            'reg_loss_fn': reg_loss_fn,
            'preprocess_fn': preprocess_batch_fn,
            'verbose': verbose
        }
        if isinstance(train_kwargs, dict):
            self.train_kwargs.update(train_kwargs)
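As with the learned-kernel detector, this backend is usually reached through the public ClassifierDrift factory. A hedged usage sketch, assuming the alibi-detect API (the two-unit output with preds_type='logits' matches the BinaryCrossentropy(from_logits=True) loss configured above):

import numpy as np
import tensorflow as tf
from alibi_detect.cd import ClassifierDrift   # assumed public entry point

x_ref = np.random.randn(200, 10).astype(np.float32)
x_test = np.random.randn(200, 10).astype(np.float32)

# simple binary classifier distinguishing reference from test instances
model = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(2)
])

cd = ClassifierDrift(x_ref, model, backend='tensorflow', p_val=.05,
                     preds_type='logits', n_folds=3, epochs=2)
preds = cd.predict(x_test)                    # wraps the score() method shown in Example No. 3
print(preds['data']['is_drift'], preds['data']['p_val'])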