Example #1
    def test_TrainerGrad_MutualInfo(self):
        set_seed(2)
        mi_history = []

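        # Debug subclass that records the MI estimate pair for layer 'mlp.2' on every plot call.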
        class MutualInfoNPEETDebug(MutualInfoNPEET):
            def plot(self_mi, viz):
                mi_history.append(self_mi.information['mlp.2'])
                super().plot(viz)

        data_loader = DataLoader(MNIST,
                                 eval_size=100,
                                 transform=TransformDefault.mnist())
        trainer = TrainerGrad(self.model,
                              criterion=nn.CrossEntropyLoss(),
                              data_loader=data_loader,
                              optimizer=self.optimizer,
                              mutual_info=MutualInfoNPEETDebug(
                                  data_loader=data_loader, pca_size=None),
                              scheduler=self.scheduler)
        trainer.monitor.advanced_monitoring(level=MonitorLevel.FULL)
        trainer.train(n_epochs=1, mutual_info_layers=1)
        mi_history = np.vstack(mi_history)
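        # Every recorded MI estimate must be strictly positive.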
        assert_array_less(0, mi_history)
        x_last, y_last = mi_history[-1]

        # the model is well trained
        self.assertGreater(y_last, 2.5)
        self.assertGreater(x_last, 2.2)
        self.assertLessEqual(x_last, y_last)
Example #2
 def test_reshape(self):
     set_seed(2)
     model = Reshape(height=10, width=12)
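     # The flat (batch, C*H*W) input should be restored to (batch, C, H, W).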
     tensor = torch.rand(5, 3 * model.height * model.width)
     output = model(tensor)
     assert_array_equal(output, tensor.view(5, 3, model.height,
                                            model.width))
Example #3
 def test_rand_pairs(self):
     set_seed(1)
     outputs = torch.randn(self.labels.shape[0], 30)
     for loss_model in self.loss_models:
         with self.subTest(loss_model=loss_model):
             loss = loss_model(outputs, self.labels)
             self.assertGreater(loss, 0.)
Example #4
 def test_mlp(self):
     set_seed(3)
     in_features, hidden_features, output_features = 32, 12, 17
     model = MLP(in_features, hidden_features, output_features)
     tensor = torch.rand(5, in_features)
     output = model(tensor)
     self.assertEqual(output.shape, (tensor.shape[0], output_features))
Example #5
 def test_exponential_moving_average_psnr(self):
     set_seed(1)
     noise = torch.rand(100)
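     # Smoothing must preserve the shape and reduce the standard deviation of white noise.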
     smoothed = exponential_moving_average(noise, window=3)
     psnr = peak_to_signal_noise_ratio(noise, smoothed)
     self.assertGreaterEqual(psnr, 15.7)
     self.assertEqual(smoothed.shape, noise.shape)
     self.assertLess(smoothed.std(), noise.std())
Example #6
 def test_softshrink(self):
     set_seed(16)
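     # A learnable Softshrink pinned to a fixed lambd must match nn.Softshrink.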
     lambd = 0.1
     softshrink = Softshrink(n_features=1)
     softshrink.lambd.data[:] = lambd
     softshrink_gt = nn.Softshrink(lambd=lambd)
     tensor = torch.randn(10, 20)
     assert_array_almost_equal(
         softshrink(tensor).detach(), softshrink_gt(tensor))
Example #7
 def test_NormalizeInverse(self):
     set_seed(0)
     mean = torch.rand(3)
     std = torch.rand_like(mean)
     normalize = Normalize(mean=mean, std=std, inplace=False)
     normalize_inverse = NormalizeInverse(mean=mean, std=std)
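     # NormalizeInverse must undo Normalize up to float precision.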
     tensor = torch.rand(5, 3, 12, 12)
     tensor_normalized = normalize(tensor)
     tensor_restored = normalize_inverse(tensor_normalized)
     assert_array_almost_equal(tensor_restored, tensor)
Example #8
 def test_TrainerAutoencoder(self):
     set_seed(4)
     model = AutoencoderLinear(784, 64)
     trainer = TrainerAutoencoder(model,
                                  criterion=nn.BCEWithLogitsLoss(),
                                  data_loader=self.data_loader,
                                  optimizer=self.optimizer,
                                  scheduler=self.scheduler)
     loss_epochs = trainer.train(n_epochs=1, mutual_info_layers=0)
     assert_array_almost_equal(loss_epochs, [0.69737625122])
Example #9
 def test_get_normalize_inverse(self):
     set_seed(1)
     mean = torch.rand(3)
     std = torch.rand_like(mean)
     normalize = Normalize(mean=mean, std=std)
     normalize_inverse = NormalizeInverse(mean=mean, std=std)
     normalize_inverse2 = get_normalize_inverse(normalize)
     assert_array_almost_equal(normalize_inverse2.mean,
                               normalize_inverse.mean)
     assert_array_almost_equal(normalize_inverse2.std,
                               normalize_inverse.std)
Example #10
 def test_TrainerEmbedding(self):
     set_seed(3)
     criterion = TripletLossSampler(nn.TripletMarginLoss())
     trainer = TrainerEmbedding(self.model,
                                criterion=criterion,
                                data_loader=self.data_loader,
                                optimizer=self.optimizer,
                                scheduler=self.scheduler)
     loss_epochs = trainer.train(n_epochs=1, mutual_info_layers=0)
     # CircleCI outputs 0.103
     assert_array_almost_equal(loss_epochs, [0.09936], decimal=2)
Example #11
 def setUp(self):
     set_seed(0)
     n_classes = 3
     labels = torch.arange(n_classes)
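     # Duplicate labels and outputs so every class has identical positive pairs.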
     self.labels = torch.cat([labels, labels])
     outputs_same = torch.randn(n_classes, 30)
     self.outputs_same = torch.cat([outputs_same, outputs_same])
     self.loss_models = (ContrastiveLossSampler(
         nn.CosineEmbeddingLoss(margin=0.5)),
                         TripletLossSampler(nn.TripletMarginLoss()),
                         TripletLossSampler(TripletCosineLoss()))
Example #12
    def setUp(self):
        set_seed(1)
        self.model = MLP(784, 64, 10)
        self.data_loader = DataLoader(MNIST,
                                      eval_size=10000,
                                      transform=TransformDefault.mnist())

        self.optimizer = torch.optim.Adam(filter(
            lambda param: param.requires_grad, self.model.parameters()),
                                          lr=1e-3,
                                          weight_decay=1e-5)
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer)
Example #13
 def test_AutoEncoderLinear(self):
     set_seed(0)
     in_features, hidden_features = 16, 64
     batch_size = 5
     model = AutoencoderLinear(in_features, hidden_features)
     self.assertEqual(model.encoding_dim, hidden_features)
     self.assertIsInstance(model.encoder, nn.Module)
     self.assertIsInstance(model.decoder, nn.Module)
     tensor = torch.rand(batch_size, in_features)
     output = model(tensor)
     self.assertIsInstance(output, AutoencoderOutput)
     self.assertEqual(output.latent.shape, (batch_size, hidden_features))
     self.assertEqual(output.reconstructed.shape, (batch_size, in_features))
Example #14
 def test_TrainerGrad(self):
     set_seed(2)
     trainer = TrainerGrad(self.model,
                           criterion=nn.CrossEntropyLoss(),
                           data_loader=self.data_loader,
                           optimizer=self.optimizer,
                           scheduler=self.scheduler)
     loss_epochs = trainer.train(n_epochs=1,
                                 mutual_info_layers=0,
                                 mask_explain_params=dict())
     assert_array_almost_equal(loss_epochs, [0.219992])
     trainer.save()
     trainer.restore()
Example #15
 def test_basis_pursuit_admm_convergence(self):
     set_seed(12)
     n_features, n_atoms = 10, 50
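     # Overcomplete dictionary: more atoms than input features.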
     dictionary = torch.randn(n_features, n_atoms)
     tensor_x = torch.randn(3, n_features)
     solver = BasisPursuitADMM(lambd=0.1, max_iters=1000)
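     # Track per-call statistics (iterations, residual norm, PSNR) online.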
     solver.save_stats = True
     tensor_z = solver.solve(A=dictionary, b=tensor_x)
     iterations = solver.online['iterations'].get_mean().item()
     self.assertLessEqual(iterations, solver.max_iters)
     dv_norm = solver.online['dv_norm'].get_mean().item()
     self.assertLessEqual(dv_norm, solver.tol)
     x_restored = tensor_z.matmul(dictionary.t())
     assert_array_almost_equal(tensor_x, x_restored, decimal=1)
     psnr = solver.online['psnr'].get_mean().item()
     self.assertGreater(psnr, 41.)  # 41 is taken from the output
Example #16
    def test_force_update(self):
        set_seed(1)
        mi_instance_pca = self._init_mutual_info(pca_size=20)
        mi_instance_pca.prepare(self.model)

        self.mi_instance.force_update(self.model)
        mi_instance_pca.force_update(self.model)
        mi_no_pca = self.mi_instance.information
        mi_pca = mi_instance_pca.information
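        # PCA must not change the set of monitored layers.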
        self.assertEqual(mi_pca.keys(), mi_no_pca.keys())
        for mi_layers_dict in (mi_pca, mi_no_pca):
            for mi_x, mi_y in mi_layers_dict.values():
                # MI must be non-negative
                self.assertGreater(mi_x, 0)
                self.assertGreater(mi_y, 0)
        for layer_name in mi_pca.keys():
            self._test_estimated_values(mi_pca[layer_name],
                                        mi_no_pca[layer_name])
        mi_instance_pca.plot(self.viz)
        mi_instance_pca.plot_activations_hist(self.viz)
Example #17
 def test_lista_forward_best(self):
     set_seed(16)
     solver = BasisPursuitADMM()
     in_features = 10
     out_features = 40
     lista = LISTA(in_features=in_features,
                   out_features=out_features,
                   solver=solver)
     mp = MatchingPursuit(in_features=in_features,
                          out_features=out_features,
                          solver=solver)
     tensor = torch.randn(5, in_features)
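     # Share the dictionary weights so both models solve the same sparse coding problem.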
     with torch.no_grad():
         mp.normalize_weight()
         lista.weight_input.data = mp.weight.data.clone()
         mp_output = mp(tensor)
         lista_output_best = lista.forward_best(tensor)
     assert_array_almost_equal(lista_output_best.latent, mp_output.latent)
     assert_array_almost_equal(lista_output_best.reconstructed,
                               mp_output.reconstructed)
Example #18
    def test_TrainerAutoencoder_cached(self):
        set_seed(3)
        model = AutoencoderLinear(784, 64)
        trainer = TrainerAutoencoder(
            model,
            criterion=nn.BCEWithLogitsLoss(),
            data_loader=self.data_loader,
            optimizer=self.optimizer,
            scheduler=self.scheduler,
            accuracy_measure=AccuracyEmbedding(cache=True))
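        # Run one pass with the cached accuracy measure, then repeat without caching.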
        trainer.open_monitor(offline=True)

        # fix the seed so both passes see identical batches
        set_seed(4)
        loss_cached = trainer.full_forward_pass(train=True)
        accuracy_cached = trainer.update_accuracy(train=True)

        trainer.accuracy_measure.cache = False
        trainer.accuracy_measure.reset()

        set_seed(4)
        loss = trainer.full_forward_pass(train=True)
        accuracy = trainer.update_accuracy(train=True)

        self.assertAlmostEqual(loss_cached.item(), loss.item())
        self.assertAlmostEqual(accuracy_cached, accuracy)
Example #19
    def test_TrainerEmbedding_cached(self):
        set_seed(3)
        criterion = TripletLossSampler(nn.TripletMarginLoss())
        trainer = TrainerEmbedding(
            self.model,
            criterion=criterion,
            data_loader=self.data_loader,
            optimizer=self.optimizer,
            scheduler=self.scheduler,
            accuracy_measure=AccuracyEmbedding(cache=True))
        trainer.open_monitor(offline=True)

        # TripletLoss is not deterministic; fix the seed
        set_seed(4)
        loss_cached = trainer.full_forward_pass(train=True)
        accuracy_cached = trainer.update_accuracy(train=True)

        trainer.accuracy_measure.cache = False
        trainer.accuracy_measure.reset()

        set_seed(4)
        loss = trainer.full_forward_pass(train=True)
        accuracy = trainer.update_accuracy(train=True)

        self.assertAlmostEqual(loss_cached.item(), loss.item())
        self.assertAlmostEqual(accuracy_cached, accuracy)
Example #20
 def test_basis_pursuit_admm_as_numpy(self):
     set_seed(12)
     n_features, n_atoms = 10, 50
     dictionary = torch.randn(n_features, n_atoms)
     tensor_x = torch.randn(3, n_features)
     lambd = 0.1
     tol_decimal = 4
     tol = 10**(-tol_decimal)
     max_iters = 100
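     # The batched PyTorch solver must agree with NumPy solving each right-hand side separately.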
     z_pytorch = bp_pytorch(A=dictionary,
                            b=tensor_x,
                            lambd=lambd,
                            tol=tol,
                            max_iters=max_iters).numpy()
     z_numpy = [
         bp_numpy(A=dictionary.numpy(),
                  b=b,
                  lambd=lambd,
                  tol=tol,
                  max_iters=max_iters) for b in tensor_x
     ]
     assert_array_almost_equal(z_pytorch, z_numpy, decimal=tol_decimal)
Example #21
 def test_flatten(self):
     set_seed(1)
     model = Flatten()
     tensor = torch.rand(5, 1, 28, 28)
     output = model(tensor)
     assert_array_equal(output, tensor.flatten(start_dim=1))
Example #22
 def setUp(self):
     set_seed(10)
     self.image = torch.rand(1, 10, 10)
     self.model = MLP(self.image.nelement(), 10)
     self.label = 3
Example #23
 def test_cifar10(self):
     set_seed(0)
     transform = TransformDefault.cifar10()
     loader = DataLoader(dataset_cls=CIFAR10, transform=transform)
     self._test_dataset(loader)

Example #24
def test_kmeans():
    outputs, labels = synthesize_log_softmax_data(n_samples=20000,
                                                  n_classes=10,
                                                  p_argmax=0.99)
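    # Quantize activations into 20 bins, then estimate MI from the discrete joint distribution.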
    estimator = MutualInfoKMeans(n_bins=20, debug=False)
    quantized = estimator.quantize(outputs)
    estimated = MutualInfoKMeans.compute_mutual_info(quantized, labels)
    print(f"KMeans Mutual Information estimate: {estimated:.3f}")


def test_gcmi():
    """
    Test Gaussian-Copula Mutual Information estimator
    """
    outputs, labels = synthesize_log_softmax_data(n_samples=20000,
                                                  n_classes=10,
                                                  p_argmax=0.99)
    estimated = micd(outputs.numpy().T, labels.numpy())
    print(f"Gaussian-Copula Mutual Information estimate: {estimated:.3f}")


if __name__ == '__main__':
    set_seed(26)
    # expected estimated value: log2(10) ~ 3.322
    test_kmeans()
    test_mine()
    test_gcmi()
Example #25
 def setUp(self):
     set_seed(0)
     self.mi_instance = self._init_mutual_info(pca_size=None)
     self.mi_instance.prepare(self.model)
Example #26
    n_epochs = max(n_epochs - trainer.epoch, 0)
    trainer.train(n_epochs=n_epochs, mutual_info_layers=0)
    return trainer.model


def train_lista(dataset_cls=MNIST):
    model_ref = train_matching_pursuit(dataset_cls=dataset_cls)
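    # LISTA is trained to imitate the pretrained Matching Pursuit reference model.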
    model = LISTA(784, out_features=model_ref.out_features)
    data_loader = DataLoader(dataset_cls,
                             transform=TransformDefault.mnist(
                                 normalize=None
                             ))
    criterion = nn.MSELoss()
    optimizer, scheduler = get_optimizer_scheduler(model)
    trainer = TrainLISTA(model,
                         model_reference=model_ref,
                         criterion=criterion,
                         data_loader=data_loader,
                         optimizer=optimizer,
                         scheduler=scheduler,
                         accuracy_measure=AccuracyEmbedding())
    # trainer.monitor.advanced_monitoring(level=MonitorLevel.FULL)
    trainer.train(n_epochs=10, mutual_info_layers=0)


if __name__ == '__main__':
    set_seed(28)
    # test_matching_pursuit()
    # train_matching_pursuit()
    train_lista()
Example #27
 def test_mnist(self):
     set_seed(0)
     transform = TransformDefault.mnist()
     loader = DataLoader(dataset_cls=MNIST, transform=transform)
     self._test_dataset(loader)