Example #1
    def test_TrainerGrad_MutualInfo(self):
        set_seed(2)
        mi_history = []

        class MutualInfoNPEETDebug(MutualInfoNPEET):
            # 'self_mi' is used instead of 'self' to avoid confusion with the
            # enclosing test case; each plot() call records the current
            # mutual-info estimates of the 'mlp.2' layer.
            def plot(self_mi, viz):
                mi_history.append(self_mi.information['mlp.2'])
                super().plot(viz)

        data_loader = DataLoader(MNIST,
                                 eval_size=100,
                                 transform=TransformDefault.mnist())
        trainer = TrainerGrad(self.model,
                              criterion=nn.CrossEntropyLoss(),
                              data_loader=data_loader,
                              optimizer=self.optimizer,
                              mutual_info=MutualInfoNPEETDebug(
                                  data_loader=data_loader, pca_size=None),
                              scheduler=self.scheduler)
        trainer.monitor.advanced_monitoring(level=MonitorLevel.FULL)
        trainer.train(n_epochs=1, mutual_info_layers=1)
        mi_history = np.vstack(mi_history)
        assert_array_less(0, mi_history)
        x_last, y_last = mi_history[-1]

        # the model is well trained
        self.assertGreater(y_last, 2.5)
        self.assertGreater(x_last, 2.2)
        self.assertLessEqual(x_last, y_last)
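
The pair appended on every plot() call is presumably the information-plane point of the 'mlp.2' layer: the first entry estimates I(X; T) and the second I(T; Y), which is what the final assertions check. A minimal sketch of how such a point could be computed with NPEET's k-NN estimators (assuming the npeet package; the helper name is illustrative):

import numpy as np
from npeet.entropy_estimators import mi, micd

def information_plane_point(inputs, hidden, labels, k=3):
    # inputs: (n, d_in) flattened input batch; hidden: (n, d_hidden)
    # activations of the layer T; labels: (n,) integer class labels
    info_x = mi(inputs, hidden, k=k)                   # I(X; T), continuous
    info_y = micd(hidden, labels[:, np.newaxis], k=k)  # I(T; Y), Y discrete
    return info_x, info_y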
Example #2
def train_autoenc(n_epoch=60, dataset_cls=MNIST):
    kwta = KWinnersTakeAllSoft(hardness=2, sparsity=0.05)
    # kwta = SynapticScaling(kwta, synaptic_scale=3.0)
    model = AutoencoderLinearKWTA(input_dim=784, encoding_dim=256, kwta=kwta)
    # model = MLP(784, 128, 64)
    if isinstance(model, AutoencoderLinearKWTATanh):
        # normalize in range [-1, 1]
        normalize = transforms.Normalize(mean=(0.5, ), std=(0.5, ))
        criterion = nn.MSELoss()
        reconstr_thr = torch.linspace(-0.5, 0.9, steps=10, dtype=torch.float32)
    else:
        normalize = None
        criterion = nn.BCEWithLogitsLoss()
        reconstr_thr = torch.linspace(0.1, 0.95, steps=10, dtype=torch.float32)
    optimizer, scheduler = get_optimizer_scheduler(model)
    data_loader = DataLoader(
        dataset_cls, transform=TransformDefault.mnist(normalize=normalize))
    kwta_scheduler = KWTAScheduler(model=model,
                                   step_size=10,
                                   gamma_sparsity=0.7,
                                   min_sparsity=0.05,
                                   gamma_hardness=2,
                                   max_hardness=10)
    trainer = TrainerAutoencoderBinary(model,
                                       criterion=criterion,
                                       data_loader=data_loader,
                                       optimizer=optimizer,
                                       scheduler=scheduler,
                                       kwta_scheduler=kwta_scheduler,
                                       reconstruct_threshold=reconstr_thr,
                                       accuracy_measure=AccuracyEmbeddingKWTA(
                                           cache=model.encoding_dim <= 2048))
    # trainer.restore()  # uncomment to restore the saved state
    # trainer.monitor.advanced_monitoring(level=MonitorLevel.FULL)
    trainer.train(n_epochs=n_epoch)
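
For reference, a minimal sketch of the hard k-winners-take-all operation that KWinnersTakeAllSoft relaxes: keep the top k = sparsity * n activations of each sample and zero out the rest. The 'hardness' parameter presumably controls how sharply the soft relaxation approaches this hard mask. A self-contained illustration, not the library code:

import torch

def kwta_hard(x, sparsity=0.05):
    # x: (batch, n) activations
    k = max(int(sparsity * x.shape[1]), 1)
    winners = x.topk(k, dim=1).indices
    mask = torch.zeros_like(x).scatter_(1, winners, 1.0)
    return x * mask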
Example #3
def train_mask():
    """
    Train explainable mask for an image from ImageNet, using pretrained model.
    """
    model = torchvision.models.vgg19(pretrained=True)
    model.eval()
    for param in model.parameters():
        param.requires_grad_(False)
    transform = TransformDefault.imagenet()
    accuracy_measure = AccuracyArgmax()
    monitor = Monitor(
        accuracy_measure=accuracy_measure,
        mutual_info=MutualInfoKMeans(),
        normalize_inverse=get_normalize_inverse(transform),
    )
    monitor.open(env_name='mask')
    image = Image.open("images/flute.jpg")
    image = transform(image)
    mask_trainer = MaskTrainer(accuracy_measure=accuracy_measure,
                               image_shape=image.shape,
                               show_progress=True)
    monitor.log(repr(mask_trainer))
    if torch.cuda.is_available():
        model.cuda()
        image = image.cuda()
    outputs = model(image.unsqueeze(dim=0))
    proba = accuracy_measure.predict_proba(outputs)
    proba_max, label_true = proba[0].max(dim=0)
    print(f"True label: {label_true} (confidence {proba_max: .5f})")
    monitor.plot_explain_input_mask(model=model,
                                    mask_trainer=mask_trainer,
                                    image=image,
                                    label=label_true)
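
MaskTrainer's internals are not shown in this listing; the sketch below illustrates the underlying idea in the spirit of meaningful-perturbation explanations: optimize a per-pixel mask so that the masked image keeps the true class probable while the mask itself stays small. All names here are illustrative, not the MaskTrainer API:

import torch
import torch.nn.functional as F

def train_mask_sketch(model, image, label, n_steps=100, lambda_l1=1e-2):
    # image: (3, H, W) normalized tensor; label: scalar class index
    mask = torch.full_like(image[:1], 0.5)  # single-channel mask in [0, 1]
    mask.requires_grad_(True)
    optimizer = torch.optim.Adam([mask], lr=0.1)
    for _ in range(n_steps):
        optimizer.zero_grad()
        masked = image * mask.clamp(0, 1)  # mask broadcasts over channels
        log_proba = F.log_softmax(model(masked.unsqueeze(0)), dim=1)
        # keep the true class likely while penalizing large masks
        loss = -log_proba[0, label] + lambda_l1 * mask.abs().mean()
        loss.backward()
        optimizer.step()
    return mask.detach().clamp(0, 1)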
Example #4
def train_matching_pursuit(dataset_cls=MNIST, n_epochs=5):
    # Typically, 'out_features', the second parameter of the MatchingPursuit
    # model, should be greater than 'in_features' (an overcomplete
    # dictionary). In the case of MNIST, it works even with smaller values,
    # but the resulting embedding vector is dense.

    model = MatchingPursuit(784, 256, solver=BasisPursuitADMM())
    # model = LISTA(784, 128)

    data_loader = DataLoader(dataset_cls,
                             transform=TransformDefault.mnist(
                                 normalize=None
                             ))
    criterion = LossPenalty(nn.MSELoss(), lambd=model.lambd)
    optimizer, scheduler = get_optimizer_scheduler(model)
    trainer = TrainMatchingPursuit(model,
                                   criterion=criterion,
                                   data_loader=data_loader,
                                   optimizer=optimizer,
                                   scheduler=scheduler,
                                   accuracy_measure=AccuracyEmbedding())
    trainer.restore()
    n_epochs = max(n_epochs - trainer.epoch, 0)
    trainer.train(n_epochs=n_epochs, mutual_info_layers=0)
    return trainer.model
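
BasisPursuitADMM above presumably solves the sparse-coding objective ||x - W z||^2 + lambd * ||z||_1, the same objective that LossPenalty expresses as MSE plus a penalty term. For context, a minimal sketch of the simpler ISTA solver for this objective; LISTA, used in a later example, is a learned unrolling of exactly this iteration. Names are illustrative:

import torch

def soft_threshold(z, thr):
    return z.sign() * (z.abs() - thr).clamp(min=0)

def ista(x, W, lambd=0.1, n_iter=50):
    # x: (batch, in_features); W: (in_features, out_features) dictionary
    step = 1.0 / torch.linalg.matrix_norm(W, ord=2) ** 2  # 1 / Lipschitz
    z = x.new_zeros(x.shape[0], W.shape[1])
    for _ in range(n_iter):
        grad = (z @ W.T - x) @ W  # gradient of 0.5 * ||x - z W^T||^2
        z = soft_threshold(z - step * grad, step * lambd)
    return z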
Example #5
def test(model, n_epoch=500, dataset_cls=MNIST):
    model.eval()
    for param in model.parameters():
        param.requires_grad_(False)
    criterion = nn.CrossEntropyLoss()
    data_loader = DataLoader(dataset_cls, transform=TransformDefault.mnist())
    trainer = Test(model=model, criterion=criterion, data_loader=data_loader)
    trainer.train(n_epochs=n_epoch)
Example #6
    def setUp(self):
        set_seed(1)
        self.model = MLP(784, 64, 10)
        self.data_loader = DataLoader(MNIST,
                                      eval_size=10000,
                                      transform=TransformDefault.mnist())

        self.optimizer = torch.optim.Adam(filter(
            lambda param: param.requires_grad, self.model.parameters()),
                                          lr=1e-3,
                                          weight_decay=1e-5)
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer)
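
The standalone examples in this listing rely on a get_optimizer_scheduler(model) helper that is never shown. Judging by this setUp, a plausible sketch of it (an assumption, not the actual helper):

import torch

def get_optimizer_scheduler(model, lr=1e-3, weight_decay=1e-5):
    optimizer = torch.optim.Adam(
        filter(lambda param: param.requires_grad, model.parameters()),
        lr=lr,
        weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
    return optimizer, scheduler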
Example #7
def train_grad(n_epoch=30, dataset_cls=MNIST):
    model = MLP(784, 128, 10)
    optimizer, scheduler = get_optimizer_scheduler(model)
    criterion = nn.CrossEntropyLoss()
    data_loader = DataLoader(dataset_cls, transform=TransformDefault.mnist())
    trainer = TrainerGrad(model,
                          criterion=criterion,
                          data_loader=data_loader,
                          optimizer=optimizer,
                          scheduler=scheduler)
    trainer.restore()  # restore the saved state, if any
    # trainer.monitor.advanced_monitoring(level=MonitorLevel.SIGNAL_TO_NOISE)
    trainer.train(n_epochs=n_epoch)
Example #8
def train_grad(n_epoch=10, dataset_cls=MNIST):
    model = MLP(784, 128, 10)
    optimizer, scheduler = get_optimizer_scheduler(model)
    data_loader = DataLoader(dataset_cls, transform=TransformDefault.mnist())
    trainer = TrainerGrad(model,
                          criterion=nn.CrossEntropyLoss(),
                          data_loader=data_loader,
                          optimizer=optimizer,
                          mutual_info=MutualInfoNeuralEstimation(data_loader),
                          scheduler=scheduler)
    # trainer.restore()  # uncomment to restore the saved state
    trainer.monitor.advanced_monitoring(level=MonitorLevel.SIGNAL_TO_NOISE)
    trainer.train(n_epochs=n_epoch, mutual_info_layers=1)
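
MutualInfoNeuralEstimation presumably follows the MINE approach: a small statistics network T(x, z) is trained to maximize the Donsker-Varadhan lower bound I(X; Z) >= E[T(x, z)] - log E[exp T(x, z')], where z' is shuffled to sample the product of marginals. A minimal self-contained sketch, not the library code:

import math
import torch
import torch.nn as nn

class MineStatistic(nn.Module):
    def __init__(self, x_dim, z_dim, hidden=64):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(x_dim + z_dim, hidden),
                                 nn.ReLU(),
                                 nn.Linear(hidden, 1))

    def forward(self, x, z):
        return self.net(torch.cat([x, z], dim=1))

def mine_lower_bound(statistic, x, z):
    joint = statistic(x, z).mean()
    t_marginal = statistic(x, z[torch.randperm(z.shape[0])])
    # log E[exp T] computed stably with logsumexp
    log_mean_exp = torch.logsumexp(t_marginal, dim=0) - math.log(len(t_marginal))
    return joint - log_mean_exp  # maximize this to tighten the bound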
Example #9
def test_matching_pursuit_lambdas(dataset_cls=MNIST):
    model = MatchingPursuit(784, 2048)
    data_loader = DataLoader(dataset_cls,
                             transform=TransformDefault.mnist(
                                 normalize=None
                             ))
    bmp_lambdas = torch.linspace(0.05, 0.95, steps=10)
    trainer = TestMatchingPursuitParameters(model,
                                            criterion=nn.MSELoss(),
                                            data_loader=data_loader,
                                            bmp_params_range=bmp_lambdas,
                                            param_name='lambda')
    trainer.train(n_epochs=1, mutual_info_layers=0)
Example #10
def test_matching_pursuit(dataset_cls=MNIST):
    # vanilla Matching Pursuit: the weights are fixed, no training
    model = MatchingPursuit(784, 2048)
    data_loader = DataLoader(dataset_cls,
                             eval_size=10000,
                             transform=TransformDefault.mnist(
                                 normalize=None
                             ))
    trainer = TestMatchingPursuit(model,
                                  criterion=nn.MSELoss(),
                                  data_loader=data_loader,
                                  optimizer=OptimizerStub(),
                                  accuracy_measure=AccuracyEmbedding())
    trainer.train(n_epochs=1, mutual_info_layers=0)
Example #11
def train_autoencoder(n_epoch=60, dataset_cls=MNIST):
    model = AutoencoderLinear(784, 128)
    data_loader = DataLoader(dataset_cls,
                             transform=TransformDefault.mnist(normalize=None))
    criterion = nn.BCEWithLogitsLoss()
    optimizer, scheduler = get_optimizer_scheduler(model)
    trainer = TrainerAutoencoder(
        model,
        criterion=criterion,
        data_loader=data_loader,
        optimizer=optimizer,
        scheduler=scheduler,
        accuracy_measure=AccuracyEmbedding(cache=True))
    # trainer.restore()  # uncomment to restore the saved state
    trainer.monitor.advanced_monitoring(level=MonitorLevel.SIGNAL_TO_NOISE)
    trainer.train(n_epochs=n_epoch, mutual_info_layers=0)
Example #12
def train_lista(dataset_cls=MNIST):
    model_ref = train_matching_pursuit(dataset_cls=dataset_cls)
    model = LISTA(784, out_features=model_ref.out_features)
    data_loader = DataLoader(dataset_cls,
                             transform=TransformDefault.mnist(
                                 normalize=None
                             ))
    criterion = nn.MSELoss()
    optimizer, scheduler = get_optimizer_scheduler(model)
    trainer = TrainLISTA(model,
                         model_reference=model_ref,
                         criterion=criterion,
                         data_loader=data_loader,
                         optimizer=optimizer,
                         scheduler=scheduler,
                         accuracy_measure=AccuracyEmbedding())
    # trainer.monitor.advanced_monitoring(level=MonitorLevel.FULL)
    trainer.train(n_epochs=10, mutual_info_layers=0)
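
For context, a minimal sketch of the LISTA idea (Learned ISTA): unroll a few soft-thresholding steps of the ista() sketch shown after Example #4, but with learned weight matrices and a learned threshold. The actual LISTA class used above may differ:

import torch
import torch.nn as nn

class LISTASketch(nn.Module):
    def __init__(self, in_features, out_features, n_steps=3):
        super().__init__()
        self.encode = nn.Linear(in_features, out_features, bias=False)
        self.lateral = nn.Linear(out_features, out_features, bias=False)
        self.threshold = nn.Parameter(torch.tensor(0.1))
        self.n_steps = n_steps

    def forward(self, x):
        b = self.encode(x)
        z = torch.zeros_like(b)
        for _ in range(self.n_steps):
            pre = b + self.lateral(z)
            z = pre.sign() * (pre.abs() - self.threshold).clamp(min=0)
        return z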
Example #13
def train_pretrained(n_epoch=500, dataset_cls=CIFAR10):
    model = models.cifar.CIFAR10(pretrained=True)
    for param in model.parameters():
        param.requires_grad_(False)
    kwta = KWinnersTakeAllSoft(sparsity=0.3)
    model.classifier = nn.Sequential(nn.Linear(1024, 128, bias=False), kwta)
    optimizer, scheduler = get_optimizer_scheduler(model)
    criterion = ContrastiveLossSampler(nn.CosineEmbeddingLoss(margin=0.5))
    data_loader = DataLoader(dataset_cls, transform=TransformDefault.cifar10())
    kwta_scheduler = KWTAScheduler(model=model,
                                   step_size=15,
                                   gamma_sparsity=0.7,
                                   min_sparsity=0.05,
                                   gamma_hardness=2,
                                   max_hardness=10)
    trainer = TrainerEmbedding(model=model,
                               criterion=criterion,
                               data_loader=data_loader,
                               optimizer=optimizer,
                               scheduler=scheduler,
                               kwta_scheduler=kwta_scheduler,
                               accuracy_measure=AccuracyEmbeddingKWTA(),
                               env_suffix='')
    trainer.train(n_epochs=n_epoch, mutual_info_layers=1)
Example #14
def train_kwta(n_epoch=500, dataset_cls=MNIST):
    kwta = KWinnersTakeAllSoft(sparsity=0.05, hard=False, hardness=2)
    # kwta = SynapticScaling(kwta, synaptic_scale=3)
    model = MLP_kWTA(784, 64, 256, kwta=kwta)
    optimizer, scheduler = get_optimizer_scheduler(model)
    criterion = TripletLossSampler(TripletCosineLoss(margin=0.5))
    data_loader = DataLoader(dataset_cls, transform=TransformDefault.mnist())
    kwta_scheduler = KWTAScheduler(model=model,
                                   step_size=10,
                                   gamma_sparsity=0.7,
                                   min_sparsity=0.05,
                                   gamma_hardness=2,
                                   max_hardness=20)
    trainer = TrainerEmbeddingKWTA(
        model=model,
        criterion=criterion,
        data_loader=data_loader,
        optimizer=optimizer,
        scheduler=scheduler,
        kwta_scheduler=kwta_scheduler,
        accuracy_measure=AccuracyEmbeddingKWTA(cache=True),
        env_suffix='')
    # trainer.restore()
    trainer.train(n_epochs=n_epoch, mutual_info_layers=1)
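
From its parameters, KWTAScheduler appears to anneal the kWTA layer every step_size epochs: sparsity shrinks geometrically down to min_sparsity while hardness grows up to max_hardness, trading a dense, easy-to-train start for a sparse, near-hard finish. A plausible sketch of the per-epoch update (an assumption, not the library code):

def kwta_scheduler_step(kwta, epoch, step_size=10, gamma_sparsity=0.7,
                        min_sparsity=0.05, gamma_hardness=2, max_hardness=20):
    if epoch > 0 and epoch % step_size == 0:
        kwta.sparsity = max(kwta.sparsity * gamma_sparsity, min_sparsity)
        kwta.hardness = min(kwta.hardness * gamma_hardness, max_hardness)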