Example #1
0
def test_train_with_prediction():
    """A one-epoch training run must invoke the model exactly twice
    (once per forward pass performed by ``train``)."""
    # Stub model: its forward call yields a zero tensor that participates
    # in autograd, and its encoder yields a plain zero tensor.
    model = Mock()
    model.cluster_number = 10
    model.encoder.return_value = torch.zeros(100, 100)
    model.return_value = torch.zeros(100, 100).requires_grad_()
    # Trivial dataset: 100 zero feature-vectors with zero labels.
    features = torch.zeros(100, 100)
    labels = torch.zeros(100, 1)
    train(
        dataset=TensorDataset(features, labels),
        model=model,
        epochs=1,
        batch_size=100,
        optimizer=Mock(),
        cuda=False,
    )
    assert model.call_count == 2
Example #2
0
def main(data_dir, cuda, batch_size, pretrain_epochs, finetune_epochs,
         testing_mode):
    """Run the full DEC pipeline on MNIST.

    Stages: greedy layer-wise SDAE pretraining, whole-autoencoder
    fine-tuning, DEC cluster training, then accuracy evaluation and
    (outside testing mode) a confusion-matrix heatmap written to disk.

    :param data_dir: directory holding the MNIST data
    :param cuda: whether the caller requests CUDA
    :param batch_size: batch size for the two autoencoder stages
    :param pretrain_epochs: epochs for layer-wise SDAE pretraining
    :param finetune_epochs: epochs for autoencoder fine-tuning
    :param testing_mode: if True, skip writing the confusion diagram
    """
    writer = SummaryWriter()  # create the TensorBoard object
    try:
        # callback function to call during training, uses writer from the scope
        def training_callback(epoch, lr, loss, validation_loss):
            writer.add_scalars('data/autoencoder', {
                'lr': lr,
                'loss': loss,
                'validation_loss': validation_loss,
            }, epoch)

        # Honour the caller's cuda flag as well as actual availability, so
        # the device the model lives on agrees with the cuda= argument
        # forwarded to train()/predict() below.  (Previously the flag was
        # ignored here, which could place the model on GPU while training
        # ran with cuda=False.)
        device = 'cuda' if cuda and torch.cuda.is_available() else 'cpu'
        ds_train = CachedMNIST(data_dir,
                               is_train=True,
                               device=device,
                               testing_mode=testing_mode)  # training dataset
        ds_val = CachedMNIST(data_dir,
                             is_train=False,
                             device=device,
                             testing_mode=testing_mode)  # evaluation dataset
        # 784 -> 500 -> 500 -> 2000 -> 10 architecture from the DEC paper.
        autoencoder = StackedDenoisingAutoEncoder(
            [28 * 28, 500, 500, 2000, 10], final_activation=None)
        autoencoder = autoencoder.to(device)

        print('Pretraining stage.')
        ae.pretrain(
            ds_train,
            autoencoder,
            device=device,
            validation=ds_val,
            epochs=pretrain_epochs,
            batch_size=batch_size,
            silent=True,
            # A fresh optimizer per sub-autoencoder, hence the factories.
            optimizer=lambda model: SGD(model.parameters(), lr=0.1,
                                        momentum=0.9),
            scheduler=lambda x: StepLR(x, 20000, gamma=0.1),
            corruption=0.2)

        print('Training stage.')
        ae_optimizer = SGD(params=autoencoder.parameters(), lr=0.1,
                           momentum=0.9)
        ae.train(ds_train,
                 autoencoder,
                 device=device,
                 validation=ds_val,
                 epochs=finetune_epochs,
                 batch_size=batch_size,
                 silent=True,
                 optimizer=ae_optimizer,
                 scheduler=StepLR(ae_optimizer, 20000, gamma=0.1),
                 corruption=0.2,
                 update_callback=training_callback)

        print('DEC stage.')
        model = DEC(cluster_number=10,
                    embedding_dimension=28 * 28,
                    hidden_dimension=10,
                    encoder=autoencoder.encoder)
        model = model.to(device)
        dec_optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)
        train(dataset=ds_train,
              model=model,
              epochs=20000,
              batch_size=256,
              silent=True,
              optimizer=dec_optimizer,
              stopping_delta=0.000001,
              cuda=cuda)

        predicted, actual = predict(ds_train,
                                    model,
                                    1024,
                                    silent=True,
                                    return_actual=True,
                                    cuda=cuda)
        actual = actual.cpu().numpy()
        predicted = predicted.cpu().numpy()
        # Hungarian-style reassignment of cluster ids to true labels.
        reassignment, accuracy = cluster_accuracy(actual, predicted)
        print('Final DEC accuracy: %s' % accuracy)
        if not testing_mode:
            predicted_reassigned = [reassignment[item]
                                    for item in predicted]  # TODO numpify
            confusion = confusion_matrix(actual, predicted_reassigned)
            # Row-normalise so each true class sums to 1.
            normalised_confusion = confusion.astype('float') / confusion.sum(
                axis=1)[:, np.newaxis]
            confusion_id = uuid.uuid4().hex
            sns.heatmap(normalised_confusion).get_figure().savefig(
                'confusion_%s.png' % confusion_id)
            print('Writing out confusion diagram with UUID: %s' % confusion_id)
    finally:
        # Always release the TensorBoard writer — previously it was only
        # closed on the non-testing path, leaking it in testing mode or on
        # any exception.
        writer.close()
Example #3
0
def main(cuda, batch_size, pretrain_epochs, finetune_epochs, testing_mode):
    """Run the full DEC pipeline on MNIST.

    Stages: greedy layer-wise SDAE pretraining, whole-autoencoder
    fine-tuning, DEC cluster training, then accuracy evaluation and
    (outside testing mode) a confusion-matrix heatmap written to disk.

    :param cuda: whether to move models to CUDA and train on GPU
    :param batch_size: batch size for the two autoencoder stages
    :param pretrain_epochs: epochs for layer-wise SDAE pretraining
    :param finetune_epochs: epochs for autoencoder fine-tuning
    :param testing_mode: if True, skip writing the confusion diagram
    """
    writer = SummaryWriter()  # create the TensorBoard object
    try:
        # callback function to call during training, uses writer from the scope
        def training_callback(epoch, lr, loss, validation_loss):
            writer.add_scalars(
                "data/autoencoder",
                {
                    "lr": lr,
                    "loss": loss,
                    "validation_loss": validation_loss,
                },
                epoch,
            )

        ds_train = CachedMNIST(train=True, cuda=cuda,
                               testing_mode=testing_mode)  # training dataset
        ds_val = CachedMNIST(train=False, cuda=cuda,
                             testing_mode=testing_mode)  # evaluation dataset
        # 784 -> 500 -> 500 -> 2000 -> 10 architecture from the DEC paper.
        autoencoder = StackedDenoisingAutoEncoder(
            [28 * 28, 500, 500, 2000, 10], final_activation=None)
        if cuda:
            autoencoder.cuda()

        print("Pretraining stage.")
        ae.pretrain(
            ds_train,
            autoencoder,
            cuda=cuda,
            validation=ds_val,
            epochs=pretrain_epochs,
            batch_size=batch_size,
            # A fresh optimizer per sub-autoencoder, hence the factories.
            optimizer=lambda model: SGD(model.parameters(), lr=0.1,
                                        momentum=0.9),
            scheduler=lambda x: StepLR(x, 100, gamma=0.1),
            corruption=0.2,
        )

        print("Training stage.")
        ae_optimizer = SGD(params=autoencoder.parameters(), lr=0.1,
                           momentum=0.9)
        ae.train(
            ds_train,
            autoencoder,
            cuda=cuda,
            validation=ds_val,
            epochs=finetune_epochs,
            batch_size=batch_size,
            optimizer=ae_optimizer,
            scheduler=StepLR(ae_optimizer, 100, gamma=0.1),
            corruption=0.2,
            update_callback=training_callback,
        )

        print("DEC stage.")
        model = DEC(cluster_number=10,
                    hidden_dimension=10,
                    encoder=autoencoder.encoder)
        if cuda:
            model.cuda()
        dec_optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)
        train(
            dataset=ds_train,
            model=model,
            epochs=100,
            batch_size=256,
            optimizer=dec_optimizer,
            stopping_delta=0.000001,
            cuda=cuda,
        )

        predicted, actual = predict(ds_train,
                                    model,
                                    1024,
                                    silent=True,
                                    return_actual=True,
                                    cuda=cuda)
        actual = actual.cpu().numpy()
        predicted = predicted.cpu().numpy()
        # Hungarian-style reassignment of cluster ids to true labels.
        reassignment, accuracy = cluster_accuracy(actual, predicted)
        print("Final DEC accuracy: %s" % accuracy)
        if not testing_mode:
            predicted_reassigned = [reassignment[item]
                                    for item in predicted]  # TODO numpify
            confusion = confusion_matrix(actual, predicted_reassigned)
            # Row-normalise so each true class sums to 1.
            normalised_confusion = (confusion.astype("float") /
                                    confusion.sum(axis=1)[:, np.newaxis])
            confusion_id = uuid.uuid4().hex
            sns.heatmap(normalised_confusion).get_figure().savefig(
                "confusion_%s.png" % confusion_id)
            print("Writing out confusion diagram with UUID: %s" % confusion_id)
    finally:
        # Always release the TensorBoard writer — previously it was only
        # closed on the non-testing path, leaking it in testing mode or on
        # any exception.
        writer.close()