def test_basic(self):
    """
    Basic test to check that the calculation is sensible.
    """
    # a correct clustering up to a permutation of the labels scores 1.0
    true_value1 = np.array([1, 2, 1, 2, 0, 0], dtype=np.int64)
    pred_value1 = np.array([2, 1, 2, 1, 0, 0], dtype=np.int64)
    self.assertAlmostEqual(
        cluster_accuracy(true_value1, pred_value1)[1], 1.0)
    self.assertAlmostEqual(
        cluster_accuracy(true_value1, pred_value1, 3)[1], 1.0)
    # a degenerate clustering can match at most one of the six points
    true_value2 = np.array([1, 1, 1, 1, 1, 1], dtype=np.int64)
    pred_value2 = np.array([0, 1, 2, 3, 4, 5], dtype=np.int64)
    self.assertAlmostEqual(
        cluster_accuracy(true_value2, pred_value2)[1], 1.0 / 6.0)
    self.assertAlmostEqual(
        cluster_accuracy(true_value2, pred_value2, 6)[1], 1.0 / 6.0)
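# For reference, a minimal sketch of what cluster_accuracy is assumed to
# compute, consistent with the assertions above: an optimal one-to-one
# matching between predicted cluster ids and true labels via the Hungarian
# algorithm (scipy.optimize.linear_sum_assignment), returning the
# (reassignment, accuracy) pair used throughout. The repo's actual
# implementation may differ in detail; the helper name is hypothetical.
import numpy as np
from scipy.optimize import linear_sum_assignment


def _cluster_accuracy_sketch(y_true, y_pred, cluster_number=None):
    if cluster_number is None:
        cluster_number = max(y_true.max(), y_pred.max()) + 1
    # count_matrix[p, t] counts points with predicted id p and true label t
    count_matrix = np.zeros((cluster_number, cluster_number), dtype=np.int64)
    for t, p in zip(y_true, y_pred):
        count_matrix[p, t] += 1
    # maximise the matched counts by minimising their complement
    row_ind, col_ind = linear_sum_assignment(count_matrix.max() - count_matrix)
    reassignment = dict(zip(row_ind, col_ind))  # predicted id -> true label
    accuracy = count_matrix[row_ind, col_ind].sum() / y_pred.size
    return reassignment, accuracy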
import uuid

import numpy as np
import seaborn as sns
import torch
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
from tensorboardX import SummaryWriter
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader

# CachedMNIST, StackedDenoisingAutoEncoder, the `ae` training module and
# cluster_accuracy are assumed to be imported from the surrounding project.


def main(cuda, batch_size, pretrain_epochs, finetune_epochs):
    writer = SummaryWriter()  # create the TensorBoard object

    # callback function to call during training, uses writer from the scope
    def training_callback(epoch, lr, loss, validation_loss):
        writer.add_scalars('data/autoencoder', {
            'lr': lr,
            'loss': loss,
            'validation_loss': validation_loss,
        }, epoch)

    ds_train = CachedMNIST(train=True, cuda=cuda)  # training dataset
    ds_val = CachedMNIST(train=False, cuda=cuda)  # evaluation dataset
    autoencoder = StackedDenoisingAutoEncoder(
        [28 * 28, 500, 500, 2000, 10],
        final_activation=None)
    if cuda:
        autoencoder.cuda()
    print('Pretraining stage.')
    ae.pretrain(
        ds_train,
        autoencoder,
        cuda=cuda,
        validation=ds_val,
        epochs=pretrain_epochs,
        batch_size=batch_size,
        optimizer=lambda model: SGD(model.parameters(), lr=0.1, momentum=0.9),
        scheduler=lambda x: StepLR(x, 100, gamma=0.1),
        corruption=0.2)
    print('Training stage.')
    ae_optimizer = SGD(params=autoencoder.parameters(), lr=0.1, momentum=0.9)
    ae.train(
        ds_train,
        autoencoder,
        cuda=cuda,
        validation=ds_val,
        epochs=finetune_epochs,
        batch_size=batch_size,
        optimizer=ae_optimizer,
        scheduler=StepLR(ae_optimizer, 100, gamma=0.1),
        corruption=0.2,
        update_callback=training_callback)
    print('k-Means stage.')
    dataloader = DataLoader(ds_train, batch_size=1024, shuffle=False)
    kmeans = KMeans(n_clusters=10, n_init=20)
    autoencoder.eval()
    features = []
    actual = []
    # encode the whole training set with the trained encoder
    for index, batch in enumerate(dataloader):
        if isinstance(batch, (tuple, list)) and len(batch) == 2:
            # if we have a prediction label, separate it to actual
            batch, value = batch
            actual.append(value)
        if cuda:
            # `async` is a reserved word from Python 3.7, use non_blocking
            batch = batch.cuda(non_blocking=True)
        batch = batch.squeeze(1).view(batch.size(0), -1)
        features.append(autoencoder.encoder(batch).detach().cpu())
    actual = torch.cat(actual).long().cpu().numpy()
    predicted = kmeans.fit_predict(torch.cat(features).numpy())
    # cluster_accuracy takes (true, predicted), matching the unit test above
    reassignment, accuracy = cluster_accuracy(actual, predicted)
    print('Final k-Means accuracy: %s' % accuracy)
    predicted_reassigned = [reassignment[item] for item in predicted]  # TODO numpify
    confusion = confusion_matrix(actual, predicted_reassigned)
    normalised_confusion = (confusion.astype('float') /
                            confusion.sum(axis=1)[:, np.newaxis])
    confusion_id = uuid.uuid4().hex
    sns.heatmap(normalised_confusion).get_figure().savefig(
        'confusion_%s.png' % confusion_id)
    print('Writing out confusion diagram with UUID: %s' % confusion_id)
    writer.add_embedding(
        torch.cat(features),
        metadata=predicted,
        label_img=ds_train.ds.train_data.float().unsqueeze(1),  # TODO bit ugly
        tag='predicted')
    writer.close()
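# Example invocation, assuming the script is run directly; the original
# example may instead wire these arguments up via a command-line interface,
# and the epoch counts below are illustrative rather than canonical values.
if __name__ == '__main__':
    main(
        cuda=torch.cuda.is_available(),
        batch_size=256,
        pretrain_epochs=300,
        finetune_epochs=500)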