import uuid

import numpy as np
import seaborn as sns
import torch
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
from tensorboardX import SummaryWriter
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader

import ptsdae.model as ae
from ptsdae.sdae import StackedDenoisingAutoEncoder

# CachedMNIST and cluster_accuracy are provided alongside this example in the
# original project.


def main(cuda, batch_size, pretrain_epochs, finetune_epochs):
    writer = SummaryWriter()  # create the TensorBoard object

    # callback function to call during training, uses writer from the scope
    def training_callback(epoch, lr, loss, validation_loss):
        writer.add_scalars('data/autoencoder', {
            'lr': lr,
            'loss': loss,
            'validation_loss': validation_loss,
        }, epoch)

    ds_train = CachedMNIST(train=True, cuda=cuda)  # training dataset
    ds_val = CachedMNIST(train=False, cuda=cuda)  # evaluation dataset
    autoencoder = StackedDenoisingAutoEncoder(
        [28 * 28, 500, 500, 2000, 10],
        final_activation=None)
    if cuda:
        autoencoder.cuda()
    print('Pretraining stage.')
    ae.pretrain(
        ds_train,
        autoencoder,
        cuda=cuda,
        validation=ds_val,
        epochs=pretrain_epochs,
        batch_size=batch_size,
        optimizer=lambda model: SGD(model.parameters(), lr=0.1, momentum=0.9),
        scheduler=lambda x: StepLR(x, 100, gamma=0.1),
        corruption=0.2)
    print('Training stage.')
    ae_optimizer = SGD(params=autoencoder.parameters(), lr=0.1, momentum=0.9)
    ae.train(
        ds_train,
        autoencoder,
        cuda=cuda,
        validation=ds_val,
        epochs=finetune_epochs,
        batch_size=batch_size,
        optimizer=ae_optimizer,
        scheduler=StepLR(ae_optimizer, 100, gamma=0.1),
        corruption=0.2,
        update_callback=training_callback)
    print('k-Means stage.')
    dataloader = DataLoader(ds_train, batch_size=1024, shuffle=False)
    kmeans = KMeans(n_clusters=10, n_init=20)
    autoencoder.eval()
    features = []
    actual = []
    for index, batch in enumerate(dataloader):
        if isinstance(batch, (tuple, list)) and len(batch) == 2:
            batch, value = batch  # if we have a prediction label, separate it to actual
            actual.append(value)
        if cuda:
            batch = batch.cuda(non_blocking=True)  # async is a reserved word in Python 3.7+
        batch = batch.squeeze(1).view(batch.size(0), -1)
        features.append(autoencoder.encoder(batch).detach().cpu())
    actual = torch.cat(actual).long().cpu().numpy()
    predicted = kmeans.fit_predict(torch.cat(features).numpy())
    reassignment, accuracy = cluster_accuracy(predicted, actual)
    print('Final k-Means accuracy: %s' % accuracy)
    predicted_reassigned = [reassignment[item] for item in predicted]  # TODO numpify
    confusion = confusion_matrix(actual, predicted_reassigned)
    normalised_confusion = confusion.astype('float') / confusion.sum(axis=1)[:, np.newaxis]
    confusion_id = uuid.uuid4().hex
    sns.heatmap(normalised_confusion).get_figure().savefig('confusion_%s.png' % confusion_id)
    print('Writing out confusion diagram with UUID: %s' % confusion_id)
    writer.add_embedding(
        torch.cat(features),
        metadata=predicted,
        label_img=ds_train.ds.train_data.float().unsqueeze(1),  # TODO bit ugly
        tag='predicted')
    writer.close()
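# --- Hypothetical entry point (not part of the original script) ---
# A minimal sketch of how main() might be invoked from the command line;
# the flag names and defaults are illustrative assumptions.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='SDAE pretraining/finetuning plus k-Means on MNIST')
    parser.add_argument('--no-cuda', action='store_true', help='disable CUDA')
    parser.add_argument('--batch-size', type=int, default=256)
    parser.add_argument('--pretrain-epochs', type=int, default=300)
    parser.add_argument('--finetune-epochs', type=int, default=500)
    args = parser.parse_args()
    main(
        cuda=not args.no_cuda,
        batch_size=args.batch_size,
        pretrain_epochs=args.pretrain_epochs,
        finetune_epochs=args.finetune_epochs,
    )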
from typing import Any, Callable, List, Optional

import numpy as np
import torch
from scipy.sparse import issparse
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader, TensorDataset

import ptsdae.model as ae
from ptsdae.sdae import StackedDenoisingAutoEncoder


class SDAETransformerBase(TransformerMixin, BaseEstimator):
    def __init__(
        self,
        dimensions: List[int],
        cuda: Optional[bool] = None,
        batch_size: int = 256,
        pretrain_epochs: int = 200,
        finetune_epochs: int = 500,
        corruption: Optional[float] = 0.2,
        optimiser_pretrain: Callable[[torch.nn.Module], torch.optim.Optimizer] = lambda x: SGD(x.parameters(), lr=0.1, momentum=0.9),
        optimiser_train: Callable[[torch.nn.Module], torch.optim.Optimizer] = lambda x: SGD(x.parameters(), lr=0.1, momentum=0.9),
        scheduler: Optional[Callable[[torch.optim.Optimizer], Any]] = lambda x: StepLR(x, 100, gamma=0.1),
        final_activation: Optional[torch.nn.Module] = None,
    ) -> None:
        self.cuda = torch.cuda.is_available() if cuda is None else cuda
        self.batch_size = batch_size
        self.dimensions = dimensions
        self.pretrain_epochs = pretrain_epochs
        self.finetune_epochs = finetune_epochs
        self.optimiser_pretrain = optimiser_pretrain
        self.optimiser_train = optimiser_train
        self.scheduler = scheduler
        self.corruption = corruption
        self.autoencoder = None
        self.final_activation = final_activation

    def fit(self, X, y=None):
        if issparse(X):
            X = X.todense()
        ds = TensorDataset(torch.from_numpy(X.astype(np.float32)))
        self.autoencoder = StackedDenoisingAutoEncoder(
            self.dimensions,
            final_activation=self.final_activation)
        if self.cuda:
            self.autoencoder.cuda()
        ae.pretrain(
            ds,
            self.autoencoder,
            cuda=self.cuda,
            epochs=self.pretrain_epochs,
            batch_size=self.batch_size,
            optimizer=self.optimiser_pretrain,
            scheduler=self.scheduler,
            corruption=self.corruption,  # was hardcoded to 0.2, ignoring the constructor argument
            silent=True
        )
        ae_optimizer = self.optimiser_train(self.autoencoder)
        ae.train(
            ds,
            self.autoencoder,
            cuda=self.cuda,
            epochs=self.finetune_epochs,
            batch_size=self.batch_size,
            optimizer=ae_optimizer,
            scheduler=self.scheduler(ae_optimizer),
            corruption=self.corruption,
            silent=True
        )
        return self

    def score(self, X, y=None, sample_weight=None) -> float:
        loss_function = torch.nn.MSELoss()
        if self.autoencoder is None:
            raise NotFittedError
        if issparse(X):
            X = X.todense()
        self.autoencoder.eval()
        ds = TensorDataset(torch.from_numpy(X.astype(np.float32)))
        dataloader = DataLoader(
            ds,
            batch_size=self.batch_size,
            shuffle=False
        )
        loss = 0
        for index, batch in enumerate(dataloader):
            batch = batch[0]
            if self.cuda:
                batch = batch.cuda(non_blocking=True)
            output = self.autoencoder(batch)
            loss += float(loss_function(output, batch).item())
        return loss
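# --- Hypothetical usage sketch (not part of the original) ---
# Fitting the scikit-learn-style wrapper on a dense array and scoring the
# summed reconstruction MSE; the random data and reduced epoch counts are
# illustrative assumptions. Concrete subclasses would add transform().
import numpy as np

X = np.random.rand(1024, 784).astype(np.float32)  # e.g. flattened 28x28 images
sdae = SDAETransformerBase(
    dimensions=[784, 500, 500, 2000, 10],
    pretrain_epochs=10,  # kept small for illustration
    finetune_epochs=20,
)
sdae.fit(X)
print('summed reconstruction MSE:', sdae.score(X))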
# pretrain
ptsdae.model.pretrain(dataset,
                      autoencoder=ae,
                      epochs=args.pretrain_epochs,
                      batch_size=args.batch_size,
                      optimizer=get_opt,
                      scheduler=get_sched,
                      validation=validation,
                      update_freq=args.pretrain_epochs // 50,
                      cuda=True,
                      num_workers=args.njobs)
# train
# prep for cuda usage ...
ae.cuda()
# get our scheduler and optimizers
opt = get_opt(ae, lr=args.train_lr)
sched = get_sched(opt)
print("Training ...")
sys.stdout.flush()
ptsdae.model.train(dataset,
                   autoencoder=ae,
                   epochs=args.train_epochs,
                   batch_size=args.batch_size,
                   optimizer=opt,
                   scheduler=sched,
                   validation=validation,
                   update_freq=args.train_epochs // 50)
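# --- Assumed helpers (not shown in this fragment) ---
# get_opt and get_sched are referenced above but not defined here; a minimal
# sketch consistent with the SGD/StepLR settings used in the other examples
# might be:
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR

def get_opt(model, lr=0.1):
    # note: pretrain() calls this once per sub-autoencoder, so lr needs a default
    return SGD(model.parameters(), lr=lr, momentum=0.9)

def get_sched(optimizer):
    return StepLR(optimizer, step_size=100, gamma=0.1)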
# This fragment assumes ds_train, batch_size, autoencoder_path, start_time
# and get_embedded_dim are defined earlier in the original script.
pretrain_epochs = 300
finetune_epochs = 500
training_callback = None
cuda = torch.cuda.is_available()
ds_val = None
embedded_dim = get_embedded_dim()
try:
    with open(autoencoder_path, 'rb') as f:  # avoid leaking the file handle
        autoencoder = pickle.load(f)
except Exception:  # a bare except would also trap KeyboardInterrupt
    autoencoder = StackedDenoisingAutoEncoder(
        dimensions=[embedded_dim, 500, 500, 2000, 10],
        final_activation=None,
    )
    if cuda:
        autoencoder.cuda()
    print('SDAE Pretraining stage.', flush=True)
    print(f'@ {time.time() - start_time}\n', flush=True)
    ae.pretrain(
        ds_train,
        autoencoder,
        cuda=cuda,
        validation=ds_val,
        epochs=pretrain_epochs,
        batch_size=batch_size,
        optimizer=lambda model: SGD(
            model.parameters(), lr=0.1, momentum=0.9),
        scheduler=lambda x: StepLR(x, 100, gamma=0.1),
        corruption=0.2,
        silent=True,
    )
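# --- Hypothetical counterpart to the load above (not in the original) ---
# Persisting the trained autoencoder to autoencoder_path so the next run can
# skip pretraining; pickle is assumed here to mirror the load, though
# torch.save(autoencoder.state_dict(), ...) is the more usual choice.
with open(autoencoder_path, 'wb') as f:
    pickle.dump(autoencoder, f)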
import uuid

import numpy as np
import seaborn as sns
from sklearn.metrics import confusion_matrix
from tensorboardX import SummaryWriter
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR

import ptsdae.model as ae
from ptdec.dec import DEC
from ptdec.model import train, predict
from ptdec.utils import cluster_accuracy
from ptsdae.sdae import StackedDenoisingAutoEncoder

# CachedMNIST is defined alongside this example in the original project.


def main(cuda, batch_size, pretrain_epochs, finetune_epochs, testing_mode):
    writer = SummaryWriter()  # create the TensorBoard object

    # callback function to call during training, uses writer from the scope
    def training_callback(epoch, lr, loss, validation_loss):
        writer.add_scalars(
            "data/autoencoder",
            {
                "lr": lr,
                "loss": loss,
                "validation_loss": validation_loss,
            },
            epoch,
        )

    ds_train = CachedMNIST(train=True, cuda=cuda, testing_mode=testing_mode)  # training dataset
    ds_val = CachedMNIST(train=False, cuda=cuda, testing_mode=testing_mode)  # evaluation dataset
    autoencoder = StackedDenoisingAutoEncoder(
        [28 * 28, 500, 500, 2000, 10],
        final_activation=None)
    if cuda:
        autoencoder.cuda()
    print("Pretraining stage.")
    ae.pretrain(
        ds_train,
        autoencoder,
        cuda=cuda,
        validation=ds_val,
        epochs=pretrain_epochs,
        batch_size=batch_size,
        optimizer=lambda model: SGD(model.parameters(), lr=0.1, momentum=0.9),
        scheduler=lambda x: StepLR(x, 100, gamma=0.1),
        corruption=0.2,
    )
    print("Training stage.")
    ae_optimizer = SGD(params=autoencoder.parameters(), lr=0.1, momentum=0.9)
    ae.train(
        ds_train,
        autoencoder,
        cuda=cuda,
        validation=ds_val,
        epochs=finetune_epochs,
        batch_size=batch_size,
        optimizer=ae_optimizer,
        scheduler=StepLR(ae_optimizer, 100, gamma=0.1),
        corruption=0.2,
        update_callback=training_callback,
    )
    print("DEC stage.")
    model = DEC(cluster_number=10, hidden_dimension=10, encoder=autoencoder.encoder)
    if cuda:
        model.cuda()
    dec_optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)
    train(
        dataset=ds_train,
        model=model,
        epochs=100,
        batch_size=256,
        optimizer=dec_optimizer,
        stopping_delta=0.000001,
        cuda=cuda,
    )
    predicted, actual = predict(ds_train, model, 1024, silent=True, return_actual=True, cuda=cuda)
    actual = actual.cpu().numpy()
    predicted = predicted.cpu().numpy()
    reassignment, accuracy = cluster_accuracy(actual, predicted)
    print("Final DEC accuracy: %s" % accuracy)
    if not testing_mode:
        predicted_reassigned = [reassignment[item] for item in predicted]  # TODO numpify
        confusion = confusion_matrix(actual, predicted_reassigned)
        normalised_confusion = (
            confusion.astype("float") / confusion.sum(axis=1)[:, np.newaxis]
        )
        confusion_id = uuid.uuid4().hex
        sns.heatmap(normalised_confusion).get_figure().savefig(
            "confusion_%s.png" % confusion_id
        )
        print("Writing out confusion diagram with UUID: %s" % confusion_id)
        writer.close()
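# --- Reference sketch of cluster_accuracy (imported from the library above) ---
# A minimal implementation of the usual Hungarian-assignment clustering
# accuracy, matching the (y_true, y_predicted) argument order used in this
# example; shown for clarity only, under the assumption that the library
# behaves alike.
import numpy as np
from scipy.optimize import linear_sum_assignment

def cluster_accuracy(y_true, y_predicted, cluster_number=None):
    if cluster_number is None:
        cluster_number = max(y_predicted.max(), y_true.max()) + 1  # assume labels are 0..k-1
    count_matrix = np.zeros((cluster_number, cluster_number), dtype=np.int64)
    for i in range(y_predicted.size):
        count_matrix[y_predicted[i], y_true[i]] += 1
    # maximise matched counts by minimising the complementary cost
    row_ind, col_ind = linear_sum_assignment(count_matrix.max() - count_matrix)
    reassignment = dict(zip(row_ind, col_ind))  # predicted cluster -> true label
    accuracy = count_matrix[row_ind, col_ind].sum() / y_predicted.size
    return reassignment, accuracy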