losses['rec'].append(epochRecLoss / i)
losses['dis'].append(epochDisLoss / i)

#### Test
dae.eval()
dis.eval()

#get test outputs and losses
xTest, yTest = prep_data(iter(testLoader).next(), useCUDA=dae.useCUDA)
zTest, recTest = dae.forward(xTest)  #N.B. corruption is applied inside forward
recLossTest = dae.rec_loss(recTest, xTest)

#Plot losses
losses['test rec'].append(recLossTest.data[0])
if e > 0:  #only one point for test rec otherwise
    plot_losses(losses, exDir, epochs=e + 1)
    plot_norm_losses(losses, exDir)

#save parameters
dae.save_params(exDir)
dis.save_params(exDir)

#Save images of originals and reconstructions
save_image(xTest.data, join(exDir, 'original.png'))
save_image(recTest.data, join(exDir, 'rec.png'))

#Save samples
sampleDir = join(exDir, 'epoch_' + str(e))
os.mkdir(sampleDir)
print 'sample dir:', sampleDir
dae.sample_x(opts.M, sampleDir)
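#----------------------------------------------------------------------------
# prep_data is defined elsewhere in the repo; it is only assumed here to wrap
# an (x, y) batch in Variables and move it to the GPU when useCUDA is set.
# The sketch below is an illustrative guess at such a helper (old-style
# PyTorch), not the repo's actual implementation.
#----------------------------------------------------------------------------
from torch.autograd import Variable

def prep_data(data, useCUDA=False):
    x, y = data  #one batch from the DataLoader: (images, labels)
    if useCUDA:
        x, y = x.cuda(), y.cuda()
    return Variable(x), Variable(y)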
def train_svm(dae, svm, trainLoader, testLoader, exDir, lr):
    '''
    Data labels y are in {0, 1}.
    For training, the SVM targets must be mapped to {-1, 1} (i.e. y * 2 - 1).
    The loss uses {-1, 1} targets for both train and test;
    the classification score uses the original {0, 1} labels.
    '''
    print 'training svm...'
    dae.eval()
    optimSVM = optim.SGD(svm.parameters(), lr=lr)  #optimizer

    f = open(join(exDir, 'svmOpts.txt'), 'w')
    f.write('svmLR: %0.5f\nc: %0.5f\n' % (lr, svm.c))
    f.close()

    svmLoss = {'train': [], 'test': []}
    for epoch in range(opts.maxEpochs):
        epochLoss_svm = 0
        svm.train()
        T = time()
        for i, data in enumerate(trainLoader):
            x, y = prep_data(data, useCUDA=svm.useCUDA)  #prep data as Variables
            inputs = dae.encode(x)  #get encodings as input
            output = svm.forward(inputs)  #get output
            loss = svm.loss(output, y * 2 - 1)  #calc loss on {-1, 1} targets
            optimSVM.zero_grad()  #zero grad
            loss.backward()  #backwards
            optimSVM.step()  #step
            epochLoss_svm += loss.data[0]
            if i % 100 == 0:
                print '[%d, %i] loss: %0.5f, time: %0.3f' % (epoch, i, epochLoss_svm / (i + 1), time() - T)

        svm.save_params(exDir)
        svmLoss['train'].append(epochLoss_svm / (i + 1))

        #test loss
        svm.eval()
        xTest, yTest = prep_data(iter(testLoader).next(), useCUDA=svm.useCUDA)
        testInputs = dae.encode(xTest)
        testOutputs = svm.forward(testInputs)
        testLoss = svm.loss(testOutputs, yTest * 2 - 1)
        svmLoss['test'].append(testLoss.data[0])

        if epoch > 1:
            plot_losses(svmLoss, exDir=exDir, epochs=epoch + 1, title='SVM_loss')

        #Do classification
        testScore = svm.binary_class_score(testOutputs, yTest)  #threshold at zero for testOutputs in [-1, 1]
        trainScore = svm.binary_class_score(output, y)  #threshold at zero for output in [-1, 1]
        f = open(join(exDir, 'svm.txt'), 'w')
        f.write('trainScore: %f \ntestScore: %f ' \
            % (trainScore.mean().data[0], testScore.mean().data[0]))
        f.close()

    return svm
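#----------------------------------------------------------------------------
# The svm object passed to train_svm is not defined in this file. The calls
# svm.loss(output, y * 2 - 1), svm.c and the zero-threshold scoring suggest a
# linear soft-margin SVM trained on the DAE encodings with a hinge loss on
# {-1, 1} targets. The module below is a minimal sketch under that assumption
# (nz, the encoding size, and the c-weighted L2 penalty are assumptions), not
# the repo's actual SVM implementation.
#----------------------------------------------------------------------------
import torch
import torch.nn as nn

class LinearSVM(nn.Module):

    def __init__(self, nz, c, useCUDA=False):
        super(LinearSVM, self).__init__()
        self.c = c  #regularisation weight written to svmOpts.txt above
        self.useCUDA = useCUDA
        self.fc = nn.Linear(nz, 1)  #single linear decision function over encodings

    def forward(self, z):
        return self.fc(z).view(-1)  #raw scores; sign gives the predicted class

    def loss(self, output, target):
        #soft-margin hinge loss on {-1, 1} targets plus an L2 penalty on the weights
        hinge = torch.clamp(1 - target.float() * output, min=0).mean()
        return hinge + self.c * self.fc.weight.pow(2).sum()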
xTest, yTest = prep_data(testData, classer.useCUDA)

####### eval VAE #######
recTest, outMu, outLogVar = vae(xTest)
bceLossTest, klLossTest = vae.loss(recTest, xTest, outMu, outLogVar)
save_image(xTest.data, join(exDir, 'input.png'))
save_image(recTest.data, join(exDir, 'output_' + str(e) + '.png'))

####### eval CLASSER #######
yPredTrain = classer(xTrain)
yPredTest = classer(xTest)
classTrain = score(yPredTrain, yTrain).data[0]
classTest = score(yPredTest, yTest).data[0]

vaeLosses['test_bce'].append(bceLossTest.mean().data[0])
vaeLosses['test_kl'].append(klLossTest.mean().data[0])
classerLosses['train_acc'].append(classTrain)
classerLosses['test_acc'].append(classTest)

viz_DZ(deltaZ.data[0].cpu().numpy(), exDir)

if e > 0:
    plot_norm_losses(vaeLosses, exDir, e, title='VAE')
    plot_losses(classerLosses, exDir, e, title='CLASSER')
    plot_losses(deltaZLosses, exDir, e, title='DELTAZ')

#Evaluate the results
eval_results(vae, classer, deltaZ, testLoader, exDir)
inception_score(vae, classer, deltaZ, testLoader, exDir)
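#----------------------------------------------------------------------------
# vae.loss above is assumed to return the two standard VAE terms: a BCE
# reconstruction loss and the closed-form KL divergence between the encoder
# posterior N(mu, exp(logVar)) and a unit Gaussian prior. The function below
# is an illustrative sketch under that assumption (reduction conventions may
# differ), not the repo's actual implementation.
#----------------------------------------------------------------------------
import torch
import torch.nn.functional as F

def vae_loss(rec, x, mu, logVar):
    bceLoss = F.binary_cross_entropy(rec, x)  #reconstruction term
    #KL( N(mu, exp(logVar)) || N(0, I) ), summed over latent dims, averaged over the batch
    klLoss = -0.5 * torch.sum(1 + logVar - mu.pow(2) - logVar.exp()) / x.size(0)
    return bceLoss, klLoss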