def testModel():
    # Create models
    print("Creating Autoencoder, Encoder and Generator...")
    autoencoder, encoder, decoder = getModels()

    # Load Autoencoder weights
    print("Loading weights...")
    autoencoder.load_weights(modelsPath + modelName)

    # Load dataset to test
    print("Loading dataset...")
    X_train, X_test = loadDataset()

    # Visualization functions
    visualizeReconstructedImages(X_train[:16], X_test[:16], autoencoder, save=True)
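# --- Hedged sketch (assumption, not shown in this section): getETA helper ---
# trainModel() below relies on a getETA(meanBatchTime, nbBatch, batchIndex,
# nbEpoch, epoch) utility whose real implementation is not included here. The
# version below is only one plausible way to turn the rolling mean batch
# duration into a human-readable remaining-time estimate.
def getETA(meanBatchTime, nbBatch, batchIndex, nbEpoch, epoch):
    # Batches left in the current epoch plus all batches of the remaining epochs
    remainingBatches = (nbBatch - batchIndex - 1) + (nbEpoch - epoch - 1) * nbBatch
    remainingSeconds = int(meanBatchTime * remainingBatches)
    hours, rest = divmod(remainingSeconds, 3600)
    minutes, seconds = divmod(rest, 60)
    return "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds)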
def trainModel(startEpoch=0):
    # Create models
    print("Creating Autoencoder...")
    autoencoder, _, _ = getModels()
    autoencoder.compile(optimizer=RMSprop(lr=0.00025), loss="mse")

    # If resuming, load the previously saved Autoencoder weights
    if startEpoch > 0:
        print("Loading weights...")
        autoencoder.load_weights(modelsPath + modelName)

    print("Loading dataset...")
    X_train, X_test = loadDataset()

    # Compute number of batches
    nbBatch = int(X_train.shape[0] / batchSize)

    # Train the Autoencoder on dataset
    print("Training Autoencoder for {} epochs with {} batches per epoch and {} samples per batch."
          .format(nbEpoch, nbBatch, batchSize))
    print("Run id: {}".format(runID))

    # TensorBoard summary writer for debugging
    writer = tf.summary.FileWriter("/tmp/logs/" + runID)
    batchTimes = [0. for i in range(5)]

    # For each epoch
    for epoch in range(startEpoch, nbEpoch):
        # For each batch
        for batchIndex in range(nbBatch):
            batchStartTime = time.time()

            # Get batch
            X = X_train[batchIndex * batchSize:(batchIndex + 1) * batchSize]

            # Train on batch
            autoencoderLoss = autoencoder.train_on_batch(X, X)
            trainingSummary = tf.Summary.Value(tag="Loss", simple_value=float(autoencoderLoss))

            # Compute ETA from a rolling mean of the last batch durations
            batchTime = time.time() - batchStartTime
            batchTimes = batchTimes[1:] + [batchTime]
            eta = getETA(sum(batchTimes) / len(batchTimes), nbBatch, batchIndex, nbEpoch, epoch)

            # Save reconstructions on train/test samples
            if batchIndex % 2 == 0:
                visualizeReconstructedImages(X_train[:16], X_test[:16], autoencoder, save=True,
                                             label="{}_{}".format(epoch, batchIndex))

            # Validation & TensorBoard debug
            if batchIndex % 20 == 0:
                validationLoss = autoencoder.evaluate(X_test[:512], X_test[:512], batch_size=256, verbose=0)
                validationSummary = tf.Summary.Value(tag="Validation Loss", simple_value=float(validationLoss))
                summary = tf.Summary(value=[trainingSummary, validationSummary])
                print("Epoch {}/{} - Batch {}/{} - Loss: {:.3f}/{:.3f} - ETA:".format(
                    epoch + 1, nbEpoch, batchIndex + 1, nbBatch, autoencoderLoss, validationLoss), eta)
            else:
                print("Epoch {}/{} - Batch {}/{} - Loss: {:.3f} - ETA:".format(
                    epoch + 1, nbEpoch, batchIndex + 1, nbBatch, autoencoderLoss), eta)
                summary = tf.Summary(value=[trainingSummary])
            writer.add_summary(summary, epoch * nbBatch + batchIndex)

        # Save model every epoch
        print("Saving autoencoder...")
        autoencoder.save_weights(modelsPath + modelName, overwrite=True)
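# --- Hedged usage sketch (assumption, not part of the original section) ---
# A minimal command-line entry point dispatching to trainModel() or testModel();
# the original script may wire this up differently.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Autoencoder training / testing")
    parser.add_argument("--test", action="store_true", help="run testModel() instead of training")
    parser.add_argument("--start-epoch", type=int, default=0, help="epoch to resume training from")
    args = parser.parse_args()
    if args.test:
        testModel()
    else:
        trainModel(startEpoch=args.start_epoch)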