# Plot training vs. validation loss over epochs
plt.figure(1)
plt.plot(training_loss, 'b', label='Training')
plt.plot(val_loss, 'r', label='Validation')
plt.title('Model: Loss Over Epochs')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

# Plot training vs. validation accuracy over epochs
plt.figure(2)
plt.plot(training_accuracy, 'b', label='Training')
plt.plot(val_accuracy, 'r', label='Validation')
plt.title('Model: Accuracy Over Epochs')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.show()

### Save Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
save_weights = False
if save_weights:
    checkpoint_dir = '.\\training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir, "model_ckpt")
    model.save_weights(checkpoint_prefix)
    print('Model weights saved to files: ' + checkpoint_prefix + '.*')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#----------------------------------END FILE------------------------------------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
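# Supplementary sketch (not part of the original file): restoring the weights
# saved by the block above. This assumes the same `model` architecture has
# already been rebuilt before loading, and that `checkpoint_prefix` matches
# the prefix used when saving.
#
#     import os
#     checkpoint_prefix = os.path.join('.\\training_checkpoints', "model_ckpt")
#     model.load_weights(checkpoint_prefix)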
# Reference

[1] Kingma, Diederik P., and Max Welling. "Auto-Encoding Variational Bayes."
    https://arxiv.org/abs/1312.6114
'''

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from VAE import VAE
from Encoder import Encoder
from Decoder import Decoder
from Parameters import x_train, x_test, y_test, latent_dim, input_shape, epochs, batch_size
from keras.utils import plot_model
from Util import plot_results

if __name__ == '__main__':
    # Build the encoder and decoder sub-networks and group them for plotting
    encoder = Encoder(input_data=input_shape)
    decoder = Decoder(input_data=(latent_dim, ))
    models = (encoder, decoder)
    data = (x_test, y_test)

    # Assemble and compile the full variational autoencoder
    vae = VAE(input_data=input_shape, encoder=encoder, decoder=decoder)
    vae.compile(optimizer='adam')
    vae.summary()
    plot_model(vae, to_file='vae_mlp.png', show_shapes=True)

    # Train on the images themselves; the VAE loss needs no separate targets
    vae.fit(x=x_train,
            y=None,
            epochs=epochs,
            batch_size=batch_size,
            validation_data=(x_test, None))
    vae.save_weights('vae_mlp_mnist.h5')

    # Visualize the latent space and the generated digits
    plot_results(models, data, batch_size=batch_size, model_name="vae_mlp")
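    # Supplementary sketch (not part of the original script): once training
    # has finished, new digits can be generated by decoding samples drawn
    # from the latent prior. This assumes `decoder` behaves like a Keras
    # Model mapping latent vectors of size `latent_dim` to flattened 28x28
    # images, and that the saved weights are reloaded into the same
    # architecture.
    #
    #     import numpy as np
    #     vae.load_weights('vae_mlp_mnist.h5')
    #     z_sample = np.random.normal(size=(1, latent_dim))
    #     x_decoded = decoder.predict(z_sample)
    #     digit = x_decoded[0].reshape(28, 28)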