"""VAE training setup: data pipeline, encoder/latent/decoder graph, and losses.

Builds a TensorFlow 1.x graph for a variational autoencoder using a
pretrained encoder. This section only constructs the graph; the training
loop is expected to follow later in the file.
"""
import tensorflow as tf
from architectures.pretrained_encoder import encoder_model
from architectures.latent_space import latent_space
from architectures.decoder import decoder_model
from dataprovider import DataProvider
import os  # NOTE(review): unused in this section — presumably needed below; verify before removing.

# Hyperparameters.
epochs = 50
batch_size = 20
latent_units = 200   # dimensionality of the latent code z
l_rate = 0.0001      # learning rate (used by the optimizer defined later)

# Data: reinitializable train/val datasets plus the shared input tensors.
data_provider = DataProvider(batch_size, root_folder='../data')
train_num_batches, val_num_batches = data_provider.get_num_batches()
training_dataset_init, val_dataset_init, images, labels = data_provider.get_data()

# Model: encoder features -> sampled latent vector (with its Gaussian
# parameters) -> decoder reconstruction.
encoder = encoder_model(images)
latent_vector, mean, stddev = latent_space(encoder, latent_units)
predictions = decoder_model(latent_vector)

# Losses.
# Reconstruction term: sum the squared error over every non-batch axis so
# each example contributes one scalar, then average over the batch.
# BUGFIX: the original reduced axis=1 twice (reduce_sum then reduce_mean),
# which leaves a non-scalar loss whenever `predictions` has rank > 2
# (e.g. image batches of shape (batch, H, W, C)).
squared_error = tf.squared_difference(predictions, labels)
_non_batch_axes = list(range(1, squared_error.shape.ndims))  # assumes static rank — TODO confirm
generative_loss = tf.reduce_mean(
    tf.reduce_sum(squared_error, axis=_non_batch_axes))

# KL divergence between the approximate posterior q(z|x) = N(mean, stddev^2)
# and the standard normal prior N(0, I), summed over latent dimensions and
# averaged over the batch. The 1e-8 epsilon guards the log against
# stddev == 0.
latent_loss = tf.reduce_mean(0.5 * tf.reduce_sum(
    tf.square(mean) + tf.square(stddev)
    - tf.log(1e-8 + tf.square(stddev)) - 1, 1))