def main():
    """Show original vs. reconstructed Space Invaders frames from a trained VAE."""
    # hyperparameters matching the loaded training run
    input_shape = (1, 84, 84)
    filters = 32
    kernel_size = 6
    epochs = 20
    batch_size = 1

    # summaries directory containing the pretrained weights
    # NOTE(review): `experiment` is not defined in this function; it is
    # assumed to exist at module scope -- confirm before running.
    run = 'cvae_atari_space_invaders_no_batch_norm_18_May_20_33_10_batch_size_1_beta_8_epochs_10_filters_32_kernel_size_6_loss_vae_loss_lr_0.0001_optimizer_adam'
    log_dir = './summaries/' + experiment + '/' + run + '/'

    # build the network and restore its trained weights
    vae = PongEntangledConvolutionalLatentNoBatchNormVAE(
        input_shape, log_dir, filters=filters, kernel_size=kernel_size)
    vae.load_model()

    # pull out the full model plus the encoder/decoder halves
    model = vae.get_model()
    decoder = vae.get_decoder()
    encoder = vae.get_encoder()

    # collect a fixed batch of held-out frames from the recorded-agent generator
    test_directory = './atari_agents/record/test/'
    frame_gen = utils.atari_generator(test_directory, batch_size=1)
    n_frames = 1000
    X_test = np.asarray([next(frame_gen)[0][0] for _ in range(n_frames)])

    # plot originals alongside their reconstructions
    sampling.encode_decode_sample(X_test, model)
def main():
    """Sweep one latent variable of a trained dense-latent Pong VAE decoder."""
    # hyperparameters matching the loaded training run
    input_shape = (1, 84, 84)
    epochs = 10
    batch_size = 1
    filters = 32
    kernel_size = 6
    pre_latent_size = 512
    latent_size = 32

    # summaries directory containing the pretrained weights
    # NOTE(review): `experiment` is not defined in this function; it is
    # assumed to exist at module scope -- confirm before running.
    run = 'cvae_atari_dense_latent_pong_no_batchnorm_beta_2_16_May_00_19_59_batch_size_1_beta_2_epochs_10_filters_32_kernel_size_6_loss_vae_loss_optimizer_adam'
    log_dir = './summaries/' + experiment + '/' + run + '/'

    # build the network and restore its trained weights
    vae = DenseLatentPongNoBatchNorm(input_shape,
                                     log_dir,
                                     filters=filters,
                                     kernel_size=kernel_size,
                                     pre_latent_size=pre_latent_size,
                                     latent_size=latent_size)
    vae.load_model()

    # pull out the full model plus the encoder/decoder halves
    model = vae.get_model()
    decoder = vae.get_decoder()
    encoder = vae.get_encoder()

    # collect a fixed batch of held-out frames from the recorded-agent generator
    test_directory = './atari_agents/record/test/'
    frame_gen = utils.atari_generator(test_directory, batch_size=1)
    n_frames = 100
    X_test = np.asarray([next(frame_gen)[0][0] for _ in range(n_frames)])

    # plot originals alongside their reconstructions
    sampling.encode_decode_sample(X_test, model)

    # other visualisations (disabled):
    # sampling.show_convolutional_layers(X_test, encoder, 8, 8)

    # sampling.decode_prior_samples(5, decoder, latent_shape=(1, 32))

    # num_iter = 100
    # sampling.sample_posterior(X_test, model, num_iter, show_every=1)

    # decode while varying a single latent dimension
    latent_shape = (1, latent_size)
    latent_index = 30
    sampling.change_latent_variable(latent_shape, latent_index, decoder)
# Ejemplo n.º 3
# 0
def main():
    """Reconstruct Atari frames with a trained average-filter shallow conv-VAE."""
    # hyperparameters matching the loaded training run
    input_shape = (1, 84, 84)
    filters = 32
    latent_filters = 8
    kernel_size = 6
    epochs = 10
    batch_size = 1
    lr = 1e-4
    beta = 1.0

    # summaries directory containing the pretrained weights
    # NOTE(review): `experiment` is not defined in this function; it is
    # assumed to exist at module scope -- confirm before running.
    run = 'cvae_atari_28_May_16_14_52_batch_size_1_beta_1_epochs_12_filters_32_kernel_size_7_latent_filters_8_loss_vae_loss_lr_0.0001_optimizer_adam'
    log_dir = './summaries/' + experiment + '/' + run + '/'

    # build the network and restore its trained weights
    vae = ConvolutionalLatentAverageFilterShallowVAE(
        input_shape,
        log_dir,
        filters=filters,
        latent_filters=latent_filters,
        kernel_size=kernel_size,
        beta=beta)
    vae.load_model()

    # pull out the full model plus the encoder/decoder halves
    model = vae.get_model()
    decoder = vae.get_decoder()
    encoder = vae.get_encoder()

    # collect a fixed, ordered batch of held-out frames (shuffle disabled)
    test_directory = './atari_agents/record/test/'
    frame_gen = utils.atari_generator(test_directory,
                                      batch_size=1,
                                      shuffle=False)
    n_frames = 330
    X_test = np.asarray([next(frame_gen)[0][0] for _ in range(n_frames)])

    # plot originals alongside their reconstructions
    sampling.encode_decode_sample(X_test, model)
    plt.show()
def main():
    """Show MNIST reconstructions from a trained dense autoencoder."""
    # hyperparameters matching the loaded training run
    img_channels = 1
    input_shape = (img_channels, 28, 28)
    epochs = 50
    batch_size = 1
    lr = 1e-4

    # model name
    name = 'autoencoder_mnist'

    # hyperparameter dictionary (kept for parity with the training script)
    hp_dictionary = {
        'epochs': epochs,
        'batch_size': batch_size,
        'lr': lr,
        'loss': 'vae_loss',
        'optimizer': 'adam'
    }

    # summaries directory containing the pretrained weights
    # NOTE(review): `experiment` is not defined in this function; it is
    # assumed to exist at module scope -- confirm before running.
    run = 'autoencoder_mnist_31_May_19_54_30_batch_size_1_epochs_60_loss_vae_loss_lr_0.0001_optimizer_adam'
    log_dir = './summaries/' + experiment + '/' + run + '/'

    # build the autoencoder and restore its trained weights
    autoencoder = DenseAutoencoder(input_shape, log_dir)
    autoencoder.load_model()

    # pull out the full model plus the encoder/decoder halves
    model = autoencoder.get_model()
    decoder = autoencoder.get_decoder()
    encoder = autoencoder.get_encoder()

    # MNIST data and per-sample generators
    (X_train, _), (X_test, _) = utils.load_mnist()
    train_generator = utils.make_generator(X_train, batch_size=batch_size)
    test_generator = utils.make_generator(X_test, batch_size=batch_size)

    # plot originals alongside their reconstructions
    sampling.encode_decode_sample(X_test, model, input_shape=(1, 28, 28))
    plt.show()
def main():
    """Reconstruct Pong frames with a trained dense-latent VAE."""
    # hyperparameters matching the loaded training run
    input_shape = (1, 84, 84)
    epochs = 10
    batch_size = 1
    beta = 1.0
    filters = 32
    kernel_size = 6
    pre_latent_size = 512
    latent_size = 32

    # summaries directory containing the pretrained weights
    # (unlike the sibling scripts, this one defines `experiment` itself)
    experiment = 'experiment_optimal_network_dense_latent_pong'
    run = 'cvae_atari_dense_latent_pong_reconstruction_only_15_May_15_50_14_batch_size_1_beta_0.0_epochs_20_filters_32_kernel_size_6_loss_vae_loss_optimizer_adam'
    log_dir = './summaries/' + experiment + '/' + run + '/'

    # build the network and restore its trained weights
    vae = DenseLatentPong(input_shape,
                          log_dir,
                          filters=filters,
                          kernel_size=kernel_size,
                          pre_latent_size=pre_latent_size,
                          latent_size=latent_size,
                          beta=beta)
    vae.load_model()

    # pull out the full model plus the encoder/decoder halves
    model = vae.get_model()
    decoder = vae.get_decoder()
    encoder = vae.get_encoder()

    # collect a fixed batch of held-out frames from the recorded-agent generator
    test_directory = './atari_agents/record/test/'
    frame_gen = utils.atari_generator(test_directory, batch_size=1)
    n_frames = 100
    X_test = np.asarray([next(frame_gen)[0][0] for _ in range(n_frames)])

    # plot originals alongside their reconstructions
    sampling.encode_decode_sample(X_test, model)
# Ejemplo n.º 6
# 0
def main():
    """Reconstruct Frey-faces images with a trained convolutional-latent VAE."""
    # hyperparameters matching the loaded training run
    input_shape = (1, 28, 20)
    filters = 32
    latent_filters = 1
    kernel_size = 2
    pool_size = 2
    lr = 1e-4
    beta = 1.0
    batch_size = 1

    # summaries directory containing the pretrained weights
    # NOTE(review): `experiment` is not defined in this function; it is
    # assumed to exist at module scope -- confirm before running.
    run = 'cvae_frey_20_May_17_07_04_batch_size_1_beta_1_epochs_20_filters_32_kernel_size_2_latent_filters_1_loss_vae_loss_lr_0.0001_optimizer_adam'
    log_dir = './summaries/' + experiment + '/' + run + '/'

    # build the network and restore its trained weights
    vae = FreyConvolutionalLatentSpaceNoBatchNormVAE(
        input_shape,
        log_dir,
        filters=filters,
        latent_filters=latent_filters,
        kernel_size=kernel_size,
        pool_size=pool_size,
        beta=beta)
    vae.load_model()

    # pull out the full model plus the encoder/decoder halves
    model = vae.get_model()
    decoder = vae.get_decoder()
    encoder = vae.get_encoder()

    # Frey dataset plus per-sample generators and sizes
    (X_train, _), (X_test, _) = utils.load_frey()
    train_generator = utils.make_generator(X_train, batch_size=batch_size)
    test_generator = utils.make_generator(X_test, batch_size=batch_size)
    train_size = len(X_train)
    test_size = len(X_test)

    # plot originals alongside their reconstructions
    sampling.encode_decode_sample(X_test, model)
# Ejemplo n.º 7
# 0
def main():
    """Save reconstruction figures and the mean latent activation for a
    trained latent-image VAE."""
    # hyperparameters matching the loaded training run
    input_shape = (1, 84, 84)
    filters = 32
    latent_filters = 8
    kernel_size = 6
    epochs = 10
    batch_size = 1
    lr = 1e-4
    img_channels = 1
    beta = 1.0

    # summaries directory containing the pretrained weights
    # NOTE(review): `experiment` is not defined in this function; it is
    # assumed to exist at module scope -- confirm before running.
    run = 'cvae_atari_average_filter_19_Jun_05_05_40_batch_size_1_beta_4_epochs_10_filters_32_img_channels_1_kernel_size_6_latent_filters_8_loss_vae_loss_lr_0.0001_optimizer_adam'
    log_dir = './summaries/' + experiment + '/' + run + '/'

    # build the network and restore its trained weights
    vae = LatentImage(input_shape,
                      log_dir,
                      filters=filters,
                      kernel_size=kernel_size,
                      img_channels=img_channels,
                      beta=beta)
    vae.load_model()

    # pull out the full model plus the encoder/decoder halves
    model = vae.get_model()
    decoder = vae.get_decoder()
    encoder = vae.get_encoder()

    # collect a fixed, ordered batch of held-out frames (shuffle disabled)
    test_directory = './atari_agents/record/test/'
    frame_gen = utils.atari_generator(test_directory, batch_size=1, shuffle=False)
    n_frames = 1000
    X_test = np.asarray([next(frame_gen)[0][0] for _ in range(n_frames)])

    # save original-vs-reconstruction figures for the first four samples
    for sample_number in range(4):
        sampling.encode_decode_sample(X_test, model, sample_number=sample_number, save=True, save_path='/home/dane/Documents/Thesis/thesis/figures/results/latent_image/', base='beta_4_')
    # plt.show()

    # save original frame + latent image for selected samples (disabled)
    # for sample_number in [30, 70, 10, 90]:
    #     plt.figure(1)
    #     plt.xticks([], [])
    #     plt.yticks([], [])
    #     plt.imshow(X_test[sample_number][0])
    #     plt.gray()
    #     plt.savefig('/home/dane/Documents/Thesis/thesis/figures/results/latent_image/beta_' + str(int(beta)) + '_sample_' + str(sample_number) + '_original.png', bbox_inches='tight')
    #     # plt.show()
    #     x_encoded = encoder.predict(np.asarray([X_test[sample_number]]))  # shape (1, num_filters, width, height)
    #     x_encoded = x_encoded[0]  # shape (num_filters, width, height)
    #     plt.figure(2)
    #     plt.xticks([], [])
    #     plt.yticks([], [])
    #     plt.imshow(x_encoded[0])
    #     plt.gray()
    #     plt.savefig('/home/dane/Documents/Thesis/thesis/figures/results/latent_image/beta_' + str(int(beta)) + '_sample_' + str(sample_number) + '_latent.png', bbox_inches='tight')
    # plt.show()

    # sample from the prior (disabled)
    # sampling.decode_prior_samples(4, decoder, latent_shape=(1, 1, 8, 8), save=True, save_path='/home/dane/Documents/Thesis/thesis/figures/results/latent_image/', base='beta_1_')
    # plt.show()

    # sample from the posterior (disabled)
    # num_iter = 1000
    # sampling.sample_posterior(X_test, model, num_iter, show_every=1, save=True, save_path='/home/dane/Documents/Thesis/thesis/figures/results/latent_image/', base='beta_1_')

    # perturb a single latent filter (disabled)
    # latent_shape = (1, 8, 8, 8)
    # filter_index = 0
    # sampling.change_latent_filter(X_test,
    #                             latent_shape,
    #                             filter_index,
    #                             encoder,
    #                             decoder,
    #                             num_samples=10,
    #                             init_sample_num=0,
    #                             noise_factor=1.0,
    #                             std_dev=1.0,
    #                             mean=0.0)

    # plot and save the mean activation over the latent space
    sampling.plot_mean_latent_activation(X_test, encoder, 1, 1)
    plt.savefig('/home/dane/Documents/Thesis/thesis/figures/results/latent_image/' + 'beta_1_' + 'average_activation' + '.png', bbox_inches='tight')
# Ejemplo n.º 8
# 0
def main():
    """Show reconstructions and mean latent activation for a trained
    average-filter shallow conv-VAE (beta = 2)."""
    # hyperparameters matching the loaded training run
    input_shape = (1, 84, 84)
    filters = 32
    latent_filters = 8
    kernel_size = 6
    epochs = 10
    batch_size = 1
    lr = 1e-4
    beta = 2.0

    # summaries directory containing the pretrained weights
    # NOTE(review): `experiment` is not defined in this function; it is
    # assumed to exist at module scope -- confirm before running.
    run = 'cvae_atari_29_May_05_54_36_batch_size_1_beta_5_epochs_13_filters_32_kernel_size_7_latent_filters_8_loss_vae_loss_lr_0.0001_optimizer_adam'
    log_dir = './summaries/' + experiment + '/' + run + '/'

    # build the network and restore its trained weights
    vae = ConvolutionalLatentAverageFilterShallowVAE(input_shape,
                                                     log_dir,
                                                     filters=filters,
                                                     latent_filters=latent_filters,
                                                     kernel_size=kernel_size,
                                                     beta=beta)
    vae.load_model()

    # pull out the full model plus the encoder/decoder halves
    model = vae.get_model()
    decoder = vae.get_decoder()
    encoder = vae.get_encoder()

    # collect a fixed, ordered batch of held-out frames (shuffle disabled)
    test_directory = './atari_agents/record/test/'
    frame_gen = utils.atari_generator(test_directory, batch_size=1, shuffle=False)
    n_frames = 1756
    X_test = np.asarray([next(frame_gen)[0][0] for _ in range(n_frames)])

    # plot originals alongside their reconstructions
    sampling.encode_decode_sample(X_test, model)
    plt.show()

    # show convolutional latent filters (disabled)
    # sampling.show_convolutional_layers(X_test, encoder, 4, 2, threshold=True, threshold_val=0.0)
    # plt.show()

    # sample from the prior (disabled)
    # sampling.decode_prior_samples(5, decoder, latent_shape=(1, 8, 7, 7))
    # plt.show()

    # sample from the posterior (disabled)
    # num_iter = 100
    # sampling.sample_posterior(X_test, model, num_iter, show_every=1)

    # perturb a single latent filter (disabled)
    # latent_shape = (1, 8, 8, 8)
    # filter_index = 6
    # sampling.change_latent_filter(X_test,
    #                             latent_shape,
    #                             filter_index,
    #                             encoder,
    #                             decoder,
    #                             num_samples=10,
    #                             init_sample_num=0,
    #                             noise_factor=1.0,
    #                             std_dev=1.0,
    #                             mean=0.0)

    # plot the mean activation over the latent space
    sampling.plot_mean_latent_activation(X_test, encoder, 4, 2, threshold=True, threshold_val=0.0)
    plt.show()