def main():
    # inputs
    input_shape = (1, 28, 20)
    filters = 32
    latent_channels = 1
    kernel_size = 2
    beta = 1.0  # entangled latent space
    epochs = 10
    batch_size = 1

    # log directory
    experiment = 'experiment_optimal_network_convolutional_latent_frey'
    run = 'cvae_frey_entangled_with_fully_connected_filters_same_borders_15_May_13_50_05_batch_size_1_beta_1.0_epochs_20_filters_32_kernel_size_2_latent_channels_64_loss_vae_loss_optimizer_adam'
    log_dir = './summaries/' + experiment + '/' + run + '/'

    # define model
    vae = FreyOptimalConvolutionalLatentExperimentFullyConnectedFilterVAE(
        input_shape,
        log_dir,
        filters=filters,
        latent_channels=latent_channels,
        kernel_size=kernel_size,
        beta=beta)

    # load weights
    vae.load_model()

    # extract models
    model = vae.get_model()
    decoder = vae.get_decoder()
    encoder = vae.get_encoder()

    # get dataset
    (X_train, _), (X_test, _) = utils.load_frey()
    train_generator = utils.make_generator(X_train, batch_size=batch_size)
    test_generator = utils.make_generator(X_test, batch_size=batch_size)
    train_size = len(X_train)
    test_size = len(X_test)

    # show original and reconstruction
    # sampling.encode_decode_sample(X_test, model)

    # plot filters
    # sampling.show_convolutional_layers(X_test, encoder, 8, 8)

    # sample from prior
    # sampling.decode_prior_samples(5, decoder, latent_shape=(1, 64, 1, 1))

    # sample from posterior
    num_iter = 1000
    sampling.sample_posterior(X_test, model, num_iter, show_every=5)
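# sampling.sample_posterior is a project helper that is not shown in this listing.
# Purely as a hypothetical sketch of what it might do (iterated encode/decode of a
# test image, displayed every `show_every` steps): the channels-first (1, 1, 28, 20)
# shape and the matplotlib plotting below are assumptions, not the project's code.
import numpy as np
import matplotlib.pyplot as plt


def sample_posterior_sketch(X, model, num_iter, show_every=1):
    # start the chain from a randomly chosen test image (batch of one)
    sample = X[np.random.randint(len(X))][np.newaxis, ...]
    for i in range(num_iter):
        # one full encode/decode pass through the VAE
        sample = model.predict(sample)
        if i % show_every == 0:
            plt.imshow(sample.reshape(28, 20), cmap='gray')
            plt.title('posterior chain, iteration %d' % i)
            plt.show()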
Example #2
def main():
    # inputs
    input_shape = (1, 28, 20)
    filters = 32
    latent_filters = 1
    kernel_size = 2
    pool_size = 2
    lr = 1e-4
    beta = 1.0
    batch_size = 1

    # log directory ('experiment' is assumed to be defined at module level)
    run = 'cvae_frey_20_May_17_07_04_batch_size_1_beta_1_epochs_20_filters_32_kernel_size_2_latent_filters_1_loss_vae_loss_lr_0.0001_optimizer_adam'
    log_dir = './summaries/' + experiment + '/' + run + '/'

    # define model
    vae = FreyConvolutionalLatentSpaceNoBatchNormVAE(
        input_shape,
        log_dir,
        filters=filters,
        latent_filters=latent_filters,
        kernel_size=kernel_size,
        pool_size=pool_size,
        beta=beta)

    # load weights
    vae.load_model()

    # extract models
    model = vae.get_model()
    decoder = vae.get_decoder()
    encoder = vae.get_encoder()

    # get dataset
    (X_train, _), (X_test, _) = utils.load_frey()
    train_generator = utils.make_generator(X_train, batch_size=batch_size)
    test_generator = utils.make_generator(X_test, batch_size=batch_size)
    train_size = len(X_train)
    test_size = len(X_test)

    # show original and reconstruction
    sampling.encode_decode_sample(X_test, model)
Example #3
vae = FreyVAE((1, 28, 20), log_dir)

# load architecture and weights
vae.load_model()

# extract models
model = vae.get_model()
encoder = vae.get_encoder()
decoder = vae.get_decoder()

# print summaries
vae.print_model_summaries()
'''
Load data
'''
(_, _), (X_test, _) = utils.load_frey()
'''
Sampling functions
'''


import numpy as np
import matplotlib.pyplot as plt


def __decode_prior_samples(num_samples, latent_shape=(1, 2)):
    # take num_samples samples
    for i in range(num_samples):
        # sample from the standard normal prior
        prior_sample = np.random.normal(size=latent_shape, loc=0.0, scale=1.0)
        # decode sample
        sample_decoded = decoder.predict(prior_sample)
        # plot decoded sample (assuming the decoder returns a single 28x20 face)
        plt.figure()
        plt.imshow(sample_decoded.reshape(28, 20), cmap='gray')
        plt.title('prior sample %d' % i)
        plt.show()
Example #4
def train_reconstruction_only_frey_network_with_image_latent_space(latent_filters):
    # inputs
    input_shape = (1, 28, 20)
    filters = 32
    kernel_size = 2
    pool_size = 2
    beta = 1.0
    lr = 1e-4
    epochs = 20
    batch_size = 1

    # define filename
    name = 'cvae_frey'

    # build hyperparameter dictionary
    hp_dictionary = {
        'epochs': epochs,
        'batch_size': batch_size,
        'filters': filters,
        'latent_filters': latent_filters,
        'kernel_size': kernel_size,
        'beta': beta,
        'lr': lr,
        'loss': 'vae_loss',
        'optimizer': 'adam'
    }

    # define log directory ('experiment' is assumed to be defined at module level)
    log_dir = './summaries/' + experiment + '/' + utils.build_hyperparameter_string(name, hp_dictionary) + '/'

    # make VAE
    vae = FreyConvolutionalLatentSpaceNoBatchNormVAE(
        input_shape,
        log_dir,
        filters=filters,
        latent_filters=latent_filters,
        kernel_size=kernel_size,
        pool_size=pool_size,
        beta=beta)

    # compile VAE
    from keras import optimizers
    optimizer = optimizers.Adam(lr=lr)
    vae.compile(optimizer=optimizer)

    # get dataset
    (X_train, _), (X_test, _) = utils.load_frey()
    train_generator = utils.make_generator(X_train, batch_size=batch_size)
    test_generator = utils.make_generator(X_test, batch_size=batch_size)
    train_size = len(X_train)
    test_size = len(X_test)

    # print summaries
    vae.print_model_summaries()

    # fit VAE
    steps_per_epoch = int(train_size / batch_size)
    validation_steps = int(test_size / batch_size)
    vae.fit_generator(train_generator,
                      epochs=epochs,
                      steps_per_epoch=steps_per_epoch,
                      validation_data=test_generator,
                      validation_steps=validation_steps)
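# Each training example feeds Keras's fit_generator with utils.make_generator.
# The helper itself is not shown in this listing; a minimal hypothetical sketch,
# assuming it simply yields (input, target) batches with target == input (what a
# VAE's reconstruction loss expects), would look like this:
def make_generator_sketch(X, batch_size=1):
    num_samples = len(X)
    while True:
        for start in range(0, num_samples, batch_size):
            batch = X[start:start + batch_size]
            # reconstruction target is the input itself
            yield batch, batch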
Example #5
def run_convolutional_latent_space_with_same_number_of_parameters():
    '''
    Latent shape: (128, 5, 3)
    Number of parameters in network: 119,521
    Number of parameters in latent space: 32,896
    '''
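    # Note on the figures above (an assumption, not stated in the source):
    # 32,896 = 2*2*64*128 + 128, i.e. one 2x2 convolution mapping 64 feature maps
    # to the 128 latent channels; a dense 256 -> 128 latent layer has exactly the
    # same count (256*128 + 128 = 32,896), which is presumably the parameter
    # budget this experiment is matching.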

    # inputs
    input_shape = (1, 28, 20)
    epochs = 1
    batch_size = 1
    beta = 1.0
    filters = 32
    kernel_size = 2
    latent_channels = 128
    pool_size = 2

    # define filename
    name = 'cvae_frey_convolutional_latent'

    # build hyperparameter dictionary
    hp_dictionary = {
        'epochs': epochs,
        'batch_size': batch_size,
        'beta': beta,
        'filters': filters,
        'kernel_size': kernel_size,
        'latent_channels': latent_channels,
        'pool_size': pool_size,
        'loss': 'vae_loss',
        'optimizer': 'adam'
    }

    # define log directory
    log_dir = './summaries/' + utils.build_hyperparameter_string(
        name, hp_dictionary) + '/'

    # make VAE
    vae = FreyConvolutionalLatentVAE(input_shape,
                                     log_dir,
                                     filters=filters,
                                     kernel_size=kernel_size,
                                     latent_channels=latent_channels,
                                     pool_size=pool_size)

    # compile VAE
    from keras import optimizers
    optimizer = optimizers.Adam(lr=1e-3)
    vae.compile(optimizer=optimizer)

    # print summaries
    vae.print_model_summaries()

    # get dataset
    (X_train, _), (X_test, _) = utils.load_frey()
    train_generator = utils.make_generator(X_train, batch_size=batch_size)
    test_generator = utils.make_generator(X_test, batch_size=batch_size)
    train_size = len(X_train)
    test_size = len(X_test)

    # fit VAE
    steps_per_epoch = int(train_size / batch_size)
    validation_steps = int(test_size / batch_size)
    vae.fit_generator(train_generator,
                      epochs=epochs,
                      steps_per_epoch=steps_per_epoch,
                      validation_data=test_generator,
                      validation_steps=validation_steps)
Example #6
def train_entangled_frey_network_with_fully_connected_filters_same_borders_no_pooling_less_filters(
):
    # inputs
    input_shape = (1, 28, 20)
    filters = 8
    latent_channels = 16
    kernel_size = 2
    beta = 1.0  # entangled latent space
    epochs = 20
    batch_size = 1

    # define filename
    name = 'cvae_frey_entangled_with_fully_connected_filters_same_borders_no_pooling_less_filters'

    # build hyperparameter dictionary
    hp_dictionary = {
        'epochs': epochs,
        'batch_size': batch_size,
        'beta': beta,
        'filters': filters,
        'latent_channels': latent_channels,
        'kernel_size': kernel_size,
        'loss': 'vae_loss',
        'optimizer': 'adam'
    }

    # define log directory
    log_dir = './summaries/experiment_optimal_network_convolutional_latent_frey/' + utils.build_hyperparameter_string(
        name, hp_dictionary) + '/'

    # make VAE
    vae = FreyOptimalConvolutionalLatentExperimentFullyConnectedFilterVAE(
        input_shape,
        log_dir,
        filters=filters,
        latent_channels=latent_channels,
        kernel_size=kernel_size,
        beta=beta)

    # compile VAE
    from keras import optimizers
    optimizer = optimizers.Adam(lr=1e-3)
    vae.compile(optimizer=optimizer)

    # get dataset
    (X_train, _), (X_test, _) = utils.load_frey()
    train_generator = utils.make_generator(X_train, batch_size=batch_size)
    test_generator = utils.make_generator(X_test, batch_size=batch_size)
    train_size = len(X_train)
    test_size = len(X_test)

    # print summaries
    vae.print_model_summaries()

    # fit VAE
    steps_per_epoch = int(train_size / batch_size)
    validation_steps = int(test_size / batch_size)
    vae.fit_generator(train_generator,
                      epochs=epochs,
                      steps_per_epoch=steps_per_epoch,
                      validation_data=test_generator,
                      validation_steps=validation_steps)
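# utils.build_hyperparameter_string produces run names like the ones hard-coded in
# the earlier examples. A hypothetical reimplementation, assuming the name is
# followed by a timestamp and the alphabetically sorted key_value pairs joined by
# underscores (the exact formatting of floats may differ in the real helper):
import time


def build_hyperparameter_string_sketch(name, hp_dictionary):
    out = name + '_' + time.strftime('%d_%b_%H_%M_%S')
    for key in sorted(hp_dictionary):
        out += '_' + key + '_' + str(hp_dictionary[key])
    return out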
Example #7
    # make VAE
    vae = FreyVAE(input_shape,
                  log_dir,
                  filters=filters,
                  kernel_size=kernel_size,
                  pre_latent_size=pre_latent_size,
                  latent_size=latent_size)
    
    # compile VAE
    from keras import optimizers
    optimizer = optimizers.Adam(lr=1e-3)
    vae.compile(optimizer=optimizer)

    # print summaries
    vae.print_model_summaries()
    
    # get dataset
    (X_train, _), (X_test, _) = utils.load_frey()
    train_generator = utils.make_generator(X_train, batch_size=batch_size)
    test_generator = utils.make_generator(X_test, batch_size=batch_size)
    train_size = len(X_train)
    test_size = len(X_test)

    # fit VAE
    steps_per_epoch = int(train_size / batch_size)
    validation_steps = int(test_size / batch_size)
    vae.fit_generator(train_generator,
                      epochs=epochs,
                      steps_per_epoch=steps_per_epoch,
                      validation_data=test_generator,
                      validation_steps=validation_steps)