def run_autoencoder(optimizer):
    """Train the autoencoder with the requested optimizer and plot results.

    Parameters
    ----------
    optimizer : str
        Name of the optimization algorithm: ``'adam'`` selects Adam with
        learning_rate=0.03; any other value selects RMSProp with
        learning_rate=0.05.

    """
    # Map the optimizer name onto a configured optimizer instance.
    if optimizer == 'adam':
        chosen_optimizer = Adam(learning_rate=0.03)
    else:
        chosen_optimizer = RMSProp(learning_rate=0.05)

    train_matrix, val_matrix = get_training_and_val_data()

    # Input width of the network matches the feature dimension of the data.
    model = Autoencoder(input_dim=train_matrix.shape[1])
    model.print_summary()
    model.compile(chosen_optimizer)

    # Targets equal inputs: the autoencoder learns to reconstruct its input.
    errors = model.fit(train_matrix,
                       train_matrix,
                       num_epochs=60,
                       val_set=(val_matrix, val_matrix),
                       early_stopping=True)
    plot_losses(errors['training'], errors['validation'])

    # Plot the learning-rate history recorded by the first layer's optimizer;
    # reference_index selects which neuron's rates are shown.
    first_layer_optimizer = model.model.layers[0].optimizer
    plot_learning_rates(first_layer_optimizer.learning_rates['weights'],
                        first_layer_optimizer.learning_rates['bias'],
                        first_layer_optimizer.reference_index)
def train(x_train, learning_rate, batch_size, epochs):
    """Build, compile, and train a convolutional autoencoder on *x_train*.

    Architecture is fixed: 28x28 single-channel inputs, four conv layers
    (two with stride 2 for downsampling) and a 2-D latent space.
    Returns the trained ``Autoencoder`` instance.
    """
    model = Autoencoder(input_shape=(28, 28, 1),
                        conv_filters=(32, 64, 64, 64),
                        conv_kernels=(3, 3, 3, 3),
                        conv_strides=(1, 2, 2, 1),
                        latent_space_dim=2)
    model.summary()
    model.compile(learning_rate)
    model.train(x_train, batch_size, epochs)
    return model
# Ejemplo n.º 3 — 0  (example-site separator, commented out so the module parses)
def train_grid(x_train,
               learning_rate,
               batch_size,
               epochs,
               latent_space_dim=32):
    """Train an autoencoder on 23x23x5 grid data.

    All four conv layers use stride 1 (no spatial downsampling); the
    latent dimension is configurable and is embedded in the model name.
    Returns the trained ``Autoencoder`` and its training history.
    """
    grid_ae = Autoencoder(input_shape=(23, 23, 5),
                          conv_filters=(16, 32, 32, 32),
                          conv_kernels=(3, 3, 3, 3),
                          conv_strides=(1, 1, 1, 1),
                          latent_space_dim=latent_space_dim,
                          name=f"Autoencoder_CNN_Grid_{latent_space_dim}")
    grid_ae.summary()
    grid_ae.compile(learning_rate)
    training_history = grid_ae.train(x_train, batch_size, epochs)
    return grid_ae, training_history
# Ejemplo n.º 4 — 0  (example-site separator, commented out so the module parses)
# Script: build, train, and persist a convolutional autoencoder on MNIST.
RUN_FOLDER = 'run'

# Only the training images are used below; labels and the test split are
# unpacked but unused (an autoencoder needs no labels).
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Add a trailing channel axis and scale pixel values into [0, 1].
x_train = x_train[:, :, :, np.newaxis].astype('float32') / 255.

# Mirrored encoder/decoder: the decoder's transposed-conv filters and strides
# reverse the encoder's, ending with 1 output channel; 2-D latent space.
AE = Autoencoder(input_dim=(28, 28, 1),
                 encoder_conv_filters=[32, 64, 64, 64],
                 encoder_conv_kernel_size=[3, 3, 3, 3],
                 encoder_conv_strides=[1, 2, 2, 1],
                 decoder_conv_t_filters=[64, 64, 32, 1],
                 decoder_conv_t_kernel_size=[3, 3, 3, 3],
                 decoder_conv_t_strides=[1, 2, 2, 1],
                 z_dim=2)

# Save the model definition before training, then print both sub-networks.
AE.save(RUN_FOLDER)
AE.encoder.summary()
AE.decoder.summary()

LEARNING_RATE = 0.0005
BATCH_SIZE = 8
INITIAL_EPOCH = 0  # start from scratch (no checkpoint resume)

AE.compile(LEARNING_RATE)

# MNIST's training split has 60000 images, so the slice takes the full set.
AE.train(x_train[:60000],
         batch_size=BATCH_SIZE,
         epochs=10,
         run_folder=RUN_FOLDER,
         initial_epoch=INITIAL_EPOCH)
AE.save_networks(RUN_FOLDER)