Exemplo n.º 1
0
def train(x_train, learning_rate, batch_size, num_epochs):
    """Build a convolutional VAE, fit it on x_train, and return it.

    Architecture (5 conv stages, 3x3 kernels, final stage strides (2, 1))
    is fixed for 256x64 single-channel inputs with a 128-dim latent space.
    """
    vae_model = VAE(
        input_shape=(256, 64, 1),
        conv_filters=(512, 256, 128, 64, 32),
        conv_kernel=(3, 3, 3, 3, 3),
        conv_strides=(2, 2, 2, 2, (2, 1)),
        latent_dim=128,
    )
    # Report the architecture, then compile and run the training loop.
    vae_model.summary()
    vae_model.compile(learning_rate)
    vae_model.train(x_train, batch_size, num_epochs)
    return vae_model
def vae_train():
    """Train a VAE on the 'adi' dataset and persist its weights.

    Loads the training set plus anomaly/normal test sets, holds out the
    first 10 training images for validation, previews one sample, then
    fits for 300 epochs with a step-decay learning rate. Weights are
    saved to disk even if training is interrupted.
    """
    # Training data
    x_train = load_dataset('adi')

    # Test sets: anomalous samples and held-out normal samples
    test_anomaly = load_dataset('vans')
    test_normal = load_dataset('adi_test')

    # Split: first 10 images become the validation set, the rest train.
    trains = x_train[10:]
    valid = x_train[:10]
    print(trains.shape, valid.shape, test_anomaly.shape, test_normal.shape)

    # Sanity check: visualize one training image in grayscale.
    plt.imshow(x_train[10].reshape(256, 256))
    plt.gray()
    plt.show()

    # GPU memory growth config.
    # NOTE(review): this ConfigProto is built but never passed to a
    # session, so it has no effect as written — confirm intent.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    def step_decay(epoch):
        # Halve the learning rate every 8 epochs, starting from 1e-4.
        base_lr = 0.0001
        drop = 0.5
        epochs_per_drop = 8.0
        return base_lr * math.pow(drop,
                                  math.floor((1 + epoch) / epochs_per_drop))

    callbacks = [
        HistoryCheckpoint(filepath='tb/LearningCurve_{history}.png',
                          verbose=1,
                          period=300),
        LearningRateScheduler(step_decay),
    ]

    model = VAE()
    model, loss = model.vae_net()
    # model.load_weights("vae_model.h5")  # optional warm start

    # Loss is attached via add_loss, so compile takes only the optimizer.
    model.add_loss(loss)
    model.compile(optimizer=Adam(lr=0.0001))
    model.summary()

    try:
        model.fit(trains,
                  batch_size=20,
                  epochs=300,
                  callbacks=callbacks,
                  validation_data=(valid, None))
    finally:
        # Save weights even when training is interrupted (e.g. Ctrl-C).
        model.save('weight/vae_model.h5')
Exemplo n.º 3
0

### Build Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Model Parameters
latent_dim = 124  # dimensionality of the VAE latent space
input_dim = len(int2labels_map) - 1  # presumably one channel per label, excluding one reserved token — TODO confirm
dropout = .1  # dropout rate passed into the VAE
maxnorm = None  # optional max-norm kernel constraint (disabled here)
vae_b1 , vae_b2 = .02 , .1  # VAE loss weighting coefficients — semantics defined inside VAE; verify there

# Build Model
model = VAE(latent_dim, input_dim, measures, measure_len, dropout, 
            maxnorm, vae_b1 , vae_b2)
# Build with input shape (batch, measures, measure_len, input_dim);
# the batch dimension is left open (None).
model.build(tf.TensorShape([None, measures, measure_len, input_dim]))
model.summary()


### Train Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Training Parameters
batch_size = 300
epochs = 10

# Cost Function: the model supplies its own loss method.
cost_function = model.vae_loss

# Optimizer and learning_rate schedule:
# exponential decay, lr(t) = lr_0 * decay_rate ** t
lr_0 = .001  # initial learning rate
decay_rate = .998  # multiplicative decay applied per step t
lr_decay = lambda t: lr_0 * decay_rate**t
Exemplo n.º 4
0
# Reference
[1] Kingma, Diederik P., and Max Welling.
"Auto-encoding variational bayes."
https://arxiv.org/abs/1312.6114
'''

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from VAE import VAE
from Encoder import Encoder
from Decoder import Decoder
from Parameters import x_train, x_test, y_test, latent_dim, input_shape, epochs, batch_size
from keras.utils import plot_model
from Util import plot_results
if __name__ == '__main__':
    # Assemble the VAE from its encoder/decoder halves.
    encoder = Encoder(input_data=input_shape)
    decoder = Decoder(input_data=(latent_dim, ))
    vae = VAE(input_data=input_shape, encoder=encoder, decoder=decoder)
    vae.compile(optimizer='adam')
    vae.summary()
    plot_model(vae, to_file='vae_mlp.png', show_shapes=True)

    # Unsupervised training: no targets; the test split only supplies
    # validation loss.
    vae.fit(x=x_train,
            y=None,
            epochs=epochs,
            batch_size=batch_size,
            validation_data=(x_test, None))
    vae.save_weights('vae_mlp_mnist.h5')

    # Plot latent-space / reconstruction results for the trained model.
    models = (encoder, decoder)
    data = (x_test, y_test)
    plot_results(models, data, batch_size=batch_size, model_name="vae_mlp")