# 示例#1 (Example #1)
# 0
from mas_tools.models.autoencoders import deep_conv2d_vae
from mas_tools.ml import save_model_arch

if __name__ == "__main__":
    # Destination directory for the exported architecture files.
    path = 'E:/Projects/market-analysis-system/mas_vae/'

    # Build a conv2d VAE for 80x80 RGB inputs; the fourth return value
    # (the VAE loss) is not needed when only exporting architectures.
    enc, dec, ae, _ = deep_conv2d_vae((80, 80, 3),
                                      latent_dim=60,
                                      filters_count=(3, 15),
                                      dropout=0.3)

    # Save each sub-model's architecture and print its layer summary,
    # in the same order as before: encoder, decoder, full autoencoder.
    for model, suffix in ((enc, 'ae_enc'), (dec, 'ae_dec'), (ae, 'ae')):
        save_model_arch(model, path + suffix)
        model.summary()
    # compressed npz
    # NOTE(review): this fragment starts inside a loop whose header is not
    # visible here — presumably iterating over .npz filenames in dt_path;
    # confirm against the full script.
    npz_file = np.load(dt_path + filename)
    # 'arr_0' is numpy's default key for a single unnamed array saved via savez.
    new_data = npz_file.f.arr_0
    new_data = np.reshape(new_data, (len(new_data), img_width, img_width, 3))
    x_data = np.vstack((x_data, new_data))
# clean first row — presumably x_data was seeded with a placeholder row so
# np.vstack had something to stack onto; verify against the initialization.
x_data = x_data[1:]
# normalize images data: scale pixel values into [0, 1] as float32
x_data = x_data.astype('float32') / 255
print('New data shape:', x_data.shape)

## Build VAE
print('Build autoencoder...')
# deep_conv2d_vae returns four parts: encoder, decoder, the full
# autoencoder model, and a VAE loss object (currently unused below).
vae_parts = deep_conv2d_vae(
    (img_width, img_width, 3),
    filters_count=filters,
    latent_dim=code,
    dropout=dropout)
encoder, decoder, autoencoder, vae_loss = vae_parts
# TODO How to use vae_loss?
# print(type(vae_loss))
autoencoder.compile(optimizer='rmsprop', loss='mse', metrics=['acc'])

## Train or do prediction
# Resume from previously saved weights when continuing training ('train2')
# or running inference ('predict'); 'train1' starts from scratch.
if action in ['train2', 'predict']:
    autoencoder.load_weights(wgt_path + model_name + '.hdf5', by_name=True)
if action in ['train1', 'train2']:
    print('Train model...')
    # Optional Keras callbacks, currently disabled.
    # NOTE(review): the rest of this training branch is cut off in this
    # fragment — the fit() call is presumably below.
    # reduce_lr = ReduceLROnPlateau(factor=0.1, patience=3, min_lr=0.00001, verbose=1)
    # chpt = ModelCheckpoint(wgt_path + model_name + '_{}.hdf5')
    # tb = TensorBoard()
# 示例#3 (Example #3)
# 0
    # NOTE(review): this fragment starts mid-loop — the loop header that
    # opens npz_file is not visible here; presumably iterating over
    # compressed .npz files, as in the similar block above.
    # 'arr_0' is numpy's default key for a single unnamed array saved via savez.
    new_data = npz_file.f.arr_0
    new_data = np.reshape(new_data, (len(new_data), img_width, img_width, 3))
    data = np.vstack((data, new_data))
# clean first row — presumably data was seeded with a placeholder row so
# np.vstack had something to stack onto; verify against the initialization.
data = data[1:]
# normalize images data: scale pixel values into [0, 1] as float32
data = data.astype('float32') / 255

print('Data shape: {}'.format(data.shape))

#====== Build encoder ======
print('Build encoder layers...')
# Weight-file name encodes window size, the two filter counts and the
# latent code size, matching the naming used when the weights were saved.
model_name = 'vae_img{}_flt{}-{}_code{}_enc'.format(window, filters[0],
                                                    filters[1], code)
# Only the encoder part of the VAE is needed here (first element of the
# (encoder, decoder, autoencoder, loss) tuple).
encoder = deep_conv2d_vae((img_width, img_width, 3),
                          filters_count=filters,
                          latent_dim=code,
                          dropout=dropout)[0]

encoder.compile(optimizer='rmsprop', loss='mse')
encoder.load_weights(wgt_path1 + model_name + '.hdf5', by_name=True)
# Freeze the encoder: it is used only as a fixed feature extractor below.
encoder.trainable = False

# generate output — the encoder yields three outputs; only the last one is
# kept (typically the sampled latent vector z in a VAE — confirm).
_, _, y_data = encoder.predict(data, batch_size=batch)
# Shift by one step so each input row is paired with the NEXT row's
# latent vector.
data, y_data = data[:-1], y_data[1:]
x_train, x_test, y_train, y_test = train_test_split(data,
                                                    y_data,
                                                    shuffle=False,
                                                    test_size=0.1)
# clear memory