import numpy as np
from keras.layers import Input, Dense, BatchNormalization, Activation
from keras.models import Model


def test_autoencoder():
    """
    Test that all components of the auto-encoder work correctly by
    executing a training run against generated data.
    """
    input_shape = (3,)
    epochs = 1000

    # Generate some random data
    x_train = np.random.rand(100, 3)
    x_test = np.random.rand(30, 3)

    # Define encoder and decoder models
    def create_encoder_model(input_shape):
        model_input = Input(shape=input_shape)
        encoder = Dense(4)(model_input)
        encoder = BatchNormalization()(encoder)
        encoder = Activation(activation='relu')(encoder)
        return Model(model_input, encoder)

    def create_decoder_model(embedding_shape):
        embedding_a = Input(shape=embedding_shape)
        decoder = Dense(3)(embedding_a)
        decoder = BatchNormalization()(decoder)
        decoder = Activation(activation='relu')(decoder)
        return Model(embedding_a, decoder)

    # Create the auto-encoder network. Note that output_shape includes the
    # batch dimension, which must be stripped before it is fed to Input().
    encoder_model = create_encoder_model(input_shape)
    decoder_model = create_decoder_model(encoder_model.output_shape[1:])
    autoencoder = AutoEncoder(encoder_model, decoder_model)

    # Prepare the auto-encoder for training
    autoencoder.compile(loss='binary_crossentropy', optimizer='adam')

    # Evaluate the network before training to establish a baseline
    score_before = autoencoder.evaluate(x_train, x_train)

    # Train the network
    autoencoder.fit(x_train, x_train,
                    validation_data=(x_test, x_test),
                    epochs=epochs)

    # Evaluate the network again
    score_after = autoencoder.evaluate(x_train, x_train)

    # Ensure that the training loss improved as a result of the training
    assert score_before > score_after
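# The AutoEncoder wrapper exercised by the test above is not shown in this
# section. A minimal sketch of what it could look like, assuming it simply
# chains the encoder and decoder into one end-to-end trainable Keras Model;
# the actual class in the project may do more (e.g. expose save/load helpers).
from keras.layers import Input
from keras.models import Model


def AutoEncoder(encoder_model, decoder_model):
    # input_shape includes the batch dimension; strip it for Input()
    model_input = Input(shape=encoder_model.input_shape[1:])
    # Feed the encoder's embedding straight into the decoder
    return Model(model_input, decoder_model(encoder_model(model_input)))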
# Normalize all images
print("\nNormalizing training images")
imgs_train = normalize_img(imgs_train)
print("Number of training images:", len(imgs_train))

# Convert images to a numpy array of the right dimensions
print("\nConverting training images to numpy array of right dimensions")
X_train = np.array(imgs_train).reshape((-1,) + input_shape_model)
print(">>> X_train.shape = " + str(X_train.shape))

# Create generator object for train-time augmentation
completeTrainGen = data_augmentation(X_train, args.bs)

print("\nStart training...")

# Compiling
model.compile(loss=args.loss, optimizer="adam")

# Fitting
model.fit(completeTrainGen, steps_per_epoch,
          n_epochs=args.e, batch_size=args.bs, wandb=args.wandb)

# Saving
model.save_models()
print("Done training")

print("\nCreating embeddings...")
E_train = model.predict(X_train)
E_train_flatten = E_train.reshape((-1, np.prod(output_shape_model)))
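# The helpers normalize_img and data_augmentation used above are defined
# elsewhere in the project. A minimal sketch of plausible implementations,
# assuming plain [0, 1] pixel scaling and a Keras ImageDataGenerator that
# yields (input, target) pairs for auto-encoder training; the real helpers
# may differ.
import numpy as np
from keras.preprocessing.image import ImageDataGenerator


def normalize_img(imgs):
    # Scale raw pixel values into [0, 1]
    return [img.astype("float32") / 255.0 for img in imgs]


def data_augmentation(X, batch_size):
    # Augment the inputs; the auto-encoder target is the image itself
    generator = ImageDataGenerator(rotation_range=10,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   horizontal_flip=True)
    return generator.flow(X, X, batch_size=batch_size)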
if __name__ == "__main__":
    # Load the MNIST dataset
    mnist = tf.keras.datasets.mnist

    # Extract train and validation data (labels are not needed)
    (x_train, _), (x_val, _) = mnist.load_data()

    # Flatten the images and normalize into the range [0, 1]
    x_train = x_train.reshape(-1, 28 * 28) / 255.0
    x_val = x_val.reshape(-1, 28 * 28) / 255.0

    # Init model
    model = AutoEncoder(z_dim=32)

    # Set loss and optimizer type
    model.compile(optimizer='adam', loss='mean_squared_error')

    # Train model
    model.fit(x_train, x_train,
              batch_size=32,
              epochs=20,
              verbose=1,
              validation_data=(x_val, x_val))

    # Show some results
    # =================== PLOTTING ============================
    # Images per row and column
    NUM_IMG_PER_ROW = 10

    # To store selected images
    selected_imgs = []

    # Pick random indexes to visualize; np.random.randint excludes the upper
    # bound, so every index is valid (the deprecated random_integers was
    # inclusive and could produce an out-of-range index)
    indexes = np.random.randint(0, x_train.shape[0], size=(1, NUM_IMG_PER_ROW ** 2))
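# The AutoEncoder(z_dim=...) class used above is not shown in this section.
# A minimal sketch, assuming a tf.keras.Model subclass with a single Dense
# bottleneck of size z_dim and a sigmoid reconstruction back to 784 pixels;
# the real class may be deeper.
import tensorflow as tf


class AutoEncoder(tf.keras.Model):
    def __init__(self, z_dim):
        super().__init__()
        # Compress 784-dimensional inputs down to z_dim latent features
        self.encoder = tf.keras.layers.Dense(z_dim, activation='relu')
        # Reconstruct the flattened 28x28 image from the latent code
        self.decoder = tf.keras.layers.Dense(28 * 28, activation='sigmoid')

    def call(self, inputs):
        return self.decoder(self.encoder(inputs))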
def create_decoder_model(embedding_shape):
    embedding_a = Input(shape=embedding_shape)
    decoder = Dense(1 * 28 * 28)(embedding_a)
    decoder = BatchNormalization()(decoder)
    decoder = Activation(activation='relu')(decoder)
    decoder = Reshape(input_shape)(decoder)
    return Model(embedding_a, decoder)


num_classes = 10
# Rely on early stopping rather than a fixed epoch budget
epochs = 999999

encoder_model = create_encoder_model(input_shape)
# output_shape includes the batch dimension; strip it before reuse as an Input shape
decoder_model = create_decoder_model(encoder_model.output_shape[1:])
autoencoder_network = AutoEncoder(encoder_model, decoder_model)
autoencoder_network.compile(loss='binary_crossentropy',
                            optimizer=keras.optimizers.Adam(),
                            metrics=['accuracy'])

autoencoder_checkpoint_path = "./autoencoder_checkpoint"
autoencoder_callbacks = [
    EarlyStopping(monitor='val_acc', patience=10, verbose=0),
    ModelCheckpoint(autoencoder_checkpoint_path,
                    monitor='val_acc',
                    save_best_only=True,
                    verbose=0)
]

autoencoder_network.fit(x_train, x_train,
                        validation_data=(x_test, x_test),
                        epochs=epochs,
                        callbacks=autoencoder_callbacks)
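# After training, the best-scoring weights (by val_acc) live in the checkpoint
# written by ModelCheckpoint. A sketch of restoring and re-evaluating them,
# assuming the network deserializes with the stock Keras loader (a custom
# AutoEncoder wrapper might require custom_objects to load):
from keras.models import load_model

best_autoencoder = load_model(autoencoder_checkpoint_path)
score = best_autoencoder.evaluate(x_test, x_test)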