# Example #1
print('Validation steps per epoch {}'.format(validation_steps_per_epoch))

# Filename the trained weights are written to (0.2 dropout, batch norm,
# new augmentation -- encoded in the name for experiment tracking).
model_file = 'model_combined_last_0.2_drop_batch_new_augmentation.h5'
# Initializing a KerasModel instance with the NVIDIA end-to-end driving
# architecture. BATCH_NORM / MULTIVARIANT / GRAY are module-level config
# flags; load=False builds a fresh model instead of restoring model_file.
# NOTE(review): first positional arg `1` is presumably the output dimension
# (single steering value) -- confirm against KerasModel's signature.
k_model = KerasModel(1,
                     keras_model.NVIDIA_ARCHITECTURE,
                     dropout=0.2,
                     batch_norm=BATCH_NORM,
                     model_file=model_file,
                     multivariant=MULTIVARIANT,
                     gray=GRAY,
                     load=False)
# Training the KerasModel with the train/validation generators and saving
# the resulting weights to model_file; returns the training history.
model_history = k_model.train_model_with_generator(train_generator,
                                                   train_steps_per_epoch,
                                                   EPOCHS,
                                                   validation_generator,
                                                   validation_steps_per_epoch,
                                                   save_model_filepath=model_file)
# Alternative: transfer-learning run on top of InceptionV3 (kept for reference).
# model_history = k_model.train_learned_model_with_generator(train_generator,
#                                                            train_steps_per_epoch,
#                                                            EPOCHS,
#                                                            validation_generator,
#                                                            validation_steps_per_epoch,
#                                                            save_model_filepath='model_transfer_Inceptionv3.h5')
# Plotting the training/validation loss curves from the history object.
utils.plot_loss(model_history=model_history)

# Track 1 layers visualization (disabled): reload a saved model and feed a
# sample center-camera frame to inspect intermediate layer activations.
# k_model = KerasModel(load=True, model_file='./models/model_modular_nvidia.h5')
# test_image = np.asarray(Image.open(
#     './assets/Layer_visualization/Track1/center_2018_05_07_18_39_19_350.jpg'))
# Example #2
# # Flattening the Images after the convolutional steps
# model.add(Flatten())
# # Fist dense layer
# model.add(Dense(120))
# # Second dense layer
# model.add(Dense(84))
# # Logits layer
# model.add(Dense(1))
# # Defining the loss function and optimizer
# model.compile(loss='mse', optimizer='adam')

# Steps per epoch: each sample appears to contribute 3 camera images
# (center/left/right), each doubled by flip augmentation -- hence *3*2.
# NOTE(review): assumes the generators apply exactly this augmentation;
# verify the factor against the generator implementation.
# (Renamed from the misspelled `training_lenght` for consistency with
# `validation_length`.)
training_length = math.ceil((len(train_samples) * 3 * 2) / BATCHSIZE)
validation_length = math.ceil((len(validation_samples) * 3 * 2) / BATCHSIZE)
# Build a LeNet-architecture model (single output) and train it with the
# generator API, saving the weights to 'model_modular.h5'.
k_model = KerasModel(1, keras_model.LENET_ARCHITECTURE)
model_history = k_model.train_model_with_generator(train_generator,
                                                   training_length,
                                                   EPOCHS,
                                                   validation_generator,
                                                   validation_length,
                                                   save_model_filepath='model_modular.h5')
# Plot the training/validation loss curves from the returned history.
plot_loss(model_history=model_history)