# Build the validation-set generator; jitter is disabled so that
# validation images are not randomly augmented (deterministic evaluation).
valid_batch = BatchGenerator(valid_img, generator_config, jitter=False)

# **Setup a few callbacks and start the training**

# NOTE(review): this monitors the *training* 'loss' even though validation
# data is passed to fit_generator below; 'val_loss' (as used in train())
# would avoid early-stopping/checkpointing on an overfit model -- confirm intent.
early_stop = EarlyStopping(monitor='loss',
                           min_delta=0.001,
                           patience=10,
                           mode='min',
                           verbose=1)

# Save the best-so-far weights after every epoch (period=1).
checkpoint = ModelCheckpoint('weights_yolo_adam_validdata_temp.h5',
                             monitor='loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min',
                             period=1)

optimizer = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
#optimizer = SGD(lr=1e-4, decay=0.0005, momentum=0.9)
#optimizer = RMSprop(lr=1e-5, rho=0.9, epsilon=1e-08, decay=0.0)

model.compile(loss=custom_loss, optimizer=optimizer)

# NOTE(review): 'get_dateset_size' is presumably a (typo'd) method defined on
# BatchGenerator elsewhere in the project -- the name is kept as-is so the
# call still resolves.
model.fit_generator(generator=train_batch.get_generator(),
                    steps_per_epoch=train_batch.get_dateset_size(),
                    epochs=100,
                    verbose=1,
                    validation_data=valid_batch.get_generator(),
                    validation_steps=valid_batch.get_dateset_size(),
                    callbacks=[early_stop, checkpoint])
def train(self,
          train_imgs,       # the list of images to train the model
          valid_imgs,       # the list of images used to validate the model
          train_times,      # the number of times to repeat the training set, often used for small datasets
          valid_times,      # the number of times to repeat the validation set, often used for small datasets
          nb_epoch,         # number of epochs
          learning_rate,    # the learning rate
          batch_size,       # the size of the batch
          warmup_bs,        # number of initial batches to let the model familiarize with the new dataset
          object_scale,
          no_object_scale,
          coord_scale,
          class_scale,
          debug):
    """Compile the model and train it on ``train_imgs``, validating on ``valid_imgs``.

    The four ``*_scale`` weights, ``warmup_bs`` and ``debug`` are stored on
    ``self`` because ``self.custom_loss`` (defined elsewhere on this class)
    reads them during training.
    """
    # Local import: used only to expand '~' in the TensorBoard log path below.
    import os

    self.batch_size = batch_size
    self.warmup_bs = warmup_bs
    self.object_scale = object_scale
    self.no_object_scale = no_object_scale
    self.coord_scale = coord_scale
    self.class_scale = class_scale
    self.debug = debug

    ############################################
    # Compile the model
    ############################################
    optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    self.model.compile(loss=self.custom_loss, optimizer=optimizer)

    ############################################
    # Make train and validation generators
    ############################################
    generator_config = {
        'IMAGE_H': self.input_size,
        'IMAGE_W': self.input_size,
        'GRID_H': self.grid_h,
        'GRID_W': self.grid_w,
        'BOX': self.nb_box,
        'LABELS': self.labels,
        'CLASS': len(self.labels),
        'ANCHORS': self.anchors,
        'BATCH_SIZE': self.batch_size,
        'TRUE_BOX_BUFFER': self.max_box_per_image,
    }

    train_batch = BatchGenerator(train_imgs, generator_config)
    # jitter=False: validation images are not randomly augmented.
    valid_batch = BatchGenerator(valid_imgs, generator_config, jitter=False)

    ############################################
    # Make a few callbacks
    ############################################
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.001,
                               patience=3,
                               mode='min',
                               verbose=1)
    checkpoint = ModelCheckpoint('best_weights.h5',
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min',
                                 period=1)
    # BUG FIX: the TensorBoard callback does not expand '~' itself, so the
    # original literal '~/logs/yolo/' created a directory named '~' in the
    # current working directory instead of logging under the home directory.
    tensorboard = TensorBoard(log_dir=os.path.expanduser('~/logs/yolo/'),
                              histogram_freq=0,
                              write_graph=True,
                              write_images=False)

    ############################################
    # Start the training process
    ############################################
    # NOTE(review): 'get_dateset_size' is a (typo'd) method defined on
    # BatchGenerator elsewhere in the project; the name is kept as-is.
    # steps_per_epoch/validation_steps repeat each set train_times/valid_times
    # times per epoch, which is the intended small-dataset behavior.
    self.model.fit_generator(generator=train_batch.get_generator(),
                             steps_per_epoch=train_batch.get_dateset_size() * train_times,
                             epochs=nb_epoch,
                             verbose=1,
                             validation_data=valid_batch.get_generator(),
                             validation_steps=valid_batch.get_dateset_size() * valid_times,
                             callbacks=[early_stop, checkpoint, tensorboard],
                             max_queue_size=3)