Example #1
  def test_validate_callbacks_predefined_callbacks(self):
    supported_predefined_callbacks = [
        callbacks.TensorBoard(),
        callbacks.CSVLogger(filename='./log.csv'),
        callbacks.EarlyStopping(),
        callbacks.ModelCheckpoint(filepath='./checkpoint'),
        callbacks.TerminateOnNaN(),
        callbacks.ProgbarLogger(),
        callbacks.History(),
        callbacks.RemoteMonitor()
    ]

    # These callbacks are supported under distribution strategies and should
    # pass validation against a Keras Optimizer V2 (adam.Adam).
    distributed_training_utils.validate_callbacks(
        supported_predefined_callbacks, adam.Adam())

    unsupported_predefined_callbacks = [
        callbacks.ReduceLROnPlateau(),
        callbacks.LearningRateScheduler(schedule=lambda epoch: 0.001)
    ]

    # Callbacks that adjust the learning rate require a Keras Optimizer V2, so
    # validating them against the V1 AdamOptimizer should raise a ValueError.
    for callback in unsupported_predefined_callbacks:
      with self.assertRaisesRegex(ValueError,
                                  'You must specify a Keras Optimizer V2'):
        distributed_training_utils.validate_callbacks([callback],
                                                      v1_adam.AdamOptimizer())
Example #2
def get_callbacks():
    # Abort training as soon as the loss becomes NaN.
    stop_on_nan_callback = callbacks.TerminateOnNaN()

    # Hold the learning rate at 1e-3 for the first 7 epochs, then decay it
    # exponentially.
    def decay(epoch):
        if epoch < 7:
            return 0.001
        else:
            return 0.001 * np.exp(0.1 * (7 - epoch))

    learning_rate_scheduler = callbacks.LearningRateScheduler(decay)

    return [stop_on_nan_callback, learning_rate_scheduler]
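
The list returned by get_callbacks() would normally be handed straight to model.fit. A minimal usage sketch, assuming a compiled tf.keras model and NumPy training arrays (the model, x_train, and y_train below are illustrative, not part of the original example):

import numpy as np
from tensorflow import keras

model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

x_train = np.random.rand(64, 4)
y_train = np.random.rand(64, 1)

# TerminateOnNaN aborts training on a NaN loss; LearningRateScheduler applies
# the decay schedule at the start of every epoch.
model.fit(x_train, y_train, epochs=10, callbacks=get_callbacks(), verbose=0)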
Example #3
def create_all_services(artifact_dir: str,
                        cfg_services: dict) -> List[callbacks.Callback]:
    """Create all services (callbacks).

    Args:
        artifact_dir: str, path to artifact directory.
        cfg_services: dict, services subsection of config.

    Returns:
        list[Callback], all services.
    """
    return [
        _create_best_checkpoint(artifact_dir, cfg_services),
        _create_resume_checkpoint(artifact_dir),
        _create_tensorboard(artifact_dir, cfg_services),
        _create_csv_logger(artifact_dir),
        _create_train_early_stopping(cfg_services),
        _create_validation_early_stopping(cfg_services),
        callbacks.TerminateOnNaN(),
    ]
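
The _create_* helpers are not included in this excerpt. A hypothetical sketch of how the assembled service list might be consumed (config, train_dataset, and val_dataset are illustrative names, not from the original):

# Hypothetical usage: wire the created callbacks into training.
services = create_all_services(artifact_dir="artifacts/run-01",
                               cfg_services=config["services"])
model.fit(train_dataset,
          validation_data=val_dataset,
          epochs=config["epochs"],
          callbacks=services)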
Example #4
# Use the first 12,000 VOC2012 images for training and the rest for testing.
all_images = glob("VOCdevkit/VOC2012/JPEGImages/*.jpg")
train_items = all_images[:12000]
test_items = all_images[12000:]
data_generator = YoloImageGenerator(classes=classes, target_size=input_shape[0:2], grid_shape=grid_shape, nbox=nbox)
train_iterator = data_generator.flow_from_list(train_items, annotation_callback=voc2012_get_annotation, batch_size=batch_size, augument=False)
test_iterator = data_generator.flow_from_list(test_items, annotation_callback=voc2012_get_annotation, batch_size=batch_size, augument=False)


##############################
# Train model
##############################
log_dir = "./K_YOLO/{}".format(datetime.today().strftime('%m-%d__%H-%M-%S') + "_" + args.name)
tensorboard = callbacks.TensorBoard(log_dir=log_dir, write_graph=True, write_grads=False, write_images=False, histogram_freq=0)
tensorboard.set_model(model)
# Save a checkpoint after every epoch and abort training on a NaN loss.
checkpoint = callbacks.ModelCheckpoint(log_dir + "/model-{epoch:04d}.hdf5", period=1)
terminate = callbacks.TerminateOnNaN()

# Inverse-time decay: start at 1e-4 and shrink the rate as epochs progress.
def step_decay(epoch):
    initial_lr = 1e-4
    drop = 0.05
    return initial_lr / (1 + drop * epoch)

def step_increase(epoch):  # exponential increase, designed for epochs 0-35
    initial_lr = 1e-8
    increase = 0.5
    return initial_lr * (1 + increase) ** epoch

lr_schedule = callbacks.LearningRateScheduler(step_decay)
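
Example #4 stops just before training starts. A plausible continuation, assuming the model, iterators, and batch_size defined above (the epoch count is illustrative, not from the original script):

# Train with the callbacks assembled above (older Keras API with fit_generator).
model.fit_generator(
    train_iterator,
    steps_per_epoch=len(train_items) // batch_size,
    epochs=50,
    validation_data=test_iterator,
    validation_steps=len(test_items) // batch_size,
    callbacks=[tensorboard, checkpoint, terminate, lr_schedule])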