def train(config_type: type):
    """Run a full segmentation training experiment for the given config class.

    :param config_type: a subclass of ``BaseSegmentationTrainConfig`` (passed as the
        class itself, not an instance).  NOTE: the original annotation was
        ``type(BaseSegmentationTrainConfig)``, which *calls* ``type()`` at import time
        and evaluates to the metaclass — it did not constrain the argument at all.
    """
    # Fresh experiment directory layout (is_continue=False starts from scratch).
    fsm = FileStructManager(base_dir=config_type.experiment_dir, is_continue=False)

    # NOTE(review): 'train' is a list but 'val' is a bare string — looks
    # intentional per the config API, but worth confirming against config_type.
    config = config_type({'train': ['train_seg.npy'], 'val': 'val_seg.npy'})

    trainer = Trainer(config, fsm, device=torch.device('cuda'))
    tensorboard = TensorboardMonitor(fsm, is_continue=False)
    trainer.monitor_hub.add_monitor(tensorboard)

    trainer.set_epoch_num(300)
    # Halve the LR after 10 epochs without validation-loss improvement.
    trainer.enable_lr_decaying(coeff=0.5, patience=10, target_val_clbk=lambda: np.mean(config.val_stage.get_losses()))
    # Log the current learning rate to TensorBoard at the end of every epoch.
    trainer.add_on_epoch_end_callback(lambda: tensorboard.update_scalar('params/lr', trainer.data_processor().get_lr()))
    # Checkpoint whenever mean validation loss reaches a new best.
    trainer.enable_best_states_saving(lambda: np.mean(config.val_stage.get_losses()))
    # Stop once LR decaying has pushed the learning rate below 1e-6.
    trainer.add_stop_rule(lambda: trainer.data_processor().get_lr() < 1e-6)

    trainer.train()
# Exemplo n.º 2
# 0
def train():
    """Train the PoseNet model with TensorBoard/file logging, best-state
    checkpointing and plateau-based learning-rate decay."""
    config = PoseNetTrainConfig()
    fsm = FileStructManager(
        base_dir=PoseNetTrainConfig.experiment_dir, is_continue=False)

    # Monitors: TensorBoard scalars plus a log file with final metrics.
    tb_monitor = TensorboardMonitor(fsm, is_continue=False)
    file_log = LogMonitor(fsm).write_final_metrics()

    trainer = Trainer(config, fsm, torch.device('cuda'))
    trainer.monitor_hub.add_monitor(tb_monitor).add_monitor(file_log)
    trainer.set_epoch_num(EPOCH_NUM)

    def mean_val_loss():
        # Mean validation loss drives both checkpointing and LR decay.
        return np.mean(config.val_stage.get_losses())

    trainer.enable_best_states_saving(mean_val_loss)
    trainer.enable_lr_decaying(coeff=0.5, patience=10,
                               target_val_clbk=mean_val_loss)

    # Record the current learning rate after each epoch.
    trainer.add_on_epoch_end_callback(
        lambda: tb_monitor.update_scalar('params/lr',
                                         trainer.data_processor().get_lr()))
    trainer.train()