Code example #1
File: train.py Project: Obarads/torchpcp
# Third-party imports used by this snippet. The project-specific helpers
# (fix_path_in_configs, PytorchTools, get_model, get_dataset, get_losses,
# get_optimizer, get_scheduler, train, dict2tensorboard, save_params) and
# the CW_DIR constant come from the Obarads/torchpcp repository.
import omegaconf
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm


def main(cfg: omegaconf.DictConfig):
    # fix paths
    cfg = fix_path_in_configs(CW_DIR, cfg, [["dataset","root"]])

    # set a seed 
    PytorchTools.set_seed(
        cfg.general.seed, 
        cfg.general.device, 
        cfg.general.reproducibility
    )

    # set a device
    cfg.general.device = PytorchTools.select_device(cfg.general.device)

    model = get_model(cfg)
    dataset = get_dataset(cfg)
    criterion = get_losses(cfg)
    optimizer = get_optimizer(cfg, model)
    scheduler = get_scheduler(cfg, optimizer)

    # get a logger
    writer = SummaryWriter("./")

    # start training
    loader = tqdm(range(cfg.general.start_epoch, cfg.general.epochs), 
                  desc="Training", ncols=70)
    for epoch in loader:
        # print('Epoch {}/{}:'.format(epoch, cfg.general.epochs))

        # training
        train_log = train(
            cfg, 
            model, 
            dataset["train"], 
            optimizer, 
            criterion, 
            scheduler
        )

        dict2tensorboard(train_log, writer, epoch)

        # save params and model
        if ((epoch + 1) % cfg.general.save_epoch == 0
                and cfg.general.save_epoch != -1):
            save_params("model.path.tar", epoch+1, cfg, model, optimizer, 
                        scheduler)

    print('Epoch {}/{}:'.format(cfg.general.epochs, cfg.general.epochs))
    save_params("f_model.path.tar", cfg.general.epochs, cfg, model, optimizer, 
                scheduler)

    writer.close()

    print("Finish training.")
Code example #2
File: train.py Project: Obarads/torchpcp
# Third-party imports used by this snippet. The project-specific helpers
# (get_configs, make_folders, PytorchTools, get_model, get_dataset,
# get_losses, get_optimizer, get_scheduler, train, dict2tensorboard,
# save_params) and the CONFIG_PATH constant come from the Obarads/torchpcp
# repository.
import os

from torch.utils.tensorboard import SummaryWriter


def main():
    # get configs
    cfg, _, _ = get_configs(CONFIG_PATH)

    # make an output folder
    make_folders(cfg.output_folder)

    # set a seed 
    PytorchTools.set_seed(cfg.seed, cfg.device, cfg.reproducibility)

    # set a device
    cfg.device = PytorchTools.select_device(cfg.device)

    model = get_model(cfg)
    dataset = get_dataset(cfg)
    criterion = get_losses()
    optimizer = get_optimizer(cfg, model)
    scheduler = get_scheduler(cfg, optimizer)

    # get a logger
    writer = SummaryWriter(cfg.output_folder)

    # start training
    for epoch in range(cfg.start_epoch, cfg.epochs):
        print('Epoch {}/{}:'.format(epoch, cfg.epochs))

        # training
        train_log = train(cfg, model, dataset["train"], optimizer, criterion, 
                          scheduler)

        dict2tensorboard(train_log, writer, epoch)

        # save params and model
        if (epoch+1) % cfg.save_epoch == 0 and cfg.save_epoch != -1:
            save_params(os.path.join(cfg.output_folder, "model.path.tar"), 
                        epoch+1, cfg, model, optimizer, scheduler)

    print('Epoch {}/{}:'.format(cfg.epochs, cfg.epochs))
    save_params(os.path.join(cfg.output_folder, "f_model.path.tar"), 
                cfg.epochs, cfg, model, optimizer, scheduler)
    writer.close()

    print("Finish training.")