Example #1
def main(cfg: configs.ModelNet40Config) -> None:
    # fix paths
    cfg = monitor.fix_path_in_configs(
        CW_DIR, cfg, [["dataset", "root"], ["model", "resume"]])

    # set a seed
    pytorch_tools.set_seed(cfg.general.seed, cfg.general.device,
                           cfg.general.reproducibility)

    # set a device
    cfg.general.device = pytorch_tools.select_device(cfg.general.device)

    # get a trained model
    checkpoint, checkpoint_cfg = get_checkpoint(cfg.model.resume)

    # test env
    ## Get model.
    model = get_model(checkpoint_cfg)
    ## Get dataset and loader.
    dataset = get_dataset(checkpoint_cfg)
    dataset_loader = get_loader(checkpoint_cfg, dataset)
    ## Get loss functions.
    criterion = get_losses(checkpoint_cfg)

    # set trained params
    model.load_state_dict(checkpoint["model"])

    # test start
    test_log = test(cfg, model, dataset_loader["test"], criterion)

    # show results
    print(test_log)

    print("Finish test.")
Example #2
def main(cfg: default.ModelNet40Config) -> None:
    # fix paths
    cfg = monitor.fix_path_in_configs(CW_DIR, cfg, [["dataset", "root"]])

    # set a seed
    pytorch_tools.set_seed(cfg.general.seed, cfg.general.device,
                           cfg.general.reproducibility)

    # set a device
    cfg.general.device = pytorch_tools.select_device(cfg.general.device)

    # training env
    ## Get model.
    model = get_model(cfg)
    ## Get dataset.
    dataset, train_collate_fn = get_dataset(cfg)
    ## Get loader.
    train_dataset_loader = get_loader(cfg,
                                      dataset["train"],
                                      shuffle=cfg.loader.shuffle,
                                      collate_fn=train_collate_fn)
    test_dataset_loader = get_loader(cfg, dataset["test"])
    ## Get loss functions.
    criterion = get_losses(cfg)
    ## Get optimizer.
    optimizer = get_optimizer(cfg, model)
    ## Get scheduler.
    scheduler = get_scheduler(cfg, optimizer)
    ## Get logger.
    writer = get_writer(cfg)

    # progress bar
    loader = tqdm(range(cfg.general.start_epoch, cfg.general.epochs),
                  desc="Training",
                  ncols=70)

    # training start
    for epoch in loader:
        # training
        train_log = train(cfg, model, train_dataset_loader, optimizer,
                          criterion, scheduler)
        # save training log
        monitor.dict2logger(train_log, writer, epoch, cfg.writer.name)

        # test
        eval_log = eval(cfg, model, test_dataset_loader, criterion)
        # save eval log
        monitor.dict2logger(eval_log, writer, epoch, cfg.writer.name)

        # save params and model
        if (epoch+1) % cfg.general.save_epoch == 0 and \
            cfg.general.save_epoch != -1:
            save_params("model.path.tar", epoch + 1, cfg, model, optimizer,
                        scheduler)

    save_params("f_model.path.tar", cfg.general.epochs, cfg, model, optimizer,
                scheduler)

    print("Finish training.")
Example #3
def main(cfg: configs.KITTIConfig) -> None:
    # fix paths
    cfg = monitor.fix_path_in_configs(CW_DIR, cfg, [["dataset", "dataset_path"]])

    # set a seed
    pytorch_tools.set_seed(
        cfg.general.seed,
        cfg.general.device,
        cfg.general.reproducibility
    )

    # set a device
    cfg.general.device = pytorch_tools.select_device(cfg.general.device)

    ## TODO: remove this temporary logger setup.
    log_file = os.path.join("./", 'log_train.txt')
    logger = create_logger(log_file)

    # training env
    ## Get model.
    model = get_model(cfg)
    ## Get dataset and loader.
    dataset = get_dataset(cfg, logger)
    dataset_loader = get_loader(cfg, dataset)
    ## Get loss functions.
    criterion = get_losses(cfg)
    ## Get optimizer.
    optimizer = get_optimizer(cfg, model)
    ## Get scheduler.
    scheduler = get_scheduler(cfg, optimizer)
    ## Get logger.
    writer = get_writer(cfg)

    # progress bar
    loader = tqdm(range(cfg.general.start_epoch, cfg.general.epochs),
                  desc="Training", ncols=70)

    # training start
    for epoch in loader:
        # training
        train_log = train(cfg, model, dataset_loader, optimizer, criterion,
                          scheduler)

        # save training log
        monitor.dict2logger(train_log, writer, epoch, cfg.writer.name)

        # save params and model
        if (epoch+1) % cfg.general.save_epoch == 0 and \
            cfg.general.save_epoch != -1:
            save_params("model.path.tar", epoch+1, cfg, model, optimizer, 
                        scheduler)

    print('Epoch {}/{}:'.format(cfg.general.epochs, cfg.general.epochs))
    save_params("f_model.path.tar", cfg.general.epochs, cfg, model, optimizer, 
                scheduler)

    print("Finish training.")
Example #4
def main(cfg: omegaconf.DictConfig):
    # fix paths
    cfg = fix_path_in_configs(CW_DIR, cfg, [["dataset", "root"]])

    # set a seed
    pytorch_tools.set_seed(cfg.general.seed, cfg.general.device,
                           cfg.general.reproducibility)

    # set a device
    cfg.general.device = pytorch_tools.select_device(cfg.general.device)

    model = get_model(cfg)
    dataset = get_dataset(cfg)
    criterion = get_losses(cfg)
    optimizer = get_optimizer(cfg, model)
    scheduler = get_scheduler(cfg, optimizer)

    # get a logger
    writer = get_writer(cfg)

    # start training
    loader = tqdm(range(cfg.general.start_epoch, cfg.general.epochs),
                  desc="Training",
                  ncols=70)
    for epoch in loader:
        # print('Epoch {}/{}:'.format(epoch, cfg.general.epochs))

        # training
        train_log = train(cfg, model, dataset["train"], optimizer, criterion,
                          scheduler)

        dict2logger(train_log, writer, epoch, cfg.writer.name)

        # save params and model
        if (epoch+1) % cfg.general.save_epoch == 0 and \
            cfg.general.save_epoch != -1:
            save_params("model.path.tar", epoch + 1, cfg, model, optimizer,
                        scheduler)

    print('Epoch {}/{}:'.format(cfg.general.epochs, cfg.general.epochs))
    save_params("f_model.path.tar", cfg.general.epochs, cfg, model, optimizer,
                scheduler)

    writer.close()

    print("Finish training.")