Example #1
def main(cfg: omegaconf.DictConfig):
    # fix paths
    cfg = fix_path_in_configs(CW_DIR, cfg,
                              [["dataset", "root"], ["model", "resume"]])

    # set a seed
    PytorchTools.set_seed(cfg.general.seed, cfg.general.device,
                          cfg.general.reproducibility)

    # set a device
    cfg.general.device = PytorchTools.select_device(cfg.general.device)

    # load the trained model's checkpoint and its config
    checkpoint, checkpoint_cfg = get_checkpoint(cfg.model.resume)

    # override the checkpoint config's dataset root with the current one
    checkpoint_cfg.dataset.root = cfg.dataset.root

    model = get_model(checkpoint_cfg)
    dataset = get_dataset(checkpoint_cfg)
    criterion = get_losses(checkpoint_cfg)

    # set trained params
    model.load_state_dict(checkpoint["model"])

    # start test
    test(cfg, model, dataset["test"], criterion)

    print("Finish test.")
Example #2
File: test.py  Project: Obarads/torchpcp
def main():
    # get configs
    cfg, _, _ = get_configs(CONFIG_PATH)

    # make an output folder
    make_folders(cfg.output_folder)

    # set a seed
    PytorchTools.set_seed(cfg.seed, cfg.device, cfg.reproducibility)

    # set a device
    cfg.device = PytorchTools.select_device(cfg.device)

    # load the trained model's checkpoint and its config
    checkpoint, checkpoint_cfg = get_checkpoint(cfg.resume)

    model = get_model(checkpoint_cfg)
    dataset = get_dataset(checkpoint_cfg)
    criterion = get_losses()

    # set trained params
    model.load_state_dict(checkpoint["model"])

    eval(checkpoint_cfg, model, dataset["test"], criterion)
    print("Finish test.")
Example #3
File: test.py  Project: Obarads/torchpcp
def main(cfg: omegaconf.DictConfig):
    # fix paths
    cfg = fix_path_in_configs(
        CW_DIR, cfg,
        [["dataset", "root"], ["general", "sizetxt"], ["model", "resume"]])

    # set a seed
    PytorchTools.set_seed(cfg.general.seed, cfg.general.device,
                          cfg.general.reproducibility)

    # set a device
    cfg.general.device = PytorchTools.select_device(cfg.general.device)

    # load the trained model's checkpoint and its config
    checkpoint, checkpoint_cfg = get_checkpoint(cfg.model.resume)

    # override the checkpoint config for scene-wise S3DIS evaluation
    checkpoint_cfg.dataset.root = cfg.dataset.root
    checkpoint_cfg.dataset.name = "s3dis"
    checkpoint_cfg.dataset.mode = "scene"

    model = get_model(checkpoint_cfg)
    dataset = get_dataset(checkpoint_cfg)
    criterion = get_losses(checkpoint_cfg)

    # set trained params
    model.load_state_dict(checkpoint["model"])

    # load the mean number of points per instance from the txt file
    mean_num_pts_in_group = np.loadtxt(cfg.general.sizetxt)

    # test
    eval(model, dataset["test"], criterion, mean_num_pts_in_group,
         cfg.general.device)
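Each script also normalizes its device field through PytorchTools.select_device before use. A sketch under the assumption that the config holds either "cpu" or a CUDA device id and the helper returns a torch.device:

import torch

def select_device_sketch(device) -> torch.device:
    # Fall back to the CPU when CUDA is unavailable or not requested.
    if str(device) == "cpu" or not torch.cuda.is_available():
        return torch.device("cpu")
    # Otherwise treat a bare id as a CUDA ordinal, e.g. 0 -> "cuda:0".
    return torch.device(f"cuda:{device}" if str(device).isdigit() else str(device))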
Example #4
File: train.py  Project: Obarads/torchpcp
def main(cfg: omegaconf.DictConfig):
    # fix paths
    cfg = fix_path_in_configs(CW_DIR, cfg, [["dataset","root"]])

    # set a seed 
    PytorchTools.set_seed(
        cfg.general.seed, 
        cfg.general.device, 
        cfg.general.reproducibility
    )

    # set a device
    cfg.general.device = PytorchTools.select_device(cfg.general.device)

    model = get_model(cfg)
    dataset = get_dataset(cfg)
    criterion = get_losses(cfg)
    optimizer = get_optimizer(cfg, model)
    scheduler = get_scheduler(cfg, optimizer)

    # create a TensorBoard writer
    writer = SummaryWriter("./")

    # start training
    loader = tqdm(range(cfg.general.start_epoch, cfg.general.epochs), 
                  desc="Training", ncols=70)
    for epoch in loader:
        # print('Epoch {}/{}:'.format(epoch, cfg.general.epochs))

        # training
        train_log = train(
            cfg, 
            model, 
            dataset["train"], 
            optimizer, 
            criterion, 
            scheduler
        )

        dict2tensorboard(train_log, writer, epoch)

        # save params and model
        if ((epoch+1) % cfg.general.save_epoch == 0
                and cfg.general.save_epoch != -1):
            save_params("model.path.tar", epoch+1, cfg, model, optimizer, 
                        scheduler)

    print('Epoch {}/{}:'.format(cfg.general.epochs, cfg.general.epochs))
    save_params("f_model.path.tar", cfg.general.epochs, cfg, model, optimizer, 
                scheduler)

    writer.close()

    print("Finish training.")
Example #5
File: train.py  Project: Obarads/torchpcp
def main():
    # get configs
    cfg, _, _ = get_configs(CONFIG_PATH)

    # make an output folder
    make_folders(cfg.output_folder)

    # set a seed 
    PytorchTools.set_seed(cfg.seed, cfg.device, cfg.reproducibility)

    # set a device
    cfg.device = PytorchTools.select_device(cfg.device)

    model = get_model(cfg)
    dataset = get_dataset(cfg)
    criterion = get_losses()
    optimizer = get_optimizer(cfg, model)
    scheduler = get_scheduler(cfg, optimizer)

    # create a TensorBoard writer
    writer = SummaryWriter(cfg.output_folder)

    # start training
    for epoch in range(cfg.start_epoch, cfg.epochs):
        print('Epoch {}/{}:'.format(epoch, cfg.epochs))

        # training
        train_log = train(cfg, model, dataset["train"], optimizer, criterion, 
                          scheduler)

        dict2tensorboard(train_log, writer, epoch)

        # save params and model
        if (epoch+1) % cfg.save_epoch == 0 and cfg.save_epoch != -1:
            save_params(os.path.join(cfg.output_folder, "model.path.tar"), 
                        epoch+1, cfg, model, optimizer, scheduler)

    print('Epoch {}/{}:'.format(cfg.epochs, cfg.epochs))
    save_params(os.path.join(cfg.output_folder, "f_model.path.tar"), 
                cfg.epochs, cfg, model, optimizer, scheduler)
    writer.close()

    print("Finish training.")