# Code example #1
def main():
    """Entry point: configure training options, data, and model, then train."""
    # training options from the command line / config files
    options = parse_options()

    # make sure the training directory exists
    dir_check(options)

    # logging setup
    log_handles = configure_loggers(options)

    # seed the RNGs for reproducibility
    options = get_random_seed(options)

    # load a checkpoint to resume from, or create fresh directories
    checkpoint = get_resume_state(options)

    # cudnn autotuning helps when the model and input sizes stay fixed
    # throughout training; with variable shapes it can stall training instead
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # torch.autograd.set_detect_anomaly(True)

    # data pipeline
    loaders, loader_params = get_dataloaders(options)

    # build the model (load/print networks, schedulers, optimizer, init),
    # starting from the resumed iteration when a checkpoint was found
    start_step = 0 if checkpoint is None else checkpoint['iter']
    net = create_model(options, step=start_step)

    # restore training progress (optimizer/scheduler state) when resuming
    progress = resume_training(options, net, checkpoint, loader_params)

    # run the training loop with the configured options
    fit(net, options, loaders, progress, loader_params, log_handles)
# Code example #2 — file: evaluate.py, project: wakaba130/S-DCNet
def main(cfg):
    """Evaluate a trained S-DCNet checkpoint on the full train and test data.

    Loads the checkpoint named in ``cfg.test.trained_ckpt_for_inference``,
    reports MAE/MSE on both splits, and optionally saves visualized
    predictions for the test set.
    """
    orig_cwd = hydra.utils.get_original_cwd()
    print(f"  Evaluating the checkpoint "
          f"'{cfg.test.trained_ckpt_for_inference}'")
    checkpoint = torch.load(
        pjn(orig_cwd, cfg.test.trained_ckpt_for_inference))

    # keep every training sample in the train loader: do not carve out a
    # validation subset
    cfg.train.train_val_split = 1.0
    train_loader, _, test_loader = train.get_dataloaders(cfg, (1, 0, 1))

    interval_bounds, label2count_list = make_label2count_list(cfg)
    net = SDCNet(label2count_list,
                 cfg.model.supervised,
                 load_pretr_weights_vgg=False)
    net.load_state_dict(checkpoint['model_state_dict'], strict=True)

    # pick the device; move the model to the GPU when one is usable
    use_cuda = (not cfg.resources.disable_cuda) and torch.cuda.is_available()
    extra_cfg = {'device': torch.device('cuda' if use_cuda else 'cpu')}
    if use_cuda:
        net = net.cuda()

    # evaluation only — no optimizer and no loaders are attached; the
    # TrainManager is used purely for its validate() method
    evaluator = train.TrainManager(
        net,
        None,
        cfg,
        extra_cfg,
        train_loader=None,
        val_loader=None,
    )

    print()
    datadir = pjn(cfg.dataset.dataset_rootdir, f"part_{cfg.dataset.part}")
    print(f"  Evaluating on the (whole) train data and on the test data "
          f"(in '{datadir}')")

    mae_train, mse_train = evaluator.validate(train_loader)
    print(f"  Metrics on the (whole) train data: "
          f"MAE: {mae_train:.2f}, MSE: {mse_train:.2f}")

    mae_test, mse_test = evaluator.validate(test_loader)
    print(f"  Metrics on the test data:          "
          f"MAE: {mae_test:.2f}, MSE: {mse_test:.2f}")

    if cfg.test.visualize:
        vis_dir_name = f"visualized_part_{cfg.dataset.part}_test_set_predictions"
        vis_dir_print = pjn(os.path.relpath(os.getcwd(), orig_cwd),
                            vis_dir_name)
        print(
            f"  Visualized predictions are being saved to '{vis_dir_print}':")
        visualize_predictions(cfg, net, test_loader, vis_dir_name)
# Code example #3
def main():
    """Entry point: configure test options, data, and model, then run tests."""
    # test-mode options
    options = parse_options(is_train=False)

    # make sure the test directory exists
    dir_check(options)

    # cudnn autotuning (beneficial while input sizes stay fixed)
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    # logging setup
    log_handles = configure_loggers(options)

    # data pipeline
    # note: the test dataloader only supports num_workers = 0,
    # batch_size = 1, and data shuffling is disabled
    loaders, loader_params = get_dataloaders(options)

    # build the model: load and print the network, initialize
    net = create_model(options)

    # run the testing loop with the configured options
    test_loop(net, options, loaders, loader_params)
# Code example #4
from matplotlib import pyplot as plt
import matplotlib
import numpy as np
# project-local: data path, dataloader factory, and per-split transforms
from train import get_dataloaders, datapath, train_transform, val_transform, test_transform

# headless backend so figures can be saved without a display
# NOTE(review): calling use('Agg') after the pyplot import works on modern
# matplotlib, but selecting the backend before importing pyplot is the
# conventional, more portable order — confirm the installed version tolerates it
matplotlib.use('Agg')

if __name__ == '__main__':
    # Visual sanity check of the training data: render each image next to
    # its segmentation mask and save the pair as a PNG, pausing per batch.
    train_loader, val_loader, test_loader, num_classes = get_dataloaders(
        datapath, (train_transform, val_transform, test_transform))

    # grey-level step so each class index maps to a distinct intensity
    intensity = 255 // num_classes

    for j, (imgs, masks) in enumerate(train_loader):
        for i in range(imgs.shape[0]):
            # per-sample CHW tensors -> numpy arrays
            img = imgs[i, :, :, :].numpy()
            mask = masks[i, :, :, :].numpy()

            # collapse the one-hot mask (C, H, W) to a grey-scale label image
            mask_img = np.argmax(mask, axis=0) * intensity

            fig, (ax1, ax2) = plt.subplots(2, 1)

            # CHW -> HWC for imshow
            ax1.imshow(img.swapaxes(0, 2).swapaxes(0, 1))
            ax2.imshow(mask_img)

            ax1.axis('off')
            ax2.axis('off')

            # include the sample index in the filename: the original
            # "{j}.png" name made every sample in a batch overwrite the
            # previous one, keeping only the last image per batch
            fig.savefig("{}_{}.png".format(j, i))
            plt.close(fig)
        input("Press ENTER to continue")