Code example #1
0
File: eval.py — Project: zeta1999/GLU-Net
# Evaluation setup: disable autograd globally, enable cuDNN, choose a device,
# prepare the output directory and the (channel-first-only) input transforms.
# The actual image pre-processing happens inside the model functions.
torch.set_grad_enabled(False)  # inference only: skip gradient bookkeeping for speed/memory
torch.backends.cudnn.enabled = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # either gpu or cpu

# Create the save directory once. exist_ok=True replaces the original's
# redundant exists()/isdir() double check (both called makedirs on the same
# path) and removes the check-then-create race; it still fails loudly if the
# path exists but is a regular file.
os.makedirs(args.save_dir, exist_ok=True)

save_dict = {}  # presumably filled with per-model results later (outside this excerpt)

pre_trained_models = args.pre_trained_models

# define the image processing parameters, the actual pre-processing is done
# within the model functions
input_images_transform = transforms.Compose([ArrayToTensor(get_float=False)])  # only put channel first
gt_flow_transform = transforms.Compose([ArrayToTensor()])  # only put channel first
co_transform = None

# Evaluate each requested set of pre-trained weights in turn.
# NOTE(review): the loop body is truncated in this excerpt — it continues
# past the GLU_Net construction below.
for pre_trained_model_type in pre_trained_models:
    print('model: ' + args.model + ', pre-trained model: ' + pre_trained_model_type)
    with torch.no_grad():  # redundant with torch.set_grad_enabled(False) above, but harmless

        # define the network to use
        if args.model == 'GLUNet':
            # Build GLU-Net with cyclic consistency (consensus network off)
            # and iterative refinement; flipping condition comes from the CLI.
            network = GLU_Net(model_type=pre_trained_model_type,
                              consensus_network=False,
                              cyclic_consistency=True,
                              iterative_refinement=True,
                              apply_flipping_condition=args.flipping_condition)
Code example #2
0
File: eval.py — Project: liuguoyou/GLU-Net
    False)  # make sure to not compute gradients for computational performance
# Evaluation setup: enable cuDNN, choose a device, prepare the output
# directory and the (channel-first-only) input transforms. The actual image
# pre-processing happens inside the model functions.
torch.backends.cudnn.enabled = True
device = torch.device(
    "cuda" if torch.cuda.is_available() else "cpu")  # either gpu or cpu

# Create the save directory once. exist_ok=True replaces the original's
# redundant exists()/isdir() double check (both called makedirs on the same
# path) and removes the check-then-create race; it still fails loudly if the
# path exists but is a regular file.
os.makedirs(args.save_dir, exist_ok=True)

save_dict = {}  # presumably filled with per-model results later (outside this excerpt)

pre_trained_models = args.pre_trained_models

# define the image processing parameters, the actual pre-processing is done
# within the model functions
input_images_transform = transforms.Compose([ArrayToTensor(get_float=False)
                                             ])  # only put channel first
gt_flow_transform = transforms.Compose([ArrayToTensor()
                                        ])  # only put channel first
co_transform = None

# Evaluate each requested set of pre-trained weights in turn.
# NOTE(review): this excerpt is truncated — the GLU_Net(...) call below is
# missing its trailing arguments and closing parenthesis in the visible source.
for pre_trained_model_type in pre_trained_models:
    print(pre_trained_model_type)
    with torch.no_grad():  # ensure no gradients are tracked during evaluation

        # define the network to use
        if args.model == 'GLUNet':
            # Build GLU-Net with cyclic consistency (consensus network off)
            # and iterative refinement.
            network = GLU_Net(model_type=pre_trained_model_type,
                              consensus_network=False,
                              cyclic_consistency=True,
                              iterative_refinement=True,
Code example #3
0
                                         transforms_target=source_transforms,
                                         pyramid_param=pyramid_param,
                                         get_flow=True,
                                         output_size=(520, 520))

    # NOTE(review): the matching `if` branch (synthetic pairs generated on
    # the fly) is outside this excerpt.
    else:
        # If synthetic pairs were already created and saved to disk, run instead of 'train_dataset' the following.
        # and replace args.training_data_dir by the root to folders containing images/ and flow/

        # because fixed input size, rescale the images and the ground-truth flows to 256x256
        co_transform = Scale((256,256))

        # apply pre-processing to the images
        image_transforms = transforms.Compose([transforms.ToTensor(),
                                               normTransform])
        flow_transform = transforms.Compose([ArrayToTensor()]) # just put channels first and put it to float
        # split=1: presumably the whole dataset goes to training — verify
        # against PreMadeDataset's split semantics.
        train_dataset, _ = PreMadeDataset(root=args.training_data_dir,
                                          source_image_transform=image_transforms,
                                          target_image_transform=image_transforms,
                                          flow_transform=flow_transform,
                                          co_transform=co_transform,
                                          split=1)  # only training

        # split=0: presumably everything goes to validation.
        _, val_dataset = PreMadeDataset(root=args.evaluation_data_dir,
                                        source_image_transform=image_transforms,
                                        target_image_transform=image_transforms,
                                        flow_transform=flow_transform,
                                        co_transform=co_transform,
                                        split=0)  # only validation

    train_dataloader = DataLoader(train_dataset,
Code example #4
0
    # Tail of the CLI definition; earlier options are outside this excerpt.
    parser.add_argument('--div_flow', type=float, default=1.0, help='div flow')
    parser.add_argument('--seed',
                        type=int,
                        default=1986,
                        help='Pseudo-RNG seed')
    args = parser.parse_args()

    # Seed every RNG source for reproducibility (Python, NumPy, PyTorch CPU,
    # and the current CUDA device).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)  # seeds the current CUDA device only
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    # datasets, pre-processing of the images is done within the network function !
    source_img_transforms = transforms.Compose(
        [ArrayToTensor(get_float=False)])
    target_img_transforms = transforms.Compose(
        [ArrayToTensor(get_float=False)])

    if not args.pre_loaded_training_dataset:
        # training dataset, created on the fly at each epoch
        pyramid_param = [
            520
        ]  # means that we get the ground-truth flow field at this size
        # NOTE(review): the HomoAffTps_Dataset call is truncated in this
        # excerpt; its remaining keyword arguments continue past this point.
        train_dataset = HomoAffTps_Dataset(
            image_path=args.training_data_dir,
            csv_file=osp.join('datasets', 'csv_files',
                              'homo_aff_tps_train_DPED_CityScape_ADE.csv'),
            transforms=source_img_transforms,
            transforms_target=target_img_transforms,
            pyramid_param=pyramid_param,
Code example #5
0
            pyramid_param=pyramid_param,
            get_flow=True,
            output_size=(520, 520))

    # NOTE(review): the matching `if` branch (synthetic pairs generated on
    # the fly) is outside this excerpt.
    else:
        # If synthetic pairs were already created and saved to disk, run instead of 'train_dataset' the following.
        # and replace args.training_data_dir by the root to folders containing images/ and flow/

        # because fixed input size, rescale the images and the ground-truth flows to 256x256
        co_transform = Scale((256, 256))

        # apply pre-processing to the images
        image_transforms = transforms.Compose(
            [transforms.ToTensor(), normTransform])
        flow_transform = transforms.Compose(
            [ArrayToTensor()])  # just put channels first and put it to float
        # split=1: presumably the whole dataset goes to training — verify
        # against PreMadeDataset's split semantics.
        train_dataset, _ = PreMadeDataset(
            root=args.training_data_dir,
            source_image_transform=image_transforms,
            target_image_transform=image_transforms,
            flow_transform=flow_transform,
            co_transform=co_transform,
            split=1)  # only training

        # split=0: presumably everything goes to validation.
        _, val_dataset = PreMadeDataset(
            root=args.evaluation_data_dir,
            source_image_transform=image_transforms,
            target_image_transform=image_transforms,
            flow_transform=flow_transform,
            co_transform=co_transform,
            split=0)  # only validation
Code example #6
0
    # Test-script setup: resolve output directories and build the synthetic
    # warp dataset used for evaluation.
    plot = args.plot
    save_dir = args.save_dir

    # makedirs(..., exist_ok=True) replaces the original's three racy
    # exists()-then-create checks with one race-free idiom, covering the
    # save dir and both sub-directories in creation order.
    image_dir = os.path.join(save_dir, 'images')
    flow_dir = os.path.join(save_dir, 'flow')
    for directory in (save_dir, image_dir, flow_dir):
        os.makedirs(directory, exist_ok=True)

    # datasets — ArrayToTensor only puts channels first; the network is
    # expected to do its own pre-processing (per the sibling scripts' notes).
    source_img_transforms = transforms.Compose(
        [ArrayToTensor(get_float=False)])
    target_img_transforms = transforms.Compose(
        [ArrayToTensor(get_float=False)])
    pyramid_param = [520]  # presumably the ground-truth flow resolution — verify

    # training dataset: synthetic homography/affine/TPS warps with flow
    train_dataset = HomoAffTps_Dataset(image_path=args.image_data_path,
                                       csv_file=args.csv_path,
                                       transforms=source_img_transforms,
                                       transforms_target=target_img_transforms,
                                       pyramid_param=pyramid_param,
                                       get_flow=True,
                                       output_size=(520, 520))

    test_dataloader = DataLoader(train_dataset,
                                 batch_size=1,
Code example #7
0
    # Tail of the CLI definition; earlier options are outside this excerpt.
    parser.add_argument('--weight-decay', type=float, default=4e-4,
                        help='weight decay constant')
    parser.add_argument('--div_flow', type=float, default=1.0,
                        help='div flow')
    parser.add_argument('--seed', type=int, default=1986,
                        help='Pseudo-RNG seed')
    args = parser.parse_args()

    # Seed every RNG source for reproducibility (Python, NumPy, PyTorch CPU,
    # and the current CUDA device).
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)  # seeds the current CUDA device only
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    # datasets, pre-processing of the images is done within the network function !
    source_img_transforms = transforms.Compose([ArrayToTensor(get_float=False)])
    target_img_transforms = transforms.Compose([ArrayToTensor(get_float=False)])

    if not args.pre_loaded_training_dataset:
        # training dataset, created on the fly at each epoch
        pyramid_param = [520] # means that we get the ground-truth flow field at this size
        train_dataset = HomoAffTps_Dataset(image_path=args.training_data_dir,
                                           csv_file=osp.join('datasets', 'csv_files',
                                                         'homo_aff_tps_train_DPED_CityScape_ADE.csv'),
                                           transforms=source_img_transforms,
                                           transforms_target=target_img_transforms,
                                           pyramid_param=pyramid_param,
                                           get_flow=True,
                                           output_size=(520, 520))

        # validation dataset (continues past this excerpt)