# NOTE(review): this region of the original file was collapsed onto a single
# physical line (newlines lost), and the `def` header of the first function was
# missing.  The code below restores statement boundaries; the function
# signature was reconstructed from the free variables its body uses
# (`model`, `feature_A`, `feature_B`) — confirm against the original file.
def compute_corr4d(model, feature_A, feature_B):
    """Compute the 4D correlation tensor between two feature maps and
    filter it with the model's neighbourhood-consensus network.

    Args:
        model: network exposing `k` (top-k parameter for `corr_and_add`)
            and a `NeighConsensus` module.
        feature_A, feature_B: dense feature maps; presumably 4D
            (batch, channels, H, W) tensors — TODO confirm with callers.

    Returns:
        The consensus-filtered 4D correlation tensor.
    """
    # Spatial dims of feature_B; not used below — kept for parity with the
    # original code (possibly consumed by a removed statement — TODO confirm).
    fs3, fs4 = feature_B.shape[-2:]
    corr4d = corr_and_add(feature_A, feature_B, k=model.k)
    corr4d = model.NeighConsensus(corr4d)
    return corr4d


# Set which parts of the model to train: unfreeze the last
# `fe_finetune_params` layers of the feature-extraction backbone.
if args.fe_finetune_params > 0:
    for i in range(args.fe_finetune_params):
        for p in model.FeatureExtraction.model[-1][-(i + 1)].parameters():
            p.requires_grad = True

print('Trainable parameters:')
for i, p in enumerate(filter(lambda p: p.requires_grad, model.parameters())):
    print(str(i + 1) + ": " + str(p.shape))

# Optimizer: only parameters left with requires_grad=True are optimized.
print('using Adam optimizer')
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=args.lr)

cnn_image_size = (args.image_size, args.image_size)

# Dataset / split configuration.
Dataset = ImagePairDataset
train_csv = 'train_pairs.csv'
test_csv = 'val_pairs.csv'

# Input normalization: Caffe-style (BGR, mean-subtracted) for D2 features,
# standard ImageNet normalization otherwise.
if args.feature_extraction_cnn == 'd2':  # .startswith('d2'):
    normalization_tnf = normalize_image_dict_caffe
else:
    # NOTE(review): the else-branch body was truncated in the mangled
    # original; reconstructed from the parallel code later in this file —
    # confirm against the original source.
    normalization_tnf = NormalizeImageDict(['source_image', 'target_image'])
# NOTE(review): this region of the original file was collapsed onto a single
# physical line (newlines lost); statement boundaries restored below — no
# statement was added or removed.

# Create model
print('Creating CNN model...')
model = ImMatchNet(use_cuda=use_cuda,
                   checkpoint=args.checkpoint,
                   ncons_kernel_sizes=args.ncons_kernel_sizes,
                   ncons_channels=args.ncons_channels)

# Set which parts of the model to train: unfreeze the last
# `fe_finetune_params` layers of the feature-extraction backbone.
if args.fe_finetune_params > 0:
    for i in range(args.fe_finetune_params):
        for p in model.FeatureExtraction.model[-1][-(i + 1)].parameters():
            p.requires_grad = True

print('Trainable parameters:')
for i, p in enumerate(filter(lambda p: p.requires_grad, model.parameters())):
    print(str(i + 1) + ": " + str(p.shape))

# Optimizer: only parameters left with requires_grad=True are optimized.
print('using Adam optimizer')
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=args.lr)

cnn_image_size = (args.image_size, args.image_size)

# Dataset / split configuration.
Dataset = ImagePairDataset
train_csv = 'train_pairs.csv'
test_csv = 'val_pairs.csv'

# Standard image normalization for both images of each training pair,
# and a helper that moves batch tensors to the right device.
normalization_tnf = NormalizeImageDict(['source_image', 'target_image'])
batch_preprocessing_fn = BatchTensorToVars(use_cuda=use_cuda)