Example #1
parser.set_defaults(plot=False)

parser.add_argument('--checkpoint_directory',
                    type=str,
                    default='checkpoints',
                    help='directory for training checkpoints')
parser.add_argument('--checkpoint_prefix',
                    type=str,
                    default='rord',
                    help='prefix for training checkpoints')

args = parser.parse_args()
print(args)

# Creating CNN model
model = D2Net(model_file=args.init_model, use_cuda=False)
model = model.to(device)

# Optimizer
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=args.lr)

training_dataset = PhotoTourismIPR(base_path=args.dataset_path,
                                   preprocessing=args.preprocessing)
training_dataset.build_dataset()

training_dataloader = DataLoader(training_dataset,
                                 batch_size=args.batch_size,
                                 num_workers=args.num_workers)

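Example #1 stops right after the training DataLoader is built. Below is a minimal sketch of the loop that would typically follow; args.num_epochs and the loss_function helper are assumptions that do not appear in the excerpt, and the checkpoint naming simply combines the two checkpoint arguments defined above.

# Hypothetical continuation (not part of the original example):
# a bare-bones training loop with per-epoch checkpointing.
import os
import torch

for epoch in range(1, args.num_epochs + 1):        # args.num_epochs is assumed
    model.train()
    for batch in training_dataloader:
        optimizer.zero_grad()
        loss = loss_function(model, batch, device)  # loss_function is assumed
        loss.backward()
        optimizer.step()

    # Save one checkpoint per epoch, e.g. checkpoints/rord.01.pth
    os.makedirs(args.checkpoint_directory, exist_ok=True)
    checkpoint_path = os.path.join(
        args.checkpoint_directory,
        '%s.%02d.pth' % (args.checkpoint_prefix, epoch))
    torch.save({'epoch': epoch,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict()},
               checkpoint_path)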
Example #2
                    help='prefix for training checkpoints')

args = parser.parse_args()

print(args)

# Create the folders for plotting if need be
if args.plot:
    plot_path = 'train_vis'
    if os.path.isdir(plot_path):
        print('[Warning] Plotting directory already exists.')
    else:
        os.mkdir(plot_path)

# Creating CNN model
model = D2Net(model_file=args.model_file, use_cuda=use_cuda)

# Optimizer
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=args.lr)

# Dataset
if args.use_validation:
    validation_dataset = MegaDepthDataset(
        scene_list_path='megadepth_utils/valid_scenes.txt',
        scene_info_path=args.scene_info_path,
        base_path=args.dataset_path,
        train=False,
        preprocessing=args.preprocessing,
        pairs_per_scene=25)
    validation_dataloader = DataLoader(validation_dataset,
                                        batch_size=args.batch_size,
                                        num_workers=args.num_workers)
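The excerpt ends with the validation loader. A sketch of how it might be consumed for periodic evaluation follows; the loss_function helper and the device argument are assumptions, since Example #2 only defines use_cuda.

# Hypothetical validation pass (not part of the original example).
import torch

def validate(model, dataloader, device):
    model.eval()
    total_loss, num_batches = 0.0, 0
    with torch.no_grad():
        for batch in dataloader:
            total_loss += loss_function(model, batch, device).item()  # loss_function is assumed
            num_batches += 1
    return total_loss / max(num_batches, 1)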
Example #3
parser.set_defaults(plot=False)

parser.add_argument('--checkpoint_directory',
                    type=str,
                    default='checkpoints',
                    help='directory for training checkpoints')
parser.add_argument('--checkpoint_prefix',
                    type=str,
                    default='d2',
                    help='prefix for training checkpoints')

args = parser.parse_args()
print(args)

# Creating CNN model
model = D2Net(model_file=args.model_file, use_cuda=False)
model = model.to(device)

# Optimizer
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=args.lr)

# Dataset
if args.use_validation:
    validation_dataset = PhotoTourismCombined(
        # scene_list_path='megadepth_utils/valid_scenes.txt',
        # scene_info_path=args.scene_info_path,
        base_path=args.dataset_path,
        train=False,
        preprocessing=args.preprocessing,
        pairs_per_scene=25)
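The excerpt stops once the dataset object is created. Mirroring Examples #1 and #2, it would presumably be wrapped in a DataLoader next; build_dataset() and the batch_size / num_workers arguments are carried over from those examples as assumptions.

    # Hypothetical continuation, mirroring the other examples.
    validation_dataset.build_dataset()
    validation_dataloader = DataLoader(validation_dataset,
                                        batch_size=args.batch_size,
                                        num_workers=args.num_workers)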
Example #4
import torch
from lib.model import D2Net
from lib.utils import preprocess_image
from lib.pyramid import process_multiscale
import scipy
import scipy.io
import scipy.misc
import numpy as np

use_cuda = torch.cuda.is_available()

# Creating CNN model
model = D2Net(model_file="models/d2_tf.pth", use_relu=True, use_cuda=use_cuda)

# Hyunji tried to modify this arbitrarily, so the alternative is left commented out below
device = torch.device("cuda:0" if use_cuda else "cpu")
#device = torch.device("cpu")

multiscale = True
max_edge = 2500
max_sum_edges = 5000


# D2-Net feature extraction function
def cnn_feature_extract(image, scales=[.25, 0.50, 1.0], nfeatures=1000):
    if len(image.shape) == 2:
        image = image[:, :, np.newaxis]
        image = np.repeat(image, 3, -1)

    # TODO: switch to PIL.Image due to deprecation of scipy.misc.imresize.
    resized_image = image
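The function is cut off right after the TODO about the deprecated scipy.misc.imresize. A sketch of the resizing step using PIL, enforcing the max_edge and max_sum_edges limits defined above, might look like this; the BILINEAR filter and the uint8 round-trip are assumptions of this sketch, not the repository's actual code.

    # Hypothetical continuation of cnn_feature_extract: resize with PIL instead
    # of the deprecated scipy.misc.imresize, respecting the limits defined above.
    from PIL import Image

    fact = 1.0
    if max(resized_image.shape[:2]) > max_edge:
        fact = max_edge / max(resized_image.shape[:2])
    if sum(resized_image.shape[:2]) * fact > max_sum_edges:
        fact = max_sum_edges / sum(resized_image.shape[:2])
    if fact < 1.0:
        h, w = resized_image.shape[:2]
        new_size = (int(round(w * fact)), int(round(h * fact)))  # PIL expects (width, height)
        resized_image = np.array(
            Image.fromarray(resized_image.astype('uint8')).resize(new_size, Image.BILINEAR)
        ).astype('float')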
Example #5
    model_path = os.path.join(log_folder, 'model')
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    create_exp_dir(log_folder, scripts_to_save=glob.glob('*.py'))
    create_exp_dir(log_folder, scripts_to_save=glob.glob('lib/*.py'))

    vis_writer = SummaryWriter(log_path)

else:
    vis_writer = None

criterion = D2Loss_hpatches(scaling_steps=3, device=device).to(device)

model = D2Net(model_file=args.resume, use_cuda=use_cuda)

# Optimizer
optimizer = optim.Adam(
    filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr
)

print(gct(), 'Param size = %fMB' % count_parameters_in_MB(model))

# Resume training
if args.resume:
    if os.path.isfile(args.resume):
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint['epoch'] + 1
        optimizer.load_state_dict(checkpoint['optimizer'])
        best_loss = checkpoint['loss']
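The resume block reads the 'epoch', 'optimizer' and 'loss' keys from the checkpoint, while the weights themselves are loaded through D2Net's model_file argument. A sketch of the matching save call from the (not shown) training loop could therefore look like the following; the 'model' key name, the epoch variable and the file naming are assumptions.

# Hypothetical counterpart in the training loop: save a checkpoint with the
# keys that the resume block above expects.
torch.save({'epoch': epoch,
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'loss': best_loss},
           os.path.join(model_path, 'checkpoint_%03d.pth' % epoch))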
Example #6
    scaling_steps = 6 - args.dilation_blocks - args.truncated_blocks
    safe_radius = 2**(args.truncated_blocks + args.dilation_blocks - 1)
elif args.model_type == 'res101':
    scaling_steps = 6 - args.dilation_blocks - args.truncated_blocks
    safe_radius = 2**(args.truncated_blocks + args.dilation_blocks - 1)
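# Example values: with truncated_blocks=1 and dilation_blocks=1, both branches give
# scaling_steps = 6 - 1 - 1 = 4 and safe_radius = 2**(1 + 1 - 1) = 2.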

# Print the chosen configuration as a sanity check
print(
    'The %s model is used for this test (truncated at block -%s, fine-tuning the last %s layers)'
    % (args.model_type, args.truncated_blocks, args.finetune_layers))

# Creating CNN model
model = D2Net(model_file=args.model_file,
              use_cuda=use_cuda,
              finetune_layers=args.finetune_layers,
              truncated_blocks=args.truncated_blocks,
              model_type=args.model_type,
              finetune_skip_layers=args.finetune_skip_layers,
              dilation_blocks=args.dilation_blocks)

# Optimizer
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                       lr=args.lr)

# Dataset
if args.use_validation:
    validation_dataset = MegaDepthDataset(
        scene_list_path='megadepth_utils/valid_scenes.txt',
        scene_info_path=args.scene_info_path,
        base_path=args.dataset_path,
        train=False,