Example No. 1
0
# -------------------------------------------------------
# Set up the evaluation routine

# Wrap the model for multi-GPU execution and move it to the target device.
network = nn.DataParallel(model.float(), device_ids=device_ids).to(device)

# Validation data, one panorama per batch. NOTE(review): shuffle=True looks
# deliberate here -- the upright evaluation below takes an explicit seed.
val_dataloader = torch.utils.data.DataLoader(
    dataset=OmniDepthDataset(root_path=input_dir,
                             path_to_img_list=val_file_list),
    batch_size=1,
    shuffle=True,
    num_workers=num_workers,
    drop_last=False)

# Evaluation-only trainer: no train dataloader, criterion, or optimizer.
trainer = OmniDepthTrainer(experiment_name,
                           network,
                           None,
                           val_dataloader,
                           None,
                           None,
                           checkpoint_dir,
                           device,
                           validation_sample_freq=validation_sample_freq)

# Evaluate the checkpointed model under seeded rotations of the input.
trainer.evaluate_upright(checkpoint_path, num_tests, rot_range, device, seed)
        0.068,
    ]
else:
    assert True, 'Unsupported network type'

# Make the checkpoint directory
mkdirs(checkpoint_dir)

# -------------------------------------------------------
# Set up the training routine
network = nn.DataParallel(model.float(), device_ids=device_ids).to(device)

val_dataloader = torch.utils.data.DataLoader(dataset=OmniDepthDataset(
    root_path=input_dir, path_to_img_list=val_file_list),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=num_workers,
                                             drop_last=False)

trainer = OmniDepthTrainer(experiment_name,
                           network,
                           None,
                           val_dataloader,
                           None,
                           None,
                           checkpoint_dir,
                           device,
                           validation_sample_freq=validation_sample_freq)

trainer.evaluate(checkpoint_path)
Example No. 3
0
    num_workers=num_workers,
    drop_last=False)

criterion = MultiScaleL2Loss(alpha_list, beta_list)

# Set up network parameters with Caffe-like LR multipliers
param_list = set_caffe_param_mult(network, lr, 0)
optimizer = torch.optim.Adam(params=param_list, lr=lr)

scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                            step_size=step_size,
                                            gamma=lr_decay)

trainer = OmniDepthTrainer(experiment_name,
                           network,
                           train_dataloader,
                           val_dataloader,
                           criterion,
                           optimizer,
                           checkpoint_dir,
                           device,
                           visdom=[vis, env],
                           scheduler=scheduler,
                           num_epochs=num_epochs,
                           validation_freq=validation_freq,
                           visualization_freq=visualization_freq,
                           validation_sample_freq=validation_sample_freq,
                           num_samples=num_samples)

trainer.train(checkpoint_path, load_weights_only)