Example 1
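This snippet appears to be an excerpt from the training script of the evaluating_bdl depthCompletion code (see the checkpoint path in Example 2). It builds the KITTI training and validation data loaders, the masked Gaussian log-likelihood loss and an RMSE metric, a DepthCompletionNet wrapped in DataParallel, and an Adam optimizer, and then iterates over the training batches. It assumes that torch and the project-local DatasetKITTIVal, DepthCompletionNet, MaskedL2Gauss and RMSE definitions are imported, and that batch_size, val_batch_size, learning_rate, weight_decay and kitti_depth_path are defined earlier in the script.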
    # NOTE: the original snippet starts in the middle of the training-dataset
    # constructor call; the class name and arguments below are assumed, only
    # crop_size=(352, 352) appears in the source.
    train_dataset = DatasetKITTIAugmentation(kitti_depth_path=kitti_depth_path,
                                             crop_size=(352, 352))
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=4)

    val_dataset = DatasetKITTIVal(kitti_depth_path=kitti_depth_path)
    val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                             batch_size=val_batch_size,
                                             shuffle=False,
                                             num_workers=1)

    # loss (Gaussian negative log-likelihood masked to valid ground-truth pixels)
    # and RMSE metric, both moved to the GPU:
    criterion = MaskedL2Gauss().cuda()
    rmse_criterion = RMSE().cuda()

    model = DepthCompletionNet().cuda()
    model = torch.nn.DataParallel(model)
    model.train()

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=weight_decay)
    optimizer.zero_grad()

    train_losses = []
    batch_train_losses = []
    val_losses = []
    train_rmses = []
    batch_train_rmses = []
    val_rmses = []
    for i_iter, batch in enumerate(train_loader):
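        # --- NOTE: the loop body is truncated in the original snippet. The lines
        # --- below are only a minimal sketch of one training iteration; the batch
        # --- layout (image, sparse depth, target) and the call signatures
        # --- model(img, sparse) and criterion(mean, log_var, target) are
        # --- assumptions, not taken from the source.
        img, sparse, target = batch[0].cuda(), batch[1].cuda(), batch[2].cuda()

        mean, log_var = model(img, sparse)       # predicted depth and log-variance
        loss = criterion(mean, log_var, target)  # masked Gaussian negative log-likelihood
        rmse = rmse_criterion(mean, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_train_losses.append(loss.item())
        batch_train_rmses.append(rmse.item())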
Example 2
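This snippet appears to be an excerpt from the matching evaluation script. For each trained model index in model_is it restores a checkpoint from trained_models/, and for each value M in M_values it performs num_runs_per_M evaluation runs, collecting per-batch losses, RMSE values and pixel-wise aleatoric/epistemic uncertainty estimates. It assumes that torch, numpy (as np) and the same project-local classes as in Example 1 are imported, and that model_id, model_is, M_values, num_runs_per_M, batch_size and kitti_depth_path are defined earlier in the script.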
eval_dataset = DatasetKITTIVal(kitti_depth_path=kitti_depth_path)
eval_loader = torch.utils.data.DataLoader(dataset=eval_dataset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          num_workers=4)

criterion = MaskedL2Gauss().cuda()
rmse_criterion = RMSE().cuda()

# evaluate each independently trained model (one checkpoint per index in model_is):
for model_i in model_is:
    print("model_i: %d" % model_i)

    restore_from = "/root/evaluating_bdl/depthCompletion/trained_models/%s_%d/checkpoint_40000.pth" % (
        model_id, model_i)
    model = DepthCompletionNet().cuda()
    model = torch.nn.DataParallel(model)
    model.load_state_dict(torch.load(restore_from))
    model.eval()

    # M: number of forward passes/samples drawn per prediction (presumably used
    # to estimate predictive uncertainty):
    for M in M_values:
        M_float = float(M)
        print("M: %d" % M)

        for run in range(num_runs_per_M):
            print("run: %d" % run)

            # accumulators for per-batch losses and RMSEs, and for the pixel-wise
            # aleatoric and epistemic uncertainty estimates:
            batch_losses = []
            batch_rmses = []
            sigma_alea_values = np.array([])
            sigma_epi_values = np.array([])
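            # --- NOTE: the original snippet ends here. The lines below only sketch
            # --- how the evaluation might continue: M stochastic forward passes per
            # --- batch (e.g. with dropout kept active), whose mean and spread give
            # --- the aleatoric and epistemic uncertainty estimates. The batch layout
            # --- and the call signatures model(img, sparse) and
            # --- criterion(mean, log_var, target) are assumptions, not from the source.
            for img, sparse, target in eval_loader:
                img, sparse, target = img.cuda(), sparse.cuda(), target.cuda()

                with torch.no_grad():
                    means, alea_vars = [], []
                    for _ in range(M):
                        mean, log_var = model(img, sparse)
                        means.append(mean)
                        alea_vars.append(torch.exp(log_var))
                    means = torch.stack(means)         # shape: (M, batch, ...)
                    alea_vars = torch.stack(alea_vars)

                    # combine the M samples into a single predictive distribution:
                    mean_pred = means.mean(dim=0)
                    sigma_alea = alea_vars.mean(dim=0).sqrt()            # aleatoric std. dev.
                    sigma_epi = means.var(dim=0, unbiased=False).sqrt()  # epistemic std. dev.

                    batch_losses.append(criterion(mean_pred, torch.log(sigma_alea**2), target).item())
                    batch_rmses.append(rmse_criterion(mean_pred, target).item())
                    sigma_alea_values = np.append(sigma_alea_values, sigma_alea.cpu().numpy().ravel())
                    sigma_epi_values = np.append(sigma_epi_values, sigma_epi.cpu().numpy().ravel())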