Example no. 1
0
def test_kitti_split(A, depth, model):
    """Predict depth for a wide KITTI image by splitting it into three
    horizontally overlapping crops, running the depth model on each crop,
    and stitching the metric-recovered predictions back together.

    Args:
        A: input image tensor, indexed as (N, C, H, W) -- assumes batch
            size 1 so the result can be a single (H, W) map. TODO confirm.
        depth: ground-truth depth, indexed as (N, H, W); each crop's
            prediction is rescaled against the matching slice of it.
        model: DataParallel-style wrapper exposing
            ``model.module.depth_model``.

    Returns:
        np.ndarray of shape (H, W): the stitched metric depth map. The
        two 10-pixel overlap bands are summed from both neighbouring
        crops and then halved, i.e. averaged.
    """
    # Width of one third of the image; neighbouring crops overlap by
    # 5 px on each side of the split boundaries.
    third = int(A.size(3) / 3)
    A_l = A[:, :, :, :third + 5]
    A_m = A[:, :, :, third - 5:third * 2 + 5]
    A_r = A[:, :, :, third * 2 - 5:]

    pred_depth_l, _ = model.module.depth_model(A_l)
    pred_depth_m, _ = model.module.depth_model(A_m)
    pred_depth_r, _ = model.module.depth_model(A_r)

    # Rescale each crop's relative prediction against the corresponding
    # ground-truth depth slice.
    pred_depth_metric_l = recover_metric_depth(pred_depth_l,
                                               depth[:, :, :third + 5])
    pred_depth_metric_m = recover_metric_depth(
        pred_depth_m, depth[:, :, third - 5:third * 2 + 5])
    pred_depth_metric_r = recover_metric_depth(pred_depth_r,
                                               depth[:, :, third * 2 - 5:])

    # Accumulate the three crops on a full-size canvas; the 10-px overlap
    # bands receive two contributions, so halve them to get the average.
    pred_depth_metric = np.zeros((A.size(2), A.size(3)))
    pred_depth_metric[:, :third + 5] += pred_depth_metric_l
    pred_depth_metric[:, third - 5:third * 2 + 5] += pred_depth_metric_m
    pred_depth_metric[:, third * 2 - 5:] += pred_depth_metric_r
    pred_depth_metric[:, third * 2 - 5:third * 2 + 5] /= 2.0
    pred_depth_metric[:, third - 5:third + 5] /= 2.0
    return pred_depth_metric
Example no. 2
0
def test_kitti_split1(data, model):
    """Run single-pass inference on *data* and return the depth map
    rescaled to metric units against the raw ground truth."""
    inference_out = model.module.inference(data)
    raw_pred = torch.squeeze(inference_out['b_fake'])
    # Rescale the relative prediction against the raw depth map.
    return recover_metric_depth(raw_pred, data['B_raw'])
Example no. 3
0
def val(val_dataloader, model):
    """Validate the model over one pass of *val_dataloader*.

    Returns:
        dict with the globally averaged absRel ('abs_rel') and WHDR
        ('whdr') errors.
    """
    n_batches = len(val_dataloader)
    smoothed_criteria = {
        'err_absRel': SmoothedValue(n_batches),
        'err_whdr': SmoothedValue(n_batches),
    }
    for batch in val_dataloader:
        prediction = torch.squeeze(model.module.inference(batch)['pred'])
        target_shape = torch.squeeze(batch['B_raw']).shape
        # Resize the prediction to the raw ground-truth resolution;
        # cv2.resize expects (width, height).
        prediction_resized = cv2.resize(prediction.cpu().numpy(),
                                        (target_shape[1], target_shape[0]))
        prediction_metric = recover_metric_depth(prediction_resized,
                                                 batch['B_raw'])
        smoothed_criteria = validate_rel_depth_err(prediction_metric,
                                                   batch['B_raw'],
                                                   smoothed_criteria,
                                                   scale=1.0)
    return {
        'abs_rel': smoothed_criteria['err_absRel'].GetGlobalAverageValue(),
        'whdr': smoothed_criteria['err_whdr'].GetGlobalAverageValue(),
    }
def val(val_dataloader, model):
    """Validate the model, stripping the padded ('invalid') borders from
    each prediction before scoring.

    Returns:
        dict with the globally averaged absRel ('abs_rel') and WHDR
        ('whdr') errors.
    """
    n_batches = len(val_dataloader)
    smoothed_criteria = {
        'err_absRel': SmoothedValue(n_batches),
        'err_whdr': SmoothedValue(n_batches),
    }
    for batch in val_dataloader:
        # margins holds the (top, bottom, left, right) padding to crop
        # away before comparing with the raw ground truth.
        margins = batch['invalid_side'][0]
        prediction = torch.squeeze(model.module.inference(batch)['b_fake'])
        prediction = prediction[margins[0]:prediction.size(0) - margins[1],
                                margins[2]:prediction.size(1) - margins[3]]

        prediction_resized = resize_image(prediction,
                                          torch.squeeze(batch['B_raw']).shape)
        prediction_metric = recover_metric_depth(prediction_resized,
                                                 batch['B_raw'])
        smoothed_criteria = validate_rel_depth_err(prediction_metric,
                                                   batch['B_raw'],
                                                   smoothed_criteria,
                                                   scale=1.0)
    return {
        'abs_rel': smoothed_criteria['err_absRel'].GetGlobalAverageValue(),
        'whdr': smoothed_criteria['err_whdr'].GetGlobalAverageValue(),
    }
Example no. 5
0
def test_kitti_split2(A, depth, model):
    """Predict depth for a wide KITTI image via three horizontally
    overlapping crops, padding each crop to a square before inference
    and stitching the metric-recovered predictions back together.

    Args:
        A: input image tensor, indexed as (N, C, H, W) -- assumes batch
            size 1 so the result can be a single (H, W) map. TODO confirm.
        depth: ground-truth depth, indexed as (N, H, W); each crop's
            prediction is rescaled against the matching slice of it.
        model: DataParallel-style wrapper exposing
            ``model.module.depth_model``.

    Returns:
        np.ndarray of shape (H, W): the stitched metric depth map. The
        two 10-pixel overlap bands are summed from both neighbouring
        crops and then halved, i.e. averaged.
    """
    # Width of one third of the image; neighbouring crops overlap by
    # 5 px on each side of the split boundaries.
    third = int(A.size(3) / 3)
    A_l = A[:, :, :, :third + 5]
    A_m = A[:, :, :, third - 5:third * 2 + 5]
    A_r = A[:, :, :, third * 2 - 5:]

    # Pad each crop on top so its height equals its width (square input).
    # NOTE(review): fill value -5 presumably flags out-of-image pixels to
    # the network -- confirm against the training preprocessing.
    pad_h_l = A_l.size(3) - A_l.size(2)
    pad_h_m = A_m.size(3) - A_m.size(2)
    pad_h_r = A_r.size(3) - A_r.size(2)
    A_l_pad = torch.nn.functional.pad(A_l, (0, 0, pad_h_l, 0), "constant", -5)
    A_m_pad = torch.nn.functional.pad(A_m, (0, 0, pad_h_m, 0), "constant", -5)
    A_r_pad = torch.nn.functional.pad(A_r, (0, 0, pad_h_r, 0), "constant", -5)

    pred_depth_l, _ = model.module.depth_model(A_l_pad)
    pred_depth_m, _ = model.module.depth_model(A_m_pad)
    pred_depth_r, _ = model.module.depth_model(A_r_pad)
    # Drop the padded top rows so predictions match the crop sizes again.
    pred_depth_l = pred_depth_l[:, :, pad_h_l:, :]
    pred_depth_m = pred_depth_m[:, :, pad_h_m:, :]
    pred_depth_r = pred_depth_r[:, :, pad_h_r:, :]

    # Rescale each crop's relative prediction against the corresponding
    # ground-truth depth slice.
    pred_depth_metric_l = recover_metric_depth(pred_depth_l,
                                               depth[:, :, :third + 5])
    pred_depth_metric_m = recover_metric_depth(
        pred_depth_m, depth[:, :, third - 5:third * 2 + 5])
    pred_depth_metric_r = recover_metric_depth(pred_depth_r,
                                               depth[:, :, third * 2 - 5:])

    # Accumulate the three crops on a full-size canvas; the 10-px overlap
    # bands receive two contributions, so halve them to get the average.
    pred_depth_metric = np.zeros((A.size(2), A.size(3)))
    pred_depth_metric[:, :third + 5] += pred_depth_metric_l
    pred_depth_metric[:, third - 5:third * 2 + 5] += pred_depth_metric_m
    pred_depth_metric[:, third * 2 - 5:] += pred_depth_metric_r
    pred_depth_metric[:, third * 2 - 5:third * 2 + 5] /= 2.0
    pred_depth_metric[:, third - 5:third + 5] /= 2.0
    return pred_depth_metric
        # NOTE(review): fragment -- the enclosing function's `def` line and
        # the opening of this dict literal were cut off by the extraction.
        'err_whdr': smoothed_whdr
    }

    for i, data in enumerate(data_loader):
        out = model.module.inference(data)
        pred_depth = torch.squeeze(out['b_fake'])

        # Strip the padded top/bottom rows before comparing with ground
        # truth; only vertical margins are cropped here.
        invalid_side = data['invalid_side'][0]
        pred_depth = pred_depth[invalid_side[0]:pred_depth.size(0) -
                                invalid_side[1], :]
        pred_depth_resize = resize_image(pred_depth,
                                         torch.squeeze(data['B_raw']).shape)

        img_path = data['A_paths']
        # Recover metric depth
        pred_depth_metric = recover_metric_depth(pred_depth_resize,
                                                 data['B_raw'])

        # mask=(45, 471, 41, 601) is presumably the standard NYU
        # evaluation crop -- TODO confirm against evaluate_rel_err.
        smoothed_criteria = evaluate_rel_err(pred_depth_metric,
                                             data['B_raw'],
                                             smoothed_criteria,
                                             mask=(45, 471, 41, 601),
                                             scale=10.)

        # Output directory is derived from the checkpoint file name.
        model_name = test_args.load_ckpt.split('/')[-1].split('.')[0]
        image_dir = os.path.join(cfg.ROOT_DIR, './evaluation',
                                 cfg.MODEL.ENCODER, model_name + '_nyu')
        if not os.path.exists(image_dir):
            os.makedirs(image_dir)
        img_name = img_path[0].split('/')[-1]
        depth_max = pred_depth_metric.max()
Example no. 7
0
        # NOTE(review): fragment -- the enclosing loop/function header was
        # cut off by the extraction; `rgb`, `depth`, `i`, etc. are defined
        # above this chunk.
        # Pixels with (near-)zero depth carry no ground truth.
        mask_invalid = depth < 1e-8
        # NOTE(review): marking the [45:471, 41:601] window invalid looks
        # inverted relative to the usual NYU eval crop (which is the VALID
        # region) -- confirm intent.
        mask_invalid[45:471, 41:601] = 1
        # NOTE(review): np.bool is removed in NumPy >= 1.24; this line
        # needs to become .astype(bool) on modern NumPy.
        mask_invalid = mask_invalid.astype(np.bool)

        # resize input to [385, 385], same to training setting
        rgb_resize = cv2.resize(rgb, (385, 385))

        img_torch = scale_torch(rgb_resize, 255)
        # Add a batch dimension and move to GPU for inference.
        img_torch = img_torch[None, :, :, :].cuda()
        with torch.no_grad():
            pred_depth = model.module.depth_model(img_torch)
        # Resize the prediction back to the original image resolution;
        # cv2.resize expects (width, height).
        pred_depth_resize = cv2.resize(pred_depth.cpu().numpy().squeeze(),
                                       (rgb.shape[1], rgb.shape[0]))

        # Recover metric depth
        pred_depth_metric = recover_metric_depth(pred_depth_resize, depth)
        # evaluate
        smoothed_criteria = evaluate_rel_err(pred_depth_metric, depth,
                                             smoothed_criteria)

        # Output directory is derived from the checkpoint file name.
        model_name = test_args.load_ckpt.split('/')[-1].split('.')[0]
        image_dir = os.path.join(cfg.ROOT_DIR, './evaluation',
                                 cfg.MODEL.ENCODER, model_name + '_nyu')
        os.makedirs(image_dir, exist_ok=True)
        img_name = '%04d.png' % i

        plt.imsave(os.path.join(image_dir, img_name),
                   pred_depth_metric,
                   cmap='rainbow')
        # NOTE(review): img_name[-4:] is the '.png' suffix, producing a
        # file named '.png_rgb.jpg' -- img_name[:-4] was probably intended.
        cv2.imwrite(os.path.join(image_dir, img_name[-4:] + '_rgb.jpg'),
                    np.squeeze(rgb))