Code example #1
import sys

import cv2
import numpy as np
import torch


def test_nyu(cfg, model, test_images, test_gt_depths):
    # Evaluate the depth model on the NYU Depth v2 test set.
    # resize_disp and eval_depth are helper functions defined elsewhere in the project.
    leng = test_images.shape[0]
    print('Test nyu depth on ' + str(leng) + ' images. Using depth model in ' +
          cfg.model_dir)
    pred_disp_list = []
    crop_imgs = []
    crop_gt_depths = []
    for i in range(leng):
        img = test_images[i]
        # Crop the image (CHW) and the ground-truth depth to the same
        # evaluation region, discarding the NYU border pixels.
        img_crop = img[:, 45:472, 41:602]
        crop_imgs.append(img_crop)
        gt_depth_crop = test_gt_depths[i][45:472, 41:602]
        crop_gt_depths.append(gt_depth_crop)
        #img = np.transpose(cv2.resize(np.transpose(img_crop, [1,2,0]), (576,448)), [2,0,1])
        # Resize the cropped image to the network input resolution, cfg.img_hw = (H, W).
        img = np.transpose(
            cv2.resize(np.transpose(img_crop, [1, 2, 0]),
                       (cfg.img_hw[1], cfg.img_hw[0])), [2, 0, 1])
        # Scale to [0, 1], add a batch dimension, and predict disparity on the GPU.
        img_t = torch.from_numpy(img).float().cuda().unsqueeze(0) / 255.0
        disp = model.infer_depth(img_t)
        disp = np.transpose(disp[0].cpu().detach().numpy(), [1, 2, 0])
        pred_disp_list.append(disp)

    # Resize the predicted disparities to the cropped ground-truth resolution
    # (converting them to depth) and compute the standard error/accuracy metrics.
    pred_depths = resize_disp(pred_disp_list, crop_gt_depths)
    eval_depth_res = eval_depth(crop_gt_depths, pred_depths, nyu=True)
    abs_rel, sq_rel, rms, log_rms, a1, a2, a3 = eval_depth_res
    sys.stderr.write(
        "{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10} \n".format(
            'abs_rel', 'sq_rel', 'rms', 'log10', 'a1', 'a2', 'a3'))
    sys.stderr.write(
        "{:10.4f}, {:10.4f}, {:10.3f}, {:10.3f}, {:10.3f}, {:10.3f}, {:10.3f} \n"
        .format(abs_rel, sq_rel, rms, log_rms, a1, a2, a3))

    return eval_depth_res
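
A minimal driver sketch for test_nyu, assuming the NYU test split has already been packed into NumPy arrays and that the model object exposes the infer_depth() method used above. The checkpoint path, the array file names, and the load_depth_model() helper are illustrative placeholders rather than part of the original project, and a CUDA device is required because test_nyu moves tensors to the GPU.

from types import SimpleNamespace

import numpy as np
import torch

# Placeholder config; only model_dir and img_hw are read by test_nyu.
cfg = SimpleNamespace(model_dir='checkpoints/nyu_depth',  # hypothetical checkpoint dir
                      img_hw=(448, 576))                  # network input size (H, W)

# Hypothetical loader: must return a network that implements infer_depth().
model = load_depth_model(cfg.model_dir)

test_images = np.load('data/nyu/test_images.npy')        # assumed [N, 3, 480, 640], 0-255
test_gt_depths = np.load('data/nyu/test_gt_depths.npy')  # assumed [N, 480, 640], meters

with torch.no_grad():
    abs_rel, sq_rel, rms, log_rms, a1, a2, a3 = test_nyu(
        cfg, model, test_images, test_gt_depths)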
Code example #2
import os
import sys

import cv2
import numpy as np
import torch


def test_eigen_depth(cfg, model):
    # Evaluate the depth model on the KITTI raw dataset using the Eigen test split.
    # resize_depths and eval_depth are helper functions defined elsewhere in the project.
    print('Evaluate depth using eigen split. Using model in ' + cfg.model_dir)
    filenames = open('./data/eigen/test_files.txt').readlines()
    pred_disp_list = []
    for i in range(len(filenames)):
        # Each line of test_files.txt gives a drive path, a frame index, and a
        # third field that is not used here.
        path1, idx, _ = filenames[i].strip().split(' ')
        img = cv2.imread(
            os.path.join(cfg.raw_base_dir, path1,
                         'image_02/data/' + idx + '.png'))
        #img_resize = cv2.resize(img, (832,256))
        img_resize = cv2.resize(img, (cfg.img_hw[1], cfg.img_hw[0]))
        # Scale to [0, 1], add a batch dimension, and reorder to NCHW for the network.
        img_input = torch.from_numpy(
            img_resize / 255.0).float().cuda().unsqueeze(0).permute(
                0, 3, 1, 2)
        disp = model.infer_depth(img_input)
        disp = disp[0].detach().cpu().numpy()
        disp = disp.transpose(1, 2, 0)
        pred_disp_list.append(disp)
        # print(i)

    # Precomputed ground-truth depth maps for the Eigen test split.
    gt_depths = np.load('./data/eigen/gt_depths.npz',
                        allow_pickle=True)['data']
    # Resize the predictions to the ground-truth resolution and compute the metrics.
    pred_depths, pred_disp_resized = resize_depths(gt_depths, pred_disp_list)
    eval_depth_res = eval_depth(gt_depths, pred_depths)
    abs_rel, sq_rel, rms, log_rms, a1, a2, a3 = eval_depth_res
    sys.stderr.write(
        "{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10} \n".format(
            'abs_rel', 'sq_rel', 'rms', 'log_rms', 'a1', 'a2', 'a3'))
    sys.stderr.write(
        "{:10.4f}, {:10.4f}, {:10.3f}, {:10.3f}, {:10.3f}, {:10.3f}, {:10.3f} \n"
        .format(abs_rel, sq_rel, rms, log_rms, a1, a2, a3))

    return eval_depth_res
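
The helpers resize_depths and eval_depth are not shown in these examples. The sketch below illustrates the conventional Eigen-split post-processing that a helper like resize_depths is commonly assumed to perform (resize each predicted disparity to the ground-truth resolution, invert it to depth, and median-scale against the ground truth); it is a sketch of the standard protocol under that assumption, not the project's actual implementation.

import cv2
import numpy as np


def resize_and_invert(gt_depths, pred_disp_list, min_depth=1e-3, max_depth=80.0):
    # Conventional KITTI post-processing (assumption, not the project's resize_depths):
    # resize disparity to the GT resolution, invert to depth, median-scale, and clamp.
    pred_depths = []
    for gt, disp in zip(gt_depths, pred_disp_list):
        h, w = gt.shape[:2]
        disp_resized = cv2.resize(disp.squeeze(), (w, h))
        depth = 1.0 / np.maximum(disp_resized, 1e-6)
        mask = gt > 0  # only LiDAR-covered pixels carry ground truth
        depth *= np.median(gt[mask]) / np.median(depth[mask])
        pred_depths.append(np.clip(depth, min_depth, max_depth))
    return pred_depths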