def val(net):
    """Run the refinement network on the validation set and collect per-sample error metrics."""
    # Initialize global and geometric errors ...
    num_samples = len(occ_list)
    rms = np.zeros(num_samples, np.float32)
    log10 = np.zeros(num_samples, np.float32)
    abs_rel = np.zeros(num_samples, np.float32)
    sq_rel = np.zeros(num_samples, np.float32)
    thr1 = np.zeros(num_samples, np.float32)
    thr2 = np.zeros(num_samples, np.float32)
    thr3 = np.zeros(num_samples, np.float32)
    dbe_acc = np.zeros(num_samples, np.float32)
    dbe_com = np.zeros(num_samples, np.float32)
    dde_0 = np.zeros(num_samples, np.float32)
    dde_m = np.zeros(num_samples, np.float32)
    dde_p = np.zeros(num_samples, np.float32)

    net.eval()
    with torch.no_grad():
        for i in range(len(occ_list)):
            depth_coarse = pred_depths[i].unsqueeze(0).cuda()
            occlusion = np.load(os.path.join(opt.occ_dir, occ_list[i]))

            # remove predictions with small score
            mask = occlusion[:, :, 0] <= opt.th
            occlusion[mask, 1:] = 0
            occlusion = padding_array(occlusion)
            occlusion = occlusion.unsqueeze(0).cuda()

            # load the auxiliary input (normal map scaled to [-1, 1] or image scaled to [0, 1])
            if opt.use_normal:
                aux = cv2.imread(os.path.join(opt.data_dir, normal_list[i]), -1) / (2 ** 16 - 1) * 2 - 1
            elif opt.use_img:
                aux = cv2.imread(os.path.join(opt.data_dir, img_list[i]), -1) / 255
            else:
                aux = None
            if aux is not None:
                aux = padding_array(aux).unsqueeze(0).cuda()

            # forward pass: either refine in log-depth space or predict depth directly
            if opt.use_log:
                depth_refined = depth_coarse * net(depth_coarse.log(), occlusion, aux).exp()
            else:
                depth_refined = net(depth_coarse, occlusion, aux)
            pred = depth_refined.clamp(1e-9)

            # get numpy array from torch tensor and crop
            gt = gt_depths[i, eigen_crop[0]:eigen_crop[1], eigen_crop[2]:eigen_crop[3]]
            edge = gt_boundaries[i, eigen_crop[0]:eigen_crop[1], eigen_crop[2]:eigen_crop[3]]
            pred = pred.squeeze().cpu().numpy()[eigen_crop[0]:eigen_crop[1], eigen_crop[2]:eigen_crop[3]]

            gt_vec = gt.flatten()
            pred_vec = pred.flatten()

            abs_rel[i], sq_rel[i], rms[i], log10[i], thr1[i], thr2[i], thr3[i] = compute_global_errors(gt_vec, pred_vec)
            dbe_acc[i], dbe_com[i], est_edges = compute_depth_boundary_error(edge, pred)
            dde_0[i], dde_m[i], dde_p[i] = compute_directed_depth_error(gt_vec, pred_vec, 3.0)

    return abs_rel, sq_rel, rms, log10, thr1, thr2, thr3, dbe_acc, dbe_com, dde_0, dde_m, dde_p
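# --- Illustrative sketch (not part of the original file) ---
# compute_global_errors() is imported from the evaluation utilities and is not
# defined here. The sketch below assumes the standard global depth error
# definitions (abs rel, sq rel, RMSE, log10 and the delta < 1.25**k accuracy
# thresholds); the repository's own implementation may differ, e.g. in how it
# masks invalid ground-truth pixels. The name *_sketch is hypothetical.
def compute_global_errors_sketch(gt_vec, pred_vec, eps=1e-9):
    """Hypothetical reference for the per-image global errors returned above."""
    valid = gt_vec > eps                      # evaluate only where ground truth is valid (assumption)
    gt, pred = gt_vec[valid], pred_vec[valid]
    ratio = np.maximum(gt / pred, pred / gt)  # ratio used by the delta accuracy metrics
    thr1 = (ratio < 1.25).mean()
    thr2 = (ratio < 1.25 ** 2).mean()
    thr3 = (ratio < 1.25 ** 3).mean()
    abs_rel = np.mean(np.abs(gt - pred) / gt)
    sq_rel = np.mean(((gt - pred) ** 2) / gt)
    rms = np.sqrt(np.mean((gt - pred) ** 2))
    log10 = np.mean(np.abs(np.log10(gt) - np.log10(pred)))
    return abs_rel, sq_rel, rms, log10, thr1, thr2, thr3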
# convert the stacked coarse depth maps to an (N, 1, H, W) float tensor
depths = torch.from_numpy(np.ascontiguousarray(depths)).float().unsqueeze(1)
assert len(occ_list) == depths.shape[0], 'number of depth maps and occlusion maps does not match!'

with torch.no_grad():
    for i in tqdm(range(len(occ_list)), desc='refining depth prediction from {}'.format(method)):
        depth_coarse = depths[i].unsqueeze(0).cuda()
        occlusion = np.load(os.path.join(opt.occ_dir, occ_list[i]))

        # remove predictions with small score
        mask = occlusion[:, :, 0] <= opt.th
        occlusion[mask, 1:] = 0
        occlusion = padding_array(occlusion)
        occlusion = occlusion.unsqueeze(0).cuda()

        # load the auxiliary input (normal map scaled to [-1, 1] or image scaled to [0, 1])
        if opt.use_normal:
            aux = cv2.imread(os.path.join(opt.data_dir, normal_list[i]), -1) / (2 ** 16 - 1) * 2 - 1
        elif opt.use_img:
            aux = cv2.imread(os.path.join(opt.data_dir, img_list[i]), -1) / 255
        else:
            aux = None
        if aux is not None:
            aux = padding_array(aux).unsqueeze(0).cuda()

        # forward pass
        pred = net(depth_coarse, occlusion, aux)
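# --- Illustrative sketch (not part of the original file) ---
# padding_array() comes from the repository's utilities and is not defined in
# this snippet. A common pattern, assumed below, is to zero-pad H and W up to
# a multiple of the network's downsampling stride and return a CHW float
# tensor; the stride of 32, the zero padding, and the *_sketch name are all
# assumptions for illustration, not the repository's actual code.
def padding_array_sketch(arr, stride=32):
    """Hypothetical stand-in for padding_array(): HxW[xC] numpy array -> CxHxW torch tensor."""
    if arr.ndim == 2:                         # promote single-channel maps to HxWx1
        arr = arr[:, :, None]
    h, w, _ = arr.shape
    pad_h = (stride - h % stride) % stride    # padding needed to reach the next multiple of stride
    pad_w = (stride - w % stride) % stride
    arr = np.pad(arr, ((0, pad_h), (0, pad_w), (0, 0)), mode='constant')
    return torch.from_numpy(np.ascontiguousarray(arr.transpose(2, 0, 1))).float()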