def test_single_image(img_path, model, training_hw, save_dir='./'):
    """Run depth inference on one image and save a colorized disparity map.

    Args:
        img_path: Path to the input image (read with ``cv2.imread``).
        model: Model exposing ``infer_depth(tensor)``; run on CUDA here.
        training_hw: ``(height, width)`` the model was trained at; the
            input is resized to this resolution before inference.
        save_dir: Directory where the colorized disparity image is dumped.

    Raises:
        FileNotFoundError: If the image cannot be read from ``img_path``.
    """
    img = cv2.imread(img_path)
    if img is None:
        # cv2.imread silently returns None on a missing/unreadable file;
        # fail loudly instead of crashing later on img.shape.
        raise FileNotFoundError('Could not read image: {0}'.format(img_path))
    h, w = img.shape[0:2]
    # cv2.resize takes (width, height), hence the swapped training_hw order.
    img_resized = cv2.resize(img, (training_hw[1], training_hw[0]))
    # HWC uint8 -> 1xCxHxW float in [0, 1] on the GPU.
    img_t = torch.from_numpy(np.transpose(
        img_resized, [2, 0, 1])).float().cuda().unsqueeze(0) / 255.0
    disp = model.infer_depth(img_t)
    disp = np.transpose(disp[0].cpu().detach().numpy(), [1, 2, 0])
    # Bring the predicted disparity back to the original image resolution.
    disp_resized = cv2.resize(disp, (w, h))
    visualizer = Visualizer_debug(dump_dir=save_dir)
    visualizer.save_disp_color_img(disp_resized, name='demo')
    print('Depth prediction saved in ' + save_dir)
def test_kitti_2015(cfg, model, gt_flows, noc_masks, gt_masks, depth_save_dir=None):
    """Evaluate optical-flow predictions on the KITTI 2015 benchmark.

    Iterates the KITTI 2015 dataset, runs the model in flow-only or full
    (flow + depth + pose) mode depending on ``cfg.mode``, and reports the
    averaged flow metrics from ``eval_flow_avg``.

    Args:
        cfg: Config with ``gt_2015_dir``, ``mode`` and ``config_file``.
        model: Model exposing ``inference_flow`` and ``inference``.
        gt_flows: Ground-truth flow fields for evaluation.
        noc_masks: Non-occluded-region masks for evaluation.
        gt_masks: Moving-object masks passed as ``moving_masks``.
        depth_save_dir: Directory handed to ``Visualizer_debug``.

    Returns:
        The flow evaluation result produced by ``eval_flow_avg``.
    """
    dataset = KITTI_2015(cfg.gt_2015_dir)
    # NOTE(review): the visualizer is not used below, but the constructor
    # may create depth_save_dir as a side effect — kept; confirm.
    visualizer = Visualizer_debug(depth_save_dir)
    pred_flow_list = []
    # Disparities are collected for a future depth evaluation (see the
    # "depth evaluation" marker below) but are not consumed here yet.
    pred_disp_list = []
    # Hoist the mode decision out of the loop; it is loop-invariant.
    flow_only = cfg.mode in ('flow', 'flowposenet')
    for inputs in tqdm(dataset):
        img, K, K_inv = inputs
        # Add a batch dimension of 1 to each input.
        img = img[None, :, :, :]
        K = K[None, :, :]
        K_inv = K_inv[None, :, :]
        # Each sample stacks the two frames vertically; split them apart.
        img_h = int(img.shape[2] / 2)
        img1, img2 = img[:, :, :img_h, :], img[:, :, img_h:, :]
        img1, img2, K, K_inv = img1.cuda(), img2.cuda(), K.cuda(), K_inv.cuda()
        if flow_only:
            flow = model.inference_flow(img1, img2)
        else:
            flow, disp1, disp2, Rt, _, _ = model.inference(
                img1, img2, K, K_inv)
            disp = disp1[0].detach().cpu().numpy().transpose(1, 2, 0)
            pred_disp_list.append(disp)
        flow = flow[0].detach().cpu().numpy().transpose(1, 2, 0)
        pred_flow_list.append(flow)
    eval_flow_res = eval_flow_avg(gt_flows, noc_masks, pred_flow_list, cfg,
                                  moving_masks=gt_masks, write_img=False)
    print('CONFIG: {0}, mode: {1}'.format(cfg.config_file, cfg.mode))
    print('[EVAL] [KITTI 2015]')
    print(eval_flow_res)
    # depth evaluation
    return eval_flow_res