def write_vls_eval(self, data_blob, outputs, tagname, step):
    """Log an evaluation grid to tensorboard under ``tagname``.

    Rows: instance overlay | frame 2, predicted | GT disparity,
    reconstruction | predicted flow; plus a separate sampling figure.
    """
    def chw_float(arr):
        # HWC uint8 array -> CHW float tensor in [0, 1] for add_image.
        return (torch.from_numpy(arr).float() / 255).permute([2, 0, 1])

    frame1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    frame2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    instances = data_blob['insmap'][0].squeeze().numpy()

    ins_vis = vls_ins(frame1, instances)
    disp_pred_vis = tensor2disp(1 / outputs[('depth', 2)], vmax=0.15, viewind=0)
    disp_gt_vis = tensor2disp(1 / data_blob['depthmap'], vmax=0.15, viewind=0)
    flow_vis = flow_to_image(
        outputs[('flowpred', 2)][0].detach().cpu().permute([1, 2, 0]).numpy(),
        rad_max=10)
    recon_vis = tensor2rgb(outputs[('reconImg', 2)], viewind=0)

    rows = [
        np.concatenate([np.array(ins_vis), np.array(frame2)], axis=1),
        np.concatenate([np.array(disp_pred_vis), np.array(disp_gt_vis)], axis=1),
        np.concatenate([np.array(recon_vis), np.array(flow_vis)], axis=1),
    ]
    grid = np.concatenate(rows, axis=0)
    self.writer.add_image('{}_predvls'.format(tagname), chw_float(grid), step)

    sampling_vis = self.vls_sampling(
        np.array(ins_vis), frame2, data_blob['depthvls'],
        data_blob['flowmap'], data_blob['insmap'], outputs)
    self.writer.add_image('{}_X'.format(tagname), chw_float(sampling_vis), step)
def write_vls(self, data_blob, depth, selector, step):
    """Log a 2x2 grid: selector mask | instance overlay / GT | predicted disparity."""
    def chw_float(arr):
        # HWC uint8 array -> CHW float tensor in [0, 1] for add_image.
        return (torch.from_numpy(arr).float() / 255).permute([2, 0, 1])

    frame = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    instances = data_blob['insmap'][0].squeeze().numpy()

    mask_vis = np.array(tensor2disp(selector, vmax=1, viewind=0))
    ins_vis = np.array(Image.fromarray(vls_ins(frame, instances)))
    disp_gt_vis = np.array(tensor2disp(1 / data_blob['depthmap'], vmax=0.15, viewind=0))
    disp_pred_vis = np.array(tensor2disp(1 / depth, vmax=0.15, viewind=0))

    top = np.concatenate([mask_vis, ins_vis], axis=1)
    bottom = np.concatenate([disp_gt_vis, disp_pred_vis], axis=1)
    grid = np.concatenate([top, bottom], axis=0)
    self.writer.add_image('predvls', chw_float(grid), step)
def write_vls_eval(self, data_blob, flowpred, mDdoutputs, selector, evalidx, step):
    """Log a 3-row eval grid (mask/instances, GT/pred disparity, GT/pred flow)
    under a tag indexed by ``evalidx``."""
    def chw_float(arr):
        # HWC uint8 array -> CHW float tensor in [0, 1] for add_image.
        return (torch.from_numpy(arr).float() / 255).permute([2, 0, 1])

    frame = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    instances = data_blob['insmap'][0].squeeze().numpy()

    mask_vis = np.array(tensor2disp(selector, vmax=1, viewind=0))
    ins_vis = np.array(Image.fromarray(vls_ins(frame, instances)))
    flow_gt_vis = np.array(Image.fromarray(flow_to_image(
        data_blob['flowmap'][0].permute([1, 2, 0]).numpy(), rad_max=10)))
    flow_pred_vis = np.array(Image.fromarray(flow_to_image(
        flowpred[0].detach().cpu().permute([1, 2, 0]).numpy(), rad_max=10)))
    disp_gt_vis = np.array(tensor2disp(1 / data_blob['depthmap'], vmax=0.15, viewind=0))
    disp_pred_vis = np.array(tensor2disp(1 / mDdoutputs[('mDepth', 0)], vmax=0.15, viewind=0))

    rows = [
        np.concatenate([mask_vis, ins_vis], axis=1),
        np.concatenate([disp_gt_vis, disp_pred_vis], axis=1),
        np.concatenate([flow_gt_vis, flow_pred_vis], axis=1),
    ]
    grid = np.concatenate(rows, axis=0)
    self.writer.add_image('predvls_eval_{}'.format(str(evalidx).zfill(2)),
                          chw_float(grid), step)
def write_vls(self, data_blob, outputs, flowselector, step):
    """Log a 2x2 grid: instance overlay | frame 2 / predicted flow | flow selector mask."""
    def chw_float(arr):
        # HWC uint8 array -> CHW float tensor in [0, 1] for add_image.
        return (torch.from_numpy(arr).float() / 255).permute([2, 0, 1])

    frame1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    frame2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    instances = data_blob['insmap'][0].squeeze().numpy()

    mask_vis = np.array(tensor2disp(flowselector, vmax=1, viewind=0))
    ins_vis = np.array(vls_ins(frame1, instances))
    # Only the final flow estimate in `outputs` is visualized.
    flow_vis = np.array(flow_to_image(
        outputs[-1][0].detach().cpu().permute([1, 2, 0]).numpy(), rad_max=50))

    top = np.concatenate([ins_vis, np.array(frame2)], axis=1)
    bottom = np.concatenate([flow_vis, mask_vis], axis=1)
    grid = np.concatenate([top, bottom], axis=0)
    self.writer.add_image('predvls', chw_float(grid), step)
def write_vls_eval(self, data_blob, outputs, tagname, step):
    """Log a 2x2 eval grid under ``tagname``: instance overlay | frame 2 /
    predicted disparity | image-1 reconstruction."""
    def chw_float(arr):
        # HWC uint8 array -> CHW float tensor in [0, 1] for add_image.
        return (torch.from_numpy(arr).float() / 255).permute([2, 0, 1])

    frame1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    frame2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    instances = data_blob['insmap'][0].squeeze().numpy()

    ins_vis = np.array(vls_ins(frame1, instances))
    disp_pred_vis = np.array(tensor2disp(1 / data_blob['mdDepth_pred'], vmax=0.15, viewind=0))
    # Last slice along dim 1 of the reconstruction stack is displayed.
    recon_vis = np.array(tensor2rgb(outputs[('img1_recon', 2)][:, -1], viewind=0))

    top = np.concatenate([ins_vis, np.array(frame2)], axis=1)
    bottom = np.concatenate([disp_pred_vis, recon_vis], axis=1)
    grid = np.concatenate([top, bottom], axis=0)
    self.writer.add_image('{}_predvls'.format(tagname), chw_float(grid), step)
def write_vls(self, data_blob, outputs, depthselector, step):
    """Log a 3-row training grid (instances/frame 2, disparity/mask,
    reconstruction/flow) plus a separate sampling figure."""
    def chw_float(arr):
        # HWC uint8 array -> CHW float tensor in [0, 1] for add_image.
        return (torch.from_numpy(arr).float() / 255).permute([2, 0, 1])

    frame1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    frame2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    instances = data_blob['insmap'][0].squeeze().numpy()

    mask_vis = np.array(tensor2disp(depthselector, vmax=1, viewind=0))
    ins_vis = np.array(vls_ins(frame1, instances))
    disp_pred_vis = np.array(tensor2disp(1 / outputs['depth_predictions'][-1], vmax=0.15, viewind=0))
    flow_vis = np.array(flow_to_image(
        outputs['flowpred'][0].detach().cpu().permute([1, 2, 0]).numpy(), rad_max=50))
    recon_vis = np.array(tensor2rgb(outputs['img1_recon'], viewind=0))

    rows = [
        np.concatenate([ins_vis, np.array(frame2)], axis=1),
        np.concatenate([disp_pred_vis, mask_vis], axis=1),
        np.concatenate([recon_vis, flow_vis], axis=1),
    ]
    grid = np.concatenate(rows, axis=0)
    self.writer.add_image('predvls', chw_float(grid), step)

    sampling_vis = self.vls_sampling(
        np.array(ins_vis), frame2, data_blob['depthvls'],
        data_blob['flowmap'], data_blob['insmap'], outputs)
    self.writer.add_image('X', chw_float(sampling_vis), step)
def write_vls(self, data_blob, outputs, step):
    """Log a 3-row training grid (instances/frame 2, disparity/reconstruction,
    RAFT flow/predicted flow), plus sampling and object-movement figures."""
    def chw_float(arr):
        # HWC uint8 array -> CHW float tensor in [0, 1] for add_image.
        return (torch.from_numpy(arr).float() / 255).permute([2, 0, 1])

    frame1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    frame2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    instances = data_blob['insmap'][0].squeeze().numpy()

    ins_vis = np.array(vls_ins(frame1, instances))
    disp_pred_vis = np.array(tensor2disp(1 / data_blob['mdDepth_pred'], vmax=0.15, viewind=0))
    recon_vis = np.array(tensor2rgb(outputs[('img1_recon', 2)][:, -1], viewind=0))
    # Precomputed RAFT flow (from the data blob) vs. this model's prediction.
    raft_flow_vis = np.array(Image.fromarray(flow_to_image(
        data_blob['flowpred'][0].permute([1, 2, 0]).cpu().numpy())))
    pred_flow_vis = np.array(Image.fromarray(flow_to_image(
        outputs[('flowpred', 2)][0].permute([1, 2, 0]).detach().cpu().numpy())))

    rows = [
        np.concatenate([ins_vis, np.array(frame2)], axis=1),
        np.concatenate([disp_pred_vis, recon_vis], axis=1),
        np.concatenate([raft_flow_vis, pred_flow_vis], axis=1),
    ]
    grid = np.concatenate(rows, axis=0)
    self.writer.add_image('predvls', chw_float(grid), step)

    sampling_vis = self.vls_sampling(np.array(ins_vis), frame2, data_blob['depthvls'], outputs)
    self.writer.add_image('X', chw_float(sampling_vis), step)

    motion_vis = self.vls_objmvment(np.array(ins_vis), data_blob['insmap'], data_blob['posepred'])
    self.writer.add_image('objmvment', chw_float(motion_vis), step)
def write_vls(self, data_blob, est_objpose, estflow, selector):
    """Log pose-estimation visualizations to tensorboard at self.total_steps.

    Writes two images: a 2x2 grid (selector mask | instance overlay / GT flow |
    estimated flow) and a pose scatter figure from plot_scattervls.

    Args:
        data_blob: batch dict with 'img1', 'img2', 'depthmap', 'insmap',
            'intrinsic', 'flowmap', 'poses' (shapes not visible here).
        est_objpose: estimated object pose tensor; element 0 of the batch is used.
        estflow: estimated flow tensor, (B, 2, H, W) — assumed; confirm.
        selector: per-pixel mask tensor visualized via tensor2disp.
    """
    # Take the first batch element of each input and move to numpy.
    img1 = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    img2 = data_blob['img2'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    depthmap = data_blob['depthmap'][0].squeeze().numpy()
    insmap = data_blob['insmap'][0].squeeze().numpy()
    intrinsic = data_blob['intrinsic'][0].squeeze().numpy()
    est_objpose = est_objpose[0].detach().cpu().numpy()
    flowmap = data_blob['flowmap'][0].squeeze().numpy()
    objpose_gt = data_blob['poses'][0].squeeze().numpy()
    figmask = tensor2disp(selector, vmax=1, viewind=0)
    insvls = Image.fromarray(vls_ins(img1, insmap))
    # objpose_gt[0] is passed separately alongside the full GT pose array —
    # presumably the ego/reference pose; verify against plot_scattervls.
    posevls = self.plot_scattervls(img1, img2, depthmap, insmap, intrinsic,
                                   objpose_gt[0], objpose_gt, est_objpose, flowmap)
    flowvls = flow_to_image(data_blob['flowmap'][0].cpu().permute([1, 2, 0]).numpy(), rad_max=15)
    estflowvls = flow_to_image(estflow[0].cpu().permute([1, 2, 0]).numpy(), rad_max=15)
    img_val_up = np.concatenate([np.array(figmask), np.array(insvls)], axis=1)
    img_val_down = np.concatenate([np.array(flowvls), np.array(estflowvls)], axis=1)
    img_val = np.concatenate([np.array(img_val_up), np.array(img_val_down)], axis=0)
    # Drop any alpha channel from the scatter figure before logging.
    posevls = np.array(posevls[:, :, 0:3])
    self.writer.add_image('img_val',
                          (torch.from_numpy(img_val).float() / 255).permute([2, 0, 1]),
                          self.total_steps)
    self.writer.add_image('posevls',
                          (torch.from_numpy(posevls).float() / 255).permute([2, 0, 1]),
                          self.total_steps)
def write_vls(self, data_blob, flowpred, selector, step):
    """Log a 2x2 grid: selector mask | instance overlay / GT flow | predicted flow."""
    def chw_float(arr):
        # HWC uint8 array -> CHW float tensor in [0, 1] for add_image.
        return (torch.from_numpy(arr).float() / 255).permute([2, 0, 1])

    frame = data_blob['img1'][0].permute([1, 2, 0]).numpy().astype(np.uint8)
    instances = data_blob['insmap'][0].squeeze().numpy()

    mask_vis = np.array(tensor2disp(selector, vmax=1, viewind=0))
    ins_vis = np.array(Image.fromarray(vls_ins(frame, instances)))
    flow_gt_vis = np.array(Image.fromarray(flow_to_image(
        data_blob['flowmap'][0].permute([1, 2, 0]).numpy(), rad_max=10)))
    # Only the final flow estimate in the prediction list is visualized.
    flow_pred_vis = np.array(Image.fromarray(flow_to_image(
        flowpred[-1][0].detach().cpu().permute([1, 2, 0]).numpy(), rad_max=10)))

    top = np.concatenate([mask_vis, ins_vis], axis=1)
    bottom = np.concatenate([flow_gt_vis, flow_pred_vis], axis=1)
    grid = np.concatenate([top, bottom], axis=0)
    self.writer.add_image('predvls', chw_float(grid), step)
def write_vls(self, image1, image2, flowgt, flow_predictions, valid):
    """Log a 4-row flow-training grid to tensorboard at self.total_steps.

    Rows: input pair / predicted vs. GT flow / image2 warped by each flow /
    image1 next to the validity mask.

    Args:
        image1, image2: (B, 3, H, W) tensors; channel order is reversed below
            (presumably BGR -> RGB — confirm against the data loader).
        flowgt: ground-truth flow, (B, 2, H, W).
        flow_predictions: list of flow tensors; only the last is visualized.
        valid: per-pixel validity mask for flowgt.
    """
    # Reverse channel order for display.
    image1 = image1.detach()
    image1 = torch.stack(
        [image1[:, 2, :, :], image1[:, 1, :, :], image1[:, 0, :, :]], dim=1)
    image2 = image2.detach()
    image2 = torch.stack(
        [image2[:, 2, :, :], image2[:, 1, :, :], image2[:, 0, :, :]], dim=1)

    img1 = image1[0].cpu().permute([1, 2, 0]).numpy().astype(np.uint8)
    img2 = image2[0].cpu().permute([1, 2, 0]).numpy().astype(np.uint8)

    flow_pred = flow_to_image(
        flow_predictions[-1][0].permute([1, 2, 0]).detach().cpu().numpy())
    flow_gt = flow_to_image(flowgt[0].permute([1, 2, 0]).detach().cpu().numpy())

    h, w = image1.shape[2::]
    xx, yy = np.meshgrid(range(w), range(h), indexing='xy')
    base_grid = np.stack([xx, yy], axis=0)

    def _warp_image2(flow):
        # Backward-warp the first image2 in the batch by `flow` (B, 2, H, W).
        # NOTE: coordinates are normalized with x/w (not x/(w-1)); combined
        # with align_corners=False this is a slightly offset grid — kept as-is
        # to preserve the original behavior.
        pixelloc = torch.from_numpy(base_grid).float() + flow[0].detach().cpu()
        pixelloc[0, :, :] = ((pixelloc[0, :, :] / w) - 0.5) * 2
        pixelloc[1, :, :] = ((pixelloc[1, :, :] / h) - 0.5) * 2
        pixelloc = pixelloc.permute([1, 2, 0])
        recon = torch.nn.functional.grid_sample(
            image2[0, :, :, :].unsqueeze(0).cpu(), pixelloc.unsqueeze(0),
            mode='bilinear', align_corners=False)
        return recon[0].permute([1, 2, 0]).numpy().astype(np.uint8)

    # Warp by the prediction and by the ground truth (previously two
    # near-identical inline copies of the same code).
    image1_recon = _warp_image2(flow_predictions[-1])
    image1_recon_gt = _warp_image2(flowgt)

    figvalid = np.array(
        tensor2disp(valid[0].unsqueeze(0).unsqueeze(0).float(), vmax=1, viewind=0))

    img_up = np.concatenate([img1, img2], axis=1)
    img_mid = np.concatenate([flow_pred, flow_gt], axis=1)
    img_down = np.concatenate([image1_recon, image1_recon_gt], axis=1)
    img_down2 = np.concatenate([img1, figvalid], axis=1)
    img_vls = np.concatenate([img_up, img_mid, img_down, img_down2], axis=0)
    self.writer.add_image(
        'img_vls',
        (torch.from_numpy(img_vls).float() / 255).permute([2, 0, 1]),
        self.total_steps)
def validate_kitti(model, args, eval_loader, group, isdeepv2d=False):
    """Perform validation using the KITTI-2015 (train) split.

    Runs the model over `eval_loader`, saves one qualitative comparison image
    per sample under `vlsroot`, and accumulates median-scaled depth error
    metrics across ranks.

    Returns:
        dict of depth metrics on rank 0, otherwise None.
    """
    model.eval()
    gpu = args.gpu
    # Slots 0-8 accumulate the nine error metrics; slot 9 counts samples.
    eval_measures_depth = torch.zeros(10).cuda(device=gpu)

    # NOTE(review): hard-coded local output root — adjust per machine.
    vlsroot = '/media/shengjie/disk1/visualization/paper_depthvls'
    residual_root = os.path.join(vlsroot, 'residual_vls')
    bts_root = os.path.join(vlsroot, 'bts_vls')
    ours_root = os.path.join(vlsroot, 'ours_vls')
    deepv2d_root = os.path.join(vlsroot, 'deepv2d_vls')
    rgb_root = os.path.join(vlsroot, 'rgb_in')
    # os.makedirs(residual_root, exist_ok=True)
    # os.makedirs(bts_root, exist_ok=True)
    # os.makedirs(ours_root, exist_ok=True)
    # os.makedirs(deepv2d_root, exist_ok=True)
    # os.makedirs(rgb_root, exist_ok=True)

    for val_id, data_blob in enumerate(tqdm(eval_loader)):
        image1 = data_blob['img1'].cuda(gpu) / 255.0
        image2 = data_blob['img2'].cuda(gpu) / 255.0
        intrinsic = data_blob['intrinsic'].cuda(gpu)
        insmap = data_blob['insmap'].cuda(gpu)
        posepred = data_blob['posepred'].cuda(gpu)
        depthgt = data_blob['depthmap'].cuda(gpu)

        # Choose the monocular-depth initialization source.
        if not args.initbymD:
            mD_pred = data_blob['depthpred'].cuda(gpu)
        else:
            mD_pred = data_blob['mdDepth_pred'].cuda(gpu)

        svname = "{}.png".format(str(val_id).zfill(10))
        mD_pred_clipped = torch.clamp_min(mD_pred, min=args.min_depth_pred)
        outputs = model(image1, image2, mD_pred_clipped, intrinsic, posepred, insmap)
        predread = outputs[('depth', 2)]
        depthpred_deepv2d = data_blob['depthpred_deepv2d'].cuda(gpu)
        sigmoidact = outputs[('residualdepth', 2)]
        # tensor2disp(1 / mD_pred_clipped, vmax=0.15, viewind=0).save(os.path.join(bts_root, svname))
        # tensor2disp(1 / predread, vmax=0.15, viewind=0).save(os.path.join(ours_root, svname))
        # tensor2disp(1 / depthpred_deepv2d, vmax=0.15, viewind=0).save(os.path.join(deepv2d_root, svname))
        # tensor2rgb(image1, viewind=0).save(os.path.join(rgb_root, svname))
        # tensor2grad(sigmoidact, pos_bar=0.1, neg_bar=-0.1, viewind=0).save(os.path.join(residual_root, svname))

        # Build and save the qualitative comparison panel.
        fig1 = tensor2rgb(image1, viewind=0)
        fig1_2 = tensor2rgb(image2, viewind=0)
        fig2 = tensor2disp(1 / depthpred_deepv2d, vmax=0.15, viewind=0)
        fig3 = tensor2disp(1 / mD_pred_clipped, vmax=0.15, viewind=0)
        fig4 = tensor2grad(sigmoidact, pos_bar=0.1, neg_bar=-0.1, viewind=0)
        fig5 = tensor2disp(1 / predread, vmax=0.15, viewind=0)
        figs = concat_imgs([fig1, fig1_2, fig2, fig3, fig4, fig5])
        figc1 = np.concatenate([np.array(figs[0]), np.array(figs[1])], axis=0)
        figc2 = np.concatenate([np.array(figs[4]), np.array(figs[2])], axis=0)
        figc3 = np.concatenate([np.array(figs[3]), np.array(figs[5])], axis=0)
        imgvls = np.concatenate([figc1, figc2, figc3], axis=1)
        imgvls = Image.fromarray(imgvls)
        imgvls.save(os.path.join(vlsroot, svname))

        # Evaluate only on valid GT pixels inside the depth range.
        selector = ((depthgt > 0) * (predread > 0) *
                    (depthgt > args.min_depth_eval) * (depthgt < args.max_depth_eval)).float()
        predread = torch.clamp(predread, min=args.min_depth_eval, max=args.max_depth_eval)
        depth_gt_flatten = depthgt[selector == 1].cpu().numpy()
        pred_depth_flatten = predread[selector == 1].cpu().numpy()
        # Median scaling to align the prediction with the GT scale.
        pred_depth_flatten = np.median(depth_gt_flatten / pred_depth_flatten) * pred_depth_flatten

        eval_measures_depth_np = compute_errors(gt=depth_gt_flatten, pred=pred_depth_flatten)
        eval_measures_depth[:9] += torch.tensor(eval_measures_depth_np).cuda(device=gpu)
        eval_measures_depth[9] += 1

    if args.distributed:
        dist.all_reduce(tensor=eval_measures_depth, op=dist.ReduceOp.SUM, group=group)

    if args.gpu == 0:
        eval_measures_depth[0:9] = eval_measures_depth[0:9] / eval_measures_depth[9]
        eval_measures_depth = eval_measures_depth.cpu().numpy()
        print('Computing Depth errors for %f eval samples' % (eval_measures_depth[9].item()))
        print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format(
            'silog', 'abs_rel', 'log10', 'rms', 'sq_rel', 'log_rms', 'd1', 'd2', 'd3'))
        for i in range(8):
            print('{:7.3f}, '.format(eval_measures_depth[i]), end='')
        print('{:7.3f}'.format(eval_measures_depth[8]))
        return {'silog': float(eval_measures_depth[0]),
                'abs_rel': float(eval_measures_depth[1]),
                'log10': float(eval_measures_depth[2]),
                'rms': float(eval_measures_depth[3]),
                'sq_rel': float(eval_measures_depth[4]),
                'log_rms': float(eval_measures_depth[5]),
                'd1': float(eval_measures_depth[6]),
                'd2': float(eval_measures_depth[7]),
                'd3': float(eval_measures_depth[8])}
    else:
        return None
def validate_RANSAC_odom_relpose(args, eval_loader, banins=False, bangrad=False, samplenum=50000):
    """Evaluate RANSAC-based relative-pose estimation against photometric and
    flow losses, saving one diagnostic figure per sample to args.vls_root.

    For each sample, 32 pose hypotheses are drawn via inf_pose_flow; each is
    scored by (a) a reprojection (photometric) loss using the monocular depth
    and (b) a flow discrepancy loss using the stereo GT depth, and the two are
    scatter-plotted against each other.

    NOTE(review): this body was reconstructed from whitespace-mangled source —
    the exact scope of the plotting statements (esp. whether plt.scatter runs
    per-trial or once after the trial loop) should be confirmed against the
    original file.
    """
    if bangrad:
        gradComputer = None
    else:
        gradComputer = GradComputer()
    ssim = SSIM()
    # Number of pose hypotheses evaluated per sample.
    trialtime = 32
    os.makedirs(args.vls_root, exist_ok=True)
    for val_id, data_blob in enumerate(tqdm(eval_loader)):
        insmap = data_blob['insmap']
        intrinsic = data_blob['intrinsic']
        flowpred = data_blob['flowpred_RAFT']
        mdDepth_pred = data_blob['mdDepth']
        tag = data_blob['tag'][0]
        flowgt = data_blob['flowgt']
        rgb1 = data_blob['img1'] / 255.0
        rgb2 = data_blob['img2'] / 255.0
        # Background pixels only (instance id 0).
        insselector = (insmap == 0).float()
        stereogt = data_blob['stereogt']
        flowgt_valid = data_blob['flowgt_valid'] * insselector * (stereogt > 0).float()

        # Draw pose hypotheses with distinct per-trial random seeds.
        self_poses = list()
        breakflag = False
        for k in range(trialtime):
            R, t, scale, _ = inf_pose_flow(flowpred, insmap, mdDepth_pred, intrinsic,
                                           int(val_id * 500 + k), gradComputer=gradComputer,
                                           banins=banins, samplenum=samplenum)
            if scale == 0:
                # Degenerate solution: skip this whole sample below.
                breakflag = True
            else:
                self_pose = np.eye(4)
                self_pose[0:3, 0:3] = R
                self_pose[0:3, 3:4] = t * scale
                self_pose = torch.from_numpy(self_pose).float()
                self_poses.append(self_pose)
        if breakflag:
            continue

        rpj_loss_rec = list()
        flow_loss_rec = list()
        for k in range(trialtime):
            self_pose = self_poses[k]

            # --- Reprojection loss using the monocular depth prediction ---
            mdDepth_pred_sqz = mdDepth_pred.squeeze()
            _, _, h, w = insmap.shape
            xx, yy = np.meshgrid(range(w), range(h), indexing='xy')
            xxt = torch.from_numpy(xx)
            yyt = torch.from_numpy(yy)
            ones = torch.ones_like(xxt)
            # Homogeneous pixel coordinates scaled by depth: [x*d, y*d, d, 1].
            pts3d = torch.stack([xxt * mdDepth_pred_sqz, yyt * mdDepth_pred_sqz,
                                 mdDepth_pred_sqz, ones], dim=2).unsqueeze(-1)
            intrinsic_sqz = intrinsic.squeeze()
            # Project: K * T * K^-1 maps pixel-depth coords into frame 2.
            projM = intrinsic_sqz @ self_pose @ torch.inverse(intrinsic_sqz)
            projM_ex = projM.unsqueeze(0).unsqueeze(0).expand([h, w, -1, -1])
            prj2d = projM_ex @ pts3d
            prj2d_x = prj2d[:, :, 0] / prj2d[:, :, 2]
            prj2d_y = prj2d[:, :, 1] / prj2d[:, :, 2]
            flowx = prj2d_x.squeeze() - xxt
            flowy = prj2d_y.squeeze() - yyt
            flowpred_depth = torch.stack([flowx, flowy], dim=0).unsqueeze(0)
            # Image.fromarray(flow_to_image(flowpred_depth[0].permute([1, 2, 0]).numpy(), rad_max=300)).show()
            # Image.fromarray(flow_to_image(flowpred[0].permute([1, 2, 0]).numpy(), rad_max=300)).show()
            # Normalize to [-1, 1] for grid_sample and warp frame 2 into frame 1.
            prj2d_x = (prj2d_x / (w - 1) - 0.5) * 2
            prj2d_y = (prj2d_y / (h - 1) - 0.5) * 2
            prj2dpx = torch.cat([prj2d_x, prj2d_y], dim=-1).unsqueeze(0)
            rgb1_recon = F.grid_sample(rgb2, prj2dpx, mode='bilinear', align_corners=False)
            rpj_loss, rpj_loss_imgsize = reprojetion_loss(rgb1, rgb1_recon, ssim, insselector)

            # --- Flow loss using the stereo GT depth (same projection math) ---
            mdDepth_pred_sqz = stereogt.squeeze()
            _, _, h, w = insmap.shape
            xx, yy = np.meshgrid(range(w), range(h), indexing='xy')
            xxt = torch.from_numpy(xx)
            yyt = torch.from_numpy(yy)
            ones = torch.ones_like(xxt)
            pts3d = torch.stack([xxt * mdDepth_pred_sqz, yyt * mdDepth_pred_sqz,
                                 mdDepth_pred_sqz, ones], dim=2).unsqueeze(-1)
            intrinsic_sqz = intrinsic.squeeze()
            projM = intrinsic_sqz @ self_pose @ torch.inverse(intrinsic_sqz)
            projM_ex = projM.unsqueeze(0).unsqueeze(0).expand([h, w, -1, -1])
            prj2d = projM_ex @ pts3d
            prj2d_x = prj2d[:, :, 0] / prj2d[:, :, 2]
            prj2d_y = prj2d[:, :, 1] / prj2d[:, :, 2]
            flowx = prj2d_x.squeeze() - xxt
            flowy = prj2d_y.squeeze() - yyt
            flowpred_depth = torch.stack([flowx, flowy], dim=0).unsqueeze(0)
            flow_loss = torch.sum(torch.abs(flowpred_depth - flowgt), dim=1, keepdim=True)
            flow_loss_imgsize = flow_loss * flowgt_valid
            flow_loss = torch.sum(flow_loss * flowgt_valid) / torch.sum(flowgt_valid)
            # tensor2rgb(rgb1_recon, viewind=0).show()

            rpj_loss_rec.append(rpj_loss.item())
            flow_loss_rec.append(flow_loss.item())

            if k == 0:
                # Set up the per-sample figure on the first trial; the loss
                # maps shown are those of trial 0.
                fig = plt.figure(figsize=(16, 9))
                fig.add_subplot(4, 1, 1)
                plt.imshow(tensor2rgb(rgb1, viewind=0))
                fig.add_subplot(4, 1, 2)
                plt.imshow(tensor2disp(rpj_loss_imgsize, vmax=0.1))
                fig.add_subplot(4, 1, 3)
                plt.imshow(tensor2disp(flow_loss_imgsize, vmax=3))
                fig.add_subplot(4, 1, 4)

        # Scatter all trials' (flow loss, photometric loss) pairs into the
        # fourth subplot selected above, then save and close the figure.
        plt.scatter(flow_loss_rec, rpj_loss_rec)
        plt.xlabel('flow loss')
        plt.ylabel('photometric loss')
        svpath = os.path.join(args.vls_root, tag.split(' ')[0].split('/')[1] + '.png')
        plt.savefig(svpath, bbox_inches='tight')
        plt.close()