def visualize(self, data):
    r''' Visualizes the current output data of the model.

    The point clouds for the respective input data are plotted.

    Args:
        data (tensor): input data
    '''
    device = self.device
    points_gt = data.get('pointcloud').to(device)
    inputs = data.get('inputs').to(device)

    with torch.no_grad():
        points_out = self.model(inputs)

    points_out = points_out.cpu().numpy()
    points_gt = points_gt.cpu().numpy()

    batch_size = inputs.size(0)
    for i in trange(batch_size):
        input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
        vis.visualize_data(inputs[i].cpu(), self.input_type, input_img_path)

        out_file = os.path.join(self.vis_dir, '%03d.png' % i)
        out_file_gt = os.path.join(self.vis_dir, '%03d_gt.png' % i)
        vis.visualize_pointcloud(points_out[i], out_file=out_file)
        vis.visualize_pointcloud(points_gt[i], out_file=out_file_gt)
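# The vis.visualize_pointcloud helper called throughout these methods is not
# shown in this section. Below is a minimal sketch of what such a helper
# could look like, assuming an (N, 3) array and matplotlib's 3D toolkit;
# the name, axis ordering, and view angles are illustrative assumptions,
# not the project's actual code.
import matplotlib
matplotlib.use('Agg')  # render off-screen so saving to out_file works headlessly
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3d projection)
import matplotlib.pyplot as plt
import numpy as np


def visualize_pointcloud_sketch(points, out_file=None, show=False,
                                elev=30, azim=45):
    ''' Scatter-plots an (N, 3) point cloud and optionally saves it. '''
    points = np.asarray(points)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(points[:, 0], points[:, 1], points[:, 2], s=1)
    ax.view_init(elev=elev, azim=azim)
    if out_file is not None:
        plt.savefig(out_file)
    if show:
        plt.show()
    plt.close(fig)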
def visualize(self, data):
    r''' Visualizes the GT point cloud and the predicted vertices
    (as a point cloud).

    Args:
        data (tensor): input data
    '''
    points_gt = data.get('pointcloud').to(self.device)
    img = data.get('inputs').to(self.device)
    camera_args = common.get_camera_args(
        data, 'pointcloud.loc', 'pointcloud.scale', device=self.device)
    world_mat, camera_mat = camera_args['Rt'], camera_args['K']

    if not os.path.isdir(self.vis_dir):
        os.mkdir(self.vis_dir)

    with torch.no_grad():
        outputs1, outputs2 = self.model(img, camera_mat)
    pred_vertices_1, pred_vertices_2, pred_vertices_3 = outputs1
    # Only the final refinement stage is plotted; map it back from camera
    # to world coordinates first.
    points_out = common.transform_points_back(pred_vertices_3, world_mat)
    points_out = points_out.cpu().numpy()

    input_img_path = os.path.join(self.vis_dir, 'input.png')
    save_image(img.cpu(), input_img_path, nrow=4)

    points_gt = points_gt.cpu().numpy()
    batch_size = img.size(0)
    for i in range(batch_size):
        out_file = os.path.join(self.vis_dir, '%03d.png' % i)
        out_file_gt = os.path.join(self.vis_dir, '%03d_gt.png' % i)
        vis.visualize_pointcloud(points_out[i], out_file=out_file)
        vis.visualize_pointcloud(points_gt[i], out_file=out_file_gt)
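# common.transform_points_back above undoes the world-to-camera transform
# before plotting. A minimal sketch of that inverse, assuming world_mat is a
# batched 3x4 [R|t] matrix as suggested by camera_args['Rt']; the function
# name and shapes are assumptions, not the project's actual code.
import torch


def transform_points_back_sketch(points, world_mat):
    ''' Inverts p_cam = p_world @ R^T + t^T for a batch of point sets.

    Args:
        points (tensor): camera-space points of shape (B, N, 3)
        world_mat (tensor): [R|t] matrices of shape (B, 3, 4)
    '''
    R = world_mat[:, :, :3]   # (B, 3, 3) rotation
    t = world_mat[:, :, 3:]   # (B, 3, 1) translation
    points_out = points - t.transpose(1, 2)                      # undo translation
    points_out = points_out @ torch.inverse(R.transpose(1, 2))   # undo rotation
    return points_out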
def visualize(self, data, it=0, vis_type='mesh'):
    ''' Visualizes the data.

    Args:
        data (dict): data dictionary
        it (int): training iteration
        vis_type (string): visualization type
    '''
    if self.multi_gpu:
        print('Sorry, visualizations are currently not implemented for '
              'multi-GPU training.')
        return 0

    device = self.device
    inputs = data.get('inputs', torch.empty(1, 0)).to(device)
    batch_size = inputs.shape[0]
    c = self.model.encode_inputs(inputs)

    if vis_type == 'voxel':
        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        p = p.unsqueeze(0).repeat(batch_size, 1, 1)
        with torch.no_grad():
            p_r = self.model.decode(p, c=c).probs
        voxels_out = (p_r >= self.threshold).cpu().numpy()
        voxels_out = voxels_out.reshape(batch_size, 32, 32, 32)
        for i in range(batch_size):
            out_file = os.path.join(self.vis_dir, '%03d.png' % i)
            vis.visualize_voxels(voxels_out[i], out_file)
    elif vis_type == 'pointcloud':
        p = torch.rand(batch_size, 60000, 3).to(device) - 0.5
        with torch.no_grad():
            occ = self.model.decode(p, c=c).probs
        mask = occ > self.threshold
        for i in range(batch_size):
            pi = p[i][mask[i]].cpu()
            out_file = os.path.join(self.vis_dir, '%03d.png' % i)
            vis.visualize_pointcloud(pi, out_file=out_file)
    elif vis_type == 'mesh':
        try:
            mesh_list = self.generator.generate_meshes(
                data, return_stats=False)
            for i, mesh in tqdm(enumerate(mesh_list)):
                if self.overwrite_visualization:
                    ending = ''
                else:
                    ending = '_%010d' % it
                mesh_out_file = os.path.join(
                    self.vis_dir, '%03d%s.ply' % (i, ending))
                mesh.export(mesh_out_file)
        except Exception as e:
            print('Exception occurred during visualization: ', e)
    else:
        print('The visualization type %s is not valid!' % vis_type)
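# make_3d_grid above builds the regular grid of query points fed to the
# occupancy decoder. A minimal sketch of such a helper, assuming it returns
# an (nx*ny*nz, 3) tensor of coordinates spanning the given bounding box
# (the name and exact layout are assumptions based on how it is called):
import torch


def make_3d_grid_sketch(bb_min, bb_max, shape):
    ''' Returns a (prod(shape), 3) tensor of regular grid coordinates. '''
    size = shape[0] * shape[1] * shape[2]
    pxs = torch.linspace(bb_min[0], bb_max[0], shape[0])
    pys = torch.linspace(bb_min[1], bb_max[1], shape[1])
    pzs = torch.linspace(bb_min[2], bb_max[2], shape[2])
    # Broadcast each axis over the other two, then flatten.
    pxs = pxs.view(-1, 1, 1).expand(*shape).contiguous().view(size)
    pys = pys.view(1, -1, 1).expand(*shape).contiguous().view(size)
    pzs = pzs.view(1, 1, -1).expand(*shape).contiguous().view(size)
    return torch.stack([pxs, pys, pzs], dim=1)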
def test():
    model_class = '02691156'
    model_id = '10155655850468db78d106ce0a280f87'
    if depth_pred:
        depth_foldername = 'depth_pred'
    else:
        depth_foldername = 'depth'
    depth_folder = os.path.join(
        DEPTH_ROOT, model_class, model_id, depth_foldername)
    mask_folder = os.path.join(MASK_ROOT, model_class, model_id, 'mask')
    output_folder = TEST_ROOT
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)

    depth_range_file = os.path.join(depth_folder, 'depth_range.txt')
    worker = DepthToPCNp()
    with open(depth_range_file, 'r') as f:
        for i in range(N_VIEWS):
            # Each line holds min depth, max depth, and depth unit per view.
            depth_range = f.readline().split(' ')
            depth_min = float(depth_range[0])
            depth_max = float(depth_range[1])
            depth_unit = float(depth_range[2])

            depth_file = os.path.join(depth_folder, '%.2d_depth.png' % i)
            mask_file = os.path.join(mask_folder, '%.2d_mask.png' % i)
            depth_img = Image.open(depth_file).convert('L')
            depth_img.save(os.path.join(output_folder, '%.2d_depth.png' % i))
            mask_img = Image.open(mask_file)

            pts = worker.work(depth_img, mask_img, depth_min, depth_max,
                              n=N, unit=depth_unit)
            output_file = os.path.join(
                output_folder, '%.2d_pointcloud.npz' % i)
            np.savez(output_file, pointcloud=pts)
            output_file = os.path.join(output_folder, '%.2d_pc.png' % i)
            visualize_pointcloud(pts, out_file=output_file,
                                 show=(i % 4 == 0))
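# DepthToPCNp.work back-projects masked depth pixels into a 3D point cloud.
# A minimal sketch of that idea with a pinhole camera model; the function
# name, intrinsics (focal length, principal point at the image center), and
# the random subsampling step are assumptions, since the real worker's
# camera model is not shown in this section.
import numpy as np


def depth_to_pointcloud_sketch(depth, mask, depth_min, depth_max,
                               focal=1.0, n=2048):
    ''' depth, mask: (H, W) arrays; depth in [0, 1] mapped to metric range. '''
    h, w = depth.shape
    z_img = depth_min + depth.astype(np.float32) * (depth_max - depth_min)
    v, u = np.nonzero(mask)          # pixel coordinates inside the mask
    z = z_img[v, u]
    # Pinhole back-projection: x = (u - cx) * z / fx, y = (v - cy) * z / fy
    x = (u - w / 2.0) * z / (focal * w)
    y = (v - h / 2.0) * z / (focal * h)
    pts = np.stack([x, y, z], axis=1)
    if len(pts) > n:                 # subsample to a fixed point count
        idx = np.random.choice(len(pts), n, replace=False)
        pts = pts[idx]
    return pts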
def visualize(self, data):
    ''' Performs a visualization step for the data.

    Args:
        data (dict): data dictionary
    '''
    device = self.device
    encoder_inputs, raw_data = compose_inputs(
        data, mode='train', device=self.device,
        input_type=self.input_type,
        depth_pointcloud_transfer=self.depth_pointcloud_transfer,
    )

    world_mat = None
    if (self.model.encoder_world_mat is not None) \
            or self.gt_pointcloud_transfer in ('view', 'view_scale_model'):
        if 'world_mat' in raw_data:
            world_mat = raw_data['world_mat']
        else:
            world_mat = get_world_mat(data, device=device)
    gt_pc = compose_pointcloud(data, device, self.gt_pointcloud_transfer,
                               world_mat=world_mat)
    batch_size = gt_pc.size(0)

    self.model.eval()
    with torch.no_grad():
        if self.model.encoder_world_mat is not None:
            out = self.model(encoder_inputs, world_mat=world_mat)
        else:
            out = self.model(encoder_inputs)
    if isinstance(out, tuple):
        out, _ = out

    for i in trange(batch_size):
        pc = gt_pc[i].cpu()
        vis.visualize_pointcloud(
            pc, out_file=os.path.join(self.vis_dir, '%03d_gt_pc.png' % i))
        pc = out[i].cpu()
        vis.visualize_pointcloud(
            pc, out_file=os.path.join(self.vis_dir, '%03d_pr_pc.png' % i))
        pc = encoder_inputs[i].cpu()
        vis.visualize_pointcloud(
            pc, out_file=os.path.join(
                self.vis_dir, '%03d_input_half_pc.png' % i))
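# A visualize() step like the one above is usually driven periodically from
# the training loop on a fixed batch, so successive figures are comparable.
# A minimal usage sketch; trainer, vis_loader, and visualize_every are
# hypothetical stand-ins for the project's actual objects and config.
def run_visualization(trainer, vis_loader, it, visualize_every=1000):
    ''' Calls trainer.visualize on one batch every visualize_every iters. '''
    if it > 0 and it % visualize_every == 0:
        data_vis = next(iter(vis_loader))  # one batch is enough for figures
        trainer.visualize(data_vis)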
pointcloud_out_file = os.path.join(pointcloud_dir, '%s.ply' % i)
export_pointcloud(pointcloud, pointcloud_out_file)
out_file_dict['pointcloud'] = pointcloud_out_file

if cfg['generation']['copy_input']:
    # Save inputs
    if input_type == 'img':
        for i in range(len(data['inputs'])):
            inputs_path = os.path.join(in_dir, '%s.jpg' % i)
            inputs = data['inputs'][i].squeeze(0).cpu()
            visualize_data(inputs, 'img', inputs_path)
            out_file_dict['in'] = inputs_path
        out_file = os.path.join(gt_dir, 'gt.png')
        pt = data['pointcloud'].squeeze(0).cpu().numpy()
        vis.visualize_pointcloud(pt, out_file=out_file)
    elif input_type == 'voxels':
        inputs_path = os.path.join(in_dir, '%s.off' % modelname)
        inputs = data['inputs'].squeeze(0).cpu()
        voxel_mesh = VoxelGrid(inputs).to_mesh()
        voxel_mesh.export(inputs_path)
        out_file_dict['in'] = inputs_path
    elif input_type == 'pointcloud':
        inputs_path = os.path.join(in_dir, '%s.ply' % modelname)
        inputs = data['inputs'].squeeze(0).cpu().numpy()
        export_pointcloud(inputs, inputs_path, False)
        out_file_dict['in'] = inputs_path

# Copy to visualization directory for first vis_n_output samples
c_it = model_counter[category_id]
if c_it < vis_n_outputs:
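# export_pointcloud above writes an (N, 3) array to a .ply file; the third
# positional argument passed in the pointcloud branch suggests a text/binary
# toggle. A minimal sketch of such a writer with the plyfile package; the
# function name and as_text flag are assumptions, not the project's code.
import numpy as np
from plyfile import PlyData, PlyElement


def export_pointcloud_sketch(vertices, out_file, as_text=True):
    ''' Saves an (N, 3) float array as a PLY vertex cloud. '''
    vertices = np.asarray(vertices, dtype=np.float32)
    vertex_dtype = [('x', np.float32), ('y', np.float32), ('z', np.float32)]
    structured = np.empty(len(vertices), dtype=vertex_dtype)
    structured['x'], structured['y'], structured['z'] = vertices.T
    element = PlyElement.describe(structured, 'vertex')
    PlyData([element], text=as_text).write(out_file)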
def visualize(self, data):
    ''' Performs a visualization step for the data.

    Args:
        data (dict): data dictionary
    '''
    device = self.device
    batch_size = data['points'].size(0)

    shape = (32, 32, 32)
    p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
    p = p.expand(batch_size, *p.size())

    encoder_inputs, raw_data = compose_inputs(
        data, mode='val', device=self.device, input_type=self.input_type,
        use_gt_depth_map=self.use_gt_depth_map,
        depth_map_mix=self.depth_map_mix,
        with_img=self.with_img,
        depth_pointcloud_transfer=self.depth_pointcloud_transfer,
        local=self.local)

    kwargs = {}
    with torch.no_grad():
        p_r = self.model.forward_halfway(
            p, encoder_inputs, sample=self.eval_sample, **kwargs)

    occ_hat = p_r.probs.view(batch_size, *shape)
    voxels_out = (occ_hat >= self.threshold).cpu().numpy()

    # visualize
    if self.local:
        encoder_inputs = encoder_inputs[None]

    if self.input_type == 'depth_pred':
        gt_mask = raw_data['mask']
        if self.with_img:
            encoder_inputs = encoder_inputs['depth']
        for i in trange(batch_size):
            if self.use_gt_depth_map:
                input_img_path = os.path.join(
                    self.vis_dir, '%03d_in_gt.png' % i)
            else:
                input_img_path = os.path.join(
                    self.vis_dir, '%03d_in_pr.png' % i)
            depth_map = encoder_inputs[i].cpu()
            depth_map = depth_to_L(depth_map, gt_mask[i].cpu())
            vis.visualize_data(depth_map, 'img', input_img_path)
            vis.visualize_voxels(
                voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))
    elif self.input_type == 'depth_pointcloud':
        for i in trange(batch_size):
            input_pointcloud_file = os.path.join(
                self.vis_dir, '%03d_depth_pointcloud.png' % i)
            pc = encoder_inputs[i].cpu()
            if self.depth_pointcloud_transfer in ('view',
                                                  'view_scale_model'):
                vis.visualize_pointcloud(
                    pc, out_file=input_pointcloud_file, elev=15, azim=180)
            else:
                vis.visualize_pointcloud(
                    pc, out_file=input_pointcloud_file)
            vis.visualize_voxels(
                voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))
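# depth_to_L above converts a one-channel depth map into a displayable
# grayscale image, using the mask to ignore background pixels. A minimal
# sketch of that normalization, assuming (H, W) tensors; the exact scaling
# the real helper applies is an assumption.
import torch


def depth_to_L_sketch(depth_map, mask):
    ''' depth_map, mask: (H, W) tensors; returns a (1, H, W) image in [0, 1]. '''
    depth = depth_map.clone().float()
    valid = mask > 0
    if valid.any():
        d = depth[valid]
        # Rescale foreground depths to [0, 1] for display.
        depth[valid] = (d - d.min()) / (d.max() - d.min() + 1e-8)
    depth[~valid] = 0.0      # background rendered black
    return depth.unsqueeze(0)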