Code example #1
    def visualize(self, data):
        ''' Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
        '''
        device = self.device

        batch_size = data['points'].size(0)
        inputs = data.get('inputs', torch.empty(batch_size, 0)).to(device)

        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        p = p.expand(batch_size, *p.size())

        kwargs = {}
        with torch.no_grad():
            p_r = self.model(p, inputs, sample=self.eval_sample, **kwargs)

        occ_hat = p_r.probs.view(batch_size, *shape)
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()

        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(inputs[i].cpu(), self.input_type,
                               input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, '%03d.png' % i))
Code example #2
    def visualize(self, data):
        """ Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
        """

        batch_size = data["points"].shape[0]
        inputs = data.get("inputs", tf.zeros([batch_size, 0]))

        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape)  # TODO: verify this helper returns a tf.Tensor in the TF port
        p = tf.broadcast_to(p, [batch_size, *p.shape])

        kwargs = {}
        p_r = self.model(p,
                         inputs,
                         sample=self.eval_sample,
                         training=False,
                         **kwargs)

        occ_hat = tf.reshape(p_r.probs_parameter(), [batch_size, *shape])
        voxels_out = (occ_hat >= self.threshold).numpy()

        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, "%03d_in.png" % i)
            vis.visualize_data(inputs[i], self.input_type, input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, "%03d.png" % i))
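
This is a TensorFlow port of example #1. The call to p_r.probs_parameter() suggests the model returns a tensorflow_probability Bernoulli distribution over per-point occupancy; a stand-in stub under that assumption (DummyOccupancyModel is hypothetical, useful only for exercising the reshape logic):

    import tensorflow as tf
    import tensorflow_probability as tfp

    class DummyOccupancyModel(tf.keras.Model):
        ''' Hypothetical stand-in: returns a Bernoulli over per-point
        occupancy, matching the probs_parameter() call above. '''
        def call(self, p, inputs, sample=True, training=False):
            logits = tf.zeros(tf.shape(p)[:2])  # (batch, n_points)
            return tfp.distributions.Bernoulli(logits=logits)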
Code example #3
    def visualize(self, data):
        ''' Performs an intermediate visualization.

        Args:
            data (dict): data dictionary
        '''
        device = self.device

        occ = data.get('voxels').to(device)
        inputs = data.get('inputs').to(device)

        with torch.no_grad():
            occ_logits = self.model(inputs).squeeze(1)

        occ_hat = torch.sigmoid(occ_logits)
        voxels_gt = (occ >= self.threshold).cpu().numpy()
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()

        batch_size = occ.size(0)
        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(inputs[i].cpu(), self.input_type,
                               input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, '%03d.png' % i))
            vis.visualize_voxels(voxels_gt[i],
                                 os.path.join(self.vis_dir, '%03d_gt.png' % i))
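
All of these snippets write voxel grids to disk through vis.visualize_voxels, whose implementation is not shown. A plausible matplotlib-based sketch (assumed behavior, not the repository's actual code):

    import matplotlib.pyplot as plt

    def visualize_voxels(voxels, out_file=None):
        ''' Sketch (assumed behavior): renders a boolean (D, H, W)
        occupancy grid to a PNG via matplotlib's voxel plot. '''
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        ax.voxels(voxels, edgecolor='k')
        ax.view_init(elev=30, azim=45)
        if out_file is not None:
            plt.savefig(out_file)
        plt.close(fig)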
Code example #4
    def visualize(self, data):
        r''' Visualizes the current output data of the model.

        The point clouds for the respective input data are plotted.

        Args:
            data (tensor): input data
        '''
        device = self.device

        points_gt = data.get('pointcloud').to(device)
        inputs = data.get('inputs').to(device)

        with torch.no_grad():
            points_out = self.model(inputs)

        points_out = points_out.cpu().numpy()
        points_gt = points_gt.cpu().numpy()

        batch_size = inputs.size(0)
        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(inputs[i].cpu(), self.input_type,
                               input_img_path)
            out_file = os.path.join(self.vis_dir, '%03d.png' % i)
            out_file_gt = os.path.join(self.vis_dir, '%03d_gt.png' % i)
            vis.visualize_pointcloud(points_out[i], out_file=out_file)
            vis.visualize_pointcloud(points_gt[i], out_file=out_file_gt)
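
Example #11 below passes elev/azim to vis.visualize_pointcloud, so the helper presumably accepts a camera pose. A minimal sketch under that assumption:

    import matplotlib.pyplot as plt

    def visualize_pointcloud(points, out_file=None, elev=30, azim=45):
        ''' Sketch (assumed behavior): scatter-plots an (N, 3) point
        cloud and saves it to out_file. '''
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        ax.scatter(points[:, 0], points[:, 1], points[:, 2], s=1)
        ax.view_init(elev=elev, azim=azim)
        if out_file is not None:
            plt.savefig(out_file)
        plt.close(fig)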
Code example #5
    def visualize(self, data, it=0., epoch_it=0.):
        ''' Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
        '''
        device = self.device

        batch_size = data['points'].size(0)
        inputs = data.get('inputs', torch.empty(batch_size, 0)).to(device)
        angles = data.get('angles').to(device)

        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        p = p.expand(batch_size, *p.size())

        kwargs = {}
        with torch.no_grad():
            _, _, sgn, _, _ = self.model(p * self.pnet_point_scale,
                                         inputs,
                                         sample=self.eval_sample,
                                         angles=angles,
                                         **kwargs)

        if self.is_sdf:
            if self.is_logits_by_min:
                logits = (sgn.min(1)[0] <= 0).float()
            else:
                raise NotImplementedError
        else:
            if self.is_logits_by_max:
                logits = convert_tsd_range_to_zero_to_one(sgn.max(1)[0])
            elif self.is_logits_by_sign_filter:
                positive = torch.relu(sgn).sum(1)
                negative = torch.relu(-sgn).sum(1)
                logits = torch.where(positive >= negative, positive, -negative)
            else:
                logits = convert_tsd_range_to_zero_to_one(sgn).sum(1)
        occ_hat = logits.view(batch_size, *shape)
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()

        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(inputs[i].cpu(), self.input_type,
                               input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, '%03d.png' % i))
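
convert_tsd_range_to_zero_to_one is not defined in this listing. From its name and its use as a pseudo-occupancy score, one purely hypothetical reading maps a truncated signed distance onto [0, 1]; the real helper may differ in sign convention and scaling:

    import torch

    def convert_tsd_range_to_zero_to_one(tsd, truncation=0.1):
        ''' Hypothetical sketch: maps a truncated signed distance in
        [-truncation, truncation] linearly to [0, 1], with the inside
        (negative distance) mapping toward 1. '''
        return torch.clamp(0.5 - tsd / (2.0 * truncation), 0.0, 1.0)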
Code example #6
    def visualize(self, data):
        ''' Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
        '''
        device = self.device
        shape = (self.num_cells + 1, ) * 3
        inputs = data.get('inputs').to(self.device)
        batch_size = inputs.size(0)

        inputs_norm = self.num_cells * (inputs / 1.2 + 0.5)

        with torch.no_grad():
            offset, topology, occupancy = self.model(inputs_norm)

        occupancy = occupancy.view(batch_size, *shape)
        voxels_out = (occupancy >= 0.5).cpu().numpy()

        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(inputs[i].cpu(), self.input_type,
                               input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, '%03d.png' % i))
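
The line inputs_norm = self.num_cells * (inputs / 1.2 + 0.5) maps point coordinates from roughly [-0.6, 0.6] into grid-index space [0, num_cells]. A quick arithmetic check:

    import torch

    # Points at -0.6, 0.0 and 0.6 land at grid coordinates 0, 16, 32
    # for a 32-cell grid, confirming the normalization above.
    num_cells = 32
    pts = torch.tensor([[-0.6, 0.0, 0.6]])
    print(num_cells * (pts / 1.2 + 0.5))  # tensor([[ 0., 16., 32.]])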
Code example #7
    def visualize(self, data):
        ''' Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
        '''
        device = self.device
        inputs = data.get('inputs').to(device)
        gt_depth_maps = data.get('inputs.depth')
        gt_masks = data.get('inputs.mask').byte()
        batch_size = gt_depth_maps.size(0)

        self.model.eval()
        with torch.no_grad():
            pr_depth_maps = self.model.predict_depth_map(inputs).cpu()

        for i in trange(batch_size):
            gt_depth_map = gt_depth_maps[i]
            pr_depth_map = pr_depth_maps[i]
            gt_mask = gt_masks[i]

            pr_depth_map = depth_to_L(pr_depth_map, gt_mask)
            gt_depth_map = depth_to_L(gt_depth_map, gt_mask)

            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            input_depth_path = os.path.join(self.vis_dir,
                                            '%03d_in_depth.png' % i)
            pr_depth_path = os.path.join(self.vis_dir, '%03d_pr_depth.png' % i)
            vis.visualize_data(inputs[i].cpu(), 'img', input_img_path)
            vis.visualize_data(gt_depth_map, 'img', input_depth_path)
            vis.visualize_data(pr_depth_map, 'img', pr_depth_path)
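
depth_to_L is used here and in examples #8 and #11 to turn a single-channel depth map into a grayscale image. A plausible sketch, assuming it min-max-normalizes valid depths inside the mask and zeroes the background:

    import torch

    def depth_to_L(depth_map, mask):
        ''' Hypothetical sketch: normalizes depth inside the mask to
        [0, 1] and sets the background to zero, yielding an image
        suitable for visualize_data(..., 'img', ...). '''
        out = torch.zeros_like(depth_map)
        valid = mask.bool().expand_as(depth_map)
        if valid.any():
            d = depth_map[valid]
            out[valid] = (d - d.min()) / (d.max() - d.min() + 1e-8)
        return out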
Code example #8
    def visualize(self, data):
        ''' Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
        '''
        device = self.device

        batch_size = data['points'].size(0)
        inputs = data.get('inputs').to(device)
        #gt_depth_maps = data.get('inputs.depth').to(device)
        gt_mask = data.get('inputs.mask').to(device).byte()

        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        p = p.expand(batch_size, *p.size())

        kwargs = {}
        with torch.no_grad():
            pr_depth_maps = self.model.predict_depth_map(inputs)
            background_setting(pr_depth_maps, gt_mask)
            p_r = self.model.forward_halfway(p,
                                             pr_depth_maps,
                                             sample=self.eval_sample,
                                             **kwargs)

        occ_hat = p_r.probs.view(batch_size, *shape)
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()

        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(inputs[i].cpu(), 'img', input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, '%03d.png' % i))

            depth_map_path = os.path.join(self.vis_dir,
                                          '%03d_pr_depth.png' % i)
            depth_map = pr_depth_maps[i].cpu()
            depth_map = depth_to_L(depth_map, gt_mask[i].cpu())
            vis.visualize_data(depth_map, 'img', depth_map_path)
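
background_setting is called only for its side effect (its return value is ignored), which suggests it overwrites background pixels of the predicted depth maps in place. A hypothetical sketch:

    import torch

    def background_setting(depth_maps, masks, background=0.0):
        ''' Hypothetical sketch: in place, sets depth values outside
        the object mask to a constant background value. '''
        depth_maps[~masks.bool().expand_as(depth_maps)] = background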
Code example #9
    if generate_pointcloud:
        t0 = time.time()
        pointcloud = generator.generate_pointcloud(data)
        time_dict["pcl"] = time.time() - t0
        pointcloud_out_file = os.path.join(pointcloud_dir,
                                           "%s.ply" % modelname)
        export_pointcloud(pointcloud, pointcloud_out_file)
        out_file_dict["pointcloud"] = pointcloud_out_file

    if cfg["generation"]["copy_input"]:
        # Save inputs
        if input_type == "img":
            inputs_path = os.path.join(in_dir, "%s.jpg" % modelname)
            inputs = tf.squeeze(data["inputs"], axis=0)
            visualize_data(inputs, "img", inputs_path)
            out_file_dict["in"] = inputs_path
        elif input_type == "voxels":
            inputs_path = os.path.join(in_dir, "%s.off" % modelname)
            inputs = tf.squeeze(data["inputs"], axis=0)
            voxel_mesh = VoxelGrid(inputs).to_mesh()
            voxel_mesh.export(inputs_path)
            out_file_dict["in"] = inputs_path
        elif input_type == "pointcloud":
            inputs_path = os.path.join(in_dir, "%s.ply" % modelname)
            inputs = tf.squeeze(data["inputs"], axis=0).numpy()
            export_pointcloud(inputs, inputs_path, False)
            out_file_dict["in"] = inputs_path

    # Copy to visualization directory for first vis_n_output samples
    c_it = model_counter[category_id]
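
Both generation snippets (this one and the PyTorch variant below) call export_pointcloud(points, path, as_text); the trailing False in the pointcloud branch plausibly selects binary PLY. A sketch using plyfile, assuming that signature:

    import numpy as np
    from plyfile import PlyData, PlyElement

    def export_pointcloud(vertices, out_file, as_text=True):
        ''' Sketch (signature assumed from the call sites): writes an
        (N, 3) float array to a PLY file, text or binary. '''
        vertices = np.ascontiguousarray(vertices, dtype=np.float32)
        dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
        structured = vertices.view(dtype=dtype).reshape(-1)
        element = PlyElement.describe(structured, 'vertex')
        PlyData([element], text=as_text).write(out_file)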
Code example #10
    if generate_pointcloud:
        t0 = time.time()
        pointcloud = generator.generate_pointcloud(data)
        time_dict['pcl'] = time.time() - t0
        pointcloud_out_file = os.path.join(pointcloud_dir,
                                           '%s.ply' % modelname)
        export_pointcloud(pointcloud, pointcloud_out_file)
        out_file_dict['pointcloud'] = pointcloud_out_file

    if cfg['generation']['copy_input']:
        # Save inputs
        if input_type == 'img':
            inputs_path = os.path.join(in_dir, '%s.jpg' % modelname)
            inputs = data['inputs'].squeeze(0).cpu()
            visualize_data(inputs, 'img', inputs_path)
            out_file_dict['in'] = inputs_path
        elif input_type == 'voxels':
            inputs_path = os.path.join(in_dir, '%s.off' % modelname)
            inputs = data['inputs'].squeeze(0).cpu()
            voxel_mesh = VoxelGrid(inputs).to_mesh()
            voxel_mesh.export(inputs_path)
            out_file_dict['in'] = inputs_path
        elif input_type == 'pointcloud':
            inputs_path = os.path.join(in_dir, '%s.ply' % modelname)
            inputs = data['inputs'].squeeze(0).cpu().numpy()
            export_pointcloud(inputs, inputs_path, False)
            out_file_dict['in'] = inputs_path

    # Copy to visualization directory for first vis_n_output samples
    c_it = model_counter[category_id]
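
The voxels branch relies on VoxelGrid(inputs).to_mesh() for the .off export. If that helper is unavailable, trimesh offers a comparable route (an alternative sketch, not the snippet's actual implementation):

    import numpy as np
    import trimesh

    def voxels_to_mesh(voxels):
        ''' Alternative sketch: converts a boolean (D, H, W) occupancy
        grid to a surface mesh via trimesh's marching cubes. '''
        grid = trimesh.voxel.VoxelGrid(np.asarray(voxels, dtype=bool))
        return grid.marching_cubes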
Code example #11
    def visualize(self, data):
        ''' Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
        '''
        device = self.device

        batch_size = data['points'].size(0)

        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        p = p.expand(batch_size, *p.size())

        encoder_inputs, raw_data = compose_inputs(
            data,
            mode='val',
            device=self.device,
            input_type=self.input_type,
            use_gt_depth_map=self.use_gt_depth_map,
            depth_map_mix=self.depth_map_mix,
            with_img=self.with_img,
            depth_pointcloud_transfer=self.depth_pointcloud_transfer,
            local=self.local)

        kwargs = {}
        with torch.no_grad():
            p_r = self.model.forward_halfway(p,
                                             encoder_inputs,
                                             sample=self.eval_sample,
                                             **kwargs)

        occ_hat = p_r.probs.view(batch_size, *shape)
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()

        # visualize
        if self.local:
            encoder_inputs = encoder_inputs[None]

        if self.input_type == 'depth_pred':
            gt_mask = raw_data['mask']
            if self.with_img:
                encoder_inputs = encoder_inputs['depth']

            for i in trange(batch_size):
                if self.use_gt_depth_map:
                    input_img_path = os.path.join(self.vis_dir,
                                                  '%03d_in_gt.png' % i)
                else:
                    input_img_path = os.path.join(self.vis_dir,
                                                  '%03d_in_pr.png' % i)

                depth_map = encoder_inputs[i].cpu()
                depth_map = depth_to_L(depth_map, gt_mask[i].cpu())
                vis.visualize_data(depth_map, 'img', input_img_path)
                vis.visualize_voxels(
                    voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))
        elif self.input_type == 'depth_pointcloud':
            for i in trange(batch_size):
                input_pointcloud_file = os.path.join(
                    self.vis_dir, '%03d_depth_pointcloud.png' % i)

                pc = encoder_inputs[i].cpu()
                if self.depth_pointcloud_transfer in ('view',
                                                      'view_scale_model'):
                    vis.visualize_pointcloud(pc,
                                             out_file=input_pointcloud_file,
                                             elev=15,
                                             azim=180)
                else:
                    vis.visualize_pointcloud(pc,
                                             out_file=input_pointcloud_file)
                vis.visualize_voxels(
                    voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))
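
All of these visualize methods share the same trainer interface, so they are typically driven from a config-controlled training loop. A sketch of such a call site (maybe_visualize, visualize_every, and data_vis are assumed names, not taken from the snippets above):

    def maybe_visualize(trainer, data_vis, it, visualize_every=100):
        ''' Sketch: periodically triggers the visualization step from
        a training loop; all names here are illustrative. '''
        if visualize_every > 0 and (it % visualize_every) == 0:
            trainer.visualize(data_vis)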
Code example #12
            os.mkdir(
                os.path.join(out_dir, cur_model_info['category'],
                             cur_model_info['model']))

        if not os.path.exists(
                os.path.join(out_dir, cur_model_info['category'],
                             cur_model_info['model'], pred_path)):
            os.mkdir(
                os.path.join(out_dir, cur_model_info['category'],
                             cur_model_info['model'], pred_path))

        # save png
        png_path = os.path.join(out_dir, cur_model_info['category'],
                                cur_model_info['model'], pred_path,
                                '%.2d_depth.png' % cur_viewid)
        visualize_data(cur_depth_map, 'img', png_path)
        # record range
        depth_range_path = os.path.join(out_dir, cur_model_info['category'],
                                        cur_model_info['model'], pred_path,
                                        'depth_range.txt')

        if cur_viewid == 0:
            if os.path.exists(depth_range_path):
                os.remove(depth_range_path)
        with open(depth_range_path, mode='a') as f:
            print(depth_min.item(), depth_max.item(), 1.0, file=f)

    t1 = time.time()
    pbar.update(1)
    #print("\r finished: %d / %d in %d sec" % (it, batch_count, t1 - t0), flush=True)
pbar.close()
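
The loop above appends one "depth_min depth_max scale" line per view to depth_range.txt, resetting the file when cur_viewid is 0. A matching reader sketch, under that assumed layout:

    def read_depth_ranges(path):
        ''' Sketch: parses depth_range.txt as written above; each line
        holds "depth_min depth_max 1.0" for one view. '''
        ranges = []
        with open(path) as f:
            for line in f:
                d_min, d_max, scale = map(float, line.split())
                ranges.append((d_min, d_max, scale))
        return ranges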