Code Example #1
    def visualize(self, data):
        ''' Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
        '''
        device = self.device

        batch_size = data['points'].size(0)
        inputs = data.get('inputs', torch.empty(batch_size, 0)).to(device)

        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        p = p.expand(batch_size, *p.size())

        kwargs = {}
        with torch.no_grad():
            p_r = self.model(p, inputs, sample=self.eval_sample, **kwargs)

        occ_hat = p_r.probs.view(batch_size, *shape)
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()

        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(inputs[i].cpu(), self.input_type,
                               input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, '%03d.png' % i))
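The examples on this page query the model on a regular grid built by make_3d_grid, whose definition is not shown. Below is a minimal sketch consistent with how it is called here (it must return a flattened (N, 3) tensor of xyz coordinates spanning the given bounding box); the real helper may differ in detail.

import torch

def make_3d_grid(bb_min, bb_max, shape):
    # Build a flattened (shape[0] * shape[1] * shape[2], 3) tensor of xyz
    # coordinates spanning the axis-aligned box [bb_min, bb_max].
    size = shape[0] * shape[1] * shape[2]
    pxs = torch.linspace(bb_min[0], bb_max[0], shape[0])
    pys = torch.linspace(bb_min[1], bb_max[1], shape[1])
    pzs = torch.linspace(bb_min[2], bb_max[2], shape[2])

    # Broadcast each axis over the other two, then flatten.
    pxs = pxs.view(-1, 1, 1).expand(*shape).contiguous().view(size)
    pys = pys.view(1, -1, 1).expand(*shape).contiguous().view(size)
    pzs = pzs.view(1, 1, -1).expand(*shape).contiguous().view(size)
    return torch.stack([pxs, pys, pzs], dim=1)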
Code Example #2
    def visualize(self, data):
        """ Performs a visualization step for the data.

    Args:
        data (dict): data dictionary
    """

        batch_size = data["points"].shape[0]
        inputs = data.get("inputs", tf.zeros([batch_size, 0]))

        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape)  # CHECK
        p = tf.broadcast_to(p, [batch_size, *p.shape])

        kwargs = {}
        p_r = self.model(p,
                         inputs,
                         sample=self.eval_sample,
                         training=False,
                         **kwargs)

        occ_hat = tf.reshape(p_r.probs_parameter(), [batch_size, *shape])
        voxels_out = (occ_hat >= self.threshold).numpy()

        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, "%03d_in.png" % i)
            vis.visualize_data(inputs[i], self.input_type, input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, "%03d.png" % i))
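This TensorFlow port reads probabilities through probs_parameter(), which suggests the model returns a tensorflow_probability Bernoulli distribution over occupancy. A minimal sketch of that assumed interface follows; the distribution construction is an assumption, not shown in the source.

import tensorflow as tf
import tensorflow_probability as tfp

logits = tf.random.normal([2, 32 * 32 * 32])   # stand-in for the model output
p_r = tfp.distributions.Bernoulli(logits=logits)
occ_hat = tf.reshape(p_r.probs_parameter(), [2, 32, 32, 32])
voxels_out = (occ_hat >= 0.5).numpy()          # boolean occupancy grid (eager mode)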
Code Example #3
    def visualize(self, data):
        ''' Performs an intermediate visualization.

        Args:
            data (dict): data dictionary
        '''
        device = self.device

        occ = data.get('voxels').to(device)
        inputs = data.get('inputs').to(device)

        with torch.no_grad():
            occ_logits = self.model(inputs).squeeze(1)

        occ_hat = torch.sigmoid(occ_logits)
        voxels_gt = (occ >= self.threshold).cpu().numpy()
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()

        batch_size = occ.size(0)
        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(inputs[i].cpu(), self.input_type,
                               input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, '%03d.png' % i))
            vis.visualize_voxels(voxels_gt[i],
                                 os.path.join(self.vis_dir, '%03d_gt.png' % i))
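Unlike the point-query variants above, this model maps the inputs directly to a voxel logit grid. A quick self-contained shape check of that assumed interface (tensor sizes here are illustrative, not taken from the source):

import torch

batch_size, res = 4, 32
# Stand-in for self.model(inputs): a (B, 1, R, R, R) logit grid.
occ_logits = torch.randn(batch_size, 1, res, res, res).squeeze(1)
occ_hat = torch.sigmoid(occ_logits)            # probabilities in (0, 1)
voxels_out = (occ_hat >= 0.5).cpu().numpy()    # boolean (B, R, R, R) grid
assert voxels_out.shape == (batch_size, res, res, res)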
Code Example #4
    def visualize(self, data, it=0, vis_type='mesh'):
        ''' Visualizes the data.

        Args:
            data (dict): data dictionary
            it (int): training iteration
            vis_type (string): visualization type
        '''
        if self.multi_gpu:
            print("Sorry, visualizations are currently not implemented for "
                  "multi-GPU training.")
            return 0

        device = self.device
        inputs = data.get('inputs', torch.empty(1, 0)).to(device)
        batch_size = inputs.shape[0]
        c = self.model.encode_inputs(inputs)
        if vis_type == 'voxel':
            shape = (32, 32, 32)
            p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
            p = p.unsqueeze(0).repeat(batch_size, 1, 1)
            with torch.no_grad():
                p_r = self.model.decode(p, c=c).probs
            voxels_out = (p_r >= self.threshold).cpu().numpy()
            voxels_out = voxels_out.reshape(batch_size, 32, 32, 32)
            for i in range(batch_size):
                out_file = os.path.join(self.vis_dir, '%03d.png' % i)
                vis.visualize_voxels(voxels_out[i], out_file)
        elif vis_type == 'pointcloud':
            p = torch.rand(batch_size, 60000, 3).to(device) - 0.5
            with torch.no_grad():
                occ = self.model.decode(p, c=c).probs
                mask = occ > self.threshold

            for i in range(batch_size):
                pi = p[i][mask[i]].cpu()
                out_file = os.path.join(self.vis_dir, '%03d.png' % i)
                vis.visualize_pointcloud(pi, out_file=out_file)
        elif vis_type == 'mesh':
            try:
                mesh_list = self.generator.generate_meshes(
                    data, return_stats=False)
                for i, mesh in tqdm(enumerate(mesh_list)):
                    if self.overwrite_visualization:
                        ending = ''
                    else:
                        ending = '_%010d' % it
                    mesh_out_file = os.path.join(
                        self.vis_dir, '%03d%s.ply' % (i, ending))
                    mesh.export(mesh_out_file)
            except Exception as e:
                print("Exception occurred during visualization: ", e)
        else:
            print('The visualization type %s is not valid!' % vis_type)
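Note that this example batches the query grid with unsqueeze(0).repeat(...), which materializes one copy per batch element, while Examples #1 and #5 use expand(), which only creates a broadcast view over shared storage. A small self-contained comparison:

import torch

p = torch.rand(32 ** 3, 3)                        # flattened 32^3 query grid
batch_size = 4
p_view = p.expand(batch_size, *p.size())          # (4, 32768, 3), shares storage
p_copy = p.unsqueeze(0).repeat(batch_size, 1, 1)  # (4, 32768, 3), real copies
assert p_view.shape == p_copy.shape
assert torch.equal(p_view, p_copy)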
Code Example #5
    def visualize(self, data, it=0, epoch_it=0):
        ''' Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
            it (int): training iteration
            epoch_it (int): training epoch
        '''
        device = self.device

        batch_size = data['points'].size(0)
        inputs = data.get('inputs', torch.empty(batch_size, 0)).to(device)
        angles = data.get('angles').to(device)

        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        p = p.expand(batch_size, *p.size())

        kwargs = {}
        with torch.no_grad():
            _, _, sgn, _, _ = self.model(p * self.pnet_point_scale,
                                         inputs,
                                         sample=self.eval_sample,
                                         angles=angles,
                                         **kwargs)

        if self.is_sdf:
            if self.is_logits_by_min:
                logits = (sgn.min(1)[0] <= 0).float()
            else:
                raise NotImplementedError
        else:
            if self.is_logits_by_max:
                logits = convert_tsd_range_to_zero_to_one(sgn.max(1)[0])
            elif self.is_logits_by_sign_filter:
                positive = torch.relu(sgn).sum(1)
                negative = torch.relu(-sgn).sum(1)
                logits = torch.where(positive >= negative, positive, -negative)
            else:
                logits = convert_tsd_range_to_zero_to_one(sgn).sum(1)
        occ_hat = logits.view(batch_size, *shape)
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()

        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(inputs[i].cpu(), self.input_type,
                               input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, '%03d.png' % i))
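The helper convert_tsd_range_to_zero_to_one is not shown. Judging from its use on sgn, it maps truncated signed distances to occupancy-like values in [0, 1]; the sketch below is a hypothetical linear mapping under an assumed truncation threshold, not the source implementation.

import torch

def convert_tsd_range_to_zero_to_one(tsd, truncation=1.0):
    # HYPOTHETICAL: linearly map truncated signed distances in
    # [-truncation, truncation] to [0, 1], sending negative (interior)
    # values toward 1 so the result can be thresholded like occupancy.
    return torch.clamp(0.5 - tsd / (2.0 * truncation), min=0.0, max=1.0)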
Code Example #6
    def visualize(self, data):
        ''' Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
        '''
        device = self.device
        shape = (self.num_cells + 1, ) * 3
        inputs = data.get('inputs').to(device)
        batch_size = inputs.size(0)

        # Normalize input coordinates into cell units for the model.
        inputs_norm = self.num_cells * (inputs / 1.2 + 0.5)

        with torch.no_grad():
            offset, topology, occupancy = self.model(inputs_norm)

        occupancy = occupancy.view(batch_size, *shape)
        voxels_out = (occupancy >= 0.5).cpu().numpy()

        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(inputs[i].cpu(), self.input_type,
                               input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, '%03d.png' % i))
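A quick numeric check of the normalization above, assuming the input point cloud lies in the 1.2-wide cube [-0.6, 0.6]^3 (the cube size is an assumption inferred from the divisor): inputs / 1.2 + 0.5 maps coordinates into [0, 1]^3, and multiplying by num_cells expresses them in cell units.

import torch

num_cells = 32
inputs = torch.tensor([[-0.6, 0.0, 0.6]])      # one point spanning the cube
inputs_norm = num_cells * (inputs / 1.2 + 0.5)
print(inputs_norm)                             # tensor([[ 0., 16., 32.]])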
Code Example #7
    def visualize(self, data):
        ''' Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
        '''
        device = self.device

        batch_size = data['points'].size(0)
        inputs = data.get('inputs').to(device)
        # gt_depth_maps = data.get('inputs.depth').to(device)
        gt_mask = data.get('inputs.mask').to(device).byte()

        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        p = p.expand(batch_size, *p.size())

        kwargs = {}
        with torch.no_grad():
            pr_depth_maps = self.model.predict_depth_map(inputs)
            background_setting(pr_depth_maps, gt_mask)
            p_r = self.model.forward_halfway(p,
                                             pr_depth_maps,
                                             sample=self.eval_sample,
                                             **kwargs)

        occ_hat = p_r.probs.view(batch_size, *shape)
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()

        for i in trange(batch_size):
            input_img_path = os.path.join(self.vis_dir, '%03d_in.png' % i)
            vis.visualize_data(inputs[i].cpu(), 'img', input_img_path)
            vis.visualize_voxels(voxels_out[i],
                                 os.path.join(self.vis_dir, '%03d.png' % i))

            depth_map_path = os.path.join(self.vis_dir,
                                          '%03d_pr_depth.png' % i)
            depth_map = pr_depth_maps[i].cpu()
            depth_map = depth_to_L(depth_map, gt_mask[i].cpu())
            vis.visualize_data(depth_map, 'img', depth_map_path)
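depth_to_L is another helper without a visible definition; it evidently converts a single-channel depth map plus a foreground mask into a grayscale ("L" mode) image tensor. A hypothetical sketch, assuming depth_map and mask are both (1, H, W); the actual scaling and sign conventions may differ.

import torch

def depth_to_L(depth_map, mask):
    # HYPOTHETICAL: rescale foreground depth values to [0, 1] for
    # grayscale visualization; background pixels are set to 0.
    mask = mask.bool()
    out = torch.zeros_like(depth_map)
    fg = depth_map[mask]
    if fg.numel() > 0:
        out[mask] = (fg - fg.min()) / (fg.max() - fg.min() + 1e-8)
    return out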
Code Example #8
    def visualize(self, data):
        ''' Performs a visualization step for the data.

        Args:
            data (dict): data dictionary
        '''
        device = self.device

        batch_size = data['points'].size(0)

        shape = (32, 32, 32)
        p = make_3d_grid([-0.5] * 3, [0.5] * 3, shape).to(device)
        p = p.expand(batch_size, *p.size())

        encoder_inputs, raw_data = compose_inputs(
            data,
            mode='val',
            device=self.device,
            input_type=self.input_type,
            use_gt_depth_map=self.use_gt_depth_map,
            depth_map_mix=self.depth_map_mix,
            with_img=self.with_img,
            depth_pointcloud_transfer=self.depth_pointcloud_transfer,
            local=self.local)

        kwargs = {}
        with torch.no_grad():
            p_r = self.model.forward_halfway(p,
                                             encoder_inputs,
                                             sample=self.eval_sample,
                                             **kwargs)

        occ_hat = p_r.probs.view(batch_size, *shape)
        voxels_out = (occ_hat >= self.threshold).cpu().numpy()

        # Visualize the encoder inputs alongside the predicted voxels.
        if self.local:
            encoder_inputs = encoder_inputs[None]

        if self.input_type == 'depth_pred':
            gt_mask = raw_data['mask']
            if self.with_img:
                encoder_inputs = encoder_inputs['depth']

            for i in trange(batch_size):
                if self.use_gt_depth_map:
                    input_img_path = os.path.join(self.vis_dir,
                                                  '%03d_in_gt.png' % i)
                else:
                    input_img_path = os.path.join(self.vis_dir,
                                                  '%03d_in_pr.png' % i)

                depth_map = encoder_inputs[i].cpu()
                depth_map = depth_to_L(depth_map, gt_mask[i].cpu())
                vis.visualize_data(depth_map, 'img', input_img_path)
                vis.visualize_voxels(
                    voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))
        elif self.input_type == 'depth_pointcloud':
            for i in trange(batch_size):
                input_pointcloud_file = os.path.join(
                    self.vis_dir, '%03d_depth_pointcloud.png' % i)

                pc = encoder_inputs[i].cpu()
                if self.depth_pointcloud_transfer in ('view',
                                                      'view_scale_model'):
                    vis.visualize_pointcloud(pc,
                                             out_file=input_pointcloud_file,
                                             elev=15,
                                             azim=180)
                else:
                    vis.visualize_pointcloud(pc,
                                             out_file=input_pointcloud_file)
                vis.visualize_voxels(
                    voxels_out[i], os.path.join(self.vis_dir, '%03d.png' % i))