def warp_ref_image_and_depth_recons(self, inv_depths, ref_image, K, ref_K,
                                        pose, ref_disp):
        """
        Warps a reference image to produce a reconstruction of the original one.

        Parameters
        ----------
        inv_depths : torch.Tensor [B,1,H,W]
            Inverse depth map of the original image
        ref_image : torch.Tensor [B,3,H,W]
            Reference RGB image
        K : torch.Tensor [B,3,3]
            Original camera intrinsics
        ref_K : torch.Tensor [B,3,3]
            Reference camera intrinsics
        pose : Pose
            Original -> Reference camera transformation

        Returns
        -------
        ref_warped : torch.Tensor [B,3,H,W]
            Warped reference image (reconstructing the original one)
        """
        B, _, H, W = ref_image.shape
        device = ref_image.device
        # Generate cameras for all scales
        cams, ref_cams = [], []
        for i in range(self.n):
            _, _, DH, DW = inv_depths[i].shape
            scale_factor = DW / float(W)
            cams.append(Camera(K=K.float()).scaled(scale_factor).to(device))
            ref_cams.append(
                Camera(K=ref_K.float(),
                       Tcw=pose).scaled(scale_factor).to(device))
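        # Note (assumption, not verified here): Camera.scaled(scale_factor) is
        # expected to return a camera whose intrinsics (fx, fy, cx, cy) are
        # multiplied by scale_factor, so each depth scale sees matching intrinsics.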
        # View synthesis
        depths = [inv2depth(inv_depths[i]) for i in range(self.n)]
        prev_depths = [inv2depth(ref_disp[i]) for i in range(self.n)]

        # Resize the reference image to match each predicted depth scale
        ref_images = match_scales(ref_image, inv_depths, self.n)

        ref_warped = []
        remap_disp = []
        for i in range(self.n):
            warped_ref, depth_remap = view_synthesis_with_depth_recons(
                ref_images[i],
                depths[i],
                ref_cams[i],
                cams[i],
                prev_depths[i],
                padding_mode=self.padding_mode)
            ref_warped.append(warped_ref)

            disp_remap = depth2inv(depth_remap)
            remap_disp.append(disp_remap)

        return ref_warped, remap_disp
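
The helpers inv2depth / depth2inv used above convert between depth and inverse
depth. As a minimal, self-contained sketch of that conversion (an assumption
that they behave as clamped reciprocals, not the project's exact implementation):

import torch

def inv2depth_sketch(inv_depth, eps=1e-6):
    # Convert an inverse-depth map [B,1,H,W] to depth; clamp to avoid division by zero
    return 1.0 / inv_depth.clamp(min=eps)

def depth2inv_sketch(depth, eps=1e-6):
    # Convert a depth map back to inverse depth
    return 1.0 / depth.clamp(min=eps)

# Round-trip check on a dummy inverse-depth map
inv = torch.rand(2, 1, 4, 4) + 0.1
assert torch.allclose(inv, depth2inv_sketch(inv2depth_sketch(inv)), atol=1e-5)
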
Example #2
def evaluate_depth(self, batch):
    """Evaluate batch to produce depth metrics."""
    # Get predicted depth
    inv_depths = self.model(batch)['inv_depths']
    depth = inv2depth(inv_depths[0])
    # Post-process predicted depth: predict again on the horizontally
    # flipped input and fuse both predictions
    batch['rgb'] = flip_lr(batch['rgb'])
    inv_depths_flipped = self.model(batch)['inv_depths']
    inv_depth_pp = post_process_inv_depth(inv_depths[0],
                                          inv_depths_flipped[0],
                                          method='mean')
    depth_pp = inv2depth(inv_depth_pp)
    # Undo the flip to restore the original batch
    batch['rgb'] = flip_lr(batch['rgb'])
    # Calculate predicted metrics
    metrics = OrderedDict()
    if 'depth' in batch:
        for mode in self.metrics_modes:
            metrics[self.metrics_name + mode] = compute_depth_metrics(
                self.config.model.params,
                gt=batch['depth'],
                pred=depth_pp if 'pp' in mode else depth,
                use_gt_scale='gt' in mode)
    # Return metrics and extra information
    return {'metrics': metrics, 'inv_depth': inv_depth_pp}
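
The post-processing above fuses the prediction with a second prediction made on
the horizontally flipped input. A minimal stand-alone sketch of that idea
(assumed behavior of flip_lr and post_process_inv_depth with method='mean'; the
real implementation may additionally blend the image borders):

import torch

def flip_lr_sketch(image):
    # Horizontally flip a [B,C,H,W] tensor along its width dimension
    return torch.flip(image, dims=[3])

def post_process_inv_depth_sketch(inv_depth, inv_depth_flipped, method='mean'):
    # Flip the mirrored prediction back, then fuse it with the original prediction
    inv_depth_hat = flip_lr_sketch(inv_depth_flipped)
    if method == 'mean':
        return 0.5 * (inv_depth + inv_depth_hat)
    raise ValueError('Unknown post-processing method: {}'.format(method))

# Example: fuse a prediction with the prediction from a mirrored input
pred = torch.rand(1, 1, 8, 8)
pred_from_flipped_input = torch.rand(1, 1, 8, 8)
fused = post_process_inv_depth_sketch(pred, pred_from_flipped_input)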