# Exemplo n.º 1
# 0
    def warp_ref_image_spatial(self, inv_depths, ref_image, K, ref_K,
                               extrinsics_1, extrinsics_2):
        """
        Warp a reference image into the original view (spatial warping).

        Parameters
        ----------
        inv_depths : torch.Tensor [6,1,H,W]
            Inverse depth maps of the original image, one per scale
        ref_image : torch.Tensor [6,3,H,W]
            Reference RGB image
        K : torch.Tensor [B,3,3]
            Original camera intrinsics
        ref_K : torch.Tensor [B,3,3]
            Reference camera intrinsics
        extrinsics_1 : torch.Tensor [B,4,4]
            Target image extrinsics
        extrinsics_2 : torch.Tensor [B,4,4]
            Context image extrinsics

        Returns
        -------
        ref_warped : list of torch.Tensor [B,3,H,W]
            Warped reference images (reconstructing the original one), per scale
        valid_points_masks : list of torch.Tensor
            Per-scale masks, True where the sampling coords fall inside [-1, 1]
        """
        _, _, _, W = ref_image.shape
        device = ref_image.get_device()
        # Build one (target, reference) camera pair per scale, rescaling the
        # intrinsics to match each inverse-depth map's resolution.
        cams, ref_cams = [], []
        for scale in range(self.n):
            factor = inv_depths[scale].shape[-1] / float(W)
            cams.append(
                Camera(K=K.float(), Tcw=extrinsics_1).scaled(factor).to(device))
            ref_cams.append(
                Camera(K=ref_K.float(), Tcw=extrinsics_2).scaled(factor).to(device))
        # Synthesize the reference view at every scale
        depths = [inv2depth(inv_depths[scale]) for scale in range(self.n)]
        ref_images = match_scales(ref_image, inv_depths, self.n)
        warped, coords = [], []
        for scale in range(self.n):
            w, c = view_synthesis(ref_images[scale], depths[scale],
                                  ref_cams[scale], cams[scale],
                                  padding_mode=self.padding_mode)
            warped.append(w)
            coords.append(c)
        # A point is valid when its normalized sampling coords lie in [-1, 1]
        valid_points_masks = [c.abs().max(dim=-1)[0] <= 1 for c in coords]
        return warped, valid_points_masks
    def warp_ref_image(self, inv_depths, ref_image, K, k, p, ref_K, ref_k,
                       ref_p, pose):
        """
        Warp a reference image to reconstruct the original one (distorted cameras).

        Parameters
        ----------
        inv_depths : list of torch.Tensor [B,1,H,W]
            Inverse depth maps of the original image, one per scale
        ref_image : torch.Tensor [B,3,H,W]
            Reference RGB image
        K : torch.Tensor [B,3,3]
            Original camera intrinsics
        k : torch.Tensor
            Original radial distortion coefficients (columns k1, k2, k3)
        p : torch.Tensor
            Original tangential distortion coefficients (columns p1, p2)
        ref_K : torch.Tensor [B,3,3]
            Reference camera intrinsics
        ref_k : torch.Tensor
            Reference radial distortion coefficients (columns k1, k2, k3)
        ref_p : torch.Tensor
            Reference tangential distortion coefficients (columns p1, p2)
        pose : Pose
            Original -> Reference camera transformation

        Returns
        -------
        ref_warped : list of torch.Tensor [B,3,H,W]
            Warped reference images (reconstructing the original one), per scale
        """
        _, _, _, W = ref_image.shape
        device = ref_image.get_device()
        # One distorted-camera pair per scale, intrinsics rescaled to each
        # depth map's resolution.
        cams, ref_cams = [], []
        for scale in range(self.n):
            factor = inv_depths[scale].shape[-1] / float(W)
            cams.append(
                CameraDistorted(K=K.float(), k1=k[:, 0], k2=k[:, 1],
                                k3=k[:, 2], p1=p[:, 0],
                                p2=p[:, 1]).scaled(factor).to(device))
            ref_cams.append(
                CameraDistorted(K=ref_K.float(), k1=ref_k[:, 0],
                                k2=ref_k[:, 1], k3=ref_k[:, 2],
                                p1=ref_p[:, 0], p2=ref_p[:, 1],
                                Tcw=pose).scaled(factor).to(device))
        # Synthesize the reference view at every scale
        depths = [inv2depth(inv_depths[scale]) for scale in range(self.n)]
        ref_images = match_scales(ref_image, inv_depths, self.n)
        return [
            view_synthesis(img, dep, rc, c, padding_mode=self.padding_mode)
            for img, dep, rc, c in zip(ref_images, depths, ref_cams, cams)
        ]
    def warp_ref_image(self, inv_depths, ref_image, K, ref_K, pose):
        """
        Warps a reference image to produce a reconstruction of the original one.

        Parameters
        ----------
        inv_depths : list of torch.Tensor [B,1,H,W]
            Inverse depth maps of the original image, one per scale
        ref_image : torch.Tensor [B,3,H,W]
            Reference RGB image
        K : torch.Tensor [B,3,3]
            Original camera intrinsics
        ref_K : torch.Tensor [B,3,3]
            Reference camera intrinsics
        pose : Pose
            Original -> Reference camera transformation

        Returns
        -------
        ref_warped : list of torch.Tensor [B,3,H,W]
            Warped reference images (reconstructing the original one), per scale
        """
        B, _, H, W = ref_image.shape
        # Use the device of the input tensor rather than the previous
        # hard-coded CPU device (a leftover debugging hack); `Tensor.device`
        # also works for CPU tensors, unlike `Tensor.get_device()`.
        device = ref_image.device
        # Generate cameras for all scales
        cams, ref_cams = [], []
        for i in range(self.n):
            _, _, DH, DW = inv_depths[i].shape
            # Rescale intrinsics to the resolution of this depth map
            scale_factor = DW / float(W)
            cams.append(Camera(K=K.float()).scaled(scale_factor).to(device))
            ref_cams.append(
                Camera(K=ref_K.float(), Tcw=pose).scaled(scale_factor).to(device))
        # View synthesis at every scale
        depths = [inv2depth(inv_depths[i]) for i in range(self.n)]
        ref_images = match_scales(ref_image, inv_depths, self.n)
        ref_warped = [view_synthesis(
            ref_images[i], depths[i], ref_cams[i], cams[i],
            padding_mode=self.padding_mode) for i in range(self.n)]
        # Return warped reference image
        return ref_warped
def warp_inv_depth(inv_depths, K, ref_K, pose):
    """
    Warp the predicted depth maps from the reference view into the original one.

    Parameters
    ----------
    inv_depths : list of torch.Tensor [B,1,H,W]
        Predicted inverse depth maps for the original image, in all scales
    K : torch.Tensor [B,3,3]
        Original camera intrinsics
    ref_K : torch.Tensor [B,3,3]
        Reference camera intrinsics
    pose : Pose
        Original -> Reference camera transformation

    Returns
    -------
    ref_warped : list of torch.Tensor [B,1,H,W]
        Warped depth maps, one per scale
    """
    B, _, H, W = inv_depths[0].shape  # full scale
    device = inv_depths[0].get_device()
    # Generate cameras for all scales
    cams, ref_cams = [], []
    n = len(inv_depths)
    for i in range(n):
        _, _, DH, DW = inv_depths[i].shape
        # Rescale intrinsics to this scale's resolution
        scale_factor = DW / float(W)
        cams.append(Camera(K=K.float()).scaled(scale_factor).to(device))
        ref_cams.append(
            Camera(K=ref_K.float(), Tcw=pose).scaled(scale_factor).to(device))
    # View synthesis
    depths = [inv2depth(inv_depths[i]) for i in range(n)]
    # NOTE(review): the depth map itself is passed as the image to warp
    # (first argument), so it is depth — not inverse depth — that gets
    # warped despite the function name; presumably intentional, but
    # confirm against callers.
    ref_warped = [
        view_synthesis(depths[i],
                       depths[i],
                       ref_cams[i],
                       cams[i],
                       padding_mode="zeros") for i in range(n)
    ]
    # Return warped reference image
    return ref_warped
    def warp_ref_image(self, inv_depths, ref_image, ref_tensor, K, ref_K, pose,
                       ref_extrinsics, ref_context_type):
        """
        Warps a reference image (and an auxiliary tensor) to reconstruct the
        original one.

        Parameters
        ----------
        inv_depths : list of torch.Tensor [B,1,H,W]
            Inverse depth maps of the original image, one per scale
        ref_image : torch.Tensor [B,3,H,W]
            Reference RGB image
        ref_tensor : torch.Tensor
            Auxiliary tensor warped with nearest-neighbour sampling
        K : torch.Tensor [B,3,3]
            Original camera intrinsics
        ref_K : torch.Tensor [B,3,3]
            Reference camera intrinsics
        pose : Pose
            Original -> Reference camera transformation
        ref_extrinsics : torch.Tensor [B,4,4]
            Extrinsics used for 'left'/'right' context entries
        ref_context_type : sequence of str
            Per-batch context type ('left', 'right', ...)

        Returns
        -------
        ref_warped : list of torch.Tensor [B,3,H,W]
            Warped reference images, one per scale
        ref_tensors_warped : list of torch.Tensor
            Warped auxiliary tensors, one per scale
        """
        B, _, H, W = ref_image.shape
        device = ref_image.get_device()
        # Generate cameras for all scales
        cams, ref_cams = [], []
        # For spatial ('left'/'right') contexts, use the stored extrinsics
        # instead of the predicted pose.
        # NOTE(review): this writes into pose.mat in place, so the caller's
        # Pose object is modified as a side effect — confirm this is intended.
        for b in range(B):
            if ref_context_type[b] == 'left' or ref_context_type[b] == 'right':
                pose.mat[b, :, :] = ref_extrinsics[b, :, :]
                #pose.mat[b,:3,3]=0
        for i in range(self.n):
            _, _, DH, DW = inv_depths[i].shape
            # Rescale intrinsics to this scale's resolution
            scale_factor = DW / float(W)
            cams.append(Camera(K=K.float()).scaled(scale_factor).to(device))
            ref_cams.append(
                Camera(K=ref_K.float(),
                       Tcw=pose).scaled(scale_factor).to(device))
        # View synthesis
        depths = [inv2depth(inv_depths[i]) for i in range(self.n)]
        ref_images = match_scales(ref_image, inv_depths, self.n)
        ref_warped = [
            view_synthesis(ref_images[i],
                           depths[i],
                           ref_cams[i],
                           cams[i],
                           padding_mode=self.padding_mode)
            for i in range(self.n)
        ]
        # Return warped reference image

        # Warp the auxiliary tensor with nearest-neighbour sampling
        ref_tensors = match_scales(ref_tensor,
                                   inv_depths,
                                   self.n,
                                   mode='nearest',
                                   align_corners=None)
        ref_tensors_warped = [
            view_synthesis(ref_tensors[i],
                           depths[i],
                           ref_cams[i],
                           cams[i],
                           padding_mode=self.padding_mode,
                           mode='nearest') for i in range(self.n)
        ]

        return ref_warped, ref_tensors_warped
    def warp_ref_image(self, inv_depths, camera_type, intrinsics_poly_coeffs,
                       intrinsics_principal_point, intrinsics_scale_factors,
                       intrinsics_K, intrinsics_k, intrinsics_p, ref_image,
                       ref_pose, ref_ego_mask_tensors, ref_camera_type,
                       ref_intrinsics_poly_coeffs,
                       ref_intrinsics_principal_point,
                       ref_intrinsics_scale_factors, ref_intrinsics_K,
                       ref_intrinsics_k, ref_intrinsics_p):
        """
        Warp a reference image (and its ego-mask tensors) to reconstruct the
        original one, using multifocal cameras.

        Parameters
        ----------
        inv_depths : list of torch.Tensor [B,1,H,W]
            Inverse depth maps of the original image, one per scale
        camera_type, intrinsics_* : per-camera parameters of the original view
            (polynomial coeffs, principal point, scale factors, K, distortion
            k1..k3 / p1..p2 as tensor columns)
        ref_image : torch.Tensor [B,3,H,W]
            Reference RGB image
        ref_pose : Pose
            Original -> Reference camera transformation
        ref_ego_mask_tensors : list of torch.Tensor
            Ego-mask tensors of the reference view, one per scale
        ref_camera_type, ref_intrinsics_* : per-camera parameters of the
            reference view, mirroring the original-view arguments

        Returns
        -------
        ref_warped : list of torch.Tensor [B,3,H,W]
            Warped reference images, one per scale
        ref_tensors_warped : list of torch.Tensor
            Warped ego-mask tensors, one per scale
        """
        _, _, _, W = ref_image.shape
        device = ref_image.get_device()
        # One multifocal camera pair per scale, intrinsics rescaled to the
        # resolution of the corresponding inverse-depth map.
        cams, ref_cams = [], []
        for scale in range(self.n):
            factor = inv_depths[scale].shape[-1] / float(W)
            cams.append(
                CameraMultifocal(intrinsics_poly_coeffs,
                                 intrinsics_principal_point,
                                 intrinsics_scale_factors,
                                 intrinsics_K,
                                 intrinsics_k[:, 0], intrinsics_k[:, 1],
                                 intrinsics_k[:, 2],
                                 intrinsics_p[:, 0], intrinsics_p[:, 1],
                                 camera_type,
                                 Tcw=None).scaled(factor).to(device))
            ref_cams.append(
                CameraMultifocal(ref_intrinsics_poly_coeffs,
                                 ref_intrinsics_principal_point,
                                 ref_intrinsics_scale_factors,
                                 ref_intrinsics_K,
                                 ref_intrinsics_k[:, 0], ref_intrinsics_k[:, 1],
                                 ref_intrinsics_k[:, 2],
                                 ref_intrinsics_p[:, 0], ref_intrinsics_p[:, 1],
                                 ref_camera_type,
                                 Tcw=ref_pose).scaled(factor).to(device))
        depths = [inv2depth(inv_depths[scale]) for scale in range(self.n)]
        ref_images = match_scales(ref_image, inv_depths, self.n)
        # Default (bilinear) warp of the RGB reference images
        ref_warped = [
            view_synthesis(img, dep, rc, c, padding_mode=self.padding_mode)
            for img, dep, rc, c in zip(ref_images, depths, ref_cams, cams)
        ]
        # Nearest-neighbour warp of the ego-mask tensors
        ref_tensors_warped = [
            view_synthesis(mask, dep, rc, c,
                           padding_mode=self.padding_mode, mode='nearest')
            for mask, dep, rc, c in zip(ref_ego_mask_tensors, depths,
                                        ref_cams, cams)
        ]
        return ref_warped, ref_tensors_warped