# Example 1
    def test_perspective(self):
        """Default-intrinsics projection must agree with the naive reference."""
        cameras = SfMPerspectiveCameras()
        transform = cameras.get_projection_transform()

        points = torch.randn([3, 4, 3], dtype=torch.float32)
        projected = transform.transform_points(points)
        expected = sfm_perspective_project_naive(points)
        self.assertClose(projected, expected)
# Example 2
 def test_perspective_kwargs(self):
     """Kwargs given to get_projection_transform override the init-time intrinsics."""
     cameras = SfMPerspectiveCameras(focal_length=5.0, principal_point=((2.5, 2.5),))
     transform = cameras.get_projection_transform(
         focal_length=2.0, principal_point=((2.5, 3.5),)
     )
     points = torch.randn([3, 4, 3], dtype=torch.float32)
     projected = transform.transform_points(points)
     expected = sfm_perspective_project_naive(points, fx=2.0, fy=2.0, p0x=2.5, p0y=3.5)
     self.assertClose(projected, expected, atol=1e-6)
# Example 3
    def test_perspective_scaled(self):
        """Per-axis focal lengths and a shifted principal point match the naive projection."""
        fx, fy = 10.0, 15.0
        px, py = 15.0, 30.0

        cameras = SfMPerspectiveCameras(
            focal_length=((fx, fy),),
            principal_point=((px, py),),
        )
        transform = cameras.get_projection_transform()

        points = torch.randn([3, 4, 3], dtype=torch.float32)
        projected = transform.transform_points(points)
        expected = sfm_perspective_project_naive(points, fx=fx, fy=fy, p0x=px, p0y=py)
        via_camera = cameras.transform_points(points)

        self.assertClose(projected, expected)
        # Only the xy components are comparable for the full camera transform.
        self.assertClose(via_camera[..., :2], expected[..., :2])
# Example 4
    def __init__(self, device, dtype, zsep=1, init_cam=None, Nv=311, Np=128, scale=1):
        """Build a two-plane (XY/UV) lightfield mesh scene and its renderer.

        Args:
            device: torch device for all tensors.
            dtype: torch dtype for the 2D vertex coordinates.
            zsep: z separation between the XY (z=0) and UV (z=zsep) planes.
            init_cam: optional pre-built camera object; when None a single
                SfMPerspectiveCameras view is created with
                look_at_view_transform(dist=2, elev=0, azim=0).
            Nv: number of vertices along each side of the square planar mesh.
            Np: number of pixels per side in the rendered images.
            scale: half-extent of the plane; vertices span [-scale, scale].
        """
        super().__init__()

        self.zsep = zsep
        self.Np = Np

        # Regular grid of spatial coordinates for the uniform planar mesh.
        xp = np.linspace(-scale, scale, Nv)
        Y, X = np.meshgrid(xp, xp)

        # Flatten the grid into an (Nv*Nv, 2) list of 2D vertices.
        init_verts2d = torch.cat(
            (
                torch.tensor(X, dtype=dtype).view(Nv * Nv, 1),
                torch.tensor(Y, dtype=dtype).view(Nv * Nv, 1),
            ),
            1,
        ).to(device)
        self.verts2D = init_verts2d

        # The XY plane sits at z = 0; the UV plane at z = zsep.
        depthXY = torch.zeros(Nv * Nv, 1).to(device)
        depthUV = zsep * torch.ones(Nv * Nv, 1).to(device)

        # Lift the shared 2D vertices into 3D, one depth per plane.
        verts3DXY = torch.cat((self.verts2D, depthXY), 1)  # (Nv*Nv, 3)
        verts3DUV = torch.cat((self.verts2D, depthUV), 1)  # (Nv*Nv, 3)

        # Delaunay-triangulate the 2D grid to get the face list.
        # Face indices must be an integer dtype to be usable as indices.
        tri = sp.Delaunay(self.verts2D.cpu().numpy())
        self.faces = torch.tensor(tri.simplices, dtype=torch.int64).to(device)  # (F, 3)

        # Choose the initial camera model.
        # BUGFIX: compare against None with `is`, not `==` — `==` may be
        # overloaded (or broadcast) by camera/tensor types.
        if init_cam is None:
            self.views = 1
            # Camera position from spherical angles, looking at the origin.
            R, T = look_at_view_transform(dist=2, elev=0, azim=0, device=device)
            init_cam = SfMPerspectiveCameras(device=device, R=R, T=T)
        self.cameras = init_cam

        # Rasterization settings; blur_radius chosen so the sigmoid blending
        # falls to ~1e-6 at the face boundary.
        self.raster_settings = RasterizationSettings(
            image_size=Np,
            blur_radius=np.log(1.0 / 1e-6 - 1.0) * 1e-6,
            faces_per_pixel=1,
            bin_size=0,
        )

        # One Meshes object per plane, batched with a leading singleton dim.
        self.meshesXY = Meshes(
            verts=verts3DXY[None, :, :],
            faces=self.faces[None, :, :],
        )
        self.meshesUV = Meshes(
            verts=verts3DUV[None, :, :],
            faces=self.faces[None, :, :],
        )

        # Replicate the meshes once per camera view.
        self.views = self.cameras.R.shape[0]
        self.meshesXY = self.meshesXY.extend(self.views)
        self.meshesUV = self.meshesUV.extend(self.views)

        # Renderer pairing the rasterizer with the lightfield shader.
        self.renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=self.cameras,
                raster_settings=self.raster_settings,
            ),
            shader=LightfieldShader(cameras=self.cameras),
        )