Code example #1
 def test_look_at_rotation_list_broadcast(self):
     # fmt: off
     camera_positions = [[0.0, 0.0, -1.0], [0.0, 0.0, 1.0]]
     rot_mats_expected = torch.tensor(
         [
             [
                 [1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]
             ],
             [
                 [-1.0, 0.0,  0.0],  # noqa: E241, E201
                 [ 0.0, 1.0,  0.0],  # noqa: E241, E201
                 [ 0.0, 0.0, -1.0]   # noqa: E241, E201
             ],
         ],
         dtype=torch.float32
     )
     # fmt: on
     rot_mats = look_at_rotation(camera_positions)
     self.assertClose(rot_mats, rot_mats_expected, atol=2e-7)
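These test snippets come from PyTorch3D's own test suite, so they omit imports and the test-class scaffolding. A minimal standalone sketch of the same broadcasting behavior (assuming pytorch3d is installed) could look like:

import torch
from pytorch3d.renderer.cameras import look_at_rotation

# A plain Python list of two camera positions broadcasts against the
# defaults at=((0, 0, 0),) and up=((0, 1, 0),), giving a (2, 3, 3) batch.
positions = [[0.0, 0.0, -1.0], [0.0, 0.0, 1.0]]
rot_mats = look_at_rotation(positions)
print(rot_mats.shape)  # torch.Size([2, 3, 3])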
Code example #2
 def test_view_transform(self):
     T = torch.tensor([0.0, 0.0, -1.0], requires_grad=True).view(1, -1)
     R = look_at_rotation(T)
     RT = get_world_to_view_transform(R=R, T=T)
     self.assertTrue(isinstance(RT, Transform3d))
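The test only checks the return type. As a hedged sketch of what the Transform3d is for (PyTorch3D's world-to-view convention is p_view = p_world @ R + T):

import torch
from pytorch3d.renderer.cameras import (
    get_world_to_view_transform,
    look_at_view_transform,
)

# look_at_view_transform returns a matching (R, T) pair with T = -R^T @ eye,
# so the camera center lands at the origin of view space.
R, T = look_at_view_transform(dist=2.7, elev=0, azim=0)
RT = get_world_to_view_transform(R=R, T=T)
pts = torch.rand(1, 5, 3)            # batch of world-space points
pts_view = RT.transform_points(pts)  # same shape, now in view coordinates
print(pts_view.shape)                # torch.Size([1, 5, 3])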
Code example #3
 def test_look_at_rotation_input_fail(self):
     camera_position = [-1.0]  # expected to have xyz positions
     with self.assertRaises(ValueError):
         look_at_rotation(camera_position)
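The ValueError is raised because look_at_rotation expects positions that convert to an (N, 3) tensor; a single scalar does not. A minimal sketch of the passing form:

from pytorch3d.renderer.cameras import look_at_rotation

# A single camera still needs a full xyz triple, nested so the input
# converts to shape (1, 3) rather than (1,).
rot = look_at_rotation([[-1.0, 0.0, 0.0]])
print(rot.shape)  # torch.Size([1, 3, 3])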
Code example #4
 def test_look_at_rotation_python_list(self):
     camera_position = [[0.0, 0.0, -1.0]]  # camera pointing along negative z
     rot_mat = look_at_rotation(camera_position)
     self.assertClose(rot_mat, torch.eye(3)[None], atol=2e-7)
Code example #5
import torch
from pytorch3d.renderer import cameras as torchCam  # assumed import behind the torchCam alias

def getCamera(w, h, near, far, fov, Origin, LookAt, Up):
    aspect = w / h
    R = torchCam.look_at_rotation(Origin, LookAt, Up)
    # T is the world-to-view translation, not the camera origin itself:
    # the look-at convention uses T = -R^T @ Origin.
    T = -torch.bmm(R.transpose(1, 2), Origin[:, :, None])[:, :, 0]
    return torchCam.FoVPerspectiveCameras(near, far, aspect, fov, R=R, T=T).get_projection_transform()
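A possible call site for getCamera; the argument values are illustrative and torchCam is assumed to be pytorch3d.renderer.cameras:

import torch

origin = torch.tensor([[0.0, 0.0, 2.7]])   # camera position, shape (1, 3)
look_at = torch.tensor([[0.0, 0.0, 0.0]])  # target point
up = torch.tensor([[0.0, 1.0, 0.0]])       # up vector
proj = getCamera(w=256, h=256, near=0.1, far=10.0, fov=60.0,
                 Origin=origin, LookAt=look_at, Up=up)
print(proj.get_matrix().shape)  # torch.Size([1, 4, 4])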
        
Code example #6
    # compare

    # sample on sphere
    radius = 1  # np.random.rand(1)[0]
    samples = torch.rand((1, 2, nsamplesSphere))
    # sphereOrig = torch.rand((1,1,3))
    cx, cy, cz = (0, 0, 0)
    sphereOrig = torch.Tensor([[[cx, cy, cz]]])
    sphereOrigt = torch.Tensor([[[cy, cx, -cz]]])  # same origin with x/y swapped and z negated (presumably the pytorch3d-side convention)
    x, y, z = lin.sampleUniformOnSphere(samples[:, 0, :], samples[:, 1, :])
    xyz = torch.stack((x, y, z), dim=2) * radius + sphereOrigt

    # rasterization by pytorch3d
    aspect = w / h
    # R, T = torchCam.look_at_view_transform(dist=2.7, elev=0, azim=0)
    R = torchCam.look_at_rotation(camOrig, camLookAtt, camUp)
    T = -torch.einsum('abc,ab->ac', R, camOrig)  # world-to-view translation: T = -R^T @ camOrig
    cam = torchCam.FoVPerspectiveCameras(near, far, aspect, fov, R=R, T=T)

    # project the sphere samples to screen space and splat them as white pixels
    uv = cam.transform_points_screen(xyz, torch.Tensor([[h, w]]))
    uv = torch.clamp(uv, 0, h - 1)  # clamps both axes to h - 1, so this assumes w == h
    img = torch.zeros((h, w, 3))
    img[uv[0, :, 0].long(), uv[0, :, 1].long(), :] = 1

    outfn = os.path.join(outdir, outfmtTorch % (0, 0))
    im.writePng(outfn, img)

    # render by mitsuba
    light = mts.pointlight(camOrig)
    camera = mts.camera(camOrig, camLookAt, camUp, fov, nsamples=nsamples)
    shape = mts.sphere(sphereOrig[0], radius, material)
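A quick sanity check for the T convention used above; eye, at, and up here are illustrative stand-ins for camOrig, camLookAtt, and camUp:

import torch
from pytorch3d.renderer import cameras as torchCam

eye = torch.tensor([[0.0, 0.0, 2.7]])
at = torch.tensor([[0.0, 0.0, 0.0]])
up = torch.tensor([[0.0, 1.0, 0.0]])

R = torchCam.look_at_rotation(eye, at, up)
T = -torch.einsum('abc,ab->ac', R, eye)  # -R^T @ eye, as in the snippet above
R_ref, T_ref = torchCam.look_at_view_transform(eye=eye, at=at, up=up)
print(torch.allclose(T, T_ref, atol=1e-6))  # True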
Code example #7
def get_tri_color_lights_for_view(cams, has_specular=False, point_lights=True):
    """
    Create three RGB light directions in the upper half dome.
    The directions are given in the same coordinates as the point cloud.
    Args:
        cams: cameras whose device and camera center orient the lights
    Returns:
        Lights with three RGB light sources (B: right, G: left, R: bottom)
    """
    import math

    import torch
    import torch.nn.functional as F
    from DSS.core.lighting import (DirectionalLights, PointLights)
    from pytorch3d.renderer.cameras import look_at_rotation
    from pytorch3d.transforms import Rotate

    elev = torch.tensor(((30, 30, 30), ), device=cams.device)
    azim = torch.tensor(((-60, 60, 180), ), device=cams.device)
    elev = math.pi / 180.0 * elev
    azim = math.pi / 180.0 * azim

    x = torch.cos(elev) * torch.sin(azim)
    y = torch.sin(elev)
    z = torch.cos(elev) * torch.cos(azim)
    light_directions = torch.stack([x, y, z], dim=-1)
    # import trimesh
    # import pdb; pdb.set_trace()
    # trimesh.Trimesh(vertices=light_directions[0].cpu().numpy(), process=False).export('tests/outputs/light_dir_pre.ply')
    # transform from y-up to z-up
    # transform from camera to world
    cam_pos = cams.get_camera_center()
    R = look_at_rotation(torch.zeros_like(cam_pos),
                         at=F.normalize(torch.cross(cam_pos,
                                                    torch.rand_like(cam_pos),
                                                    dim=-1),
                                        dim=-1),
                         up=cam_pos)
    light_directions = Rotate(R=R.transpose(1, 2),
                              device=cams.device).transform_points(light_directions)
    # trimesh.Trimesh(vertices=torch.cat([cam_pos, light_directions[0]], dim=0).cpu().numpy(), process=False).export('tests/outputs/light_dir.ply')
    ambient_color = torch.FloatTensor((((0.2, 0.2, 0.2), ), ))
    diffuse_color = torch.FloatTensor(((
        (0.0, 0.0, 0.8),
        (0.0, 0.8, 0.0),
        (0.8, 0.0, 0.0),
    ), ))
    if has_specular:
        specular_color = 0.15 * diffuse_color
        diffuse_color *= 0.85
    else:
        specular_color = ((
            (0, 0, 0),
            (0, 0, 0),
            (0, 0, 0),
        ), )
    if not point_lights:
        lights = DirectionalLights(ambient_color=ambient_color,
                                   diffuse_color=diffuse_color,
                                   specular_color=specular_color,
                                   direction=light_directions)
    else:
        location = light_directions * 5
        lights = PointLights(ambient_color=ambient_color,
                             diffuse_color=diffuse_color,
                             specular_color=specular_color,
                             location=location)
    return lights
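A possible usage sketch, assuming DSS is installed and that its DirectionalLights/PointLights follow PyTorch3D's Lights API; the camera placement is illustrative:

import torch
from pytorch3d.renderer import FoVPerspectiveCameras, look_at_view_transform

R, T = look_at_view_transform(dist=2.0, elev=20, azim=30)
cams = FoVPerspectiveCameras(R=R, T=T)
lights = get_tri_color_lights_for_view(cams, has_specular=True, point_lights=True)
print(type(lights).__name__)  # PointLights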