Example 1
    def create_scene(self, hws, alphas, N):
        batch = []
        for i in range(N):
            scene = []
            for mesh_name in self.models:
                hw = hws[mesh_name]
                alpha = alphas[mesh_name]
                N, K, _ = hw.shape
                for k in range(K):
                    c = self.colors[mesh_name].clone()
                    c[..., 3] = alpha[i, k]

                    textures = TexturesVertex(verts_features=[c])
                    m = Meshes(verts=[self.verts[mesh_name].clone()],
                               faces=[self.faces[mesh_name].clone()],
                               textures=textures)

                    t = Translate(y=hw[i, k, 0],
                                  x=hw[i, k, 1],
                                  z=torch.zeros(1, device=self.device),
                                  device=str(self.device))
                    m = m.update_padded(t.transform_points(m.verts_padded()))
                    scene += [m]
            batch += [join_meshes_as_scene(scene)]
        batch = join_meshes_as_batch(batch)
        return batch
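The pattern at the heart of create_scene — build a per-instance Translate, apply it to the padded vertices, and write the result back with update_padded — can be reduced to a few lines. A minimal sketch assuming only pytorch3d; the ico_sphere mesh and the offset values are placeholders for illustration.

import torch
from pytorch3d.utils import ico_sphere
from pytorch3d.transforms import Translate

# Placeholder geometry standing in for self.verts / self.faces above.
mesh = ico_sphere(level=1)

# Shift every vertex by (x=5, y=-3, z=0) and bake the result back into the
# Meshes container, as create_scene does for each instance before joining.
t = Translate(x=5.0, y=-3.0, z=0.0)
mesh = mesh.update_padded(t.transform_points(mesh.verts_padded()))
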
Example 2
    def normalize_to_sphere_(self):
        """
        Center and scale the point clouds to a unit sphere
        Returns: normalizing_trans (Transform3D)
        """
        # (B,3,2)
        boxMinMax = self.get_bounding_boxes()
        boxCenter = boxMinMax.sum(dim=-1) / 2
        # (B,)
        boxRange, _ = (boxMinMax[:, :, 1] - boxMinMax[:, :, 0]).max(dim=-1)
        if boxRange == 0:
            boxRange = 1

        # center and scale the point clouds, likely faster than calling obj2world_trans directly?
        pointOffsets = torch.repeat_interleave(-boxCenter,
                                               self.num_points_per_cloud(),
                                               dim=0)
        self.offset_(pointOffsets)
        # (P)
        norms = torch.norm(self.points_packed(), dim=-1)
        # List[(Pi)]
        norms = torch.split(norms, self.num_points_per_cloud())
        # (N)
        scale = torch.stack([x.max() for x in norms], dim=0)
        self.scale_(1 / eps_denom(scale))
        normalizing_trans = Translate(-boxCenter).compose(
            Scale(1 / eps_denom(scale))).to(device=self.device)
        self.obj2world_trans = normalizing_trans.inverse().compose(
            self.obj2world_trans)
        return normalizing_trans
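For reference, the same center-then-scale composition can be written against a raw (P, 3) tensor, without the Pointclouds bookkeeping or the eps_denom guard used above. A rough sketch assuming pytorch3d.transforms:

import torch
from pytorch3d.transforms import Translate, Scale

pts = torch.rand(1000, 3) * 10.0 + 5.0                  # arbitrary points, shape (P, 3)
bb_min, bb_max = pts.min(dim=0).values, pts.max(dim=0).values
center = (bb_min + bb_max) / 2
radius = (pts - center).norm(dim=-1).max()

# Translate to the origin first, then scale so the farthest point lands on the unit sphere.
normalizing_trans = Translate(-center[None]).compose(Scale(1.0 / radius.item()))
pts_normalized = normalizing_trans.transform_points(pts)
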
Example 3
    def normalize_to_box_(self):
        """
        center and scale the point clouds to a unit cube,
        Returns:
            normalizing_trans (Transform3D): Transform3D used to normalize the pointclouds
        """
        # (B,3,2)
        boxMinMax = self.get_bounding_boxes()
        boxCenter = boxMinMax.sum(dim=-1) / 2
        # (B,)
        boxRange, _ = (boxMinMax[:, :, 1] - boxMinMax[:, :, 0]).max(dim=-1)
        if boxRange == 0:
            boxRange = 1

        # center and scale the point clouds, likely faster than calling obj2world_trans directly?
        pointOffsets = torch.repeat_interleave(-boxCenter,
                                               self.num_points_per_cloud(),
                                               dim=0)
        self.offset_(pointOffsets)
        self.scale_(1 / boxRange)

        # update obj2world_trans
        normalizing_trans = Translate(-boxCenter).compose(Scale(
            1 / boxRange)).to(device=self.device)
        self.obj2world_trans = normalizing_trans.inverse().compose(
            self.obj2world_trans)
        return normalizing_trans
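The returned normalizing_trans is also what lets other world-space quantities (camera centers, query points) be carried into the same unit-cube frame, and its inverse maps normalized results back out. A brief sketch for a batch holding a single cloud, where pcl is assumed to be a point-cloud object exposing the method above:

import torch

normalizing_trans = pcl.normalize_to_box_()              # pcl: hypothetical single-cloud batch

query_world = torch.tensor([[[0.5, 0.2, -0.1]]])         # (1, 1, 3) point in world space
query_norm = normalizing_trans.transform_points(query_world)

# The inverse transform maps normalized-frame points back to world space.
query_back = normalizing_trans.inverse().transform_points(query_norm)
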
Example 4
def test_scene():
    world = engine.World()

    world.add_mesh('red_box', verts, faces, red)
    world.add_mesh('green_box', verts, faces, green)
    world.add_mesh('blue_box', verts, faces, blue)

    scene_spec = [
        {'red_box_0': 'red_box', 'green_box_0': 'green_box'},
        {'blue_box_0': 'blue_box', 'blue_box_1': 'blue_box'}
    ]

    world.create_scenes(scene_spec)

    poses = [
        [Translate(0, -30, 0), Translate(-10, -10, 0)],
        [Translate(40, 0, 0), Translate(-10, -10, 0)]
    ]

    world.update_scenes(poses)

    batch = world.batch()
    labels = world.labels()

    distance = 30
    elevation = 0.0
    azimuth = 0

    R, T = look_at_view_transform(distance, elevation, azimuth)
    cameras = FoVOrthographicCameras(max_x=64.0, max_y=64.0,
                                     min_x=-64.0, min_y=-64.0,
                                     scale_xyz=((1, 1, 1),),
                                     R=R, T=T)

    raster_settings = RasterizationSettings(
        image_size=128,
        blur_radius=0,
        faces_per_pixel=6,
    )

    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=raster_settings,
        ),
        shader=IdentityShader()
    )

    boxes = world.bounding_boxes(cameras, (128, 128))
    image = renderer(batch)
    fig, ax = plt.subplots(nrows=1, ncols=1)
    ax.imshow(image[0, :, :, 0, :])
    for box in boxes[0]:
        ax.add_patch(box.get_patch())
    plt.show()
Example 5
    def get_world_to_view_transform(self, **kwargs) -> Transform3d:
        """
        Return the world-to-view transform.
        Args:
            **kwargs: parameters for the camera extrinsics can be passed in
                as keyword arguments to override the default values
                set in __init__.
        Setting R and T here will update the values set in init as these
        values may be needed later on in the rendering pipeline e.g. for
        lighting calculations.
        Returns:
            A Transform3d object which represents a batch of transforms
            with homogeneous matrices of shape (N, 4, 4)
        """
        R = self.R = kwargs.get("R", self.R)  # pyre-ignore[16]
        T = self.T = kwargs.get("T", self.T)  # pyre-ignore[16]
        if T.shape[0] != R.shape[0]:
            msg = "Expected R, T to have the same batch dimension; got %r, %r"
            raise ValueError(msg % (R.shape[0], T.shape[0]))
        if T.dim() != 2 or T.shape[1:] != (3,):
            msg = "Expected T to have shape (N, 3); got %r"
            raise ValueError(msg % repr(T.shape))
        if R.dim() != 3 or R.shape[1:] != (3, 3):
            msg = "Expected R to have shape (N, 3, 3); got %r"
            raise ValueError(msg % R.shape)

        # Create a Transform3d object
        T = Translate(T, device=T.device)
        R = Rotate(R, device=R.device)
        world_to_view_transform = R.compose(T)
        return world_to_view_transform
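In normal use this method is reached through a camera object rather than called directly. A small sketch assuming FoVPerspectiveCameras and look_at_view_transform from pytorch3d.renderer (any camera class derived from CamerasBase exposes the same method):

import torch
from pytorch3d.renderer import FoVPerspectiveCameras, look_at_view_transform

R, T = look_at_view_transform(dist=2.7, elev=10.0, azim=30.0)   # R: (1, 3, 3), T: (1, 3)
cameras = FoVPerspectiveCameras(R=R, T=T)

# Same Rotate(R).compose(Translate(T)) transform that the method above builds.
world_to_view = cameras.get_world_to_view_transform()
pts_view = world_to_view.transform_points(torch.rand(1, 100, 3))
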
Example 6
File: cloud.py Project: yifita/DSS
 def normalize_to_sphere_(self):
     """
     Center and scale the point clouds to a unit sphere
     Returns: normalizing_trans (Transform3D)
     """
     # packed offset
     center = torch.stack([x.mean(dim=0) for x in self.points_list()],
                          dim=0)
     center_packed = torch.repeat_interleave(-center,
                                             self.num_points_per_cloud(),
                                             dim=0)
     self.offset_(center_packed)
     # (P)
     norms = torch.norm(self.points_packed(), dim=-1)
     # List[(Pi)]
     norms = torch.split(norms, self.num_points_per_cloud())
     # (N)
     scale = torch.stack([x.max() for x in norms], dim=0)
     self.scale_(1 / eps_denom(scale))
     normalizing_trans = Translate(-center).compose(
         Scale(1 / eps_denom(scale))).to(device=self.device)
     self.obj2world_trans = normalizing_trans.inverse().compose(
         self.obj2world_trans)
     return normalizing_trans
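Note the design difference from Example 2: this DSS variant centers each cloud at the mean of its points rather than at the bounding-box center, while the scale factor (the largest point norm after centering, guarded by eps_denom) is computed the same way in both.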
Example 7
def get_world_to_view_transform(R=r, T=t) -> Transform3d:
    """
    This function returns a Transform3d representing the transformation
    matrix to go from world space to view space by applying a rotation and
    a translation.

    Pytorch3d uses the same convention as Hartley & Zisserman.
    I.e., for camera extrinsic parameters R (rotation) and T (translation),
    we map a 3D point `X_world` in world coordinates to
    a point `X_cam` in camera coordinates with:
    `X_cam = X_world R + T`

    Args:
        R: (N, 3, 3) matrix representing the rotation.
        T: (N, 3) matrix representing the translation.

    Returns:
        a Transform3d object which represents the composed RT transformation.

    """
    # TODO: also support the case where RT is specified as one matrix
    # of shape (N, 4, 4).

    if T.shape[0] != R.shape[0]:
        msg = "Expected R, T to have the same batch dimension; got %r, %r"
        raise ValueError(msg % (R.shape[0], T.shape[0]))
    if T.dim() != 2 or T.shape[1:] != (3, ):
        msg = "Expected T to have shape (N, 3); got %r"
        raise ValueError(msg % repr(T.shape))
    if R.dim() != 3 or R.shape[1:] != (3, 3):
        msg = "Expected R to have shape (N, 3, 3); got %r"
        raise ValueError(msg % repr(R.shape))

    # Create a Transform3d object
    T = Translate(T, device=T.device)
    R = Rotate(R, device=R.device)
    return R.compose(T)
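The row-vector convention in the docstring, X_cam = X_world R + T, can be checked numerically against the composed transform. A quick sketch assuming pytorch3d.transforms; random_rotations is used only to obtain a valid rotation matrix:

import torch
from pytorch3d.transforms import Rotate, Translate, random_rotations

R = random_rotations(1)                 # (1, 3, 3) valid rotation
T = torch.rand(1, 3)                    # (1, 3) translation
X_world = torch.rand(1, 50, 3)

# Rotate then translate, matching the R.compose(T) returned above.
X_cam = Rotate(R).compose(Translate(T)).transform_points(X_world)
assert torch.allclose(X_cam, X_world @ R + T, atol=1e-5)
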
Example 8
def marching_cubes_naive(
    volume_data_batch: torch.Tensor,
    isolevel: Optional[float] = None,
    spacing: int = 1,
    return_local_coords: bool = True,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    """
    Runs the classic marching cubes algorithm, iterating over
    the coordinates of the volume_data and using a given isolevel
    for determining intersected edges of cubes of size `spacing`.
    Returns vertices and faces of the obtained mesh.
    This operation is non-differentiable.

    This is a naive implementation, and is not optimized for efficiency.

    Args:
        volume_data_batch: a Tensor of size (N, D, H, W) corresponding to
            a batch of 3D scalar fields
        isolevel: the isosurface value to use as the threshold to determine
            whether points are within a volume. If None, then the average of the
            maximum and minimum value of the scalar field will be used.
        spacing: an integer specifying the cube size to use
        return_local_coords: bool. If True the output vertices will be in local
            coordinates in the range [-1, 1] x [-1, 1] x [-1, 1]. If False they
            will be in the range [0, W-1] x [0, H-1] x [0, D-1].
    Returns:
        verts: [(V_0, 3), (V_1, 3), ...] List of N FloatTensors of vertices.
        faces: [(F_0, 3), (F_1, 3), ...] List of N LongTensors of faces.
    """
    volume_data_batch = volume_data_batch.detach().cpu()
    batched_verts, batched_faces = [], []
    D, H, W = volume_data_batch.shape[1:]
    # pyre-ignore [16]
    volume_size_xyz = volume_data_batch.new_tensor([W, H, D])[None]

    if return_local_coords:
        # Convert from local coordinates in the range [-1, 1] to
        # world coordinates in the range [0, W-1] x [0, H-1] x [0, D-1]
        local_to_world_transform = Translate(
            x=+1.0, y=+1.0, z=+1.0, device=volume_data_batch.device).scale(
                (volume_size_xyz - 1) * spacing * 0.5)
        # Perform the inverse to go from world to local
        world_to_local_transform = local_to_world_transform.inverse()

    for i in range(len(volume_data_batch)):
        volume_data = volume_data_batch[i]
        curr_isolevel = (((volume_data.max() + volume_data.min()) /
                          2).item() if isolevel is None else isolevel)
        edge_vertices_to_index = {}
        vertex_coords_to_index = {}
        verts, faces = [], []
        # Use length - spacing for the bounds since we are using
        # cubes of size spacing, with the lowest x,y,z values
        # (bottom front left)
        for x in range(0, W - spacing, spacing):
            for y in range(0, H - spacing, spacing):
                for z in range(0, D - spacing, spacing):
                    cube = Cube((x, y, z), spacing)
                    new_verts, new_faces = polygonise(
                        cube,
                        curr_isolevel,
                        volume_data,
                        edge_vertices_to_index,
                        vertex_coords_to_index,
                    )
                    verts.extend(new_verts)
                    faces.extend(new_faces)
        if len(faces) > 0 and len(verts) > 0:
            verts = torch.tensor(verts, dtype=torch.float32)
            # Convert vertices from world to local coords
            if return_local_coords:
                verts = world_to_local_transform.transform_points(verts[None,
                                                                        ...])
                verts = verts.squeeze()
            batched_verts.append(verts)
            batched_faces.append(torch.tensor(faces, dtype=torch.int64))
    return batched_verts, batched_faces
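A minimal way to exercise the function above on a synthetic scalar field; the sphere signed-distance volume and the 16^3 resolution are made up for illustration, and isolevel=0.0 extracts its surface (marching_cubes_naive is assumed to be in scope, e.g. imported from the module containing the definition above):

import torch

# Signed distance to a sphere of radius 5 centered in a 16^3 grid -> shape (1, D, H, W).
grid = torch.stack(torch.meshgrid(*([torch.arange(16.0)] * 3), indexing="ij"), dim=-1)
volume = ((grid - 7.5).norm(dim=-1) - 5.0)[None]

verts_list, faces_list = marching_cubes_naive(volume, isolevel=0.0)
print(verts_list[0].shape, faces_list[0].shape)          # (V, 3) vertices, (F, 3) faces
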
Example 9
        return i // ncols, i % ncols

    plt.ion()
    fig = plt.figure(figsize=(10, 5))
    spec = plt.GridSpec(ncols=N // 2, nrows=2, figure=fig)
    ax = [fig.add_subplot(spec[gridnum(i, ncols=N // 2)]) for i in range(N)]
    plt.tight_layout()

    for b in range(50):

        transforms = []
        for _ in range(N):
            model_matrices = []
            for _ in objects:
                xy = torch.rand(2) * 128 - 64
                model_matrices += [Translate(x=xy[0], y=xy[1], z=0)]
            transforms += [model_matrices]

        distance = 30
        elevation = 0.0
        azimuth = 0
        R, T = look_at_view_transform(distance,
                                      elevation,
                                      azimuth,
                                      device=device)

        cameras = FoVOrthographicCameras(device=device,
                                         max_x=64.0,
                                         max_y=64.0,
                                         min_x=-64.0,
                                         min_y=-64.0,
Example 10
for i in range(1):
    for mesh_name in models:
        hw = hws[mesh_name]
        alpha = alphas[mesh_name]
        N, K, _ = hw.shape
        for k in range(K):
            c = colors[mesh_name].clone()
            c[..., 3] = c[..., 3] * alpha[i, k]
            textures = TexturesVertex(verts_features=[c])
            m = Meshes(verts=[verts[mesh_name].clone()],
                       faces=[faces[mesh_name].clone()],
                       textures=textures)
            #m = meshes[mesh_name].clone().detach().to(device)
            t = Translate(y=hw[i, k, 0],
                          x=hw[i, k, 1],
                          z=torch.zeros(1, device=device),
                          device=str(device))
            m = m.update_padded(t.transform_points(m.verts_padded()))
            scene += [m]

teapot_mesh = join_meshes_as_scene(scene)

#for i, (name, particles) in particles_per_mesh:

#textures = TexturesVertex(verts_features=colors)

# meshes = Meshes(
#     verts=verts,
#     faces=faces,
#     textures=textures,
# )