Example #1
    def renderTemplates(self, renderer, assembly, t, R):
        if R.shape[-1] != t.shape[-1]:
            err_str = f"R shape {R.shape} doesn't match t shape {t.shape}"
            raise AssertionError(err_str)

        num_templates = R.shape[-1]

        component_poses = ((np.eye(3), np.zeros(3)), )
        assembly = assembly.setPose(component_poses, in_place=False)

        init_vertices = render.makeBatch(assembly.vertices,
                                         dtype=torch.float).cuda()
        faces = render.makeBatch(assembly.faces, dtype=torch.int).cuda()
        textures = render.makeBatch(assembly.textures,
                                    dtype=torch.float).cuda()

        vertices = torch.einsum('nvj,jit->nvit', [init_vertices, R]) + t
        vertices = vertices.permute(-1, 0, 1, 2)

        faces = faces.expand(num_templates, *faces.shape)
        textures = textures.expand(num_templates, *textures.shape)

        rgb_images_obj, depth_images_obj = renderer.render(
            torch.reshape(vertices, (-1, *vertices.shape[2:])),
            torch.reshape(faces, (-1, *faces.shape[2:])),
            torch.reshape(textures, (-1, *textures.shape[2:])))
        rgb_images_scene, depth_images_scene, label_images_scene = render.reduceByDepth(
            torch.reshape(rgb_images_obj,
                          vertices.shape[:2] + rgb_images_obj.shape[1:]),
            torch.reshape(depth_images_obj,
                          vertices.shape[:2] + depth_images_obj.shape[1:]),
        )

        return rgb_images_scene, depth_images_scene
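
A minimal usage sketch for this method (the names scene, renderer, and assembly, and the shapes below, are assumptions rather than part of the original source): R stacks one 3x3 rotation per template along its last axis and t stacks the matching translations, so R.shape[-1] == t.shape[-1] as the assertion requires.

    import torch

    num_templates = 8
    # (3, 3, T) rotations and (3, T) translations, one per candidate template
    R = torch.eye(3).unsqueeze(-1).expand(3, 3, num_templates).cuda()
    t = torch.zeros(3, num_templates).cuda()
    # `scene` is a hypothetical instance of the class that defines renderTemplates;
    # `renderer` and `assembly` are assumed to come from the surrounding library.
    rgb_batch, depth_batch = scene.renderTemplates(renderer, assembly, t, R)
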
Example #2
    def renderScene(self,
                    assembly,
                    component_poses,
                    rgb_background=None,
                    depth_background=None,
                    camera_pose=None,
                    camera_params=None,
                    as_numpy=False):
        """ Render a scene consisting of a spatial assembly and a background plane.

        Parameters
        ----------

        Returns
        -------
        """

        if camera_pose is None:
            camera_pose = geometry.homogeneousMatrix(self.R[0], self.t[0][0])

        if camera_params is None:
            camera_params = self.K[0]

        if rgb_background is None:
            rgb_background = torch.zeros(*self.image_shape, 3)

        if depth_background is None:
            depth_background = torch.full(self.image_shape, float('inf'))

        if not assembly.blocks:
            label_background = torch.zeros(*self.image_shape, dtype=torch.int)
            return rgb_background, depth_background, label_background

        assembly = assembly.setPose(component_poses, in_place=False)

        vertices = torchutils.makeBatch(assembly.vertices,
                                        dtype=torch.float).cuda()
        faces = torchutils.makeBatch(assembly.faces, dtype=torch.int).cuda()
        textures = torchutils.makeBatch(assembly.textures,
                                        dtype=torch.float).cuda()

        rgb_images, depth_images = self.render(vertices, faces, textures)

        rgb_images = torch.cat((rgb_background[None, ...], rgb_images), 0)
        depth_images = torch.cat((depth_background[None, ...], depth_images),
                                 0)

        rgb_image, depth_image, label_image = render.reduceByDepth(
            rgb_images, depth_images)

        if as_numpy:
            rgb_image = rgb_image.detach().cpu().numpy()
            depth_image = depth_image.detach().cpu().numpy()
            label_image = label_image.detach().cpu().numpy()

        return rgb_image, depth_image, label_image
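
A hypothetical call, mirroring the single identity pose passed to setPose in example #1 (the names scene and assembly are placeholders for objects from the surrounding library):

    import numpy as np

    component_poses = ((np.eye(3), np.zeros(3)), )  # one (R, t) pair: identity pose
    rgb, depth, labels = scene.renderScene(assembly, component_poses, as_numpy=True)
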
Example #3
    def renderComponent(self,
                        assembly,
                        component_index,
                        component_pose,
                        rgb_background=None,
                        depth_background=None):
        """

        Parameters
        ----------
        rgb_background : array of float, shape (img_height, img_width, 3), optional
        depth_background : array of shape (img_height, img_width), optional

        Returns
        -------
        rgb_image :
        depth_image :
        label_image :
        """

        if (rgb_background is None) != (depth_background is None):
            err_str = (
                "Keyword arguments 'rgb_background' and 'depth_background' "
                "must be passed together --- one of the arguments passed is "
                "None, but the other is not.")
            raise AssertionError(err_str)

        assembly = assembly.recenter(component_index, in_place=False)

        vertices = torch.stack(
            tuple(assembly.componentVertices(component_index)))
        faces = torch.stack(tuple(assembly.componentFaces(component_index)))
        textures = torch.stack(
            tuple(assembly.componentTextures(component_index)))

        R, t = component_pose
        vertices = vertices @ m.np.transpose(R) + t

        rgb_images, depth_images = self.render(vertices, faces, textures)

        if rgb_background is not None:
            rgb_images = torch.cat((rgb_background[None, ...], rgb_images), 0)

        if depth_background is not None:
            depth_images = torch.cat(
                (depth_background[None, ...], depth_images), 0)

        rgb_image, depth_image, label_image = render.reduceByDepth(
            rgb_images, depth_images)

        return rgb_image, depth_image, label_image
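
A hypothetical call that renders component 0 at the identity pose; both background arguments are omitted together, which the guard at the top of the method allows. The pose is given here as numpy arrays, matching the setPose call in example #1 (scene and assembly are placeholders):

    import numpy as np

    R, t = np.eye(3), np.zeros(3)  # identity rotation, zero translation
    rgb, depth, labels = scene.renderComponent(
        assembly, component_index=0, component_pose=(R, t))
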
Example #4
def render(renderer, assembly, t, R):
    t = t.permute(1, 0)
    R = R.permute(2, 1, 0)

    if R.shape[-1] != t.shape[-1]:
        err_str = f"R shape {R.shape} doesn't match t shape {t.shape}"
        raise AssertionError(err_str)

    num_templates = R.shape[-1]

    def get_vertices(link):
        return link.pose.apply(link.mesh.vertices)

    mesh_links = tuple(link for _, link in assembly.links.items()
                       if link.mesh is not None)
    init_vertices = np.stack(tuple(get_vertices(link) for link in mesh_links),
                             axis=0)
    faces = np.stack(tuple(link.mesh.faces for link in mesh_links), axis=0)
    textures = np.stack(tuple(link.mesh.textures for link in mesh_links),
                        axis=0)

    init_vertices = torch.tensor(init_vertices, dtype=torch.float).cuda()
    faces = torch.tensor(faces, dtype=torch.int).cuda()
    textures = torch.tensor(textures, dtype=torch.float).cuda()

    vertices = torch.einsum('nvj,jit->nvit', [init_vertices, R]) + t
    vertices = vertices.permute(-1, 0, 1, 2)

    faces = faces.expand(num_templates, *faces.shape)
    textures = textures.expand(num_templates, *textures.shape)

    rgb_images_obj, depth_images_obj = renderer.render(
        torch.reshape(vertices, (-1, *vertices.shape[2:])),
        torch.reshape(faces, (-1, *faces.shape[2:])),
        torch.reshape(textures, (-1, *textures.shape[2:])))
    rgb_images_scene, depth_images_scene, label_images_scene = lib_render.reduceByDepth(
        torch.reshape(rgb_images_obj,
                      vertices.shape[:2] + rgb_images_obj.shape[1:]),
        torch.reshape(depth_images_obj,
                      vertices.shape[:2] + depth_images_obj.shape[1:]),
        max_depth=renderer.far)

    return rgb_images_scene, depth_images_scene, label_images_scene
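
A minimal usage sketch under assumed input conventions: the leading permutes imply that t enters with shape (num_templates, 3) and R with shape (num_templates, 3, 3). The renderer and assembly objects are placeholders from the surrounding library.

    import torch

    T = 16
    R = torch.eye(3).expand(T, 3, 3).cuda()  # (T, 3, 3): identity rotation per template
    t = torch.zeros(T, 3).cuda()             # (T, 3): zero translation per template
    rgb, depth, labels = render(renderer, assembly, t, R)
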