Code Example #1
    def test_box(self):
        v = np.array([[0.5, 0.5, 0.5], [-0.5, 0.5, 0.5],
                      [0.5, -0.5, 0.5], [-0.5, -0.5, 0.5],
                      [0.5, 0.5, -0.5], [-0.5, 0.5, -0.5],
                      [0.5, -0.5, -0.5], [-0.5, -0.5, -0.5]])
        f = np.array(
            [[1, 2, 3], [4, 3, 2], [1, 3, 5], [7, 5, 3], [1, 5, 2], [6, 2, 5],
             [8, 6, 7], [5, 7, 6], [8, 7, 4], [3, 4, 7], [8, 4, 6], [2, 6, 4]],
            dtype=np.uint32) - 1
        # unit normals: all cube vertices are equidistant from the origin
        n = v / np.linalg.norm(v[0])

        # test considering omnidirectional cameras
        vis, n_dot_cam = visibility_compute(v=v,
                                            f=f,
                                            cams=np.array([[1.0, 0.0, 0.0]]))
        self.assertTrue(((v.T[0] > 0) == vis).all())
        # test omnidirectional cameras with a minimum dot product of .5
        # between the vertex-to-camera ray and the vertex normal
        vis, n_dot_cam = visibility_compute(v=v,
                                            f=f,
                                            n=n,
                                            cams=np.array([[1e10, 0.0, 0.0]]))
        vis = np.logical_and(vis, n_dot_cam > .5)
        self.assertTrue(((v.T[0] > 0) == vis).all())
        # test considering two omnidirectional cameras
        vis, n_dot_cam = visibility_compute(v=v,
                                            f=f,
                                            cams=np.array([[0.0, 1.0, 0.0],
                                                           [0.0, 0.0, 1.0]]))
        self.assertTrue(((v.T[1:3] > 0) == vis).all())

        vextra = np.array(
            [[.9, .9, .9], [-.9, .9, .9], [.9, -.9, .9], [-.9, -.9, .9]],
            dtype=np.double)
        fextra = np.array([[1, 2, 3], [4, 3, 2]], dtype=np.uint32) - 1
        # test considering extra meshes that can block light
        cams = np.array([[0.0, 0.0, 10.0]])
        vis, n_dot_cam = visibility_compute(v=v,
                                            f=f,
                                            cams=cams,
                                            extra_v=vextra,
                                            extra_f=fextra)
        self.assertTrue((np.zeros_like(v.T[0]) == vis).all())

        # test considering extra meshes that can block light, but only if
        # the distance is at least 1.0
        vis, n_dot_cam = visibility_compute(v=v,
                                            f=f,
                                            cams=np.array([[0.0, 0.0, 10.0]]),
                                            extra_v=vextra,
                                            extra_f=fextra,
                                            min_dist=1.0)
        self.assertTrue(((v.T[2] > 0) == vis).all())
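
The test above exercises the main keyword arguments of visibility_compute (v, f, n, cams, extra_v, extra_f, min_dist). Below is a minimal sketch of the basic call, assuming the import path of the MPI mesh library (psbody-mesh) these examples appear to draw from; verify it against your installation.

# Minimal sketch, assuming visibility_compute lives in psbody.mesh.visibility.
import numpy as np
from psbody.mesh.visibility import visibility_compute

# a single triangle in the z=0 plane and one camera above it
v = np.array([[0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0]], dtype=np.double)
f = np.array([[0, 1, 2]], dtype=np.uint32)
cams = np.array([[0.0, 0.0, 1.0]], dtype=np.double)

vis, n_dot_cam = visibility_compute(v=v, f=f, cams=cams)
print(vis.shape)  # (n_cams, n_vertices); 1 where the camera sees the vertex
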
Code Example #2
    def vertices_visibility(self, vertices: Tensor, faces: Tensor,
                            camtrans: Tensor):
        '''
        This function checks the visibility of vertices for the given camera,
        using the MPI mesh library.

        The coordinate system is the same as the .obj file's, which differs
        from ours and Blender's.

        Process:
            1. The library builds an AABB tree for the mesh.
            2. For each vertex of the mesh, it computes the ray pointing from
            the vertex to the camera center.
            3. Given the ray and the mesh, it checks whether the ray intersects
            the mesh (vertex invisible) and returns the results.

        It assumes that the given camera always points at the origin, so the
        input is the camera translation only.

        It also supports adding the camera frame size as a constraint and
        additional meshes as occluders. (not used here)
            
        Parameters
        ----------
        vertices : Tensor
            Vertices of the mesh to be checked.
        faces : Tensor
            Faces of the mesh.
        camtrans : Tensor
            Camera translation of the camera to compute visibility.

        Returns
        -------
        visIndices : Tensor
            A mask vector indicating the visibility.

        '''
        assert camtrans.shape[1] == 3, 'camtrans must be Bx3 Tensor.'
        assert len(vertices.shape) == 2 and len(faces.shape) == 2

        # cast data type
        if vertices.requires_grad:
            vert = vertices.detach().cpu().double().numpy()
        else:
            vert = vertices.cpu().double().numpy()

        face = faces.cpu().numpy().astype('uint32')
        cam = camtrans.cpu().double().numpy()

        # compute visibility
        visIndices, _ = visibility_compute(v=vert, f=face, cams=cam)

        # # debug
        # visIndices = visIndices.T.repeat(3, axis = 1)
        # my_mesh = Mesh(v=vert, f=face, vc=visIndices)
        # mvs = MeshViewer()
        # mvs.set_static_meshes([my_mesh])

        return visIndices
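
Given the docstring above, a hypothetical call would look as follows; module stands in for an instance of the surrounding class, and the tensor sizes are illustrative only (SMPL-like).

import torch

# hypothetical inputs; any Vx3 vertices, Fx3 faces, Bx3 camera translation
vertices = torch.rand(6890, 3)
faces = torch.randint(0, 6890, (13776, 3))
camtrans = torch.tensor([[0.0, 0.0, 2.0]])  # camera assumed to point at origin

mask = module.vertices_visibility(vertices, faces, camtrans)  # 1 = visible
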
Code Example #3
    def get_visibility(self, verts, cam_dir):

        # construct the trimesh
        triangle_mesh = trimesh.Trimesh(vertices=verts,
                                        faces=self.faces, process=False)

        # get the vertices and faces of the trimesh
        vertices = triangle_mesh.vertices
        faces = triangle_mesh.faces

        # compute visibility of vertices
        vis, _ = visibility_compute(v=vertices, f=faces.astype(np.uint32),
                                    cams=np.double(cam_dir.reshape((1, 3))))
        vertex_visibility = vis[0]

        return vertex_visibility
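
A hypothetical call, where renderer stands in for an instance of the class that owns self.faces; the input shapes are assumptions.

import numpy as np

verts = np.random.rand(6890, 3)      # Vx3 vertex positions (illustrative)
cam_dir = np.array([0.0, 0.0, 2.0])  # passed to visibility_compute as the camera center
mask = renderer.get_visibility(verts, cam_dir)  # length-V array, 1 = visible
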
Code Example #4
def my_color_map_by_proj(svs,
                         algn,
                         cams,
                         face_indices_map,
                         b_coords_map,
                         source_images=None,
                         silhs=None,
                         save_path='texture.png'):

    nCams = len(cams)
    texture_maps = []
    weights = []
    vis = [None] * nCams
    for i in range(nCams):

        print("working on camera %d" % i)
        alignment = Mesh(v=algn[i].v, f=algn[i].f)
        (points, normals) = uv_to_xyz_and_normals(alignment, face_indices_map,
                                                  b_coords_map)

        # add artificial vertices and normals
        alignment.points = points
        alignment.v = np.vstack((alignment.v, points))
        alignment.vn = np.vstack((alignment.vn, normals))

        img = source_images[i]
        if use_old_camera:
            camera = old_camera(cams[i], img.shape[1], img.shape[0])

            cam_vis_ndot = np.array(
                visibility_compute(v=alignment.v, f=alignment.f,
                                   n=alignment.vn,
                                   cams=np.array([camera.origin.flatten()])))
        else:
            camera = cams[i]
            cams[i].v = points
            cam_vis_ndot = np.array(
                visibility_compute(v=alignment.v, f=alignment.f,
                                   n=alignment.vn,
                                   cams=np.array([camera.t.r.flatten()])))

        cam_vis_ndot = cam_vis_ndot[:, 0, :]
        (cmap, vmap) = camera_projection(alignment, camera, cam_vis_ndot, img,
                                         face_indices_map, b_coords_map)

        vis[i] = cam_vis_ndot[0][-alignment.points.shape[0]:]
        n_dot = cam_vis_ndot[1][-alignment.points.shape[0]:]
        vis[i][n_dot < 0] = 0
        n_dot[n_dot < 0] = 0
        texture_maps.append(cmap)
        weights.append(vmap)

        imgf = render_mesh(alignment,
                           img.shape[1],
                           img.shape[0],
                           cams[i],
                           img=img,
                           world_frame=True)

        cv2.imwrite('texture_' + str(i) + '.png', texture_maps[i])
        cv2.imwrite('texture_w_' + str(i) + '.png', 255 * vmap)

        # restore old vertices and normals
        alignment.v = alignment.v[:(len(alignment.v) - points.shape[0])]
        alignment.vn = alignment.vn[:(len(alignment.vn) - points.shape[0])]
        del alignment.points

    # Create a global texture map
    # Federica Bogo's code
    sum_of_weights = np.array(weights).sum(axis=0)
    sum_of_weights[sum_of_weights == 0] = .00001
    for weight in weights:
        weight /= sum_of_weights

    if settings['max_tex_weight']:
        W = np.asarray(weights)
        M = np.max(W, axis=0)
        for i in range(len(weights)):
            B = weights[i] != 0
            weights[i] = (W[i, :, :] == M) * B

    if clean_from_green:
        print('Cleaning green pixels')
        weights_green = clean_green(texture_maps, source_images, silhs)
        weights_all = [weights_green[i] * w for i, w in enumerate(weights)]
    else:
        weights_all = weights

    sum_of_weights = np.array(weights_all).sum(axis=0)
    if not settings['max_tex_weight']:
        sum_of_weights[sum_of_weights == 0] = .00001
        for w in weights_all:
            w /= sum_of_weights

    full_texture_med = np.median(np.array([texture_maps]),
                                 axis=1).squeeze() / 255.
    T = np.array([texture_maps]).squeeze() / 255.
    W = np.zeros_like(T)
    if nCams > 1:
        W[:, :, :, 0] = np.array([weights_all]).squeeze()
        W[:, :, :, 1] = np.array([weights_all]).squeeze()
        W[:, :, :, 2] = np.array([weights_all]).squeeze()
    else:
        W[:, :, 0] = np.array([weights_all]).squeeze()
        W[:, :, 1] = np.array([weights_all]).squeeze()
        W[:, :, 2] = np.array([weights_all]).squeeze()

    # Average texture
    for i, texture in enumerate(texture_maps):
        for j in range(texture.shape[2]):
            texture[:, :, j] = weights_all[i] * texture[:, :, j] / 255.
    full_texture = np.sum(np.array([texture_maps]), axis=1).squeeze()
    cv2.imwrite(save_path, full_texture * 255)
    return full_texture, sum_of_weights
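
The global texture step above reduces to a per-texel weighted average: weights are normalized to sum to one wherever any camera contributes, and the weighted per-camera textures are then summed. A standalone illustration of that normalization on synthetic data:

import numpy as np

# two synthetic 2x2 single-channel textures with per-texel weights
texture_maps = [np.full((2, 2), 100.0), np.full((2, 2), 200.0)]
weights = [np.array([[1.0, 0.0], [0.5, 0.0]]),
           np.array([[1.0, 2.0], [0.5, 0.0]])]

sum_of_weights = np.array(weights).sum(axis=0)
sum_of_weights[sum_of_weights == 0] = .00001  # avoid division by zero
weights = [w / sum_of_weights for w in weights]

blended = sum(w * t for w, t in zip(weights, texture_maps))
print(blended)  # weighted average per texel; ~0 where no camera contributes
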
Code Example #5
    def render(self, img, verts, cam_transformation, cam_dir, angle=None,
               axis=None, mesh_filename=None, velocity_colors=None):

        # construct the trimesh
        triangle_mesh = trimesh.Trimesh(vertices=verts,
                                        faces=self.faces, process=False)

        # transform the trimesh
        Rx = trimesh.transformations.rotation_matrix(math.radians(180),
                                                     [1, 0, 0])
        triangle_mesh.apply_transform(Rx)
        if mesh_filename is not None:
            triangle_mesh.export(mesh_filename)
        if angle and axis:
            R = trimesh.transformations.rotation_matrix(
                math.radians(angle), axis)
            triangle_mesh.apply_transform(R)

        # get the vertices and faces of the trimesh
        vertices = triangle_mesh.vertices
        faces = triangle_mesh.faces

        # set camera pose
        sx, sy, tx, ty = cam_transformation
        camera = WeakPerspectiveCamera(scale=[sx, sy],
                                       translation=[tx, ty],
                                       zfar=1000.)
        camera_pose = np.eye(4)
        cam_node = self.scene.add(camera, pose=camera_pose)

        # set rendering flags
        if self.wireframe:
            render_flags = RenderFlags.RGBA | RenderFlags.ALL_WIREFRAME
        else:
            render_flags = RenderFlags.RGBA

        # compute visibility of vertices
        vis, _ = visibility_compute(v=vertices, f=faces.astype(np.uint32),
                                    cams=np.double(cam_dir.reshape((1, 3))))
        visibility = np.repeat(np.expand_dims(vis[0], axis=1), 3, axis=1)
        num_vis = np.sum(vis[0])

        # render vertices with visibility
        visibility_colors = np.zeros_like(velocity_colors)
        visibility_colors[vis[0] == 1] = np.array([0, 1, 0])
        visibility_colors[vis[0] == 0] = np.array([0, 0, 1])
        triangle_mesh.visual.vertex_colors = visibility_colors
        mesh = pyrender.Mesh.from_trimesh(triangle_mesh)
        # mesh = pyrender.Mesh.from_points(vertices, colors=visibility_colors)

        mesh_node = self.scene.add(mesh, 'mesh')
        rgb, depth = self.renderer.render(self.scene, flags=render_flags)

        # blend original background and visualized vertices
        valid_mask = (rgb[:, :, -1] > 0)[:, :, np.newaxis]
        output_img = rgb[:, :, :-1] * valid_mask
        if SHOW_BACKGROUND:
            output_img += (1 - valid_mask) * img
        visibility_image = output_img.astype(np.uint8)
        self.scene.remove_node(mesh_node)

        # create 3D model for rendering velocity
        if VISUALIZATION_TYPE == "points":
            vertices = vertices[visibility == 1].reshape((num_vis, 3))
            velocity_colors = velocity_colors[visibility == 1].reshape(
                (num_vis, 3))
            mesh = pyrender.Mesh.from_points(vertices, colors=velocity_colors)
        elif VISUALIZATION_TYPE == "mesh":
            triangle_mesh.visual.vertex_colors = velocity_colors
            mesh = pyrender.Mesh.from_trimesh(triangle_mesh)
        else:
            exit("ERROR: VISUALIZATION_TYPE can only be 'points' or 'mesh'")

        # render human with velocity
        mesh_node = self.scene.add(mesh, 'mesh')
        rgb, depth = self.renderer.render(self.scene, flags=render_flags)

        # blend original background and visualized vertices
        valid_mask = (rgb[:, :, -1] > 0)[:, :, np.newaxis]
        output_img = rgb[:, :, :-1] * valid_mask
        if SHOW_BACKGROUND:
            output_img += (1 - valid_mask) * img
        velocity_image = output_img.astype(np.uint8)
        self.scene.remove_node(mesh_node)

        # define coolwarm mapping
        cmap = plt.get_cmap("coolwarm")
        coolwarm_mapping = matplotlib.cm.ScalarMappable(cmap=cmap)

        # create an example mesh to show green to red
        triangle_mesh_example = triangle_mesh.copy()
        velocity_colors_example = np.zeros_like(velocity_colors)
        max_y = np.max(vertices[:, 1])
        min_y = np.min(vertices[:, 1])
        color_values = (vertices[:, 1] - min_y) / (max_y - min_y)
        velocity_colors_example = coolwarm_mapping.to_rgba(
            color_values)[:, :-1]
        triangle_mesh_example.visual.vertex_colors = velocity_colors_example
        mesh_example = pyrender.Mesh.from_trimesh(triangle_mesh_example)
        mesh_node_example = self.scene.add(mesh_example, 'mesh')
        rgb_example, _ = self.renderer.render(self.scene, flags=render_flags)
        valid_mask = (rgb_example[:, :, -1] > 0)[:, :, np.newaxis]
        output_img = rgb_example[:, :, :-1] * valid_mask
        example_image = output_img.astype(np.uint8)
        self.scene.remove_node(mesh_node_example)
        self.scene.remove_node(cam_node)

        return visibility_image, velocity_image, example_image
Code Example #6
    def forward(self,
                body_model_output,
                camera,
                gt_joints,
                joints_conf,
                body_model_faces,
                joint_weights,
                use_vposer=False,
                pose_embedding=None,
                scan_tensor=None,
                visualize=False,
                scene_v=None,
                scene_vn=None,
                scene_f=None,
                ftov=None,
                **kwargs):
        projected_joints = camera(body_model_output.joints)
        # Calculate the weights for each joints
        weights = (joint_weights * joints_conf if self.use_joints_conf else
                   joint_weights).unsqueeze(dim=-1)

        # Calculate the distance of the projected joints from
        # the ground truth 2D detections
        joint_diff = self.robustifier(gt_joints - projected_joints)
        joint_loss = (torch.sum(weights**2 * joint_diff) * self.data_weight**2)

        # Calculate the loss from the Pose prior
        if use_vposer:
            pprior_loss = (pose_embedding.pow(2).sum() *
                           self.body_pose_weight**2)
        else:
            pprior_loss = torch.sum(
                self.body_pose_prior(
                    body_model_output.body_pose,
                    body_model_output.betas)) * self.body_pose_weight**2

        shape_loss = torch.sum(self.shape_prior(
            body_model_output.betas)) * self.shape_weight**2
        # Calculate the prior over the joint rotations. This is a heuristic
        # used to prevent extreme rotation of the elbows and knees
        body_pose = body_model_output.full_pose[:, 3:66]
        angle_prior_loss = torch.sum(
            self.angle_prior(body_pose)) * self.bending_prior_weight**2

        # Apply the prior on the pose space of the hand
        left_hand_prior_loss, right_hand_prior_loss = 0.0, 0.0
        if self.use_hands and self.left_hand_prior is not None:
            left_hand_prior_loss = torch.sum(
                self.left_hand_prior(
                    body_model_output.left_hand_pose)) * \
                self.hand_prior_weight ** 2

        if self.use_hands and self.right_hand_prior is not None:
            right_hand_prior_loss = torch.sum(
                self.right_hand_prior(
                    body_model_output.right_hand_pose)) * \
                self.hand_prior_weight ** 2

        expression_loss = 0.0
        jaw_prior_loss = 0.0
        if self.use_face:
            expression_loss = torch.sum(self.expr_prior(
                body_model_output.expression)) * \
                self.expr_prior_weight ** 2

            if hasattr(self, 'jaw_prior'):
                jaw_prior_loss = torch.sum(
                    self.jaw_prior(
                        body_model_output.jaw_pose.mul(self.jaw_prior_weight)))

        pen_loss = 0.0
        # Calculate the loss due to interpenetration
        if (self.interpenetration and self.coll_loss_weight.item() > 0):
            batch_size = projected_joints.shape[0]
            triangles = torch.index_select(body_model_output.vertices, 1,
                                           body_model_faces).view(
                                               batch_size, -1, 3, 3)

            with torch.no_grad():
                collision_idxs = self.search_tree(triangles)

            # Remove unwanted collisions
            if self.tri_filtering_module is not None:
                collision_idxs = self.tri_filtering_module(collision_idxs)

            if collision_idxs.ge(0).sum().item() > 0:
                pen_loss = torch.sum(
                    self.coll_loss_weight *
                    self.pen_distance(triangles, collision_idxs))

        s2m_dist = 0.0
        m2s_dist = 0.0
        # calculate the scan2mesh and mesh2scan loss from the sparse point cloud
        if ((self.s2m or self.m2s)
                and (self.s2m_weight > 0 or self.m2s_weight > 0)
                and scan_tensor is not None):
            vertices_np = \
                body_model_output.vertices.detach().cpu().numpy().squeeze()
            body_faces_np = body_model_faces.detach().cpu().numpy().reshape(
                -1, 3)
            m = Mesh(v=vertices_np, f=body_faces_np)

            (vis, n_dot) = visibility_compute(v=m.v,
                                              f=m.f,
                                              cams=np.array([[0.0, 0.0, 0.0]]))
            vis = vis.squeeze()

            if self.s2m and self.s2m_weight > 0 and vis.sum() > 0:
                s2m_dist, _, _, _ = distChamfer(
                    scan_tensor,
                    body_model_output.vertices[:, np.where(vis > 0)[0], :])
                s2m_dist = self.s2m_robustifier(s2m_dist.sqrt())
                s2m_dist = self.s2m_weight * s2m_dist.sum()
            if self.m2s and self.m2s_weight > 0 and vis.sum() > 0:
                _, m2s_dist, _, _ = distChamfer(
                    scan_tensor,
                    body_model_output.vertices[
                        :, np.where(np.logical_and(vis > 0,
                                                   self.body_mask))[0], :])

                m2s_dist = self.m2s_robustifier(m2s_dist.sqrt())
                m2s_dist = self.m2s_weight * m2s_dist.sum()

        # Transform vertices to world coordinates
        if self.R is not None and self.t is not None:
            vertices = body_model_output.vertices
            nv = vertices.shape[1]
            vertices.squeeze_()
            vertices = self.R.mm(vertices.t()).t() + self.t.repeat([nv, 1])
            vertices.unsqueeze_(0)

        # Compute scene penetration using signed distance field (SDF)
        sdf_penetration_loss = 0.0
        if self.sdf_penetration and self.sdf_penetration_weight > 0:
            grid_dim = self.sdf.shape[0]
            sdf_ids = torch.round((vertices.squeeze() - self.grid_min) /
                                  self.voxel_size).to(dtype=torch.long)
            sdf_ids.clamp_(min=0, max=grid_dim - 1)

            norm_vertices = (vertices - self.grid_min) / (
                self.grid_max - self.grid_min) * 2 - 1
            body_sdf = F.grid_sample(self.sdf.view(1, 1, grid_dim, grid_dim,
                                                   grid_dim),
                                     norm_vertices[:, :, [2, 1, 0]].view(
                                         1, nv, 1, 1, 3),
                                     padding_mode='border')
            sdf_normals = self.sdf_normals[sdf_ids[:, 0], sdf_ids[:, 1],
                                           sdf_ids[:, 2]]
            # if there are no penetrating vertices then set sdf_penetration_loss = 0
            if body_sdf.lt(0).sum().item() < 1:
                sdf_penetration_loss = torch.tensor(0.0,
                                                    dtype=joint_loss.dtype,
                                                    device=joint_loss.device)
            else:
                sdf_penetration_loss = self.sdf_penetration_weight * (
                    body_sdf[body_sdf < 0].unsqueeze(dim=-1).abs() *
                    sdf_normals[body_sdf.view(-1) < 0, :]).pow(2).sum(
                        dim=-1).sqrt().sum()

        # Compute the contact loss
        contact_loss = 0.0
        if self.contact and self.contact_loss_weight > 0:
            # select contact vertices
            contact_body_vertices = vertices[:, self.contact_verts_ids, :]
            contact_dist, _, idx1, _ = distChamfer(
                contact_body_vertices.contiguous(), scene_v)

            body_triangles = torch.index_select(vertices, 1,
                                                body_model_faces).view(
                                                    1, -1, 3, 3)
            # Calculate the edges of the triangles
            # Size: BxFx3
            edge0 = body_triangles[:, :, 1] - body_triangles[:, :, 0]
            edge1 = body_triangles[:, :, 2] - body_triangles[:, :, 0]
            # Compute the cross product of the edges to find the normal vector of
            # the triangle
            body_normals = torch.cross(edge0, edge1, dim=2)
            # Normalize the result to get a unit vector
            body_normals = body_normals / \
                torch.norm(body_normals, 2, dim=2, keepdim=True)
            # compute the vertex normals
            body_v_normals = torch.mm(ftov, body_normals.squeeze())
            body_v_normals = body_v_normals / \
                torch.norm(body_v_normals, 2, dim=1, keepdim=True)

            # vertex normals of the contact vertices
            contact_body_verts_normals = body_v_normals[
                self.contact_verts_ids, :]
            # scene normals of the closest points on the scene surface to the contact vertices
            contact_scene_normals = scene_vn[
                :, idx1.squeeze().to(dtype=torch.long), :].squeeze()

            # compute the angle between contact_verts normals and scene normals
            angles = torch.asin(
                torch.norm(torch.cross(contact_body_verts_normals,
                                       contact_scene_normals),
                           2,
                           dim=1,
                           keepdim=True)) * 180 / np.pi

            # consider only the vertices whose normals match
            valid_contact_mask = (angles.le(self.contact_angle) +
                                  angles.ge(180 - self.contact_angle)).ge(1)
            valid_contact_ids = (
                valid_contact_mask.squeeze().nonzero().squeeze())

            contact_dist = self.contact_robustifier(
                contact_dist[:, valid_contact_ids].sqrt())
            contact_loss = self.contact_loss_weight * contact_dist.mean()

        total_loss = (joint_loss + pprior_loss + shape_loss +
                      angle_prior_loss + pen_loss + jaw_prior_loss +
                      expression_loss + left_hand_prior_loss +
                      right_hand_prior_loss + m2s_dist + s2m_dist +
                      sdf_penetration_loss + contact_loss)
        if visualize:
            print(
                'total:{:.2f}, joint_loss:{:0.2f},  s2m:{:0.2f}, m2s:{:0.2f}, penetration:{:0.2f}, contact:{:0.2f}'
                .format(total_loss.item(), joint_loss.item(),
                        torch.tensor(s2m_dist).item(),
                        torch.tensor(m2s_dist).item(),
                        torch.tensor(sdf_penetration_loss).item(),
                        torch.tensor(contact_loss).item()))
        return total_loss
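
In the scan-to-mesh block above, visibility_compute (with a camera at the origin) gates which body vertices enter the Chamfer distance. A stripped-down sketch of that masking step, using synthetic shapes:

import numpy as np
import torch

# vis as returned by visibility_compute for one camera, squeezed to length V
vis = np.array([[1, 0, 1, 1, 0]]).squeeze()
vertices = torch.rand(1, 5, 3)  # batch of body-model vertices

visible = vertices[:, np.where(vis > 0)[0], :]  # keep visible vertices only
print(visible.shape)  # torch.Size([1, 3, 3]); this is what distChamfer sees
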
Code Example #7
    def render(self,
               img,
               verts,
               cam_transformation,
               cam_dir,
               angle=None,
               axis=None,
               mesh_filename=None,
               velocity_colors=None):

        # construct the mesh
        mesh = trimesh.Trimesh(vertices=verts, faces=self.faces, process=False)

        # transform the mesh
        Rx = trimesh.transformations.rotation_matrix(math.radians(180),
                                                     [1, 0, 0])
        mesh.apply_transform(Rx)
        if mesh_filename is not None:
            mesh.export(mesh_filename)
        if angle and axis:
            R = trimesh.transformations.rotation_matrix(
                math.radians(angle), axis)
            mesh.apply_transform(R)

        # get the vertices and faces of the mesh
        vertices = mesh.vertices
        faces = mesh.faces

        # set camera pose
        sx, sy, tx, ty = cam_transformation
        camera = WeakPerspectiveCamera(scale=[sx, sy],
                                       translation=[tx, ty],
                                       zfar=1000.)
        camera_pose = np.eye(4)
        cam_node = self.scene.add(camera, pose=camera_pose)

        # set rendering flags
        if self.wireframe:
            render_flags = RenderFlags.RGBA | RenderFlags.ALL_WIREFRAME
        else:
            render_flags = RenderFlags.RGBA

        # compute visibility of vertices
        vis, _ = visibility_compute(v=vertices,
                                    f=faces.astype(np.uint32),
                                    cams=np.double(np.array([[0, 0, 1]])))
        visibility = np.repeat(np.expand_dims(vis[0], axis=1), 3, axis=1)
        num_vis = np.sum(vis[0])

        # render vertices with visibility
        visibility_colors = np.zeros_like(velocity_colors)
        visibility_colors[vis[0] == 1] = np.array([0, 1, 0])
        visibility_colors[vis[0] == 0] = np.array([0, 0, 1])
        mesh = pyrender.Mesh.from_points(vertices, colors=visibility_colors)
        mesh_node = self.scene.add(mesh, 'mesh')
        rgb, depth = self.renderer.render(self.scene, flags=render_flags)

        # blend original background and visualized vertices
        valid_mask = (rgb[:, :, -1] > 0)[:, :, np.newaxis]
        output_img = rgb[:, :, :-1] * valid_mask + (1 - valid_mask) * img * 0
        visibility_image = output_img.astype(np.uint8)
        self.scene.remove_node(mesh_node)

        # render vertices with velocity
        vertices = vertices[visibility == 1].reshape((num_vis, 3))
        velocity_colors = velocity_colors[visibility == 1].reshape(
            (num_vis, 3))
        mesh = pyrender.Mesh.from_points(vertices, colors=velocity_colors)
        mesh_node = self.scene.add(mesh, 'mesh')
        rgb, depth = self.renderer.render(self.scene, flags=render_flags)

        # blend original background and visualized vertices
        valid_mask = (rgb[:, :, -1] > 0)[:, :, np.newaxis]
        output_img = rgb[:, :, :-1] * valid_mask + (1 - valid_mask) * img * 0
        velocity_image = output_img.astype(np.uint8)
        self.scene.remove_node(mesh_node)
        self.scene.remove_node(cam_node)

        return visibility_image, velocity_image