def convert_to_mesh(self, point_cloud: open3d.geometry.PointCloud):
    # Ball pivoting requires per-point normals
    point_cloud.estimate_normals()

    # Derive the pivoting-ball radius from the average point spacing
    distances = point_cloud.compute_nearest_neighbor_distance()
    average_distance = np.mean(distances)
    radius = 1.5 * average_distance

    # Reconstruct a triangle mesh with the ball-pivoting algorithm
    mesh = open3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
        pcd=point_cloud,
        radii=open3d.utility.DoubleVector([radius, radius * 2]))

    # Smooth the result and recompute normals for shading
    mesh = mesh.filter_smooth_simple()
    mesh.compute_vertex_normals()
    return mesh
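
A minimal usage sketch, assuming the method lives on a converter class (the class name and file names below are hypothetical):

import open3d

pcd = open3d.io.read_point_cloud("scan.ply")  # hypothetical input file
mesh = MeshConverter().convert_to_mesh(pcd)   # hypothetical owning class
open3d.io.write_triangle_mesh("scan_mesh.ply", mesh)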
Example #2
def computeNormals(cloud: o3d.geometry.PointCloud, orient_normals=False):
    """
    Compute the normals for the points in the point cloud.

    Parameters
    ----------
    cloud: o3d.geometry.PointCloud
        Cloud for which to compute the normals.
    orient_normals: bool
        Whether to orient the normals towards the camera location.

    Returns
    -------
    cloud: o3d.geometry.PointCloud
        Cloud with the normals computed.
    """
    cloud.estimate_normals()
    if orient_normals:
        cloud.orient_normals_towards_camera_location()
    return cloud
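
A quick usage sketch, assuming a cloud loaded with Open3D's I/O helpers (the file name is illustrative):

import open3d as o3d

cloud = o3d.io.read_point_cloud("scan.pcd")  # illustrative file name
cloud = computeNormals(cloud, orient_normals=True)
print(cloud.has_normals())  # True once normals are estimated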
Example #3
def adjust_colors_from_normals(
        pcd: o3d.geometry.PointCloud) -> o3d.geometry.PointCloud:
    """Adjust colors based on inverting Lambert's cosine law

    Arguments:
        pcd:        The point clout to adjust
    Returns:
        New adjusted point cloud
    """
    pcd.estimate_normals()
    xyz = np.asarray(pcd.points)
    rgb = np.asarray(pcd.colors)

    # |n_z| approximates cos(theta) for a camera looking along the z-axis
    normals_z = np.abs(np.asarray(pcd.normals)[:, 2])
    nz_cols = np.column_stack((normals_z, normals_z, normals_z))
    # Guard against division by zero for normals perpendicular to z
    rgb = np.clip(rgb / np.maximum(nz_cols, 1e-6), 0.0, 1.0)

    pcd_out = o3d.geometry.PointCloud()
    pcd_out.points = o3d.utility.Vector3dVector(xyz)
    pcd_out.colors = o3d.utility.Vector3dVector(rgb)
    return pcd_out
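
A short sketch of how this could be used to de-shade a colored scan (the file name is an assumption):

import open3d as o3d

pcd = o3d.io.read_point_cloud("colored_scan.ply")  # assumed input file
deshaded = adjust_colors_from_normals(pcd)
o3d.visualization.draw_geometries([deshaded])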
Example #4
    def preprocess_point_cloud(
            self, open3d_point_cloud: open3d.geometry.PointCloud,
            camera_frame_id: str, robot_frame_id: str, min_bound: List[float],
            max_bound: List[float], normals_radius: float,
            normals_max_nn: int) -> open3d.geometry.PointCloud:

        # Check that the input point cloud contains any points
        if not open3d_point_cloud.has_points():
            print("Point cloud has no points")
            return open3d_point_cloud

        # Get transformation from camera to robot and use it to transform point
        # cloud into robot's base coordinate frame
        transform = self.lookup_transform_sync(target_frame=robot_frame_id,
                                               source_frame=camera_frame_id)
        transform_mat = conversions.transform_to_matrix(transform=transform)
        open3d_point_cloud = open3d_point_cloud.transform(transform_mat)

        # Crop point cloud to include only the workspace
        open3d_point_cloud = open3d_point_cloud.crop(
            bounding_box=open3d.geometry.AxisAlignedBoundingBox(
                min_bound=min_bound, max_bound=max_bound))

        # Check if any points remain in the area after cropping
        if not open3d_point_cloud.has_points():
            print(
                "Point cloud has no points after cropping to the workspace volume"
            )
            return open3d_point_cloud

        # Estimate normal vector for each cloud point and orient these towards the camera
        open3d_point_cloud.estimate_normals(
            search_param=open3d.geometry.KDTreeSearchParamHybrid(
                radius=normals_radius, max_nn=normals_max_nn),
            fast_normal_computation=True)

        open3d_point_cloud.orient_normals_towards_camera_location(
            camera_location=transform_mat[0:3, 3])

        return open3d_point_cloud
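
The cropping and normal-estimation calls can also be exercised standalone; a minimal sketch with illustrative bounds and search parameters:

import numpy as np
import open3d

pcd = open3d.io.read_point_cloud("workspace.pcd")  # illustrative file name
pcd = pcd.crop(open3d.geometry.AxisAlignedBoundingBox(
    min_bound=[-0.5, -0.5, 0.0], max_bound=[0.5, 0.5, 0.5]))
pcd.estimate_normals(
    search_param=open3d.geometry.KDTreeSearchParamHybrid(radius=0.05,
                                                         max_nn=30))
pcd.orient_normals_towards_camera_location(camera_location=np.zeros(3))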
Example #5
def remove_divergent_normals(pcd: o3d.geometry.PointCloud,
                             threshold: float) -> o3d.geometry.PointCloud:
    """Remove points whose normals are not pointing towards the camera (z-axis)

    Arguments:
        pcd:        The point cloud to filter
        threshold:  The removal threshold for the z-component of the normal
    Returns:
        A new filtered point cloud
    """

    pcd.estimate_normals()
    normals_z = np.asarray(pcd.normals)[:, 2]

    # Keep only points whose normal z-component exceeds the threshold
    mask = normals_z > threshold
    xyz_out = np.asarray(pcd.points)[mask]
    rgb_out = np.asarray(pcd.colors)[mask]

    pcd_out = o3d.geometry.PointCloud()
    pcd_out.points = o3d.utility.Vector3dVector(xyz_out)
    pcd_out.colors = o3d.utility.Vector3dVector(rgb_out)
    return pcd_out
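
A usage sketch; a threshold of 0.5 keeps points whose normals lie within about 60 degrees of the z-axis (the value and file name are illustrative):

import open3d as o3d

pcd = o3d.io.read_point_cloud("scan.ply")  # illustrative file name
filtered = remove_divergent_normals(pcd, threshold=0.5)
print(len(filtered.points), "of", len(pcd.points), "points kept")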
Example #6
def get_best_views(pcd: o3d.geometry.PointCloud, n: int) -> list:
    """Find the n best camera views of a point cloud by reconstructing a
    mesh and optimizing each camera position with a differentiable renderer
    against a reference image."""

    # Remove the table plane using RANSAC
    # TODO

    pcd.estimate_normals()
    pcd.orient_normals_consistent_tangent_plane(k=10)

    # We need a mesh, so we do Poisson surface reconstruction
    with o3d.utility.VerbosityContextManager(
            o3d.utility.VerbosityLevel.Info):
        mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
            pcd=pcd, depth=4)
    print(mesh)
    # print_attributes(mesh)

    # Visualize the mesh
    # o3d.visualization.draw_geometries([mesh])

    # For this mesh, we need to find the best n views
    best_views = []

    for view_i in range(n):
        loop = tqdm.tqdm(range(1000))

        # Initialize the model
        if best_views:  # if we already have some views, use the opposite of the last view
            initial_camera_position = -best_views[-1].detach().cpu().numpy()
            # normalize and multiply by 16
            initial_camera_position /= np.linalg.norm(initial_camera_position)
            initial_camera_position *= 16
            model = Model(mesh,
                          "data/gaussian_reference.png",
                          tqdm_loop=loop,
                          initial_camera_position=initial_camera_position)
        else:
            model = Model(mesh, "data/gaussian_reference.png", tqdm_loop=loop)

        model.cuda()

        optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
        for i in loop:
            optimizer.zero_grad()
            loss = model()
            loss.backward()
            optimizer.step()
            images, _, _ = model.renderer(model.vertices, model.faces,
                                          torch.tanh(model.textures))

            image = (
                images.detach().cpu().numpy()[0].transpose(1, 2, 0).copy() *
                255).astype(np.uint8)

            # https://www.programcreek.com/python/example/89325/cv2.Sobel
            sobel_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=5)
            sobel_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=5)
            abs_sobel_x = cv2.convertScaleAbs(sobel_x)
            abs_sobel_y = cv2.convertScaleAbs(sobel_y)
            edges = cv2.addWeighted(np.uint8(abs_sobel_x), 0.5,
                                    np.uint8(abs_sobel_y), 0.5, 0)
            concat = cv2.hconcat([image, edges])
            cv2.putText(concat, f"loss: {loss.item():.2f}", (6, 250),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2,
                        cv2.LINE_AA)
            imsave('/tmp/_tmp_%04d.png' % i, concat)

            if loss.item() < 70:
                break
        make_gif(f'standardized_interface_output{view_i}.mp4')
        best_views.append(model.renderer.eye)

    return best_views
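
Calling it is straightforward, although the function assumes a CUDA device, the Model/renderer helpers, and the reference image path used above (the input file name is illustrative):

import open3d as o3d

pcd = o3d.io.read_point_cloud("object_scan.ply")  # illustrative file name
best_views = get_best_views(pcd, n=3)  # camera eye positions, one per view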