    def background_removal(
            self, point_cloud: open3d.geometry.PointCloud
    ) -> open3d.geometry.PointCloud:
        plane, inliers = point_cloud.segment_plane(distance_threshold=0.005,
                                                   ransac_n=4,
                                                   num_iterations=250)
        return point_cloud.select_down_sample(inliers, invert=True)
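
A minimal usage sketch of the same idea outside the class (the file name is hypothetical, and select_down_sample was renamed to select_by_index in newer Open3D releases):

import open3d

pcd = open3d.io.read_point_cloud("scan.ply")  # hypothetical input file
# Fit the dominant plane (the background) with RANSAC and keep everything else.
plane_model, inliers = pcd.segment_plane(distance_threshold=0.005,
                                         ransac_n=3,
                                         num_iterations=250)
foreground = pcd.select_by_index(inliers, invert=True)
open3d.visualization.draw_geometries([foreground])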
Example #2
def downsample(cloud: o3d.geometry.PointCloud,
               v_size: float,
               with_trace=False):
    """
    Downsample the point cloud with a voxel grid of the given voxel size.

    Parameters
    ----------
    cloud: o3d.geometry.PointCloud
        Cloud to be downsampled.
    v_size: float
        Voxel size to downsample into.
    with_trace: bool
        If True, also return the indices tracing each downsampled point back
        to the original cloud.

    Returns
    ----------
    cloud: o3d.geometry.PointCloud
        Downsampled point cloud (plus the trace indices when with_trace is True)
    """
    if with_trace is True:
        [cloud, indices] = cloud.voxel_down_sample_and_trace(
            v_size, min_bound=[0, 0, 0], max_bound=[1000, 1000, 1000])
        return [cloud, indices]
    else:
        cloud = cloud.voxel_down_sample(v_size)
        return cloud
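
A short, hedged usage sketch for this helper (hypothetical file name; for the traced variant the hard-coded bounds [0, 0, 0]–[1000, 1000, 1000] must cover your data):

import open3d as o3d

cloud = o3d.io.read_point_cloud("cloud.ply")      # hypothetical input file
small = downsample(cloud, v_size=0.01)            # plain voxel-grid downsampling
small_traced, indices = downsample(cloud, v_size=0.01, with_trace=True)
print(len(small.points), "points after downsampling")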
    def convert_to_mesh(self, point_cloud: open3d.geometry.PointCloud):
        point_cloud.estimate_normals()
        distances = point_cloud.compute_nearest_neighbor_distance()
        average_distance = np.mean(distances)
        radius = 1.5 * average_distance
        mesh = open3d.geometry.TriangleMesh()
        mesh = mesh.create_from_point_cloud_ball_pivoting(
            pcd=point_cloud,
            radii=open3d.utility.DoubleVector([radius, radius * 2]))
        mesh = mesh.filter_smooth_simple()
        mesh = mesh.compute_vertex_normals()
        return mesh
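
For reference, the same ball-pivoting reconstruction as a standalone sketch (hypothetical file name; the radii are derived from the mean nearest-neighbour distance exactly as above):

import numpy as np
import open3d

pcd = open3d.io.read_point_cloud("object.ply")  # hypothetical input file
pcd.estimate_normals()
radius = 1.5 * np.mean(pcd.compute_nearest_neighbor_distance())
mesh = open3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
    pcd, open3d.utility.DoubleVector([radius, 2 * radius]))
mesh.compute_vertex_normals()
open3d.visualization.draw_geometries([mesh])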
Example #4
def removeHiddenPoints(pcd: o3d.geometry.PointCloud) -> o3d.geometry.PointCloud:
    '''
    Remove Hidden Points from Open3D pointcloud
    '''

    # TODO: choose exact parameters for removal.
    diameter = np.linalg.norm(
        np.asarray(pcd.get_max_bound()) - np.asarray(pcd.get_min_bound()))
    camera = [0, 0, diameter]
    radius = diameter * 100
    _, pt_map = pcd.hidden_point_removal(camera, radius)
    pcd = pcd.select_by_index(pt_map)
    
    return pcd
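
Hypothetical usage; as in the function above, the virtual camera sits on the +z axis at one bounding-box diagonal from the origin:

import open3d as o3d

pcd = o3d.io.read_point_cloud("scan.ply")  # hypothetical input file
visible = removeHiddenPoints(pcd)
o3d.visualization.draw_geometries([visible])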
Example #5
    def construct_octree(self, open3d_point_cloud: open3d.geometry.PointCloud,
                         include_color: bool) -> torch.Tensor:

        # In case the point cloud has no points, add a single point
        # This is a workaround because I was not able to create an empty octree without getting a segfault
        # TODO: Figure out a better way of making an empty octree (it does not occur if setup correctly, so probably not worth it)
        if not open3d_point_cloud.has_points():
            open3d_point_cloud.points.append([
                (self._min_bound[0] + self._max_bound[0]) / 2,
                (self._min_bound[1] + self._max_bound[1]) / 2,
                (self._min_bound[2] + self._max_bound[2]) / 2
            ])
            open3d_point_cloud.normals.append([0.0, 0.0, 0.0])
            open3d_point_cloud.colors.append([0.0, 0.0, 0.0])

        # Convert open3d point cloud into octree points
        octree_points = conversions.open3d_point_cloud_to_octree_points(
            open3d_point_cloud, include_color)

        # Convert octree points into 1D Tensor (via ndarray)
        # Note: Copy of points here is necessary as ndarray would otherwise be immutable
        octree_points_ndarray = np.frombuffer(np.copy(octree_points.buffer()),
                                              np.uint8)
        octree_points_tensor = torch.from_numpy(octree_points_ndarray)

        # Finally, create an octree from the points
        return self._points_to_octree(octree_points_tensor)
Example #6
def random_down_sample(pc: o3d.geometry.PointCloud, cnt):
    pc_cnt = np.asarray(pc.points).shape[0]
    if pc_cnt < cnt:
        raise ValueError("pc_cnt < sampling cnt")
    # Sample cnt indices from the full cloud, not just from the first cnt points.
    indices = np.arange(pc_cnt)
    if pc_cnt != cnt:
        indices = np.random.choice(indices, size=cnt, replace=False)
    return pc.select_down_sample(indices)
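
A quick sanity check for the sampler (hypothetical file name; select_down_sample is select_by_index in Open3D 0.10+):

import numpy as np
import open3d as o3d

pcd = o3d.io.read_point_cloud("scan.ply")   # hypothetical input file
subset = random_down_sample(pcd, cnt=2048)  # raises ValueError if the cloud has fewer points
print(np.asarray(subset.points).shape)      # -> (2048, 3)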
Example #7
    def filter_ground(self,
                      pcd: o3d.geometry.PointCloud,
                      max_dist: float = 2,
                      height_threshold=0.5,
                      ransac_dist_threshold=0.01,
                      ransac_n=3,
                      ransac_itr=100) -> o3d.geometry.PointCloud:
        """
        Find the ground in the point cloud by first selecting points that lie
        below the car's position plus `height_threshold`, then keeping only the
        points whose z coordinate is less than `max_dist`, and finally running
        RANSAC plane segmentation on the resulting point cloud.

        Note that this function assumes the ground is the largest plane left
        after filtering out everything above the vehicle.

        Args:
            pcd: point cloud to be parsed
            max_dist: maximum z coordinate for a point to be kept
            height_threshold: additional height padding above the vehicle
            ransac_dist_threshold: RANSAC inlier distance threshold
            ransac_n: number of points RANSAC samples per iteration
            ransac_itr: number of RANSAC iterations

        Returns:
            point cloud that only has the ground.

        """
        points = np.asarray(pcd.points)
        colors = np.asarray(pcd.colors)
        points_of_interest = np.where(
            (points[:, 1] < self.vehicle.transform.location.y +
             height_threshold) & (points[:, 2] < max_dist))
        points = points[points_of_interest]
        colors = colors[points_of_interest]
        pcd.points = o3d.utility.Vector3dVector(points)
        pcd.colors = o3d.utility.Vector3dVector(colors)
        plane_model, inliers = pcd.segment_plane(
            distance_threshold=ransac_dist_threshold,
            ransac_n=ransac_n,
            num_iterations=ransac_itr)

        pcd = pcd.select_by_index(inliers)
        return pcd
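
Stripped of the vehicle-specific pre-filtering, the core of the method is plain RANSAC plane segmentation; a minimal standalone sketch with a hypothetical input file:

import open3d as o3d

pcd = o3d.io.read_point_cloud("lidar_frame.ply")  # hypothetical input file
plane_model, inliers = pcd.segment_plane(distance_threshold=0.01,
                                         ransac_n=3,
                                         num_iterations=100)
ground = pcd.select_by_index(inliers)                   # largest plane = ground
obstacles = pcd.select_by_index(inliers, invert=True)   # everything else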
Example #8
def clean_outlier_blobs(
        pcd: o3d.geometry.PointCloud) -> o3d.geometry.PointCloud:
    """Remove points that are not part of the main structure

    Arguments:
        pcd:    The point cloud to filter
    Returns:
        A new filtered point cloud
    """
    pcd, _ = pcd.remove_radius_outlier(nb_points=100, radius=5)
    return pcd
def calculate_error(cloud1: o3d.geometry.PointCloud,
                    cloud2: o3d.geometry.PointCloud) -> float:
    assert len(cloud1.points) == len(
        cloud2.points), "len(cloud1.points) != len(cloud2.points)"

    centroid, _ = cloud1.compute_mean_and_covariance()
    weights = np.linalg.norm(np.asarray(cloud1.points) - centroid, 2, axis=1)
    distances = np.linalg.norm(
        np.asarray(cloud1.points) - np.asarray(cloud2.points), 2,
        axis=1) / len(weights)
    return np.sum(distances / weights)
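
A hedged usage sketch: comparing a cloud against a slightly translated copy of itself (hypothetical file name):

import copy
import numpy as np
import open3d as o3d

cloud_a = o3d.io.read_point_cloud("model.ply")   # hypothetical input file
cloud_b = copy.deepcopy(cloud_a)
cloud_b.translate(np.array([0.001, 0.0, 0.0]))   # small perturbation
print("weighted error:", calculate_error(cloud_a, cloud_b))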
Example #10
def computeNormals(cloud: o3d.geometry.PointCloud, orient_normals=False):
    """
    Compute the normals for the points in the point cloud.

    Parameters
    ----------
    cloud: o3d.geometry.PointCloud
        Cloud for which to compute the normals.
    orient_normals: bool
        If True, orient the normals towards the camera location.

    Returns
    ----------
    cloud: o3d.geometry.PointCloud
        Cloud with the normals computed
    """
    cloud.estimate_normals()
    if orient_normals is True:
        cloud.orient_normals_towards_camera_location()
    return cloud
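
Hypothetical usage (by default Open3D orients the normals towards a camera at the origin):

import numpy as np
import open3d as o3d

cloud = o3d.io.read_point_cloud("scan.ply")  # hypothetical input file
cloud = computeNormals(cloud, orient_normals=True)
print(np.asarray(cloud.normals)[:3])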
Example #11
def remove_by_z_threshold(pcd: o3d.geometry.PointCloud,
                          z_threshold: float) -> o3d.geometry.PointCloud:
    """Remove points that have z-value below some threshold

    Arguments:
        pcd:            The point cloud to filter
        z_threshold:    Threshold for z value
    Returns:
        A new filtered point cloud
    """
    xyz = np.asarray(pcd.points)
    rgb = np.asarray(pcd.colors)

    floor_mask = xyz[:, 2] > z_threshold
    xyz = xyz[floor_mask]
    rgb = rgb[floor_mask]

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(xyz)
    pcd.colors = o3d.utility.Vector3dVector(rgb)
    return pcd
Example #12
def adjust_colors_from_normals(
        pcd: o3d.geometry.PointCloud) -> o3d.geometry.PointCloud:
    """Adjust colors based on inverting Lambert's cosine law

    Arguments:
        pcd:        The point cloud to adjust
    Returns:
        New adjusted point cloud
    """
    pcd.estimate_normals()
    xyz = np.asarray(pcd.points)
    rgb = np.asarray(pcd.colors)

    normals_z = np.abs(np.asarray(pcd.normals)[:, 2])
    nz_cols = np.column_stack((normals_z, normals_z, normals_z))
    rgb = np.clip(rgb / nz_cols, 0.0, 1.0)

    pcd_out = o3d.geometry.PointCloud()
    pcd_out.points = o3d.utility.Vector3dVector(xyz)
    pcd_out.colors = o3d.utility.Vector3dVector(rgb)
    return pcd_out
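
Under Lambert's cosine law, the intensity observed by a camera looking along the z-axis scales roughly with |n_z| for unit normals, so dividing each colour by |n_z| approximately undoes that shading. A hedged usage sketch (hypothetical file name):

import open3d as o3d

pcd = o3d.io.read_point_cloud("scan.ply")   # hypothetical input file
unshaded = adjust_colors_from_normals(pcd)  # divides RGB by |n_z|, clipped to [0, 1]
o3d.visualization.draw_geometries([unshaded])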
Example #13
def remove_divergent_normals(pcd: o3d.geometry.PointCloud,
                             threshold: float) -> o3d.geometry.PointCloud:
    """Remove points whose normals are not pointing towards the camera (z-axis)

    Arguments:
        pcd:        The point cloud to filter
        threshold:  The removal threshold for the z-component of the normal
    Returns:
        A new filtered point cloud
    """

    pcd.estimate_normals()
    normals_z = np.asarray(pcd.normals)[:, 2]

    mask = normals_z > threshold
    xyz_out = np.asarray(pcd.points)[mask]
    rgb_out = np.asarray(pcd.colors)[mask]

    pcd_out = o3d.geometry.PointCloud()
    pcd_out.points = o3d.utility.Vector3dVector(xyz_out)
    pcd_out.colors = o3d.utility.Vector3dVector(rgb_out)
    return pcd_out
Example #14
def create_3d_visualizer(
    point_cloud: o3d.geometry.PointCloud,
    inliers: Sequence[int] = None,
    visualizer_size: Sequence[int] = [800, 800],
    plane_color: Sequence[float] = [1.0, 0.0, 0.0],
) -> o3d.visualization.Visualizer:

    vis = o3d.visualization.VisualizerWithKeyCallback()
    vis.create_window(width=visualizer_size[0],
                      height=visualizer_size[1],
                      left=0,
                      top=0)

    mesh = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1,
                                                             origin=np.array(
                                                                 [0, 0, 0]))
    if inliers is None:
        o3d.visualization.draw_geometries([point_cloud, mesh])
        vis.add_geometry(point_cloud)
        vis.add_geometry(mesh)
    else:
        inlier_cloud = point_cloud.select_by_index(inliers)
        inlier_cloud.paint_uniform_color(plane_color)
        outlier_cloud = point_cloud.select_by_index(inliers, invert=True)
        vis.add_geometry(inlier_cloud)
        vis.add_geometry(outlier_cloud)
        vis.add_geometry(mesh)

    # Rotate view around target point cloud
    view_control = vis.get_view_control()
    view_control.set_front([0, 0, -1])
    view_control.set_up([0, -1, 0])
    view_control.set_zoom(1)
    view_control.set_lookat(np.array(
        point_cloud.points).mean(0))  # LookAt Point Cloud centroid
    view_control.rotate(visualizer_size[0] / 4.0, -visualizer_size[1] / 4.0)

    return vis
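
A hedged example of driving the visualizer with the inliers of a RANSAC plane fit (hypothetical file name):

import open3d as o3d

pcd = o3d.io.read_point_cloud("scan.ply")  # hypothetical input file
plane_model, inliers = pcd.segment_plane(distance_threshold=0.01,
                                         ransac_n=3,
                                         num_iterations=100)
vis = create_3d_visualizer(pcd, inliers=inliers)
vis.run()              # blocks until the window is closed
vis.destroy_window()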
Example #15
    def cropScene(self, object: o3d.geometry.PointCloud, 
                        scene: o3d.geometry.PointCloud, 
                        radius: float):
        """
        Select the points of the scene point cloud which are within a certain
        distance from the object center.
            After applying the ground truth pose to the object, the center of
            the object 'c' is taken as a reference point. The resulting cloud
            only keeps those points which are within a sphere centered at 'c'
            with radius 'radius'.

        Parameters
        ----------
        object : o3d.geometry.PointCloud
            Point cloud containing the object
        scene : o3d.geometry.PointCloud
            Point cloud containing the scene to be cropped
        radius : float
            Distance threshold
        
        Returns
        -------
        cropped_scene : o3d.geometry.PointCloud
            Cropped scene point cloud
        """
        # Build the Kd-tree to search through the points of the scene pointcloud
        scene_tree = o3d.geometry.KDTreeFlann(scene)
        # Get the center of the object
        c = object.get_center()
        # Get the neighboring points (points of the scene which are closer than
        # the radius to the object center)
        [k, idx, _] = scene_tree.search_radius_vector_3d(c, radius)
        if k == 0:
            print('ERROR: cropping failed.')
        else:
            # Crop scene point cloud by selecting only the neighboring points 
            cropped_scene = scene.select_by_index(idx)
            return cropped_scene         
Example #16
    def addPointcloud(self,
                      cloud: o3d.geometry.PointCloud,
                      t=None,
                      R=None,
                      color=None):
        if color is not None:
            cloud.paint_uniform_color(color)
        if t is not None:
            cloud.translate(t)
        if R is not None:
            cloud.rotate(R)
        self.visualization.append(cloud)
Example #17
    def preprocess_point_cloud(
            self, open3d_point_cloud: open3d.geometry.PointCloud,
            camera_frame_id: str, robot_frame_id: str, min_bound: List[float],
            max_bound: List[float], normals_radius: float,
            normals_max_nn: int) -> open3d.geometry.PointCloud:

        # Check if the input point cloud contains any points before processing
        if not open3d_point_cloud.has_points():
            print("Point cloud has no points")
            return open3d_point_cloud

        # Get transformation from camera to robot and use it to transform point
        # cloud into robot's base coordinate frame
        transform = self.lookup_transform_sync(target_frame=robot_frame_id,
                                               source_frame=camera_frame_id)
        transform_mat = conversions.transform_to_matrix(transform=transform)
        open3d_point_cloud = open3d_point_cloud.transform(transform_mat)

        # Crop point cloud to include only the workspace
        open3d_point_cloud = open3d_point_cloud.crop(
            bounding_box=open3d.geometry.AxisAlignedBoundingBox(
                min_bound=min_bound, max_bound=max_bound))

        # Check if any points remain in the area after cropping
        if not open3d_point_cloud.has_points():
            print(
                "Point cloud has no points after cropping to the workspace volume"
            )
            return open3d_point_cloud

        # Estimate normal vector for each cloud point and orient these towards the camera
        open3d_point_cloud.estimate_normals(
            search_param=open3d.geometry.KDTreeSearchParamHybrid(
                radius=normals_radius, max_nn=normals_max_nn),
            fast_normal_computation=True)

        open3d_point_cloud.orient_normals_towards_camera_location(
            camera_location=transform_mat[0:3, 3])

        return open3d_point_cloud
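
Without the TF lookup, the crop-and-estimate-normals part of the method looks roughly like this (hypothetical file name and workspace bounds):

import open3d

pcd = open3d.io.read_point_cloud("workspace.ply")  # hypothetical input file
pcd = pcd.crop(open3d.geometry.AxisAlignedBoundingBox(min_bound=[-0.5, -0.5, 0.0],
                                                      max_bound=[0.5, 0.5, 0.5]))
pcd.estimate_normals(
    search_param=open3d.geometry.KDTreeSearchParamHybrid(radius=0.05, max_nn=30),
    fast_normal_computation=True)
pcd.orient_normals_towards_camera_location(camera_location=[0.0, 0.0, 1.0])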
Example #18
def point_plane_residual(x: np.ndarray, base_transform: np.ndarray,
                         pcd_source: o3d.geometry.PointCloud,
                         pcd_target: o3d.geometry.PointCloud,
                         kdtree_target: o3d.geometry.KDTreeFlann,
                         max_correspond_dist):
    assert pcd_target.has_normals()

    num_points = len(pcd_source.points)
    points_source = np.asarray(pcd_source.points)

    nearest_points = np.empty((num_points, 3), dtype=float)
    nearest_normals = np.empty((num_points, 3), dtype=float)
    points_target = np.asarray(pcd_target.points)
    normals_target = np.asarray(pcd_target.normals)

    for i in range(num_points):
        k, idx, _ = kdtree_target.search_hybrid_vector_3d(
            pcd_source.points[i], max_correspond_dist, 1)
        if k == 1:
            nearest_points[i, :] = points_target[idx[0], :]
            nearest_normals[i, :] = normals_target[idx[0], :]
        else:
            nearest_points[i, :] = points_source[i, :]
            nearest_normals[i, :] = np.array(
                [0, 0, 0], dtype=float)  # cause a zero residual

    angle = x[0]
    t1 = x[1]
    t2 = x[2]

    transform = rotation_matrix(angle, rotation_axis)
    transform[:3, 3] = t1 * translation_axis1 + t2 * translation_axis2
    transform = np.dot(transform, base_transform)

    position_residuals = np.dot(transform[:3, :3],
                                points_source.T) + transform[:3, 3].reshape(
                                    3, 1) - nearest_points.T
    residuals = position_residuals * nearest_normals.T
    residuals = np.sum(residuals, axis=0)

    return residuals
Example #19
def preprocess_point_cloud(
    pcd: o3d.geometry.PointCloud, voxel_size: np.float64
) -> Tuple[o3d.geometry.PointCloud, o3d.pipelines.registration.Feature]:
    print(":: Downsample with a voxel size %.3f." % voxel_size)
    pcd_down = pcd.voxel_down_sample(voxel_size)

    radius_normal = voxel_size * 2
    print(":: Estimate normal with search radius %.3f." % radius_normal)
    pcd_down.estimate_normals(
        o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))

    # KDTreeSearchParamHybrid: radius - search radius, max_nn - at maximum,
    # max_nn neighbours will be searched
    radius_feature = voxel_size * 5
    print(":: Compute FPFH feature with search radius %.3f.\n" %
          radius_feature)
    pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
        pcd_down,
        o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature,
                                             max_nn=100))
    # print(pcd_fpfh, '\n')
    return pcd_down, pcd_fpfh
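
Typical usage: preprocess both clouds and hand the downsampled geometry plus FPFH features to a global registration step (file names are hypothetical; the registration call itself is omitted here):

import open3d as o3d

voxel_size = 0.05
source = o3d.io.read_point_cloud("source.pcd")  # hypothetical input files
target = o3d.io.read_point_cloud("target.pcd")
source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
# source_down/target_down and the FPFH features would normally feed a global
# registration step (RANSAC-based or Fast Global Registration) from
# o3d.pipelines.registration.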
Example #20
def cicp(pcd_source: o3d.geometry.PointCloud,
         pcd_target: o3d.geometry.PointCloud, base_transform: np.ndarray,
         max_correspond_dist_coarse, max_correspond_dist_fine):
    kdtree = o3d.geometry.KDTreeFlann(pcd_target)
    if not pcd_target.has_normals():
        pcd_target.estimate_normals(
            search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1,
                                                              max_nn=30))

    x0 = np.array([0, 0, 0], dtype=float)
    result = least_squares(point_plane_residual,
                           x0,
                           jac='2-point',
                           method='trf',
                           loss='soft_l1',
                           args=(base_transform, pcd_source, pcd_target,
                                 kdtree, max_correspond_dist_coarse))

    x0 = result.x
    result = least_squares(point_plane_residual,
                           x0,
                           jac='2-point',
                           method='trf',
                           loss='soft_l1',
                           args=(base_transform, pcd_source, pcd_target,
                                 kdtree, max_correspond_dist_fine))

    x = result.x
    angle = x[0]
    t1 = x[1]
    t2 = x[2]

    transform = rotation_matrix(angle, rotation_axis)
    transform[:3, 3] = t1 * translation_axis1 + t2 * translation_axis2
    transform = np.dot(transform, base_transform)

    return transform, result
Example #21
def get_best_views(pcd: o3d.geometry.PointCloud, n: int) -> list:

    # Remove the table plane using RANSAC
    # TODO

    pcd.estimate_normals()
    pcd.orient_normals_consistent_tangent_plane(k=10)

    # We need a mesh, so we do Poisson surface reconstruction
    with o3d.utility.VerbosityContextManager(
            o3d.utility.VerbosityLevel.Info) as cm:
        mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
            pcd=pcd, depth=4)
    print(mesh)
    # print_attributes(mesh)

    # Visualize the mesh
    # o3d.visualization.draw_geometries([mesh])

    # For this mesh, we need to find the best n views
    best_views = []

    for view_i in range(n):
        loop = tqdm.tqdm(range(1000))

        # Initialize the model
        if best_views:  # if we already have some views, use the opposite of the last view
            initial_camera_position = -best_views[-1].detach().cpu().numpy()
            # normalize and multiply by 16
            initial_camera_position /= np.linalg.norm(initial_camera_position)
            initial_camera_position *= 16
            model = Model(mesh,
                          "data/gaussian_reference.png",
                          tqdm_loop=loop,
                          initial_camera_position=initial_camera_position)
        else:
            model = Model(mesh, "data/gaussian_reference.png", tqdm_loop=loop)

        model.cuda()

        # optimizer = chainer.optimizers.Adam(alpha=0.1)
        optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
        for i in loop:
            optimizer.zero_grad()
            loss = model()
            loss.backward()
            optimizer.step()
            images, _, _ = model.renderer(model.vertices, model.faces,
                                          torch.tanh(model.textures))

            # print(images.shape)

            # image = (images.detach().cpu().numpy()[0].copy() * 255).astype(np.uint8)

            image = (
                images.detach().cpu().numpy()[0].transpose(1, 2, 0).copy() *
                255).astype(np.uint8)

            # https://www.programcreek.com/python/example/89325/cv2.Sobel
            sobel_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=5)
            sobel_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=5)
            abs_sobel_x = cv2.convertScaleAbs(sobel_x)
            abs_sobel_y = cv2.convertScaleAbs(sobel_y)
            edges = cv2.addWeighted(np.uint8(abs_sobel_x), 0.5,
                                    np.uint8(abs_sobel_y), 0.5, 0)
            # edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
            # print("shapes", image.shape, edges.shape)
            concat = cv2.hconcat([image, edges])
            cv2.putText(concat, f"loss: {loss.item():.2f}", (6, 250),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2,
                        cv2.LINE_AA)
            imsave('/tmp/_tmp_%04d.png' % i, concat)

            if loss.item() < 70:
                break
        make_gif(f'standardized_interface_output{view_i}.mp4')
        best_views.append(model.renderer.eye)

    return best_views
Example #22
    def getMatches(self, object:o3d.geometry.PointCloud, 
                         scene:o3d.geometry.PointCloud, 
                         distance_threshold:float, 
                         radial_vector_length, 
                         scalar_projection):
        visualize = False
        # Container to save the sample data
        sample_data  = np.array([])
        # Construct kd-tree in order to search through the object points
        object_tree = o3d.geometry.KDTreeFlann(object)
        # Compute normals of the scene point cloud 
        scene = computeNormals(cloud=scene, orient_normals=True)
        scene_points = np.asarray(scene.points)
        scene_normals = np.asarray(scene.normals)
        [number_of_points_scene,_]=scene_points.shape     
  
        scene_points_indices = np.array([], dtype=int)
        # Loop through all the points in the scene point cloud
        for i in range(number_of_points_scene): 
            # Find the closest point of the object to the scene point
            [k, idx, _] = object_tree.search_knn_vector_3d(scene_points[i], 1)
            object_point = np.asarray(object.points)[idx] 
            # Compute the Euclidean distance between the object and scene point
            dist = getEuclidianDistance(scene_points[i],object_point)
            # If the points are close enough, save the data
            if dist < distance_threshold: 
                scene_points_indices = np.append(scene_points_indices, i)
                # Storing the sample data:
                # [scene_point(x,y,z),
                # radial_vector_length,
                # scalar_projection,
                # normal]    
                point_data = np.hstack([scene_points[i], 
                                        radial_vector_length[idx], 
                                        scalar_projection[idx], 
                                        scene_normals[i]])   
                if sample_data.size == 0:
                    sample_data = point_data
                else:
                    sample_data = np.row_stack([sample_data, point_data])

        if visualize is True:
            vis = visualizer()
            scene.paint_uniform_color([0,1,0])
            np.asarray(scene.colors)[scene_points_indices[1:], :] = [0,0,1]
            vis.addPointcloud(scene)
            vis.show()

        scene_tree = o3d.geometry.KDTreeFlann(scene)
        indices = np.random.uniform(low=0, high=(sample_data.__len__()-1), size=50)
        for it in indices:
            it = int(it)
            seed = sample_data[it, 0:3]
            [k, idx, _] = scene_tree.search_radius_vector_3d(seed, v.SEGMENT_RADIUS)
            
            if visualize is True:
                vis = visualizer()
                scene.paint_uniform_color([0,1,0])
                np.asarray(scene.colors)[idx[1:], :] = [0,0,1]
                vis.addPointcloud(scene)
                vis.addSphere(center=seed, color=[1,0,0], radius=2)
                vis.show()

            idx = np.asarray(idx)
            np.random.shuffle(idx)
            idx = idx[0:v.NUM_OF_POINTS]
            points = copy.deepcopy(np.asarray(scene.points)[idx])
            points = np.asarray([[p - seed for p in points]])
            normals = [copy.deepcopy(np.asarray(scene.normals)[idx])]

            if visualize is True:
                vis = visualizer()
                scene.paint_uniform_color([0,1,0])
                np.asarray(scene.colors)[idx[1:], :] = [0,0,1]
                vis.addPointcloud(scene)
                vis.addSphere(center=seed, color=[1,0,0], radius=2)
                vis.show()
            
            [_, m, _] = points.shape
            if m == v.NUM_OF_POINTS:
                gf_vector = self.getGlobalFeatureVectors([points])
            else:
                return None
        
            if self.dataset.size == 0:
                self.dataset = np.asarray([sample_data[it, :]])
                self.dataset_out = np.asarray(gf_vector)
                self.dataset_cloud = np.asarray(points)
                self.dataset_normals = np.asarray(normals)
            else:
                self.dataset = np.row_stack([self.dataset, [sample_data[it, :]]])
                self.dataset_out = np.row_stack([self.dataset_out, gf_vector])
                self.dataset_cloud = np.row_stack([self.dataset_cloud, points])
                self.dataset_normals = np.row_stack([self.dataset_normals, normals])
                
        return sample_data       
Example #23
def set_pcd_color(pcd: o3d.geometry.PointCloud, colors: np.ndarray):
    pcd.colors = o3d.utility.Vector3dVector(colors)
    return pcd
    def downsample(
            self, point_cloud: open3d.geometry.PointCloud
    ) -> open3d.geometry.PointCloud:
        voxel_down_pcd = point_cloud.voxel_down_sample(voxel_size=1.7)
        return voxel_down_pcd.uniform_down_sample(every_k_points=2)
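
Hypothetical usage of the two-stage downsampling above (the voxel size of 1.7 suggests the original clouds use millimetre-scale units):

import open3d

pcd = open3d.io.read_point_cloud("scan.ply")  # hypothetical input file
reduced = pcd.voxel_down_sample(voxel_size=1.7).uniform_down_sample(every_k_points=2)
print(len(reduced.points), "points left after voxel + uniform downsampling")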