def capture(config, video=None):
    # Configure streams
    pipeline, process_modules, filters, proj_mat, t265_device = create_pipeline(config)
    t265_pipeline = t265_device['pipeline']
    logging.info("Pipeline Created")

    # Long-lived objects. These are the objects that hold all the algorithms for surface extraction.
    # They need to be long-lived (objects) because they hold state (thread scheduler, image data structures, etc.)
    ll_objects = dict()
    ll_objects['pl'] = Polylidar3D(**config['polylidar'])
    ll_objects['ga'] = GaussianAccumulatorS2(level=config['fastga']['level'])
    ll_objects['ico'] = IcoCharts(level=config['fastga']['level'])

    if video:
        frame_width = config['color']['width'] * 2
        frame_height = config['color']['height']
        out_vid = cv2.VideoWriter(video, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30, (frame_width, frame_height))

    # Create homogeneous transform from the sensor frame to the wheelchair frame
    sensor_mount_frame = config['frames']['sensor_mount']
    sensor_frame = config['frames']['sensor']
    sensor_to_wheel_chair_transform = create_transform(np.array(sensor_mount_frame['translation']), sensor_mount_frame['rotation']) \
        @ create_transform(sensor_frame['translation'], sensor_frame['rotation'])
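    # Note (interpretation, assuming create_transform returns a 4x4 pose of a child frame in its
    # parent frame): the product composes mount-in-wheelchair with sensor-in-mount, so the result
    # maps points expressed in the sensor frame into the wheelchair frame.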

    # print(sensor_to_wheel_chair_transform)
    # sys.exit()
    all_records = []
    counter = 0
    try:
        while True:
            t00 = time.perf_counter()
            try:
                color_image, depth_image, meta = get_frames(pipeline, t265_pipeline, process_modules, filters, config)
            except RuntimeError:
                # This only gets thrown in playback mode from a recorded file when frames "run out"
                logging.info("Out of frames")
                break
            t0 = time.perf_counter()
            if color_image is None or not valid_frames(color_image, depth_image, **config['polygon']['frameskip']):
                logging.debug("Invalid Frames")
                continue
            t1 = time.perf_counter()
            counter += 1
            if counter < 1:
                continue

            try:
                # Get 6DOF Pose at appropriate timestamp
                if config['tracking']['enabled']:
                    euler_t265 = get_pose_matrix(meta['ts'])
                    logging.info('euler_t265: %r', euler_t265)

                # flag to write results
                have_results = False
                if config['show_polygon']:
                    # planes, obstacles, geometric_planes, timings, o3d_mesh = get_polygon(depth_image, config, ll_objects, **meta)
                    planes, obstacles, geometric_planes, timings = get_polygon(depth_image, config, ll_objects, **meta)
                    timings['t_get_frames'] = (t0 - t00) * 1000
                    timings['t_check_frames'] = (t1 - t0) * 1000
                    all_records.append(timings)

                    curb_height, first_plane, second_plane = analyze_planes(geometric_planes)
                    fname = config['playback']['file'].split('/')[1].split('.')[0]
                    # dump(dict(first_plane=first_plane, second_plane=second_plane), f"data/scratch_test/planes_{fname}_{counter:04}.joblib")
                    
                    # curb height must be greater than 2 cm and first_plane must have been found
                    if curb_height > 0.02 and first_plane is not None:
                        top_plane = choose_plane(first_plane, second_plane)
                        top_points, top_normal = top_plane['all_points'], top_plane['normal_ransac']
                        filtered_top_points = filter_points(top_points)  # <100 us
                        _, height, _, best_fit_lines = extract_lines_wrapper(filtered_top_points, top_normal, return_only_one_line=True)
                        if best_fit_lines:
                            platform_center_sensor_frame = best_fit_lines[0]['hplane_point']
                            platform_normal_sensor_frame = best_fit_lines[0]['hplane_normal']
                            # print(platform_center_sensor_frame, platform_normal_sensor_frame)
                            result = get_turning_manuever(platform_center_sensor_frame, platform_normal_sensor_frame, \
                                        sensor_to_wheel_chair_transform, poi_offset=config.get('poi_offset', 0.7), debug=False)
                            plt.show()

                            orthog_dist = result['ortho_dist_platform']
                            distance_of_interest = result['dist_poi']
                            orientation = result['beta']
                            initial_turn = result['first_turn']
                            final_turn = result['second_turn']

                            logging.info("Frame #: %s, Orthogonal Distance to Platform: %.02f m, Orientation: %0.1f Degrees, Distance to POI: %.02f m, " \
                                "First Turn: %.01f degrees, Second Turn: %.01f degrees", 
                                         counter, orthog_dist, orientation, distance_of_interest, initial_turn, final_turn)
                            
                            plot_points(best_fit_lines[0]['square_points'], proj_mat, color_image, config)
                            if len(best_fit_lines) > 1:
                                plot_points(best_fit_lines[1]['square_points'], proj_mat, color_image, config)
                            have_results = True
                        else:
                            logging.warning("Line Detector Failed")
                    else:
                        logging.warning("Couldn't find the street and sidewalk surface")

                    # Only send a drive command when a full maneuver was computed this frame;
                    # otherwise initial_turn/final_turn are undefined here.
                    if have_results:
                        initial_turnsign = 0 if initial_turn < 0 else 1
                        final_turnsign = 0 if final_turn < 0 else 1

                        # Pad the numbers:
                        # curb_height, distance_of_interest --> 0.00   (meters)
                        # initial_turn, final_turn          --> 000.00 (degrees, absolute value)
                        # initial_turnsign, final_turnsign  --> 0 (negative) / 1 (positive)
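                        # Worked example with illustrative values (not taken from the original source):
                        # curb_height=0.15, distance_of_interest=1.20, initial_turn=-12.5, final_turn=8.3
                        # encode to the fixed-width message "0.151.20012.500008.301\n"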
                        # `ser` is presumably a module-level serial.Serial handle opened elsewhere.
                        ser.write(("{:04.2f}".format(curb_height) + "{:04.2f}".format(distance_of_interest) +
                                   "{:06.2f}".format(abs(initial_turn)) + str(initial_turnsign) +
                                   "{:06.2f}".format(abs(final_turn)) + str(final_turnsign) + "\n").encode())

                    # sys.exit()
                    # Plot polygon in rgb frame
                    plot_planes_and_obstacles(planes, obstacles, proj_mat, None, color_image, config)

                    # import ipdb; ipdb.set_trace()
                # Show images
                if config.get("show_images"):
                    # Convert to open cv image types (BGR)
                    color_image_cv, depth_image_cv = colorize_images_open_cv(color_image, depth_image, config)
                    # Stack both images horizontally
                    images = np.hstack((color_image_cv, depth_image_cv))
                    if have_results:
                        cv2.putText(images, 'Curb Height: {:.2f} m'.format(curb_height), (20, 360), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Orthogonal Distance: {:.2f} m'.format(orthog_dist), (20, 380), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Distance to Point of Interest: {:.2f} m'.format(distance_of_interest), (20, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Initial Turn: {:.2f} deg'.format(initial_turn), (20, 420), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Angle for Orientation: {:.2f} deg'.format(orientation), (20, 440), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Angle for Final Turn: {:.2f} deg'.format(final_turn), (20, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                    cv2.imshow('RealSense Color/Depth (Aligned)', images)
                    
                    
                    if video:
                        out_vid.write(images)

                    res = cv2.waitKey(1)
                    if res == ord('p'):
                        uid = uuid.uuid4()
                        logging.info("Saving Picture: {}".format(uid))
                        cv2.imwrite(path.join(PICS_DIR, "{}_color.jpg".format(uid)), color_image_cv)
                        cv2.imwrite(path.join(PICS_DIR, "{}_stack.jpg".format(uid)), images)
                    if res == ord('m'):
                        plt.imshow(np.asarray(ll_objects['ico'].image_to_vertex_idx))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].mask))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].image))
                        plt.show()                  
                    

                    to_save_frames = config['save'].get('frames')
                    if config['playback']['enabled'] and to_save_frames is not None and counter in to_save_frames:
                        logging.info("Saving Picture: {}".format(counter))
                        cv2.imwrite(path.join(PICS_DIR, "{}_color.jpg".format(counter)), color_image_cv)
                        cv2.imwrite(path.join(PICS_DIR, "{}_stack.jpg".format(counter)), images)

                # logging.info(f"Frame %d; Get Frames: %.2f; Check Valid Frame: %.2f; Laplacian: %.2f; Bilateral: %.2f; Mesh: %.2f; FastGA: %.2f; Plane/Poly: %.2f; Filtering: %.2f; Geometric Planes: %.2f",
                #              counter, timings['t_get_frames'], timings['t_check_frames'], timings['t_laplacian'], timings['t_bilateral'], timings['t_mesh'], timings['t_fastga_total'],
                #              timings['t_polylidar_planepoly'], timings['t_polylidar_filter'], timings['t_geometric_planes'])
                logging.info(f"Curb Height: %.2f", curb_height)
                
            except Exception as e:
                logging.exception("Error!")
    finally:
        pipeline.stop()
    if video is not None:
        out_vid.release()
    cv2.destroyAllWindows()
def capture(config, video=None):
    # Configure streams
    pipeline, process_modules, filters, proj_mat, t265_device = create_pipeline(config)
    t265_pipeline = t265_device['pipeline']
    logging.info("Pipeline Created")

    # Long-lived objects. These are the objects that hold all the algorithms for surface extraction.
    # They need to be long-lived (objects) because they hold state (thread scheduler, image data structures, etc.)
    ll_objects = dict()
    ll_objects['pl'] = Polylidar3D(**config['polylidar'])
    ll_objects['ga'] = GaussianAccumulatorS2(level=config['fastga']['level'])
    ll_objects['ico'] = IcoCharts(level=config['fastga']['level'])

    if video:
        frame_width = config['color']['width'] * 2
        frame_height = config['color']['height']
        out_vid = cv2.VideoWriter(video, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30, (frame_width, frame_height))

    all_records = []
    counter = 0
    try:
        while True:
            t00 = time.perf_counter()
            try:
                color_image, depth_image, meta = get_frames(pipeline, t265_pipeline, process_modules, filters, config)
            except RuntimeError:
                # This only gets thrown in playback mode from a recorded file when frames "run out"
                logging.info("Out of frames")
                break
            t0 = time.perf_counter()
            if color_image is None or not valid_frames(color_image, depth_image, **config['polygon']['frameskip']):
                logging.debug("Invalid Frames")
                continue
            t1 = time.perf_counter()
            counter += 1
            # if counter < 10:
            #     continue

            try:
                # Get 6DOF Pose at appropriate timestamp
                if config['tracking']['enabled']:
                    euler_t265 = get_pose_matrix(meta['ts'])
                    logging.info('euler_t265: %r', euler_t265)

                if config['show_polygon']:
                    # planes, obstacles, timings, o3d_mesh = get_polygon(depth_image, config, ll_objects, **meta)
                    planes, obstacles, timings = get_polygon(depth_image, config, ll_objects, **meta)
                    timings['t_get_frames'] = (t0 - t00) * 1000
                    timings['t_check_frames'] = (t1 - t0) * 1000
                    all_records.append(timings)

                    # Plot polygon in rgb frame
                    plot_planes_and_obstacles(planes, obstacles, proj_mat, None, color_image, config)

                # Show images
                if config.get("show_images"):
                    # Convert to open cv image types (BGR)
                    color_image_cv, depth_image_cv = colorize_images_open_cv(color_image, depth_image, config)
                    # Stack both images horizontally
                    images = np.hstack((color_image_cv, depth_image_cv))
                    cv2.imshow('RealSense Color/Depth (Aligned)', images)
                    if video:
                        out_vid.write(images)
                    res = cv2.waitKey(1)
                    if res == ord('p'):
                        uid = uuid.uuid4()
                        logging.info("Saving Picture: {}".format(uid))
                        cv2.imwrite(path.join(PICS_DIR, "{}_color.jpg".format(uid)), color_image_cv)
                        cv2.imwrite(path.join(PICS_DIR, "{}_stack.jpg".format(uid)), images)
                    if res == ord('m'):
                        pass
                    to_save_frames = config['save'].get('frames')
                    if config['playback']['enabled'] and to_save_frames is not None and counter in to_save_frames:
                        logging.info("Saving Picture: {}".format(counter))
                        cv2.imwrite(path.join(PICS_DIR, "{}_color.jpg".format(counter)), color_image_cv)
                        cv2.imwrite(path.join(PICS_DIR, "{}_stack.jpg".format(counter)), images)

                logging.info(f"Frame %d; Get Frames: %.2f; Check Valid Frame: %.2f; Laplacian: %.2f; Bilateral: %.2f; Mesh: %.2f; FastGA: %.2f; Plane/Poly: %.2f; Filtering: %.2f",
                             counter, timings['t_get_frames'], timings['t_check_frames'], timings['t_laplacian'], timings['t_bilateral'], timings['t_mesh'], timings['t_fastga_total'],
                             timings['t_polylidar_planepoly'], timings['t_polylidar_filter'])
            except Exception as e:
                logging.exception("Error!")
    finally:
        pipeline.stop()
    if video is not None:
        out_vid.release()
    cv2.destroyAllWindows()

    df = pd.DataFrame.from_records(all_records)
    print(df.mean())
    if config['save'].get('timings') != "":
        df.to_csv(config['save'].get('timings', 'data/timings.csv'))
Example #3
def main():

    kwargs = dict(num_groups=2, group_size=1000, dist=100.0, seed=1)
    # generate random, normally distributed clusters of points as an N x 2 numpy array
    # (here 2 groups of 1000 points each).
    points = generate_test_points(**kwargs)
    lmax = get_estimated_lmax(**kwargs)
    polylidar_kwargs = dict(alpha=0.0, lmax=lmax, min_triangles=5)
    # print(polylidar_kwargs)
    # Convert Points and make Polylidar
    points_mat = MatrixDouble(points)
    polylidar = Polylidar3D(**polylidar_kwargs)
    # Extracts planes and polygons, time
    t1 = time.perf_counter()
    mesh, planes, polygons = polylidar.extract_planes_and_polygons(points_mat)
    t2 = time.perf_counter()
    triangles = np.asarray(mesh.triangles)
    # print(triangles, triangles.shape)
    triangles = triangles.flatten()
    # print(triangles, triangles.shape)
    planes_np = np.asarray(planes)
    # print(planes_np)
    print("Took {:.2f} milliseconds".format((t2 - t1) * 1000))

    # Plot Data
    if points.shape[0] < 100000:
        fig, ax = plt.subplots(figsize=(10, 10), nrows=1, ncols=1)
        # plot points
        print("Plot Points")
        ax.scatter(points[:, 0], points[:, 1], c='k', s=20.0)
        fig.savefig("assets/scratch/Basic2DAlgorithm_pointcloud.png",
                    bbox_inches='tight',
                    pad_inches=-0.6)
        plt.show()

        print("Plot Mesh")
        fig, ax = plt.subplots(figsize=(10, 10), nrows=1, ncols=1)
        # plot points
        ax.scatter(points[:, 0], points[:, 1], c='k', s=0.1)
        # plot all triangles
        plot_triangles(get_triangles_from_list(triangles, points), ax)
        fig.savefig("assets/scratch/Basic2DAlgorithm_mesh.png",
                    bbox_inches='tight',
                    pad_inches=-0.6)
        plt.show()

        print("Planes and Polygons")
        fig, ax = plt.subplots(figsize=(10, 10), nrows=1, ncols=1)
        # plot points
        ax.scatter(points[:, 0], points[:, 1], c='k', s=0.1)
        # plot all triangles
        plot_triangles(get_triangles_from_list(triangles, points), ax)
        # plot mesh triangles
        triangle_meshes = get_colored_planar_segments(planes_np, triangles,
                                                      points)
        plot_triangle_meshes(triangle_meshes, ax)
        # plot polygons
        plot_polygons(polygons, points, ax)

        plt.axis('equal')

        plt.show()
Example #4
def main():
    np.random.seed(1)
    # generate random plane with hole
    plane = generate_3d_plane(bounds_x=[0, 10, 0.5],
                              bounds_y=[0, 10, 0.5],
                              holes=[[[3, 5], [3, 5]]],
                              height_noise=0.02,
                              planar_noise=0.02)
    # Generate top of box (causing the hole that we see)
    box_top = generate_3d_plane(bounds_x=[3, 5, 0.2],
                                bounds_y=[3, 5, 0.2],
                                holes=[],
                                height_noise=0.02,
                                height=2,
                                planar_noise=0.02)
    # Generate side of box (causing the hole that we see)
    box_side = generate_3d_plane(bounds_x=[0, 2, 0.2],
                                 bounds_y=[0, 2, 0.2],
                                 holes=[],
                                 height_noise=0.02,
                                 planar_noise=0.02)
    rm = rotation_matrix([0, 1, 0], -math.pi / 2.0)
    box_side = apply_rotation(rm, box_side) + [5, 3, 0]
    # All points joined together
    points = np.concatenate((plane, box_side, box_top))

    points_mat = MatrixDouble(points)
    polylidar_kwargs = dict(alpha=0.0,
                            lmax=1.0,
                            min_triangles=20,
                            z_thresh=0.1,
                            norm_thresh_min=0.94)
    polylidar = Polylidar3D(**polylidar_kwargs)

    elev = 15.0
    azim = -35

    # Show Point Cloud
    print("Should see point raw point cloud")
    fig, ax = plt.subplots(figsize=(10, 10),
                           nrows=1,
                           ncols=1,
                           subplot_kw=dict(projection='3d'))
    # plot points
    ax.scatter(*scale_points(points),
               s=20.0,
               c=points[:, 2],
               cmap=plt.cm.plasma)
    set_axes_equal(ax)
    ax.view_init(elev=elev, azim=azim)
    fig.savefig("assets/scratch/Basic25DAlgorithm_pointcloud.pdf",
                bbox_inches='tight')
    fig.savefig("assets/scratch/Basic25DAlgorithm_pointcloud.png",
                bbox_inches='tight',
                pad_inches=-0.8)
    plt.show()

    # Extracts planes and polygons, time
    t1 = time.time()
    mesh, planes, polygons = polylidar.extract_planes_and_polygons(points_mat)
    t2 = time.time()
    print("Polylidar Took {:.2f} milliseconds".format((t2 - t1) * 1000))

    triangles = np.asarray(mesh.triangles)
    all_planes = [np.arange(triangles.shape[0])]

    # Show Triangulation
    fig, ax = plt.subplots(figsize=(10, 10),
                           nrows=1,
                           ncols=1,
                           subplot_kw=dict(projection='3d'))

    plot_planes_3d(points, triangles, all_planes, ax, alpha=0.0, z_value=-4.0)
    plot_planes_3d(points, triangles, all_planes, ax, alpha=0.5)
    # plot points
    ax.scatter(*scale_points(points), s=0.1, c='k')
    set_axes_equal(ax, ignore_z=True)
    ax.set_zlim3d([-4, 6])
    ax.view_init(elev=elev, azim=azim)
    print("Should see triangulation point cloud")
    fig.savefig("assets/scratch/Basic25DAlgorithm_mesh.pdf",
                bbox_inches='tight')
    fig.savefig("assets/scratch/Basic25DAlgorithm_mesh.png",
                bbox_inches='tight',
                pad_inches=-0.8)
    plt.show()

    print("")
    print("Should see two planes extracted, please rotate.")
    fig, ax = plt.subplots(figsize=(10, 10),
                           nrows=1,
                           ncols=1,
                           subplot_kw=dict(projection='3d'))
    # plot all triangles
    plot_polygons_3d(points, polygons, ax)
    plot_planes_3d(points, triangles, planes, ax)
    # plot points
    ax.scatter(*scale_points(points), c='k', s=0.1)
    set_axes_equal(ax)
    ax.view_init(elev=elev, azim=azim)
    fig.savefig("assets/scratch/Basic25DAlgorithm_polygons.pdf",
                bbox_inches='tight')
    fig.savefig("assets/scratch/Basic25DAlgorithm_polygons.png",
                bbox_inches='tight',
                pad_inches=-0.8)
    plt.show()
    print("")
Example #5
def extract_planes_and_polygons_from_mesh(
    tri_mesh,
    avg_peaks,
    polylidar_kwargs=dict(alpha=0.0,
                          lmax=0.1,
                          min_triangles=2000,
                          z_thresh=0.1,
                          norm_thresh=0.95,
                          norm_thresh_min=0.95,
                          min_hole_vertices=50,
                          task_threads=4),
    filter_polygons=True,
    pl_=None,
    optimized=False,
    postprocess=dict(filter=dict(hole_area=dict(min=0.025, max=100.0),
                                 hole_vertices=dict(min=6),
                                 plane_area=dict(min=0.05)),
                     positive_buffer=0.00,
                     negative_buffer=0.00,
                     simplify=0.0)):

    if pl_ is not None:
        pl = pl_
    else:
        pl = Polylidar3D(**polylidar_kwargs)

    avg_peaks_mat = MatrixDouble(avg_peaks)
    t0 = time.perf_counter()
    if optimized:
        all_planes, all_polygons = pl.extract_planes_and_polygons_optimized(
            tri_mesh, avg_peaks_mat)
    else:
        all_planes, all_polygons = pl.extract_planes_and_polygons(
            tri_mesh, avg_peaks_mat)
    t1 = time.perf_counter()

    # tri_set = pl.extract_tri_set(tri_mesh, avg_peaks_mat)
    # planes_tri_set = [np.argwhere(np.asarray(tri_set) == i)  for i in range(1, 2)]
    # o3d_mesh_painted = paint_planes(o3d_mesh, planes_tri_set)

    polylidar_time = (t1 - t0) * 1000
    # logging.info("Polygon Extraction - Took (ms): %.2f", polylidar_time)
    all_planes_shapely = []
    all_obstacles_shapely = []
    time_filter = []
    # all_poly_lines = []
    if filter_polygons:
        vertices = np.asarray(tri_mesh.vertices)
        for i in range(avg_peaks.shape[0]):
            avg_peak = avg_peaks[i, :]
            rm, _ = R.align_vectors([[0, 0, 1]], [avg_peak])
            polygons_for_normal = all_polygons[i]
            # print(polygons_for_normal)
            if len(polygons_for_normal) > 0:
                planes_shapely, obstacles_shapely, filter_time = filter_and_create_polygons(
                    vertices,
                    polygons_for_normal,
                    rm=rm,
                    postprocess=postprocess)
                all_planes_shapely.extend(planes_shapely)
                all_obstacles_shapely.extend(obstacles_shapely)
                time_filter.append(filter_time)
                # all_poly_lines.extend(poly_lines)

    timings = dict(t_polylidar_planepoly=polylidar_time,
                   t_polylidar_filter=np.array(time_filter).mean())
    # all_planes_shapely, all_obstacles_shapely, all_poly_lines, timings
    return all_planes_shapely, all_obstacles_shapely, timings
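# A hypothetical usage sketch (an assumption for illustration, not from the original source):
# given a Polylidar tri_mesh and one or more dominant plane normals, the call might look like
#   avg_peaks = np.array([[0.0, 0.0, 1.0]])
#   planes, obstacles, timings = extract_planes_and_polygons_from_mesh(tri_mesh, avg_peaks)
#   print(timings['t_polylidar_planepoly'], timings['t_polylidar_filter'])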
Example #6
def run_test(pcd,
             rgbd,
             intrinsics,
             extrinsics,
             bp_alg=dict(radii=[0.02, 0.02]),
             poisson=dict(depth=8),
             callback=None,
             stride=2):
    """Demonstrate Polygon Extraction on both unorganized and organized point cloud

    Args:
        pcd (o3d.geometry.PointCloud): Open3D point cloud
        rgbd (o3d.geometry.RGBDImage): Open3D RGBD image; its depth channel is used to build the uniform mesh
        intrinsics (o3d.camera.PinholeCameraIntrinsic): camera intrinsics (pinhole model); .intrinsic_matrix is used
        extrinsics (np.ndarray): 4X4 numpy array of camera extrinsics
        bp_alg (dict, optional): Arguments to Open3D ball pivot alg. Defaults to dict(radii=[0.02, 0.02]).
        poisson (dict, optional): Arguments to Open3D Poisson surface reconstruction. Defaults to dict(depth=8).
        callback (function, optional): Callback function for visualization. Defaults to None.
        stride (int, optional): Skip rows/columns in rgbd. Defaults to 2.

    """
    points = np.asarray(pcd.points)
    polylidar_kwargs = dict(alpha=0.0,
                            lmax=0.10,
                            min_triangles=100,
                            z_thresh=0.04,
                            norm_thresh=0.90,
                            norm_thresh_min=0.90,
                            min_hole_vertices=6)
    pl = Polylidar3D(**polylidar_kwargs)

    ################################################################################
    ##### Treat data as an unorganized point cloud
    ##### Create a surface mesh using 2.5D Delaunay triangulation and extract polygons
    ################################################################################
    points_mat = MatrixDouble(points)
    t1 = time.perf_counter()
    mesh, planes, polygons = pl.extract_planes_and_polygons(points_mat)
    t2 = time.perf_counter()

    # Visualization of mesh and polygons
    all_poly_lines = filter_and_create_open3d_polygons(points, polygons)
    triangles = np.asarray(mesh.triangles)
    mesh_2d_polylidar = extract_mesh_planes(points, triangles, planes,
                                            mesh.counter_clock_wise,
                                            COLOR_PALETTE[0])
    mesh_2d_polylidar.extend(
        flatten([line_mesh.cylinder_segments for line_mesh in all_poly_lines]))
    time_mesh_2d_polylidar = (t2 - t1) * 1000
    polylidar_alg_name = 'Treated as **Unorganized** Point Cloud - 2.5D Delaunay Triangulation with Polygon Extraction'
    callback(polylidar_alg_name, time_mesh_2d_polylidar, pcd,
             mesh_2d_polylidar)

    ################################################################################
    ###### Treat data as an **Organized** 3D Point Cloud #########
    ###### Creates a true 3D mesh and is much faster using the organized structure of the point cloud
    ################################################################################
    tri_mesh, t_mesh_creation = make_uniform_grid_mesh(
        np.asarray(rgbd.depth),
        np.ascontiguousarray(intrinsics.intrinsic_matrix),
        extrinsics,
        stride=stride)

    # Visualization of only the mesh
    tri_mesh_o3d = create_open_3d_mesh_from_tri_mesh(tri_mesh)
    uniform_alg_name = 'Treated as **Organized** Point Cloud - Right-Cut Triangulation/Uniform Mesh (Mesh only)'
    callback(uniform_alg_name, t_mesh_creation, pcd, tri_mesh_o3d)

    # Extract polygons with Polylidar3D using the uniform mesh. The dominant plane normal is (0, 0, 1).
    t1 = time.perf_counter()
    planes, polygons = pl.extract_planes_and_polygons(tri_mesh)
    t2 = time.perf_counter()

    # Visualization of mesh and polygons
    vertices_np = np.asarray(tri_mesh.vertices)
    triangles_np = np.asarray(tri_mesh.triangles)

    all_poly_lines = filter_and_create_open3d_polygons(vertices_np, polygons)
    mesh_3d_polylidar = extract_mesh_planes(vertices_np, triangles_np, planes,
                                            tri_mesh.counter_clock_wise)
    mesh_3d_polylidar.extend(
        flatten([line_mesh.cylinder_segments for line_mesh in all_poly_lines]))
    time_polylidar3D = (t2 - t1) * 1000
    polylidar_3d_alg_name = 'Polygon Extraction on Uniform Mesh (only one dominant plane normal)'
    callback(polylidar_3d_alg_name, time_polylidar3D,
             create_open3d_pc(vertices_np), mesh_3d_polylidar)