Example #1
def main():
    EXAMPLE_INDEX = 2
    kwargs_base = dict(level=4, max_phi=180)
    kwargs_s2 = dict(**kwargs_base)
    kwargs_opt_integrate = dict(num_nbr=12)
    query_max_phi = kwargs_base['max_phi'] - 5

    # Get an Example Mesh
    ga_cpp_s2 = GaussianAccumulatorS2(**kwargs_s2)

    example_mesh = o3d.io.read_triangle_mesh(str(ALL_MESHES[EXAMPLE_INDEX]))
    r = ALL_MESHES_ROTATIONS[EXAMPLE_INDEX]
    example_mesh_filtered = example_mesh
    if r is not None:
        example_mesh_filtered = example_mesh_filtered.rotate(r.as_matrix())
        example_mesh_filtered = example_mesh_filtered.filter_smooth_laplacian(
            5)

    example_mesh_filtered.compute_triangle_normals()
    np.save('fixtures/normals/basement.npy',
            np.asarray(example_mesh_filtered.triangle_normals))
    colored_icosahedron_s2, normals, neighbors_s2 = visualize_gaussian_integration(
        ga_cpp_s2,
        example_mesh_filtered,
        max_phi=query_max_phi,
        integrate_kwargs=kwargs_opt_integrate)

    o3d.visualization.draw_geometries([example_mesh_filtered])
    o3d.visualization.draw_geometries([colored_icosahedron_s2])

    # Visualize unwrapping
    ico_chart_ = IcoCharts(4)
    t2 = time.perf_counter()
    normalized_bucket_counts_by_vertex = ga_cpp_s2.get_normalized_bucket_counts_by_vertex(
        True)
    ico_chart_.fill_image(normalized_bucket_counts_by_vertex)

    find_peaks_kwargs = dict(threshold_abs=50,
                             min_distance=1,
                             exclude_border=False,
                             indices=False)
    print(np.asarray(ico_chart_.image).shape)
    cluster_kwargs = dict(t=0.1, criterion='distance')
    _, _, avg_peaks, _ = find_peaks_from_ico_charts(
        ico_chart_,
        np.asarray(normalized_bucket_counts_by_vertex),
        find_peaks_kwargs=find_peaks_kwargs,
        cluster_kwargs=cluster_kwargs)
    t3 = time.perf_counter()
    print(t3 - t2)
    print(avg_peaks)
    full_image = np.asarray(ico_chart_.image)

    plt.imshow(full_image)
    plt.axis('off')
    # plt.xticks(np.arange(0, full_image.shape[1], step=1))
    # plt.yticks(np.arange(0, full_image.shape[0], step=1))
    plt.show()
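
As a reference, the peak-detection pipeline buried inside main() can be distilled into the short sketch below. It reuses only the calls shown on this page; the MatX3d import location and the helper find_peaks_from_ico_charts may live in different modules in your project, and the input is whatever (N, 3) unit-normal array you pass in.

import numpy as np
from fastga import GaussianAccumulatorS2, IcoCharts, MatX3d  # MatX3d location assumed


def detect_dominant_normals(normals, level=4):
    # Accumulate unit normals into the S2 histogram (refined icosahedron buckets)
    ga = GaussianAccumulatorS2(level=level, max_phi=180)
    ga.integrate(MatX3d(np.ascontiguousarray(normals)))

    # Unwrap the sphere into the 2D icosahedron chart image
    ico_chart = IcoCharts(level)
    counts_by_vertex = ga.get_normalized_bucket_counts_by_vertex(True)
    ico_chart.fill_image(counts_by_vertex)

    # 2D peak detection and clustering on the unwrapped image
    # (find_peaks_from_ico_charts is the same project helper used in main() above)
    find_peaks_kwargs = dict(threshold_abs=50, min_distance=1,
                             exclude_border=False, indices=False)
    cluster_kwargs = dict(t=0.1, criterion='distance')
    _, _, avg_peaks, _ = find_peaks_from_ico_charts(
        ico_chart, np.asarray(counts_by_vertex),
        find_peaks_kwargs=find_peaks_kwargs, cluster_kwargs=cluster_kwargs)

    ga.clear_count()  # reset the accumulator so it can be reused
    return avg_peaks
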
Example #2
def extract_polygons(opc, classes, config):
    pl = Polylidar3D(**config['polylidar'])
    ga = GaussianAccumulatorS2Beta(level=config['fastga']['level'])
    ico = IcoCharts(level=config['fastga']['level'])
    # 1. Create mesh
    tri_mesh = create_meshes_cuda(opc, **config['mesh']['filter'])

    mesh_o3d = create_open_3d_mesh_from_tri_mesh(tri_mesh)
    classes = classes.reshape((np.asarray(mesh_o3d.vertices).shape[0], 1))
    classes_mat = MatrixUInt8(classes)
    tri_mesh.set_vertex_classes(classes_mat, True)
    # alg_timings.update(timings)
    # 2. Get dominant plane normals
    avg_peaks, _, _, _, alg_timings = extract_all_dominant_plane_normals(
        tri_mesh, ga_=ga, ico_chart_=ico, **config['fastga'])

    # alg_timings.update(timings)
    # 3. Extract Planes and Polygons
    planes, obstacles, timings = extract_planes_and_polygons_from_classified_mesh(
        tri_mesh,
        avg_peaks,
        pl_=pl,
        filter_polygons=True,
        optimized=True,
        postprocess=config['polygon']['postprocess'])

    return mesh_o3d, planes, obstacles, timings
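
extract_polygons is driven entirely by a nested config dictionary. The sketch below shows the key structure it reads, assembled only from the keys accessed in these examples; every value is a placeholder assumption and the Polylidar3D and mesh-filter parameters will need tuning for real data.

# Hypothetical config illustrating only the keys extract_polygons reads.
config = dict(
    polylidar=dict(norm_thresh=0.95, norm_thresh_min=0.95, min_triangles=250),  # -> Polylidar3D(**config['polylidar'])
    fastga=dict(level=4),                                                        # -> GaussianAccumulatorS2Beta / IcoCharts level
    mesh=dict(filter=dict(loops_laplacian=2, kernel_size=3,
                          loops_bilateral=2, sigma_angle=0.1)),                  # -> create_meshes_cuda(opc, **config['mesh']['filter'])
    polygon=dict(postprocess=dict()),                                            # -> polygon post-processing options
)

# opc (organized point cloud) and classes (per-vertex labels) are assumed to exist already.
mesh_o3d, planes, obstacles, timings = extract_polygons(opc, classes, config)
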
Example #3
def extract_all_dominant_plane_normals(tri_mesh,
                                       level=5,
                                       with_o3d=False,
                                       ga_=None,
                                       ico_chart_=None,
                                       **kwargs):

    # Reuse objects if provided
    if ga_ is not None:
        ga = ga_
    else:
        ga = GaussianAccumulatorS2(level=level)

    if ico_chart_ is not None:
        ico_chart = ico_chart_
    else:
        ico_chart = IcoCharts(level=level)

    triangle_normals = np.asarray(tri_mesh.triangle_normals)
    triangle_normals_ds = down_sample_normals(triangle_normals, **kwargs)

    # np.savetxt('bad_normals.txt', triangle_normals_ds)
    triangle_normals_ds_mat = MatX3d(triangle_normals_ds)
    t1 = time.perf_counter()
    ga.integrate(triangle_normals_ds_mat)
    t2 = time.perf_counter()

    logging.debug(
        "Gaussian Accumulator - Normals Sampled: %d; Took (ms): %.2f",
        triangle_normals_ds.shape[0], (t2 - t1) * 1000)

    avg_peaks, pcd_all_peaks, arrow_avg_peaks, timings_dict = get_image_peaks(
        ico_chart, ga, level=level, with_o3d=with_o3d, **kwargs)

    # Create Open3D structures for visualization
    if with_o3d:
        # Visualize the Sphere
        accumulator_counts = np.asarray(ga.get_normalized_bucket_counts())
        refined_icosahedron_mesh = create_open_3d_mesh(
            np.asarray(ga.mesh.triangles), np.asarray(ga.mesh.vertices))
        color_counts = get_colors(accumulator_counts)[:, :3]
        colored_icosahedron = assign_vertex_colors(refined_icosahedron_mesh,
                                                   color_counts)
    else:
        colored_icosahedron = None

    elapsed_time_fastga = (t2 - t1) * 1000
    elapsed_time_peak = timings_dict['t_fastga_peak']
    elapsed_time_total = elapsed_time_fastga + elapsed_time_peak

    timings = dict(t_fastga_total=elapsed_time_total,
                   t_fastga_integrate=elapsed_time_fastga,
                   t_fastga_peak=elapsed_time_peak)

    ga.clear_count()
    return avg_peaks, pcd_all_peaks, arrow_avg_peaks, colored_icosahedron, timings
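
Since the call above returns five values, here is a brief usage sketch of extract_all_dominant_plane_normals; tri_mesh is assumed to already carry triangle normals, and the level value is only illustrative.

# Reuse the accumulator and the chart across calls (e.g. per frame) instead of reallocating them.
ga = GaussianAccumulatorS2(level=5)
ico_chart = IcoCharts(level=5)

avg_peaks, pcd_all_peaks, arrow_avg_peaks, colored_icosahedron, timings = \
    extract_all_dominant_plane_normals(tri_mesh, level=5, with_o3d=False,
                                       ga_=ga, ico_chart_=ico_chart)

# avg_peaks is a (K, 3) array of dominant unit normals; with_o3d=False means
# colored_icosahedron is None. timings reports milliseconds under
# 't_fastga_integrate', 't_fastga_peak', and 't_fastga_total'.
print(avg_peaks)
print(timings['t_fastga_total'])
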
Example #4
def extract_all_dominant_planes(tri_mesh,
                                vertices,
                                polylidar_kwargs,
                                config_pp,
                                ds=50,
                                min_samples=10000):
    ga = GaussianAccumulatorS2(level=4, max_phi=180)
    ico = IcoCharts(level=4)

    triangle_normals = np.asarray(tri_mesh.triangle_normals)
    num_normals = triangle_normals.shape[0]
    triangle_normals_ds = down_sample_normals(triangle_normals)

    # Get the data
    t0 = time.perf_counter()
    ga.integrate(MatX3d(triangle_normals_ds))
    t1 = time.perf_counter()
    avg_peaks, _, _, timings = get_image_peaks(ico,
                                               ga,
                                               level=4,
                                               with_o3d=False)
    timings['t_fastga_integrate'] = (t1 - t0) * 1000
    timings['t_fastga_total'] = timings['t_fastga_integrate'] + timings[
        't_fastga_peak']

    logging.info("Processing mesh with %d triangles", num_normals)
    logging.info("Dominant Plane Normals")
    print(avg_peaks)

    avg_peaks_selected = np.copy(avg_peaks[[0, 1, 2, 3], :])
    pl = Polylidar3D(**polylidar_kwargs)
    avg_peaks_mat = MatrixDouble(avg_peaks_selected)

    # debugging purposes, ignore
    tri_set = pl.extract_tri_set(tri_mesh, avg_peaks_mat)

    t0 = time.perf_counter()
    all_planes, all_polygons = pl.extract_planes_and_polygons_optimized(
        tri_mesh, avg_peaks_mat)
    t1 = time.perf_counter()
    timings['t_polylidar_planepoly'] = (t1 - t0) * 1000

    all_poly_lines = []
    for i in range(avg_peaks_selected.shape[0]):
        avg_peak = avg_peaks[i, :]
        rm, _ = R.align_vectors([[0, 0, 1]], [avg_peak])
        polygons_for_normal = all_polygons[i]
        # print(polygons_for_normal)
        if len(polygons_for_normal) > 0:
            poly_lines, _ = filter_and_create_open3d_polygons(
                vertices, polygons_for_normal, rm=rm, config_pp=config_pp)
            all_poly_lines.extend(poly_lines)

    return all_planes, tri_set, all_poly_lines, timings
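
The loop above uses scipy to rotate each set of polygons so its dominant normal points along +Z before filtering. A tiny, self-contained sketch of that alignment step (the sample normal is a made-up value):

import numpy as np
from scipy.spatial.transform import Rotation as R

avg_peak = np.array([0.0, 0.70710678, 0.70710678])  # hypothetical dominant plane normal
rm, _ = R.align_vectors([[0, 0, 1]], [avg_peak])     # rotation that carries avg_peak onto +Z
print(rm.apply(avg_peak))                            # ~[0, 0, 1]; the spin about the normal itself is arbitrary
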
Example #5
def get_image_peaks(ga_cpp_s2, level=2, **kwargs):
    ico_chart = IcoCharts(level)
    normalized_bucket_counts_by_vertex = ga_cpp_s2.get_normalized_bucket_counts_by_vertex(
        True)
    ico_chart.fill_image(normalized_bucket_counts_by_vertex)

    find_peaks_kwargs = dict(threshold_abs=25,
                             min_distance=1,
                             exclude_border=False,
                             indices=False)
    cluster_kwargs = dict(t=0.10, criterion='distance')
    average_filter = dict(min_total_weight=0.10)

    peaks, clusters, avg_peaks, avg_weights = find_peaks_from_ico_charts(
        ico_chart, np.asarray(normalized_bucket_counts_by_vertex),
        find_peaks_kwargs, cluster_kwargs, average_filter)
    gaussian_normals_sorted = np.asarray(ico_chart.sphere_mesh.vertices)
    pcd_all_peaks = get_pc_all_peaks(peaks, clusters, gaussian_normals_sorted)
    arrow_avg_peaks = get_arrow_normals(avg_peaks, avg_weights)

    return [pcd_all_peaks, *arrow_avg_peaks]
Example #6
def example_normals(normals: np.ndarray):
    LEVEL = 2
    kwargs_base = dict(level=LEVEL, max_phi=180)
    kwargs_s2 = dict(**kwargs_base)

    # Create Gaussian Accumulator
    ga_cpp_s2 = GaussianAccumulatorS2(**kwargs_s2)
    # Integrate the normals and get open3d visualization
    colored_icosahedron = integrate_normals_and_visualize(normals, ga_cpp_s2)
    o3d.visualization.draw_geometries([colored_icosahedron])
    # Create the IcoChart for unwrapping
    ico_chart_ = IcoCharts(LEVEL)
    normalized_bucket_counts_by_vertex = ga_cpp_s2.get_normalized_bucket_counts_by_vertex(
        True)
    ico_chart_.fill_image(normalized_bucket_counts_by_vertex)

    triangles_vertex_14 = [2, 15, 7, 260, 267, 256]

    # 2D Peak Detection
    find_peaks_kwargs = dict(threshold_abs=20,
                             min_distance=1,
                             exclude_border=False,
                             indices=False)
    cluster_kwargs = dict(t=0.2, criterion='distance')
    average_filter = dict(min_total_weight=0.05)
    _, _, avg_peaks, _ = find_peaks_from_ico_charts(
        ico_chart_,
        np.asarray(normalized_bucket_counts_by_vertex),
        find_peaks_kwargs=find_peaks_kwargs,
        cluster_kwargs=cluster_kwargs)
    print("Detected Peaks: {}".format(avg_peaks))

    full_image = np.asarray(ico_chart_.image)
    plt.imshow(full_image)
    plt.xticks(np.arange(0, full_image.shape[1], step=1))
    plt.yticks(np.arange(0, full_image.shape[0], step=1))
    plt.show()

    # Don't forget to reset the GA
    ga_cpp_s2.clear_count()
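
example_normals expects an (N, 3) array of unit normals. A small sketch of building a synthetic, clustered input for it; the cluster directions and noise level are illustrative assumptions.

import numpy as np

# Hypothetical test input: noisy normals clustered around three orthogonal directions.
rng = np.random.default_rng(0)
directions = np.array([[0.0, 0.0, 1.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]])
samples = np.repeat(directions, 1000, axis=0) + rng.normal(0.0, 0.05, (3000, 3))
samples /= np.linalg.norm(samples, axis=1, keepdims=True)  # renormalize to unit length

example_normals(np.ascontiguousarray(samples))
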
Example #7
def extract_all_dominant_planes(tri_mesh,
                                vertices,
                                polylidar_kwargs,
                                ds=50,
                                min_samples=10000):
    ga = GaussianAccumulatorS2Beta(level=4)
    ico = IcoCharts(level=4)

    fast_ga_kwargs = dict(find_peaks_kwargs=dict(threshold_abs=15,
                                                 min_distance=1,
                                                 exclude_border=False,
                                                 indices=False),
                          cluster_kwargs=dict(t=0.28, criterion='distance'),
                          average_filter=dict(min_total_weight=0.1))

    avg_peaks, _, _, _, alg_timings = extract_all_dominant_plane_normals(
        tri_mesh, ga_=ga, ico_chart_=ico, **fast_ga_kwargs)
    logging.info("Dominant Plane Normals")
    print(avg_peaks)

    avg_peaks_selected = np.copy(avg_peaks[[0, 1, 2, 3, 4], :])
    pl = Polylidar3D(**polylidar_kwargs)
    avg_peaks_mat = MatrixDouble(avg_peaks_selected)

    tri_set = pl.extract_tri_set(tri_mesh, avg_peaks_mat)
    t0 = time.perf_counter()

    all_planes, all_polygons = pl.extract_planes_and_polygons_optimized(
        tri_mesh, avg_peaks_mat)
    t1 = time.perf_counter()
    polylidar_time = (t1 - t0) * 1000
    all_poly_lines = []
    for i in range(avg_peaks_selected.shape[0]):
        avg_peak = avg_peaks[i, :]
        rm, _ = R.align_vectors([[0, 0, 1]], [avg_peak])
        polygons_for_normal = all_polygons[i]
        # print(polygons_for_normal)
        if len(polygons_for_normal) > 0:
            poly_lines, _ = filter_and_create_open3d_polygons(
                vertices, polygons_for_normal, rm=rm)
            all_poly_lines.extend(poly_lines)

    return all_planes, tri_set, all_poly_lines, polylidar_time
Example #8
def capture(config, video=None):
    # Configure streams
    pipeline, process_modules, filters, proj_mat, t265_device = create_pipeline(
        config)
    t265_pipeline = t265_device['pipeline']
    logging.info("Pipeline Created")

    # Long-lived objects. These are the objects that hold all the algorithms for surface extraction.
    # They need to be long-lived because they hold state (thread scheduler, image data structures, etc.)
    ll_objects = dict()
    ll_objects['pl'] = Polylidar3D(**config['polylidar'])
    ll_objects['ga'] = GaussianAccumulatorS2(level=config['fastga']['level'])
    ll_objects['ico'] = IcoCharts(level=config['fastga']['level'])

    if video:
        frame_width = config['color']['width'] * 2
        frame_height = config['color']['height']
        out_vid = cv2.VideoWriter(video,
                                  cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                  30, (frame_width, frame_height))

    all_records = []
    counter = 0
    try:
        while True:
            t00 = time.perf_counter()
            try:
                color_image, depth_image, meta = get_frames(
                    pipeline, t265_pipeline, process_modules, filters, config)
            except RuntimeError:
                # This only gets thrown in playback mode, when the recorded file runs out of frames
                logging.info("Out of frames")
                break
            t0 = time.perf_counter()
            if color_image is None or not valid_frames(
                    color_image, depth_image,
                    **config['polygon']['frameskip']):
                logging.debug("Invalid Frames")
                continue
            t1 = time.perf_counter()
            counter += 1
            # if counter < 10:
            #     continue

            try:
                # Get 6DOF Pose at appropriate timestamp
                if config['tracking']['enabled']:
                    euler_t265 = get_pose_matrix(meta['ts'])
                    # logging.info('euler_t265: %r', euler_t265)

                if config['show_polygon']:
                    # planes, obstacles, geometric_planes, timings, o3d_mesh = get_polygon(depth_image, config, ll_objects, **meta)
                    planes, obstacles, geometric_planes, timings = get_polygon(
                        depth_image, config, ll_objects, **meta)
                    timings['t_get_frames'] = (t0 - t00) * 1000
                    timings['t_check_frames'] = (t1 - t0) * 1000
                    all_records.append(timings)

                    curb_height = analyze_planes(geometric_planes)
                    ser.write(("{:.2f}".format(curb_height) + "\n").encode())

                    # Plot polygon in rgb frame
                    plot_planes_and_obstacles(planes, obstacles, proj_mat,
                                              None, color_image, config)

                # Show images
                if config.get("show_images"):
                    # Convert to open cv image types (BGR)
                    color_image_cv, depth_image_cv = colorize_images_open_cv(
                        color_image, depth_image, config)
                    # Stack both images horizontally
                    images = np.hstack((color_image_cv, depth_image_cv))
                    cv2.imshow('RealSense Color/Depth (Aligned)', images)
                    if video:
                        out_vid.write(images)
                    res = cv2.waitKey(1)
                    if res == ord('p'):
                        uid = uuid.uuid4()
                        logging.info("Saving Picture: {}".format(uid))
                        cv2.imwrite(
                            path.join(PICS_DIR, "{}_color.jpg".format(uid)),
                            color_image_cv)
                        cv2.imwrite(
                            path.join(PICS_DIR, "{}_stack.jpg".format(uid)),
                            images)
                    if res == ord('m'):
                        plt.imshow(
                            np.asarray(ll_objects['ico'].image_to_vertex_idx))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].mask))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].image))
                        plt.show()
                        import ipdb
                        ipdb.set_trace()

                    to_save_frames = config['save'].get('frames')
                    if (config['playback']['enabled'] and to_save_frames is not None
                            and counter in to_save_frames):
                        logging.info("Saving Picture: {}".format(counter))
                        cv2.imwrite(
                            path.join(PICS_DIR,
                                      "{}_color.jpg".format(counter)),
                            color_image_cv)
                        cv2.imwrite(
                            path.join(PICS_DIR,
                                      "{}_stack.jpg".format(counter)), images)

                # logging.info(f"Frame %d; Get Frames: %.2f; Check Valid Frame: %.2f; Laplacian: %.2f; Bilateral: %.2f; Mesh: %.2f; FastGA: %.2f; Plane/Poly: %.2f; Filtering: %.2f; Geometric Planes: %.2f; Curb Height: %.2f",
                #              counter, timings['t_get_frames'], timings['t_check_frames'], timings['t_laplacian'], timings['t_bilateral'], timings['t_mesh'], timings['t_fastga_total'],
                #              timings['t_polylidar_planepoly'], timings['t_polylidar_filter'], timings['t_geometric_planes'] curb_height)
                logging.info(f"Curb Height: %.2f", curb_height)
            except Exception as e:
                logging.exception("Error!")
    finally:
        pipeline.stop()
    if video is not None:
        out_vid.release()
    cv2.destroyAllWindows()
Example #9
def capture(config, video=None):
    # Configure streams
    pipeline, process_modules, filters, proj_mat, t265_device = create_pipeline(config)
    t265_pipeline = t265_device['pipeline']
    logging.info("Pipeline Created")

    # Long-lived objects. These are the objects that hold all the algorithms for surface extraction.
    # They need to be long-lived because they hold state (thread scheduler, image data structures, etc.)
    ll_objects = dict()
    ll_objects['pl'] = Polylidar3D(**config['polylidar'])
    ll_objects['ga'] = GaussianAccumulatorS2(level=config['fastga']['level'])
    ll_objects['ico'] = IcoCharts(level=config['fastga']['level'])

    if video:
        frame_width = config['color']['width'] * 2
        frame_height = config['color']['height']
        out_vid = cv2.VideoWriter(video, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30, (frame_width, frame_height))

    # Create Homogenous Transform from sensor frame to wheel chair frame
    sensor_mount_frame = config['frames']['sensor_mount']
    sensor_frame = config['frames']['sensor']
    sensor_to_wheel_chair_transform = create_transform(np.array(sensor_mount_frame['translation']), sensor_mount_frame['rotation']) \
        @ create_transform(sensor_frame['translation'], sensor_frame['rotation'])

    # print(sensor_to_wheel_chair_transform)
    # sys.exit()
    all_records = []
    counter = 0
    try:
        while True:
            t00 = time.perf_counter()
            try:
                color_image, depth_image, meta = get_frames(pipeline, t265_pipeline, process_modules, filters, config)
            except RuntimeError:
                # This only gets thrown in playback mode, when the recorded file runs out of frames
                logging.info("Out of frames")
                break
            t0 = time.perf_counter()
            if color_image is None or not valid_frames(color_image, depth_image, **config['polygon']['frameskip']):
                logging.debug("Invalid Frames")
                continue
            t1 = time.perf_counter()
            counter += 1
            if counter < 1:
                continue

            try:
                # Get 6DOF Pose at appropriate timestamp
                if config['tracking']['enabled']:
                    euler_t265 = get_pose_matrix(meta['ts'])
                    logging.info('euler_t265: %r', euler_t265)

                # flag to write results
                have_results = False
                if config['show_polygon']:
                    # planes, obstacles, geometric_planes, timings, o3d_mesh = get_polygon(depth_image, config, ll_objects, **meta)
                    planes, obstacles, geometric_planes, timings = get_polygon(depth_image, config, ll_objects, **meta)
                    timings['t_get_frames'] = (t0 - t00) * 1000
                    timings['t_check_frames'] = (t1 - t0) * 1000
                    all_records.append(timings)

                    curb_height, first_plane, second_plane = analyze_planes_updated(geometric_planes)
                    fname = config['playback']['file'].split('/')[1].split('.')[0]
                    # dump(dict(first_plane=first_plane, second_plane=second_plane), f"data/scratch_test/planes_{fname}_{counter:04}.joblib")
                    
                    # curb height must be greater than 2 cm and first_plane must have been found
                    if curb_height > 0.02 and first_plane is not None:
                        top_plane = choose_plane(first_plane, second_plane)
                        top_points, top_normal = top_plane['all_points'], top_plane['normal_ransac']
                        filtered_top_points = filter_points(top_points)  # <100 us
                        top_points_2d, height, all_fit_lines, best_fit_lines = extract_lines_wrapper(filtered_top_points, top_normal, return_only_one_line=True, **config['linefitting'])
                        if best_fit_lines:
                            # If there are two lines, only choose the first one
                            platform_center_sensor_frame = best_fit_lines[0]['hplane_point'] 
                            platform_normal_sensor_frame = best_fit_lines[0]['hplane_normal']
                            # print(platform_center_sensor_frame, platform_normal_sensor_frame)
                            result = get_turning_manuever(platform_center_sensor_frame, platform_normal_sensor_frame, \
                                        sensor_to_wheel_chair_transform, poi_offset=config.get('poi_offset', 0.7), debug=False)
                            plt.show()

                            orthog_dist = result['ortho_dist_platform']
                            distance_of_interest = result['dist_poi']
                            orientation = result['beta']
                            initial_turn = result['first_turn']
                            final_turn = result['second_turn']

                            logging.info("Frame #: %s, Orthogonal Distance to Platform: %.02f m, Orientation: %0.1f Degrees, Distance to POI: %.02f m, " \
                                "First Turn: %.01f degrees, Second Turn: %.01f degrees", 
                                         counter, orthog_dist, orientation, distance_of_interest, initial_turn, final_turn)
                            
                            plot_points(best_fit_lines[0]['square_points'], proj_mat, color_image, config)
                            # plot_points(best_fit_lines[0]['points_3d_orig'], proj_mat, color_image, config)
                            if len(best_fit_lines) > 2: 
                                plot_points(best_fit_lines[1]['square_points'], proj_mat, color_image, config)
                            have_results = True
                        else:
                            logging.warning("Line Detector Failed")
                    else:
                        logging.warning("Couldn't find the street and sidewalk surface")

                    # sys.exit()
                    # Plot polygon in rgb frame
                    plot_planes_and_obstacles(planes, obstacles, proj_mat, None, color_image, config)

                    # import ipdb; ipdb.set_trace()
                # Show images
                if config.get("show_images"):
                    # Convert to open cv image types (BGR)
                    color_image_cv, depth_image_cv = colorize_images_open_cv(color_image, depth_image, config)
                    # Stack both images horizontally
                    images = np.hstack((color_image_cv, depth_image_cv))
                    if have_results:
                        cv2.putText(images, 'Curb Height: {:.2f}m'.format(curb_height), (20, 360), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Orthogonal Distance: {:.2f}m'.format(orthog_dist), (20, 380), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Distance to Point of Interest: {:.2f}m'.format(distance_of_interest), (20, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Initial Turn: {:.2f}deg'.format(initial_turn), (20, 420), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Orientation: {:.2f}deg'.format(orientation), (20, 440), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Angle for final turn: {:.2f}deg'.format(final_turn), (20, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.imshow('RealSense Color/Depth (Aligned)', images)
                        # visualize_2d(top_points_2d, top_points_2d, all_fit_lines, best_fit_lines)
                    
                    if video:
                        out_vid.write(images)

                    res = cv2.waitKey(1)
                    if res == ord('p'):
                        uid = uuid.uuid4()
                        logging.info("Saving Picture: {}".format(uid))
                        cv2.imwrite(path.join(PICS_DIR, "{}_color.jpg".format(uid)), color_image_cv)
                        cv2.imwrite(path.join(PICS_DIR, "{}_stack.jpg".format(uid)), images)
                    if res == ord('m'):
                        plt.imshow(np.asarray(ll_objects['ico'].image_to_vertex_idx))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].mask))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].image))
                        plt.show()                  
                    

                    to_save_frames = config['save'].get('frames')
                    if config['playback']['enabled'] and to_save_frames is not None and counter in to_save_frames:
                        logging.info("Saving Picture: {}".format(counter))
                        cv2.imwrite(path.join(PICS_DIR, "{}_color.jpg".format(counter)), color_image_cv)
                        cv2.imwrite(path.join(PICS_DIR, "{}_stack.jpg".format(counter)), images)

                # logging.info(f"Frame %d; Get Frames: %.2f; Check Valid Frame: %.2f; Laplacian: %.2f; Bilateral: %.2f; Mesh: %.2f; FastGA: %.2f; Plane/Poly: %.2f; Filtering: %.2f; Geometric Planes: %.2f",
                #              counter, timings['t_get_frames'], timings['t_check_frames'], timings['t_laplacian'], timings['t_bilateral'], timings['t_mesh'], timings['t_fastga_total'],
                #              timings['t_polylidar_planepoly'], timings['t_polylidar_filter'], timings['t_geometric_planes'])
                logging.info(f"Curb Height: %.2f", curb_height)
                
            except Exception as e:
                logging.exception("Error!")
    finally:
        pipeline.stop()
    if video is not None:
        out_vid.release()
    cv2.destroyAllWindows()
Example #10
def evaluate_with_params(param_set,
                         param_index,
                         variance,
                         counter=None,
                         data='train'):
    """Evaluates polylidar against a set of parameters and writes results to a csv file

    Arguments:
        param_set {List[Dict]} -- A list of parameters dictionaries
        param_index {int} -- A unique index of the param_set if it was split from a master list
        variance {int} -- The level of variance of files to pull from SynPEB

    Keyword Arguments:
        counter {Value} -- Multiprocessing Value for incrementing progress in a safe manner (default: {None})
        data {str} -- To pull from training or testing (default: {'train'})
    """
    from polylidar_plane_benchmark.utility.helper import (
        convert_planes_to_classified_point_cloud, load_pcd_file,
        predict_loops_laplacian, extract_all_dominant_plane_normals,
        extract_planes_and_polygons_from_mesh)
    from polylidar_plane_benchmark.utility.evaluate import evaluate
    from polylidar_plane_benchmark.utility.helper_mesh import create_meshes_cuda

    from polylidar import Polylidar3D
    from fastga import GaussianAccumulatorS2, IcoCharts

    all_fpaths = get_fpaths(variance, data=data)
    optimized = data == 'test'

    # Create Long Lived Objects Only Once
    ga = GaussianAccumulatorS2(
        level=level_default)  # Fast Gaussian Accumulator
    ico_chart = IcoCharts(
        level=level_default
    )  # Used for unwrapping S2 to Image for peak detection
    pl = Polylidar3D(
        **polylidar_kwargs_default)  # Region Growing and Polygons Extraction

    # logger_train.warn("Working on variance %r, param index %r with %r elements", variance, param_index, len(param_set))
    csv_fpath = get_csv_fpath(variance, param_index, data=data)
    with open(csv_fpath, 'w', newline='') as csv_file:
        fieldnames = [
            'variance', 'fname', 'tcomp', 'kernel_size', 'loops_bilateral',
            'loops_laplacian', 'sigma_angle', 'norm_thresh_min', 'stride',
            'min_triangles', 'predict_loops_laplacian', 'n_gt', 'n_ms_all',
            'f_weighted_corr_seg', 'f_corr_seg', 'n_corr_seg', 'n_over_seg',
            'n_under_seg', 'n_missed_seg', 'n_noise_seg', 'rmse', 'laplacian',
            'bilateral', 'mesh', 'fastga_total', 'fastga_integrate',
            'fastga_peak', 'polylidar'
        ]
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()
        try:
            for fpath in all_fpaths:
                fname = pathlib.Path(fpath).name
                logger_train.info("Working on file %s", fpath)

                mesh_kwargs_previous = dict()
                tri_mesh = None
                timings_mesh = None

                prev_stride = 0
                pc_raw = None
                pc_image = None
                for params in param_set:
                    stride = params.get('stride', 2)
                    if stride != prev_stride:
                        pc_raw, pc_image = load_pcd_file(str(fpath),
                                                         stride=stride)
                        prev_stride = stride
                    else:
                        logger_train.debug(
                            "Reusing previously loaded point cloud")

                    # Predict laplacian loops needed by noisiness of point cloud
                    if params.get('predict_loops_laplacian'):
                        params['loops_laplacian'] = predict_loops_laplacian(
                            pc_image, stride)
                        logger_train.info("Set laplacian loops to :%r",
                                          params['loops_laplacian'])

                    # Update Polylidar Kwargs
                    pl.norm_thresh = params['norm_thresh_min']
                    pl.norm_thresh_min = params['norm_thresh_min']
                    pl.min_triangles = params.get('min_triangles', 250)

                    t1 = time.perf_counter()
                    mesh_kwargs = {
                        k: params[k]
                        for k in ('loops_laplacian', 'kernel_size',
                                  'loops_bilateral', 'sigma_angle')
                    }
                    # Create Smoothed TriMesh
                    if mesh_kwargs_previous != mesh_kwargs:
                        tri_mesh, mesh_timings = create_meshes_cuda(
                            pc_image, **mesh_kwargs)
                        mesh_kwargs_previous = mesh_kwargs
                    else:
                        logger_train.debug("Reusing previously created mesh!")

                    # Extract Dominant Plane Peaks
                    avg_peaks, _, _, _, fastga_timings = extract_all_dominant_plane_normals(
                        tri_mesh,
                        level=level_default,
                        with_o3d=False,
                        ga_=ga,
                        ico_chart_=ico_chart)
                    # Extract Planes and Polygons
                    try:
                        all_planes, all_polygons, all_poly_lines, polylidar_timings = extract_planes_and_polygons_from_mesh(
                            tri_mesh,
                            avg_peaks,
                            filter_polygons=False,
                            pl_=pl,
                            optimized=optimized)
                    except Exception:
                        logger_train.exception(
                            "Error in Polylidar, File: %s, Variance: %d, Param Index: %d, Params: %r",
                            fname, variance, param_index, params)
                        all_planes = []
                        all_polygons = []
                        all_poly_lines = []
                        polylidar_timings = dict()
                        logger_train.exception(
                            "Error in Polylidar, Params: %r", params)

                    # Aggregate timings
                    all_timings = dict(**mesh_timings, **fastga_timings,
                                       **polylidar_timings)

                    # Convert planes to format used for evaluation
                    all_planes_classified = convert_planes_to_classified_point_cloud(
                        all_planes, tri_mesh, avg_peaks)
                    # Evaluate the results. This actually takes the longest amount of time!
                    misc = dict(fname=fname,
                                variance=variance,
                                tcomp=0.80,
                                **params)
                    results_080, auxiliary_080 = evaluate(
                        pc_image, all_planes_classified, tcomp=0.80, misc=misc)
                    # print(results)
                    logger_train.info("Finished %r", params)

                    full_record_080 = dict(**params,
                                           **results_080,
                                           **all_timings,
                                           fname=fname,
                                           variance=variance,
                                           tcomp=0.80)
                    # Save the record
                    writer.writerow(full_record_080)
                    csv_file.flush()

                    t2 = time.perf_counter()
                    elapsed = (t2 - t1) * 1000
                    logger_train.info("One Param Set Took: %.1f", elapsed)
                    # Don't forget to clear the GA
                    ga.clear_count()
                    if counter is not None:
                        with counter.get_lock():
                            counter.value += 1

        except Exception as e:
            logger_train.exception("Something went wrong")
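
Every worker process writes one CSV with the fieldnames listed above. A brief post-processing sketch for aggregating those files (pandas and the results/ glob pattern are assumptions, since the benchmark's own analysis code is not shown here):

import glob
import pandas as pd

# Hypothetical aggregation: concatenate every per-worker CSV and rank the
# mesh-smoothing parameter combinations by weighted segmentation score.
frames = [pd.read_csv(fpath) for fpath in glob.glob('results/*.csv')]  # path pattern is an assumption
df = pd.concat(frames, ignore_index=True)
summary = (df.groupby(['kernel_size', 'loops_laplacian', 'loops_bilateral'])
             ['f_weighted_corr_seg'].mean().sort_values(ascending=False))
print(summary.head())
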
Example #11
def evaluate_with_params_visualize(params):
    """Similar to above but just for one set of parameters. Used for debugging and visualization

    Arguments:
        params {dict} -- Dict of hyperparameters
    """

    from polylidar_plane_benchmark.utility.helper import (
        convert_planes_to_classified_point_cloud, load_pcd_file, paint_planes,
        predict_loops_laplacian, extract_all_dominant_plane_normals,
        extract_planes_and_polygons_from_mesh)
    from polylidar_plane_benchmark.utility.evaluate import evaluate, evaluate_incorrect
    from polylidar_plane_benchmark.utility.helper_mesh import create_meshes_cuda
    from polylidar_plane_benchmark.utility.o3d_util import create_open_3d_pcd, plot_meshes, create_open_3d_mesh_from_tri_mesh, mark_invalid_planes

    from polylidar import Polylidar3D
    from fastga import GaussianAccumulatorS2, IcoCharts
    import open3d as o3d

    logger.setLevel(logging.INFO)
    variance = params['variance']
    all_fpaths = get_fpaths(params['variance'])
    fpaths = [fpath for fpath in all_fpaths if params['fname'] in fpath]
    fpath = fpaths[0]

    # Create Long Lived Objects Only Once
    ga = GaussianAccumulatorS2(
        level=level_default)  # Fast Gaussian Accumulator
    ico_chart = IcoCharts(
        level=level_default
    )  # Used for unwrapping S2 to Image for peak detection
    pl = Polylidar3D(
        **polylidar_kwargs_default)  # Region Growing and Polygons Extraction

    fname = pathlib.Path(fpath).name
    logger_train.info("Working on file %s", fpath)

    stride = params.get('stride', 2)
    pc_raw, pc_image = load_pcd_file(fpath, stride=stride)
    pc_points = np.ascontiguousarray(pc_raw[:, :3])
    # Create Open3D point cloud
    cmap = cc.cm.glasbey_bw
    pcd_raw = create_open_3d_pcd(pc_raw[:, :3], pc_raw[:, 3], cmap=cmap)

    pl.norm_thresh = params['norm_thresh_min']
    pl.norm_thresh_min = params['norm_thresh_min']
    pl.min_triangles = params.get('min_triangles', 250)

    # Predict laplacian loops needed by noisiness of point cloud
    if params.get('predict_loops_laplacian'):
        params['loops_laplacian'] = predict_loops_laplacian(pc_image, stride)
        logger_train.info("Set laplacian loops to :%r",
                          params['loops_laplacian'])

    t1 = time.perf_counter()
    mesh_kwargs = {
        k: params[k]
        for k in ('loops_laplacian', 'kernel_size', 'loops_bilateral',
                  'sigma_angle')
    }

    tri_mesh, mesh_timings = create_meshes_cuda(pc_image, **mesh_kwargs)
    mesh_kwargs_previous = mesh_kwargs

    # Extract Dominant Plane Peaks
    avg_peaks, pcd_all_peaks, arrow_avg_peaks, colored_icosahedron, fastga_timings = extract_all_dominant_plane_normals(
        tri_mesh,
        level=level_default,
        with_o3d=True,
        ga_=ga,
        ico_chart_=ico_chart)

    # Extract Planes and Polygons
    try:
        all_planes, all_polygons, all_poly_lines, polylidar_timings = extract_planes_and_polygons_from_mesh(
            tri_mesh,
            avg_peaks,
            filter_polygons=False,
            pl_=pl,
            optimized=False)
    except Exception:
        all_planes = []
        all_polygons = []
        all_poly_lines = []
        polylidar_timings = dict()
        logger_train.exception("Error in Polylidar, Params: %r", params)

    # Aggregate timings
    all_timings = dict(**mesh_timings, **fastga_timings, **polylidar_timings)

    # Convert planes to format used for evaluation
    all_planes_classified = convert_planes_to_classified_point_cloud(
        all_planes, tri_mesh, avg_peaks)
    # Evaluate the results. This actually takes the longest amount of time!
    misc = dict(**params)
    # results_080, auxiliary_080 = evaluate(pc_image, all_planes_classified, tcomp=0.80, misc=misc)
    results_080, auxiliary_080 = evaluate(pc_image,
                                          all_planes_classified,
                                          tcomp=params['tcomp'],
                                          misc=misc)
    # print(results)
    logger_train.info("Finished %r", params)

    # create invalid plane markers, green = gt_label_missed, red=ms_labels_noise, blue=gt_label_over_seg, gray=ms_label_under_seg
    invalid_plane_markers = mark_invalid_planes(pc_raw, auxiliary_080,
                                                all_planes_classified)
    tri_mesh_o3d = create_open_3d_mesh_from_tri_mesh(tri_mesh)
    tri_mesh_o3d_painted = paint_planes(all_planes_classified, tri_mesh_o3d)

    # plot_meshes([pcd_raw, tri_mesh_o3d], [colored_icosahedron, *arrow_avg_peaks])
    plot_meshes([pcd_raw, tri_mesh_o3d_painted, *invalid_plane_markers])
Example #12
def capture(config, video=None):
    # Configure streams
    pipeline, process_modules, filters, proj_mat, t265_device = create_pipeline(
        config)
    t265_pipeline = t265_device['pipeline']
    logging.info("Pipeline Created")

    # Long-lived objects. These are the objects that hold all the algorithms for surface extraction.
    # They need to be long-lived because they hold state (thread scheduler, image data structures, etc.)
    ll_objects = dict()
    ll_objects['pl'] = Polylidar3D(**config['polylidar'])
    ll_objects['ga'] = GaussianAccumulatorS2(level=config['fastga']['level'])
    ll_objects['ico'] = IcoCharts(level=config['fastga']['level'])

    if video:
        frame_width = config['color']['width'] * 2
        frame_height = config['color']['height']
        out_vid = cv2.VideoWriter(video,
                                  cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                  30, (frame_width, frame_height))

    all_records = []
    counter = 0
    try:
        while True:
            t00 = time.perf_counter()
            try:
                color_image, depth_image, meta = get_frames(
                    pipeline, t265_pipeline, process_modules, filters, config)
            except RuntimeError:
                # This only gets thrown in playback mode, when the recorded file runs out of frames
                logging.info("Out of frames")
                break
            t0 = time.perf_counter()
            if color_image is None or not valid_frames(
                    color_image, depth_image,
                    **config['polygon']['frameskip']):
                logging.debug("Invalid Frames")
                continue
            t1 = time.perf_counter()
            counter += 1
            if counter < 1:
                continue

            try:
                # Get 6DOF Pose at appropriate timestamp
                if config['tracking']['enabled']:
                    euler_t265 = get_pose_matrix(meta['ts'])
                    logging.info('euler_t265: %r', euler_t265)

                if config['show_polygon']:
                    # planes, obstacles, geometric_planes, timings, o3d_mesh = get_polygon(depth_image, config, ll_objects, **meta)
                    planes, obstacles, geometric_planes, timings = get_polygon(
                        depth_image, config, ll_objects, **meta)
                    timings['t_get_frames'] = (t0 - t00) * 1000
                    timings['t_check_frames'] = (t1 - t0) * 1000
                    all_records.append(timings)

                    curb_height, first_plane, second_plane = analyze_planes(
                        geometric_planes)

                    # curb height must be greater than 2 cm and first_plane must have been found
                    if curb_height > 0.02 and first_plane is not None:
                        square_points, normal_svm, center = hplane(
                            first_plane, second_plane)
                        dist, theta = get_theta_and_distance(
                            normal_svm, center, first_plane['normal_ransac'])
                        logging.info(
                            "Frame #: %s, Distance: %.02f meters, Theta: %.01f degrees",
                            counter, dist, theta)
                        plot_points(square_points, proj_mat, color_image,
                                    config)
                        # dump(dict(first_plane=first_plane, second_plane=second_plane), 'data/planes.joblib')
                    else:
                        logging.warning(
                            "Couldn't find the street and sidewalk surface")

                    # Send arduino curb height, distance to curb, and Angle to the curb
                    # ser.write(("{:.2f}".format(curb_height)+"\n").encode())
                    # ser.write(("{:.2f}".format(dist)+"\n").encode())
                    # ser.write(("{:.2f}".format(theta)+"\n").encode())

                    # Send RPi through Socket curb height, distance to curb, and Angle to the curb
                    s = socket.socket()
                    host = "169.254.41.103"  #This is your Server IP!
                    port = 2345
                    s.connect((host, port))
                    data = struct.pack('!d', float("{:.2f}".format(curb_height)))
                    s.send(data)
                    rece = s.recv(1024)
                    print("Received", rece)
                    s.close()

                    # sys.exit()
                    # Plot polygon in rgb frame
                    plot_planes_and_obstacles(planes, obstacles, proj_mat,
                                              None, color_image, config)

                    # import ipdb; ipdb.set_trace()
                # Show images
                if config.get("show_images"):
                    # Convert to open cv image types (BGR)
                    color_image_cv, depth_image_cv = colorize_images_open_cv(
                        color_image, depth_image, config)
                    # Stack both images horizontally
                    images = np.hstack((color_image_cv, depth_image_cv))
                    cv2.putText(images,
                                'Curb Height: {:.2f}m'.format(curb_height),
                                (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                (0, 255, 0), 1, cv2.LINE_AA)
                    cv2.putText(images,
                                'Distance from Curb: {:.2f}m'.format(dist),
                                (10, 215), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                (0, 255, 0), 1, cv2.LINE_AA)
                    cv2.putText(images,
                                'Angle to the Curb: {:.2f}deg'.format(theta),
                                (10, 230), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                (0, 255, 0), 1, cv2.LINE_AA)
                    cv2.imshow('RealSense Color/Depth (Aligned)', images)

                    if video:
                        out_vid.write(images)

                    res = cv2.waitKey(1)
                    if res == ord('p'):
                        uid = uuid.uuid4()
                        logging.info("Saving Picture: {}".format(uid))
                        cv2.imwrite(
                            path.join(PICS_DIR, "{}_color.jpg".format(uid)),
                            color_image_cv)
                        cv2.imwrite(
                            path.join(PICS_DIR, "{}_stack.jpg".format(uid)),
                            images)
                    if res == ord('m'):
                        plt.imshow(
                            np.asarray(ll_objects['ico'].image_to_vertex_idx))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].mask))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].image))
                        plt.show()

                        # import ipdb; ipdb.set_trace()

                    to_save_frames = config['save'].get('frames')
                    if (config['playback']['enabled'] and to_save_frames is not None
                            and counter in to_save_frames):
                        logging.info("Saving Picture: {}".format(counter))
                        cv2.imwrite(
                            path.join(PICS_DIR,
                                      "{}_color.jpg".format(counter)),
                            color_image_cv)
                        cv2.imwrite(
                            path.join(PICS_DIR,
                                      "{}_stack.jpg".format(counter)), images)

                # logging.info(f"Frame %d; Get Frames: %.2f; Check Valid Frame: %.2f; Laplacian: %.2f; Bilateral: %.2f; Mesh: %.2f; FastGA: %.2f; Plane/Poly: %.2f; Filtering: %.2f; Geometric Planes: %.2f; Curb Height: %.2f",
                #              counter, timings['t_get_frames'], timings['t_check_frames'], timings['t_laplacian'], timings['t_bilateral'], timings['t_mesh'], timings['t_fastga_total'],
                #              timings['t_polylidar_planepoly'], timings['t_polylidar_filter'], timings['t_geometric_planes'] curb_height)
                logging.info(f"Curb Height: %.2f", curb_height)

            except Exception as e:
                logging.exception("Error!")
    finally:
        pipeline.stop()
    if video is not None:
        out_vid.release()
    cv2.destroyAllWindows()
Example #13
def analyze_mesh(mesh):
    """Demonstrates unwrapping and peak detection of a S2 Histogram"""
    LEVEL = 4
    kwargs_base = dict(level=LEVEL, max_phi=180)
    kwargs_s2 = dict(**kwargs_base)
    kwargs_opt_integrate = dict(num_nbr=12)

    # Create Gaussian Accumulator
    ga_cpp_s2 = GaussianAccumulatorS2(**kwargs_s2)
    # This function will integrate the normals and return an open3d mesh for visualization.
    colored_icosahedron_s2, _, _ = visualize_gaussian_integration(
        ga_cpp_s2,
        mesh,
        max_phi=kwargs_base['max_phi'],
        integrate_kwargs=kwargs_opt_integrate)
    num_triangles = ga_cpp_s2.num_buckets

    # for verification
    ico_s2_organized_mesh = ga_cpp_s2.copy_ico_mesh(True)
    _, _, ico_o3d_s2_om = decompose(ico_s2_organized_mesh)
    colors_s2 = get_colors(range(num_triangles), colormap=plt.cm.tab20)[:, :3]
    colored_ico_s2_organized_mesh = assign_vertex_colors(
        ico_o3d_s2_om, colors_s2)

    # Demonstrate the five charts for visualization
    bucket_counts = np.asarray(ga_cpp_s2.get_normalized_bucket_counts(True))
    bucket_colors = get_colors(bucket_counts)[:, :3]
    charts_triangles = []
    for chart_idx in range(5):
        chart_size = int(num_triangles / 5)
        chart_start_idx = chart_idx * chart_size
        chart_end_idx = chart_start_idx + chart_size
        icochart_square = refine_icochart(level=LEVEL, square=True)
        _, _, icochart_square_o3d = decompose(icochart_square)
        colored_icochart_square = assign_vertex_colors(
            icochart_square_o3d,
            bucket_colors[chart_start_idx:chart_end_idx, :])
        charts_triangles.append(colored_icochart_square)

    # Plot the unwrapped icosahedron
    new_charts = translate_meshes(charts_triangles,
                                  current_translation=-4.0,
                                  axis=1)
    all_charts = functools.reduce(lambda a, b: a + b, new_charts)
    plot_meshes(colored_ico_s2_organized_mesh, colored_icosahedron_s2,
                all_charts, mesh)

    ico_chart_ = IcoCharts(LEVEL)
    normalized_bucket_counts_by_vertex = ga_cpp_s2.get_normalized_bucket_counts_by_vertex(
        True)
    ico_chart_.fill_image(normalized_bucket_counts_by_vertex)

    find_peaks_kwargs = dict(threshold_abs=25,
                             min_distance=1,
                             exclude_border=False,
                             indices=False)
    average_filter = dict(min_total_weight=0.05)
    _, _, avg_peaks, _ = find_peaks_from_ico_charts(
        ico_chart_,
        np.asarray(normalized_bucket_counts_by_vertex),
        find_peaks_kwargs=find_peaks_kwargs,
        average_filter=average_filter)
    print(avg_peaks)

    full_image = np.asarray(ico_chart_.image)

    plt.imshow(full_image)
    plt.xticks(np.arange(0, full_image.shape[1], step=1))
    plt.yticks(np.arange(0, full_image.shape[0], step=1))
    plt.show()
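
A short driver for analyze_mesh, assuming an arbitrary mesh file (the path is a placeholder):

import open3d as o3d

# Load a mesh, make sure triangle normals exist, then run the unwrapping demo above.
mesh = o3d.io.read_triangle_mesh('path/to/mesh.ply')  # placeholder path
mesh.compute_triangle_normals()
analyze_mesh(mesh)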