Ejemplo n.º 1
0
def capture(config, video=None):
    """Capture RGB-D frames, extract curb/platform surfaces, and compute turning maneuvers.

    Pulls aligned color/depth frames (plus T265 pose data) from a RealSense
    pipeline, extracts planar surfaces with Polylidar3D, fits lines to the
    platform edge, and derives the turning maneuver (orthogonal distance,
    orientation, first/second turn angles) needed to approach the platform.
    Results are optionally drawn on screen and recorded to a video file.

    Parameters
    ----------
    config : dict
        Parsed application configuration ('color', 'polylidar', 'fastga',
        'frames', 'polygon', 'linefitting', 'tracking', 'playback', 'save',
        'show_polygon', 'show_images', ... sections).
    video : str, optional
        Path of an output video file. When given, the stacked color/depth
        visualization is written to it (MJPG encoder, 30 FPS).
    """
    # Configure streams
    pipeline, process_modules, filters, proj_mat, t265_device = create_pipeline(config)
    t265_pipeline = t265_device['pipeline']
    logging.info("Pipeline Created")

    # Long lived objects. These hold all the algorithms for surface extraction.
    # They need to be long lived because they hold state (thread scheduler,
    # image data structures, etc.)
    ll_objects = dict()
    ll_objects['pl'] = Polylidar3D(**config['polylidar'])
    ll_objects['ga'] = GaussianAccumulatorS2(level=config['fastga']['level'])
    ll_objects['ico'] = IcoCharts(level=config['fastga']['level'])

    if video:
        # Output frame is the color and depth images stacked side by side
        frame_width = config['color']['width'] * 2
        frame_height = config['color']['height']
        out_vid = cv2.VideoWriter(video, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30, (frame_width, frame_height))

    # Create homogeneous transform from sensor frame to wheel chair frame
    sensor_mount_frame = config['frames']['sensor_mount']
    sensor_frame = config['frames']['sensor']
    sensor_to_wheel_chair_transform = create_transform(np.array(sensor_mount_frame['translation']), sensor_mount_frame['rotation']) \
        @ create_transform(sensor_frame['translation'], sensor_frame['rotation'])

    all_records = []
    counter = 0
    try:
        while True:
            t00 = time.perf_counter()
            try:
                color_image, depth_image, meta = get_frames(pipeline, t265_pipeline, process_modules, filters, config)
            except RuntimeError:
                # This only gets thrown in playback mode from a recorded file when frames "run out"
                logging.info("Out of frames")
                break
            t0 = time.perf_counter()
            if color_image is None or not valid_frames(color_image, depth_image, **config['polygon']['frameskip']):
                logging.debug("Invalid Frames")
                continue
            t1 = time.perf_counter()
            counter += 1

            try:
                # Get 6DOF pose at the appropriate timestamp
                if config['tracking']['enabled']:
                    euler_t265 = get_pose_matrix(meta['ts'])
                    logging.info('euler_t265: %r', euler_t265)

                # Flag marking that a full maneuver result was computed this frame
                have_results = False
                # BUGFIX: initialize so the final log below never hits an
                # unbound name when 'show_polygon' is disabled.
                curb_height = None
                if config['show_polygon']:
                    planes, obstacles, geometric_planes, timings = get_polygon(depth_image, config, ll_objects, **meta)
                    # Record per-frame timing diagnostics (milliseconds)
                    timings['t_get_frames'] = (t0 - t00) * 1000
                    timings['t_check_frames'] = (t1 - t0) * 1000
                    all_records.append(timings)

                    curb_height, first_plane, second_plane = analyze_planes_updated(geometric_planes)

                    # Curb height must be greater than 2 cm and first_plane must have been found
                    if curb_height > 0.02 and first_plane is not None:
                        top_plane = choose_plane(first_plane, second_plane)
                        top_points, top_normal = top_plane['all_points'], top_plane['normal_ransac']
                        filtered_top_points = filter_points(top_points)  # <100 us
                        top_points_2d, height, all_fit_lines, best_fit_lines = extract_lines_wrapper(filtered_top_points, top_normal, return_only_one_line=True, **config['linefitting'])
                        if best_fit_lines:
                            # If there are two lines only choose the first one
                            platform_center_sensor_frame = best_fit_lines[0]['hplane_point']
                            platform_normal_sensor_frame = best_fit_lines[0]['hplane_normal']
                            result = get_turning_manuever(platform_center_sensor_frame, platform_normal_sensor_frame, \
                                        sensor_to_wheel_chair_transform, poi_offset=config.get('poi_offset', 0.7), debug=False)
                            # NOTE(review): likely a no-op unless debug plotting created a figure — confirm
                            plt.show()

                            orthog_dist = result['ortho_dist_platform']
                            distance_of_interest = result['dist_poi']
                            orientation = result['beta']
                            initial_turn = result['first_turn']
                            final_turn = result['second_turn']

                            logging.info("Frame #: %s, Orthogonal Distance to Platform: %.02f m, Orientation: %0.1f Degrees, Distance to POI: %.02f m, " \
                                "First Turn: %.01f degrees, Second Turn: %.01f degrees",
                                         counter, orthog_dist, orientation, distance_of_interest, initial_turn, final_turn)

                            # Draw the detected platform edge(s) on the color image
                            plot_points(best_fit_lines[0]['square_points'], proj_mat, color_image, config)
                            if len(best_fit_lines) > 2:
                                plot_points(best_fit_lines[1]['square_points'], proj_mat, color_image, config)
                            have_results = True
                        else:
                            logging.warning("Line Detector Failed")
                    else:
                        logging.warning("Couldn't find the street and sidewalk surface")

                    # Plot polygon in rgb frame
                    plot_planes_and_obstacles(planes, obstacles, proj_mat, None, color_image, config)

                # Show images
                if config.get("show_images"):
                    # Convert to open cv image types (BGR)
                    color_image_cv, depth_image_cv = colorize_images_open_cv(color_image, depth_image, config)
                    # Stack both images horizontally
                    images = np.hstack((color_image_cv, depth_image_cv))
                    if have_results:
                        # Overlay the computed maneuver metrics on the stacked image
                        cv2.putText(images, 'Curb Height: {:.2f}m'.format(curb_height), (20, 360), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Orthogonal Distance: {:.2f}m'.format(orthog_dist), (20, 380), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Distance to Point of Interest: {:.2f}m'.format(distance_of_interest), (20, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Initial Turn: {:.2f}deg'.format(initial_turn), (20, 420), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Orientation: {:.2f}deg'.format(orientation), (20, 440), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.putText(images, 'Angle for final turn: {:.2f}deg'.format(final_turn), (20, 460), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                        cv2.imshow('RealSense Color/Depth (Aligned)', images)

                    if video:
                        out_vid.write(images)

                    res = cv2.waitKey(1)
                    if res == ord('p'):
                        # 'p' saves a snapshot of the current frame under a unique id
                        uid = uuid.uuid4()
                        logging.info("Saving Picture: {}".format(uid))
                        cv2.imwrite(path.join(PICS_DIR, "{}_color.jpg".format(uid)), color_image_cv)
                        cv2.imwrite(path.join(PICS_DIR, "{}_stack.jpg".format(uid)), images)
                    if res == ord('m'):
                        # 'm' shows debug views of the ico chart internals
                        plt.imshow(np.asarray(ll_objects['ico'].image_to_vertex_idx))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].mask))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].image))
                        plt.show()

                    # In playback mode, optionally save specific configured frames
                    to_save_frames = config['save'].get('frames')
                    if config['playback']['enabled'] and to_save_frames is not None and counter in to_save_frames:
                        logging.info("Saving Picture: {}".format(counter))
                        cv2.imwrite(path.join(PICS_DIR, "{}_color.jpg".format(counter)), color_image_cv)
                        cv2.imwrite(path.join(PICS_DIR, "{}_stack.jpg".format(counter)), images)

                # BUGFIX: only log when a curb height was actually computed;
                # previously this raised NameError (logged as "Error!") on
                # every frame when 'show_polygon' was disabled.
                if curb_height is not None:
                    logging.info("Curb Height: %.2f", curb_height)

            except Exception:
                logging.exception("Error!")
    finally:
        # Always stop the sensor pipeline, even on Ctrl-C or an unexpected error
        pipeline.stop()
    if video:
        # BUGFIX: guard now matches the creation condition above (an empty
        # string previously passed 'is not None' with no writer created).
        out_vid.release()
    cv2.destroyAllWindows()
def capture(config, video=None):
    """Capture RGB-D frames, measure curb height, and write it to a serial device.

    Continuously pulls aligned color/depth frames from a RealSense pipeline,
    extracts planar surfaces with Polylidar3D, estimates the curb height,
    and writes it to a serial handle.  Optionally shows the stacked
    color/depth visualization and records it to a video file.

    Parameters
    ----------
    config : dict
        Parsed application configuration.
    video : str, optional
        Path of an output video file (MJPG encoder, 30 FPS) for the stacked
        color/depth visualization.
    """
    # Configure streams
    pipeline, process_modules, filters, proj_mat, t265_device = create_pipeline(
        config)
    t265_pipeline = t265_device['pipeline']
    logging.info("Pipeline Created")

    # Long lived objects. These are the object that hold all the algorithms for surface extraction.
    # They need to be long lived (objects) because they hold state (thread scheduler, image datastructures, etc.)
    ll_objects = dict()
    ll_objects['pl'] = Polylidar3D(**config['polylidar'])
    ll_objects['ga'] = GaussianAccumulatorS2(level=config['fastga']['level'])
    ll_objects['ico'] = IcoCharts(level=config['fastga']['level'])

    if video:
        # Output frame is the color and depth images stacked side by side
        frame_width = config['color']['width'] * 2
        frame_height = config['color']['height']
        out_vid = cv2.VideoWriter(video,
                                  cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                  30, (frame_width, frame_height))

    all_records = []
    counter = 0
    try:
        while True:
            t00 = time.perf_counter()
            try:
                color_image, depth_image, meta = get_frames(
                    pipeline, t265_pipeline, process_modules, filters, config)
            except RuntimeError:
                # This only gets thrown when in playback mode from a recorded file when frames "run out"
                logging.info("Out of frames")
                break
            t0 = time.perf_counter()
            if color_image is None or not valid_frames(
                    color_image, depth_image, **
                    config['polygon']['frameskip']):
                logging.debug("Invalid Frames")
                continue
            t1 = time.perf_counter()
            counter += 1
            # if counter < 10:
            #     continue

            try:
                # Get 6DOF Pose at appropriate timestamp
                if config['tracking']['enabled']:
                    euler_t265 = get_pose_matrix(meta['ts'])
                    # logging.info('euler_t265: %r', euler_t265)

                if config['show_polygon']:
                    # planes, obstacles, geometric_planes, timings, o3d_mesh = get_polygon(depth_image, config, ll_objects, **meta)
                    planes, obstacles, geometric_planes, timings = get_polygon(
                        depth_image, config, ll_objects, **meta)
                    # Record per-frame timing diagnostics (milliseconds)
                    timings['t_get_frames'] = (t0 - t00) * 1000
                    timings['t_check_frames'] = (t1 - t0) * 1000
                    all_records.append(timings)

                    curb_height = analyze_planes(geometric_planes)
                    # NOTE(review): 'ser' is presumably a module-level serial
                    # port handle opened elsewhere in this file — confirm.
                    ser.write(("{:.2f}".format(curb_height) + "\n").encode())

                    # Plot polygon in rgb frame
                    plot_planes_and_obstacles(planes, obstacles, proj_mat,
                                              None, color_image, config)

                # Show images
                if config.get("show_images"):
                    # Convert to open cv image types (BGR)
                    color_image_cv, depth_image_cv = colorize_images_open_cv(
                        color_image, depth_image, config)
                    # Stack both images horizontally
                    images = np.hstack((color_image_cv, depth_image_cv))
                    cv2.imshow('RealSense Color/Depth (Aligned)', images)
                    if video:
                        out_vid.write(images)
                    res = cv2.waitKey(1)
                    # 'p' saves a snapshot of the current frame under a unique id
                    if res == ord('p'):
                        uid = uuid.uuid4()
                        logging.info("Saving Picture: {}".format(uid))
                        cv2.imwrite(
                            path.join(PICS_DIR, "{}_color.jpg".format(uid)),
                            color_image_cv)
                        cv2.imwrite(
                            path.join(PICS_DIR, "{}_stack.jpg".format(uid)),
                            images)
                    # 'm' shows debug views of the ico chart internals,
                    # then drops into an interactive ipdb session
                    if res == ord('m'):
                        plt.imshow(
                            np.asarray(ll_objects['ico'].image_to_vertex_idx))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].mask))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].image))
                        plt.show()
                        import ipdb
                        ipdb.set_trace()

                    # In playback mode, optionally save specific configured frames
                    to_save_frames = config['save'].get('frames')
                    if config['playback'][
                            'enabled'] and to_save_frames is not None and counter in to_save_frames:
                        logging.info("Saving Picture: {}".format(counter))
                        cv2.imwrite(
                            path.join(PICS_DIR,
                                      "{}_color.jpg".format(counter)),
                            color_image_cv)
                        cv2.imwrite(
                            path.join(PICS_DIR,
                                      "{}_stack.jpg".format(counter)), images)

                # logging.info(f"Frame %d; Get Frames: %.2f; Check Valid Frame: %.2f; Laplacian: %.2f; Bilateral: %.2f; Mesh: %.2f; FastGA: %.2f; Plane/Poly: %.2f; Filtering: %.2f; Geometric Planes: %.2f; Curb Height: %.2f",
                #              counter, timings['t_get_frames'], timings['t_check_frames'], timings['t_laplacian'], timings['t_bilateral'], timings['t_mesh'], timings['t_fastga_total'],
                #              timings['t_polylidar_planepoly'], timings['t_polylidar_filter'], timings['t_geometric_planes'] curb_height)
                # BUG(review): 'curb_height' is only bound when 'show_polygon'
                # is enabled; otherwise this raises NameError, which is caught
                # below and logged as "Error!" on every frame.  The 'f' prefix
                # is also superfluous — the %-style args do the formatting.
                logging.info(f"Curb Height: %.2f", curb_height)
            except Exception as e:
                logging.exception("Error!")
    finally:
        # Always stop the sensor pipeline, even on Ctrl-C or an unexpected error
        pipeline.stop()
    if video is not None:
        out_vid.release()
    cv2.destroyAllWindows()
Ejemplo n.º 3
0
def capture(config, video=None):
    """Capture RGB-D frames, measure the curb, and send the height to an RPi.

    Pulls aligned color/depth frames from a RealSense pipeline, extracts
    planar surfaces with Polylidar3D, computes the curb height plus the
    distance/angle to the curb, overlays them on the video feed, and sends
    the curb height to a Raspberry Pi over a TCP socket.

    Parameters
    ----------
    config : dict
        Parsed application configuration.
    video : str, optional
        Path of an output video file (MJPG encoder, 30 FPS) for the stacked
        color/depth visualization.
    """
    # Configure streams
    pipeline, process_modules, filters, proj_mat, t265_device = create_pipeline(config)
    t265_pipeline = t265_device['pipeline']
    logging.info("Pipeline Created")

    # Long lived objects. These hold all the algorithms for surface extraction.
    # They need to be long lived because they hold state (thread scheduler,
    # image data structures, etc.)
    ll_objects = dict()
    ll_objects['pl'] = Polylidar3D(**config['polylidar'])
    ll_objects['ga'] = GaussianAccumulatorS2(level=config['fastga']['level'])
    ll_objects['ico'] = IcoCharts(level=config['fastga']['level'])

    if video:
        # Output frame is the color and depth images stacked side by side
        frame_width = config['color']['width'] * 2
        frame_height = config['color']['height']
        out_vid = cv2.VideoWriter(video, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30, (frame_width, frame_height))

    all_records = []
    counter = 0
    try:
        while True:
            t00 = time.perf_counter()
            try:
                color_image, depth_image, meta = get_frames(pipeline, t265_pipeline, process_modules, filters, config)
            except RuntimeError:
                # This only gets thrown in playback mode from a recorded file when frames "run out"
                logging.info("Out of frames")
                break
            t0 = time.perf_counter()
            if color_image is None or not valid_frames(color_image, depth_image, **config['polygon']['frameskip']):
                logging.debug("Invalid Frames")
                continue
            t1 = time.perf_counter()
            counter += 1

            try:
                # Get 6DOF pose at the appropriate timestamp
                if config['tracking']['enabled']:
                    euler_t265 = get_pose_matrix(meta['ts'])
                    logging.info('euler_t265: %r', euler_t265)

                # BUGFIX: defaults so the overlay/logging below never hit an
                # unbound name when curb detection fails or 'show_polygon' is
                # disabled (previously a NameError was raised and logged as
                # "Error!" on those frames).
                curb_height = float('nan')
                dist = float('nan')
                theta = float('nan')
                if config['show_polygon']:
                    planes, obstacles, geometric_planes, timings = get_polygon(depth_image, config, ll_objects, **meta)
                    # Record per-frame timing diagnostics (milliseconds)
                    timings['t_get_frames'] = (t0 - t00) * 1000
                    timings['t_check_frames'] = (t1 - t0) * 1000
                    all_records.append(timings)

                    curb_height, first_plane, second_plane = analyze_planes(geometric_planes)

                    # Curb height must be greater than 2 cm and first_plane must have been found
                    if curb_height > 0.02 and first_plane is not None:
                        square_points, normal_svm, center = hplane(first_plane, second_plane)
                        dist, theta = get_theta_and_distance(normal_svm, center, first_plane['normal_ransac'])
                        logging.info("Frame #: %s, Distance: %.02f meters, Theta: %.01f degrees", counter, dist, theta)
                        # Draw the detected curb edge on the color image
                        plot_points(square_points, proj_mat, color_image, config)
                    else:
                        logging.warning("Couldn't find the street and sidewalk surface")

                    # Send the RPi the curb height as a big-endian double.
                    # BUGFIX: struct.pack('!d', ...) requires a float; the
                    # original passed the *formatted string* and raised
                    # struct.error on every frame.
                    # NOTE(review): a new connection is opened per frame —
                    # consider hoisting the socket out of the loop.
                    s = socket.socket()
                    host = "169.254.41.103"  # This is your Server IP!
                    port = 2345
                    s.connect((host, port))
                    data = struct.pack('!d', round(curb_height, 2))
                    s.send(data)
                    rece = s.recv(1024)
                    print("Received", rece)
                    s.close()

                    # Plot polygon in rgb frame
                    plot_planes_and_obstacles(planes, obstacles, proj_mat, None, color_image, config)

                # Show images
                if config.get("show_images"):
                    # Convert to open cv image types (BGR)
                    color_image_cv, depth_image_cv = colorize_images_open_cv(color_image, depth_image, config)
                    # Stack both images horizontally
                    images = np.hstack((color_image_cv, depth_image_cv))
                    # Overlay the measurements on the stacked image
                    cv2.putText(images, 'Curb Height: {:.2f}m'.format(curb_height), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                    cv2.putText(images, 'Distance from Curb: {:.2f}m'.format(dist), (10, 215), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                    cv2.putText(images, 'Angle to the Curb: {:.2f}deg'.format(theta), (10, 230), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
                    cv2.imshow('RealSense Color/Depth (Aligned)', images)

                    if video:
                        out_vid.write(images)

                    res = cv2.waitKey(1)
                    if res == ord('p'):
                        # 'p' saves a snapshot of the current frame under a unique id
                        uid = uuid.uuid4()
                        logging.info("Saving Picture: {}".format(uid))
                        cv2.imwrite(path.join(PICS_DIR, "{}_color.jpg".format(uid)), color_image_cv)
                        cv2.imwrite(path.join(PICS_DIR, "{}_stack.jpg".format(uid)), images)
                    if res == ord('m'):
                        # 'm' shows debug views of the ico chart internals
                        plt.imshow(np.asarray(ll_objects['ico'].image_to_vertex_idx))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].mask))
                        plt.show()
                        plt.imshow(np.asarray(ll_objects['ico'].image))
                        plt.show()

                    # In playback mode, optionally save specific configured frames
                    to_save_frames = config['save'].get('frames')
                    if config['playback']['enabled'] and to_save_frames is not None and counter in to_save_frames:
                        logging.info("Saving Picture: {}".format(counter))
                        cv2.imwrite(path.join(PICS_DIR, "{}_color.jpg".format(counter)), color_image_cv)
                        cv2.imwrite(path.join(PICS_DIR, "{}_stack.jpg".format(counter)), images)

                logging.info("Curb Height: %.2f", curb_height)

            except Exception:
                logging.exception("Error!")
    finally:
        # Always stop the sensor pipeline, even on Ctrl-C or an unexpected error
        pipeline.stop()
    if video:
        # BUGFIX: guard now matches the creation condition above (an empty
        # string previously passed 'is not None' with no writer created).
        out_vid.release()
    cv2.destroyAllWindows()