Code Example #1
def sync_imu(ri, pipeline_1, frames_t265, start_time, current_time, dt):
    """Advance the T265 pose stream until its timestamp catches up with current_time (within dt)."""
    time_imu = 0.0
    # Keep fetching pose frames while the IMU timestamp lags current_time by at least dt
    while ((current_time -
            (ri.get_pose_frames(frames_t265).get_timestamp() - start_time)) >= dt):
        frames_t265 = ri.get_frames(pipeline_1)
        time_imu = ri.get_pose_frames(frames_t265).get_timestamp() - start_time

    return frames_t265, time_imu
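
A minimal usage sketch of sync_imu (assuming the ri wrapper and two started pipelines, pipeline_1 for the T265 and pipeline_2 for the SR300, as in Code Example #4; the loop and variable names here are illustrative, not taken from the source):

# Hedged sketch: keep the T265 pose stream caught up with the SR300 depth stream.
dt = 1.0 / 30  # frame period used in the other examples
frames_t265 = ri.get_frames(pipeline_1)
start_time_imu = ri.get_pose_frames(frames_t265).get_timestamp()
start_time_depth = ri.get_depth_frames(ri.get_frames(pipeline_2)).get_timestamp()

while True:
    frames_sr300 = ri.get_frames(pipeline_2)
    depth_frame = ri.get_depth_frames(frames_sr300)
    current_time = depth_frame.get_timestamp() - start_time_depth
    # Advance the pose stream until it is within dt of the current depth timestamp
    frames_t265, time_imu = sync_imu(ri, pipeline_1, frames_t265,
                                     start_time_imu, current_time, dt)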
Code Example #2
def main(args):
    # Assumes pipeline_1 has already been created and started (see Code Example #3)
    while True:

        # Wait for a coherent pair of frames: depth and color
        frames_sr300 = ri.get_frames(pipeline_1)
        depth_frame = ri.get_depth_frames(frames_sr300)
        color_frame = ri.get_color_frames(frames_sr300)
        if not depth_frame or not color_frame:
            continue

        # Convert images to numpy arrays
        depth_image = ri.convert_img_to_nparray(depth_frame)
        color_image = ri.convert_img_to_nparray(color_frame)

        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = ri.get_depth_colormap(depth_image, 0.03)

        # Stack both images horizontally
        images = np.hstack((color_image, depth_colormap))

        # Show images
        if args.minimal:
            ri.show_imgs('SR300', images, 1)
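
The args namespace above presumably comes from argparse; a hypothetical parser matching the flags used across these examples could look like the following (every flag name is inferred from how args is used, not confirmed by the source):

import argparse

# Hypothetical parser; flag names are inferred from usage of `args` in these examples.
parser = argparse.ArgumentParser()
parser.add_argument('--minimal', action='store_true',
                    help='show the stacked color/depth preview')
parser.add_argument('--correct', action='store_true',
                    help='apply ego-motion correction to the optical flow (Code Example #5)')
parser.add_argument('--s', type=int, default=299,
                    help='side length of the square network input')
parser.add_argument('--tdir', type=str, default='dataset',
                    help='target directory for the generated dataset')
args = parser.parse_args()
main(args)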
Code Example #3
# Get config and pipeline for the stream
config_1 = ri.get_config()
pipeline_1 = ri.get_pipeline()

# Enable SR300 stream
ri.enable_stream_sr300(config_1, 640, 480, 30)

# Start pipeline
pipeline_1.start(config_1)


try:
    while True:

        # Wait for a coherent pair of frames: depth and color
        frames_sr300 = ri.get_frames(pipeline_1)
        depth_frame = ri.get_depth_frames(frames_sr300)
        color_frame = ri.get_color_frames(frames_sr300)
        if not depth_frame or not color_frame:
            continue

        # Convert images to numpy arrays
        depth_image = ri.convert_img_to_nparray(depth_frame)
        color_image = ri.convert_img_to_nparray(color_frame)


        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = ri.get_depth_colormap(depth_image, 0.03)

        # Stack both images horizontally
        images = np.hstack((color_image, depth_colormap))
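
The try: block above is cut off; in the full script the loop typically closes by showing the stacked images, and the stream is stopped in a finally: clause (this is the pattern used in Code Example #6):

        # Show images
        ri.show_imgs('SR300', images, 1)

except (KeyboardInterrupt, SystemExit):
    print("Program exited.")

finally:
    # Stop streaming
    pipeline_1.stop()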
Code Example #4
config_1 = ri.get_config()
config_2 = ri.get_config()

# Get pipelines for the two streams
pipeline_1 = ri.get_pipeline()
pipeline_2 = ri.get_pipeline()

# Start the cameras
profile_1 = tests.start_t265(config_1, pipeline_1, "t265.bag")
profile_2 = tests.start_sr300(config_2, pipeline_2, width_sr300, height_sr300,
                              framerate_sr300, "sr300.bag")

try:

    # Wait for a coherent pair of frames: depth and color
    frames_sr300 = ri.get_frames(pipeline_2)
    depth_frame_prev = ri.get_depth_frames(frames_sr300)
    color_frame_prev = ri.get_color_frames(frames_sr300)

    # IMU
    frames_t265 = ri.get_frames(pipeline_1)

    # Sync start times, in msec
    start_time_imu = ri.get_pose_frames(frames_t265).get_timestamp()
    start_time_depth = depth_frame_prev.get_timestamp()
    start_time_color = color_frame_prev.get_timestamp()
    start_time_robot = robot_states[0].timestamp
    start_time_robot_ball = robot_states_ball[0].timestamp

    while ((depth_frame_prev.get_timestamp() - start_time_depth) <
           time_crop_point_start):
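
The body of this cropping loop is not shown; a plausible continuation (assuming dt = 1/30 as in the other examples) simply advances the SR300 stream until the requested crop start time and keeps the T265 pose stream in step via sync_imu from Code Example #1:

        # Advance the SR300 stream until the requested crop start time is reached
        frames_sr300 = ri.get_frames(pipeline_2)
        depth_frame_prev = ri.get_depth_frames(frames_sr300)
        color_frame_prev = ri.get_color_frames(frames_sr300)

        # Keep the T265 pose stream in step with the depth stream (Code Example #1)
        current_time = depth_frame_prev.get_timestamp() - start_time_depth
        frames_t265, time_imu = sync_imu(ri, pipeline_1, frames_t265,
                                         start_time_imu, current_time, dt)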
Code Example #5
def create_dataset(args, pipeline, playback):

    import utils
    import videoseg.src.utils as nlc_utils
    import videoseg.src.run_full_mod as nlc
    from importlib import reload  # reload() is not a builtin in Python 3
    if nlc:
        reload(nlc)

    # Emf settings
    step = 16
    dt = 1.0 / 30 #s

    with open('cam_robot_transform_config.yaml') as file:
        # The FullLoader parameter handles the conversion from YAML
        # scalar values to the Python dictionary format
        reg_params = yaml.load(file, Loader=yaml.FullLoader)
        R_cam_robot = np.matrix(reg_params.get('R_cam_robot'))
        t_cam_robot = np.matrix(reg_params.get('t_cam_robot'))
    print("Camera registration read succesfully!\nR_cam_robot:\n{}\nt_cam_robot:\n{}".format(R_cam_robot,t_cam_robot))


    T_cam_robot = np.append(R_cam_robot, t_cam_robot, axis = 1)
    T_cam_robot = np.append(T_cam_robot, np.matrix('0 0 0 1'), axis = 0)

    R_robot_cam = R_cam_robot.transpose()
    t_robot_cam = -1.0 * R_robot_cam.dot(t_cam_robot)
    T_robot_cam = np.append(R_robot_cam, t_robot_cam, axis = 1)
    T_robot_cam = np.append(T_robot_cam, np.matrix('0 0 0 1'), axis = 0)

    counter = 1
    frame_num = 0

    # Despite the name, this evaluates to microseconds (the duration timedelta is divided by a 1 µs timedelta);
    # it is compared against playback.get_position()*0.001 below
    playback_duration_in_ms = playback.get_duration()/timedelta(microseconds=1)

    vids = []
    dirs = []

    with open('data_correct.txt', 'r') as f:
        robot_states = f.readlines()
        while abs(playback.get_position()*0.001 - playback_duration_in_ms) > 10:

            robot_velocity_at_frame = None
            robot_ang_velocity_at_frame = None

            robot_states_after = next((line for line in robot_states if float(line.split(',')[1])*1000000 > playback.get_position()*0.001))
            if robot_states.index(robot_states_after) != 0:
                robot_states_before = robot_states[robot_states.index(robot_states_after)-1]
                timestamp_before = float(robot_states_before.split(',')[1])*1000000
                timestamp_of_frame = playback.get_position()*0.001
                timestamp_after = float(robot_states_after.split(',')[1])*1000000
                robot_states_before = json.loads(robot_states_before.split(',')[0].replace(' ', ','))
                print(robot_states_after.split(',')[0])
                robot_states_after = json.loads(robot_states_after.split(',')[0].replace(' ', ','))
                ratio = (timestamp_of_frame - timestamp_before) / (timestamp_after - timestamp_before)
                robot_velocity_at_frame = ratio * (robot_states_after[3] - robot_states_before[3]) + robot_states_before[3]
                robot_ang_velocity_at_frame = ratio * (robot_states_after[4] - robot_states_before[4]) + robot_states_before[4]

            directory = os.path.join(args.tdir,str(counter).zfill(5))
            if not os.path.exists(directory):
                os.makedirs(os.path.join(directory,'imgs'))
                os.makedirs(os.path.join(directory,'video'))
                os.makedirs(os.path.join(directory,'np_arrays'))

            # Wait for a coherent pair of frames: depth and color
            frames_sr300 = ri.get_frames(pipeline)
            depth_frame = ri.get_depth_frames(frames_sr300)
            color_frame = ri.get_color_frames(frames_sr300)
            if not depth_frame or not color_frame:
                continue

            # Convert images to numpy arrays
            depth_image = ri.convert_img_to_nparray(depth_frame)
            color_image = ri.convert_img_to_nparray(color_frame)
            image = color_image

            ih,iw,ic = color_image.shape
            if args.correct:
                color_image_original = color_image
            color_image=color_image[:,int((iw-ih)/2):int((iw+ih)/2),:]
            color_image = cv2.resize(color_image, (args.s,args.s))


            if frame_num == 0:
                imseq = np.array([color_image])
                inputs = np.array([])
                if args.correct:
                    imseq_original = np.array([color_image_original])

                    # Align depth to color
                    depth_frame_prev_aligned = emf.align_depth_to_color(frames_sr300)

                    # Deprojection
                    deproject_flow_prev = emf.deproject(depth_frame_prev_aligned, step=step)
            else:
                if args.correct:
                    prevgray_original = cv2.cvtColor(imseq_original[-1], cv2.COLOR_BGR2GRAY)
                    gray_original = cv2.cvtColor(color_image_original, cv2.COLOR_BGR2GRAY)
                    imseq_original = np.append(imseq_original, [color_image_original], axis=0)
                    flow_original = cv2.calcOpticalFlowFarneback(prevgray_original, gray_original, None, pyr_scale = 0.5, levels = 3, winsize = 15, iterations = 3, poly_n = 5, poly_sigma = 1.2, flags = 0)
                prevgray = cv2.cvtColor(imseq[-1], cv2.COLOR_BGR2GRAY)
                gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
                imseq = np.append(imseq, [color_image], axis=0)
                flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, pyr_scale = 0.5, levels = 3, winsize = 15, iterations = 3, poly_n = 5, poly_sigma = 1.2, flags = 0)
                ri.show_imgs('Optical flow', emf.draw_flow(gray, flow.astype(int), step=step), 1)
                if args.correct:
                    # Get the previous and the current pixel locations
                    lines = emf.get_lines(gray_original, flow_original, step=step)

                    # Align depth to color
                    depth_frame_aligned = emf.align_depth_to_color(frames_sr300)

                    # Deproject the current pixels for 3D optical flow
                    deproject_flow = emf.deproject_flow_new(depth_frame_aligned, lines, step=step)

                    # Calculate 3D optical flow
                    diff_flow = emf.flow_3d(deproject_flow, deproject_flow_prev, dt)
                    diff_flow_robot = emf.transform_velocites(diff_flow, T_robot_cam)

                    if robot_velocity_at_frame is not None and robot_ang_velocity_at_frame is not None:
                        v_robot = np.matrix([[robot_velocity_at_frame],  [0] , [0]])
                        omega_robot = np.matrix([[0],  [0] , [robot_ang_velocity_at_frame]])

                        deprojected_coordinates_robot = emf.transform_points(deproject_flow, T_robot_cam)

                        velocities_from_egomotion_robot = \
                            emf.velocity_from_point_clouds_robot_frame(deprojected_coordinates_robot, \
                                            v_robot, omega_robot)
                        
                        threshold_lower = abs(v_robot[0][0])*0.001
                        threshold_upper = 0.5

                        egomotion_filtered_flow = \
                            emf.velocity_comparison(depth_frame_aligned, \
                                            diff_flow_robot, \
                                            velocities_from_egomotion_robot, \
                                            threshold_lower, threshold_upper, step=step)

                        flow = emf.filtered_flow_2d(egomotion_filtered_flow, \
                                        flow_original, step=step)

                        flow = flow[:,int((iw-ih)/2):int((iw+ih)/2)]
                        flow = cv2.resize(flow, (299,299))

                        ri.show_imgs('Optical flow filtered', \
                            emf.draw_flow(gray, flow, step=step), 1)
                
                flow_normalized = utils.normalize_flow(flow)
                gray = (gray-gray.min())/(gray.max()-gray.min())
                gray = np.reshape(gray,(args.s,args.s,1))
                conc_img = np.concatenate((flow_normalized,gray), axis=2)

                if frame_num == 1:
                    inputs = np.array([conc_img])
                else:
                    inputs = np.append(inputs, [conc_img], axis=0)

                if args.correct:
                    deproject_flow_prev = deproject_flow
                    depth_frame_prev_aligned = depth_frame_aligned
            
                cv2.imwrite('%s/imgs/input%s.jpg' % (directory,str(frame_num).zfill(5)), cv2.cvtColor((conc_img*255).astype(np.uint8),cv2.COLOR_RGB2BGR))
            cv2.imwrite('%s/imgs/frame%s.jpg' % (directory,str(frame_num).zfill(5)), color_image)


            frame_num += 1

            if frame_num == 100 or not abs(playback.get_position()*0.001 - playback_duration_in_ms) > 10:
                frame_num = 0
                counter += 1
                np.save('%s/np_arrays/imgs.npy' % directory, imseq)
                np.save('%s/np_arrays/inputs.npy' % directory, inputs)
                vids.append(imseq)
                dirs.append(directory)

            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            depth_colormap = ri.get_depth_colormap(depth_image, 0.03)

            # Stack both images horizontally
            images = np.hstack((image, depth_colormap))

            # Show images
            ri.show_imgs('SR300', images, 1)
    
    cv2.destroyAllWindows()
    masks = nlc.demo_images(vids=vids)
    for i in range(len(vids)):
        nlc_utils.im2vid('%s/video/vid.avi' % dirs[i], vids[i], masks[i])
        np.save('%s/np_arrays/masks.npy' % dirs[i], masks[i])
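
A hedged sketch of how create_dataset() might be driven from a recorded .bag file, given an args namespace as in Code Example #2. It assumes ri.get_config()/ri.get_pipeline() return standard pyrealsense2 config and pipeline objects; enable_device_from_file(), as_playback() and set_real_time() are stock pyrealsense2 calls, but their use here is an assumption, not confirmed by the source:

# Hedged usage sketch, not confirmed by the source.
config = ri.get_config()
pipeline = ri.get_pipeline()
config.enable_device_from_file("sr300.bag")      # play back a recorded stream
profile = pipeline.start(config)
playback = profile.get_device().as_playback()
playback.set_real_time(False)                    # step through frames without dropping any
create_dataset(args, pipeline, playback)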
Code Example #6
def run_obstacle_detection(registration_file):

    # Make an instance of the network
    net = network.Network()


    # Point cloud visualization:  https://github.com/heremaps/pptk

    class SpeedSource(Enum):
        CONST = 1
        ROBOT = 2

    zeros_one = np.matrix('0 0 0 1')

    # Velocity robot
    v_robot_const = np.matrix('0; 0; 0')
    omega_robot_const = np.matrix('0; 0; 0')

    with open(registration_file) as file:
        # The FullLoader parameter handles the conversion from YAML
        # scalar values to the Python dictionary format
        reg_params = yaml.load(file, Loader=yaml.FullLoader)
        R_cam_robot = np.matrix(reg_params.get('R_cam_robot'))
        t_cam_robot = np.matrix(reg_params.get('t_cam_robot'))
    print("Camera registration read succesfully!\nR_cam_robot:\n{}\nt_cam_robot:\n{}".format(R_cam_robot,t_cam_robot))


    T_cam_robot = np.append(R_cam_robot, t_cam_robot, axis = 1)
    T_cam_robot = np.append(T_cam_robot, zeros_one, axis = 0)
    #print(T_cam_robot)

    R_robot_cam = R_cam_robot.transpose()
    t_robot_cam = -1.0 * R_robot_cam.dot(t_cam_robot)
    T_robot_cam = np.append(R_robot_cam, t_robot_cam, axis = 1)
    T_robot_cam = np.append(T_robot_cam, zeros_one, axis = 0)
    #print(T_robot_cam)


    # Algorithm settings
    step = 16
    threshold = 0.001

    speed_source = SpeedSource.ROBOT


    if speed_source == SpeedSource.CONST:
        print("\nSpeed source CONST\n")
    elif speed_source == SpeedSource.ROBOT:
        print("\nSpeed source ROBOT\n")


    # Get config for the stream
    config_1 = ri.get_config()


    # Get pipeline for the stream
    pipeline_1 = ri.get_pipeline()


    # Enable SR300 stream
    ri.enable_stream_sr300(config_1, 640, 480, 30)
    dt = 1.0 / 30 #s

    # Start pipeline
    pipeline_1.start(config_1)


    try:

        # Wait for a coherent pair of frames: depth and color
        frames_sr300 = ri.get_frames(pipeline_1)
        depth_frame = ri.get_depth_frames(frames_sr300)
        color_frame = ri.get_color_frames(frames_sr300)


        # Convert images to numpy arrays
        depth_image_prev = ri.convert_img_to_nparray(depth_frame)
        color_image_prev = ri.convert_img_to_nparray(color_frame)

        # Convert RGB image to gray (for optical flow)
        gray_prev = emf.rgb_to_gray(color_image_prev)
        ih,iw = gray_prev.shape


        # Align depth to color
        depth_frame_prev_aligned = emf.align_depth_to_color(frames_sr300)

        # Deprojection
        deproject_flow_prev = emf.deproject(depth_frame_prev_aligned, step=step)
        #deprojected_prev = emf.deproject(depth_frame_prev_aligned, 0, 479)

        # Visualize point cloud
        v = pptk.viewer(deproject_flow_prev)
        v.color_map('cool')
        #######################################################

        while True:

            # Wait for a coherent pair of frames: depth and color
            frames_sr300 = ri.get_frames(pipeline_1)
            depth_frame = ri.get_depth_frames(frames_sr300)
            color_frame = ri.get_color_frames(frames_sr300)
            if not depth_frame or not color_frame:
                continue

            # Read images
            depth_image = ri.convert_img_to_nparray(depth_frame)
            color_image = ri.convert_img_to_nparray(color_frame)



            # Convert RGB image to gray (for optical flow)
            gray = emf.rgb_to_gray(color_image)

            #######################################################



            # Calculate optical flow
            flow = cv2.calcOpticalFlowFarneback(gray_prev, gray,
                            None, 0.5, 5, 15, 3, 5, 1,
                            cv2.OPTFLOW_FARNEBACK_GAUSSIAN)

            # Get the previous and the current pixel locations
            lines = emf.get_lines(gray, flow, step=step)

            # Align depth to color
            depth_frame_aligned = emf.align_depth_to_color(frames_sr300)


            # Deproject the current pixels for 3D optical flow
            deproject_flow = emf.deproject_flow_new(depth_frame_aligned, lines, step=step)

            # Calculate 3D optical flow
            diff_flow = emf.flow_3d(deproject_flow, deproject_flow_prev,dt)
            diff_flow_robot = emf.transform_velocites(diff_flow, T_robot_cam)

            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            depth_colormap = ri.get_depth_colormap(depth_image, 1)

            # Stack both images horizontally
            images = np.hstack((color_image, depth_colormap))
            ri.show_imgs('Optical flow', emf.draw_flow(gray, flow.astype(int), step=step), 1)

            #######################################################

            socket.subscribe(b'd\x02\x00\x03')
            # Choose the speed source based on the received flag
            if socket.recv() == True:
                speed_source = SpeedSource.ROBOT
            else:
                speed_source = SpeedSource.CONST

            if speed_source == SpeedSource.CONST:
                v_robot = v_robot_const
                omega_robot = omega_robot_const
            elif speed_source == SpeedSource.ROBOT:
                #topic = socket.recv()
                array_buffer = socket.recv()
                data_bytearray = bytearray(array_buffer)
                msg = Msg.Msg.GetRootAsMsg(array_buffer,0)
                msg_as_np = msg.ValueVectorAsNumpy()
    
                x_sensorfusion = msg_as_np[0]
                y_sensorfusion = msg_as_np[1]
                phi_sensorfusion = msg_as_np[2]
                v_sensorfusion = msg_as_np[3]
                omega_sensorfusion = msg_as_np[4]
    
                v_vehicle_0 = math.cos(phi_sensorfusion)*v_sensorfusion
                v_vehicle_1 = math.sin(phi_sensorfusion)*v_sensorfusion
    
                v_robot = np.matrix([[v_vehicle_0],  [v_vehicle_1] , [0]])
                omega_robot = np.matrix([[0],  [0] , [omega_sensorfusion]])

                print(v_robot)


            deprojected_coordinates_robot = emf.transform_points(deproject_flow, T_robot_cam)
            velocities_from_egomotion_robot = \
                emf.velocity_from_point_clouds_robot_frame(deprojected_coordinates_robot, \
                                            v_robot, omega_robot)


            # Compare the velocities
            egomotion_filtered_flow = \
                emf.velocity_comparison(depth_frame_aligned, \
                                            diff_flow_robot, \
                                            velocities_from_egomotion_robot, \
                                            threshold, step=step)

            nonzero_elements = egomotion_filtered_flow[np.nonzero(\
                                        egomotion_filtered_flow > 0)]
            nonzero_indices = np.where(egomotion_filtered_flow != 0)[0]

            filtered_to_flow = emf.filtered_flow_2d(egomotion_filtered_flow, \
                                        flow, step=step)
            ri.show_imgs('Optical flow filtered', \
                            emf.draw_flow(gray, filtered_to_flow, step=step), 1)

            full_len = deproject_flow.shape[0]*deproject_flow.shape[1]

            deproject_flow_flat = deproject_flow.reshape(full_len,3)
            three_d_flow_x = np.squeeze(diff_flow[:,:,0].reshape(full_len,1))

            #######################################################
            # Prepare data for network

            use_filtered_flow = True

            if (use_filtered_flow):
                flow_for_nn=filtered_to_flow[:,int((iw-ih)/2):int((iw+ih)/2)]
            else:
                flow_for_nn=flow[:,int((iw-ih)/2):int((iw+ih)/2)]

            flow_for_nn = cv2.resize(flow_for_nn, (299,299))

            gray_resized = gray[:,int((iw-ih)/2):int((iw+ih)/2)]
            gray_resized = cv2.resize(gray_resized, (299,299))

            #######################################################
            # Obstacle detection

            conc_img = utils.form_network_input(gray_resized, flow_for_nn)

            # Make predictions with the network
            prediction,mask = net.predict(conc_img)

            # Visualization
            viz = cv2.resize(np.reshape(mask,(30,30)), (299,299))
            mask_small = np.reshape(mask,(30,30))
            #mask_small = np.zeros((30,30)) # for validation
            viz_2 = cv2.resize(np.reshape(prediction,(30,30)), (299,299))
            utils.visualize_flow(viz)
            utils.visualize_flow(viz_2, name='pred')
            utils.visualize_flow(gray_resized, name='imgs')


            #######################################################
            # Bounding box

            mask_small_blob = mask_small.astype(np.uint8)
            mask_small_blob = emf.find_largest_blob(mask_small_blob)
            utils.visualize_flow(np.array(cv2.resize(mask_small_blob, (299,299)), dtype=np.float32), name='mask_small_blob')
            blob_max = mask_small_blob.reshape((mask_small_blob.shape[0]*mask_small_blob.shape[1])).max(axis=0)

            if (blob_max > 0):
                bh, bw, bd = deprojected_coordinates_robot.shape
                deprojected_coordinates_robot_small=deprojected_coordinates_robot[:,int((bw-bh)/2):int((bw+bh)/2),:]
                bb = emf.get_3D_bounding_box(deprojected_coordinates_robot_small, mask_small_blob)
                
                blob_avg_coords = emf.avg_coords(deprojected_coordinates_robot_small, mask_small_blob)
                blob_width = emf.max_blob_width(deprojected_coordinates_robot_small, mask_small_blob, depth_frame_aligned, blob_avg_coords, step = step)
                blob_send = [blob_avg_coords, blob_width]
                print("Bounding box:\n({}\t{}\t{})\n({}\t{}\t{})".format( \
                        bb.get('x1'), bb.get('y1'), bb.get('z1'), \
                        bb.get('x2'), bb.get('y2'), bb.get('z2')))
            else:
                bb = {'x1': math.nan, 'y1': math.nan, 'z1': math.nan,\
                      'x2': math.nan, 'y2': math.nan, 'z2': math.nan}
                blob_send = [0, 0, 0, 0]  
            


            # Display point cloud
            emf.show_pointcloud(v, deproject_flow_flat, three_d_flow_x)

            # Calculate props of the moving object
            eh, ew, ed = egomotion_filtered_flow.shape
            egomotion_filtered_flow_masked=egomotion_filtered_flow[:,\
                                            int((ew-eh)/2):int((ew+eh)/2),:]

            # Mask result on point velocities
            egomotion_filtered_flow_masked = np.multiply(egomotion_filtered_flow_masked,\
                                np.stack((mask_small, mask_small, mask_small), axis=2))

            velocity_mean_nonzero_elements, velocity_std_nonzero_elements = \
                            emf.calc_mean_velocity(egomotion_filtered_flow_masked)
            print("Result velocity mean:\t{} [m/s]"\
                    .format(velocity_mean_nonzero_elements))
            print("Result velocity std:\t{} [m/s]"\
                    .format(velocity_std_nonzero_elements))

            deproject_flow_masked=deproject_flow[:,\
                                    int((ew-eh)/2):int((ew+eh)/2),:]

            # Mask result on point positions
            deproject_flow_masked = np.multiply(deproject_flow_masked,\
                        np.stack((mask_small, mask_small, mask_small), axis=2))
            depth_mean_z, depth_std_z = \
                emf.calc_mean_depth(egomotion_filtered_flow_masked, \
                                        deproject_flow_masked)
            print("Result depth mean:\t{} [m]".format(depth_mean_z))
            print("Result depth std:\t{} [m]".format(depth_std_z))



            #######################################################
            # ZMQ publish blob
            blob_msg_full = [blob_send, velocity_mean_nonzero_elements]
            socket_p.send_pyobj(blob_msg_full)

            #######################################################

            # Because of optical flow we have to change the images
            gray_prev = gray

            # Change the points as well
            deproject_flow_prev = deproject_flow
            depth_frame_prev_aligned = depth_frame_aligned

            print("")


        print("Executed succesfully.")


    except (KeyboardInterrupt, SystemExit):
        print("Program exited.")

    finally:
        # Stop streaming
        pipeline_1.stop()
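
The obstacle description is published above with socket_p.send_pyobj(blob_msg_full). A minimal subscriber sketch on the receiving side; the endpoint and the empty topic filter are assumptions, only pyzmq's standard SUB socket and recv_pyobj() are taken as given:

import zmq

# Hedged sketch of a consumer for the blob messages published in Code Example #6.
context = zmq.Context()
socket_s = context.socket(zmq.SUB)
socket_s.connect("tcp://localhost:5555")        # hypothetical endpoint
socket_s.setsockopt(zmq.SUBSCRIBE, b"")         # receive every published message

while True:
    # blob_msg_full = [blob_send, velocity_mean_nonzero_elements]
    blob_send, velocity_mean = socket_s.recv_pyobj()
    print("Obstacle:", blob_send, "mean velocity [m/s]:", velocity_mean)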