Example #1
# NOTE: the project-specific imports below (ball detection, trajectory
# estimation, catching-point calculation, TCP client, visualization helper)
# are inferred from how they are used in this example; the actual module
# names may differ in the original project.
import time
from collections import deque

import cv2
import numpy as np
import pyrealsense2 as rs

import ball_detection as bd
import ball_trajectory_estimation as bte
import catching_point_calculation as cpc
from tcp_client import TCPClient
from visualization import visualization


def main():

    # Open a TCP connection to the robot
    client = TCPClient()

    # Startup realsense pipeline
    pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 60)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 60)

    # Start streaming
    profile = pipeline.start(config)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
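    # The depth scale converts the raw 16-bit depth units into meters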
    print("Depth Scale is: " , depth_scale)

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to other frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    # Declare depth filters
    dec_filter = rs.decimation_filter()      # Decimation - reduces depth frame density
    spat_filter = rs.spatial_filter()        # Spatial    - edge-preserving spatial smoothing
    hole_filling = rs.hole_filling_filter()  # Hole filling - fills small holes in the depth data
    temp_filter = rs.temporal_filter()       # Temporal   - reduces temporal noise

    depth_to_disparity = rs.disparity_transform(True)
    disparity_to_depth = rs.disparity_transform(False)
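    # Note: the spatial and temporal filters are applied in the disparity
    # domain (depth -> disparity -> filter -> disparity -> depth), which
    # matches the post-processing order recommended in the librealsense
    # documentation.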

    # Initialize variables for trajectory calculation
    buffer_len = 30  # number of points taken into account for the trajectory calculation
    pts = deque(maxlen=buffer_len)
    camera_pts = deque(maxlen=buffer_len)
    time_vec = deque(maxlen=buffer_len)
    tic = time.time()
    tic_frame = None
    none_count = 0

    prediction_store = []
    points_store = []
    catch_point = []

    # Main loop over video frames
    while True:
        # Wait for frames from realsense
        frames = pipeline.wait_for_frames()
        # Align the depth frame to color frame
        aligned_frames = align.process(frames)

        # Get aligned frames
        aligned_depth_frame = aligned_frames.get_depth_frame()  # aligned_depth_frame is a 640x480 depth image
        color_frame = aligned_frames.get_color_frame()

        # Validate that both frames are valid before using them
        if not aligned_depth_frame or not color_frame:
            print('Frame is None')
            continue

        # Filter the aligned depth frame
        # aligned_depth_frame = dec_filter.process(aligned_depth_frame)  # optional decimation
        aligned_depth_frame = depth_to_disparity.process(aligned_depth_frame)
        aligned_depth_frame = spat_filter.process(aligned_depth_frame)
        aligned_depth_frame = temp_filter.process(aligned_depth_frame)
        aligned_depth_frame = disparity_to_depth.process(aligned_depth_frame)
        aligned_depth_frame = hole_filling.process(aligned_depth_frame)

        # Convert the frames to numpy arrays to work on
        depth_image = np.asanyarray(aligned_depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        
        # Detect the orange ball and return its image position and radius
        center, radius = bd.detect_ball(color_image)

        # update the points queue
        if center is not None:
            
            # Get the depth at the ball center, append it to the 2D center,
            # and append the resulting 3D point to the camera-frame points list
            depth = depth_image[center[1], center[0]]
            if not tic_frame:
                tic_frame = time.time()
            center.append(depth)
            camera_pts.append(center)

            # Transform point from camera coordinates to robot coordinate frame
            center_world = bte.transform_to_world(center)
            pts.append(center_world)
            points_store.append(center_world)

            # Append the current time to the time vector
            toc = time.time()
            time_vec.append(toc - tic)

        else:
            none_count += 1


        # If no ball was detected for some time (10 frames), reset the point
        # vectors and the polynomial calculation
        if none_count > 10:
            pts.clear()
            camera_pts.clear()
            time_vec.clear()
            none_count = 0

        # If more than seven ball positions were detected, estimate the trajectory
        if len(pts) > 7:
            toce = time.time()
            params_x, params_y, params_z = bte.estimate_trajectory(np.asarray(pts), np.asarray(time_vec))

            catch_point = cpc.get_catching_point(params_x, params_y, params_z)


            # Send the catching point to the robot
            if catch_point is not None:
                client.send_message(np.round(catch_point, 2))
                print("Processing time:", (time.time() - toce))
                print("Sent point: ", np.round(catch_point, 2))
                catch_point_camera = bte.transform_to_camera(catch_point)
                cv2.drawMarker(color_image, tuple(catch_point_camera.astype(int)[:2]), (0, 255, 0), cv2.MARKER_CROSS, 10)

            # Calculate future ball positions from the estimated polynomial
            # parameters and draw them
            print("Tic frame: ", tic_frame)
            print("Time now: ", time.time())
            future_points = bte.get_future_points_3D(params_x, params_y, params_z, tic, time.time(), 2)

            for point in future_points.transpose():
                prediction_store.append(point)
                camera_point = bte.transform_to_camera(point)
                cv2.drawMarker(color_image, tuple(camera_point.astype(int)[:2]), (255, 0, 0), cv2.MARKER_CROSS, 5)


            # Loop over the set of tracked points to draw the ball's past movement
            print("cam points: ", camera_pts)
            for i in range(1, len(camera_pts)):
                # Compute the marker thickness (older points are drawn thicker)
                # and draw the tracked positions
                thickness = int(np.sqrt(buffer_len / float(i + 1)) * 2.5)
                cv2.drawMarker(color_image, (camera_pts[i][0], camera_pts[i][1]), (0, 0, 255), cv2.MARKER_CROSS, 10, thickness)

            # Stop the loop once a trajectory has been estimated
            break
        # Display results
        cv2.imshow("Result image", color_image)
        # out.write(color_image)  # uncomment to save video (requires a cv2.VideoWriter named `out`)
        key = cv2.waitKey(1) & 0xFF

        # if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break

    cv2.imshow("Result image", color_image)

    # Drop the first stored point and convert the stores to numpy arrays
    del points_store[0]
    points_store = np.asarray(points_store)
    prediction_store = np.asarray(prediction_store)
    print("Catching point: ", catch_point)
    print("points: ", points_store)
    print('first: ', points_store[:, 0])
    print('prediction: ', prediction_store)

    visualization(points_store[:, 0], points_store[:, 1], points_store[:, 2],
                  prediction_store[:, 0], prediction_store[:, 1], prediction_store[:, 2],
                  catch_point)
    # close all windows
    cv2.destroyAllWindows()
   
    client.close()
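
Both examples rely on a small TCPClient helper that is not shown here. Below is a minimal sketch of what it might look like, assuming a plain socket wrapper whose constructor, send_message, fail flag, and close/close_socket methods match how the examples use them; the actual implementation in the project may differ:

import socket


class TCPClient:
    """Minimal TCP client wrapper (sketch; interface assumed from usage)."""

    def __init__(self, host="localhost", port=5001):
        self.fail = False
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self.sock.connect((host, port))
        except OSError:
            # Mirror the `fail` flag Example #2 checks after construction
            self.fail = True

    def send_message(self, message):
        # Send the message as a UTF-8 encoded string
        self.sock.sendall(str(message).encode("utf-8"))

    def close(self):
        self.sock.close()

    # Example #2 calls close_socket(); keep both names as aliases
    close_socket = close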
Example #2
import Constants
# TCPClient comes from the project's helper module (module name inferred
# from usage; see the sketch above): adjust the import to the actual layout.
from tcp_client import TCPClient


def read_cmd_code():
    # Keep prompting until the user enters a valid integer
    while True:
        try:
            return int(input('Execute: '))
        except ValueError:
            print("ERROR: Not a number")
            print("Example: 1")


if __name__ == '__main__':
    client = TCPClient("localhost", 5001)
    if client.fail:
        print("Contolla il server")
        exit(1)
    cmd = ""
    while True:
        print("Available actions")
        cn = 1
        for cmd in Constants.actions:
            print(cn.__str__() + " - " + cmd)
            cn += 1
        cmd = read_cmd_code() - 1
        client.send_message(Constants.actions[cmd])
        if Constants.actions[cmd] == "exit":
            break
    client.close_socket()
    exit(0)
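
Example #2 also imports a Constants module that is not shown. A minimal sketch, assuming it only needs to expose the actions list the menu iterates over; the action names here are hypothetical placeholders, except "exit", which must be present because the loop uses it to terminate:

# Constants.py (sketch; actual action names are project-specific)
actions = ["forward", "backward", "left", "right", "exit"]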