Example #1
def main(dt, dt_ros):
    # load the recorded waypoint data from the pickle file in the path_follower package
    rospack = rospkg.RosPack()
    file_name = os.path.join(rospack.get_path("path_follower"), "scripts",
                             "waypoint_loader", 'output_0_768.avi.pkl')

    with open(file_name, 'rb') as f:
        data = pickle.load(f)

    rospy.init_node('toy_planner_waypoints', anonymous=True)
    pub = rospy.Publisher('waypoints', Waypoints, queue_size=1)
    rate = rospy.Rate(1 / dt_ros)

    Index = 0
    while not rospy.is_shutdown():
        waypoints = Waypoints()
        for i in range(8):
            pt = Point2D()
            pt.x = data[Index][i, 0]
            pt.y = data[Index][i, 1]
            waypoints.points.append(pt)
        waypoints.dt = dt
        pub.publish(waypoints)
        Index += 1  # note: runs past the end of `data` once the recording is exhausted
        rate.sleep()
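
The excerpt assumes the usual module-level imports and an entry point that supplies the two timesteps. A minimal sketch of how such a node might be launched is given below; the message module path_follower.msg and the 0.1 s / 0.5 s values are assumptions for illustration, not taken from the original file.

import os
import pickle

import rospkg
import rospy

# Assumed location of the message definitions; the real package may differ.
from path_follower.msg import Waypoints, Point2D

if __name__ == '__main__':
    try:
        # dt: time spacing stamped into the waypoint message,
        # dt_ros: publishing period of the node (illustrative values).
        main(dt=0.1, dt_ros=0.5)
    except rospy.ROSInterruptException:
        pass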
Example #2
def main(dt, dt_ros, horizon):
    global X, Y, psi, stateEstimate_mark

    # load the reference track
    track = Tra('Tra_3', horizon)
    rospy.init_node('toy_planner_waypoints', anonymous=True)
    rospy.Subscriber('state_estimate', state_Dynamic, stateEstimateCallback)
    pub = rospy.Publisher('waypoints', Waypoints, queue_size=1)

    rate = rospy.Rate(1 / dt_ros)

    # initialise the planning horizon, the track search start index, and the
    # number of track samples used for spline fitting
    track.horizon = horizon
    track.currentIndex = 0
    num_points = 400

    while not rospy.is_shutdown():
        if stateEstimate_mark:
            track.currentIndex, _ = track.searchClosestPt(
                X, Y, track.currentIndex)
            if track.currentIndex + num_points < track.size:
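                # resample the next num_points track samples at the planner
                # timestep dt, using cubic splines of x(t) and y(t)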
                index_list = np.arange(track.currentIndex + int(dt / 0.01),
                                       track.currentIndex + num_points, 1)
                num_steps = int(
                    floor((track.t[track.currentIndex + num_points] -
                           track.t[track.currentIndex + 1]) / dt) + 1)
                t = np.linspace(track.t[track.currentIndex],
                                track.t[track.currentIndex] + num_steps * dt,
                                num_steps)
                t_sub = track.t[index_list]
                x_sub = track.x[index_list]
                y_sub = track.y[index_list]
                spl_x = UnivariateSpline(t_sub, x_sub, k=3)
                spl_y = UnivariateSpline(t_sub, y_sub, k=3)
                spl_x_val = spl_x(t)
                spl_y_val = spl_y(t)

                waypoints = Waypoints()
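                # express each resampled point in the vehicle body frame:
                # subtract the vehicle position (X, Y), then rotate by -psi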
                for i in range(1, num_steps):
                    pt = Point2D()
                    pt.x = (spl_x_val[i] - X) * cos(psi) + (spl_y_val[i] -
                                                            Y) * sin(psi)
                    pt.y = -(spl_x_val[i] - X) * sin(psi) + (spl_y_val[i] -
                                                             Y) * cos(psi)
                    waypoints.points.append(pt)
                waypoints.dt = dt
                pub.publish(waypoints)
        rate.sleep()
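
The pt.x / pt.y assignments above are a plain 2-D change of frame from global coordinates into the vehicle frame. A self-contained sketch of the same transform, using NumPy only and illustrative names:

import numpy as np

def global_to_body(points_xy, X, Y, psi):
    # Translate so the vehicle sits at the origin, then rotate by -psi so the
    # x axis points along the vehicle heading (matches pt.x / pt.y above).
    d = np.asarray(points_xy, dtype=float) - np.array([X, Y])
    c, s = np.cos(psi), np.sin(psi)
    return d @ np.array([[c, -s],
                         [s,  c]])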
Example #3
# module-level state shared with the callbacks: pose estimates, reference values,
# and vehicle parameters
ra_to_cg = 1.65  # distance from the rear axle to the centre of gravity, metres
X = 0
Y = 0
vx = 0
vy = 0
psi = 0
X_ref = 0
Y_ref = 0
vx_ref = 0
vy_ref = 0
psi_ref = 0
stateEstimate_mark = True
Waypoints_mark = False
Waypoints_received = Waypoints()
state_received = state_Dynamic()
steering_ratio = 14.8


def stateEstimateCallback(data):
    global X, Y, psi, vx, vy, stateEstimate_mark, state_received
    X = data.X + cos(data.psi) * ra_to_cg
    Y = data.Y + sin(data.psi) * ra_to_cg
    vx = data.vx
    vy = data.vy
    psi = data.psi
    stateEstimate_mark = True
    state_received = data
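
The callback shifts the estimator's rear-axle position forward by ra_to_cg metres along the heading to obtain the centre-of-gravity position. A quick numerical check of that offset, with made-up pose values:

from math import cos, sin, radians

ra_to_cg = 1.65          # rear axle to centre of gravity, metres
X_ra, Y_ra = 10.0, 5.0   # rear-axle position from the estimator (illustrative)
psi = radians(30.0)      # vehicle heading (illustrative)

X_cg = X_ra + cos(psi) * ra_to_cg   # ~11.43
Y_cg = Y_ra + sin(psi) * ra_to_cg   # ~5.83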

Example #4
def on_image_received(data):
    # callback for the raw camera image published directly by the driver
    if vehicle_real_speed_kmh is None:  # no speed estimate received yet
        return

    # process only every IMAGE_DOWNSAMPLE_MAIN-th frame
    global count_middle
    count_middle += 1
    if count_middle % IMAGE_DOWNSAMPLE_MAIN != 0:
        return

    time0 = time.time()
    global bridge, driving_model, vis_pub_full, controller, waypoint_pub, parent_conn, dbw_enable, vis_pub_full_enabled

    img = bridge.imgmsg_to_cv2(data, "bgr8")
    # deliberately not resizing the image, since it might be zoomed later
    #img = cv2.resize(img, (IM_WIDTH, IM_HEIGHT))
    # the camera is mounted upside down, so flip the image vertically and horizontally
    img = img[::-1, ::-1, :]

    if use_left_right:
        if left_cache is None or right_cache is None:
            return
        left_lock.acquire()
        right_lock.acquire()
        sensors = [left_cache, img, right_cache]
        left_lock.release()
        right_lock.release()

    else:
        sensors = [img]
    t00 = time.time()

    '''
    print("before sending out the images")
    parent_conn.send([sensors, vehicle_real_speed_kmh, direction])
    print("before receving")
    control_dict, vis = parent_conn.recv()
    control = lambda : None
    control.steer = control_dict["steer"]
    control.throttle = control_dict["throttle"]
    control.brake = control_dict["brake"]
    print("after receiving")
    '''
    # TODO: fill the model name and current parameters
    exp_id = sys.argv[1]
    global message_from_controller
    extra_extra = exp_id + "\n" + \
                  message_from_controller + \
                  "real_speed={:.2f} m/s \n".format(vehicle_real_speed_kmh/3.6)
    control, vis = driving_model.compute_action(sensors, vehicle_real_speed_kmh, direction,
                                                save_image_to_disk=False, return_vis=True, return_extra=False, extra_extra=extra_extra,
                                                mapping_support={"town_id": "rfs", "pos": vehicle_pos, "ori": vehicle_yaw})
    #print("time for compute action is ", time.time() - t00)
    #control, vis = driving_model.compute_action(sensors, 0.0, direction, save_image_to_disk=False, return_vis=True)
    global use_waypoint

    if use_waypoint:
        # shift the waypoint forward by the computation time
        time_passed = time.time() - time0
        time_list = np.array([0.2*(i+1) for i in range(control.shape[0])])
        time_list -= time_passed  # the computation time is typically less than 200 ms
        desired_time = np.array([0.2*(i+1) for i in range(control.shape[0]-1)])

        wp0 = np.interp(desired_time, time_list, control[:, 0])
        wp1 = np.interp(desired_time, time_list, control[:, 1])
        control = np.stack([wp0, wp1], axis=1)

        # TODO: add a way to enforce a safety speed limit
        waypoints = Waypoints()
        for i in range(1, control.shape[0]):
            pt = Point2D()
            pt.x = control[i, 0]
            pt.y = -control[i, 1]
            waypoints.points.append(pt)
        waypoints.dt = 0.2
        waypoint_pub.publish(waypoints)

    else:
        v = Vector3()
        v.x = control.steer
        v.y = control.throttle
        v.z = control.brake
        global raw_control_pub
        if raw_control_pub is not None:
            raw_control_pub.publish(v)

    #cv2.imshow("Cameras", vis)
    #cv2.waitKey(5)

    msg = bridge.cv2_to_imgmsg(vis, "rgb8")
    vis_pub_full.publish(msg)
    if dbw_enable:
        print("enabling, republishing the message to a second topic")
        vis_pub_full_enabled.publish(msg)

    time_now = time.time()
    global last_computation
    print("total time for on_image_received is", time_now - time0,
          "s; runs at", 1.0 / (time_now - last_computation), "Hz")
    last_computation = time.time()
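
In the use_waypoint branch the predicted waypoints, nominally 0.2 s, 0.4 s, ... ahead of the image capture time, are re-sampled with np.interp so that after time_passed seconds of inference the published points again refer to times measured from now. A standalone sketch of that resampling step (function name and example values are illustrative):

import numpy as np

def shift_waypoints(wps, time_passed, spacing=0.2):
    # wps: (N, 2) waypoints predicted for spacing, 2*spacing, ... seconds
    # after image capture; time_passed: seconds spent on inference.
    n = wps.shape[0]
    time_list = spacing * np.arange(1, n + 1) - time_passed  # where the points sit now
    desired = spacing * np.arange(1, n)                      # times we want to publish
    wp0 = np.interp(desired, time_list, wps[:, 0])
    wp1 = np.interp(desired, time_list, wps[:, 1])
    return np.stack([wp0, wp1], axis=1)

# e.g. five predicted points and 60 ms of computation time
shifted = shift_waypoints(np.random.rand(5, 2), time_passed=0.06)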