Example #1
def _generate_turn_in_place_trajectory(v, angle_to_turn_to, direction):
    """
    Roll out a turn-in-place trajectory for the given set of parameters (v, angle_to_turn_to, direction).
    :param v: commanded linear velocity for the in-place turn
    :param angle_to_turn_to: angle at which the turn ends
    :param direction: turning direction, i.e. clockwise or counterclockwise
    :return: robot's pose, state and control input time series with turn_full_circle_steps steps
    """
    initial_pose = [0., 0., 0.]  # (x, y, theta)
    initial_state = [None, max_v, 0.]  # (wheel_angle, linear_velocity, ang_velocity)
    initial_control = [0., 0]  # (linear_velocity, angular_velocity)
    # control policy function handler: (control_generator, n_steps_to_apply_this_generator);
    # in this case the list below contains only one control policy with turn_full_circle_steps steps
    list_of_policies = [
        (lambda pose, state, controls: _turn_in_place_policy(
            pose,
            state,
            controls,
            v=v,
            direction=direction,
            angle_to_turn_to=angle_to_turn_to), turn_full_circle_steps)
    ]
    return statefull_branching(initial_pose, initial_state,
                               initial_control, list_of_policies,
                               forward_model, refine_dt)
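
`statefull_branching` itself is not shown in these examples. The sketch below is a minimal, single-trajectory illustration of how a schedule of `(control_generator, n_steps)` pairs could be consumed together with a forward model; the function name and the exact array handling are assumptions, and the real implementation additionally branches each step into many candidate trajectories.

import numpy as np

def rollout_schedule_sketch(initial_pose, initial_state, initial_control,
                            list_of_policies, forward_model, dt):
    # Hypothetical helper: rolls out ONE trajectory to illustrate the
    # (policy, n_steps) convention; the real statefull_branching branches.
    poses = [list(initial_pose)]
    states = [list(initial_state)]
    controls = [list(initial_control)]
    for policy, n_steps in list_of_policies:
        for _ in range(n_steps):
            # a policy inspects the evolution so far and emits the next control
            control = policy(np.array(poses), np.array(states), np.array(controls))
            pose, state = forward_model(poses[-1], states[-1], dt, control)
            poses.append(list(pose))
            states.append(list(state))
            controls.append(list(control))
    return np.array(poses), np.array(states), np.array(controls)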
Example #2
def control_choices_tricycle_exhaustive(
        forward_model,
        wheel_angle, max_v,
        exhausitve_dt,
        refine_dt,
        n_steps,
        theta_samples,
        v_samples,
        extra_copy_n_steps,
        max_front_wheel_angle,
        max_front_wheel_speed):
    """
    Generates all possible control policies for tricycle drive given initial wheel_angle that
    can be reached in n_steps of dt length.
    """

    v_choices = np.linspace(max_v*0.5, max_v, v_samples + 1)[1:]

    exhaustive_policy = lambda pose, state, controls: tricycle_branching_policy(
        pose,
        state,
        controls,
        # This is not strictly correct: this argument is really supposed to be refine_dt.
        # However it works well in practice with real tricycles.
        # Effectively it lets the planner assume the front wheel can turn ~7 times faster than it really can.
        # A real tricycle probably has so much inertia that it moves slowly, while the front wheel has
        # little inertia, so in effect you move slower but turn the wheel just as fast.
        exhausitve_dt,
        theta_samples=theta_samples,
        v_choices=v_choices,
        max_front_wheel_angle=max_front_wheel_angle,
        front_wheel_speed=max_front_wheel_speed
    )

    # policies = [(exhaustive_policy, 1), (copy_control_policy, (int(exhausitve_dt/refine_dt)-1))]*n_steps
    # if extra_copy_n_steps > 0:
    #     # add extra continuation to the policy to extend the horizon without switching point
    #     policies += [(copy_control_policy, extra_copy_n_steps)]
    policies = [(exhaustive_policy, 1), (copy_control_policy, extra_copy_n_steps)]

    pose_evolution, state_evolution, control_evolution = statefull_branching(
        initial_pose=[0., 0., 0.],
        initial_state=[wheel_angle, max_v, 0.],
        initial_control=[0., 0],
        list_of_policies=policies,
        forward_model=forward_model,
        dt=refine_dt
    )

    return pose_evolution, state_evolution, control_evolution, refine_dt
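
To put the "turn the front wheel faster" trick into concrete numbers (the values below are assumptions for illustration, not from the source): the branching policy treats `front_wheel_speed * dt` as the front-wheel sweep reachable in one branching step, so handing it `exhausitve_dt` instead of `refine_dt` widens that sweep by the ratio of the two.

# Illustrative numbers only:
max_front_wheel_speed = 1.0  # rad/s (assumed)
refine_dt = 0.1              # s, actual integration step
exhausitve_dt = 0.7          # s, dt handed to the branching policy (assumed)

physical_sweep = max_front_wheel_speed * refine_dt      # 0.1 rad per refine step
assumed_sweep = max_front_wheel_speed * exhausitve_dt   # 0.7 rad per branching step

print(assumed_sweep / physical_sweep)  # 7.0 -> the "7 times faster" from the comment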
Example #3
def control_choices_diff_drive_exhaustive(max_v,
                                          max_w,
                                          forward_model,
                                          initial_state,
                                          exhausitve_dt=0.4,
                                          refine_dt=0.1,
                                          n_steps=2,
                                          w_samples_in_each_direction=15,
                                          enable_turn_in_place=True,
                                          v_samples=4):
    """
    Exhaustive set of diff drive control policies given max_v and max_w for few steps
    We apply exhaustive search every exhausitve_dt and then copy the state for the rest of the steps (with refine_dt)
    :param max_v: e.g. 1
    :param max_w: e.g. 1
    :param forward_model: A function of the form:
        next_pose, next_state = forward_model(pose, initial_states, dt, control_signals)
    :param initial_state: vector of the initial state for forward model
    :param v_samples: number of velocity samples
    :return:
    """
    exhaustive_policy = lambda pose, state, control: gen_vw_step_choices(
        min_v=0.1 * max_v,
        max_v=max_v,
        max_w=max_w,
        v_samples=v_samples,
        w_samples_in_each_direction=w_samples_in_each_direction,
        enable_turn_in_place=enable_turn_in_place)

    pose_evolution, state_evolution, control_evolution = statefull_branching(
        initial_pose=[0., 0., 0.],  # (x, y, theta)
        initial_state=initial_state,
        initial_control=[0., 0],  # (v, w)
        list_of_policies=[(exhaustive_policy, 1),
                          (copy_control_policy,
                           (int(exhausitve_dt / refine_dt) - 1))] * n_steps,
        forward_model=forward_model,
        dt=refine_dt)

    logging.info(
        'DWA diff drive exhaustive control generated %d trajectories (%d steps)'
        % (state_evolution.shape[0], n_steps))

    control_costs = np.zeros(control_evolution.shape[0], dtype=float)
    return pose_evolution, state_evolution, control_evolution, refine_dt, control_costs
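
The docstring above pins down the forward-model contract, but the model itself is not part of this example. A minimal unicycle-kinematics sketch that satisfies the documented signature could look like this (first-order Euler step, state passed through unchanged; the function name is hypothetical):

import numpy as np

def diff_drive_forward_model_sketch(pose, state, dt, control):
    # pose: (x, y, theta); control: (v, w). Hypothetical stand-in matching
    # next_pose, next_state = forward_model(pose, state, dt, control).
    x, y, theta = pose
    v, w = control
    next_pose = np.array([x + v * np.cos(theta) * dt,
                          y + v * np.sin(theta) * dt,
                          theta + w * dt])
    return next_pose, np.asarray(state)  # this sketch keeps the state unchanged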
Example #4
def control_choices_tricycle_recovery_aggressive(
        forward_model, wheel_angle, max_v,
        max_front_wheel_angle, max_front_wheel_speed,
        scale_steering_speed=False):
    '''
    forward_model: a function taking the robot pose, state, dt and controls and returning
        the new pose and state
    This is analogous to control_choices_gtx_exhaustive, but the parameters mimic the original DWA
    that doesn't have switch points and was used for the demo
    '''
    max_v = 0.33*max_v
    # dt handed to the first branching stage; effectively scales the range of front wheel angle choices for the first arc
    exhausitve_dt = 0.3
    # dt handed to the second branching stage; effectively scales the range of front wheel angle choices for the second arc
    exhausitve_dt_2 = 0.75

    refine_dt = 0.1
    # number of front wheel angle samples for the first arc
    theta_samples = 181
    # number of front wheel angle samples for the second arc
    theta_samples_2 = 11

    v_samples = 2
    first_stage_steps = 8
    match_distance_steps = 8
    extra_copy_n_steps = 0

    v_choices = np.linspace(0., max_v, v_samples + 1)[1:]
    if scale_steering_speed:
        front_wheel_speed_coefficients = np.linspace(0., 1., v_samples + 1)[1:][::-1]
    else:
        front_wheel_speed_coefficients = None

    exhaustive_policy = lambda pose, state, control: tricycle_branching_policy(
        pose,
        state,
        control,
        exhausitve_dt,
        theta_samples=theta_samples,
        v_choices=v_choices,
        max_front_wheel_angle=max_front_wheel_angle,
        front_wheel_speed=max_front_wheel_speed,
        front_wheel_speed_coefficients=front_wheel_speed_coefficients)

    exhaustive_policy_2 = lambda pose, state, control: tricycle_branching_policy(
        pose,
        state,
        control,
        exhausitve_dt_2,
        theta_samples=theta_samples_2,
        v_choices=v_choices,
        max_front_wheel_angle=max_front_wheel_angle,
        front_wheel_speed=max_front_wheel_speed,
        front_wheel_speed_coefficients=front_wheel_speed_coefficients)

    policies = [(exhaustive_policy, 1), (copy_control_policy, first_stage_steps - 1),
                (exhaustive_policy_2, 1), (copy_control_policy, match_distance_steps - 1)]

    if extra_copy_n_steps > 0:
        policies += [(copy_control_policy, extra_copy_n_steps)]

    pose_evolution, state_evolution, control_evolution = statefull_branching(
        [0., 0., 0.], [wheel_angle, max_v, 0.], [0., 0.], policies, forward_model, refine_dt)

    return pose_evolution, state_evolution, control_evolution, refine_dt
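
Since each `(policy, n_steps)` pair contributes `n_steps` refine-dt steps, the hard-coded constants above fix the rollout horizon directly:

first_stage_steps = 8
match_distance_steps = 8
refine_dt = 0.1

# (exhaustive_policy, 1) + (copy, 7) + (exhaustive_policy_2, 1) + (copy, 7)
total_steps = first_stage_steps + match_distance_steps  # 16 steps
horizon_seconds = total_steps * refine_dt               # 1.6 s planning horizon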
Example #5
def control_choices_tricycle_constant_distance_2(
        forward_model, wheel_angle, max_v,
        max_front_wheel_angle, max_front_wheel_speed,
        scale_steering_speed=False):
    '''
    forward_model: a function taking the robot pose, state, dt and controls and returning
        the new pose and state
    This is analogous to control_choices_gtx_exhaustive, but the parameters mimic the original DWA
    that doesn't have switch points and was used for the demo
    '''

    # dt handed to the first branching stage; effectively scales the range of front wheel angle choices for the first arc
    exhausitve_dt = 0.3
    # dt handed to the distance-matching stage; effectively scales the range of front wheel angle choices for the second arc
    match_distance_exhaustive_dt = 0.75

    refine_dt = 0.1
    # number of front wheel angle samples for the first arc
    theta_samples = 41
    # number of front wheel angle samples for the second arc
    theta_samples_2 = 3
    # number of front wheel angle samples for the distance-matching stage
    match_distance_theta_samples = 3

    v_samples = 2
    first_stage_steps = 8
    match_distance_steps = 4
    extra_copy_n_steps = 0

    def _gtx_branching_policy_constant_distance(pose_evolution, state_evolution, control_evolution,
                                                exhaustive_dt, refine_dt, theta_samples,
                                                max_v, steps_to_evolve, front_wheel_speed):
        previous_linear_v = state_evolution[-1, 1]
        desired_distance = max_v*refine_dt*pose_evolution.shape[0]
        # TODO: compute actual distance now
        distance_traveled = previous_linear_v*refine_dt*pose_evolution.shape[0]

        new_v = (desired_distance-distance_traveled)/(steps_to_evolve*refine_dt)

        if new_v < max_v*0.1:
            return copy_control_policy(pose_evolution, state_evolution, control_evolution)
        else:
            matching_v_choices = [0.5*max_v, max_v]

            return tricycle_branching_policy(
                pose_evolution,
                state_evolution,
                control_evolution,
                dt=match_distance_exhaustive_dt,
                theta_samples=match_distance_theta_samples,
                v_choices=matching_v_choices,
                max_front_wheel_angle=max_front_wheel_angle,
                front_wheel_speed=max_front_wheel_speed,
            )

    distance_matching_policy = lambda pose, state, control: _gtx_branching_policy_constant_distance(
        pose,
        state,
        control,
        exhausitve_dt,
        refine_dt,
        theta_samples=theta_samples,
        max_v=max_v,
        steps_to_evolve=match_distance_steps,
        front_wheel_speed=max_front_wheel_speed)

    v_choices = np.linspace(0., max_v, v_samples + 1)[1:]
    if scale_steering_speed:
        front_wheel_speed_coefficients = np.linspace(0., 1., v_samples + 1)[1:][::-1]
    else:
        front_wheel_speed_coefficients = None

    exhaustive_policy = lambda pose, state, control: tricycle_branching_policy(
        pose,
        state,
        control,
        # This is not strictly correct: this argument is really supposed to be refine_dt.
        # However it works well in practice with the real GTX.
        # Effectively it lets the planner assume the front wheel can turn ~7 times faster than it really can.
        # A real GTX probably has so much inertia that it moves slowly, while the front wheel has
        # little inertia, so in effect you move slower but turn the wheel just as fast.
        exhausitve_dt,
        theta_samples=theta_samples,
        v_choices=v_choices,
        max_front_wheel_angle=max_front_wheel_angle,
        front_wheel_speed=max_front_wheel_speed,
        front_wheel_speed_coefficients=front_wheel_speed_coefficients)

    exhaustive_policy_2 = lambda pose, state, control: tricycle_branching_policy(
        pose,
        state,
        control,
        # This is not strictly correct: this argument is really supposed to be refine_dt.
        # However it works well in practice with the real GTX.
        # Effectively it lets the planner assume the front wheel can turn ~7 times faster than it really can.
        # A real GTX probably has so much inertia that it moves slowly, while the front wheel has
        # little inertia, so in effect you move slower but turn the wheel just as fast.
        exhausitve_dt,
        theta_samples=theta_samples_2,
        v_choices=v_choices,
        max_front_wheel_angle=max_front_wheel_angle,
        front_wheel_speed=max_front_wheel_speed,
        front_wheel_speed_coefficients=front_wheel_speed_coefficients)

    policies = [(exhaustive_policy, 1), (copy_control_policy, first_stage_steps-1),
                (exhaustive_policy_2, 1), (copy_control_policy, first_stage_steps - 1),
                (distance_matching_policy, 1), (copy_control_policy, match_distance_steps-1)]

    if extra_copy_n_steps > 0:
        policies += [(copy_control_policy, extra_copy_n_steps)]

    pose_evolution, state_evolution, control_evolution = statefull_branching(
        [0., 0., 0.], [wheel_angle, max_v, 0.], [0., 0.], policies, forward_model, refine_dt)

    return pose_evolution, state_evolution, control_evolution, refine_dt
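
A quick numeric pass through `_gtx_branching_policy_constant_distance`, with assumed values, shows how the matching stage compensates a distance deficit accumulated while driving below `max_v`:

max_v = 1.0                # m/s (assumed)
refine_dt = 0.1
elapsed_steps = 16         # pose_evolution.shape[0] at the switch point (assumed)
previous_linear_v = 0.5    # state_evolution[-1, 1] (assumed)
match_distance_steps = 4

desired_distance = max_v * refine_dt * elapsed_steps               # 1.6 m
distance_traveled = previous_linear_v * refine_dt * elapsed_steps  # 0.8 m
new_v = (desired_distance - distance_traveled) / (match_distance_steps * refine_dt)
# new_v = 2.0 > 0.1 * max_v, so the policy branches; note new_v itself is only
# used for the threshold -- the actual choices are [0.5 * max_v, max_v].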
Example #6
def _prepare_rotation(angle_to_turn_to, direction):
    return statefull_branching(
        [0., 0., 0.], [wheel_angle, max_v, 0.], [0., 0],
        [(lambda pose, state, _: _turn_in_place_policy(
            pose, state, direction=direction, angle_to_turn_to=angle_to_turn_to), turn_around_steps)],
        forward_model, refine_dt)
Example #7
def control_choices_tricycle_constant_distance(
        forward_model, wheel_angle, max_v,
        max_front_wheel_angle, max_front_wheel_speed,
        scale_steering_speed=False):
    """
    forward_model: a function getting robot pose and state, dt and controls and returning
        the new pose and state
    This analogous to control_choices_tricycle_exhaustive, but parameters are to mimic original dwa
    that doesn't have switch points and was used for demo
    """

    # dt handed to the first branching stage; effectively scales the range of front wheel angle choices for the first arc
    exhausitve_dt = 0.3
    # dt handed to the distance-matching stage; effectively scales the range of front wheel angle choices for the second arc
    match_distance_exhaustive_dt = 0.75

    refine_dt = 0.1
    # number of front wheel angle samples for the first arc
    theta_samples = 21
    # number of front wheel angle samples for the distance-matching stage
    match_distance_theta_samples = 11

    v_samples = 3

    first_stage_steps = 8
    match_distance_steps = 8
    extra_copy_n_steps = 0

    def _tricycle_branching_policy_constant_distance(pose_evolution, state_evolution, control_evolution,
                                                     exhaustive_dt, refine_dt, theta_samples,
                                                     max_v, steps_to_evolve, front_wheel_speed):
        previous_linear_v = state_evolution[-1, 1]
        desired_distance = max_v*refine_dt*pose_evolution.shape[0]
        # TODO: compute actual distance now
        distance_traveled = previous_linear_v*refine_dt*pose_evolution.shape[0]

        new_v = (desired_distance-distance_traveled)/(steps_to_evolve*refine_dt)

        if new_v < max_v*0.1:
            # If only a small velocity is needed to reach the required distance,
            # there is little room left to maneuver anyway,
            # so we reduce the number of trajectories to save computation later.
            return copy_control_policy(pose_evolution, state_evolution, control_evolution)
        else:
            # Here we need to cover a decent amount of distance. We could cover it with just new_v,
            # but since we are at the beginning of the fan, we want more possibilities to finish it.
            # That's why we branch not only on angles but also on velocities.
            # For example, we could choose new_v plus a few other velocities (e.g. new_v and max_v),
            # but here we simply use [0.5*max_v, max_v]:
            # 0.5*max_v keeps a decent length even when new_v is small.
            matching_v_choices = [0.5*max_v, max_v]

            return tricycle_branching_policy(
                pose_evolution,
                state_evolution,
                control_evolution,
                dt=match_distance_exhaustive_dt,
                theta_samples=match_distance_theta_samples,
                v_choices=matching_v_choices,
                max_front_wheel_angle=max_front_wheel_angle,
                front_wheel_speed=max_front_wheel_speed,
            )

    distance_matching_policy = lambda pose, state, control: _tricycle_branching_policy_constant_distance(
        pose,
        state,
        control,
        exhausitve_dt,
        refine_dt,
        theta_samples=theta_samples,
        max_v=max_v,
        steps_to_evolve=match_distance_steps,
        front_wheel_speed=max_front_wheel_speed)

    v_choices = np.linspace(0., max_v, v_samples + 1)[1:]
    if scale_steering_speed:
        front_wheel_speed_coefficients = np.linspace(0., 1., v_samples + 1)[1:][::-1]
    else:
        front_wheel_speed_coefficients = None

    exhaustive_policy = lambda pose, state, control: tricycle_branching_policy(
        pose,
        state,
        control,
        # This is not strictly correct: this argument is really supposed to be refine_dt.
        # However it works well in practice with real tricycles.
        # Effectively it lets the planner assume the front wheel can turn ~7 times faster than it really can.
        # A real tricycle probably has so much inertia that it moves slowly, while the front wheel has
        # little inertia, so in effect you move slower but turn the wheel just as fast.
        exhausitve_dt,
        theta_samples=theta_samples,
        v_choices=v_choices,
        max_front_wheel_angle=max_front_wheel_angle,
        front_wheel_speed=max_front_wheel_speed,
        front_wheel_speed_coefficients=front_wheel_speed_coefficients)

    policies = [(exhaustive_policy, 1), (copy_control_policy, first_stage_steps-1),
                (distance_matching_policy, 1), (copy_control_policy, match_distance_steps-1)]

    if extra_copy_n_steps > 0:
        policies += [(copy_control_policy, extra_copy_n_steps)]

    pose_evolution, state_evolution, control_evolution = statefull_branching(
        [0., 0., 0.], [wheel_angle, max_v, 0.], [0., 0.], policies, forward_model, refine_dt)

    return pose_evolution, state_evolution, control_evolution, refine_dt
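
A hedged usage sketch, assuming a `tricycle_forward_model` that follows the `(pose, state, dt, control) -> (next_pose, next_state)` contract documented in the diff-drive example, with illustrative limits:

import numpy as np

poses, states, controls, dt = control_choices_tricycle_constant_distance(
    forward_model=tricycle_forward_model,  # hypothetical model with the documented contract
    wheel_angle=0.0,                       # front wheel starts straight
    max_v=1.0,                             # m/s (assumed)
    max_front_wheel_angle=np.pi / 3,       # rad (assumed)
    max_front_wheel_speed=1.0)             # rad/s (assumed)
# Each *_evolution array stacks the candidate trajectories (cf. the logging
# call in the diff-drive example, which reads state_evolution.shape[0]);
# the rollout covers first_stage_steps + match_distance_steps = 16 refine-dt steps.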