def _adapt_observation_for_baseline(self, state):
        # Get basic information about the ego vehicle.
        ego_position = self.extract_ego_position(state)
        ego_heading = self.extract_ego_heading(state)
        ego_speed = self.extract_ego_speed(state)
        ego_steering = self.extract_ego_steering(state)
        ego_start = self.extract_ego_start(state)
        ego_goal = self.extract_ego_goal(state)
        ego_waypoints = self.extract_ego_waypoints(state)
        social_vehicle_states = self.extract_social_vehicles(state)

        # Identify the path the ego is following.
        ego_goal_path = self.extract_ego_goal_path(
            ego_goal=ego_goal,
            ego_waypoints=ego_waypoints,
            ego_start=ego_start,
        )

        # Get the closest waypoint to the ego.
        ego_closest_waypoint, _ = self.extract_closest_waypoint(
            ego_goal_path=ego_goal_path,
            ego_position=ego_position,
            ego_heading=ego_heading,
        )

        # Calculate the ego's lateral offset, normalized by half the lane width
        # so that the lane edges map to roughly +/-1.
        signed_distance_from_center = ego_closest_waypoint.signed_lateral_error(
            ego_position)
        half_lane_width = ego_closest_waypoint.lane_width * 0.5
        ego_distance_from_center = signed_distance_from_center / half_lane_width

        # Calculate the ego's relative, rotated position from the goal.
        ego_relative_rotated_goal_position = rotate2d_vector(
            np.asarray(ego_goal.position[0:2]) - np.asarray(ego_position[0:2]),
            -ego_heading,
        )

        basic_state = dict(
            speed=ego_speed,
            relative_goal_position=ego_relative_rotated_goal_position,
            distance_from_center=ego_distance_from_center,
            steering=ego_steering,
            angle_error=ego_closest_waypoint.relative_heading(ego_heading),
            social_vehicles=social_vehicle_states,
            road_speed=ego_closest_waypoint.speed_limit,
            start=ego_start.position,
            goal=ego_goal.position,
            heading=ego_heading,
            goal_path=ego_goal_path,
            ego_position=ego_position,
            waypoint_paths=ego_waypoints,
            events=state.events,
        )
        return basic_state
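# A minimal sketch of the rotate2d_vector helper these adapters lean on, assuming
# it applies a standard counter-clockwise 2D rotation matrix (the real utility
# lives in the SMARTS/ULTRA codebase and may differ in details). The adapters pass
# -ego_heading to map world-frame offsets into the ego's local frame.
import numpy as np

def rotate2d_vector(vector, angle):
    # Rotate a 2D vector counter-clockwise by `angle` radians.
    rotation = np.array([
        [np.cos(angle), -np.sin(angle)],
        [np.sin(angle), np.cos(angle)],
    ])
    return rotation @ np.asarray(vector)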
def extract_social_vehicle_state_default(social_vehicle,
                                         ego_vehicle_pos,
                                         ego_vehicle_heading,
                                         social_vehicle_config=None):
    # Offset of the social vehicle from the ego, rotated into the ego's frame.
    social_vehicle_position = social_vehicle.position[0:2] - ego_vehicle_pos[0:2]
    social_vehicle_position_rotated = rotate2d_vector(
        social_vehicle_position, -ego_vehicle_heading)
    # Normalize: positions by 100 m, heading difference by ~pi (3.14),
    # speed by 30 m/s.
    return [
        social_vehicle_position_rotated[0] / 100.0,
        social_vehicle_position_rotated[1] / 100.0,
        (social_vehicle.heading - ego_vehicle_heading) / 3.14,
        social_vehicle.speed / 30.0,
    ]
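# Hypothetical usage of the default extractor. SimpleNamespace stands in for a
# real SMARTS vehicle observation; only the fields the extractor reads are set.
from types import SimpleNamespace

_ego_pos = np.array([10.0, 5.0, 0.0])
_ego_heading = 0.0
_vehicle = SimpleNamespace(
    position=np.array([30.0, 5.0, 0.0]), heading=0.5, speed=15.0
)
# 20 m ahead of the ego: [20 / 100, 0 / 100, 0.5 / 3.14, 15 / 30]
print(extract_social_vehicle_state_default(_vehicle, _ego_pos, _ego_heading))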
def extract_social_vehicle_state_pointnet(social_vehicle, ego_vehicle_pos,
                                          ego_vehicle_heading,
                                          social_vehicle_config):
    speed_norm = 30
    heading_diff = social_vehicle.heading - ego_vehicle_heading
    # Encode the relative heading as a (cos, sin) vector whose length is
    # proportional to the vehicle's normalized speed.
    heading_vector = np.asarray(
        [np.cos(heading_diff), np.sin(heading_diff)]
    ) * (social_vehicle.speed / speed_norm)
    social_vehicle_position = social_vehicle.position[0:2] - ego_vehicle_pos[0:2]
    social_vehicle_position_rotated = rotate2d_vector(
        social_vehicle_position, -ego_vehicle_heading)
    return [
        social_vehicle_position_rotated[0] / 100.0,
        social_vehicle_position_rotated[1] / 100.0,
        heading_vector[0],
        heading_vector[1],
    ]
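# Hypothetical usage of the PointNet-style extractor, reusing the dummy vehicle
# above. Encoding the relative heading as a speed-scaled (cos, sin) pair avoids
# the wraparound discontinuity of a raw angle and folds speed into the same
# two features.
print(extract_social_vehicle_state_pointnet(_vehicle, _ego_pos, _ego_heading, None))
# -> [0.2, 0.0, cos(0.5) * 0.5, sin(0.5) * 0.5]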
def _extract_social_vehicle_state(
    social_vehicle: VehicleObservation,
    ego_position: Tuple[float, float, float],
    ego_heading: Heading,
) -> List[float]:
    absolute_position_difference = social_vehicle.position[:2] - ego_position[:2]
    relative_position_difference = rotate2d_vector(
        absolute_position_difference, -ego_heading)
    absolute_heading_difference = social_vehicle.heading - ego_heading

    # NOTE: The number of elements in this list should equal _FEATURES.
    social_vehicle_state = [
        relative_position_difference[0] / 100.0,
        relative_position_difference[1] / 100.0,
        absolute_heading_difference / 3.14,
        social_vehicle.speed / 30.0,
    ]
    return social_vehicle_state
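# The adapt function below relies on two helpers that are not shown in this
# excerpt. Minimal sketches under stated assumptions: _get_distance presumably
# measures planar Euclidean distance, and _normalize presumably divides each
# entry by a per-key constant; the keys and divisors in _NORMALIZATION_VALUES
# here are illustrative, not the source's actual values.
def _get_distance(social_vehicle, ego_position):
    # Planar (x, y) distance from the ego to a social vehicle.
    return float(np.linalg.norm(
        np.asarray(social_vehicle.position[:2]) - np.asarray(ego_position[:2])))

_NORMALIZATION_VALUES = {
    "speed": 30.0,
    "distance_from_center": 1.0,
    "steering": 3.14,
    "angle_error": 3.14,
    "relative_goal_position": 100.0,
    "waypoints_lookahead": 10.0,
    "road_speed": 30.0,
}

def _normalize(key, value):
    # Divide by the per-key constant; pass values through for unknown keys.
    if key not in _NORMALIZATION_VALUES:
        return value
    return value / _NORMALIZATION_VALUES[key]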
    def observation_adapter(self, env_observation):
        ego_state = env_observation.ego_vehicle_state
        start = env_observation.ego_vehicle_state.mission.start
        goal = env_observation.ego_vehicle_state.mission.goal
        path = get_path_to_goal(
            goal=goal, paths=env_observation.waypoint_paths, start=start
        )
        closest_wp, _ = get_closest_waypoint(
            num_lookahead=num_lookahead,  # presumably defined at module scope
            goal_path=path,
            ego_position=ego_state.position,
            ego_heading=ego_state.heading,
        )
        signed_dist_from_center = closest_wp.signed_lateral_error(ego_state.position)
        half_lane_width = closest_wp.lane_width * 0.5
        ego_dist_center = signed_dist_from_center / half_lane_width

        relative_goal_position = np.asarray(goal.position[0:2]) - np.asarray(
            ego_state.position[0:2]
        )
        relative_goal_position_rotated = rotate2d_vector(
            relative_goal_position, -ego_state.heading
        )

        state = dict(
            speed=ego_state.speed,
            relative_goal_position=relative_goal_position_rotated,
            distance_from_center=ego_dist_center,
            steering=ego_state.steering,
            angle_error=closest_wp.relative_heading(ego_state.heading),
            social_vehicles=env_observation.neighborhood_vehicle_states,
            road_speed=closest_wp.speed_limit,
            # ----------
            # Don't normalize the following:
            start=start.position,
            goal=goal.position,
            heading=ego_state.heading,
            goal_path=path,
            ego_position=ego_state.position,
            waypoint_paths=env_observation.waypoint_paths,
            events=env_observation.events,
        )
        return state
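# Hypothetical wiring of the adapter above into an agent spec. SMARTS API details
# vary across versions, so treat this as a sketch; `policy` stands in for an
# instance of the class that defines observation_adapter.
from smarts.core.agent import AgentSpec
from smarts.core.agent_interface import AgentInterface, AgentType

agent_spec = AgentSpec(
    interface=AgentInterface.from_type(AgentType.StandardWithAbsoluteSteering),
    observation_adapter=policy.observation_adapter,
)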
def adapt(observation: Observation) -> Dict[str, np.ndarray]:
    """Adapts a raw environment observation into a dictionary of numpy.ndarrays.

    The raw observation from the environment must include the ego vehicle's state,
    events, waypoint paths, and neighborhood vehicles. See smarts.core.sensors for more
    information on the Observation type.

    Args:
        observation (Observation): The raw environment observation received from SMARTS.

    Returns:
        dict: A dictionary with two keys, "low_dim_states" and "social_vehicles". The
            value of "low_dim_states" is a numpy.ndarray with shape (_SIZE,), and the
            value of "social_vehicles" is a numpy.ndarray with shape
            (_CAPACITY, _FEATURES).
    """
    observation = copy.deepcopy(observation)
    ego_position = observation.ego_vehicle_state.position
    ego_heading = observation.ego_vehicle_state.heading
    ego_speed = observation.ego_vehicle_state.speed
    ego_steering = observation.ego_vehicle_state.steering
    ego_start = observation.ego_vehicle_state.mission.start
    ego_goal = observation.ego_vehicle_state.mission.goal
    ego_waypoints = observation.waypoint_paths

    ego_goal_path = get_path_to_goal(goal=ego_goal,
                                     paths=ego_waypoints,
                                     start=ego_start)
    ego_closest_waypoint, ego_lookahead_waypoints = get_closest_waypoint(
        num_lookahead=_WAYPOINTS,
        goal_path=ego_goal_path,
        ego_position=ego_position,
        ego_heading=ego_heading,
    )
    ego_lookahead_waypoints = np.hstack(ego_lookahead_waypoints)

    signed_distance_from_center = ego_closest_waypoint.signed_lateral_error(
        ego_position)
    half_lane_width = ego_closest_waypoint.lane_width * 0.5
    ego_distance_from_center = signed_distance_from_center / half_lane_width

    ego_relative_rotated_goal_position = rotate2d_vector(
        np.asarray(ego_goal.position[0:2]) - np.asarray(ego_position[0:2]),
        -ego_heading,
    )

    observation_dict = dict(
        speed=ego_speed,
        relative_goal_position=ego_relative_rotated_goal_position,
        distance_from_center=ego_distance_from_center,
        steering=ego_steering,
        angle_error=ego_closest_waypoint.relative_heading(ego_heading),
        road_speed=ego_closest_waypoint.speed_limit,
        start=ego_start.position,
        goal=ego_goal.position,
        heading=ego_heading,
        goal_path=ego_goal_path,
        ego_position=ego_position,
        waypoint_paths=ego_waypoints,
        events=observation.events,
        waypoints_lookahead=ego_lookahead_waypoints,
    )
    normalized_observation = [
        _normalize(key, observation_dict[key]) for key in _NORMALIZATION_VALUES
    ]
    low_dim_states = np.concatenate(
        [
            value if isinstance(value, collections.abc.Iterable)
            else np.asarray([value])
            for value in normalized_observation
        ],
        axis=-1,
    )

    # Adapt the social vehicles.
    social_vehicles = observation.neighborhood_vehicle_states

    if len(social_vehicles) == 0:
        # There are no social vehicles. Create an empty array with the correct
        # number of features so it can be padded.
        social_vehicles = np.empty((0, _FEATURES))
    else:
        # Sort by distance to the ego vehicle, nearest first.
        social_vehicles.sort(
            key=lambda vehicle: _get_distance(vehicle, ego_position),
        )
        # Extract the state of each social vehicle.
        social_vehicles = np.asarray([
            _extract_social_vehicle_state(
                social_vehicle=social_vehicle,
                ego_position=ego_position,
                ego_heading=ego_heading,
            ) for social_vehicle in social_vehicles
        ])

    if len(social_vehicles) < _CAPACITY:
        # Pad with zero vectors if we don't have enough social vehicles.
        remain = _CAPACITY - len(social_vehicles)
        empty_social_vehicles = np.zeros(shape=(remain, _FEATURES))
        social_vehicles = np.concatenate(
            (social_vehicles, empty_social_vehicles))
    elif len(social_vehicles) > _CAPACITY:
        # Remove extra social vehicles if there were too many in the observation.
        social_vehicles = social_vehicles[:_CAPACITY]

    vector_observation = {
        "low_dim_states": low_dim_states.astype(np.float32),
        "social_vehicles": social_vehicles.astype(np.float32),
    }
    return vector_observation
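# Hypothetical usage of adapt inside an environment loop. `env` and "AGENT-007"
# are illustrative; env is assumed to be a SMARTS gym environment that returns a
# smarts.core.sensors.Observation per agent.
observations = env.reset()
vector_observation = adapt(observations["AGENT-007"])
assert vector_observation["low_dim_states"].shape == (_SIZE,)
assert vector_observation["social_vehicles"].shape == (_CAPACITY, _FEATURES)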