    def update(self, car_pose):
        """
        Update the frustums

        Args:
            car_pose (Pose): Gazebo Pose of the agent
        """
        with self.lock:
            self.frustums = []
            self.cam_poses = []
            self.near_plane_infos = []
            # Retrieve car pose to calculate camera pose.
            car_pos = np.array([car_pose.position.x, car_pose.position.y,
                                car_pose.position.z])
            car_quaternion = [car_pose.orientation.x,
                              car_pose.orientation.y,
                              car_pose.orientation.z,
                              car_pose.orientation.w]
            for camera_offset in self.camera_offsets:
                # Get camera position by applying position offset from the car position.
                cam_pos = car_pos + apply_orientation(car_quaternion, camera_offset)
                # Get camera rotation by applying car rotation and pitch angle of camera.
                _, _, yaw = quaternion_to_euler(x=car_quaternion[0],
                                                y=car_quaternion[1],
                                                z=car_quaternion[2],
                                                w=car_quaternion[3])
                cam_quaternion = np.array(euler_to_quaternion(pitch=self.camera_pitch, yaw=yaw))
                # Calculate frustum with camera position and rotation.
                planes, cam_pose, near_plane_info = self._calculate_frustum_planes(cam_pos=cam_pos,
                                                                                   cam_quaternion=cam_quaternion)
                self.frustums.append(planes)
                self.cam_poses.append(cam_pose)
                self.near_plane_infos.append(near_plane_info)
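
The update method above leans on three quaternion helpers that are not shown in the snippet. Below is a minimal sketch of conventional implementations, assuming the [x, y, z, w] component order used above; the project's actual helpers may differ in detail.

import numpy as np

def euler_to_quaternion(roll=0.0, pitch=0.0, yaw=0.0):
    # Intrinsic yaw-pitch-roll (ZYX) Euler angles to a quaternion in [x, y, z, w] order.
    cr, sr = np.cos(roll / 2.0), np.sin(roll / 2.0)
    cp, sp = np.cos(pitch / 2.0), np.sin(pitch / 2.0)
    cy, sy = np.cos(yaw / 2.0), np.sin(yaw / 2.0)
    return [sr * cp * cy - cr * sp * sy,
            cr * sp * cy + sr * cp * sy,
            cr * cp * sy - sr * sp * cy,
            cr * cp * cy + sr * sp * sy]

def quaternion_to_euler(x, y, z, w):
    # Quaternion in [x, y, z, w] order to (roll, pitch, yaw) in radians.
    roll = np.arctan2(2.0 * (w * x + y * z), 1.0 - 2.0 * (x * x + y * y))
    pitch = np.arcsin(np.clip(2.0 * (w * y - z * x), -1.0, 1.0))
    yaw = np.arctan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))
    return roll, pitch, yaw

def apply_orientation(quaternion, point):
    # Rotate a 3D point by a unit quaternion in [x, y, z, w] order:
    # v' = 2(u.v)u + (w^2 - u.u)v + 2w(u x v), the expanded form of q v q^-1.
    q = np.asarray(quaternion, dtype=float)
    v = np.asarray(point, dtype=float)
    u, w_ = q[:3], q[3]
    return (2.0 * np.dot(u, v) * u
            + (w_ * w_ - np.dot(u, u)) * v
            + 2.0 * w_ * np.cross(u, v))
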
Example #2
    def _update_temp(self):
        """Update the frustums using the racecar model state queried from Gazebo."""
        with self.lock:
            self.frustums = []
            self.cam_poses = []
            self.near_plane_infos = []
            # Retrieve the car pose from Gazebo to calculate the camera pose.
            car_model_state = self.model_state_client('racecar', '')
            car_pos = np.array([car_model_state.pose.position.x,
                                car_model_state.pose.position.y,
                                car_model_state.pose.position.z])
            car_quaternion = [car_model_state.pose.orientation.x,
                              car_model_state.pose.orientation.y,
                              car_model_state.pose.orientation.z,
                              car_model_state.pose.orientation.w]
            for camera_offset in self.camera_offsets:
                # Get camera position by applying the position offset from the car position.
                cam_pos = car_pos + apply_orientation(car_quaternion, camera_offset)
                # Get camera rotation from the car's yaw and the camera pitch angle.
                _, _, yaw = quaternion_to_euler(x=car_quaternion[0],
                                                y=car_quaternion[1],
                                                z=car_quaternion[2],
                                                w=car_quaternion[3])
                cam_quaternion = np.array(euler_to_quaternion(pitch=self.camera_pitch, yaw=yaw))
                # Calculate the frustum from the camera position and rotation.
                planes, cam_pose, near_plane_info = self._calculate_frustum_planes(
                    cam_pos=cam_pos, cam_quaternion=cam_quaternion)
                self.frustums.append(planes)
                self.cam_poses.append(cam_pose)
                self.near_plane_infos.append(near_plane_info)
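
Here self.model_state_client is invoked as a getter with a model name and a relative entity name, which matches Gazebo's get_model_state ROS service. A hedged sketch of how such a proxy is typically constructed (the project may wrap this differently):

import rospy
from gazebo_msgs.srv import GetModelState

# Assumption: model_state_client is (or wraps) a ServiceProxy for Gazebo's
# get_model_state service, called with (model_name, relative_entity_name).
rospy.wait_for_service('/gazebo/get_model_state')
model_state_client = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
car_model_state = model_state_client('racecar', '')
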
Example #3

    def _update(self, car_model_state, delta_time):
        """Move the chase camera one step toward its target pose behind the car."""
        # Calculate the target camera position based on the car position.
        _, _, car_yaw = quaternion_to_euler(
            x=car_model_state.pose.orientation.x,
            y=car_model_state.pose.orientation.y,
            z=car_model_state.pose.orientation.z,
            w=car_model_state.pose.orientation.w)
        # Linearly interpolate the yaw angle toward the car's heading.
        car_yaw = utils.lerp_angle_rad(self.last_yaw, car_yaw,
                                       delta_time * self.damping)
        target_cam_quaternion = np.array(
            euler_to_quaternion(pitch=self.look_down_angle_rad, yaw=car_yaw))
        target_camera_location = np.array([car_model_state.pose.position.x,
                                           car_model_state.pose.position.y,
                                           0.0]) + \
                                 apply_orientation(target_cam_quaternion, np.array([-self.cam_dist_offset, 0, 0]))
        target_camera_location_2d = target_camera_location[0:2]

        # Linearly interpolate the camera position toward the target position.
        cur_camera_2d_pos = np.array([
            self.last_camera_state.pose.position.x,
            self.last_camera_state.pose.position.y
        ])
        new_cam_pos_2d = utils.lerp(cur_camera_2d_pos,
                                    target_camera_location_2d,
                                    delta_time * self.damping)

        # Calculate the camera rotation quaternion from the look-at yaw.
        look_at_yaw = utils.get_angle_between_two_points_2d_rad(
            self.last_camera_state.pose.position,
            car_model_state.pose.position)
        cam_quaternion = euler_to_quaternion(pitch=self.look_down_angle_rad,
                                             yaw=look_at_yaw)

        # Configure the camera ModelState and send it to Gazebo.
        camera_model_state = ModelState()
        camera_model_state.model_name = self.topic_name
        camera_model_state.pose.position.x = new_cam_pos_2d[0]
        camera_model_state.pose.position.y = new_cam_pos_2d[1]
        camera_model_state.pose.position.z = self.cam_fixed_height
        camera_model_state.pose.orientation.x = cam_quaternion[0]
        camera_model_state.pose.orientation.y = cam_quaternion[1]
        camera_model_state.pose.orientation.z = cam_quaternion[2]
        camera_model_state.pose.orientation.w = cam_quaternion[3]
        self.model_state_client(camera_model_state)

        self.last_camera_state = camera_model_state
        self.last_yaw = car_yaw
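
The _update method relies on small interpolation helpers from a utils module that is not shown. A plausible minimal sketch, with behavior inferred from the call sites (the actual utils implementations may differ):

import math

def lerp(current, target, fraction):
    # Linear interpolation; works elementwise on numpy arrays as used above.
    fraction = min(max(fraction, 0.0), 1.0)
    return current + (target - current) * fraction

def lerp_angle_rad(current, target, fraction):
    # Interpolate along the shortest arc so the yaw never swings the long
    # way around when it wraps from +pi to -pi.
    diff = (target - current + math.pi) % (2.0 * math.pi) - math.pi
    return current + diff * min(max(fraction, 0.0), 1.0)

def get_angle_between_two_points_2d_rad(pnt_from, pnt_to):
    # Yaw of the 2D vector from pnt_from to pnt_to, reading the x/y fields
    # of geometry_msgs Point-like objects.
    return math.atan2(pnt_to.y - pnt_from.y, pnt_to.x - pnt_from.x)
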
Example #4
    def _get_initial_camera_pose(self, car_model_state):
        """Compute the camera's initial pose directly behind the car."""
        _, _, car_yaw = quaternion_to_euler(x=car_model_state.pose.orientation.x,
                                            y=car_model_state.pose.orientation.y,
                                            z=car_model_state.pose.orientation.z,
                                            w=car_model_state.pose.orientation.w)
        target_cam_quaternion = np.array(euler_to_quaternion(pitch=self.look_down_angle_rad, yaw=car_yaw))
        target_camera_location = np.array([car_model_state.pose.position.x,
                                           car_model_state.pose.position.y,
                                           0.0]) + \
                                 apply_orientation(target_cam_quaternion, np.array([-self.cam_dist_offset, 0, 0]))
        camera_model_pose = Pose()
        camera_model_pose.position.x = target_camera_location[0]
        camera_model_pose.position.y = target_camera_location[1]
        camera_model_pose.position.z = self.cam_fixed_height
        camera_model_pose.orientation.x = target_cam_quaternion[0]
        camera_model_pose.orientation.y = target_cam_quaternion[1]
        camera_model_pose.orientation.z = target_cam_quaternion[2]
        camera_model_pose.orientation.w = target_cam_quaternion[3]
        self.last_yaw = car_yaw

        return camera_model_pose
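
A camera built from these two methods would typically seed its smoothed state once from _get_initial_camera_pose and then call _update every frame. A hypothetical wiring sketch (the reset method and its call site are assumptions, not from the source):

def reset(self, car_model_state):
    # Hypothetical glue code: publish the initial pose and remember it so the
    # first _update call has a last_camera_state to interpolate from.
    camera_model_state = ModelState()
    camera_model_state.model_name = self.topic_name
    camera_model_state.pose = self._get_initial_camera_pose(car_model_state)
    self.model_state_client(camera_model_state)
    self.last_camera_state = camera_model_state
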
Example #5
    def get_object_reward_params(self, racecar_name, model_point, car_pose):
        """Returns a dictionary with object-related reward function params."""
        with self._lock_:
            try:
                object_locations = [
                    [pose.position.x, pose.position.y]
                    for name, pose in self.object_poses.items()
                    if racecar_name not in name
                ]
                object_poses = [
                    pose for name, pose in self.object_poses.items()
                    if racecar_name not in name
                ]
                if not object_locations:
                    return {}

                # Sort the object locations based on projected distance
                num_objects = len(object_locations)
                object_pdists = [
                    self.center_line.project(Point(p))
                    for p in object_locations
                ]
                object_headings = [0.0] * num_objects
                object_speeds = [0.0] * num_objects
                if self._is_bot_car_:
                    for i, object_pose in enumerate(object_poses):
                        _, _, yaw = quaternion_to_euler(
                            x=object_pose.orientation.x,
                            y=object_pose.orientation.y,
                            z=object_pose.orientation.z,
                            w=object_pose.orientation.w,
                        )
                        object_headings[i] = yaw
                        object_speeds[i] = self._bot_car_speed_

                # Find the prev/next objects
                model_pdist = self.center_line.project(model_point)
                object_order = np.argsort(object_pdists)
                object_pdists_ordered = [
                    object_pdists[i] for i in object_order
                ]
                prev_object_index, next_object_index = find_prev_next(
                    object_pdists_ordered, model_pdist)
                prev_object_index = object_order[prev_object_index]
                next_object_index = object_order[next_object_index]

                # Figure out which one is the closest
                object_points = [
                    Point([object_location[0], object_location[1]])
                    for object_location in object_locations
                ]
                prev_object_point = object_points[prev_object_index]
                next_object_point = object_points[next_object_index]
                prev_object_dist = model_point.distance(prev_object_point)
                next_object_dist = model_point.distance(next_object_point)
                if prev_object_dist < next_object_dist:
                    closest_object_point = prev_object_point
                else:
                    closest_object_point = next_object_point

                # Figure out whether each object is left of center based on direction
                objects_left_of_center = [
                    self._inner_poly_.contains(p) ^ (not self.is_ccw)
                    for p in object_points
                ]

                # Get object distances to centerline
                objects_distance_to_center = [
                    self.center_line.distance(p) for p in object_points
                ]

                # Figure out if the next object is in the camera view
                objects_in_camera = self.get_objects_in_camera_frustums(
                    agent_name=racecar_name, car_pose=car_pose)
                is_next_object_in_camera = any(
                    object_in_camera_idx == next_object_index
                    for object_in_camera_idx, _ in objects_in_camera)

                # Assemble the object-related reward params
                return {
                    RewardParam.CLOSEST_OBJECTS.value[0]:
                    [prev_object_index, next_object_index],
                    RewardParam.OBJECT_LOCATIONS.value[0]:
                    object_locations,
                    RewardParam.OBJECTS_LEFT_OF_CENTER.value[0]:
                    objects_left_of_center,
                    RewardParam.OBJECT_SPEEDS.value[0]:
                    object_speeds,
                    RewardParam.OBJECT_HEADINGS.value[0]:
                    object_headings,
                    RewardParam.OBJECT_CENTER_DISTS.value[0]:
                    objects_distance_to_center,
                    RewardParam.OBJECT_CENTERLINE_PROJECTION_DISTANCES.value[0]:
                    object_pdists,
                    RewardParam.OBJECT_IN_CAMERA.value[0]:
                    is_next_object_in_camera,
                }
            except Exception as ex:
                raise GenericRolloutException(
                    "Unable to get object reward params: {}".format(ex))
Example #6
    def get_object_reward_params(self, racecar_name, model_point, reverse_dir,
                                 car_pose):
        """Returns a dictionary with object-related reward function params."""
        with self._lock_:
            try:
                object_locations = [
                    [pose.position.x, pose.position.y]
                    for name, pose in self.object_poses.items()
                    if racecar_name not in name
                ]
                object_poses = [
                    pose for name, pose in self.object_poses.items()
                    if racecar_name not in name
                ]
                if not object_locations:
                    return {}

                # Sort the object locations based on projected distance
                object_pdists = [
                    self.center_line.project(Point(p))
                    for p in object_locations
                ]
                object_order = np.argsort(object_pdists)
                object_pdists = [object_pdists[i] for i in object_order]
                object_locations = [object_locations[i] for i in object_order]
                object_headings = [0.0 for _ in object_order]
                object_speeds = [0.0 for _ in object_order]
                if self._is_bot_car_:
                    # object_locations and object_pdists were reordered above,
                    # so align the headings and speeds with that sorted order.
                    for sorted_idx, orig_idx in enumerate(object_order):
                        object_pose = object_poses[orig_idx]
                        _, _, yaw = quaternion_to_euler(
                            x=object_pose.orientation.x,
                            y=object_pose.orientation.y,
                            z=object_pose.orientation.z,
                            w=object_pose.orientation.w)
                        object_headings[sorted_idx] = yaw
                        object_speeds[sorted_idx] = self._bot_car_speed_

                # Find the prev/next objects
                model_pdist = self.center_line.project(model_point)
                prev_object_index, next_object_index = find_prev_next(
                    object_pdists, model_pdist)

                # Figure out which one is the closest
                object_points = [
                    Point([object_location[0], object_location[1]])
                    for object_location in object_locations
                ]
                prev_object_point = object_points[prev_object_index]
                next_object_point = object_points[next_object_index]
                prev_object_dist = model_point.distance(prev_object_point)
                next_object_dist = model_point.distance(next_object_point)
                if prev_object_dist < next_object_dist:
                    closest_object_point = prev_object_point
                else:
                    closest_object_point = next_object_point

                # Figure out whether each object is left of center based on direction
                is_ccw = self._is_ccw_ ^ reverse_dir
                objects_left_of_center = [
                    self._inner_poly_.contains(p) ^ (not is_ccw)
                    for p in object_points
                ]

                # Figure out which lane the model is in
                model_nearest_pnts_dict = self.get_nearest_points(model_point)
                model_nearest_dist_dict = self.get_nearest_dist(
                    model_nearest_pnts_dict, model_point)
                model_is_inner = \
                    model_nearest_dist_dict[TrackNearDist.NEAR_DIST_IN.value] < \
                    model_nearest_dist_dict[TrackNearDist.NEAR_DIST_OUT.value]

                # Figure out which lane the object is in
                closest_object_nearest_pnts_dict = self.get_nearest_points(
                    closest_object_point)
                closest_object_nearest_dist_dict = \
                    self.get_nearest_dist(closest_object_nearest_pnts_dict, closest_object_point)
                closest_object_is_inner = \
                    closest_object_nearest_dist_dict[TrackNearDist.NEAR_DIST_IN.value] < \
                    closest_object_nearest_dist_dict[TrackNearDist.NEAR_DIST_OUT.value]
                objects_in_camera = self.get_objects_in_camera_frustums(
                    agent_name=racecar_name,
                    car_pose=car_pose,
                    object_order=object_order)
                is_next_object_in_camera = any(
                    object_in_camera_idx == next_object_index
                    for object_in_camera_idx, _ in objects_in_camera)
                # Assemble the object-related reward params
                return {
                    RewardParam.CLOSEST_OBJECTS.value[0]:
                    [prev_object_index, next_object_index],
                    RewardParam.OBJECT_LOCATIONS.value[0]:
                    object_locations,
                    RewardParam.OBJECTS_LEFT_OF_CENTER.value[0]:
                    objects_left_of_center,
                    RewardParam.OBJECT_SPEEDS.value[0]:
                    object_speeds,
                    RewardParam.OBJECT_HEADINGS.value[0]:
                    object_headings,
                    RewardParam.OBJECT_CENTERLINE_PROJECTION_DISTANCES.value[0]:
                    object_pdists,
                    RewardParam.OBJECT_IN_CAMERA.value[0]:
                    is_next_object_in_camera
                }
            except Exception as ex:
                raise GenericRolloutException(
                    "Unable to get object reward params: {}".format(ex))