Example #1
def test_distance(point_a, point_b, expected):
    """ Test the distance computed between two points is the same as expected"""
    location_a, location_b = Location(*point_a), Location(*point_b)
    assert np.isclose(location_a.distance(location_b), expected), "Distance "
    "between point_a and point_b is not the same as the expected distance."
    assert np.isclose(location_b.distance(location_a), expected), "Distance "
    "between point_b and point_a is not the same as the expected distance."
Example #2
    def _construct_waypoints(self, timestamp, path_x, path_y, speeds, success):
        """
        Convert the optimal frenet path into a waypoints message.
        """
        path_transforms = []
        target_speeds = []
        if not success:
            self._logger.debug("@{}: Frenet Optimal Trajectory failed. "
                               "Sending emergency stop.".format(timestamp))
            for wp in itertools.islice(self._prev_waypoints, 0,
                                       DEFAULT_NUM_WAYPOINTS):
                path_transforms.append(wp)
                target_speeds.append(0)
        else:
            self._logger.debug("@{}: Frenet Optimal Trajectory succeeded."
                               .format(timestamp))
            for point in zip(path_x, path_y, speeds):
                if self._map is not None:
                    p_loc = self._map.get_closest_lane_waypoint(
                        Location(x=point[0], y=point[1], z=0)).location
                else:
                    p_loc = Location(x=point[0], y=point[1], z=0)
                path_transforms.append(
                    Transform(
                        location=Location(x=point[0], y=point[1], z=p_loc.z),
                        rotation=Rotation(),
                    ))
                target_speeds.append(point[2])

        waypoints = deque(path_transforms)
        self._prev_waypoints = waypoints
        return WaypointsMessage(timestamp, waypoints, target_speeds)
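On planner failure the operator replays its previously published waypoints with zero target speeds, which amounts to an in-lane emergency stop. The fallback pattern as a standalone sketch (DEFAULT_NUM_WAYPOINTS and the names are illustrative):

import itertools
from collections import deque

DEFAULT_NUM_WAYPOINTS = 50  # assumed value; the real constant lives in pylot

def emergency_stop_plan(prev_waypoints):
    # Reuse the old path so the vehicle stays in its lane, but zero out the
    # speed targets so the controller brakes to a halt.
    waypoints = deque(
        itertools.islice(prev_waypoints, 0, DEFAULT_NUM_WAYPOINTS))
    target_speeds = [0] * len(waypoints)
    return waypoints, target_speeds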
Example #3
def test_l1_distance(point_a, point_b, expected):
    """ Test the L1 distance computed between the two points. """
    location_a, location_b = Location(*point_a), Location(*point_b)
    assert np.isclose(location_a.l1_distance(location_b), expected), (
        "L1 distance between point_a and point_b is not the same as "
        "expected.")
    assert np.isclose(location_b.l1_distance(location_a), expected), (
        "L1 distance between point_b and point_a is not the same as "
        "expected.")
Example #4
    def _construct_waypoints(self, timestamp, path_x, path_y, speeds, success):
        """
        Convert the rrt* path into a waypoints message.
        """
        path_transforms = []
        target_speeds = []
        if not success:
            self._logger.error("@{}: RRT* failed. "
                               "Sending emergency stop.".format(timestamp))
            for wp in itertools.islice(self._prev_waypoints, 0,
                                       DEFAULT_NUM_WAYPOINTS):
                path_transforms.append(wp)
                target_speeds.append(0)
        else:
            self._logger.debug("@{}: RRT* succeeded."
                               .format(timestamp))
            for point in zip(path_x, path_y, speeds):
                if self._map is not None:
                    p_loc = self._map.get_closest_lane_waypoint(
                        Location(x=point[0], y=point[1], z=0)).location
                else:
                    # RRT* does not take into account the driveable region;
                    # it constructs the search space as a top-down, minimum
                    # bounding rectangle with padding in each dimension.
                    p_loc = Location(x=point[0], y=point[1], z=0)
                path_transforms.append(
                    Transform(
                        location=Location(x=point[0], y=point[1], z=p_loc.z),
                        rotation=Rotation(),
                    ))
                target_speeds.append(point[2])

        waypoints = deque(path_transforms)
        self._prev_waypoints = waypoints
        return WaypointsMessage(timestamp, waypoints, target_speeds)
Example #5
def test_negative_location_from_carla():
    """ Test that Location throws a ValueError if incorrect carla_loc argument
    is passed. """
    DummyType = namedtuple("DummyType", "x, y, z")
    dummy_instance = DummyType(10, 20, 30)
    with pytest.raises(ValueError):
        Location.from_carla_location(dummy_instance)
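The test implies that Location.from_carla_location type-checks its argument rather than duck-typing on x, y, z attributes. A minimal sketch of such a guard, assuming the real classmethod behaves along these lines:

import carla

class Location:
    def __init__(self, x=0.0, y=0.0, z=0.0):
        self.x, self.y, self.z = x, y, z

    @classmethod
    def from_carla_location(cls, carla_loc):
        # Reject look-alikes such as the namedtuple in the test above.
        if not isinstance(carla_loc, carla.Location):
            raise ValueError('carla_loc should be of type carla.Location')
        return cls(carla_loc.x, carla_loc.y, carla_loc.z)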
Example #6
def test_as_numpy_array():
    """ Test the as_carla_location instance method of Location """
    location = Location(x=1, y=2, z=3)
    np_array = location.as_numpy_array()
    assert isinstance(np_array, np.ndarray), "Returned instance is "
    "not of the type np.ndarray"
    assert all(np.isclose(np_array, [1, 2, 3])), "Returned instance x, y, z "
    "values are not the same as the one in location."
Example #7
File: lane_map.py Project: ymote/pylot
 def generate_waypoints(self, ego_transform, waypoint, road_option):
     # Keep on driving in the same lane/straight if the next waypoint is
     # far away.
     if (road_option == pylot.utils.RoadOption.STRAIGHT
             or road_option == pylot.utils.RoadOption.LANE_FOLLOW
             or ego_transform.location.distance(
                 waypoint.location) > 20):
         for lane in self.lanes:
             if lane.id == 0:
                 return lane.get_lane_center_transforms()
         # Lane detector didn't find ego's lane. Keep on driving
         # in the same direction.
         output_wps = deque([])
         for distance in range(1, 20):
             wp = (ego_transform *
                   Transform(Location(x=distance), Rotation()))
             output_wps.append(wp)
         return output_wps
     elif road_option == pylot.utils.RoadOption.LEFT:
         output_wps = deque([])
         wp = copy.deepcopy(ego_transform)
         for distance in range(1, 11):
             wp = wp * Transform(Location(x=1), Rotation(yaw=-9))
             output_wps.append(wp)
         return output_wps
     elif road_option == pylot.utils.RoadOption.RIGHT:
         output_wps = deque([])
         wp = copy.deepcopy(ego_transform)
         for distance in range(1, 11):
             wp = wp * Transform(Location(x=1), Rotation(yaw=9))
             output_wps.append(wp)
         return output_wps
     elif road_option == pylot.utils.RoadOption.CHANGE_LANE_LEFT:
         for lane in self.lanes:
             if lane.id == -1:
                 return lane.get_lane_center_transforms()
         # Lane detector didn't find left lane.
         output_wps = deque([])
         wp = copy.deepcopy(ego_transform)
         for distance in range(1, 11):
             wp = wp * Transform(Location(x=1), Rotation(yaw=-4))
             output_wps.append(wp)
         return output_wps
     elif road_option == pylot.utils.RoadOption.CHANGE_LANE_RIGHT:
         for lane in self.lanes:
             if lane.id == 1:
                 return lane.get_lane_center_transforms()
         # Lane detector didn't find right lane.
         output_wps = deque([])
         wp = copy.deepcopy(ego_transform)
         for distance in range(1, 11):
             wp = wp * Transform(Location(x=1), Rotation(yaw=4))
             output_wps.append(wp)
         return output_wps
     self._logger.debug('Unexpected road option {}'.format(road_option))
     return deque([ego_transform])
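Each road option is approximated by composing per-meter transforms: ten steps of yaw=-9 accumulate a 90-degree left turn, while yaw=±4 gives a gentler 40-degree drift for lane changes. A quick standalone check of that arc geometry, using plain trigonometry under the convention that each step rotates first and then advances one meter along the new heading:

import math

def arc_endpoint(steps, step_len, yaw_step_deg):
    x = y = heading = 0.0
    for _ in range(steps):
        heading += math.radians(yaw_step_deg)
        x += step_len * math.cos(heading)
        y += step_len * math.sin(heading)
    return x, y, math.degrees(heading)

print(arc_endpoint(10, 1.0, -9.0))  # final heading -90 deg: a quarter turn left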
Example #8
def test_as_carla_location():
    """ Test the as_carla_location instance method of Location """
    location = Location(x=1, y=2, z=3)
    carla_location = location.as_carla_location()
    assert isinstance(carla_location, carla.Location), (
        "Returned instance is not of the type carla.Location")
    assert np.isclose(carla_location.x, location.x), (
        "Returned instance x value is not the same as the one in location.")
    assert np.isclose(carla_location.y, location.y), (
        "Returned instance y value is not the same as the one in location.")
    assert np.isclose(carla_location.z, location.z), (
        "Returned instance z value is not the same as the one in location.")
Example #9
def test_addition(point_a, point_b, expected):
    """ Test the addition of the two locations. """
    location_a, location_b = Location(*point_a), Location(*point_b)
    sum_location = location_a + location_b
    assert isinstance(sum_location, Location), (
        "The sum was not of the type Location")
    assert np.isclose(expected[0], sum_location.x), (
        "The x value of the sum was not the same as the expected value.")
    assert np.isclose(expected[1], sum_location.y), (
        "The y value of the sum was not the same as the expected value.")
    assert np.isclose(expected[2], sum_location.z), (
        "The z value of the sum was not the same as the expected value.")
Example #10
def test_vector_to_camera_view(location, expected):
    from pylot.drivers.sensor_setup import CameraSetup
    camera_setup = CameraSetup('test_camera',
                               'sensor.camera.rgb',
                               width=1920,
                               height=1080,
                               transform=Transform(Location(), Rotation()))
    location = Location(*location)
    assert all(
        np.isclose(
            location.to_camera_view(
                camera_setup.get_extrinsic_matrix(),
                camera_setup.get_intrinsic_matrix()).as_numpy_array(),
            expected)), "The camera transformation was not as expected."
Example #11
    def get_pixel_location(self, pixel, camera_setup):
        """ Gets the 3D world location from pixel coordinates.

        Args:
            pixel: A pylot.utils.Vector2D denoting pixel coordinates.
            camera_setup: The setup of the camera.
        Returns:
            A pylot.utils.Location of the 3D world location, or None if all the
            point cloud points are behind.
        """
        if len(self.points) == 0:
            return None
        intrinsic_mat = camera_setup.get_intrinsic_matrix()
        # Project our 2D pixel location into 3D space, onto the z=1 plane.
        p3d = np.dot(inv(intrinsic_mat), np.array([[pixel.x], [pixel.y],
                                                   [1.0]]))
        location = self._get_closest_point_in_point_cloud(
            Vector2D(p3d[0], p3d[1]))
        # Normalize our point to have the same depth as our closest point.
        p3d *= np.array([location.z])
        p3d_locations = [
            Location(px, py, pz) for px, py, pz in np.asarray(p3d.transpose())
        ]
        # Convert from camera to unreal coordinates.
        to_world_transform = camera_setup.get_unreal_transform()
        camera_point_cloud = to_world_transform.transform_points(p3d_locations)
        return camera_point_cloud[0]
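The back-projection multiplies the homogeneous pixel by the inverse pinhole intrinsic matrix, K^-1 @ [u, v, 1]^T, yielding a ray direction on the z=1 plane that is then rescaled by the depth of the nearest cloud point. A numeric sketch with an assumed 1920x1080, 90-degree-FOV camera:

import numpy as np
from numpy.linalg import inv

width, height, fov = 1920, 1080, 90.0
f = width / (2.0 * np.tan(np.radians(fov) / 2.0))  # focal length in pixels
K = np.array([[f, 0.0, width / 2.0],
              [0.0, f, height / 2.0],
              [0.0, 0.0, 1.0]])
u, v = 960.0, 540.0  # the principal point: the ray points straight ahead
p3d = inv(K) @ np.array([[u], [v], [1.0]])
print(p3d.ravel())  # -> [0. 0. 1.]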
Example #12
def test_imu_setup_initialization():
    # Set up the required parameters for the initialization.
    name = 'imu_sensor'
    transform = Transform(Location(), Rotation())
    imu_setup = IMUSetup(name, transform)
    assert imu_setup.name == name, "The name in the setup is not the same."
    assert imu_setup.transform == transform, "The transform is not the same."
Example #13
 def from_carla_point_cloud(cls, carla_pc, transform):
     # Transform the raw_data into a point cloud.
     points = np.frombuffer(carla_pc.raw_data, dtype=np.dtype('f4'))
     points = copy.deepcopy(points)
     points = np.reshape(points, (int(points.shape[0] / 3), 3))
     point_cloud = [Location(x, y, z) for x, y, z in np.asarray(points)]
     return cls(point_cloud, transform)
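carla_pc.raw_data is a flat float32 buffer of consecutive (x, y, z) triples, hence the reshape to (N, 3); the deepcopy is needed because np.frombuffer returns a read-only view into the simulator's buffer. The same decoding with a synthetic buffer:

import copy
import numpy as np

raw = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=np.float32).tobytes()
points = np.frombuffer(raw, dtype=np.dtype('f4'))
points = copy.deepcopy(points)  # own the data; the view is read-only
points = np.reshape(points, (int(points.shape[0] / 3), 3))
print(points)  # [[1. 2. 3.]
               #  [4. 5. 6.]]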
Example #14
 def build_output_waypoints(self, path_x, path_y, speeds):
     wps = deque()
     target_speeds = deque()
     for point in zip(path_x, path_y, speeds):
         if self._map is not None:
             p_loc = self._map.get_closest_lane_waypoint(
                 Location(x=point[0], y=point[1], z=0)).location
         else:
             p_loc = Location(x=point[0], y=point[1], z=0)
         wps.append(
             Transform(
                 location=Location(x=point[0], y=point[1], z=p_loc.z),
                 rotation=Rotation(),
             ))
         target_speeds.append(point[2])
     return Waypoints(wps, target_speeds)
Example #15
File: point_cloud.py Project: ymote/pylot
    def get_closest_point_in_point_cloud(fwd_points,
                                         pixel,
                                         normalized: bool = False):
        """Finds the closest point in the point cloud to the given point.

        Args:
            fwd_points: A numpy array of (x, y, z) points in camera space.
            pixel (:py:class:`~pylot.utils.Vector2D`): Camera coordinates.
            normalized (bool): If True, divide each point's x, y by its z
                before computing distances.

        Returns:
            :py:class:`~pylot.utils.Location`: Closest point cloud point.
        """
        # Select x and y.
        pc_xy = fwd_points[:, 0:2]
        # Create an array from the x, y coordinates of the point.
        xy = np.array([pixel.x, pixel.y]).transpose()

        # Compute distance
        if normalized:
            # Select z
            pc_z = fwd_points[:, 2]
            # Divide x, y by z.
            normalized_pc = pc_xy / pc_z[:, None]
            dist = np.sum((normalized_pc - xy)**2, axis=1)
        else:
            dist = np.sum((pc_xy - xy)**2, axis=1)

        # Select index of the closest point.
        closest_index = np.argmin(dist)

        # Return the closest point.
        return Location(fwd_points[closest_index][0],
                        fwd_points[closest_index][1],
                        fwd_points[closest_index][2])
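In the normalized branch each cloud point's (x, y) is divided by its depth, so the comparison happens in the same z=1 projective plane as the query ray. The core of that search on synthetic camera-frame points (a sketch, bypassing the Vector2D wrapper):

import numpy as np

fwd_points = np.array([[0.0, 0.0, 2.0],
                       [1.0, 1.0, 1.0],
                       [4.0, 4.0, 4.0]])
xy = np.array([1.0, 1.0])  # query ray through (1, 1) on the z=1 plane

normalized_pc = fwd_points[:, 0:2] / fwd_points[:, 2][:, None]
dist = np.sum((normalized_pc - xy)**2, axis=1)
print(fwd_points[np.argmin(dist)])  # -> [1. 1. 1.], the first point on the ray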
Example #16
    def get_pixel_location(self, pixel, camera_setup):
        """ Gets the 3D world location from pixel coordinates.

        Args:
            pixel (:py:class:`~pylot.utils.Vector2D`): Pixel coordinates.
            camera_setup (:py:class:`~pylot.drivers.sensor_setup.CameraSetup`):
                The setup of the camera.
        Returns:
            :py:class:`~pylot.utils.Location`: The 3D world location, or None
            if all the point cloud points are behind.
        """
        # Select only points that are in front.
        fwd_points = self.points[np.where(self.points[:, 2] > 0.0)]
        if len(fwd_points) == 0:
            return None
        intrinsic_mat = camera_setup.get_intrinsic_matrix()
        # Project our 2D pixel location into 3D space, onto the z=1 plane.
        p3d = np.dot(inv(intrinsic_mat), np.array([[pixel.x], [pixel.y],
                                                   [1.0]]))
        location = PointCloud.get_closest_point_in_point_cloud(
            fwd_points, Vector2D(p3d[0], p3d[1]))
        # Normalize our point to have the same depth as our closest point.
        p3d *= np.array([location.z])
        p3d = p3d.transpose()
        # Convert from camera to unreal coordinates.
        to_world_transform = camera_setup.get_unreal_transform()
        camera_point_cloud = to_world_transform.transform_points(p3d)[0]
        pixel_location = Location(camera_point_cloud[0], camera_point_cloud[1],
                                  camera_point_cloud[2])
        return pixel_location
Example #17
    def run(self):
        # Read the vehicle ID from the vehicle ID stream.
        vehicle_id_msg = self._vehicle_id_stream.read()
        vehicle_id = vehicle_id_msg.data
        self._logger.debug("@{}: Received Vehicle ID: {}".format(
            vehicle_id_msg.timestamp, vehicle_id))

        # Connect to the world.
        _, world = get_world(self._flags.simulator_host,
                             self._flags.simulator_port,
                             self._flags.simulator_timeout)

        self._vehicle = get_vehicle_handle(world, vehicle_id)

        # Install the collision sensor.
        collision_blueprint = world.get_blueprint_library().find(
            'sensor.other.collision')
        self._logger.debug("Spawning a collision sensor.")
        self._collision_sensor = world.spawn_actor(
            collision_blueprint,
            Transform(Location(), Rotation()).as_simulator_transform(),
            attach_to=self._vehicle)

        # Register the callback on the collision sensor.
        self._collision_sensor.listen(self.process_collision)
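The listen callback fires on the simulator's sensor thread with a carla.CollisionEvent. A minimal sketch of what process_collision might look like (the operator's actual implementation is not shown in this example):

    def process_collision(self, event):
        # event.other_actor is the carla.Actor that was hit;
        # event.normal_impulse is a carla.Vector3D with the collision impulse.
        impulse = event.normal_impulse
        intensity = (impulse.x**2 + impulse.y**2 + impulse.z**2)**0.5
        self._logger.debug('Collision with {} (impulse {:.2f})'.format(
            event.other_actor.type_id, intensity))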
Example #18
def test_location_from_carla(x, y, z):
    """ Test that the Location is initialized correctly from a carla.Location
    instance """
    location = Location.from_carla_location(carla.Location(x, y, z))
    assert np.isclose(location.x, x), "X values are not the same."
    assert np.isclose(location.y, y), "Y values are not the same."
    assert np.isclose(location.z, z), "Z values are not the same."
Example #19
def test_camera_unreal_transform(rotation, expected):
    """
    Ensure that the camera space to unreal engine coordinate space conversion
    is correct.

    The camera space is defined as:
        +x to the right, +y to down, +z into the screen.

    The unreal engine coordinate space is defined as:
        +x into the screen, +y to the right, +z to up.
    """
    camera_rotation = Rotation(*rotation)
    camera_setup = CameraSetup(name='camera_setup',
                               camera_type='sensor.camera.rgb',
                               width=1920,
                               height=1080,
                               transform=Transform(Location(),
                                                   camera_rotation))
    transformed_rotation = camera_setup.get_unreal_transform().rotation
    transformed_rotation = [
        transformed_rotation.pitch, transformed_rotation.yaw,
        transformed_rotation.roll
    ]
    assert all(np.isclose(transformed_rotation, expected)), \
            "The unreal transformation does not match the expected transform."
Example #20
def test_lidar_unreal_transform(rotation, expected):
    """
    Ensure that the LIDAR space to unreal engine coordinate space conversion
    is correct.

    The LIDAR space is defined as:
        +x to the right, +y out of the screen, +z down.

    The unreal engine coordinate space is defined as:
        +x into the screen, +y to the right, +z to up.
    """
    lidar_rotation = Rotation(*rotation)
    lidar_setup = LidarSetup(name='lidar_setup',
                             lidar_type='sensor.lidar.ray_cast',
                             transform=Transform(Location(), lidar_rotation),
                             range=6000.0,
                             rotation_frequency=3000.0,
                             channels=3,
                             upper_fov=90.0,
                             lower_fov=90.0,
                             points_per_second=100)

    transformed_rotation = lidar_setup.get_unreal_transform().rotation
    transformed_rotation = [
        transformed_rotation.pitch, transformed_rotation.yaw,
        transformed_rotation.roll
    ]
    assert all(np.isclose(transformed_rotation, expected)), \
            "The unreal transformation does not match the expected transform."
Example #21
File: lane.py Project: erdos-project/pylot
 def get_lane_center_transforms(self):
     if len(self.left_markings) < len(self.right_markings):
         anchor_markings = self.left_markings
         other_markings = self.right_markings
     else:
         anchor_markings = self.right_markings
         other_markings = self.left_markings
     index_other = 0
     center_markings = deque([])
     for transform in anchor_markings:
         dist = transform.location.distance(
             other_markings[index_other].location)
         while (index_other + 1 < len(other_markings)
                and dist > transform.location.distance(
                    other_markings[index_other + 1].location)):
             index_other += 1
             dist = transform.location.distance(
                 other_markings[index_other].location)
         if index_other < len(other_markings):
             other_loc = other_markings[index_other].location
             center_location = Location(
                 (transform.location.x + other_loc.x) / 2.0,
                 (transform.location.y + other_loc.y) / 2.0,
                 (transform.location.z + other_loc.z) / 2.0)
             center_markings.append(Transform(center_location, Rotation()))
     return center_markings
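The pairing is greedy and monotone: the index into the opposite markings only ever advances, so the scan is linear in the number of markings, and each lane center is the 3D midpoint of a matched pair. The same logic on plain coordinate tuples (a sketch, independent of pylot's Transform and Location types):

import math

def lane_centers(anchors, others):
    centers, j = [], 0
    for a in anchors:
        # Advance while the next opposite marking is strictly closer.
        while (j + 1 < len(others)
               and math.dist(a, others[j + 1]) < math.dist(a, others[j])):
            j += 1
        centers.append(tuple((p + q) / 2.0 for p, q in zip(a, others[j])))
    return centers

print(lane_centers([(0, 0, 0), (1, 0, 0)], [(0, 3, 0), (1, 3, 0)]))
# -> [(0.0, 1.5, 0.0), (1.0, 1.5, 0.0)]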
Example #22
    def _postprocess_predictions(self, prediction_array, vehicle_trajectories,
                                 vehicle_ego_transforms):
        # The prediction_array consists of predictions with respect to each
        # vehicle. Transform each predicted trajectory to be in relation to the
        # ego-vehicle, then convert into an ObstaclePrediction.
        obstacle_predictions_list = []
        num_predictions = len(vehicle_trajectories)

        for idx in range(num_predictions):
            cur_prediction = prediction_array[idx]

            obstacle_transform = vehicle_trajectories[idx].obstacle.transform
            predictions = []
            # Because R2P2 only predicts (x,y) coordinates, we assume the
            # vehicle stays at the same height as its last location.
            for t in range(self._flags.prediction_num_future_steps):
                cur_point = vehicle_ego_transforms[idx].transform_points(
                    np.array([[
                        cur_prediction[t][0], cur_prediction[t][1],
                        vehicle_ego_transforms[idx].location.z
                    ]]))[0]
                # R2P2 does not predict vehicle orientation, so we use our
                # estimated orientation of the vehicle at its latest location.
                predictions.append(
                    Transform(location=Location(cur_point[0], cur_point[1],
                                                cur_point[2]),
                              rotation=vehicle_ego_transforms[idx].rotation))

            # Probability; currently a filler value because we are taking
            # just one sample from distribution
            obstacle_predictions_list.append(
                ObstaclePrediction(vehicle_trajectories[idx],
                                   obstacle_transform, 1.0, predictions))
        return obstacle_predictions_list
Example #23
File: point_cloud.py Project: ymote/pylot
    def get_pixel_location(self, pixel, camera_setup: CameraSetup):
        """ Gets the 3D world location from pixel coordinates.

        Args:
            pixel (:py:class:`~pylot.utils.Vector2D`): Pixel coordinates.
            camera_setup (:py:class:`~pylot.drivers.sensor_setup.CameraSetup`):
                The setup of the camera with its transform in the world frame
                of reference.

        Returns:
            :py:class:`~pylot.utils.Location`: The 3D world location, or None
            if all the point cloud points are behind.
        """
        # Select only points that are in front.
        # Setting the threshold to 0.1 because super close points cause
        # floating point errors.
        fwd_points = self.points[np.where(self.points[:, 2] > 0.1)]
        if len(fwd_points) == 0:
            return None
        intrinsic_mat = camera_setup.get_intrinsic_matrix()
        # Project our 2D pixel location into 3D space, onto the z=1 plane.
        p3d = np.dot(inv(intrinsic_mat), np.array([[pixel.x], [pixel.y],
                                                   [1.0]]))

        if self._lidar_setup.lidar_type == 'sensor.lidar.ray_cast':
            location = PointCloud.get_closest_point_in_point_cloud(
                fwd_points, Vector2D(p3d[0], p3d[1]), normalized=True)
            # Use the depth from the retrieved location.
            p3d *= np.array([location.z])
            p3d = p3d.transpose()
            # Convert from camera to unreal coordinates if the lidar type is
            # sensor.lidar.ray_cast
            to_world_transform = camera_setup.get_unreal_transform()
            camera_point_cloud = to_world_transform.transform_points(p3d)[0]
            pixel_location = Location(camera_point_cloud[0],
                                      camera_point_cloud[1],
                                      camera_point_cloud[2])
        elif self._lidar_setup.lidar_type == 'velodyne':
            location = PointCloud.get_closest_point_in_point_cloud(
                fwd_points, Vector2D(p3d[0], p3d[1]), normalized=False)
            # Use the depth from the retrieved location.
            p3d[2] = location.z
            p3d = p3d.transpose()
            pixel_location = Location(p3d[0, 0], p3d[0, 1], p3d[0, 2])
        return pixel_location
Example #24
 def _construct_waypoints(self, timestamp, pose_msg, path_x, path_y, speeds,
                          success):
     """
     Convert the rrt* path into a waypoints message.
     """
     path_transforms = []
     target_speeds = deque()
     if not success:
         self._logger.error("@{}: RRT* failed. "
                            "Sending emergency stop.".format(timestamp))
         x = pose_msg.data.transform.location.x
         y = pose_msg.data.transform.location.y
         current_index = 0
         min_dist = np.infty
         for i, wp in enumerate(self._prev_waypoints):
             dist = np.linalg.norm([wp.location.x - x, wp.location.y - y])
             if dist <= min_dist:
                 current_index = i
                 min_dist = dist
         for wp in itertools.islice(
                 self._prev_waypoints, current_index,
                 current_index + self._flags.num_waypoints_ahead):
             path_transforms.append(wp)
             target_speeds.append(0)
         waypoints = deque(path_transforms)
     else:
         self._logger.debug("@{}: RRT* succeeded.".format(timestamp))
         for point in zip(path_x, path_y, speeds):
             if self._map is not None:
                 p_loc = self._map.get_closest_lane_waypoint(
                     Location(x=point[0], y=point[1], z=0)).location
             else:
                 # RRT* does not take into account the driveable region; it
                 # constructs the search space as a top-down, minimum bounding
                 # rectangle with padding in each dimension.
                 p_loc = Location(x=point[0], y=point[1], z=0)
             path_transforms.append(
                 Transform(
                     location=Location(x=point[0], y=point[1], z=p_loc.z),
                     rotation=Rotation(),
                 ))
             target_speeds.append(point[2])
         waypoints = deque(path_transforms)
         self._prev_waypoints = waypoints
     return WaypointsMessage(timestamp, waypoints, target_speeds)
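Unlike the simpler fallback in example #2, this version first finds the previous waypoint closest to the vehicle's current pose, so the replayed stop segment starts where the vehicle actually is instead of at the head of the old path. The nearest-index search, vectorized as a standalone sketch:

import numpy as np

def closest_waypoint_index(xs, ys, x, y):
    # Index of the previous-path point nearest to the current pose (x, y).
    dists = np.linalg.norm(
        np.stack([np.asarray(xs) - x, np.asarray(ys) - y]), axis=0)
    return int(np.argmin(dists))

print(closest_waypoint_index([0, 1, 2], [0, 0, 0], 1.2, 0.1))  # -> 1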
Example #25
def test_imu_setup_failed_initialization():
    # Set up the required parameters for the initialization.
    name = 'imu_sensor'
    transform = Transform(Location(), Rotation())

    with pytest.raises(AssertionError):
        imu_setup = IMUSetup(name=1, transform=transform)

    with pytest.raises(AssertionError):
        imu_setup = IMUSetup(name=name, transform=None)
Example #26
def test_point_cloud_get_pixel_location(lidar_points, pixel, expected):
    camera_setup = CameraSetup(
        'test_setup',
        'test_type',
        801,
        601,  # width, height
        Transform(location=Location(0, 0, 0), rotation=Rotation(0, 0, 0)),
        fov=90)
    point_cloud = PointCloud(lidar_points, Transform(Location(), Rotation()))
    location = point_cloud.get_pixel_location(pixel, camera_setup)
    assert np.isclose(location.x, expected.x), (
        'Returned x value is not the same as expected')
    assert np.isclose(location.y, expected.y), (
        'Returned y value is not the same as expected')
    assert np.isclose(location.z, expected.z), (
        'Returned z value is not the same as expected')
Example #27
def create_left_right_camera_setups(camera_name_prefix,
                                    location,
                                    width,
                                    height,
                                    camera_offset,
                                    fov=90):
    """ Creates a dual-RGB-camera setup with the center at the given location,
    and the two cameras on either side of the center at a distance specified
    by the camera_offset.

    The Rotation is set to (pitch=0, yaw=0, roll=0).

    Args:
        camera_name_prefix (str): The name of the camera instance. A suffix
            of "_left" and "_right" is appended to the name.
        location (:py:class:`~pylot.utils.Location`): The location of the
            center of the cameras with respect to the center of the vehicle.
        width (int): The width of the image returned by the cameras.
        height (int): The height of the image returned by the cameras.
        camera_offset (float): The offset of the two cameras from the center.
        fov (float): The field of view of the image returned by the cameras.

    Returns:
        tuple: A tuple containing two instances of
        :py:class:`~pylot.drivers.sensor_setup.RGBCameraSetup` for the left
        and right camera setups with the given parameters.
    """
    rotation = Rotation()
    left_loc = location + Location(0, -camera_offset, 0)
    right_loc = location + Location(0, camera_offset, 0)
    left_transform = Transform(left_loc, rotation)
    right_transform = Transform(right_loc, rotation)
    left_camera_setup = RGBCameraSetup(camera_name_prefix + '_left',
                                       width,
                                       height,
                                       left_transform,
                                       fov=fov)
    right_camera_setup = RGBCameraSetup(camera_name_prefix + '_right',
                                        width,
                                        height,
                                        right_transform,
                                        fov=fov)
    return (left_camera_setup, right_camera_setup)
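A usage sketch: a stereo pair mounted 1.5 m ahead of and 1.4 m above the vehicle center, with the cameras 0.4 m apart (illustrative numbers, not pylot defaults):

left, right = create_left_right_camera_setups(
    'stereo_camera',
    Location(x=1.5, y=0.0, z=1.4),
    width=1920,
    height=1080,
    camera_offset=0.2)
# The left camera ends up at y=-0.2 and the right at y=+0.2, both with the
# identity rotation, so they face straight ahead.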
Example #28
    def _construct_waypoints(self, timestamp, pose_msg, path_x, path_y, speeds,
                             success):
        """
        Convert the optimal frenet path into a waypoints message.
        """
        path_transforms = []
        target_speeds = deque()
        if not success:
            self._logger.debug("@{}: Frenet Optimal Trajectory failed. "
                               "Sending emergency stop.".format(timestamp))
            x = pose_msg.data.transform.location.x
            y = pose_msg.data.transform.location.y
            current_index = 0
            min_dist = np.infty
            for i, wp in enumerate(self._prev_waypoints):
                dist = np.linalg.norm([wp.location.x - x, wp.location.y - y])
                if dist <= min_dist:
                    current_index = i
                    min_dist = dist
            for wp in itertools.islice(
                    self._prev_waypoints, current_index,
                    current_index + self._flags.num_waypoints_ahead):
                path_transforms.append(wp)
                target_speeds.append(0)
        else:
            self._logger.debug(
                "@{}: Frenet Optimal Trajectory succeeded.".format(timestamp))
            for point in zip(path_x, path_y, speeds):
                if self._map is not None:
                    p_loc = self._map.get_closest_lane_waypoint(
                        Location(x=point[0], y=point[1], z=0)).location
                else:
                    p_loc = Location(x=point[0], y=point[1], z=0)
                path_transforms.append(
                    Transform(
                        location=Location(x=point[0], y=point[1], z=p_loc.z),
                        rotation=Rotation(),
                    ))
                target_speeds.append(point[2])

        waypoints = deque(path_transforms)
        self._prev_waypoints = waypoints
        return WaypointsMessage(timestamp, waypoints, target_speeds)
Example #29
 def _get_waypoint(self, location: Location, project_to_road: bool,
                   lane_type):
     try:
         waypoint = self._map.get_waypoint(location.as_simulator_location(),
                                           project_to_road=project_to_road,
                                           lane_type=lane_type)
     except RuntimeError as err:
         self._logger.error('get_waypoint call failed: {}'.format(err))
         waypoint = None
     return waypoint
Example #30
    def on_watermark(self, timestamp, waypoints_stream):
        self._logger.debug('@{}: received watermark'.format(timestamp))
        # get ego info
        can_bus_msg = self._can_bus_msgs.popleft()
        vehicle_transform = can_bus_msg.data.transform

        # get obstacles
        obstacle_map = self._build_obstacle_map(vehicle_transform)

        # compute goals
        target_location = self._compute_target_location(vehicle_transform)

        # run rrt*
        path, cost = self._run_rrt_star(vehicle_transform, target_location,
                                        obstacle_map)

        # convert to waypoints if path found, else use default waypoints
        if cost is not None:
            path_transforms = []
            for point in path:
                p_loc = self._carla_map.get_waypoint(
                    carla.Location(x=point[0], y=point[1], z=0),
                    project_to_road=True,
                ).transform.location  # project to road to approximate Z
                path_transforms.append(
                    Transform(
                        location=Location(x=point[0], y=point[1], z=p_loc.z),
                        rotation=Rotation(),
                    ))
            waypoints = deque(path_transforms)
            waypoints.extend(
                itertools.islice(self._waypoints, self._wp_index,
                                 len(self._waypoints))
            )  # add the remaining global route for future
        else:
            waypoints = self._waypoints

        # construct waypoints message
        waypoints = collections.deque(
            itertools.islice(waypoints, 0,
                             DEFAULT_NUM_WAYPOINTS))  # only take 50 meters
        next_waypoint = waypoints[self._wp_index]
        wp_steer_speed_vector, wp_steer_speed_angle = \
            get_waypoint_vector_and_angle(
                next_waypoint, vehicle_transform
            )
        output_msg = WaypointsMessage(timestamp,
                                      waypoints=waypoints,
                                      wp_angle=wp_steer_speed_angle,
                                      wp_vector=wp_steer_speed_vector,
                                      wp_angle_speed=wp_steer_speed_angle)

        # send waypoints message
        waypoints_stream.send(output_msg)
        waypoints_stream.send(erdos.WatermarkMessage(timestamp))