Code example #1
    def process_imu(self, imu_msg):
        """Invoked when an IMU message is received from the simulator.

        Sends IMU measurements to downstream operators.

        Args:
            imu_msg (carla.IMUMeasurement): IMU reading.
        """
        game_time = int(imu_msg.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        with erdos.profile(self.config.name + '.process_imu',
                           self,
                           event_data={'timestamp': str(timestamp)}):
            with self._lock:
                msg = IMUMessage(
                    timestamp,
                    Transform.from_simulator_transform(imu_msg.transform),
                    Vector3D.from_simulator_vector(imu_msg.accelerometer),
                    Vector3D.from_simulator_vector(imu_msg.gyroscope),
                    imu_msg.compass)
                self._imu_stream.send(msg)
                # Note: The operator is set not to automatically propagate
                # watermarks received on input streams. Thus, we can issue
                # watermarks only after the simulator callback is invoked.
                self._imu_stream.send(watermark_msg)
Code example #2
def test_lidar_unreal_transform(rotation, expected):
    """
    Ensure that the LIDAR space to unreal engine coordinate space conversion
    is correct.

    The LIDAR space is defined as:
        +x to the right, +y out of the screen, +z is down.

    The unreal engine coordinate space is defined as:
        +x into the screen, +y to the right, +z to up.
    """
    lidar_rotation = Rotation(*rotation)
    lidar_setup = LidarSetup(name='lidar_setup',
                             lidar_type='sensor.lidar.ray_cast',
                             transform=Transform(Location(), lidar_rotation),
                             range=6000.0,
                             rotation_frequency=3000.0,
                             channels=3,
                             upper_fov=90.0,
                             lower_fov=90.0,
                             points_per_second=100)

    transformed_rotation = lidar_setup.get_unreal_transform().rotation
    transformed_rotation = [
        transformed_rotation.pitch, transformed_rotation.yaw,
        transformed_rotation.roll
    ]
    assert all(np.isclose(transformed_rotation, expected)), \
            "The unreal transformation does not match the expected transform."
Code example #3
def test_imu_setup_initialization():
    # Set up the required parameters for the initialization.
    name = 'imu_sensor'
    transform = Transform(Location(), Rotation())
    imu_setup = IMUSetup(name, transform)
    assert imu_setup.name == name, "The name in the setup is not the same."
    assert imu_setup.transform == transform, "The transform is not the same."
Code example #4
def test_camera_unreal_transform(rotation, expected):
    """
    Ensure that the camera space to unreal engine coordinate space conversion
    is correct.

    The camera space is defined as:
        +x to the right, +y to down, +z into the screen.

    The unreal engine coordinate space is defined as:
        +x into the screen, +y to the right, +z to up.
    """
    camera_rotation = Rotation(*rotation)
    camera_setup = CameraSetup(name='camera_setup',
                               camera_type='sensor.camera.rgb',
                               width=1920,
                               height=1080,
                               transform=Transform(Location(),
                                                   camera_rotation))
    transformed_rotation = camera_setup.get_unreal_transform().rotation
    transformed_rotation = [
        transformed_rotation.pitch, transformed_rotation.yaw,
        transformed_rotation.roll
    ]
    assert all(np.isclose(transformed_rotation, expected)), \
            "The unreal transformation does not match the expected transform."
Code example #5
    def _construct_waypoints(self, timestamp, path_x, path_y, speeds, success):
        """
        Convert the rrt* path into a waypoints message.
        """
        path_transforms = []
        target_speeds = []
        if not success:
            self._logger.error("@{}: RRT* failed. "
                               "Sending emergency stop.".format(timestamp))
            for wp in itertools.islice(self._prev_waypoints, 0,
                                       DEFAULT_NUM_WAYPOINTS):
                path_transforms.append(wp)
                target_speeds.append(0)
        else:
            self._logger.debug("@{}: RRT* succeeded."
                               .format(timestamp))
            for point in zip(path_x, path_y, speeds):
                if self._map is not None:
                    p_loc = self._map.get_closest_lane_waypoint(
                        Location(x=point[0], y=point[1], z=0)).location
                else:
                    # RRT* does not take the drivable region into account;
                    # it constructs its search space as a top-down minimum
                    # bounding rectangle with padding in each dimension.
                    p_loc = Location(x=point[0], y=point[1], z=0)
                path_transforms.append(
                    Transform(
                        location=Location(x=point[0], y=point[1], z=p_loc.z),
                        rotation=Rotation(),
                    ))
                target_speeds.append(point[2])

        waypoints = deque(path_transforms)
        self._prev_waypoints = waypoints
        return WaypointsMessage(timestamp, waypoints, target_speeds)
Code example #6
    def _construct_waypoints(self, timestamp, path_x, path_y, speeds, success):
        """
        Convert the optimal frenet path into a waypoints message.
        """
        path_transforms = []
        target_speeds = []
        if not success:
            self._logger.debug("@{}: Frenet Optimal Trajectory failed. "
                               "Sending emergency stop.".format(timestamp))
            for wp in itertools.islice(self._prev_waypoints, 0,
                                       DEFAULT_NUM_WAYPOINTS):
                path_transforms.append(wp)
                target_speeds.append(0)
        else:
            self._logger.debug("@{}: Frenet Optimal Trajectory succeeded."
                               .format(timestamp))
            for point in zip(path_x, path_y, speeds):
                if self._map is not None:
                    p_loc = self._map.get_closest_lane_waypoint(
                        Location(x=point[0], y=point[1], z=0)).location
                else:
                    p_loc = Location(x=point[0], y=point[1], z=0)
                path_transforms.append(
                    Transform(
                        location=Location(x=point[0], y=point[1], z=p_loc.z),
                        rotation=Rotation(),
                    ))
                target_speeds.append(point[2])

        waypoints = deque(path_transforms)
        self._prev_waypoints = waypoints
        return WaypointsMessage(timestamp, waypoints, target_speeds)
Code example #7
File: sensor_setup.py Project: mawright/pylot
def create_segmented_camera_setup(camera_name_prefix,
                                  camera_location,
                                  width,
                                  height,
                                  fov=90):
    """ Creates a SegmentedCameraSetup instance with the given values.

    The Rotation is set to (pitch=0, yaw=0, roll=0).

    Args:
        camera_name_prefix (str): The name of the camera instance. A suffix
            of "_segmented" is appended to the name.
        camera_location (:py:class:`~pylot.utils.Location`): The location of
            the camera with respect to the center of the vehicle.
        width (int): The width of the image returned by the camera.
        height (int): The height of the image returned by the camera.
        fov (float): The field of view of the image returned by the camera.

    Returns:
        :py:class:`~pylot.drivers.sensor_setup.SegmentedCameraSetup`: A
        camera setup with the given parameters.
    """
    transform = Transform(camera_location, Rotation())
    return SegmentedCameraSetup(camera_name_prefix + '_segmented',
                                width,
                                height,
                                transform,
                                fov=fov)
Code example #8
    def _postprocess_predictions(self, prediction_array, vehicle_trajectories,
                                 vehicle_ego_transforms):
        # The prediction_array consists of predictions with respect to each
        # vehicle. Transform each predicted trajectory to be in relation to the
        # ego-vehicle, then convert into an ObstaclePrediction.
        obstacle_predictions_list = []
        num_predictions = len(vehicle_trajectories)

        for idx in range(num_predictions):
            cur_prediction = prediction_array[idx]

            obstacle_transform = vehicle_trajectories[idx].obstacle.transform
            predictions = []
            # Because R2P2 only predicts (x,y) coordinates, we assume the
            # vehicle stays at the same height as its last location.
            for t in range(self._flags.prediction_num_future_steps):
                cur_point = vehicle_ego_transforms[idx].transform_points(
                    np.array([[
                        cur_prediction[t][0], cur_prediction[t][1],
                        vehicle_ego_transforms[idx].location.z
                    ]]))[0]
                # R2P2 does not predict vehicle orientation, so we use our
                # estimated orientation of the vehicle at its latest location.
                predictions.append(
                    Transform(location=Location(cur_point[0], cur_point[1],
                                                cur_point[2]),
                              rotation=vehicle_ego_transforms[idx].rotation))

            # Probability; currently a filler value because we take only one
            # sample from the distribution.
            obstacle_predictions_list.append(
                ObstaclePrediction(vehicle_trajectories[idx],
                                   obstacle_transform, 1.0, predictions))
        return obstacle_predictions_list
Code example #9
File: hd_map.py Project: seikurou/pylot
    def compute_waypoints(self, source_loc: Location,
                          destination_loc: Location):
        """Computes waypoints between two locations.

        Assumes that the ego vehicle has the same orientation as the lane
        it is on.

        Args:
            source_loc (:py:class:`~pylot.utils.Location`): Source location in
                world coordinates.
            destination_loc (:py:class:`~pylot.utils.Location`): Destination
                location in world coordinates.

        Returns:
            deque(:py:class:`~pylot.utils.Transform`): Deque of waypoint
            transforms.
        """
        start_waypoint = self._get_waypoint(source_loc,
                                            project_to_road=True,
                                            lane_type=carla.LaneType.Driving)
        end_waypoint = self._get_waypoint(destination_loc,
                                          project_to_road=True,
                                          lane_type=carla.LaneType.Driving)
        assert start_waypoint and end_waypoint, 'Map could not find waypoints'
        route = self._grp.trace_route(start_waypoint.transform.location,
                                      end_waypoint.transform.location)
        # TODO(ionel): The planner returns several options in intersections.
        # We always take the first one, but this is not correct.
        return deque([
            Transform.from_carla_transform(waypoint[0].transform)
            for waypoint in route
        ])
Code example #10
File: hd_map.py Project: Jimuyangz/pylot
    def get_right_lane(self, location: Location):
        waypoint = self._get_waypoint(location, project_to_road=False)
        if waypoint:
            right_lane_waypoint = waypoint.get_right_lane()
            if right_lane_waypoint:
                return Transform.from_carla_transform(
                    right_lane_waypoint.transform)
        return None
Code example #11
    def run(self):
        # Read the vehicle ID from the vehicle ID stream.
        vehicle_id_msg = self._vehicle_id_stream.read()
        vehicle_id = vehicle_id_msg.data
        self._logger.debug("@{}: Received Vehicle ID: {}".format(
            vehicle_id_msg.timestamp, vehicle_id))

        # Connect to the world.
        _, world = get_world(self._flags.simulator_host,
                             self._flags.simulator_port,
                             self._flags.simulator_timeout)

        self._vehicle = get_vehicle_handle(world, vehicle_id)

        # Install the collision sensor.
        collision_blueprint = world.get_blueprint_library().find(
            'sensor.other.collision')
        self._logger.debug("Spawning a collision sensor.")
        self._collision_sensor = world.spawn_actor(
            collision_blueprint,
            Transform().as_simulator_transform(),
            attach_to=self._vehicle)

        # Register the callback on the collision sensor.
        self._collision_sensor.listen(self.process_collision)
Code example #12
File: sensor_setup.py Project: alvkao58/pylot
def create_rgb_camera_setup(camera_name,
                            camera_location,
                            width,
                            height,
                            fov=90):
    transform = Transform(camera_location, Rotation())
    return RGBCameraSetup(camera_name, width, height, transform, fov)
Code example #13
File: lane.py Project: seikurou/pylot
    def get_lane_center_transforms(self):
        if len(self.left_markings) < len(self.right_markings):
            anchor_markings = self.left_markings
            other_markings = self.right_markings
        else:
            anchor_markings = self.right_markings
            other_markings = self.left_markings
        index_other = 0
        center_markings = deque([])
        for transform in anchor_markings:
            dist = transform.location.distance(
                other_markings[index_other].location)
            while (index_other + 1 < len(other_markings)
                   and dist > transform.location.distance(
                       other_markings[index_other + 1].location)):
                index_other += 1
                dist = transform.location.distance(
                    other_markings[index_other].location)
            if index_other < len(other_markings):
                other_loc = other_markings[index_other].location
                center_location = Location(
                    (transform.location.x + other_loc.x) / 2.0,
                    (transform.location.y + other_loc.y) / 2.0,
                    (transform.location.z + other_loc.z) / 2.0)
                center_markings.append(Transform(center_location, Rotation()))
        return center_markings
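A standalone sketch of the pairing logic above, using plain (x, y) tuples instead of pylot Transforms: walk the markings on the shorter side, advance a pointer on the other side while the next marking is closer, and emit the midpoint of each pair.

import math

def centers(anchor, other):
    # anchor is the shorter side; other is scanned monotonically.
    i, out = 0, []
    for ax, ay in anchor:
        dist = math.hypot(ax - other[i][0], ay - other[i][1])
        while (i + 1 < len(other)
               and math.hypot(ax - other[i + 1][0],
                              ay - other[i + 1][1]) < dist):
            i += 1
            dist = math.hypot(ax - other[i][0], ay - other[i][1])
        out.append(((ax + other[i][0]) / 2.0, (ay + other[i][1]) / 2.0))
    return out

print(centers([(0, 0), (0, 2)], [(1, 0), (1, 1), (1, 2)]))
# -> [(0.5, 0.0), (0.5, 2.0)]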
Code example #14
def create_center_lidar_setup(location, rotation_frequency=20):
    """ Creates a LidarSetup instance with the given location.

    The Rotation is set to (pitch=0, roll=0, yaw=0).

    Args:
        location (:py:class:`~pylot.utils.Location`): The location of the
            LIDAR with respect to the center of the vehicle.

    Returns:
        :py:class:`~pylot.drivers.sensor_setup.LidarSetup`: A LidarSetup
        with the given location.
    """
    rotation = Rotation()
    # Place the lidar in the same position as the camera.
    lidar_transform = Transform(location, rotation)
    return LidarSetup(
        name='front_center_lidar',
        lidar_type='sensor.lidar.ray_cast',
        transform=lidar_transform,
        range=5000,  # in centimeters
        rotation_frequency=rotation_frequency,
        channels=32,
        upper_fov=15,
        lower_fov=-30,
        points_per_second=250000)
Code example #15
File: hd_map.py Project: mfkiwl/pylot
    def get_right_lane(self, location: Location):
        waypoint = self._get_waypoint(location)
        if waypoint:
            right_lane_waypoint = waypoint.get_right_lane()
            if right_lane_waypoint:
                return Transform.from_simulator_transform(
                    right_lane_waypoint.transform)
        return None
Code example #16
File: point_cloud.py Project: ymote/pylot
    def _to_camera_coordinates(self, points):
        # Converts points in lidar coordinates to points in camera
        # coordinates. See CameraSetup in pylot/drivers/sensor_setup.py for
        # coordinate axis orientations.
        #
        # The Velodyne coordinate space is defined as:
        # +x into the screen, +y to the left, and +z up.
        #
        # Note: We're using the ROS velodyne driver coordinate
        # system, not the one specified in the Velodyne manual.
        # Link to the ROS coordinate system:
        # https://www.ros.org/reps/rep-0103.html#axis-orientation
        if self._lidar_setup.lidar_type == 'sensor.lidar.ray_cast':
            if self._lidar_setup.legacy:
                # The legacy CARLA Lidar coordinate space is defined as:
                # +x to right, +y out of the screen, +z down.
                to_camera_transform = Transform(matrix=np.array(
                    [[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]]))
            else:
                # The latest coordinate space is the unreal space.
                to_camera_transform = Transform(matrix=np.array(
                    [[0, 1, 0, 0], [0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]))
        elif self._lidar_setup.lidar_type == 'velodyne':
            to_camera_transform = Transform(matrix=np.array(
                [[0, -1, 0, 0], [0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]))
        else:
            raise ValueError('Unexpected lidar type {}'.format(
                self._lidar_setup.lidar_type))
        transformed_points = to_camera_transform.transform_points(points)
        return transformed_points
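A quick standalone check of the legacy ray-cast matrix above, in plain NumPy with no pylot imports. It assumes, as in pylot's Transform.transform_points, that the 4x4 matrix is applied to points as homogeneous column vectors; each lidar axis should land on the camera axis the comments claim.

import numpy as np

to_camera = np.array([[1, 0, 0, 0],
                      [0, 0, 1, 0],
                      [0, -1, 0, 0],
                      [0, 0, 0, 1]])
lidar_axes = np.array([[1, 0, 0],   # lidar +x (to the right)
                       [0, 1, 0],   # lidar +y (out of the screen)
                       [0, 0, 1]])  # lidar +z (down)
homogeneous = np.append(lidar_axes.T, np.ones((1, 3)), axis=0)
camera_axes = np.dot(to_camera, homogeneous)[0:3].T
# Expected: +x -> +x (right), +y -> -z (out of the screen), +z -> +y (down).
assert (camera_axes == np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]])).all()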
Code example #17
File: sensor_setup.py Project: alvkao58/pylot
    def __create_unreal_transform(transform):
        """
        Takes in a Transform that occurs in camera coordinates,
        and converts it into a Transform that goes from lidar
        coordinates to camera coordinates.
        """
        to_camera_transform = Transform(matrix=np.array(
            [[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]]))
        return transform * to_camera_transform
Code example #18
    def get_right_lane(self, location: Location):
        waypoint = self._get_waypoint(location,
                                      project_to_road=False,
                                      lane_type=carla.LaneType.Any)
        if waypoint:
            right_lane_waypoint = waypoint.get_right_lane()
            if right_lane_waypoint:
                return Transform.from_simulator_transform(
                    right_lane_waypoint.transform)
        return None
Code example #19
File: hd_map.py Project: seikurou/pylot
    def get_left_lane(self, location: Location):
        waypoint = self._get_waypoint(location,
                                      project_to_road=False,
                                      lane_type=carla.LaneType.Any)
        if waypoint:
            left_lane_waypoint = waypoint.get_left_lane()
            if left_lane_waypoint:
                return Transform.from_carla_transform(
                    left_lane_waypoint.transform)
        return None
Code example #20
def test_imu_setup_failed_initialization():
    # Set up the required parameters for the initialization.
    name = 'imu_sensor'
    transform = Transform(Location(), Rotation())

    with pytest.raises(AssertionError):
        imu_setup = IMUSetup(name=1, transform=transform)

    with pytest.raises(AssertionError):
        imu_setup = IMUSetup(name=name, transform=None)
Code example #21
File: sensor_setup.py Project: fangedward/pylot
def create_left_right_camera_setups(camera_name_prefix,
                                    location,
                                    width,
                                    height,
                                    camera_offset,
                                    fov=90):
    """ Creates a dual-RGB-camera setup with the center at the given location,
    and the two cameras on either side of the center at a distance specified
    by the camera_offset.

    The Rotation is set to (pitch=0, yaw=0, roll=0).

    Args:
        camera_name_prefix (str): The name prefix of the camera instances.
            The suffixes "_left" and "_right" are appended to the prefix.
        location (:py:class:`~pylot.utils.Location`): The location of the
            center of the cameras with respect to the center of the vehicle.
        width (int): The width of the image returned by the cameras.
        height (int): The height of the image returned by the cameras.
        camera_offset (float): The offset of the two cameras from the center.
        fov (float): The field of view of the image returned by the cameras.

    Returns:
        tuple: A tuple containing two instances of
        :py:class:`~pylot.drivers.sensor_setup.RGBCameraSetup` for the left
        and right camera setups with the given parameters.
    """
    rotation = Rotation()
    left_loc = location + Location(0, -camera_offset, 0)
    right_loc = location + Location(0, camera_offset, 0)
    left_transform = Transform(left_loc, rotation)
    right_transform = Transform(right_loc, rotation)
    left_camera_setup = RGBCameraSetup(camera_name_prefix + '_left',
                                       width,
                                       height,
                                       left_transform,
                                       fov=fov)
    right_camera_setup = RGBCameraSetup(camera_name_prefix + '_right',
                                        width,
                                        height,
                                        right_transform,
                                        fov=fov)
    return (left_camera_setup, right_camera_setup)
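Hypothetical usage, assuming pylot is installed and importable; the mount point and offset values are illustrative only.

from pylot.drivers.sensor_setup import create_left_right_camera_setups
from pylot.utils import Location

# A stereo pair of 1280x720 cameras, each 0.4 m to the side of a mount
# point 1.5 m above the vehicle center (illustrative values).
left, right = create_left_right_camera_setups('stereo',
                                              Location(1.0, 0.0, 1.5),
                                              1280, 720,
                                              camera_offset=0.4)
print(left.name, right.name)  # stereo_left stereo_right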
Code example #22
def test_point_cloud_get_pixel_location(lidar_points, pixel, expected):
    camera_setup = CameraSetup(
        'test_setup',
        'test_type',
        801,
        601,  # width, height
        Transform(location=Location(0, 0, 0), rotation=Rotation(0, 0, 0)),
        fov=90)
    point_cloud = PointCloud(lidar_points, Transform(Location(), Rotation()))
    location = point_cloud.get_pixel_location(pixel, camera_setup)
    assert np.isclose(location.x, expected.x), \
        'Returned x value is not the same as expected'
    assert np.isclose(location.y, expected.y), \
        'Returned y value is not the same as expected'
    assert np.isclose(location.z, expected.z), \
        'Returned z value is not the same as expected'
Code example #23
    def on_watermark(self, timestamp, waypoints_stream):
        self._logger.debug('@{}: received watermark'.format(timestamp))
        # get ego info
        can_bus_msg = self._can_bus_msgs.popleft()
        vehicle_transform = can_bus_msg.data.transform

        # get obstacles
        obstacle_map = self._build_obstacle_map(vehicle_transform)

        # compute goals
        target_location = self._compute_target_location(vehicle_transform)

        # run rrt*
        path, cost = self._run_rrt_star(vehicle_transform, target_location,
                                        obstacle_map)

        # convert to waypoints if path found, else use default waypoints
        if cost is not None:
            path_transforms = []
            for point in path:
                p_loc = self._carla_map.get_waypoint(
                    carla.Location(x=point[0], y=point[1], z=0),
                    project_to_road=True,
                ).transform.location  # project to road to approximate Z
                path_transforms.append(
                    Transform(
                        location=Location(x=point[0], y=point[1], z=p_loc.z),
                        rotation=Rotation(),
                    ))
            waypoints = deque(path_transforms)
            waypoints.extend(
                itertools.islice(self._waypoints, self._wp_index,
                                 len(self._waypoints))
            )  # add the remaining global route for future
        else:
            waypoints = self._waypoints

        # construct waypoints message
        waypoints = collections.deque(
            itertools.islice(waypoints, 0,
                             DEFAULT_NUM_WAYPOINTS))  # only take 50 meters
        next_waypoint = waypoints[self._wp_index]
        wp_steer_speed_vector, wp_steer_speed_angle = \
            get_waypoint_vector_and_angle(
                next_waypoint, vehicle_transform
            )
        output_msg = WaypointsMessage(timestamp,
                                      waypoints=waypoints,
                                      wp_angle=wp_steer_speed_angle,
                                      wp_vector=wp_steer_speed_vector,
                                      wp_angle_speed=wp_steer_speed_angle)

        # send waypoints message
        waypoints_stream.send(output_msg)
        waypoints_stream.send(erdos.WatermarkMessage(timestamp))
Code example #24
File: sensor_setup.py Project: alvkao58/pylot
def create_depth_camera_setup(camera_name_prefix,
                              camera_location,
                              width,
                              height,
                              fov=90):
    transform = Transform(camera_location, Rotation())
    return DepthCameraSetup(camera_name_prefix + '_depth',
                            width,
                            height,
                            transform,
                            fov=fov)
Code example #25
    def generate_predicted_trajectories(self, msg: Message,
                                        linear_prediction_stream: WriteStream):
        self._logger.debug('@{}: received trajectories message'.format(
            msg.timestamp))
        obstacle_predictions_list = []

        nearby_obstacle_trajectories, nearby_obstacles_ego_transforms = \
            msg.get_nearby_obstacles_info(self._flags.prediction_radius)
        num_predictions = len(nearby_obstacle_trajectories)

        self._logger.info(
            '@{}: Getting linear predictions for {} obstacles'.format(
                msg.timestamp, num_predictions))

        for idx in range(len(nearby_obstacle_trajectories)):
            obstacle_trajectory = nearby_obstacle_trajectories[idx]
            # Time step matrices used in regression.
            num_steps = min(self._flags.prediction_num_past_steps,
                            len(obstacle_trajectory.trajectory))
            ts = np.zeros((num_steps, 2))
            future_ts = np.zeros((self._flags.prediction_num_future_steps, 2))
            for t in range(num_steps):
                ts[t][0] = -t
                ts[t][1] = 1
            for i in range(self._flags.prediction_num_future_steps):
                future_ts[i][0] = i + 1
                future_ts[i][1] = 1

            xy = np.zeros((num_steps, 2))
            for t in range(num_steps):
                # t-th most recent step
                transform = obstacle_trajectory.trajectory[-(t + 1)]
                xy[t][0] = transform.location.x
                xy[t][1] = transform.location.y
            linear_model_params = np.linalg.lstsq(ts, xy, rcond=None)[0]
            # Predict future steps and convert to list of locations.
            predict_array = np.matmul(future_ts, linear_model_params)
            predictions = []
            for t in range(self._flags.prediction_num_future_steps):
                # Linear prediction does not predict vehicle orientation, so we
                # use our estimated orientation of the vehicle at its latest
                # location.
                predictions.append(
                    Transform(location=Location(x=predict_array[t][0],
                                                y=predict_array[t][1]),
                              rotation=nearby_obstacles_ego_transforms[idx].
                              rotation))
            obstacle_predictions_list.append(
                ObstaclePrediction(obstacle_trajectory,
                                   obstacle_trajectory.obstacle.transform, 1.0,
                                   predictions))
        linear_prediction_stream.send(
            PredictionMessage(msg.timestamp, obstacle_predictions_list))
        linear_prediction_stream.send(erdos.WatermarkMessage(msg.timestamp))
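The least-squares step above fits x(t) and y(t) linearly against time, with row t of ts holding [-t, 1] for the t-th most recent position. A self-contained NumPy illustration of the same setup, with made-up positions for a vehicle moving at constant velocity:

import numpy as np

# Most-recent-first history: at t=0 the vehicle is at (2, 1), one step ago
# at (1, 0.5), two steps ago at (0, 0).
ts = np.array([[0, 1], [-1, 1], [-2, 1]], dtype=float)
xy = np.array([[2.0, 1.0], [1.0, 0.5], [0.0, 0.0]])
params = np.linalg.lstsq(ts, xy, rcond=None)[0]  # rows: slopes, intercepts
future_ts = np.array([[1, 1], [2, 1]], dtype=float)
print(np.matmul(future_ts, params))  # -> [[3.  1.5], [4.  2. ]]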
Code example #26
File: sensor_setup.py Project: alvkao58/pylot
def create_segmented_camera_setup(camera_name_prefix,
                                  camera_location,
                                  width,
                                  height,
                                  fov=90):
    transform = Transform(camera_location, Rotation())
    return SegmentedCameraSetup(camera_name_prefix + '_segmented',
                                width,
                                height,
                                transform,
                                fov=fov)
Code example #27
File: hd_map.py Project: mfkiwl/pylot
    def _must_obey_american_traffic_light(self, ego_transform: Transform,
                                          tl_locations,
                                          tl_max_dist_thresh: float):
        """Returns a (must_obey, traffic_light_location) tuple."""
        ego_waypoint = self._get_waypoint(ego_transform.location)
        # We're not on a road, or we're already in the intersection. Carry on.
        if ego_waypoint is None or self.__is_intersection(ego_waypoint):
            return (False, None)

        min_angle = 25.0
        selected_tl_loc = None
        for tl_loc in tl_locations:
            if ego_transform.is_within_distance_ahead(tl_loc,
                                                      tl_max_dist_thresh):
                angle, distance = ego_transform.get_angle_and_magnitude(tl_loc)
                if distance < 60.0 and angle < min(25.0, min_angle):
                    min_angle = angle
                    selected_tl_loc = tl_loc
        if selected_tl_loc is not None:
            return (True, selected_tl_loc)
        else:
            return (False, None)
Code example #28
File: sensor_setup.py Project: alvkao58/pylot
def create_left_right_camera_setups(camera_name_prefix,
                                    location,
                                    width,
                                    height,
                                    camera_offset,
                                    fov=90):
    rotation = Rotation()
    left_loc = location + Location(0, -camera_offset, 0)
    right_loc = location + Location(0, camera_offset, 0)
    left_transform = Transform(left_loc, rotation)
    right_transform = Transform(right_loc, rotation)
    left_camera_setup = RGBCameraSetup(camera_name_prefix + '_left',
                                       width,
                                       height,
                                       left_transform,
                                       fov=fov)
    right_camera_setup = RGBCameraSetup(camera_name_prefix + '_right',
                                        width,
                                        height,
                                        right_transform,
                                        fov=fov)
    return (left_camera_setup, right_camera_setup)
Code example #29
def test_camera_setup_failed_initialization():
    """
    Ensure that the CameraSetup constructor fails when wrong values or values
    of the wrong type are provided.
    """
    # Set up the correct values for all the constructor values.
    name, camera_type = 'camera_setup', 'sensor.camera.rgb'
    width, height = 1920, 1080
    transform = Transform(Location(), Rotation())

    # Ensure that a wrong name throws an error.
    with pytest.raises(AssertionError):
        camera_setup = CameraSetup(name=1,
                                   camera_type=camera_type,
                                   width=width,
                                   height=height,
                                   transform=transform)

    # Ensure that a wrong camera_type throws an error.
    with pytest.raises(AssertionError):
        camera_setup = CameraSetup(name=name,
                                   camera_type=None,
                                   width=width,
                                   height=height,
                                   transform=transform)

    # Ensure that a wrong width, height throws an error.
    with pytest.raises(AssertionError):
        camera_setup = CameraSetup(name=name,
                                   camera_type=camera_type,
                                   width=float(width),
                                   height=float(height),
                                   transform=transform)

    # Ensure that a wrong transform throws an error.
    with pytest.raises(AssertionError):
        camera_setup = CameraSetup(name=name,
                                   camera_type=camera_type,
                                   width=width,
                                   height=height,
                                   transform=None)

    # Ensure that a bad fov raises an error.
    with pytest.raises(AssertionError):
        camera_setup = CameraSetup(name=name,
                                   camera_type=camera_type,
                                   width=width,
                                   height=height,
                                   transform=transform,
                                   fov=None)
Code example #30
File: sensor_setup.py Project: alvkao58/pylot
def create_center_lidar_setup(location):
    rotation = Rotation()
    # Place the lidar in the same position as the camera.
    lidar_transform = Transform(location, rotation)
    return LidarSetup(
        name='front_center_lidar',
        lidar_type='sensor.lidar.ray_cast',
        transform=lidar_transform,
        range=5000,  # in centimeters
        rotation_frequency=20,
        channels=32,
        upper_fov=15,
        lower_fov=-30,
        points_per_second=500000)