Example #1
    def process_imu(self, imu_msg):
        """Invoked when an IMU message is received from the simulator.

        Sends IMU measurements to downstream operators.

        Args:
            imu_msg (carla.IMUMeasurement): IMU reading.
        """
        game_time = int(imu_msg.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        with erdos.profile(self.config.name + '.process_imu',
                           self,
                           event_data={'timestamp': str(timestamp)}):
            with self._lock:
                msg = IMUMessage(
                    timestamp,
                    Transform.from_carla_transform(imu_msg.transform),
                    Vector3D.from_carla_vector(imu_msg.accelerometer),
                    Vector3D.from_carla_vector(imu_msg.gyroscope),
                    imu_msg.compass)
                self._imu_stream.send(msg)
                # Note: The operator is set not to automatically propagate
                # watermark messages received on input streams. Thus, we can
                # issue watermarks only after the Carla callback is invoked.
                self._imu_stream.send(watermark_msg)
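
This callback is not invoked directly; CARLA calls it on every IMU tick once it is registered as a sensor listener. Below is a minimal sketch of that wiring, assuming a previously spawned ego vehicle and a driver-operator instance (ego_vehicle, driver_operator, and the sensor_tick value are assumptions, not Pylot's actual setup code):

import carla

client = carla.Client('localhost', 2000)
world = client.get_world()
imu_blueprint = world.get_blueprint_library().find('sensor.other.imu')
imu_blueprint.set_attribute('sensor_tick', '0.1')  # assumed 10 Hz rate
# ego_vehicle is assumed to be a previously spawned carla.Vehicle.
imu_sensor = world.spawn_actor(imu_blueprint, carla.Transform(),
                               attach_to=ego_vehicle)
# CARLA now invokes the handler with a carla.IMUMeasurement per tick.
imu_sensor.listen(driver_operator.process_imu)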
Example #2
File: lane.py  Project: ymote/pylot
    def draw_on_frame(self, frame, inverse_transform=None):
        """Draw lane markings on a frame.

        Args:
            frame: Frame on which to draw the lane markings.
            inverse_transform (optional): Transform used to convert the
                markings to coordinates relative to the ego vehicle.
        """
        extrinsic_matrix = frame.camera_setup.get_extrinsic_matrix()
        intrinsic_matrix = frame.camera_setup.get_intrinsic_matrix()
        # change color based on lane id
        lane_color = self._color_map[self.id % len(self._color_map)]

        for marking in self.left_markings:
            if inverse_transform:
                # marking = inverse_transform * marking
                marking = inverse_transform.transform_points(
                    np.array([marking.as_numpy_array()]))
                marking = Vector3D(marking[0, 0], marking[0, 1], marking[0, 2])
            pixel_location = marking.to_camera_view(extrinsic_matrix,
                                                    intrinsic_matrix)
            frame.draw_point(pixel_location, lane_color)
        for marking in self.right_markings:
            if inverse_transform:
                # marking = inverse_transform * marking
                marking = inverse_transform.transform_points(
                    np.array([marking.as_numpy_array()]))
                marking = Vector3D(marking[0, 0], marking[0, 1], marking[0, 2])
            pixel_location = marking.to_camera_view(extrinsic_matrix,
                                                    intrinsic_matrix)
            frame.draw_point(pixel_location, lane_color)
Example #3
def test_negative_vector_from_carla():
    """ Tests that Vector3D throws a ValueError if incorrect vector is
    passed. """
    DummyType = namedtuple("DummyType", "x, y, z")
    dummy_instance = DummyType(0, 0, 0)
    with pytest.raises(ValueError):
        Vector3D.from_carla_vector(dummy_instance)
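
The test passes as long as from_carla_vector validates the type of its argument rather than duck-typing on x, y, z. A minimal sketch of the guard it presumably contains (the class body is abbreviated and assumed):

import carla

class Vector3D(object):
    def __init__(self, x=0, y=0, z=0):
        self.x, self.y, self.z = x, y, z

    @classmethod
    def from_carla_vector(cls, vector):
        # Reject look-alikes such as the namedtuple used in the test.
        if not isinstance(vector, carla.Vector3D):
            raise ValueError('The provided vector is not a carla.Vector3D')
        return cls(vector.x, vector.y, vector.z)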
Example #4
    def collect_frame_data(self,
                           frame,
                           binary_frame,
                           camera_setup,
                           inverse_transform=None):
        """Draw lane markings on input frames for lane data collection.

        Args:
            frame: Grayscale image on which to draw the waypoints.
            binary_frame: Black and white image on which to draw the waypoints.
            camera_setup: Camera setup used to generate the frame.
            inverse_transform (optional): Transform used to convert the
                markings to coordinates relative to the ego vehicle.
        """
        extrinsic_matrix = camera_setup.get_extrinsic_matrix()
        intrinsic_matrix = camera_setup.get_intrinsic_matrix()
        gray_color_map = [(20, 20), (70, 70), (120, 120), (170, 170),
                          (220, 220), (250, 250)]
        # change color based on lane id
        lane_color_l = gray_color_map[self.id % len(gray_color_map)]
        lane_color_r = gray_color_map[(self.id + 2) % len(gray_color_map)]

        for marking in self.left_markings:
            if inverse_transform:
                # marking = inverse_transform * marking
                marking = inverse_transform.transform_points(
                    np.array([marking.as_numpy_array()]))
                marking = Vector3D(marking[0, 0], marking[0, 1], marking[0, 2])
            pixel_location = marking.to_camera_view(extrinsic_matrix,
                                                    intrinsic_matrix)
            if pixel_location.z >= 0:
                try:
                    cv2.circle(frame,
                               (int(pixel_location.x), int(pixel_location.y)),
                               3, lane_color_l, -1)
                    cv2.circle(binary_frame,
                               (int(pixel_location.x), int(pixel_location.y)),
                               3, (255, 255), -1)
                except Exception:
                    continue
        for marking in self.right_markings:
            if inverse_transform:
                # marking = inverse_transform * marking
                marking = inverse_transform.transform_points(
                    np.array([marking.as_numpy_array()]))
                marking = Vector3D(marking[0, 0], marking[0, 1], marking[0, 2])
            pixel_location = marking.to_camera_view(extrinsic_matrix,
                                                    intrinsic_matrix)
            if pixel_location.z >= 0:
                try:
                    cv2.circle(frame,
                               (int(pixel_location.x), int(pixel_location.y)),
                               3, lane_color_r, -1)
                    cv2.circle(binary_frame,
                               (int(pixel_location.x), int(pixel_location.y)),
                               3, (255, 255), -1)
                except Exception:
                    continue
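
A hypothetical driver for data collection, pairing one grayscale canvas with one binary mask per camera frame (the canvas shapes, and the lanes, camera_setup, and ego_transform names, are assumptions):

import numpy as np

height, width = camera_setup.height, camera_setup.width
gray_canvas = np.zeros((height, width), dtype=np.uint8)
binary_mask = np.zeros((height, width), dtype=np.uint8)
for lane in lanes:
    lane.collect_frame_data(gray_canvas, binary_mask, camera_setup,
                            ego_transform.inverse_transform())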
Example #5
def test_subtract_vector(point_a, point_b, expected):
    first_vector, second_vector = Vector3D(*point_a), Vector3D(*point_b)
    sub_vector = first_vector - second_vector
    expected_vector = Vector3D(*expected)
    assert isinstance(sub_vector, Vector3D), \
        "The difference is not an instance of Vector3D"
    assert np.isclose(sub_vector.x, expected_vector.x), \
        "The x coordinate of the difference does not match the expected value"
    assert np.isclose(sub_vector.y, expected_vector.y), \
        "The y coordinate of the difference does not match the expected value"
    assert np.isclose(sub_vector.z, expected_vector.z), \
        "The z coordinate of the difference does not match the expected value"
Example #6
    def draw_on_frame(self, frame, inverse_transform=None, binary_frame=None):
        """Draw lane markings on a frame.

        Args:
            frame: Frame on which to draw the lane markings.
            inverse_transform (optional): Transform used to convert the
                markings to coordinates relative to the ego vehicle.
            binary_frame (optional): Black and white frame on which the
                markings are also drawn.
        """
        extrinsic_matrix = frame.camera_setup.get_extrinsic_matrix()
        intrinsic_matrix = frame.camera_setup.get_intrinsic_matrix()
        # change color based on lane id
        lane_color_l = self._color_map[self.id % len(self._color_map)]
        lane_color_r = self._color_map[(self.id + 2) % len(self._color_map)]

        for marking in self.left_markings:
            if inverse_transform:
                # marking = inverse_transform * marking
                marking = inverse_transform.transform_points(
                    np.array([marking.as_numpy_array()]))
                marking = Vector3D(marking[0, 0], marking[0, 1], marking[0, 2])
            else:
                marking = marking.location
            pixel_location = marking.to_camera_view(extrinsic_matrix,
                                                    intrinsic_matrix)
            if pixel_location.z >= 0:
                try:
                    frame.draw_point(pixel_location, lane_color_l)
                    if binary_frame:
                        binary_frame.draw_point(pixel_location,
                                                (255, 255, 255))
                except Exception:
                    continue
        for marking in self.right_markings:
            if inverse_transform:
                # marking = inverse_transform * marking
                marking = inverse_transform.transform_points(
                    np.array([marking.as_numpy_array()]))
                marking = Vector3D(marking[0, 0], marking[0, 1], marking[0, 2])
            else:
                marking = marking.location
            pixel_location = marking.to_camera_view(extrinsic_matrix,
                                                    intrinsic_matrix)
            if pixel_location.z >= 0:
                try:
                    frame.draw_point(pixel_location, lane_color_r)
                    if binary_frame:
                        binary_frame.draw_point(pixel_location,
                                                (255, 255, 255))
                except Exception:
                    continue
Example #7
    def process_collision(self, collision_event):
        """ Invoked when a collision event is received from the simulation.

        Args:
            collision_event (:py:class:`carla.CollisionEvent`): A collision
                event that contains the impulse, location and the object with
                which the ego-vehicle collided.
        """
        game_time = int(collision_event.timestamp * 1000)
        self._logger.debug(
            "@[{}]: Received a collision event from the simulator.".format(
                game_time))

        # Create a CollisionMessage.
        timestamp = erdos.Timestamp(coordinates=[game_time])
        msg = CollisionMessage(
            collision_event.other_actor,
            Vector3D.from_simulator_vector(collision_event.normal_impulse),
            timestamp)

        # Send the CollisionMessage.
        self._collision_stream.send(msg)
        # TODO(ionel): This code will fail if process_collision is called twice
        # for the same timestamp (i.e., if the vehicle collides with two other
        # actors)
        self._collision_stream.send(erdos.WatermarkMessage(timestamp))
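
One way to address the TODO would be to deduplicate watermarks per game time; a possible guard (not the project's actual fix, and _last_watermark_time is a made-up attribute initialized to None):

# Hypothetical guard: emit at most one watermark per game time, so two
# collisions in the same tick do not send a duplicate watermark.
if game_time != self._last_watermark_time:
    self._collision_stream.send(erdos.WatermarkMessage(timestamp))
    self._last_watermark_time = game_time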
Example #8
def test_from_carla_vector(x, y, z):
    """ Tests the creation of Vector3D from Carla. """
    carla_vector3d = carla.Vector3D(x, y, z)
    vector3d = Vector3D.from_carla_vector(carla_vector3d)
    assert isinstance(vector3d, Vector3D), \
        "The returned object is not of type Vector3D"
    assert np.isclose(carla_vector3d.x, vector3d.x), "X value is not the same"
    assert np.isclose(carla_vector3d.y, vector3d.y), "Y value is not the same"
    assert np.isclose(carla_vector3d.z, vector3d.z), "Z value is not the same"
Example #9
def test_as_carla_vector():
    vector_carla = Vector3D().as_carla_vector()
    assert isinstance(vector_carla, carla.Vector3D), \
        "The returned object is not of the type carla.Vector3D"
    assert np.isclose(vector_carla.x, 0), \
        "The x value of the returned vector is not 0"
    assert np.isclose(vector_carla.y, 0), \
        "The y value of the returned vector is not 0"
    assert np.isclose(vector_carla.z, 0), \
        "The z value of the returned vector is not 0"
Example #10
    def get_traffic_light_waypoints(self, traffic_light):
        """
        get area of a given traffic light
        """
        base_transform = traffic_light.get_transform()
        base_rot = base_transform.rotation.yaw
        area_loc = base_transform.transform(
            traffic_light.trigger_volume.location)

        # Discretize the trigger box into points
        area_ext = traffic_light.trigger_volume.extent
        x_values = np.arange(-0.9 * area_ext.x, 0.9 * area_ext.x,
                             1.0)  # 0.9 to avoid crossing to adjacent lanes

        area = []
        for x in x_values:

            point = Vector3D(x, 0,
                             area_ext.z).rotate(base_rot).as_carla_vector()
            point_location = area_loc + carla.Location(x=point.x, y=point.y)
            area.append(point_location)

        # Get the waypoints of these points, removing duplicates
        ini_wps = []
        for pt in area:
            wpx = self._map.get_waypoint(pt)
            # As x_values are arranged in order, only the last one has
            # to be checked
            if not ini_wps or ini_wps[-1].road_id != wpx.road_id or ini_wps[
                    -1].lane_id != wpx.lane_id:
                ini_wps.append(wpx)

        # Advance them until the intersection
        wps = []
        for wpx in ini_wps:
            while not wpx.is_intersection:
                next_wp = wpx.next(0.5)[0]
                if next_wp and not next_wp.is_intersection:
                    wpx = next_wp
                else:
                    break
            wps.append(wpx)

        return area_loc, wps
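
The rotate(angle) calls above presumably apply a planar rotation about the z axis with the angle in degrees, matching CARLA's yaw convention. A minimal sketch of that operation (an assumption about the actual Vector3D.rotate):

import math

def rotate(self, angle):
    # Rotate the vector by `angle` degrees around the z axis, leaving z
    # unchanged; the callers above rotate lane offsets by a yaw angle.
    rad = math.radians(angle)
    x = math.cos(rad) * self.x - math.sin(rad) * self.y
    y = math.sin(rad) * self.x + math.cos(rad) * self.y
    return type(self)(x, y, self.z)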
Example #11
    def on_watermark(self, timestamp: Timestamp):
        self._logger.debug("@{}: received watermark.".format(timestamp))

        # Retrieve the messages for this timestamp.
        pose_msg = self.get_message(self._ground_pose_updates, timestamp,
                                    "pose")
        gnss_msg = self.get_message(self._gnss_updates, timestamp, "GNSS")
        imu_msg = self.get_message(self._imu_updates, timestamp, "IMU")

        if self._last_pose_estimate is None or \
           (abs(imu_msg.acceleration.y) > 100 and not self._is_started):
            self._logger.debug(
                "@{}: The initial pose estimate is not initialized.".format(
                    timestamp))
            # If this is the first update or values have not stabilized,
            # save the estimates.
            if pose_msg:
                self._last_pose_estimate = pose_msg.data
                self._last_timestamp = timestamp.coordinates[0]
                if (self._flags.execution_mode == 'challenge-map'
                        or self._flags.execution_mode == 'challenge-sensors'):
                    self._pose_stream.send(pose_msg)
                    self._pose_stream.send(erdos.WatermarkMessage(timestamp))
            else:
                raise NotImplementedError(
                    "Need pose message to initialize the estimates.")
        else:
            self._is_started = True

            # Initialize the delta_t
            current_ts = timestamp.coordinates[0]
            delta_t = (current_ts - self._last_timestamp) / 1000.0

            # Estimate the rotation.
            last_rotation_estimate = Quaternion.from_rotation(
                self._last_pose_estimate.transform.rotation)
            rotation_estimate = (
                last_rotation_estimate *
                Quaternion.from_angular_velocity(imu_msg.gyro, delta_t))

            # Transform the IMU accelerometer data from the body frame to the
            # world frame, and retrieve location and velocity estimates.
            accelerometer_data = last_rotation_estimate.matrix.dot(
                imu_msg.acceleration.as_numpy_array()) + self._g
            last_location_estimate = \
                self._last_pose_estimate.transform.location.as_numpy_array()
            last_velocity_estimate = \
                self._last_pose_estimate.velocity_vector.as_numpy_array()

            # Estimate the location.
            location_estimate = last_location_estimate + (
                delta_t * last_velocity_estimate) + ((
                    (delta_t**2) / 2.0) * accelerometer_data)

            # Estimate the velocity.
            velocity_estimate = last_velocity_estimate + (delta_t *
                                                          accelerometer_data)

            # Fuse the GNSS values using an EKF to fix drifts and noise in
            # the estimates.

            # Linearize the motion model and compute Jacobians.
            self.__F[0:3, 3:6] = np.identity(3) * delta_t
            self.__F[3:6, 6:9] = last_rotation_estimate.matrix.dot(
                -self.__skew_symmetric(accelerometer_data.reshape(
                    (3, 1)))) * delta_t

            # Fix estimates using GNSS
            gnss_reading = Location.from_gps(
                gnss_msg.latitude, gnss_msg.longitude,
                gnss_msg.altitude).as_numpy_array()
            (
                location_estimate,
                velocity_estimate,
                rotation_estimate,
            ) = self.__update_using_gnss(location_estimate, velocity_estimate,
                                         rotation_estimate, gnss_reading,
                                         delta_t)

            # Create the PoseMessage and send it downstream.
            current_pose = Pose(
                transform=Transform(location=Location(*location_estimate),
                                    rotation=rotation_estimate.as_rotation()),
                forward_speed=Vector3D(*velocity_estimate).magnitude(),
                velocity_vector=Vector3D(*velocity_estimate),
                localization_time=current_ts,
            )
            self._logger.debug("@{}: Predicted pose: {}".format(
                timestamp, current_pose))
            self._pose_stream.send(erdos.Message(timestamp, current_pose))
            self._pose_stream.send(erdos.WatermarkMessage(timestamp))

            # Set the estimates for the next iteration.
            self._last_timestamp = current_ts
            self._last_pose_estimate = current_pose
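
The __skew_symmetric helper used when linearizing the motion model is presumably the standard cross-product matrix; a minimal sketch, assuming the (3, 1) input shape implied by the reshape above:

import numpy as np

def skew_symmetric(v):
    """Builds the matrix [v]x such that [v]x @ w equals cross(v, w).

    Assumes v has shape (3, 1), mirroring the reshape in on_watermark.
    """
    return np.array([[0.0, -v[2, 0], v[1, 0]],
                     [v[2, 0], 0.0, -v[0, 0]],
                     [-v[1, 0], v[0, 0], 0.0]])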
Example #12
    def on_pose_update(self, msg):
        self._logger.debug("@[{}]: pose update.".format(msg.timestamp))

        transform = msg.data.transform
        location = carla.Location(transform.location.x, transform.location.y,
                                  transform.location.z)

        veh_extent = self._vehicle.bounding_box.extent.x

        tail_close_pt = Vector3D(-0.8 * veh_extent, 0.0, location.z).rotate(
            transform.rotation.yaw).as_carla_vector()
        tail_close_pt = location + carla.Location(tail_close_pt)

        tail_far_pt = Vector3D(-veh_extent - 1, 0.0, location.z).rotate(
            transform.rotation.yaw).as_carla_vector()
        tail_far_pt = location + carla.Location(tail_far_pt)

        for traffic_light, center, waypoints in self._traffic_lights:
            center_loc = carla.Location(center)

            if self._last_red_light_id and \
                    self._last_red_light_id == traffic_light.id:
                continue
            if center_loc.distance(location) > self.DISTANCE_LIGHT:
                continue
            if traffic_light.state != carla.TrafficLightState.Red:
                continue

            for wp in waypoints:
                tail_wp = self._map.get_waypoint(tail_far_pt)

                # Calculate the dot product
                ve_dir = transform.forward_vector
                wp_dir = wp.transform.get_forward_vector()
                dot_ve_wp = (ve_dir.x * wp_dir.x + ve_dir.y * wp_dir.y +
                             ve_dir.z * wp_dir.z)

                # Check the lane until all the "tail" has passed
                if tail_wp.road_id == wp.road_id and \
                        tail_wp.lane_id == wp.lane_id and dot_ve_wp > 0:
                    # This light is red and is affecting our lane
                    yaw_wp = wp.transform.rotation.yaw
                    lane_width = wp.lane_width
                    location_wp = wp.transform.location

                    lft_lane_wp = Vector3D(
                        0.4 * lane_width, 0.0,
                        location_wp.z).rotate(yaw_wp + 90).as_carla_vector()
                    lft_lane_wp = location_wp + carla.Location(lft_lane_wp)
                    rgt_lane_wp = Vector3D(
                        0.4 * lane_width, 0.0,
                        location_wp.z).rotate(yaw_wp - 90).as_carla_vector()
                    rgt_lane_wp = location_wp + carla.Location(rgt_lane_wp)

                    # Is the vehicle traversing the stop line?
                    if self.is_vehicle_crossing_line(
                        (tail_close_pt, tail_far_pt),
                        (lft_lane_wp, rgt_lane_wp)):
                        location = traffic_light.get_transform().location
                        message = TrafficInfractionMessage(
                            TrafficInfractionType.RED_LIGHT_INVASION,
                            Location.from_carla_location(location),
                            msg.timestamp)
                        self._traffic_light_invasion_stream.send(message)
                        self._last_red_light_id = traffic_light.id
                        break
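
is_vehicle_crossing_line is not shown in this listing; one straightforward implementation is a 2D segment-intersection test, e.g. with shapely (an assumption about the project's actual code):

from shapely.geometry import LineString

def is_vehicle_crossing_line(self, seg1, seg2):
    # Hypothetical sketch: project both segments onto the x-y plane and
    # check whether the vehicle tail segment crosses the stop line.
    line1 = LineString([(seg1[0].x, seg1[0].y), (seg1[1].x, seg1[1].y)])
    line2 = LineString([(seg2[0].x, seg2[0].y), (seg2[1].x, seg2[1].y)])
    return not line1.intersection(line2).is_empty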
Example #13
def test_vector_magnitude(point, expected):
    magnitude = Vector3D(*point).magnitude()
    assert np.isclose(magnitude, expected), \
        "The magnitude does not match the expected magnitude"
Example #14
def test_vector_as_numpy_array():
    vector_np = Vector3D().as_numpy_array()
    assert isinstance(vector_np, np.ndarray), \
        "Returned instance is not of type numpy.ndarray"
    assert all(vector_np == [0, 0, 0]), \
        "The values returned in the numpy array are not the expected values"
Example #15
def test_empty_vector3d():
    """ Test that an empty Vector3D is initialized at (0, 0, 0) """
    empty_vector = Vector3D()
    assert np.isclose(empty_vector.x, 0), "X value is not zero"
    assert np.isclose(empty_vector.y, 0), "Y value is not zero"
    assert np.isclose(empty_vector.z, 0), "Z value is not zero"