Example #1
def test_pixel_has_same_depth(x, y, z, threshold, expected):
    """Tests if the pixel at (x,y) has a depth within the specified
       threshold of z."""
    camera_setup = None
    depth_frame = DepthFrame([[0, 0.1, 0], [0, 0, 0.5]], camera_setup)
    assert depth_frame.pixel_has_same_depth(x, y, z, threshold) is expected, \
           "Depth thresholding did not work correctly."
Example #2
def test_get_pixel_locations(depth_frame, pixels, expected):
    height, width = depth_frame.shape
    camera_setup = CameraSetup('test_setup',
                               'test_type',
                               width,
                               height,
                               Transform(location=Location(0, 0, 0),
                                         rotation=Rotation(0, 0, 0)),
                               fov=90)
    depth_frame = DepthFrame(depth_frame, camera_setup)
    locations = depth_frame.get_pixel_locations(pixels)
    for i in range(len(pixels)):
        assert np.isclose(locations[i].x, expected[i].x), \
            'Returned x value is not the same as expected'
        assert np.isclose(locations[i].y, expected[i].y), \
            'Returned y value is not the same as expected'
        assert np.isclose(locations[i].z, expected[i].z), \
            'Returned z value is not the same as expected'
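get_pixel_locations back-projects 2D pixels into 3D using intrinsics derived from the width, height, and fov passed to CameraSetup. A minimal sketch of the standard pinhole intrinsic matrix such a setup implies (pylot's own helper may differ in small details, e.g. the principal-point offset):

import numpy as np

def intrinsic_matrix(width, height, fov):
    # Focal length derived from the horizontal field of view;
    # principal point at the image center.
    focal = width / (2.0 * np.tan(np.radians(fov) / 2.0))
    return np.array([[focal, 0.0, width / 2.0],
                     [0.0, focal, height / 2.0],
                     [0.0, 0.0, 1.0]])

# With a 90 degree FOV, the focal length equals half the image width.
K = intrinsic_matrix(800, 600, 90.0)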
Example #3
def test_depth_to_point_cloud(depth_frame, expected):
    height, width = depth_frame.shape
    camera_setup = CameraSetup('test_setup',
                               'test_type',
                               width,
                               height,
                               Transform(location=Location(0, 0, 0),
                                         rotation=Rotation(0, 0, 0)),
                               fov=90)
    depth_frame = DepthFrame(depth_frame, camera_setup)
    # Resulting unreal coordinates.
    point_cloud = depth_frame.as_point_cloud()
    for i in range(width * height):
        assert np.isclose(point_cloud[i].x, expected[i].x), \
            'Returned x value is not the same as expected'
        assert np.isclose(point_cloud[i].y, expected[i].y), \
            'Returned y value is not the same as expected'
        assert np.isclose(point_cloud[i].z, expected[i].z), \
            'Returned z value is not the same as expected'
Example #4
def test_depth_to_point_cloud(depth_frame, expected):
    height, width = depth_frame.shape
    camera_setup = CameraSetup('test_setup',
                               'sensor.camera.depth',
                               width,
                               height,
                               Transform(location=Location(0, 0, 0),
                                         rotation=Rotation(0, 0, 0)),
                               fov=90)
    depth_frame = DepthFrame(depth_frame, camera_setup)
    # Resulting unreal coordinates.
    point_cloud = depth_frame.as_point_cloud()
    for i in range(width * height):
        assert np.isclose(point_cloud[i][0], expected[i][0]), \
            'Returned x value is not the same as expected'
        assert np.isclose(point_cloud[i][1], expected[i][1]), \
            'Returned y value is not the same as expected'
        assert np.isclose(point_cloud[i][2], expected[i][2]), \
            'Returned z value is not the same as expected'
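The comments in Examples #3 and #4 note that as_point_cloud returns points in Unreal/CARLA coordinates (x forward, y right, z up) rather than camera coordinates (x right, y down, z forward). A hedged sketch of that axis change, following the usual CARLA convention; the library may implement it as a matrix product instead:

import numpy as np

def camera_to_unreal(points_cam):
    # points_cam is an (N, 3) array in camera coordinates
    # (x right, y down, z forward).
    x_c, y_c, z_c = points_cam[:, 0], points_cam[:, 1], points_cam[:, 2]
    # Unreal/CARLA: x forward, y right, z up.
    return np.stack([z_c, x_c, -y_c], axis=1)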
Example #5
def on_depth_msg(simulator_image):
    global DEPTH_FRAME
    transform = pylot.utils.Transform.from_simulator_transform(
        simulator_image.transform)
    camera_setup = DepthCameraSetup("depth_camera", FLAGS.frame_width,
                                    FLAGS.frame_height, transform,
                                    FLAGS.camera_fov)

    DEPTH_FRAME = DepthFrame.from_simulator_frame(simulator_image,
                                                  camera_setup)
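Examples #5 and #6 read the frame dimensions and field of view from an absl FLAGS object defined elsewhere in the script. A minimal sketch of the flag definitions such a script would need (defaults are illustrative):

from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_integer('frame_width', 800, 'Camera frame width in pixels.')
flags.DEFINE_integer('frame_height', 600, 'Camera frame height in pixels.')
flags.DEFINE_float('camera_fov', 90.0, 'Horizontal field of view in degrees.')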
Example #6
def on_depth_msg(carla_image):
    global DEPTH_FRAME
    transform = pylot.utils.Transform.from_carla_transform(
        carla_image.transform)
    camera_setup = DepthCameraSetup("depth_camera", FLAGS.frame_width,
                                    FLAGS.frame_height, transform,
                                    FLAGS.camera_fov)

    DEPTH_FRAME = DepthFrameMessage(
        DepthFrame.from_carla_frame(carla_image, camera_setup),
        int(carla_image.timestamp * 1000))
Example #7
    def process_images(self, simulator_image):
        """ Invoked when an image is received from the simulator.

        Args:
            simulator_image: a carla.Image.
        """
        game_time = int(simulator_image.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        with erdos.profile(self.config.name + '.process_images',
                           self,
                           event_data={'timestamp': str(timestamp)}):
            # Ensure that the code executes serially
            with self._lock:
                msg = None
                if self._camera_setup.camera_type == 'sensor.camera.rgb':
                    msg = FrameMessage(
                        timestamp,
                        CameraFrame.from_simulator_frame(
                            simulator_image, self._camera_setup))
                elif self._camera_setup.camera_type == 'sensor.camera.depth':
                    # Include the transform relative to the vehicle.
                    # simulator_image.transform returns the world transform,
                    # but we do not use it directly.
                    msg = DepthFrameMessage(
                        timestamp,
                        DepthFrame.from_simulator_frame(
                            simulator_image,
                            self._camera_setup,
                            save_original_frame=self._flags.
                            visualize_depth_camera))
                elif (self._camera_setup.camera_type ==
                      'sensor.camera.semantic_segmentation'):
                    msg = SegmentedFrameMessage(
                        timestamp,
                        SegmentedFrame.from_simulator_image(
                            simulator_image, self._camera_setup))

                if self._release_data:
                    self._camera_stream.send(msg)
                    self._camera_stream.send(watermark_msg)
                else:
                    # Pickle the data, and release it upon release msg receipt.
                    pickled_msg = pickle.dumps(
                        msg, protocol=pickle.HIGHEST_PROTOCOL)
                    with self._pickle_lock:
                        self._pickled_messages[msg.timestamp] = pickled_msg
                    self._notify_reading_stream.send(watermark_msg)
Example #8
    def process_images(self, carla_image):
        """ Invoked when an image is received from the simulator.

        Args:
            carla_image: a carla.Image.
        """
        # Ensure that the code executes serially
        with self._lock:
            game_time = int(carla_image.timestamp * 1000)
            timestamp = erdos.Timestamp(coordinates=[game_time])
            watermark_msg = erdos.WatermarkMessage(timestamp)

            msg = None
            if self._camera_setup.camera_type == 'sensor.camera.rgb':
                msg = FrameMessage(
                    timestamp,
                    CameraFrame.from_carla_frame(carla_image,
                                                 self._camera_setup))
            elif self._camera_setup.camera_type == 'sensor.camera.depth':
                # Include the transform relative to the vehicle.
                # carla_image.transform returns the world transform, but we
                # do not use it directly.
                msg = DepthFrameMessage(
                    timestamp,
                    DepthFrame.from_carla_frame(carla_image,
                                                self._camera_setup))
            elif self._camera_setup.camera_type == \
                 'sensor.camera.semantic_segmentation':
                msg = SegmentedFrameMessage(
                    timestamp,
                    SegmentedFrame.from_carla_image(carla_image,
                                                    self._camera_setup))
            # Send the message containing the frame.
            self._camera_stream.send(msg)
            # Note: The operator is set not to automatically propagate
            # watermark messages received on input streams. Thus, we can
            # issue watermarks only after the Carla callback is invoked.
            self._camera_stream.send(watermark_msg)
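Example #7 defers sending by pickling each message and buffering it in _pickled_messages until downstream operators signal readiness. A hedged sketch of the matching release path (the callback name and wiring are assumptions; the actual operator defines its own):

import pickle

def on_release_msg(self, timestamp):
    # Hypothetical release callback: look up the buffered message for
    # this timestamp and forward it downstream.
    with self._pickle_lock:
        pickled_msg = self._pickled_messages.pop(timestamp)
    self._camera_stream.send(pickle.loads(pickled_msg))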
Example #9
def on_depth_msg(simulator_image):
    game_time = int(simulator_image.timestamp * 1000)
    print("Received depth camera msg {}".format(game_time))

    depth_camera_transform = pylot.utils.Transform.from_simulator_transform(
        simulator_image.transform)

    camera_setup = CameraSetup("depth_camera",
                               "sensor.camera.depth",
                               800,
                               600,
                               depth_camera_transform,
                               fov=90.0)
    depth_frame = DepthFrame.from_simulator_frame(simulator_image,
                                                  camera_setup)

    for (x, y) in pixels_to_check:
        print("{} Depth at pixel {}".format((x, y), depth_frame.frame[y][x]))
        pos3d_depth = depth_frame.get_pixel_locations(
            [pylot.utils.Vector2D(x, y)])[0]
        print("{} Computed using depth map {}".format((x, y), pos3d_depth))

    global depth_pc
    depth_pc = depth_frame.as_point_cloud()
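Example #9 iterates over a module-level pixels_to_check that is defined elsewhere in the script. A minimal sketch, assuming a few probe pixels inside the 800x600 frame:

# Hypothetical probe pixels (x, y); the real script defines its own
# pixels of interest.
pixels_to_check = [(400, 300), (200, 150), (600, 450)]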
Example #10
    def compute_depth(self, timestamp, depth_estimation_stream):
        self._logger.debug('@{}: {} received watermark'.format(
            timestamp, self._name))
        start_time = time.time()

        imgL = self._left_imgs.pop(timestamp)
        imgR = self._right_imgs.pop(timestamp)

        cudnn.benchmark = False
        self._model.eval()
        imgL = imgL.float().cuda().unsqueeze(0)
        imgR = imgR.float().cuda().unsqueeze(0)
        with torch.no_grad():
            outputs = self._model(imgL, imgR)
            output = torch.squeeze(outputs[2], 1)
        output = output.squeeze().cpu().numpy()
        # Convert the output (disparity) to depth; this step is
        # model-dependent.
        depth = preprocess.disp2depth(output)
        # Get runtime in ms.
        runtime = (time.time() - start_time) * 1000
        self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                     self._name, timestamp,
                                                     runtime))

        if self._flags.visualize_depth_est:
            cv2.imshow(self._name, output)
            cv2.waitKey(1)

        camera_setup = CameraSetup("depth_estimation",
                                   "estimation.anynet",
                                   depth.shape[1],
                                   depth.shape[0],
                                   self._transform,
                                   fov=self._fov)
        depth_estimation_stream.send(
            DepthFrameMessage(DepthFrame(depth, camera_setup), timestamp))
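The comment above notes that converting disparity to depth is model-dependent. For a rectified stereo pair the usual relation is depth = focal_length * baseline / disparity; a hedged sketch of what preprocess.disp2depth might compute, with illustrative focal length and baseline (the real AnyNet preprocessing may scale differently):

import numpy as np

def disp2depth(disparity, focal_length=720.0, baseline=0.54):
    # depth = f * B / d; clip to avoid division by zero at pixels where
    # the network predicts no disparity. focal_length is in pixels,
    # baseline in meters; both values here are illustrative.
    disparity = np.clip(disparity, 1e-6, None)
    return focal_length * baseline / disparity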
Example #11
def process_depth_images(msg,
                         depth_camera_setup,
                         ego_vehicle,
                         speed,
                         csv,
                         surface,
                         visualize=False):
    print("Received a message for the time: {}".format(msg.timestamp))

    # If we are within range of the destination, stop and exit with success.
    if ego_vehicle.get_location().distance(VEHICLE_DESTINATION) <= 5:
        ego_vehicle.set_velocity(carla.Vector3D())
        CLEANUP_FUNCTION()
        sys.exit(0)

    # Get the RGB image corresponding to the given depth image timestamp.
    rgb_image = retrieve_rgb_image(msg.timestamp)

    # Get the semantic image corresponding to the given depth image timestamp.
    semantic_image = retrieve_semantic_image(msg.timestamp)
    semantic_frame = SegmentedFrame.from_carla_image(semantic_image,
                                                     depth_camera_setup)

    # Visualize the image and the bounding boxes if needed.
    if visualize:
        draw_image(rgb_image, surface)

    # Transform people into obstacles relative to the current frame.
    bb_surface = None
    resolution = (depth_camera_setup.width, depth_camera_setup.height)
    if visualize:
        bb_surface = pygame.Surface(resolution)
        bb_surface.set_colorkey((0, 0, 0))

    vehicle_transform = Transform.from_carla_transform(
        ego_vehicle.get_transform())

    depth_frame = DepthFrame.from_carla_frame(msg, depth_camera_setup)
    # Transform the static camera setup with respect to the location of the
    # vehicle in the world.
    depth_frame.camera_setup.set_transform(vehicle_transform *
                                           depth_frame.camera_setup.transform)

    detected_people = []
    for person in ego_vehicle.get_world().get_actors().filter('walker.*'):
        obstacle = Obstacle.from_carla_actor(person)
        if obstacle.distance(vehicle_transform) > 125:
            bbox = None
        else:
            bbox = obstacle.to_camera_view(depth_frame, semantic_frame.frame)
        if bbox is not None:
            detected_people.append(bbox)
            if visualize:
                draw_bounding_box(bbox, bb_surface)

    # We have drawn all the bounding boxes on the bb_surface, now put it on
    # the RGB image surface.
    if visualize:
        surface.blit(bb_surface, (0, 0))
        pygame.display.flip()

    # Compute the mAP.
    print("We detected a total of {} people.".format(len(detected_people)))
    compute_and_log_map(detected_people, msg.timestamp, csv)

    # Move the ego_vehicle according to the given speed.
    ego_vehicle.set_velocity(carla.Vector3D(x=-speed))

    ego_vehicle.get_world().tick()
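Example #11 calls a module-level CLEANUP_FUNCTION before exiting. A hedged sketch of what such a helper might look like in a CARLA script (the actor list is an assumption; the real script registers its own):

def cleanup(actors):
    # Hypothetical cleanup: destroy spawned actors so repeated runs
    # start from a clean simulator state.
    for actor in actors:
        if actor is not None:
            actor.destroy()

# Typically bound once after the actors are spawned, e.g.:
# CLEANUP_FUNCTION = functools.partial(cleanup, [ego_vehicle, depth_camera])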