def process_segmentation_images(msg,
                                camera_setup,
                                ego_vehicle,
                                speed,
                                csv,
                                dump=False):
    print("Received a message for the time: {}".format(msg.timestamp))

    # If we are within range of the destination, stop and exit with success.
    if ego_vehicle.get_location().distance(VEHICLE_DESTINATION) <= 5:
        ego_vehicle.set_velocity(carla.Vector3D())
        CLEANUP_FUNCTION()
        sys.exit(0)

    # Compute the segmentation mIoU.
    frame = SegmentedFrame.from_carla_image(msg, camera_setup)
    compute_and_log_miou(frame, msg.timestamp, csv)

    # Visualize the run.
    if dump:
        frame.save(int(msg.timestamp * 1000), './_out/', 'seg')

    # Move the ego_vehicle according to the given speed.
    ego_vehicle.set_velocity(carla.Vector3D(x=-speed))

    # Move the simulator forward.
    ego_vehicle.get_world().tick()
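
# A minimal, hypothetical sketch of the mIoU computation behind the
# compute_and_log_miou helper called above: a mean intersection-over-union
# between two dense label maps. Only the IoU math is shown; how the frame's
# labels and the reference labels are obtained, and how the result is
# written to the csv handle, is not part of the original snippet.
import numpy as np


def compute_miou_sketch(pred_labels, gt_labels, num_classes):
    # Accumulate IoU over every class present in either label map.
    ious = []
    for cls in range(num_classes):
        pred_mask = pred_labels == cls
        gt_mask = gt_labels == cls
        union = np.logical_or(pred_mask, gt_mask).sum()
        if union == 0:
            # The class appears in neither map; skip it.
            continue
        intersection = np.logical_and(pred_mask, gt_mask).sum()
        ious.append(intersection / union)
    return float(np.mean(ious)) if ious else 0.0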
def process_images(self, carla_image):
    """Invoked when an image is received from the simulator.

    Args:
        carla_image: a carla.Image.
    """
    game_time = int(carla_image.timestamp * 1000)
    timestamp = erdos.Timestamp(coordinates=[game_time])
    watermark_msg = erdos.WatermarkMessage(timestamp)
    with erdos.profile(self.config.name + '.process_images',
                       self,
                       event_data={'timestamp': str(timestamp)}):
        # Ensure that the code executes serially.
        with self._lock:
            msg = None
            if self._camera_setup.camera_type == 'sensor.camera.rgb':
                msg = FrameMessage(
                    timestamp,
                    CameraFrame.from_carla_frame(carla_image,
                                                 self._camera_setup))
            elif self._camera_setup.camera_type == 'sensor.camera.depth':
                # Include the transform relative to the vehicle;
                # carla_image.transform returns the world transform, which
                # we do not use directly.
                msg = DepthFrameMessage(
                    timestamp,
                    DepthFrame.from_carla_frame(
                        carla_image,
                        self._camera_setup,
                        save_original_frame=self._flags.
                        visualize_depth_camera))
            elif (self._camera_setup.camera_type ==
                  'sensor.camera.semantic_segmentation'):
                msg = SegmentedFrameMessage(
                    timestamp,
                    SegmentedFrame.from_carla_image(carla_image,
                                                    self._camera_setup))
            if self._release_data:
                self._camera_stream.send(msg)
                self._camera_stream.send(watermark_msg)
            else:
                # Pickle the data, and release it upon release msg receipt.
                pickled_msg = pickle.dumps(
                    msg, protocol=pickle.HIGHEST_PROTOCOL)
                with self._pickle_lock:
                    self._pickled_messages[msg.timestamp] = pickled_msg
                self._notify_reading_stream.send(watermark_msg)
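
# The non-release branch above only buffers pickled frames and notifies a
# control stream. A plausible counterpart, sketched here under assumptions
# (the method name on_release_data, and the idea that a downstream "release"
# watermark triggers the send, are not shown in the original), would pop the
# pickled message for the released timestamp and forward it:
def on_release_data(self, timestamp):
    # Look up the frame that was pickled when the Carla callback fired.
    with self._pickle_lock:
        pickled_msg = self._pickled_messages.pop(timestamp)
    msg = pickle.loads(pickled_msg)
    # Forward the frame followed by its watermark, mirroring the
    # _release_data == True path of process_images.
    self._camera_stream.send(msg)
    self._camera_stream.send(erdos.WatermarkMessage(timestamp))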
def process_images(self, carla_image):
    """Invoked when an image is received from the simulator.

    Args:
        carla_image: a carla.Image.
    """
    # Ensure that the code executes serially.
    with self._lock:
        game_time = int(carla_image.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        msg = None
        if self._camera_setup.camera_type == 'sensor.camera.rgb':
            msg = FrameMessage(
                timestamp,
                CameraFrame.from_carla_frame(carla_image,
                                             self._camera_setup))
        elif self._camera_setup.camera_type == 'sensor.camera.depth':
            # Include the transform relative to the vehicle;
            # carla_image.transform returns the world transform, which we
            # do not use directly.
            msg = DepthFrameMessage(
                timestamp,
                DepthFrame.from_carla_frame(carla_image,
                                            self._camera_setup))
        elif (self._camera_setup.camera_type ==
              'sensor.camera.semantic_segmentation'):
            msg = SegmentedFrameMessage(
                timestamp,
                SegmentedFrame.from_carla_image(carla_image,
                                                self._camera_setup))
        # Send the message containing the frame.
        self._camera_stream.send(msg)
        # Note: The operator is set not to automatically propagate
        # watermark messages received on input streams. Thus, we can
        # issue watermarks only after the Carla callback is invoked.
        self._camera_stream.send(watermark_msg)
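
# For context, process_images is a simulator callback: CARLA invokes it on
# a background thread for every captured frame, which is why the body runs
# under self._lock. A sketch of how such a callback is registered, using the
# standard CARLA sensor API (the mount point and variable names here are
# illustrative, not taken from the original code):
def attach_camera(world, vehicle, operator):
    blueprint = world.get_blueprint_library().find('sensor.camera.rgb')
    # Mount the camera slightly above the hood of the ego vehicle.
    transform = carla.Transform(carla.Location(x=1.5, z=2.4))
    camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
    # CARLA calls the given callable with a carla.Image for every frame.
    camera.listen(operator.process_images)
    return camera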
def process_depth_images(msg,
                         depth_camera_setup,
                         ego_vehicle,
                         speed,
                         csv,
                         surface,
                         visualize=False):
    print("Received a message for the time: {}".format(msg.timestamp))

    # If we are within range of the destination, stop and exit with success.
    if ego_vehicle.get_location().distance(VEHICLE_DESTINATION) <= 5:
        ego_vehicle.set_velocity(carla.Vector3D())
        CLEANUP_FUNCTION()
        sys.exit(0)

    # Get the RGB image corresponding to the given depth image timestamp.
    rgb_image = retrieve_rgb_image(msg.timestamp)

    # Get the semantic image corresponding to the given depth image
    # timestamp.
    semantic_image = retrieve_semantic_image(msg.timestamp)
    semantic_frame = SegmentedFrame.from_carla_image(semantic_image,
                                                     depth_camera_setup)

    # Visualize the image and the bounding boxes if needed.
    if visualize:
        draw_image(rgb_image, surface)

    # Transform people into obstacles relative to the current frame.
    bb_surface = None
    resolution = (depth_camera_setup.width, depth_camera_setup.height)
    if visualize:
        bb_surface = pygame.Surface(resolution)
        bb_surface.set_colorkey((0, 0, 0))

    vehicle_transform = Transform.from_carla_transform(
        ego_vehicle.get_transform())

    depth_frame = DepthFrame.from_carla_frame(msg, depth_camera_setup)
    # Transform the static camera setup with respect to the location of the
    # vehicle in the world.
    depth_frame.camera_setup.set_transform(
        vehicle_transform * depth_frame.camera_setup.transform)

    detected_people = []
    for person in ego_vehicle.get_world().get_actors().filter('walker.*'):
        obstacle = Obstacle.from_carla_actor(person)
        if obstacle.distance(vehicle_transform) > 125:
            bbox = None
        else:
            bbox = obstacle.to_camera_view(depth_frame,
                                           semantic_frame.frame)
        if bbox is not None:
            detected_people.append(bbox)
            if visualize:
                draw_bounding_box(bbox, bb_surface)

    # We have drawn all the bounding boxes on the bb_surface; now put it on
    # the RGB image surface.
    if visualize:
        surface.blit(bb_surface, (0, 0))
        pygame.display.flip()

    # Compute the mAP.
    print("We detected a total of {} people.".format(len(detected_people)))
    compute_and_log_map(detected_people, msg.timestamp, csv)

    # Move the ego_vehicle according to the given speed.
    ego_vehicle.set_velocity(carla.Vector3D(x=-speed))

    # Move the simulator forward.
    ego_vehicle.get_world().tick()
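
# process_depth_images relies on retrieve_rgb_image / retrieve_semantic_image
# to pair the depth reading with the RGB and semantic images captured at the
# same simulator timestamp. A minimal sketch of that pairing, assuming each
# camera callback appends its images to a timestamp-keyed buffer (the deque
# and the buffer_rgb_image helper are assumptions, not the original
# implementation):
from collections import deque

RGB_IMAGE_BUFFER = deque()


def buffer_rgb_image(image):
    # Invoked by the RGB camera's listen() callback.
    RGB_IMAGE_BUFFER.append((image.timestamp, image))


def retrieve_rgb_image(timestamp):
    # Discard stale frames until the matching timestamp is found.
    while RGB_IMAGE_BUFFER:
        ts, image = RGB_IMAGE_BUFFER.popleft()
        if ts == timestamp:
            return image
    raise ValueError("No RGB image buffered for time {}".format(timestamp))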