def on_msg_camera_stream(self, msg, segmented_stream):
    """Invoked upon the receipt of a message on the camera stream.

    Runs the segmentation model on the received BGR frame and sends a
    cityscapes-encoded segmented frame downstream.

    Args:
        msg: A :py:class:`~pylot.perception.messages.FrameMessage`.
        segmented_stream (:py:class:`erdos.WriteStream`): Stream on which
            the operator sends
            :py:class:`~pylot.perception.messages.SegmentedFrameMessage`
            messages.
    """
    self._logger.debug('@{}: {} received message'.format(
        msg.timestamp, self.config.name))
    start_time = time.time()
    assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
    # HWC -> CHW, plus a leading batch dimension, as the model expects.
    image = torch.from_numpy(msg.frame.frame.transpose(
        [2, 0, 1])).unsqueeze(0).float()
    # BUG FIX: `Variable(..., volatile=True)` is deprecated since
    # PyTorch 0.4 and `volatile` no longer has any effect, so gradients
    # were being tracked during inference. torch.no_grad() is the
    # supported way to disable autograd for inference.
    with torch.no_grad():
        final = self._model(image)[0]
    _, pred = torch.max(final, 1)
    pred = pred.cpu().data.numpy()[0]
    # After we apply the pallete, the image is in RGB format
    image_np = self._pallete[pred.squeeze()]
    # Get runtime in ms.
    runtime = (time.time() - start_time) * 1000
    frame = SegmentedFrame(image_np, 'cityscapes', msg.frame.camera_setup)
    if self._flags.visualize_segmentation_output:
        frame.visualize(self.config.name,
                        msg.timestamp,
                        pygame_display=pylot.utils.PYGAME_DISPLAY)
    segmented_stream.send(
        SegmentedFrameMessage(msg.timestamp, frame, runtime))
def on_msg_camera_stream(self, msg, segmented_stream):
    """Camera stream callback method.

    Invoked upon the receipt of a message on the camera stream. Runs the
    segmentation model on the BGR frame, logs the runtime to the csv
    logger, and sends a cityscapes-encoded segmented frame downstream.
    """
    self._logger.debug('@{}: {} received message'.format(
        msg.timestamp, self._name))
    start_time = time.time()
    assert msg.frame.encoding == 'BGR', 'Expects BGR frames'
    # HWC -> CHW, plus a leading batch dimension, as the model expects.
    image = torch.from_numpy(msg.frame.frame.transpose(
        [2, 0, 1])).unsqueeze(0).float()
    # BUG FIX: `Variable(..., volatile=True)` is deprecated since
    # PyTorch 0.4 and `volatile` no longer has any effect, so gradients
    # were being tracked during inference. torch.no_grad() is the
    # supported way to disable autograd for inference.
    with torch.no_grad():
        final = self._model(image)[0]
    _, pred = torch.max(final, 1)
    pred = pred.cpu().data.numpy()[0]
    # After we apply the pallete, the image is in RGB format
    image_np = self._pallete[pred.squeeze()]
    # Get runtime in ms.
    runtime = (time.time() - start_time) * 1000
    self._csv_logger.info('{},{},"{}",{}'.format(time_epoch_ms(),
                                                 self._name, msg.timestamp,
                                                 runtime))
    frame = SegmentedFrame(image_np, 'cityscapes', msg.frame.camera_setup)
    if self._flags.visualize_segmentation_output:
        frame.visualize(self._name, msg.timestamp)
    segmented_stream.send(
        SegmentedFrameMessage(frame, msg.timestamp, runtime))
def process_segmentation_images(msg,
                                camera_setup,
                                ego_vehicle,
                                speed,
                                csv,
                                dump=False):
    """Evaluates segmentation mIoU for one frame, then steps the simulation.

    Exits the process with success once the ego vehicle is within 5 meters
    of VEHICLE_DESTINATION.
    """
    print("Received a message for the time: {}".format(msg.timestamp))

    # Close enough to the destination: halt the vehicle, clean up and exit
    # with success.
    if ego_vehicle.get_location().distance(VEHICLE_DESTINATION) <= 5:
        ego_vehicle.set_velocity(carla.Vector3D())
        CLEANUP_FUNCTION()
        sys.exit(0)

    # Score this frame's segmentation against the ground truth (mIoU).
    segmented_frame = SegmentedFrame.from_carla_image(msg, camera_setup)
    compute_and_log_miou(segmented_frame, msg.timestamp, csv)

    # Optionally persist the frame for offline inspection of the run.
    if dump:
        segmented_frame.save(int(msg.timestamp * 1000), './_out/', 'seg')

    # Drive the ego vehicle at the requested speed and advance the simulator
    # by one tick.
    ego_vehicle.set_velocity(carla.Vector3D(x=-speed))
    ego_vehicle.get_world().tick()
def on_segmented_msg(carla_image):
    """Caches the latest segmented camera frame in the SEGMENTED_FRAME global.

    Args:
        carla_image: Segmented image received from the simulator.
    """
    global SEGMENTED_FRAME
    transform = pylot.utils.Transform.from_carla_transform(
        carla_image.transform)
    # BUG FIX: the height previously came from FLAGS.camera_height while
    # the width came from FLAGS.frame_width — a mismatched flag family.
    # Use frame_width/frame_height consistently, matching the sibling
    # on_segmented_msg callback.
    camera_setup = SegmentedCameraSetup("segmented_camera",
                                        FLAGS.frame_width,
                                        FLAGS.frame_height, transform,
                                        FLAGS.camera_fov)
    SEGMENTED_FRAME = SegmentedFrame(carla_image, 'carla', camera_setup)
def on_segmented_msg(simulator_image):
    """Stores the most recent segmented frame in the SEGMENTED_FRAME global.

    Args:
        simulator_image: Segmented image received from the simulator.
    """
    global SEGMENTED_FRAME
    camera_transform = pylot.utils.Transform.from_simulator_transform(
        simulator_image.transform)
    setup = SegmentedCameraSetup("segmented_camera", FLAGS.frame_width,
                                 FLAGS.frame_height, camera_transform,
                                 FLAGS.camera_fov)
    SEGMENTED_FRAME = SegmentedFrame.from_simulator_image(
        simulator_image, setup)
def process_images(self, simulator_image):
    """ Invoked when an image is received from the simulator.

    Builds the message type that matches this operator's camera setup
    (RGB, depth, or semantic segmentation) and either sends it directly or
    pickles it for later release, depending on `self._release_data`.

    Args:
        simulator_image: a carla.Image.
    """
    # Simulator timestamps are in seconds; ERDOS coordinates use ms.
    game_time = int(simulator_image.timestamp * 1000)
    timestamp = erdos.Timestamp(coordinates=[game_time])
    watermark_msg = erdos.WatermarkMessage(timestamp)
    with erdos.profile(self.config.name + '.process_images',
                       self,
                       event_data={'timestamp': str(timestamp)}):
        # Ensure that the code executes serially
        with self._lock:
            # Stays None if the camera type matches no branch below.
            msg = None
            if self._camera_setup.camera_type == 'sensor.camera.rgb':
                msg = FrameMessage(
                    timestamp,
                    CameraFrame.from_simulator_frame(
                        simulator_image, self._camera_setup))
            elif self._camera_setup.camera_type == 'sensor.camera.depth':
                # Include the transform relative to the vehicle.
                # simulator_image.transform returns the world transform,
                # but we do not use it directly.
                msg = DepthFrameMessage(
                    timestamp,
                    DepthFrame.from_simulator_frame(
                        simulator_image,
                        self._camera_setup,
                        save_original_frame=self._flags.
                        visualize_depth_camera))
            elif (self._camera_setup.camera_type ==
                  'sensor.camera.semantic_segmentation'):
                msg = SegmentedFrameMessage(
                    timestamp,
                    SegmentedFrame.from_simulator_image(
                        simulator_image, self._camera_setup))
            if self._release_data:
                # Immediate mode: forward the frame and its watermark now.
                self._camera_stream.send(msg)
                self._camera_stream.send(watermark_msg)
            else:
                # Pickle the data, and release it upon release msg receipt.
                pickled_msg = pickle.dumps(
                    msg, protocol=pickle.HIGHEST_PROTOCOL)
                # Separate lock for the pickled-message store; held only
                # for the dict insertion.
                with self._pickle_lock:
                    self._pickled_messages[msg.timestamp] = pickled_msg
                # Notify downstream that data for this timestamp is ready.
                self._notify_reading_stream.send(watermark_msg)
def process_images(self, carla_image):
    """ Invoked when an image is received from the simulator.

    Args:
        carla_image: a carla.Image.
    """
    # Ensure that the code executes serially
    with self._lock:
        game_time = int(carla_image.timestamp * 1000)
        timestamp = erdos.Timestamp(coordinates=[game_time])
        watermark_msg = erdos.WatermarkMessage(timestamp)
        camera_type = self._camera_setup.camera_type
        msg = None
        if camera_type == 'sensor.camera.rgb':
            frame = CameraFrame.from_carla_frame(carla_image,
                                                 self._camera_setup)
            msg = FrameMessage(timestamp, frame)
        elif camera_type == 'sensor.camera.depth':
            # Include the transform relative to the vehicle.
            # Carla carla_image.transform returns the world transform, but
            # we do not use it directly.
            frame = DepthFrame.from_carla_frame(carla_image,
                                                self._camera_setup)
            msg = DepthFrameMessage(timestamp, frame)
        elif camera_type == 'sensor.camera.semantic_segmentation':
            frame = SegmentedFrame.from_carla_image(carla_image,
                                                    self._camera_setup)
            msg = SegmentedFrameMessage(timestamp, frame)
        # Send the message containing the frame.
        self._camera_stream.send(msg)
        # Note: The operator is set not to automatically propagate
        # watermark messages received on input streams. Thus, we can
        # issue watermarks only after the Carla callback is invoked.
        self._camera_stream.send(watermark_msg)
def process_depth_images(msg,
                         depth_camera_setup,
                         ego_vehicle,
                         speed,
                         csv,
                         surface,
                         visualize=False):
    """Computes pedestrian detection mAP for one depth frame.

    Projects all walkers in the world into the camera view using the depth
    and semantic frames, optionally visualizes the bounding boxes, logs the
    mAP, and steps the simulation. Exits the process with success once the
    ego vehicle is within 5 meters of VEHICLE_DESTINATION.

    Args:
        msg: The depth image received from the simulator.
        depth_camera_setup: The setup of the depth camera.
        ego_vehicle: The carla actor of the ego vehicle.
        speed: Speed (m/s) at which to drive the ego vehicle.
        csv: csv writer used to log results.
        surface: pygame surface on which frames are drawn when visualizing.
        visualize: Whether to draw the frames and bounding boxes.
    """
    print("Received a message for the time: {}".format(msg.timestamp))
    # If we are in distance to the destination, stop and exit with success.
    if ego_vehicle.get_location().distance(VEHICLE_DESTINATION) <= 5:
        ego_vehicle.set_velocity(carla.Vector3D())
        CLEANUP_FUNCTION()
        sys.exit(0)

    # Get the RGB image corresponding to the given depth image timestamp.
    rgb_image = retrieve_rgb_image(msg.timestamp)
    # Get the semantic image corresponding to the given depth image
    # timestamp.
    semantic_image = retrieve_semantic_image(msg.timestamp)

    # BUG FIX: `depth_frame.camera_setup` was previously referenced here
    # before `depth_frame` was assigned, raising a NameError at runtime.
    # Build the depth frame first so its camera setup can be shared with
    # the semantic frame.
    depth_frame = DepthFrame.from_carla_frame(msg, depth_camera_setup)
    semantic_frame = SegmentedFrame.from_carla_image(
        semantic_image, depth_frame.camera_setup)

    # Visualize the image and the bounding boxes if needed.
    if visualize:
        draw_image(rgb_image, surface)

    # Transform people into obstacles relative to the current frame.
    bb_surface = None
    resolution = (depth_camera_setup.width, depth_camera_setup.height)
    if visualize:
        bb_surface = pygame.Surface(resolution)
        bb_surface.set_colorkey((0, 0, 0))

    vehicle_transform = Transform.from_carla_transform(
        ego_vehicle.get_transform())
    # Transform the static camera setup with respect to the location of the
    # vehicle in the world.
    depth_frame.camera_setup.set_transform(
        vehicle_transform * depth_frame.camera_setup.transform)

    detected_people = []
    for person in ego_vehicle.get_world().get_actors().filter('walker.*'):
        obstacle = Obstacle.from_carla_actor(person)
        # Skip people that are too far away to matter for the camera view.
        if obstacle.distance(vehicle_transform) > 125:
            bbox = None
        else:
            bbox = obstacle.to_camera_view(depth_frame,
                                           semantic_frame.frame)
        if bbox is not None:
            detected_people.append(bbox)
            if visualize:
                draw_bounding_box(bbox, bb_surface)

    # We have drawn all the bounding boxes on the bb_surface, now put it on
    # the RGB image surface.
    if visualize:
        surface.blit(bb_surface, (0, 0))
        pygame.display.flip()

    # Compute the mAP.
    print("We detected a total of {} people.".format(len(detected_people)))
    compute_and_log_map(detected_people, msg.timestamp, csv)

    # Move the ego_vehicle according to the given speed.
    ego_vehicle.set_velocity(carla.Vector3D(x=-speed))
    ego_vehicle.get_world().tick()