def _camera_callback(self, msg: Image, args: tuple):
    # Cache the latest frame under the field this topic feeds.
    field_name, _ = args
    if field_name == "observation":
        self._last_image = process_image(msg, {
            'height': 200,
            'width': 200,
            'depth': 3
        })
    elif field_name == "mask":
        self._last_mask = process_image(msg, {
            'height': 200,
            'width': 200,
            'depth': 1
        })
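For reference, `process_image` is a helper from this codebase whose implementation is not shown here. A minimal stand-in, assuming the ROS `sensor_msgs/Image` carries 8-bit data and that `sensor_stats` only requests a resize and channel count (everything below is an illustrative sketch, not the repo's code):

import numpy as np
import cv2

def process_image_stub(msg, sensor_stats: dict) -> np.ndarray:
    """Hypothetical stand-in for the repo's process_image helper."""
    # Interpret the raw ROS image buffer as height x width x channels.
    array = np.frombuffer(msg.data, dtype=np.uint8).reshape(
        msg.height, msg.width, -1)
    # Resize to the requested resolution; cv2 expects (width, height).
    array = cv2.resize(array,
                       (sensor_stats['width'], sensor_stats['height']),
                       interpolation=cv2.INTER_AREA)
    # Collapse to a single channel when the stats ask for depth 1.
    if sensor_stats.get('depth', 3) == 1 \
            and array.ndim == 3 and array.shape[-1] == 3:
        array = cv2.cvtColor(array, cv2.COLOR_BGR2GRAY)
    # Normalize to [0, 1] floats, matching how later snippets use the output.
    return array.astype(np.float32) / 255.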
def _process_image(self, msg: Image, args: tuple) -> None:
    sensor_topic, sensor_stats = args
    image = process_image(msg, sensor_stats)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Upscale small frames (height + width + channels below 3000) for display.
    if sum(image.shape) < 3000:
        image = cv2.resize(image, (600, 400), interpolation=cv2.INTER_AREA)
    self._draw(image)
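The `(sensor_topic, sensor_stats)` tuple reaches these callbacks through rospy's `callback_args` parameter; a wiring sketch with a hypothetical topic name and stats, placed in the node's `__init__`:

import rospy
from sensor_msgs.msg import Image

# Inside the node's __init__; topic name and stats here are hypothetical,
# the real values come from the node's configuration.
sensor_topic = '/camera/image_raw'
sensor_stats = {'height': 200, 'width': 200, 'depth': 3}
rospy.Subscriber(sensor_topic, Image, self._process_image,
                 callback_args=(sensor_topic, sensor_stats))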
Example #3
def _process_observation(self):
    if self._view_msg is None:
        return None
    msg = copy.deepcopy(self._view_msg)
    self._view_msg = None
    if isinstance(msg, Image):
        image = process_image(msg, self._observation_sensor_stats)
    else:
        image = process_compressed_image(msg, self._observation_sensor_stats)
    return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
def _process_mask(self):
    if self._mask_msg is None:
        return None
    msg = copy.deepcopy(self._mask_msg)
    self._mask_msg = None
    if isinstance(msg, Image):
        image = process_image(msg, self._mask_sensor_stats)
    else:
        image = process_compressed_image(msg, self._mask_sensor_stats)
    # Stretch the mask to [0, 255] before colorizing; the epsilon guards
    # against division by zero on a constant mask.
    image -= image.min()
    image /= max(image.max(), 1e-6)
    image *= 255
    return cv2.applyColorMap(image.astype(np.uint8),
                             cv2.COLORMAP_AUTUMN) / 255.
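The colorized mask is returned as floats in [0, 1]; one possible downstream use is blending it onto the observation frame for display. A hypothetical overlay helper (not from the repo):

import cv2
import numpy as np

def overlay_mask(observation: np.ndarray, colored_mask: np.ndarray,
                 alpha: float = 0.5) -> np.ndarray:
    """Blend a [0, 1] float colormap onto a [0, 1] float BGR frame."""
    blended = cv2.addWeighted(observation.astype(np.float32), 1 - alpha,
                              colored_mask.astype(np.float32), alpha, 0.0)
    return np.clip(blended, 0., 1.)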
Example #5
def _process_image(self, msg: Image, args: tuple) -> None:
    sensor_topic, sensor_stats = args
    processed_image = process_image(msg, sensor_stats=sensor_stats)
    # HWC numpy array -> 1 x 3 x H x W float tensor for the network.
    processed_image = torch.Tensor(processed_image).permute(2, 0, 1).unsqueeze(0)
    assert processed_image.size()[0] == 1 and processed_image.size()[1] == 3
    output = self._model.forward([processed_image])[0].detach().cpu().numpy()
    cprint(f'output predicted {output}', self._logger, msg_type=MessageType.debug)
    action = Action(
        actor_name='dnn_actor',
        value=output  # assuming control is first output
    )
    self._publisher.publish(adapt_action_to_twist(action))
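`adapt_action_to_twist` is a repo adapter that is not shown. A plausible sketch, under the unverified assumption that `Action.value` is a six-element vector `[vx, vy, vz, roll_rate, pitch_rate, yaw_rate]`:

from geometry_msgs.msg import Twist

def adapt_action_to_twist_stub(action) -> Twist:
    """Hypothetical adapter: map a 6-dim action vector onto a Twist."""
    twist = Twist()
    (twist.linear.x, twist.linear.y, twist.linear.z,
     twist.angular.x, twist.angular.y, twist.angular.z) = action.value
    return twist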
def _process_image(self, msg: Image, args: tuple) -> None:
    sensor_topic, sensor_stats = args
    # Override defaults with node-specific camera specs when provided.
    if 'max_depth' in self._specs:
        sensor_stats['max_depth'] = self._specs['max_depth']
    if 'field_of_view' in self._specs:
        sensor_stats['field_of_view'] = self._specs['field_of_view']
    sensor_stats['num_smooth_bins'] = 1  # use full resolution
    processed_depth_image = process_image(msg, sensor_stats)
    # TODO: processed_depth_image has to be reduced to a 1d array, as yaw is calculated on the first matrix row
    if len(processed_depth_image) != 0:
        self._set_yaw_from_depth_map(
            depth_map=processed_depth_image,
            field_of_view=sensor_stats['field_of_view'],
            front_width=self._specs['front_width'])
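`_set_yaw_from_depth_map` is likewise not shown. One plausible reading steers toward the most open bearing within the frontal arc; the units, names, and logic below are all assumptions:

import numpy as np

def set_yaw_from_depth_map_stub(depth_map: np.ndarray,
                                field_of_view: float,
                                front_width: float) -> float:
    """Hypothetical: pick the bearing of the deepest reading ahead."""
    # Use one scan line of the depth map (cf. the TODO above).
    scan = depth_map[0] if depth_map.ndim > 1 else depth_map
    # Each column maps to a bearing across the field of view (degrees assumed).
    bearings = np.linspace(-field_of_view / 2, field_of_view / 2, len(scan))
    # Restrict to the frontal arc and head for the farthest free space.
    in_front = np.abs(bearings) <= front_width / 2
    deepest = np.argmax(np.where(in_front, scan, -np.inf))
    return np.radians(bearings[deepest])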
    def test_modified_state_frame_visualizer(self):
        self.output_dir = f'{get_data_dir(os.environ["CODEDIR"])}/test_dir/{get_filename_without_extension(__file__)}'
        os.makedirs(self.output_dir, exist_ok=True)
        config = {
            'output_path': self.output_dir,
            'modified_state_publisher_mode': 'CombinedGlobalPoses',
            'modified_state_frame_visualizer': True,
        }

        # spinoff roslaunch
        self._ros_process = RosWrapper(launch_file='load_ros.launch',
                                       config=config,
                                       visible=True)

        # subscribe to modified_state_topic
        self.frame_topic = '/modified_state_frame'
        subscribe_topics = [
            TopicConfig(topic_name=self.frame_topic, msg_type='Image'),
        ]
        # create publishers for modified state
        self.modified_state_topic = '/modified_state'
        publish_topics = [
            TopicConfig(topic_name=self.modified_state_topic,
                        msg_type='CombinedGlobalPoses'),
        ]
        self.ros_topic = TestPublisherSubscriber(
            subscribe_topics=subscribe_topics, publish_topics=publish_topics)
        # test center view
        message = get_fake_combined_global_poses(0, 0, 1, 3, 1, 1, 0, 0, 0)
        rospy.sleep(2)
        self.ros_topic.publishers[self.modified_state_topic].publish(message)

        safe_wait_till_true(
            '"/modified_state_frame" in kwargs["ros_topic"].topic_values.keys()',
            True,
            3,
            0.1,
            ros_topic=self.ros_topic)
        frame = process_image(
            self.ros_topic.topic_values['/modified_state_frame'])
        plt.imshow(frame)
        plt.show()
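`safe_wait_till_true` is a test utility used throughout these tests; its signature suggests it polls a string expression against `kwargs` until it matches the expected value or times out. A hedged sketch:

import time

def safe_wait_till_true_stub(expression: str, value, duration_s: float,
                             period_s: float, **kwargs):
    """Hypothetical: evaluate expression over kwargs until it equals value."""
    deadline = time.time() + duration_s
    while time.time() < deadline:
        try:
            if eval(expression, {}, {'kwargs': kwargs}) == value:
                return
        except KeyError:
            pass  # topic not received yet
        time.sleep(period_s)
    raise TimeoutError(f'{expression} never became {value}')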
Example #8
def _update(self):
    if self._last_image is not None:
        image = deepcopy(self._last_image)
        self._last_image = None
        image = process_image(image, {'height': 200, 'width': 200, 'depth': 3})
        if self._enhance_brightness:
            # Scale brightness so the mean pixel value lands around 140/255;
            # the epsilon guards against an all-black frame.
            original_mean = image.mean() * 255
            enhance_factor = 140 / max(original_mean, 1e-6)
            image = PILImage.fromarray(np.uint8(image * 255))
            enhancer = PILImageEnhance.Brightness(image)
            im_output = np.asarray(enhancer.enhance(enhance_factor))
            image_tensor = torch.from_numpy(im_output / 255).permute(2, 0, 1).float().unsqueeze(0).to(self.device)
        else:
            image_tensor = torch.from_numpy(image).permute(2, 0, 1).float().unsqueeze(0).to(self.device)
        prediction = self.model(image_tensor).detach().cpu().numpy().squeeze()
        if self._task != 'pretrain':
            self._publish_prediction(prediction)
            mask = self.model.encoder(image_tensor).detach().cpu().numpy().squeeze()
        else:
            mask = prediction
        self._publish_mask(mask)
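PIL's Brightness enhancer multiplies pixel values (clipping at 255), so the block above normalizes exposure toward a mean of 140/255. An equivalent numpy-only sketch, for illustration:

import numpy as np

def normalize_exposure(image: np.ndarray, target_mean: float = 140.) -> np.ndarray:
    """Rescale a [0, 1] float image so its mean maps to target_mean/255."""
    factor = target_mean / max(image.mean() * 255, 1e-6)
    return np.clip(image * factor, 0., 1.)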
Example #9
def _check_image(self, msg: Image) -> None:
    # Single-channel depth frame with an expected range of 0.1 m to 5 m.
    sensor_stats = {'depth': 1, 'min_depth': 0.1, 'max_depth': 5}
    processed_image = process_image(msg, sensor_stats)
    self._check_depth(processed_image)
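`_check_depth` is not shown; a plausible proximity check, where the attribute name and threshold are assumptions:

import numpy as np

def check_depth_stub(self, depth_map: np.ndarray,
                     collision_threshold: float = 0.3) -> None:
    """Hypothetical: flag a collision when any valid reading is too close."""
    valid = depth_map[np.isfinite(depth_map)]
    if valid.size and valid.min() < collision_threshold:
        self._collision_detected = True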
    def tweak_combined_axis_keyboard(self, measured_data, index,
                                     point_ref_drone):
        initial_point_ref_drone = copy.deepcopy(point_ref_drone)
        # gets fsm in taken over state
        safe_wait_till_true(
            'kwargs["ros_topic"].topic_values["/fsm/state"].data',
            FsmState.TakenOver.name,
            20,
            0.1,
            ros_topic=self.ros_topic)

        # once the drone is in a good starting position, invoke 'go' with key m
        last_pose = self.get_pose()  # ensure last_pose exists even if take-off finishes before the first iteration
        while self.ros_topic.topic_values[
                "/fsm/state"].data != FsmState.Running.name:
            # while taking off, update the reference point for the PID controller to remain at the same height
            self.ros_topic.publishers[self._reference_topic].publish(
                PointStamped(header=Header(frame_id="agent"),
                             point=Point(x=point_ref_drone[0],
                                         y=point_ref_drone[1],
                                         z=point_ref_drone[2])))
            last_pose = self.get_pose()
            rospy.sleep(0.5)

        measured_data[index] = {'x': [], 'y': [], 'z': [], 'yaw': []}
        point_ref_global = transform(points=[np.asarray(point_ref_drone)],
                                     orientation=R.from_euler(
                                         'XYZ', (0, 0, last_pose[-1]),
                                         degrees=False).as_matrix(),
                                     translation=np.asarray(last_pose[:3]))[0]

        # Mathias controller should keep drone in steady pose
        while self.ros_topic.topic_values[
                "/fsm/state"].data != FsmState.TakenOver.name:
            point_drone_global = self.get_pose()
            point_ref_drone = Point(
                x=point_ref_global[0] - point_drone_global[0],
                y=point_ref_global[1] - point_drone_global[1],
                z=point_ref_global[2] - point_drone_global[2])
            # rotate pose error to rotated frame
            pose_error_local = transform(points=[point_ref_drone],
                                         orientation=R.from_euler(
                                             'XYZ', (0, 0, -last_pose[-1]),
                                             degrees=False).as_matrix())[0]
            measured_data[index]['x'].append(pose_error_local.x)
            measured_data[index]['y'].append(pose_error_local.y)
            measured_data[index]['z'].append(pose_error_local.z)
            # measured_data[index]['yaw'].append(last_pose[-1])
            rospy.sleep(0.5)

        # landing is intentionally disabled here; drop the `False and` guard to re-enable it
        if False and '/bebop/land' in self.ros_topic.publishers:
            self.ros_topic.publishers['/bebop/land'].publish(Empty())

        colors = ['C0', 'C1', 'C2', 'C3', 'C4']
        styles = {'x': '-', 'y': '--', 'z': ':', 'yaw': '-.'}
        fig = plt.figure(figsize=(15, 15))
        for key in measured_data.keys():
            for a in measured_data[key].keys():
                if len(measured_data[key][a]) != 0:
                    plt.plot(measured_data[key][a],
                             linestyle=styles[a],
                             linewidth=3 if key == index else 1,
                             color=colors[key % len(colors)],
                             label=f'{key}: {a}')
        plt.legend()
        #plt.savefig(os.path.join(self.output_dir, 'measured_data.jpg'))
        plt.show()

        # print visualisation if it's in ros topic:
        if self.visualisation_topic in self.ros_topic.topic_values.keys():
            frame = process_image(
                self.ros_topic.topic_values[self.visualisation_topic])
            plt.figure(figsize=(15, 20))
            plt.imshow(frame)
            plt.axis('off')
            plt.tight_layout()
            plt.savefig(
                f'{os.environ["HOME"]}/kf_study/pid_tweak_{initial_point_ref_drone[0]}_{initial_point_ref_drone[1]}_{initial_point_ref_drone[2]}.png'
            )
        plt.clf()
        plt.close()

        index += 1
        index %= len(colors)
        return index
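`transform` is called twice above, once with a `translation` and once without, and appears to preserve the input point type. A hedged numpy sketch of the presumed rigid-body transform (the `Point` handling in particular is an assumption):

import numpy as np
from geometry_msgs.msg import Point

def transform_stub(points, orientation: np.ndarray,
                   translation: np.ndarray = None):
    """Hypothetical: apply p' = R @ p + t to each point, keeping its type."""
    translation = np.zeros(3) if translation is None else translation
    results = []
    for p in points:
        vector = np.asarray([p.x, p.y, p.z]) if isinstance(p, Point) \
            else np.asarray(p)
        out = orientation @ vector + translation
        results.append(Point(x=out[0], y=out[1], z=out[2])
                       if isinstance(p, Point) else out)
    return results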