Example #1
    def test_bounding_box(self):
        resolution = (400, 400)
        tracking_agent_position = [0, 0, 4]
        tracking_agent_orientation = [0, 0, 0]
        fleeing_agent_position = [2, 0, 1]
        bounding_boxes = calculate_bounding_box(state=[
            *tracking_agent_position, *fleeing_agent_position,
            *tracking_agent_orientation
        ],
                                                orientation=(0, 0, 1),
                                                resolution=resolution)
        position = bounding_boxes[3]
        width = bounding_boxes[4]
        height = bounding_boxes[5]
        frame = np.zeros(resolution)
        frame[position[1] - height // 2:position[1] + height // 2,
              position[0] - width // 2:position[0] + width // 2] = 1
        plt.imshow(frame)  # visual sanity check of the rasterized box
        plt.show()

        self.assertEqual(bounding_boxes,
                         ((500, 500), 66, 66, (500, 500), 66, 66))

        fleeing_agent_position = [3, 2, 1]
        bounding_boxes = calculate_bounding_box(state=[
            *tracking_agent_position, *fleeing_agent_position,
            *tracking_agent_orientation
        ],
                                                resolution=resolution)
        self.assertEqual(bounding_boxes,
                         ((500, 500), 66, 66, (166, 500), 63, 66))

        fleeing_agent_position = [3, 0, 1]
        bounding_boxes = calculate_bounding_box(state=[
            *tracking_agent_position, *fleeing_agent_position,
            *tracking_agent_orientation
        ],
                                                resolution=resolution)
        self.assertEqual(bounding_boxes,
                         ((500, 500), 66, 66, (833, 500), 63, 66))

        fleeing_agent_position = [4, 1, 1]
        tracking_agent_orientation = [0.3, 0, 0]
        bounding_boxes = calculate_bounding_box(state=[
            *tracking_agent_position, *fleeing_agent_position,
            *tracking_agent_orientation
        ],
                                                resolution=resolution)
        self.assertEqual(bounding_boxes,
                         ((500, 500), 66, 66, (190, 500), 50, 52))
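
Judging from the assertions above, calculate_bounding_box appears to return ((cx0, cy0), w0, h0, (cx1, cy1), w1, h1): pixel-space centers and sizes for the tracking and the fleeing agent. A minimal sketch of the rasterization step the test performs, under that assumed convention, with clamping added so that centers outside the frame do not wrap around:

import numpy as np

def rasterize_box(frame: np.ndarray, center, width, height, value=1):
    # Fill an axis-aligned box centered at (x, y) into `frame`. Rows are
    # indexed by y and height, columns by x and width, matching the slicing
    # in test_bounding_box above; bounds are clamped to the frame.
    x, y = center
    rows, cols = frame.shape
    frame[max(y - height // 2, 0):min(y + height // 2, rows),
          max(x - width // 2, 0):min(x + width // 2, cols)] = value
    return frame
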
Example #2
    def get_action(self,
                   inputs,
                   train: bool = False,
                   agent_id: int = -1) -> Action:
        positions = np.squeeze(self.process_inputs(inputs))
        try:
            bb = calculate_bounding_box(inputs)
            inputs_bb = (bb[3][0], bb[3][1], bb[4], bb[5])
            inputs_ = (bb[3][0] - 200) / 40
            inputs = torch.Tensor([inputs_])
            self.previous_input = inputs_
        except TypeError:
            inputs_bb = (200, 200, 40, 40)
            inputs = torch.Tensor([self.previous_input])
        if agent_id == 0:  # tracking agent ==> tracking_linear_y
            output = self.sample(inputs,
                                 train=train).clamp(min=self.action_min,
                                                    max=self.action_max)
            actions = np.stack(
                [0, output.data.cpu().numpy().squeeze(), 0, 0, 0, 0, 0, 0])
        elif agent_id == 1:  # fleeing agent ==> fleeing_linear_y
            output = self.sample(inputs, train=train,
                                 adversarial=True).clamp(min=self.action_min,
                                                         max=self.action_max)
            actions = np.stack(
                [0, 0, 0, 0,
                 output.data.cpu().numpy().squeeze(), 0, 0, 0])
        else:
            output = self.sample(inputs, train=train,
                                 adversarial=False).clamp(min=self.action_min,
                                                          max=self.action_max)
            adversarial_output = self.sample(inputs,
                                             train=train,
                                             adversarial=True).clamp(
                                                 min=self.action_min,
                                                 max=self.action_max)

            # rand_run = get_rand_run_ros(self.waypoint, np.asarray(positions[3:6]).squeeze(), self._playfield_size)
            # run_action = np.squeeze(rand_run[1])
            # self.waypoint = np.squeeze(rand_run[0])
            # hunt_action = np.squeeze(get_slow_hunt_ros(np.asarray(positions), self._playfield_size))
            # run_action = np.squeeze(get_slow_run_ros(inputs_bb, self._playfield_size))

            actions = np.stack([
                0,
                output.data.cpu().numpy().squeeze().item(), 0, 0,
                adversarial_output.data.cpu().numpy().squeeze().item(), 0, 0, 0
            ],
                               axis=-1)

            # actions = np.stack([0, output.data.cpu().numpy().squeeze().item(), 0, *run_action, 0, 0], axis=-1)

        # actions = self.adjust_height(positions, actions)  # not necessary: the controller keeps altitude fixed
        actions = clip_action_according_to_playfield_size_flipped(
            positions.detach().numpy().squeeze(), actions,
            self._playfield_size)
        return Action(actor_name="tracking_fleeing_agent",
                      value=actions)  # assumes output [1, 8], so no batch
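
The try/except around calculate_bounding_box (it evidently raises TypeError when no box can be unpacked, e.g. when the fleeing agent is out of view) and the (x - 200) / 40 normalization recur across these methods. A hypothetical consolidation of that pattern; normalized_bb_input is an invented name and the 200/40 constants are copied from the code above:

def normalized_bb_input(state, previous, center=200.0, scale=40.0):
    # Sketch only: normalized pixel x-center of the fleeing agent's box,
    # falling back to the previous value when no box is available.
    try:
        bb = calculate_bounding_box(state)
        return (bb[3][0] - center) / scale
    except TypeError:
        return previous
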
Example #3
    def test_bounding_box(self):
        resolution = (1000, 1000)
        tracking_agent_position = [1, 0, 1]
        tracking_agent_orientation = [0, 0, 0]
        fleeing_agent_position = [1, 3, 1]
        bounding_boxes = calculate_bounding_box(state=[
            *tracking_agent_position, *fleeing_agent_position,
            *tracking_agent_orientation
        ],
                                                resolution=resolution)
        self.assertEqual(bounding_boxes,
                         ((500, 500), 66, 66, (500, 500), 66, 66))

        fleeing_agent_position = [0, 3, 1]
        bounding_boxes = calculate_bounding_box(state=[
            *tracking_agent_position, *fleeing_agent_position,
            *tracking_agent_orientation
        ],
                                                resolution=resolution)
        self.assertEqual(bounding_boxes,
                         ((500, 500), 66, 66, (166, 500), 63, 66))

        fleeing_agent_position = [2, 3, 1]
        bounding_boxes = calculate_bounding_box(state=[
            *tracking_agent_position, *fleeing_agent_position,
            *tracking_agent_orientation
        ],
                                                resolution=resolution)
        self.assertEqual(bounding_boxes,
                         ((500, 500), 66, 66, (833, 500), 63, 66))

        fleeing_agent_position = [1, 4, 1]
        tracking_agent_orientation = [0.3, 0, 0]
        bounding_boxes = calculate_bounding_box(state=[
            *tracking_agent_position, *fleeing_agent_position,
            *tracking_agent_orientation
        ],
                                                resolution=resolution)
        self.assertEqual(bounding_boxes,
                         ((500, 500), 66, 66, (190, 500), 50, 52))
Example #4
    def get_action(self,
                   inputs,
                   train: bool = False,
                   agent_id: int = -1) -> Action:
        positions = np.squeeze(self.process_inputs(inputs))
        try:
            bb = calculate_bounding_box(inputs, orientation=(0, 0, 1))
            inputs_bb = (bb[3][0], bb[3][1], bb[4], bb[5])
            inputs = ((bb[3][0] - 200) / 40, (bb[3][1] - 200) / 40)
            inputs = np.squeeze(self.process_inputs(inputs))
            self.previous_input = inputs
        except TypeError:
            inputs = self.previous_input
            inputs_bb = (200, 200, 40, 40)
        if agent_id == 0:  # tracking agent ==> tracking_linear_y
            output = self.sample(inputs,
                                 train=train).clamp(min=self.action_min,
                                                    max=self.action_max)
            actions = np.stack(
                [output.data.cpu().numpy().squeeze(), 0, 0, 0, 0, 0, 0, 0])
        elif agent_id == 1:  # fleeing agent ==> fleeing_linear_y
            output = self.sample(inputs, train=train,
                                 adversarial=True).clamp(min=self.action_min,
                                                         max=self.action_max)
            actions = np.stack(
                [0, 0, 0,
                 output.data.cpu().numpy().squeeze(), 0, 0, 0, 0])
        else:
            output = self.action_max * self.sample(
                inputs, train=train, adversarial=False)
            adversarial_output = self.action_max * self.sample(
                inputs, train=train, adversarial=True)

            run_action = np.squeeze(
                get_slow_run_ros(inputs_bb, self._playfield_size))

            #actions = np.stack([*output.data.cpu().numpy().squeeze(), 0,
            #                    *adversarial_output.data.cpu().numpy().squeeze(), 0,
            #                    0, 0], axis=-1)
            print(inputs_bb)  # debug: inspect the bounding-box input
            actions = np.stack(
                [*output.data.cpu().numpy().squeeze(), 0, *run_action, 0, 0],
                axis=-1)
            # Hard-coded override: a fixed fleeing_linear_y, zeros elsewhere.
            actions = np.stack([0, 0, 0, 1, 0, 0, 0, 0], axis=-1)
        # actions = self.adjust_height(positions, actions)  # not necessary: the controller keeps altitude fixed
        actions = clip_action_according_to_playfield_size_flipped(
            positions.detach().numpy().squeeze(), actions,
            self._playfield_size)
        return Action(actor_name="tracking_fleeing_agent",
                      value=actions)  # assumes output [1, 8], so no batch
Example #5
    def test_intersection_over_union(self):
        tracking_agent_position = [1, 0, 1]
        tracking_agent_orientation = [0, 0, 0]
        fleeing_agent_position = [1, 3, 1]
        info = {
            'combined_global_poses':
            array_to_combined_global_pose([
                *tracking_agent_position, *fleeing_agent_position,
                *tracking_agent_orientation
            ])
        }
        result = get_iou(info)
        self.assertEqual(result, 1)

        fleeing_agent_position = [0, 3, 1]
        info = {
            'combined_global_poses':
            array_to_combined_global_pose([
                *tracking_agent_position, *fleeing_agent_position,
                *tracking_agent_orientation
            ])
        }
        result = get_iou(info)
        self.assertEqual(result, 0)

        fleeing_agent_position = [1.1, 3, 1.1]
        info = {
            'combined_global_poses':
            array_to_combined_global_pose([
                *tracking_agent_position, *fleeing_agent_position,
                *tracking_agent_orientation
            ])
        }
        result = get_iou(info)
        self.assertEqual(round(result, 3), 0.143)

        fleeing_agent_position = [1.01, 3, 1.01]
        info = {
            'combined_global_poses':
            array_to_combined_global_pose([
                *tracking_agent_position, *fleeing_agent_position,
                *tracking_agent_orientation
            ])
        }
        result = get_iou(info)
        # Bounding boxes recomputed for reference; only the IoU value is
        # asserted below.
        pos0, w0, h0, pos1, w1, h1 = calculate_bounding_box(state=[
            *tracking_agent_position, *fleeing_agent_position,
            *tracking_agent_orientation
        ])
        self.assertEqual(round(result, 3), 0.837)
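
get_iou itself is not shown here, but given the (center, width, height) box convention established above, the asserted quantity is ordinary intersection-over-union of the two agents' boxes. A self-contained sketch of that computation:

def iou(pos0, w0, h0, pos1, w1, h1):
    # Intersection-over-union of two axis-aligned boxes, each given as a
    # (center, width, height) triple in pixels.
    x_overlap = max(0.0, min(pos0[0] + w0 / 2, pos1[0] + w1 / 2)
                    - max(pos0[0] - w0 / 2, pos1[0] - w1 / 2))
    y_overlap = max(0.0, min(pos0[1] + h0 / 2, pos1[1] + h1 / 2)
                    - max(pos0[1] - h0 / 2, pos1[1] - h1 / 2))
    intersection = x_overlap * y_overlap
    union = w0 * h0 + w1 * h1 - intersection
    return intersection / union if union > 0 else 0.0
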
Example #6
    def _publish_combined_global_poses(self, data: np.ndarray) -> None:
        resolution = (100, 100)
        pos0, w0, h0, pos1, w1, h1 = calculate_bounding_box(
            state=data, resolution=resolution)
        frame = np.zeros(resolution)
        # pos0/pos1 are (x, y) box centers (see test_bounding_box above):
        # rows are indexed by y and height, columns by x and width.
        frame[pos0[1] - h0 // 2:pos0[1] + h0 // 2,
              pos0[0] - w0 // 2:pos0[0] + w0 // 2] = 255
        frame[pos1[1] - h1 // 2:pos1[1] + h1 // 2,
              pos1[0] - w1 // 2:pos1[0] + w1 // 2] = 125

        image = Image()
        image.data = frame.astype(np.uint8).flatten().tolist()
        image.height = resolution[0]
        image.width = resolution[1]
        image.encoding = 'mono8'
        self._publisher.publish(image)
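
Both _publish_combined_global_poses variants pack a numpy frame into a sensor_msgs/Image field by field. A sketch of that packing as a standalone helper (frame_to_mono8 is an invented name); it additionally fills step, the byte length of one image row, which mono8 consumers generally expect:

import numpy as np
from sensor_msgs.msg import Image

def frame_to_mono8(frame: np.ndarray) -> Image:
    # Pack a 2D array into a mono8 sensor_msgs/Image, mirroring the
    # field-by-field packing above.
    image = Image()
    image.height, image.width = frame.shape
    image.encoding = 'mono8'
    image.step = frame.shape[1]  # one byte per pixel in mono8
    image.data = frame.astype(np.uint8).flatten().tolist()
    return image
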
Example #7
    def critic(self, inputs, train: bool = False) -> torch.Tensor:
        if len(inputs[0]) == 9:
            for index in range(len(inputs)):
                try:
                    bb = calculate_bounding_box(np.asarray(inputs[index]),
                                                orientation=(0, 0, 1))
                    inputs[index] = torch.Tensor([(bb[3][0] - 200) / 40,
                                                  (bb[3][1] - 200) / 40])
                except TypeError:
                    if index == 0:
                        inputs[index] = torch.Tensor([0, 0])
                    else:
                        inputs[index] = inputs[index - 1]
        self._critic.train()
        inputs = np.squeeze(self.process_inputs(inputs=inputs))
        return self._critic(inputs)
Example #8
    def critic(self, inputs, train: bool = False) -> torch.Tensor:
        if len(inputs[0]) == 9:
            for index in range(len(inputs)):
                try:
                    bb = calculate_bounding_box(np.asarray(inputs[index]))
                    inputs[index] = torch.Tensor([(bb[3][0] - 200) / 40])
                except (TypeError, IndexError):
                    if index == 0:
                        inputs[index] = torch.Tensor([0])
                    else:
                        inputs[index] = inputs[index - 1]
        self._critic.train()
        inputs = self.process_inputs(inputs=inputs)
        return self._critic(inputs)
Example #9
    def _publish_combined_global_poses(self, data: np.ndarray) -> None:
        resolution = (400, 400)
        pos0, w0, h0, pos1, w1, h1 = calculate_bounding_box(
            state=data, orientation=(0, 0, 1), resolution=resolution)
        frame = np.zeros(resolution)
        # Rows are indexed by y and height, columns by x and width.
        frame[pos0[1] - h0 // 2:pos0[1] + h0 // 2,
              pos0[0] - w0 // 2:pos0[0] + w0 // 2] = 255
        try:
            frame[pos1[1] - h1 // 2:pos1[1] + h1 // 2,
                  pos1[0] - w1 // 2:pos1[0] + w1 // 2] = 125
        except TypeError:
            # No box for the fleeing agent (fields are None); skip drawing.
            pass
        image = Image()
        image.data = frame.astype(np.uint8).flatten().tolist()
        image.height = resolution[0]
        image.width = resolution[1]
        image.encoding = 'mono8'
        self._publisher.publish(image)
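
An explicit-check alternative to the except TypeError guard above, as a sketch (fill_if_visible is an invented name): testing for None directly avoids silently swallowing unrelated TypeErrors raised inside the guarded block.

def fill_if_visible(frame, center, width, height, value):
    # Draw a centered box only when calculate_bounding_box actually
    # produced one; any of the fields may be None otherwise.
    if center is None or width is None or height is None:
        return
    frame[center[1] - height // 2:center[1] + height // 2,
          center[0] - width // 2:center[0] + width // 2] = value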