Example #1
    def __init__(self,
                 environment_id=None,
                 correct_order=None,
                 fragsize=-1,
                 segment=True,
                 overlap=0):
        """
        Initializes a fragment action object.

        Args:
            environment_id (str, optional): Environment ID of the strategy this object is a part of
            correct_order (bool, optional): Whether or not the fragments/segments should be returned in the correct order
            fragsize (int, optional): The index at which this packet should be cut. Defaults to -1, which cuts it in half.
            segment (bool, optional): Whether to perform TCP segmentation (True) or IP fragmentation (False)
            overlap (int, optional): How many bytes the fragments/segments should overlap
        """
        Action.__init__(self, "fragment", "out")
        self.enabled = True
        self.branching = True
        self.terminal = False
        self.fragsize = fragsize
        self.segment = segment
        self.overlap = overlap
        if correct_order is None:
            self.correct_order = self.get_rand_order()
        else:
            self.correct_order = correct_order
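A hedged usage sketch of the constructor above; the class name FragmentAction and its import path are assumptions (only the parameters appear in the snippet):

# Hypothetical usage: segment an outbound TCP packet at byte 20, keep the
# segments in order, and overlap them by 4 bytes.
from actions.fragment import FragmentAction

frag = FragmentAction(fragsize=20, segment=True, correct_order=True, overlap=4)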
Example #2
    def __init__(self,
                 environment_id=None,
                 field=None,
                 tamper_type=None,
                 tamper_value=None,
                 tamper_proto="TCP"):
        """
        Creates a tamper object.

        Args:
            environment_id (str, optional): environment_id of a previously run strategy, used to find packet captures
            field (str, optional): field that the object will tamper. If not set, all the parameters are chosen randomly
            tamper_type (str, optional): primitive this tamper will use ("corrupt")
            tamper_value (str, optional): value to tamper to
            tamper_proto (str, optional): protocol we are tampering
        """
        Action.__init__(self, "tamper", "both")
        self.field = field
        self.tamper_value = tamper_value
        self.tamper_proto = actions.utils.string_to_protocol(tamper_proto)
        self.tamper_proto_str = tamper_proto

        self.tamper_type = tamper_type
        if not self.tamper_type:
            self._mutate_tamper_type()

        if not self.field:
            self._mutate(environment_id)
Example #3
 def append_episode(reward: float, bin_enum: Bin) -> None:
     e = Episode()
     a = Action(action_type='grasp')
     a.reward = reward
     a.bin = bin_enum
     e.actions.append(a)
     eh.append(e)
Example #4
    def calculate_pose(self, action: Action,
                       images: List[OrthographicImage]) -> None:
        if action.type == 'grasp':
            self.grasp_convert(action, images)
            is_safe = self.grasp_check_safety(action, images)

        elif action.type == 'shift':
            self.shift_convert(action, images)
            is_safe = self.shift_check_safety(action, images)

        elif action.type == 'place':
            self.place_convert(action, images)
            is_safe = self.place_check_safety(action, images)

        else:
            raise Exception(f'Unknown action type {action.type}.')

        if not self.action_should_be_safe:
            is_safe = True

        if not is_safe:
            action.safe = -1
        elif not np.isfinite(action.pose.translation()).all():
            action.safe = 0
        else:
            action.safe = 1
Example #5
def make_action_speech_response(game_json: GameResponse,
                                action: Action) -> str:
    """
    :param game_json: the JSON response from the game.
    :param action: the action that was parsed.
    :return: the speech response to say to the user indicating whether or not the action was performed.
    """
    # Choose from negative or positive responses depending on whether the action could be performed.
    if action_was_successful(game_json):
        log_conversation('speech reply', 'positive')
        responses = action.positive_responses(game_json)
    else:
        log_conversation('speech reply', 'negative')
        responses = action.negative_responses(game_json)

    if responses == []:
        log_conversation('speech reply',
                         'empty response list, using transcription.json')
        speech_reply = random_from_json(
            './failure_responses/transcription.json')
    else:
        random_index = randrange(0, len(responses))
        speech_reply = responses[random_index]

    log_conversation('success reply', speech_reply)
    return speech_reply
Example #6
    def __init__(self, environment_id=None):
        """
        Initializes this drop action.

        Args:
            environment_id (str, optional): Environment ID of the strategy we are a part of
        """
        Action.__init__(self, "drop", "both")
        self.terminal = True
        self.branching = False
Example #7
    def test_action(self):
        p = RobotPose()
        self.assertEqual(p.d, 0.0)

        a = Action()
        a.index = 1
        self.assertEqual(a.index, 1)

        a_data = a.__dict__
        self.assertEqual(a_data['index'], 1)
Example #8
 @classmethod
 def setUpClass(cls):
     super(FlankingDecisionRulesTestCase, cls).setUpClass()
     cls.bot1 = BotMock("bot1", 10)
     cls.bot2 = BotMock("bot2", 10)
     cls.action1 = {
         cls.bot1: Action(Action.ActionType.PREPARE_FLANKING_LEFT)}
     cls.action2 = {
         cls.bot2: Action(Action.ActionType.PREPARE_FLANKING_RIGHT)}
     cls.event1 = {cls.bot1: Event(Event.EventType.STATUS_HAS_CHANGED)}
     cls.event2 = {cls.bot2: Event(Event.EventType.STATUS_HAS_CHANGED)}
Example #9
 def __init__(self, start_ttl=1, end_ttl=64, environment_id=None):
     Action.__init__(self, "trace", "out")
     self.terminal = True
     self.branching = False
     self.start_ttl = start_ttl
     self.end_ttl = end_ttl
     # Since running this action might take enough time that additional packets
     # get generated, only allow this action to run once
     self.ran = False
     # Define a socket
     self.socket = conf.L3socket(iface=actions.utils.get_interface())
Example #10
 def __init__(self,
              environment_id=None,
              field=None,
              tamper_type=None,
              tamper_value=None,
              tamper_proto="TCP"):
     Action.__init__(self, "tamper", "both")
     self.field = field
     self.tamper_value = tamper_value
     self.tamper_proto = actions.utils.string_to_protocol(tamper_proto)
     self.tamper_proto_str = tamper_proto
     self.tamper_type = tamper_type
Example #11
    def __init__(self, time=1, environment_id=None):
        """
        Initializes the sleep action.

        Args:
            time (float): How long to delay before sending the packet
            environment_id (str, optional): Environment ID of the strategy this action is a part of
        """
        Action.__init__(self, "sleep", "out")
        self.terminal = False
        self.branching = False
        self.time = time
Example #12
    def place(self,
              current_episode: Episode,
              action_id: int,
              action: Action,
              action_frame: Affine,
              image_frame: Affine,
              place_bin=None):
        place_bin = place_bin if place_bin else self.current_bin

        self.move_to_safety(self.md)

        md_approach_down = MotionData().with_dynamics(
            0.22).with_z_force_condition(7.0)
        md_approach_up = MotionData().with_dynamics(
            1.0).with_z_force_condition(20.0)

        action_approach_affine = Affine(z=Config.approach_distance_from_pose)
        action_approach_frame = action_frame * action_approach_affine

        if Config.release_in_other_bin:
            self.move_to_release(self.md, target=action_approach_frame)
        else:
            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

        self.robot.move_relative_cartesian(Frames.gripper,
                                           action_approach_affine.inverse(),
                                           md_approach_down)

        if md_approach_down.did_break:
            self.robot.recover_from_errors()
            action.collision = True
            self.robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001),
                                               md_approach_up)

        action.final_pose = RobotPose(
            affine=(image_frame.inverse() *
                    self.robot.current_pose(Frames.gripper)))

        action.pose.d = self.gripper.width()
        self.gripper.release(action.pose.d + 0.01)  # [m]

        if Config.mode is not Mode.Perform:
            self.move_to_safety(md_approach_up)

        if Config.mode is Mode.Measure and Config.take_after_images:
            self.robot.move_cartesian(Frames.camera, image_frame, self.md)
            self.last_after_images = self.take_images(current_bin=place_bin)
            self.saver.save_image(self.last_after_images,
                                  current_episode.id,
                                  action_id,
                                  'after',
                                  action=action)
Example #13
    def __init__(self,
                 environment_id=None,
                 field=None,
                 tamper_type=None,
                 tamper_value=None,
                 tamper_proto="TCP"):
        Action.__init__(self, "tamper", "both")
        self.field = field
        self.tamper_value = tamper_value
        self.tamper_proto = actions.utils.string_to_protocol(tamper_proto)
        self.tamper_proto_str = tamper_proto

        self.tamper_type = tamper_type
        if not self.tamper_type:
            self.tamper_type = random.choice(["corrupt", "replace"])
Example #14
 def init_store(self):
     reducers = self.reducers
     action = Action(None)
     new_state = {}
     for key in reducers:
         new_state[key] = reducers[key](action)
     return new_state
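The init_store pattern above builds the initial state by running every registered reducer once with a "null" action. A minimal, self-contained sketch of that idea; the Action stub and both reducers are illustrative assumptions, not code from the source:

class Action:
    def __init__(self, action_type, payload=None):
        self.type = action_type
        self.payload = payload

def counter_reducer(action, state=0):
    # Returns its default state for any action it does not handle.
    return state + 1 if action.type == 'increment' else state

def log_reducer(action, state=None):
    state = state if state is not None else []
    return state + [action.payload] if action.type == 'log' else state

reducers = {'counter': counter_reducer, 'log': log_reducer}

# Mirrors init_store: each reducer sees Action(None) and returns its default.
initial_state = {key: reducers[key](Action(None)) for key in reducers}
print(initial_state)  # {'counter': 0, 'log': []}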
Example #15
    def infer(self, images: List[OrthographicImage],
              method: SelectionMethod) -> Action:
        if self.monte_carlo:  # Adapt monte carlo progress parameter s
            epoch_in_database = Loader.get_episode_count(Config.grasp_database)
            s_not_bounded = (epoch_in_database - 3500) * 1 / (4500 - 3500)
            self.inference.current_s = max(min(s_not_bounded, 1.0), 0.0)

        current_model_st_mtime = Loader.get_model_path(
            self.model).stat().st_mtime
        if self.watch_for_model_modification and current_model_st_mtime > self.model_last_modified + 0.5:  # [s]
            logger.warning(f'Reload model {self.model}.')
            try:
                self.inference.model = Loader.get_model(
                    self.model, output_layer=self.output_layer)
                self.model_last_modified = Loader.get_model_path(
                    self.model).stat().st_mtime
            except OSError:
                logger.info('Could not load model, probably the file is locked.')

        if len(images) == 3:
            images[2].mat = images[2].mat[:, :, ::-1]  # BGR to RGB

        action = self.inference.infer(images, method)
        self.indexer.to_action(action)

        estimated_reward_lower_than_threshold = action.estimated_reward < Config.bin_empty_at_max_probability
        bin_empty = estimated_reward_lower_than_threshold and Epoch.selection_method_should_be_high(
            method)

        if bin_empty:
            return Action('bin_empty', safe=1)

        self.converter.calculate_pose(action, images)
        return action
Example #16
 def conclude(self, actions, events):
     # TODO
     # I'm too long, fix me!
     # Check if flanking right and left were commanded and executed.
     flanking_done = 0
     flanking_expected = 0
     actions_done = []
     events_serviced = []
     new_orders = []
     flanked_units = []
     for action in actions:
         key = next(iter(action))  # the single bot key in this action dict
         if (action[key].action_type
                 == Action.ActionType.PREPARE_FLANKING_LEFT
                 or action[key].action_type
                 == Action.ActionType.PREPARE_FLANKING_RIGHT):
             flanking_expected += 1
             flanked_units.append(key)
             actions_done.append(action)
         related_event = get_event_from_key(key, events)
         if related_event is not None and related_event[
                 key].event_type == Event.EventType.STATUS_HAS_CHANGED:
             flanking_done += 1
             events_serviced.append(related_event)
     if flanking_expected == flanking_done:
         # clear events and action
         for action in actions_done:
             actions.remove(action)
         for event in events_serviced:
             events.remove(event)
         for flanking_unit in flanked_units:
             new_orders.append(
                 {flanking_unit: Action(Action.ActionType.ATTACK_BASE)})
     return new_orders
Example #17
 def __str__(self):
     """
     Returns a string representation.
     """
     s = Action.__str__(self)
     s += "{%d:%d}" % (self.start_ttl, self.end_ttl)
     return s
Example #18
    def get_action(cls,
                   collection: str,
                   episode_id: str,
                   action_id: int,
                   suffix: Union[str, List[str]] = None) -> Any:
        episode = Loader.get_episode(collection, episode_id)

        if not episode:
            raise Exception(f'Episode {episode_id} not found')

        if not episode['actions'] or not (0 <= action_id < len(
                episode['actions'])):
            raise Exception(f'Episode {episode_id} does not have enough actions.')

        action = Action(data=episode['actions'][action_id])
        if not suffix:
            return action

        if isinstance(suffix, str):
            suffix = [suffix]

        return (action, *[
            cls.get_image(
                collection, episode_id, action_id, s, images=action.images)
            for s in suffix
        ])
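Depending on the suffix argument, the classmethod above returns either a bare Action or a tuple of the action followed by one image per suffix. A hedged usage sketch; the collection name, episode id, and image suffixes below are placeholders rather than values from the source:

action = Loader.get_action('my-collection', 'episode-0001', 0)
action, image = Loader.get_action('my-collection', 'episode-0001', 0, suffix='ed-v')
action, img_a, img_b = Loader.get_action('my-collection', 'episode-0001', 0,
                                         suffix=['ed-v', 'rd-v'])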
Example #19
    def infer(self, images: List[OrthographicImage], method: SelectionMethod,
              **params) -> List[Action]:
        if self.monte_carlo:  # Adapt monte carlo progress parameter s
            epoch_in_collection = Loader.get_episode_count(Config.collection)
            s_not_bounded = (epoch_in_collection - 3500) * 1 / (4500 - 3500)
            self.inference.current_s = max(min(s_not_bounded, 1.0), 0.0)

        self.check_for_model_reload()

        if len(images) == 3:
            images[2].mat = images[2].mat[:, :, ::-1]  # BGR to RGB

        action = self.inference.infer(images, method)
        self.indexer.to_action(action)

        print(action, method)

        estimated_reward_lower_than_threshold = action.estimated_reward < Config.bin_empty_at_max_probability
        bin_empty = estimated_reward_lower_than_threshold and Epoch.selection_method_should_be_high(
            method)

        if bin_empty:
            return [Action('bin_empty', safe=1)]

        self.converter.calculate_pose(action, images)
        return [action]
Example #20
 def __str__(self):
     """
     Returns a string representation.
     """
     s = Action.__str__(self)
     s += "{%g}" % self.time
     return s
Example #21
 def test_constructor(self):
     """ Should be correctly initialize with paramters."""
     value = "value"
     action = Action(Action.ActionType.PREPARE_FLANKING_LEFT, param="value")
     self.assertIsNotNone(action)
     self.assertEqual(action.action_type,
                      Action.ActionType.PREPARE_FLANKING_LEFT)
     self.assertEqual(action.kwargs['param'], value)
Example #22
 def test_get_flanking_coordinates(self):
     reference_position = Vector2(0, 0)
     flanked_position = Vector2(0, 10)
     firing_distance = 5
     action_left = Action(Action.ActionType.PREPARE_FLANKING_LEFT,
                          flanking_distance=1.0)
     action_right = Action(Action.ActionType.PREPARE_FLANKING_RIGHT,
                           flanking_distance=1.0)
     flanking_left = self.coordinates_calculator.get_flanking_coordinates(
         reference_position, flanked_position, firing_distance, action_left)
     flanking_right = self.coordinates_calculator.get_flanking_coordinates(
         reference_position, flanked_position, firing_distance,
         action_right)
     self.assertEqual(flanking_right[0].x, -flanking_left[0].x)
     self.assertEqual(flanking_right[1].x, -flanking_left[1].x)
     self.assertEqual(flanking_right[0].y, flanking_left[0].y)
     self.assertEqual(flanking_right[1].y, flanking_left[1].y)
Example #23
    def to_action(self, action: Action) -> None:
        gripper_index = action.index // len(self.angles)
        angle_index = action.index % len(self.angles)

        aff = Affine(b=self.angles[angle_index][0], c=self.angles[angle_index][1]) * action.pose
        action.pose.b = aff.b
        action.pose.c = aff.c
        action.pose.d = self.gripper_classes[gripper_index]
        action.type = 'grasp'
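The indexer above assumes action.index packs a gripper class and a rotation angle into one integer, with index = gripper_index * len(angles) + angle_index. A small illustrative decoding; the concrete angle pairs and gripper widths are assumptions:

angles = [(0.0, 0.0), (0.4, 0.0), (0.0, 0.4)]   # hypothetical (b, c) rotations [rad]
gripper_classes = [0.05, 0.07, 0.086]           # hypothetical gripper widths [m]

index = 7
gripper_index, angle_index = divmod(index, len(angles))
print(gripper_index, angle_index)  # 2 1 -> width 0.086 m, rotation (0.4, 0.0)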
Example #24
def action(user, output, action_name):
    action = dict(item for item in ACTION_CHOICES if action_name in item)
    if action:
        function = action[action_name]
        action_class = Action(user, output)
        func = getattr(action_class, function)
        response = func()
    else:
        response = None
    return response
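The dispatcher above looks up a method name in ACTION_CHOICES and invokes it on an Action instance via getattr. A self-contained sketch of that pattern, reusing the action() function defined above; the ACTION_CHOICES pairs and the Action methods here are illustrative assumptions:

ACTION_CHOICES = [('greet', 'say_hello'), ('leave', 'say_goodbye')]

class Action:
    def __init__(self, user, output):
        self.user = user
        self.output = output

    def say_hello(self):
        return f'Hello, {self.user}!'

    def say_goodbye(self):
        return f'Goodbye, {self.user}.'

print(action('alice', None, 'greet'))  # Hello, alice!
print(action('alice', None, 'jump'))   # None (no matching action)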
Example #25
    def get_image(cls,
                  collection: str,
                  episode_id: str,
                  action_id: int,
                  suffix: str,
                  images=None,
                  image_data=None,
                  as_float=False) -> OrthographicImage:
        image = cv2.imread(
            str(cls.get_image_path(collection, episode_id, action_id, suffix)),
            cv2.IMREAD_UNCHANGED)
        if image is None:
            raise FileNotFoundError(
                f'Image {collection} {episode_id} {action_id} {suffix} not found.'
            )

        if not as_float and image.dtype == np.uint8:
            image = image.astype(np.uint16)
            image *= 255
        elif as_float:
            mult = 255 if image.dtype == np.uint8 else 255 * 255
            image = image.astype(np.float32)
            image /= mult

        if image_data:
            image_data_result = {
                'info': image_data['info'],
                'pose': RobotPose(data=image_data['pose']),
            }

        elif images:
            image_data_result = images[suffix]

        else:
            episode = cls.database[collection].find_one({'id': episode_id},
                                                        {'actions.images': 1})
            if not episode or not episode['actions'] or not (
                    0 <= action_id < len(episode['actions'])):
                raise Exception(
                    f'Internal mismatch: image {collection} {episode_id} {action_id} {suffix} not found.'
                )

            image_data_result = Action(
                data=episode['actions'][action_id]).images[suffix]

        return OrthographicImage(
            image,
            image_data_result['info']['pixel_size'],
            image_data_result['info']['min_depth'],
            image_data_result['info']['max_depth'],
            suffix.split('-')[0],
            image_data_result['pose'].to_array(),
            # list(image_data_result['pose'].values()),
            # Affine(image_data_result['pose']).to_array(),
        )
Example #26
    def __init__(self,
                 environment_id=None,
                 correct_order=None,
                 fragsize=-1,
                 segment=True):
        '''
        correct_order specifies if the fragmented packets should come in the correct order
        fragsize specifies the index at which the packet should be cut; -1 (the default) cuts the packet in half
        '''
        Action.__init__(self, "fragment", "out")
        self.enabled = True
        self.branching = True
        self.terminal = False
        self.fragsize = fragsize
        self.segment = segment

        if correct_order is None:
            self.correct_order = self.get_rand_order()
        else:
            self.correct_order = correct_order
Example #27
    def __init__(self, start_ttl=1, end_ttl=64, environment_id=None):
        """
        Initializes the trace action.

        Args:
            start_ttl (int): Starting TTL to use
            end_ttl (int): TTL to end with
            environment_id (str, optional): Environment ID associated with the strategy we are a part of
        """
        Action.__init__(self, "trace", "out")
        self.enabled = True
        self.terminal = True
        self.branching = False
        self.start_ttl = start_ttl
        self.end_ttl = end_ttl
        # Since running this action might take enough time that additional packets
        # get generated, only allow this action to run once
        self.ran = False
        # Define a socket
        self.socket = conf.L3socket(iface=actions.utils.get_interface())
Example #28
 def __str__(self):
     """
     Returns a string representation with the fragsize
     """
     s = Action.__str__(self)
     protocol = "tcp" if self.segment else "ip"
     s += "{%s:%d:%s}" % (protocol, self.fragsize, self.correct_order)
     return s
Example #29
def place(
        robot: Robot,
        gripper: Gripper,
        current_episode: Episode,
        current_bin: Bin,
        action: Action,
        action_frame: Affine,
        grasp_action: Action,
        image_frame: Affine,
        camera: Camera,
        saver: Saver,
        md: MotionData
    ) -> None:

    move_to_safety(robot, md)

    md_approach_down = MotionData().with_dynamics(0.3).with_z_force_condition(7.0)
    md_approach_up = MotionData().with_dynamics(1.0).with_z_force_condition(20.0)

    action_approach_affine = Affine(z=Config.approach_distance_from_pose)
    action_approach_frame = action_frame * action_approach_affine

    robot.move_cartesian(Frames.gripper, action_approach_frame, md)
    robot.move_relative_cartesian(Frames.gripper, action_approach_affine.inverse(), md_approach_down)

    if md_approach_down.did_break:
        robot.recover_from_errors()
        action.collision = True
        robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001), md_approach_up)

    action.final_pose = RobotPose(affine=(image_frame.inverse() * robot.current_pose(Frames.gripper)))

    gripper.release(grasp_action.final_pose.d + 0.006)  # [m]

    if Config.mode is not Mode.Perform:
        move_to_safety(robot, md_approach_up)

    if Config.mode is Mode.Measure and Config.take_after_images and Config.release_in_other_bin:
        robot.move_cartesian(Frames.camera, image_frame, md)
        saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)
Example #30
    def test_shift_conversion(self):
        conv = Converter(shift_z_offset=0.0, box=self.box)

        image = OrthographicImage(
            self.read_image(self.file_path / self.image_name), 2000.0, 0.22,
            0.4, '', Config.default_image_pose)

        action = Action()
        action.type = 'shift'
        action.pose = RobotPose()
        action.pose.x = -0.02
        action.pose.y = 0.105
        action.pose.a = 1.52
        action.pose.d = 0.078
        action.index = 1
        action.shift_motion = [0.03, 0.0]

        draw_pose(image, action.pose, convert_to_rgb=True)
        draw_around_box(image, box=self.box, draw_lines=True)
        imageio.imwrite(str(self.file_path / 'gen-shift-draw-pose.png'),
                        image.mat)

        self.assertTrue(conv.shift_check_safety(action, [image]))

        conv.calculate_pose(action, [image])
        self.assertLess(action.pose.z, 0.0)