Example #1
def place(
        robot: Robot,
        gripper: Gripper,
        current_episode: Episode,
        current_bin: Bin,
        action: Action,
        action_frame: Affine,
        grasp_action: Action,
        image_frame: Affine,
        camera: Camera,
        saver: Saver,
        md: MotionData
    ) -> None:

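    # Retract the gripper to a safe height before moving towards the place pose.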
    move_to_safety(robot, md)

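    # Force-guarded motion profiles: the slow descent aborts when the z-force exceeds about 7 N, the fast retract at 20 N.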
    md_approach_down = MotionData().with_dynamics(0.3).with_z_force_condition(7.0)
    md_approach_up = MotionData().with_dynamics(1.0).with_z_force_condition(20.0)

    action_approch_affine = Affine(z=Config.approach_distance_from_pose)
    action_approach_frame = action_frame * action_approch_affine

    robot.move_cartesian(Frames.gripper, action_approach_frame, md)
    robot.move_relative_cartesian(Frames.gripper, action_approch_affine.inverse(), md_approach_down)

    if md_approach_down.did_break:
        robot.recover_from_errors()
        action.collision = True
        robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001), md_approach_up)

    action.final_pose = RobotPose(affine=(image_frame.inverse() * robot.current_pose(Frames.gripper)))

    gripper.release(grasp_action.final_pose.d + 0.006)  # [m]

    if Config.mode is not Mode.Perform:
        move_to_safety(robot, md_approach_up)

    if Config.mode is Mode.Measure and Config.take_after_images and Config.release_in_other_bin:
        robot.move_cartesian(Frames.camera, image_frame, md)
        saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)
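
The descend-until-contact sequence used above reappears in every action primitive below. As a minimal sketch, assuming the same project types (Robot, Action, Affine, Frames, MotionData), it could be factored into a helper; the name approach_until_contact is hypothetical:

def approach_until_contact(robot: Robot, action: Action, approach_affine: Affine,
                           md_down: MotionData, md_up: MotionData) -> None:
    # Descend along the gripper z-axis by the approach distance, stopping early if the
    # z-force condition of md_down triggers.
    robot.move_relative_cartesian(Frames.gripper, approach_affine.inverse(), md_down)

    if md_down.did_break:
        # Contact was detected: clear the resulting robot error, flag the collision and back off 1 mm.
        robot.recover_from_errors()
        action.collision = True
        robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001), md_up)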
Example #2
def shift(
        robot: Robot,
        gripper: Gripper,
        current_episode: Episode,
        current_bin: Bin,
        action: Action,
        action_frame: Affine,
        image_frame: Affine,
        camera: Camera,
        saver: Saver,
        md: MotionData
    ) -> None:
    md_approach_down = MotionData().with_dynamics(0.15).with_z_force_condition(6.0)
    md_approach_up = MotionData().with_dynamics(0.6).with_z_force_condition(20.0)
    md_shift = MotionData().with_dynamics(0.1).with_xy_force_condition(10.0)

    action_approch_affine = Affine(z=Config.approach_distance_from_pose)
    action_approach_frame = action_approch_affine * action_frame

    try:
        process_gripper = Process(target=gripper.move, args=(action.pose.d, ))
        process_gripper.start()

        robot.move_cartesian(Frames.gripper, action_approach_frame, md)

        process_gripper.join()
    except OSError:
        gripper.move(0.08)
        robot.move_cartesian(Frames.gripper, action_approach_frame, md)

    robot.move_relative_cartesian(Frames.gripper, action_approch_affine.inverse(), md_approach_down)

    if md_approach_down.did_break:
        robot.recover_from_errors()
        action.collision = True
        robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001), md_approach_up)

    robot.move_relative_cartesian(Frames.gripper, Affine(x=action.shift_motion[0], y=action.shift_motion[1]), md_shift)
    robot.move_relative_cartesian(Frames.gripper, action_approch_affine, md_approach_up)
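
Both shift() above and grasp() in Example #4 pre-open the gripper while the arm is still moving to the approach frame. A condensed sketch of that pattern, again assuming the project types and with a hypothetical helper name:

from multiprocessing import Process

def move_with_preopened_gripper(robot: Robot, gripper: Gripper, approach_frame: Affine,
                                width: float, md: MotionData) -> None:
    try:
        # Drive the gripper to the planned width in a separate process while the arm
        # moves to the approach frame, so both motions overlap.
        process_gripper = Process(target=gripper.move, args=(width, ))
        process_gripper.start()

        robot.move_cartesian(Frames.gripper, approach_frame, md)

        process_gripper.join()
    except OSError:
        # Fallback if the gripper process cannot be started: open to a fixed 0.08 m and move sequentially.
        gripper.move(0.08)
        robot.move_cartesian(Frames.gripper, approach_frame, md)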
Example #3
def move_to_release(robot: Robot, current_bin: Bin, md: MotionData) -> bool:
    possible_random_affine = Affine()
    if Config.random_pose_before_release:
        possible_random_affine = Config.max_random_affine_before_release.get_inner_random()

    robot.recover_from_errors()

    if Config.mode is Mode.Measure:
        move_to_safety(robot, md)

    if Config.release_in_other_bin:
        if Config.release_as_fast_as_possible:
            waypoints = [
                Waypoint(
                    Frames.release_fastest,
                    Waypoint.ReferenceType.ABSOLUTE
                )
            ]
        else:
            waypoints = [
                Waypoint(
                    Frames.release_midway,
                    Waypoint.ReferenceType.ABSOLUTE
                ),
                Waypoint(
                    Frames.get_release_frame(Frames.get_next_bin(current_bin)) * possible_random_affine,
                    Waypoint.ReferenceType.ABSOLUTE
                )
            ]
        return robot.move_waypoints_cartesian(Frames.gripper, waypoints, MotionData())

    return robot.move_cartesian(
        Frames.gripper,
        Frames.get_release_frame(current_bin) * possible_random_affine,
        MotionData()
    )
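
move_to_safety() is called throughout these examples but only defined as a method of the Experiment class in Example #6. A module-level equivalent that mirrors that method would look roughly like this:

def move_to_safety(robot: Robot, md: MotionData) -> bool:
    # Lift the gripper straight up until its z coordinate is at least 0.16 m.
    move_up = max(0.0, 0.16 - robot.current_pose(Frames.gripper).z)
    return robot.move_relative_cartesian(Frames.gripper, Affine(z=move_up), md)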
Example #4
def grasp(
        robot: Robot,
        gripper: Gripper,
        current_episode: Episode,
        current_bin: Bin,
        action: Action,
        action_frame: Affine,
        image_frame: Affine,
        camera: Camera,
        saver: Saver,
        md: MotionData
    ) -> None:
    # md_approach_down = MotionData().with_dynamics(0.12).with_z_force_condition(7.0)
    # md_approach_up = MotionData().with_dynamics(0.6).with_z_force_condition(20.0)

    md_approach_down = MotionData().with_dynamics(0.3).with_z_force_condition(7.0)
    md_approach_up = MotionData().with_dynamics(1.0).with_z_force_condition(20.0)

    action_approch_affine = Affine(z=Config.approach_distance_from_pose)
    action_approach_frame = action_frame * action_approch_affine

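    # Pre-open the gripper to the planned grasp width while the arm moves to the approach pose.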
    try:
        process_gripper = Process(target=gripper.move, args=(action.pose.d, ))
        process_gripper.start()

        robot.move_cartesian(Frames.gripper, action_approach_frame, md)

        process_gripper.join()
    except OSError:
        gripper.move(0.08)
        robot.move_cartesian(Frames.gripper, action_approach_frame, md)

    robot.move_relative_cartesian(Frames.gripper, action_approch_affine.inverse(), md_approach_down)

    if md_approach_down.did_break:
        robot.recover_from_errors()
        action.collision = True
        robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001), md_approach_up)

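    # Record the pose actually reached, expressed in the image frame, so it can be stored with the action.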
    action.final_pose = RobotPose(affine=(image_frame.inverse() * robot.current_pose(Frames.gripper)))

    first_grasp_successful = gripper.clamp()
    if first_grasp_successful:
        logger.info('Grasp successful at first.')
        robot.recover_from_errors()

        # Retract 1.5 times the approach distance after the successful clamp
        action_approch_affine = Affine(z=1.5*Config.approach_distance_from_pose)

        move_up_success = robot.move_relative_cartesian(Frames.gripper, action_approch_affine, md_approach_up)
        if move_up_success and not md_approach_up.did_break:
            if Config.mode is Mode.Measure and Config.take_after_images and not Config.release_in_other_bin:
                robot.move_cartesian(Frames.camera, image_frame, md)
                saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)

            if Config.file_objects:
                raise Exception('File objects not implemented!')

            if Config.release_during_grasp_action:
                move_to_release_success = move_to_release(robot, current_bin, md)
                if move_to_release_success:
                    if gripper.is_grasping():
                        action.reward = 1.0
                        action.final_pose.d = gripper.width()

                    if Config.mode is Mode.Perform:
                        gripper.release(action.final_pose.d + 0.002)  # [m]
                    else:
                        gripper.release(action.pose.d + 0.002)  # [m]
                        move_to_safety(robot, md_approach_up)

                    if Config.mode is Mode.Measure and Config.take_after_images and Config.release_in_other_bin:
                        robot.move_cartesian(Frames.camera, image_frame, md)
                        saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)
            else:
                if Config.mode is not Mode.Perform:
                    move_to_safety(robot, md_approach_up)

                if Config.mode is Mode.Measure and Config.take_after_images and Config.release_in_other_bin:
                    robot.move_cartesian(Frames.camera, image_frame, md)
                    saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)

        else:
            gripper.release(action.pose.d + 0.002)  # [m]

            robot.recover_from_errors()
            robot.move_relative_cartesian(Frames.gripper, action_approch_affine, md_approach_up)
            move_to_safety_success = move_to_safety(robot, md_approach_up)
            if not move_to_safety_success:
                robot.recover_from_errors()
                robot.recover_from_errors()
                move_to_safety(robot, md_approach_up)

            gripper.move(gripper.max_width)

            move_to_safety(robot, md_approach_up)
            if Config.mode is Mode.Measure and Config.take_after_images:
                robot.move_cartesian(Frames.camera, image_frame, md)
                saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)

    else:
        logger.info('Grasp not successful.')
        gripper.release(gripper.width() + 0.002)  # [m]

        robot.recover_from_errors()
        move_up_successful = robot.move_relative_cartesian(Frames.gripper, action_approch_affine, md_approach_up)

        if md_approach_up.did_break or not move_up_successful:
            gripper.release(action.pose.d)  # [m]

            robot.recover_from_errors()
            robot.move_relative_cartesian(Frames.gripper, action_approch_affine, md_approach_up)
            move_to_safety(robot, md_approach_up)

        if Config.mode is Mode.Measure and Config.take_after_images:
            robot.move_cartesian(Frames.camera, image_frame, md)
            saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)
Example #5
def bin_picking():
    # agent = Agent()
    # agent.inference.current_type = 2

    # agent = AgentShift()

    agent = AgentPredict(prediction_model=Loader.get_model('cube-1', 'predict-bi-gen-5'))
    agent.grasp_inference.current_type = 2

    camera = Camera(camera_suffixes=Config.camera_suffixes)
    episode_history = EpisodeHistory()
    gripper = Gripper('172.16.0.2', Config.gripper_speed)
    robot = Robot('panda_arm', Config.general_dynamics_rel)
    saver = Saver(Config.database_url, Config.grasp_database)

    current_bin = Config.start_bin

    md = MotionData().with_dynamics(1.0)

    gripper.stop()

    robot.recover_from_errors()
    move_to_safety(robot, md)
    move_joints_successful = robot.move_joints(Frames.bin_joint_values[current_bin], md)

    if not move_joints_successful:
        gripper.move(0.07)

        robot.recover_from_errors()
        move_to_safety(robot, md)
        move_joints_successful = robot.move_joints(Frames.bin_joint_values[current_bin], md)

    if Config.mode is Mode.Measure and not Config.home_gripper:
        logger.warning('Want to measure without homing gripper?')
    elif Config.mode is Mode.Measure and Config.home_gripper:
        gripper.homing()

    move_to_safety(robot, md)
    gripper.homing()

    overall_start = time.time()

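    # Main data-collection loop: iterate over the configured epochs and run one episode per inner-loop pass.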
    for epoch in Config.epochs:
        while episode_history.total() < epoch.number_episodes:
            current_episode = Episode()
            current_selection_method = epoch.get_selection_method()
            if Config.mode in [Mode.Evaluate, Mode.Perform]:
                current_selection_method = epoch.get_selection_method_perform(episode_history.failed_grasps_since_last_success_in_bin(current_bin))

            start = time.time()

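            # Only re-image the bin when predictions cannot be reused (image prediction disabled or the agent requested re-inference).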
            if (not Config.predict_images) or agent.reinfer_next_time:
                robot.recover_from_errors()
                robot.move_joints(Frames.bin_joint_values[current_bin], md)

                b, c = random.choice(Config.overview_image_angles) if Config.lateral_overview_image else (0, 0)
                camera_frame_overview = Frames.get_camera_frame(current_bin, b=b, c=c)
                if not Frames.is_camera_frame_safe(camera_frame_overview):
                    continue

                if Config.mode is Mode.Measure or Config.lateral_overview_image:
                    robot.move_cartesian(Frames.camera, camera_frame_overview, md)

                image_frame = robot.current_pose(Frames.camera)
                images = take_images(current_bin, camera, robot, image_frame=image_frame)

                if not Frames.is_gripper_frame_safe(robot.current_pose(Frames.gripper)):
                    logger.info('Image frame not safe!')
                    robot.recover_from_errors()
                    continue

            input_images = list(filter(lambda i: i.camera in Config.model_input_suffixes, images))

            # if episode_history.data:
            #     agent.successful_grasp_before = episode_history.data[-1].actions[0].reward > 0

            action = agent.infer(input_images, current_selection_method)
            action.images = {}
            action.save = True

            if Config.mode is Mode.Measure:
                saver.save_image(images, current_episode.id, 'v', action=action)

            if Config.mode is not Mode.Perform:
                saver.save_action_plan(action, current_episode.id)

            logger.info(f'Action: {action} at time {time.time() - overall_start:0.1f}')

            action.reward = 0.0
            action.collision = False
            action.bin = current_bin

            if Config.set_zero_reward:
                action.safe = -1

            if action.type == 'bin_empty':
                action.save = False

            elif action.type == 'new_image':
                action.save = False
                agent.reinfer_next_time = True

            if action.safe == 0:
                logger.warning('Action ignored.')
                action.save = False

            else:
                if Config.mode is Mode.Measure and Config.take_lateral_images and action.save:
                    md_lateral = MotionData().with_dynamics(1.0)

                    for b, c in Config.lateral_images_angles:
                        lateral_frame = Frames.get_camera_frame(current_bin, a=action.pose.a, b=b, c=c, reference_pose=image_frame)

                        if not Frames.is_camera_frame_safe(lateral_frame) or (b == 0.0 and c == 0.0):
                            continue

                        lateral_move_success = robot.move_cartesian(Frames.camera, lateral_frame, md_lateral)  # Remove a for global b, c pose
                        if lateral_move_success:
                            saver.save_image(take_images(current_bin, camera, robot), current_episode.id, f'lateral_b{b:0.3f}_c{c:0.3f}'.replace('.', '_'), action=action)

                if action.safe == 1 and action.type not in ['bin_empty', 'new_image']:
                    action_frame = Frames.get_action_pose(action_pose=action.pose, image_pose=image_frame)

                    if Config.mode is Mode.Measure and Config.take_direct_images:
                        robot.move_cartesian(Frames.camera, Affine(z=0.308) * Affine(b=0.0, c=0.0) * action_frame, md)
                        saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'direct', action=action)

                    if action.type == 'grasp':
                        grasp(robot, gripper, current_episode, current_bin, action, action_frame, image_frame, camera, saver, md)

                    elif action.type == 'shift':
                        old_reward_around_action = 0.0
                        shift(robot, gripper, current_episode, current_bin, action, action_frame, image_frame, camera, saver, md)
                        new_reward_around_action = 0.0

                        action.reward = new_reward_around_action - old_reward_around_action

                    elif action.type == 'place':
                        last_grasp = episode_history.data[-1].actions[0]
                        action.grasp_episode_id = episode_history.data[-1].id
                        place(robot, gripper, current_episode, current_bin, action, action_frame, last_grasp, image_frame, camera, saver, md)

                elif action.safe < 0:
                    logger.info('Pose not found.')
                    action.collision = True

                    if Config.take_after_images:
                        robot.move_cartesian(Frames.camera, image_frame, md)
                        saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)

            action.execution_time = time.time() - start
            logger.info(f'Time for action: {action.execution_time:0.3f} [s]')

            if action.save:
                current_episode.actions.append(action)

                if Config.mode is Mode.Measure:
                    logger.info(f'Save episode {current_episode.id}.')
                    saver.save_episode(current_episode)

            episode_history.append(current_episode)

            logger.info(f'Episodes (reward / done / total): {episode_history.total_reward(action_type="grasp")} / {episode_history.total()} / {sum(e.number_episodes for e in Config.epochs)}')
            logger.info(f'Last success: {episode_history.failed_grasps_since_last_success_in_bin(current_bin)} cycles ago.')

            # episode_history.save_grasp_rate_prediction_step_evaluation(Config.evaluation_path)

            # Change bin
            should_change_bin_for_evaluation = (Config.mode is Mode.Evaluate and episode_history.successful_grasps_in_bin(current_bin) == Config.change_bin_at_number_of_success_grasps)
            should_change_bin = (Config.mode is not Mode.Evaluate and (episode_history.failed_grasps_since_last_success_in_bin(current_bin) >= Config.change_bin_at_number_of_failed_grasps or action.type == 'bin_empty'))
            if should_change_bin_for_evaluation or (Config.change_bins and should_change_bin):
                if Config.mode is Mode.Evaluate:
                    pass

                current_bin = Frames.get_next_bin(current_bin)
                agent.reinfer_next_time = True
                logger.info('Switch to other bin.')

                if Config.mode is not Mode.Perform and Config.home_gripper:
                    gripper.homing()

            # Retrain model
            if Config.train_model and episode_history.total() > 0 and not episode_history.total() % Config.train_model_every_number_cycles:
                logger.warning('Retrain model!')
                with open('/tmp/training.txt', 'wb') as out:
                    p = Popen([sys.executable, str(Config.train_script)], stdout=out)
                    if not Config.train_async:
                        p.communicate()

    logger.info('Finished cleanly.')
Example #6
class Experiment:
    def __init__(self):
        self.camera = Camera(camera_suffixes=Config.camera_suffixes)
        self.history = EpisodeHistory()
        self.gripper = Gripper('172.16.0.2', Config.gripper_speed,
                               Config.gripper_force)
        self.robot = Robot('panda_arm', Config.general_dynamics_rel)
        self.saver = Saver(Config.database_url, Config.collection)

        self.current_bin = Config.start_bin

        self.md = MotionData().with_dynamics(1.0)

        self.overall_start = 0

        self.last_after_images: Optional[List[OrthographicImage]] = None

    def move_to_release(self,
                        md: MotionData,
                        direct=False,
                        target=None) -> bool:
        possible_random_affine = Affine()
        if Config.random_pose_before_release:
            possible_random_affine = Config.max_random_affine_before_release.get_inner_random(
            )

        target = target if target else Frames.get_release_frame(
            Frames.get_next_bin(self.current_bin)) * possible_random_affine

        self.robot.recover_from_errors()

        if Config.mode is Mode.Measure:
            self.move_to_safety(md)

        if Config.release_in_other_bin:
            if Config.release_as_fast_as_possible:
                waypoints = [
                    Waypoint(Frames.release_fastest,
                             Waypoint.ReferenceType.ABSOLUTE)
                ]
            else:
                waypoints = [Waypoint(target, Waypoint.ReferenceType.ABSOLUTE)]

                if not direct:
                    waypoints.insert(
                        0,
                        Waypoint(Frames.release_midway,
                                 Waypoint.ReferenceType.ABSOLUTE))

            return self.robot.move_waypoints_cartesian(Frames.gripper,
                                                       waypoints, MotionData())

        return self.robot.move_cartesian(
            Frames.gripper,
            Frames.get_release_frame(self.current_bin) *
            possible_random_affine, MotionData())

    def move_to_safety(self, md: MotionData) -> bool:
        move_up = max(0.0, 0.16 - self.robot.current_pose(Frames.gripper).z)
        return self.robot.move_relative_cartesian(Frames.gripper,
                                                  Affine(z=move_up), md)

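    # Take images from all configured cameras and tag each one with the bin pose expressed in the image frame.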
    def take_images(self,
                    image_frame: Affine = None,
                    current_bin=None) -> List[OrthographicImage]:
        current_bin = current_bin if current_bin else self.current_bin

        images = self.camera.take_images()
        if not image_frame:
            image_frame = self.robot.current_pose(Frames.camera)
        pose = RobotPose(affine=(image_frame.inverse() *
                                 Frames.get_frame(current_bin)))

        for image in images:
            image.pose = pose.to_array()

        return images

    def grasp(self, current_episode: Episode, action_id: int, action: Action,
              action_frame: Affine, image_frame: Affine):
        md_approach_down = MotionData().with_dynamics(
            0.3).with_z_force_condition(7.0)
        md_approach_up = MotionData().with_dynamics(
            1.0).with_z_force_condition(20.0)

        action_approch_affine = Affine(z=Config.approach_distance_from_pose)
        action_approach_frame = action_frame * action_approch_affine

        try:
            process_gripper = Process(target=self.gripper.move,
                                      args=(action.pose.d, ))
            process_gripper.start()

            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

            process_gripper.join()
        except OSError:
            self.gripper.move(0.08)
            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

        self.robot.move_relative_cartesian(Frames.gripper,
                                           action_approch_affine.inverse(),
                                           md_approach_down)

        if md_approach_down.did_break:
            self.robot.recover_from_errors()
            action.collision = True
            self.robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001),
                                               md_approach_up)

        action.final_pose = RobotPose(
            affine=(image_frame.inverse() *
                    self.robot.current_pose(Frames.gripper)))

        first_grasp_successful = self.gripper.clamp()
        if first_grasp_successful:
            logger.info('Grasp successful at first.')
            self.robot.recover_from_errors()

            action_approch_affine = Affine(
                z=Config.approach_distance_from_pose)

            move_up_success = self.robot.move_relative_cartesian(
                Frames.gripper, action_approch_affine, md_approach_up)
            if move_up_success and not md_approach_up.did_break:
                if Config.mode is Mode.Measure and Config.take_after_images and not Config.release_in_other_bin:
                    self.robot.move_cartesian(Frames.camera, image_frame,
                                              self.md)
                    self.last_after_images = self.take_images()
                    self.saver.save_image(self.last_after_images,
                                          current_episode.id,
                                          action_id,
                                          'after',
                                          action=action)

                if Config.release_during_grasp_action:
                    move_to_release_success = self.move_to_release(self.md)
                    if move_to_release_success:
                        if self.gripper.is_grasping():
                            action.reward = 1.0
                            action.final_pose.d = self.gripper.width()

                        if Config.mode is Mode.Perform:
                            self.gripper.release(action.final_pose.d +
                                                 0.005)  # [m]
                        else:
                            self.gripper.release(action.pose.d + 0.005)  # [m]
                            self.move_to_safety(md_approach_up)

                        if Config.mode is Mode.Measure and Config.take_after_images and Config.release_in_other_bin:
                            self.robot.move_cartesian(Frames.camera,
                                                      image_frame, self.md)
                            self.last_after_images = self.take_images()
                            self.saver.save_image(self.last_after_images,
                                                  current_episode.id,
                                                  action_id,
                                                  'after',
                                                  action=action)
                else:
                    if Config.mode is not Mode.Perform:
                        self.move_to_safety(md_approach_up)

                    if self.gripper.is_grasping():
                        action.reward = 1.0
                        action.final_pose.d = self.gripper.width()

                    if Config.mode is Mode.Measure and Config.take_after_images:
                        self.robot.move_cartesian(Frames.camera, image_frame,
                                                  self.md)
                        self.last_after_images = self.take_images()
                        self.saver.save_image(self.last_after_images,
                                              current_episode.id,
                                              action_id,
                                              'after',
                                              action=action)

            else:
                self.gripper.release(action.pose.d + 0.002)  # [m]

                self.robot.recover_from_errors()
                self.robot.move_relative_cartesian(Frames.gripper,
                                                   action_approch_affine,
                                                   md_approach_up)
                move_to_safety_success = self.move_to_safety(md_approach_up)
                if not move_to_safety_success:
                    self.robot.recover_from_errors()
                    self.robot.recover_from_errors()
                    self.move_to_safety(md_approach_up)

                self.gripper.move(self.gripper.max_width)

                self.move_to_safety(md_approach_up)
                if Config.mode is Mode.Measure and Config.take_after_images:
                    self.robot.move_cartesian(Frames.camera, image_frame,
                                              self.md)
                    self.last_after_images = self.take_images()
                    self.saver.save_image(self.last_after_images,
                                          current_episode.id,
                                          action_id,
                                          'after',
                                          action=action)

        else:
            logger.info('Grasp not successful.')
            self.gripper.release(self.gripper.width() + 0.002)  # [m]

            self.robot.recover_from_errors()
            move_up_successful = self.robot.move_relative_cartesian(
                Frames.gripper, action_approch_affine, md_approach_up)

            if md_approach_up.did_break or not move_up_successful:
                self.gripper.release(action.pose.d)  # [m]

                self.robot.recover_from_errors()
                self.robot.move_relative_cartesian(Frames.gripper,
                                                   action_approch_affine,
                                                   md_approach_up)
                self.move_to_safety(md_approach_up)

            if Config.mode is Mode.Measure and Config.take_after_images:
                self.robot.move_cartesian(Frames.camera, image_frame, self.md)
                self.last_after_images = self.take_images()
                self.saver.save_image(self.last_after_images,
                                      current_episode.id,
                                      action_id,
                                      'after',
                                      action=action)

    def shift(self, current_episode: Episode, action_id: int, action: Action,
              action_frame: Affine, image_frame: Affine):
        md_approach_down = MotionData().with_dynamics(
            0.15).with_z_force_condition(6.0)
        md_approach_up = MotionData().with_dynamics(
            0.6).with_z_force_condition(20.0)
        md_shift = MotionData().with_dynamics(0.1).with_xy_force_condition(
            10.0)

        action_approch_affine = Affine(z=Config.approach_distance_from_pose)
        action_approach_frame = action_approch_affine * action_frame

        try:
            process_gripper = Process(target=self.gripper.move,
                                      args=(action.pose.d, ))
            process_gripper.start()

            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

            process_gripper.join()
        except OSError:
            self.gripper.move(0.08)
            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

        self.robot.move_relative_cartesian(Frames.gripper,
                                           action_approch_affine.inverse(),
                                           md_approach_down)

        if md_approach_down.did_break:
            self.robot.recover_from_errors()
            action.collision = True
            self.robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001),
                                               md_approach_up)

        self.robot.move_relative_cartesian(
            Frames.gripper,
            Affine(x=action.shift_motion[0], y=action.shift_motion[1]),
            md_shift)
        self.robot.move_relative_cartesian(Frames.gripper,
                                           action_approch_affine,
                                           md_approach_up)

        # Reward is set outside of this function, due to dependency on agent

    def place(self,
              current_episode: Episode,
              action_id: int,
              action: Action,
              action_frame: Affine,
              image_frame: Affine,
              place_bin=None):
        place_bin = place_bin if place_bin else self.current_bin

        self.move_to_safety(self.md)

        md_approach_down = MotionData().with_dynamics(
            0.22).with_z_force_condition(7.0)
        md_approach_up = MotionData().with_dynamics(
            1.0).with_z_force_condition(20.0)

        action_approch_affine = Affine(z=Config.approach_distance_from_pose)
        action_approach_frame = action_frame * action_approch_affine

        if Config.release_in_other_bin:
            self.move_to_release(self.md, target=action_approach_frame)
        else:
            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

        self.robot.move_relative_cartesian(Frames.gripper,
                                           action_approch_affine.inverse(),
                                           md_approach_down)

        if md_approach_down.did_break:
            self.robot.recover_from_errors()
            action.collision = True
            self.robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001),
                                               md_approach_up)

        action.final_pose = RobotPose(
            affine=(image_frame.inverse() *
                    self.robot.current_pose(Frames.gripper)))

        action.pose.d = self.gripper.width()
        self.gripper.release(action.pose.d + 0.01)  # [m]

        if Config.mode is not Mode.Perform:
            self.move_to_safety(md_approach_up)

        if Config.mode is Mode.Measure and Config.take_after_images:
            self.robot.move_cartesian(Frames.camera, image_frame, self.md)
            self.last_after_images = self.take_images(current_bin=place_bin)
            self.saver.save_image(self.last_after_images,
                                  current_episode.id,
                                  action_id,
                                  'after',
                                  action=action)

    def init(self) -> None:
        self.gripper.stop()

        self.robot.recover_from_errors()
        self.move_to_safety(self.md)
        move_joints_successful = self.robot.move_joints(
            Frames.bin_joint_values[self.current_bin], self.md)

        if not move_joints_successful:
            self.gripper.move(0.07)

            self.robot.recover_from_errors()
            self.move_to_safety(self.md)
            move_joints_successful = self.robot.move_joints(
                Frames.bin_joint_values[self.current_bin], self.md)

        if Config.mode is Mode.Measure and not Config.home_gripper:
            logger.warning('Want to measure without homing gripper?')
        elif Config.home_gripper:
            self.gripper.homing()

        self.move_to_safety(self.md)
        self.overall_start = time.time()

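    # Launch the training script as a subprocess; block on it unless asynchronous training is configured.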
    def retrain_model(self) -> None:
        with open('/tmp/training.txt', 'wb') as out:
            p = Popen([sys.executable, str(Config.train_script)], stdout=out)
            if not Config.train_async:
                p.communicate()

    def get_current_selection_method(self, epoch) -> SelectionMethod:
        if Config.mode in [Mode.Evaluate, Mode.Perform]:
            return epoch.get_selection_method_perform(
                self.history.failed_grasps_since_last_success_in_bin(
                    self.current_bin))
        return epoch.get_selection_method()

    def get_input_images(self, images):
        return list(
            filter(lambda i: i.camera in Config.model_input_suffixes, images))
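
For context, a hypothetical driver for the Experiment class, mirroring the procedural loop of Example #5; the run loop below is an assumption and not part of the original code:

if __name__ == '__main__':
    experiment = Experiment()
    experiment.init()

    for epoch in Config.epochs:
        while experiment.history.total() < epoch.number_episodes:
            # Per episode: take overview images, let the agent infer an action and then
            # dispatch to experiment.grasp() / experiment.shift() / experiment.place(),
            # mirroring the procedural loop in Example #5.
            ...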