Example #1
    @classmethod
    def get_pose_in_image(cls,
                          action_pose: RobotPose,
                          image_pose: Affine,
                          reference_pose: Affine = None) -> RobotPose:
        if np.isnan(action_pose.z):
            action_pose.z = -0.35  # [m]

        z_offset = -0.022  # [m]

        image_translation = Affine(image_pose.x, image_pose.y,
                                   image_pose.z + z_offset)
        image_rotation = Affine(a=image_pose.a, b=image_pose.b, c=image_pose.c)
        action_translation = Affine(action_pose.x, action_pose.y,
                                    action_pose.z)
        action_rotation = Affine(b=action_pose.b,
                                 c=action_pose.c) * Affine(a=action_pose.a)
        affine = (image_rotation * image_translation.inverse() *
                  action_translation * action_rotation)

        if reference_pose:
            reference_rotation = Affine(b=reference_pose.b,
                                        c=reference_pose.c).inverse()
            affine = reference_rotation * affine

        return RobotPose(affine=affine, d=action_pose.d)
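A minimal 2D sketch of the frame chain above, assuming Affine composes like a homogeneous transform. The affine_2d helper and all numbers are hypothetical stand-ins; the point is the composition order, which re-expresses the action pose relative to the image frame.

import numpy as np

# Hypothetical 2D stand-in for Affine: translation plus rotation about z.
def affine_2d(x=0.0, y=0.0, a=0.0):
    c, s = np.cos(a), np.sin(a)
    return np.array([[c, -s, x], [s, c, y], [0.0, 0.0, 1.0]])

image_translation = affine_2d(x=0.05, y=0.10)      # image position
image_rotation = affine_2d(a=0.3)                  # image orientation
action_pose = affine_2d(x=-0.02, y=0.105, a=1.52)  # action pose in the robot frame

# Same composition order as get_pose_in_image above.
pose_in_image = image_rotation @ np.linalg.inv(image_translation) @ action_pose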
Example #2
    def shift_check_safety(self, action: Action,
                           images: List[OrthographicImage]) -> bool:
        start_pose_inside_box = self.is_pose_inside_box(action.pose)

        end_pose = RobotPose(affine=(action.pose * Affine(
            x=action.shift_motion[0] * 0.2, y=action.shift_motion[1] * 0.2)))
        end_pose.d = action.pose.d

        end_pose_inside_box = self.is_pose_inside_box(end_pose)
        return start_pose_inside_box and end_pose_inside_box
Example #3
    def pose_from_index(self, index, index_shape, example_image: OrthographicImage) -> RobotPose:
        vec = self.rotate_vector((
            example_image.position_from_index(index[1], index_shape[1]),
            example_image.position_from_index(index[2], index_shape[2])
        ), self.a_space[index[0]])

        pose = RobotPose()
        pose.x = -vec[0] * self.resolution_factor * self.scale_factors[0]  # [m]
        pose.y = -vec[1] * self.resolution_factor * self.scale_factors[1]  # [m]
        pose.a = -self.a_space[index[0]]  # [rad]
        return pose
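A brief sketch of the index-to-pose mapping above. Every name below is a hypothetical stand-in: position_from_index is assumed to map a pixel index to a metric offset from the image center, rotate_vector to apply a plain 2D rotation, and a_space to be the discretized angle space.

import numpy as np

def position_from_index(index, length, pixel_size=2000.0):
    # Assumed mapping from pixel index to metric offset from the image center.
    return (index - length / 2) / pixel_size  # [m]

def rotate_vector(vec, a):
    # Assumed plain 2D rotation by angle a.
    c, s = np.cos(a), np.sin(a)
    return (c * vec[0] - s * vec[1], s * vec[0] + c * vec[1])

a_space = np.linspace(-1.484, 1.484, 20)   # hypothetical angle discretization
index = (4, 60, 110, 1)                    # (angle, pixel y, pixel x, action) index
vec = rotate_vector((position_from_index(index[1], 120),
                     position_from_index(index[2], 120)), a_space[index[0]])
x, y, a = -vec[0], -vec[1], -a_space[index[0]]  # [m], [m], [rad]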
Example #4
    def take_images(self,
                    image_frame: Affine = None,
                    current_bin=None) -> List[OrthographicImage]:
        current_bin = current_bin if current_bin else self.current_bin

        images = self.camera.take_images()
        if not image_frame:
            image_frame = self.robot.current_pose(Frames.camera)
        pose = RobotPose(affine=(image_frame.inverse() *
                                 Frames.get_frame(current_bin)))

        for image in images:
            image.pose = pose.to_array()

        return images
Example #5
    def test_shift_conversion(self):
        conv = Converter(shift_z_offset=0.0, box=self.box)

        image = OrthographicImage(
            self.read_image(self.file_path / self.image_name), 2000.0, 0.22,
            0.4, '', Config.default_image_pose)

        action = Action()
        action.type = 'shift'
        action.pose = RobotPose()
        action.pose.x = -0.02
        action.pose.y = 0.105
        action.pose.a = 1.52
        action.pose.d = 0.078
        action.index = 1
        action.shift_motion = [0.03, 0.0]

        draw_pose(image, action.pose, convert_to_rgb=True)
        draw_around_box(image, box=self.box, draw_lines=True)
        imageio.imwrite(str(self.file_path / 'gen-shift-draw-pose.png'),
                        image.mat)

        self.assertTrue(conv.shift_check_safety(action, [image]))

        conv.calculate_pose(action, [image])
        self.assertLess(action.pose.z, 0.0)
Example #6
    @classmethod
    def get_action_pose(cls, action_pose: RobotPose,
                        image_pose: Affine) -> RobotPose:
        action_translation = Affine(action_pose.x, action_pose.y,
                                    action_pose.z)
        action_rotation = Affine(a=action_pose.a,
                                 b=action_pose.b,
                                 c=action_pose.c)
        affine = image_pose * action_translation * action_rotation
        return RobotPose(affine=affine, d=action_pose.d)
Example #7
    def test_image_transformation(self):
        image = OrthographicImage(self.read_image(self.file_path / self.image_name), 2000.0, 0.22, 0.4, '', Config.default_image_pose)

        pose = RobotPose(affine=Affine(0.02, 0.0, 0.0))
        area_image = get_area_of_interest(image, pose, border_color=(0))
        imageio.imwrite(str(self.file_path / 'gen-x20-b.png'), area_image.mat)

        pose = RobotPose(affine=Affine(0.03, 0.03, 0.0))
        area_image = get_area_of_interest(image, pose, border_color=(0))
        imageio.imwrite(str(self.file_path / 'gen-x30-y30-b.png'), area_image.mat)

        pose = RobotPose(affine=Affine(0.01, 0.04, 0.0, 0.4))
        area_image = get_area_of_interest(image, pose, border_color=(0))
        imageio.imwrite(str(self.file_path / 'gen-x10-y40-a04-b.png'), area_image.mat)

        image = image.translate([0.0, 0.0, 0.05])
        image = image.rotate_x(-0.3, [0.0, 0.25])
        imageio.imwrite(str(self.file_path / 'gen-rot0_3.png'), image.mat)
Example #8
    def test_action(self):
        p = RobotPose()
        self.assertEqual(p.d, 0.0)

        a = Action()
        a.index = 1
        self.assertEqual(a.index, 1)

        a_data = a.__dict__
        self.assertEqual(a_data['index'], 1)
Example #9
def take_images(current_bin: Bin, camera: Camera, robot: Robot, image_frame: Affine = None):
    images = camera.take_images()
    if not image_frame:
        image_frame = robot.current_pose(Frames.camera)
    pose = RobotPose(affine=(image_frame.inverse() * Frames.get_frame(current_bin)))

    for image in images:
        image.pose = pose

    return images
Example #10
    @classmethod
    def get_image(cls,
                  collection: str,
                  episode_id: str,
                  action_id: int,
                  suffix: str,
                  images=None,
                  image_data=None,
                  as_float=False) -> OrthographicImage:
        image = cv2.imread(
            str(cls.get_image_path(collection, episode_id, action_id, suffix)),
            cv2.IMREAD_UNCHANGED)
        if image is None:
            raise FileNotFoundError(
                f'Image {collection} {episode_id} {action_id} {suffix} not found.'
            )

        if not as_float and image.dtype == np.uint8:
            image = image.astype(np.uint16)
            image *= 255
        elif as_float:
            mult = 255 if image.dtype == np.uint8 else 255 * 255
            image = image.astype(np.float32)
            image /= mult

        if image_data:
            image_data_result = {
                'info': image_data['info'],
                'pose': RobotPose(data=image_data['pose']),
            }

        elif images:
            image_data_result = images[suffix]

        else:
            episode = cls.database[collection].find_one({'id': episode_id},
                                                        {'actions.images': 1})
            if not episode or not episode['actions'] or not (
                    0 <= action_id < len(episode['actions'])):
                raise Exception(
                    f'Internal mismatch: image {collection} {episode_id} {action_id} {suffix} not found.'
                )

            image_data_result = Action(
                data=episode['actions'][action_id]).images[suffix]

        return OrthographicImage(
            image,
            image_data_result['info']['pixel_size'],
            image_data_result['info']['min_depth'],
            image_data_result['info']['max_depth'],
            suffix.split('-')[0],
            image_data_result['pose'].to_array(),
            # list(image_data_result['pose'].values()),
            # Affine(image_data_result['pose']).to_array(),
        )
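Note the dtype convention above: 8-bit images are expanded into the 16-bit value range, and float conversion normalizes by 255 or 255 * 255 accordingly. A minimal numpy sketch of that scaling (the array values are made up):

import numpy as np

raw = np.array([[0, 128, 255]], dtype=np.uint8)

as_uint16 = raw.astype(np.uint16) * 255            # 8-bit values spread over the 16-bit range
mult = 255 if raw.dtype == np.uint8 else 255 * 255
as_float = raw.astype(np.float32) / mult           # normalized to [0, 1]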
Example #11
    def area_of_interest(self, image, pose):
        area = get_area_of_interest_new(
            image,
            RobotPose(all_data=pose),
            size_cropped=self.size_cropped_area,
            size_result=self.size_result,
            border_color=image.value_from_depth(self.box_distance) / (255 * 255),
        )
        if len(area.mat.shape) == 2:
            return np.expand_dims(area.mat, 2)
        return area.mat
Example #12
    def place(self,
              current_episode: Episode,
              action_id: int,
              action: Action,
              action_frame: Affine,
              image_frame: Affine,
              place_bin=None):
        place_bin = place_bin if place_bin else self.current_bin

        self.move_to_safety(self.md)

        md_approach_down = MotionData().with_dynamics(
            0.22).with_z_force_condition(7.0)
        md_approach_up = MotionData().with_dynamics(
            1.0).with_z_force_condition(20.0)

        action_approch_affine = Affine(z=Config.approach_distance_from_pose)
        action_approach_frame = action_frame * action_approch_affine

        if Config.release_in_other_bin:
            self.move_to_release(self.md, target=action_approach_frame)
        else:
            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

        self.robot.move_relative_cartesian(Frames.gripper,
                                           action_approch_affine.inverse(),
                                           md_approach_down)

        if md_approach_down.did_break:
            self.robot.recover_from_errors()
            action.collision = True
            self.robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001),
                                               md_approach_up)

        action.final_pose = RobotPose(
            affine=(image_frame.inverse() *
                    self.robot.current_pose(Frames.gripper)))

        action.pose.d = self.gripper.width()
        self.gripper.release(action.pose.d + 0.01)  # [m]

        if Config.mode is not Mode.Perform:
            self.move_to_safety(md_approach_up)

        if Config.mode is Mode.Measure and Config.take_after_images:
            self.robot.move_cartesian(Frames.camera, image_frame, self.md)
            self.last_after_images = self.take_images(current_bin=place_bin)
            self.saver.save_image(self.last_after_images,
                                  current_episode.id,
                                  action_id,
                                  'after',
                                  action=action)
Example #13
def place(
        robot: Robot,
        gripper: Gripper,
        current_episode: Episode,
        current_bin: Bin,
        action: Action,
        action_frame: Affine,
        grasp_action: Action,
        image_frame: Affine,
        camera: Camera,
        saver: Saver,
        md: MotionData
    ) -> None:

    move_to_safety(robot, md)

    md_approach_down = MotionData().with_dynamics(0.3).with_z_force_condition(7.0)
    md_approach_up = MotionData().with_dynamics(1.0).with_z_force_condition(20.0)

    action_approch_affine = Affine(z=Config.approach_distance_from_pose)
    action_approach_frame = action_frame * action_approch_affine

    robot.move_cartesian(Frames.gripper, action_approach_frame, md)
    robot.move_relative_cartesian(Frames.gripper, action_approch_affine.inverse(), md_approach_down)

    if md_approach_down.did_break:
        robot.recover_from_errors()
        action.collision = True
        robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001), md_approach_up)

    action.final_pose = RobotPose(affine=(image_frame.inverse() * robot.current_pose(Frames.gripper)))

    gripper.release(grasp_action.final_pose.d + 0.006)  # [m]

    if Config.mode is not Mode.Perform:
        move_to_safety(robot, md_approach_up)

    if Config.mode is Mode.Measure and Config.take_after_images and Config.release_in_other_bin:
        robot.move_cartesian(Frames.camera, image_frame, md)
        saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)
Example #14
                        '--episode',
                        dest='episode',
                        type=str,
                        required=True)
    parser.add_argument('-ca',
                        '--camera',
                        dest='camera',
                        type=str,
                        default='ed')
    parser.add_argument('--save', action='store_true')
    args = parser.parse_args()

    action, place_after, place_goal = Loader.get_action(
        args.collection, args.episode, 1,
        [args.camera + '-after', args.camera + '-goal'])
    new_pose = RobotPose(action.pose)

    if not args.save:

        def update_image():
            after_area = get_area_of_interest_new(place_after,
                                                  action.pose,
                                                  size_cropped=(200, 200))
            goal_area = get_area_of_interest_new(place_goal,
                                                 new_pose,
                                                 size_cropped=(200, 200))

            ontop = np.zeros((200, 200, 3), dtype=np.uint8)

            if len(after_area.mat.shape) == 3:
                after_area.mat = after_area.mat[:, :, 0]
Example #15
    def infer(
        self,
        images: List[OrthographicImage],
        method: SelectionMethod,
        verbose=1,
        uncertainty_images: List[OrthographicImage] = None,
    ) -> Action:

        start = time.time()

        if method == SelectionMethod.Random:
            action = Action()
            action.index = np.random.choice(range(3))
            action.pose = RobotPose(affine=Affine(
                x=np.random.uniform(self.lower_random_pose[0],
                                    self.upper_random_pose[0]),  # [m]
                y=np.random.uniform(self.lower_random_pose[1],
                                    self.upper_random_pose[1]),  # [m]
                a=np.random.uniform(self.lower_random_pose[3],
                                    self.upper_random_pose[3]),  # [rad]
            ))
            action.estimated_reward = -1
            action.estimated_reward_std = 0.0
            action.method = method
            action.step = 0
            return action

        input_images = [self.get_images(i) for i in images]
        result = self.model.predict(input_images)

        if self.with_types:
            estimated_reward = result[0]
            types = result[1]
        else:
            estimated_reward = result

        estimated_reward_std = np.zeros(estimated_reward.shape)

        filter_method = method
        filter_measure = estimated_reward

        # Calculate model uncertainty
        if self.monte_carlo:
            rewards_sampling = [
                self.model.predict(input_images)
                for i in range(self.monte_carlo)
            ]
            estimated_reward = np.mean(rewards_sampling, axis=0)
            estimated_reward_std += self.mutual_information(rewards_sampling)

            if verbose:
                logger.info(f'Current monte carlo s: {self.current_s}')

        # Calculate input uncertainty
        if self.input_uncertainty:
            input_uncertainty_images = [
                self.get_images(i) for i in uncertainty_images
            ]

            array_estimated_unc = tkb.get_session().run(
                self.propagation_input_uncertainty,
                feed_dict={
                    self.model.input: input_images[0],
                    self.uncertainty_placeholder: input_uncertainty_images[0]
                })
            estimated_input_uncertainty = np.concatenate(array_estimated_unc,
                                                         axis=3)
            estimated_reward_std += 0.0025 * estimated_input_uncertainty

        # Adapt filter measure for more fancy SelectionMethods
        if method == SelectionMethod.Top5LowerBound:
            filter_measure = estimated_reward - estimated_reward_std
            filter_method = SelectionMethod.Top5
        elif method == SelectionMethod.BayesTop:
            filter_measure = self.probability_in_policy(
                estimated_reward, s=self.current_s) * estimated_reward_std
            filter_method = SelectionMethod.Top5
        elif method == SelectionMethod.BayesProb:
            filter_measure = self.probability_in_policy(
                estimated_reward, s=self.current_s) * estimated_reward_std
            filter_method = SelectionMethod.Prob

        filter_lambda = self.get_filter(filter_method)

        # Set some action (indices) to zero
        if self.keep_indixes:
            self.keep_array_at_last_indixes(filter_measure, self.keep_indixes)

        # Grasp specific types
        if self.with_types and self.current_type > -1:
            alpha = 0.7
            factor_current_type = np.tile(
                np.expand_dims(types[:, :, :, self.current_type], axis=-1),
                reps=(1, 1, 1, estimated_reward.shape[-1]))
            factor_alt_type = np.tile(np.expand_dims(types[:, :, :, 1],
                                                     axis=-1),
                                      reps=(1, 1, 1,
                                            estimated_reward.shape[-1]))

            filter_measure = estimated_reward * (alpha * factor_current_type +
                                                 (1 - alpha) * factor_alt_type)

        # Find the index and corresponding action using the selection method
        index_raveled = filter_lambda(filter_measure)
        index = np.unravel_index(index_raveled, filter_measure.shape)

        action = Action()
        action.index = index[3]
        action.pose = self.pose_from_index(index, filter_measure.shape,
                                           images[0])
        action.estimated_reward = estimated_reward[index]
        action.estimated_reward_std = estimated_reward_std[index]
        action.method = method
        action.step = 0  # Default value

        if verbose:
            logger.info(f'NN inference time [s]: {time.time() - start:.3}')
        return action
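A minimal numpy sketch of the Monte-Carlo step above: the model is queried repeatedly and the mean becomes the reward estimate. predict_once is a hypothetical stand-in for self.model.predict, and the standard deviation below is only a proxy for the mutual-information term the code actually uses.

import numpy as np

rng = np.random.default_rng(0)

def predict_once():
    # Hypothetical stand-in for a stochastic (dropout) forward pass.
    return rng.normal(loc=0.6, scale=0.05, size=(1, 32, 32, 3))

monte_carlo = 20
rewards_sampling = [predict_once() for _ in range(monte_carlo)]
estimated_reward = np.mean(rewards_sampling, axis=0)
estimated_reward_std = np.std(rewards_sampling, axis=0)  # spread as an uncertainty proxy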
Example #16
def grasp(
        robot: Robot,
        gripper: Gripper,
        current_episode: Episode,
        current_bin: Bin,
        action: Action,
        action_frame: Affine,
        image_frame: Affine,
        camera: Camera,
        saver: Saver,
        md: MotionData
    ) -> None:
    # md_approach_down = MotionData().with_dynamics(0.12).with_z_force_condition(7.0)
    # md_approach_up = MotionData().with_dynamics(0.6).with_z_force_condition(20.0)

    md_approach_down = MotionData().with_dynamics(0.3).with_z_force_condition(7.0)
    md_approach_up = MotionData().with_dynamics(1.0).with_z_force_condition(20.0)

    action_approch_affine = Affine(z=Config.approach_distance_from_pose)
    action_approach_frame = action_frame * action_approch_affine

    try:
        process_gripper = Process(target=gripper.move, args=(action.pose.d, ))
        process_gripper.start()

        robot.move_cartesian(Frames.gripper, action_approach_frame, md)

        process_gripper.join()
    except OSError:
        gripper.move(0.08)
        robot.move_cartesian(Frames.gripper, action_approach_frame, md)

    robot.move_relative_cartesian(Frames.gripper, action_approch_affine.inverse(), md_approach_down)

    if md_approach_down.did_break:
        robot.recover_from_errors()
        action.collision = True
        robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001), md_approach_up)

    action.final_pose = RobotPose(affine=(image_frame.inverse() * robot.current_pose(Frames.gripper)))

    first_grasp_successful = gripper.clamp()
    if first_grasp_successful:
        logger.info('Grasp successful at first.')
        robot.recover_from_errors()

        # Change here
        action_approch_affine = Affine(z=1.5*Config.approach_distance_from_pose)

        move_up_success = robot.move_relative_cartesian(Frames.gripper, action_approch_affine, md_approach_up)
        if move_up_success and not md_approach_up.did_break:
            if Config.mode is Mode.Measure and Config.take_after_images and not Config.release_in_other_bin:
                robot.move_cartesian(Frames.camera, image_frame, md)
                saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)

            if Config.file_objects:
                raise Exception('File objects not implemented!')

            if Config.release_during_grasp_action:
                move_to_release_success = move_to_release(robot, current_bin, md)
                if move_to_release_success:
                    if gripper.is_grasping():
                        action.reward = 1.0
                        action.final_pose.d = gripper.width()

                    if Config.mode is Mode.Perform:
                        gripper.release(action.final_pose.d + 0.002)  # [m]
                    else:
                        gripper.release(action.pose.d + 0.002)  # [m]
                        move_to_safety(robot, md_approach_up)

                    if Config.mode is Mode.Measure and Config.take_after_images and Config.release_in_other_bin:
                        robot.move_cartesian(Frames.camera, image_frame, md)
                        saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)
            else:
                if Config.mode is not Mode.Perform:
                    move_to_safety(robot, md_approach_up)

                if Config.mode is Mode.Measure and Config.take_after_images and Config.release_in_other_bin:
                    robot.move_cartesian(Frames.camera, image_frame, md)
                    saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)

        else:
            gripper.release(action.pose.d + 0.002)  # [m]

            robot.recover_from_errors()
            robot.move_relative_cartesian(Frames.gripper, action_approch_affine, md_approach_up)
            move_to_safety_success = move_to_safety(robot, md_approach_up)
            if not move_to_safety_success:
                robot.recover_from_errors()
                robot.recover_from_errors()
                move_to_safety(robot, md_approach_up)

            gripper.move(gripper.max_width)

            move_to_safety(robot, md_approach_up)
            if Config.mode is Mode.Measure and Config.take_after_images:
                robot.move_cartesian(Frames.camera, image_frame, md)
                saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)

    else:
        logger.info('Grasp not successful.')
        gripper.release(gripper.width() + 0.002)  # [m]

        robot.recover_from_errors()
        move_up_successful = robot.move_relative_cartesian(Frames.gripper, action_approch_affine, md_approach_up)

        if md_approach_up.did_break or not move_up_successful:
            gripper.release(action.pose.d)  # [m]

            robot.recover_from_errors()
            robot.move_relative_cartesian(Frames.gripper, action_approch_affine, md_approach_up)
            move_to_safety(robot, md_approach_up)

        if Config.mode is Mode.Measure and Config.take_after_images:
            robot.move_cartesian(Frames.camera, image_frame, md)
            saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)
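The grasp routine above overlaps gripper pre-shaping with the approach motion via multiprocessing. A minimal sketch of that pattern, with hypothetical stand-ins for gripper.move and robot.move_cartesian:

from multiprocessing import Process

def gripper_move(width):             # hypothetical stand-in for gripper.move
    print(f'gripper -> {width:.3f} m')

def robot_move_to_approach():        # hypothetical stand-in for robot.move_cartesian
    print('robot -> approach frame')

if __name__ == '__main__':
    try:
        process_gripper = Process(target=gripper_move, args=(0.05,))
        process_gripper.start()
        robot_move_to_approach()     # both motions run concurrently
        process_gripper.join()
    except OSError:
        gripper_move(0.08)           # fall back to a safe opening width
        robot_move_to_approach()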
Example #17
    def manipulate(self) -> None:
        current_bin_episode = None
        goal_images = None

        for epoch in Config.epochs:
            while self.history.total() < epoch.number_episodes:
                current_episode = Episode()
                current_bin_episode = current_bin_episode if current_bin_episode else current_episode.id
                current_selection_method = self.get_current_selection_method(
                    epoch)

                start = time.time()

                place_action_in_other_bin = Config.release_in_other_bin and not Config.release_during_grasp_action
                place_bin = Frames.get_next_bin(
                    self.current_bin
                ) if place_action_in_other_bin else self.current_bin

                if (not Config.predict_images) or self.agent.reinfer_next_time:
                    self.robot.recover_from_errors()

                    if not place_action_in_other_bin or Config.take_after_images:
                        self.robot.move_joints(
                            Frames.bin_joint_values[self.current_bin], self.md)

                    b, c = (random.choice(Config.overview_image_angles)
                            if Config.lateral_overview_image else 0), 0
                    camera_frame_overview = Frames.get_camera_frame(
                        self.current_bin, b=b, c=c)
                    if not Frames.is_camera_frame_safe(camera_frame_overview):
                        continue

                    if place_action_in_other_bin:
                        self.robot.move_cartesian(
                            Frames.camera,
                            Frames.get_camera_frame(place_bin, b=b, c=c),
                            self.md)
                    elif Config.take_goal_images:
                        self.robot.move_cartesian(Frames.camera,
                                                  camera_frame_overview,
                                                  self.md)

                    if Config.take_goal_images:
                        new_goal_images = self.take_goal_images(
                            current_bin=place_bin,
                            current_goal_images=goal_images)
                        goal_images = new_goal_images if new_goal_images else goal_images

                    elif Config.use_goal_images:
                        attr = random.choice(
                            GoalDatabase.get(Config.goal_images_dataset))
                        goal_images = [
                            Loader.get_image(attr[0], attr[1], attr[2], s)
                            for s in attr[3]
                        ]

                    if place_action_in_other_bin:
                        place_image_frame = self.robot.current_pose(
                            Frames.camera)
                        place_images = self.take_images(
                            image_frame=place_image_frame,
                            current_bin=place_bin)

                    if Config.mode is Mode.Measure or Config.lateral_overview_image:
                        self.robot.move_cartesian(Frames.camera,
                                                  camera_frame_overview,
                                                  self.md)

                    image_frame = self.robot.current_pose(Frames.camera)
                    images = self.take_images(image_frame=image_frame)

                    if not Frames.is_gripper_frame_safe(
                            self.robot.current_pose(Frames.gripper)):
                        logger.info('Image frame not safe!')
                        self.robot.recover_from_errors()
                        continue

                input_images = self.get_input_images(images)
                input_place_images = self.get_input_images(
                    place_images) if place_action_in_other_bin else None
                input_goal_images = None

                if Config.use_goal_images:
                    if isinstance(goal_images, list) and isinstance(
                            goal_images[0], list):
                        goal_images_single = goal_images.pop(0)
                    else:
                        goal_images_single = goal_images

                    input_goal_images = self.get_input_images(
                        goal_images_single)

                actions = self.agent.infer(
                    input_images,
                    current_selection_method,
                    goal_images=input_goal_images,
                    place_images=input_place_images,
                )

                for action_id, action in enumerate(actions):
                    logger.info(
                        f'Action ({action_id+1}/{len(actions)}): {action}')

                for action_id, action in enumerate(actions):
                    action.images = {}
                    action.save = True
                    action.bin = self.current_bin
                    action.bin_episode = current_bin_episode

                    current_action_place_in_other_bin = place_action_in_other_bin and action.type == 'place'
                    current_image_pose = place_image_frame if current_action_place_in_other_bin else image_frame
                    current_bin = place_bin if current_action_place_in_other_bin else self.current_bin

                    if Config.mode is Mode.Measure:
                        before_images = place_images if current_action_place_in_other_bin else images
                        self.saver.save_image(before_images,
                                              current_episode.id,
                                              action_id,
                                              'v',
                                              action=action)

                        if Config.use_goal_images:
                            self.saver.save_image(goal_images_single,
                                                  current_episode.id,
                                                  action_id,
                                                  'goal',
                                                  action=action)

                    self.saver.save_action_plan(action, current_episode.id)

                    logger.info(
                        f'Executing action: {action_id} at time {time.time() - self.overall_start:0.1f}'
                    )

                    if Config.set_zero_reward:
                        action.safe = -1

                    execute_action = True

                    if action.type == 'bin_empty':
                        action.save = False
                        execute_action = False
                    elif action.type == 'new_image':
                        action.save = False
                        execute_action = False
                        self.agent.reinfer_next_time = True

                    if action.safe <= 0:
                        execute_action = False
                        action.collision = True

                        # Set actions after this action to unsafe
                        for a in actions[action_id + 1:]:
                            a.safe = action.safe

                        reason = 'not within box' if action.safe == -1 else 'not a number'
                        logger.warning(
                            f'Action (type={action.type}) is {reason} (safe={action.safe}).'
                        )

                        if action.safe == 0 and action.type in [
                                'grasp', 'shift'
                        ]:
                            logger.warning('Episode is not saved.')
                            current_episode.save = False
                            break

                        if action.type == 'place' and action_id > 0:
                            prior_action = actions[action_id - 1]

                            if prior_action.type == 'grasp' and prior_action.reward > 0:
                                central_pose = RobotPose(
                                    affine=Affine(z=-0.28), d=action.pose.d)

                                action_frame = Frames.get_action_pose(
                                    action_pose=central_pose,
                                    image_pose=current_image_pose)
                                self.place(current_episode, action_id, action,
                                           action_frame, current_image_pose)

                    # Don't place if grasp was not successful
                    if action.type == 'place' and action_id > 0:
                        prior_action = actions[action_id - 1]

                        if prior_action.type == 'grasp' and (
                                prior_action.reward == 0
                                or prior_action.safe < 1):
                            execute_action = False

                    if Config.take_lateral_images and action.save and Config.mode is Mode.Measure:
                        md_lateral = MotionData().with_dynamics(1.0)

                        for b, c in Config.lateral_images_angles:
                            lateral_frame = Frames.get_camera_frame(
                                current_bin,
                                a=action.pose.a,
                                b=b,
                                c=c,
                                reference_pose=image_frame)

                            if not Frames.is_camera_frame_safe(
                                    lateral_frame) or (b == 0.0 and c == 0.0):
                                continue

                            lateral_move_success = self.robot.move_cartesian(
                                Frames.camera, lateral_frame,
                                md_lateral)  # Remove a for global b, c pose
                            if lateral_move_success:
                                self.saver.save_image(
                                    self.take_images(current_bin=current_bin),
                                    current_episode.id,
                                    action_id,
                                    f'lateral_b{b:0.3f}_c{c:0.3f}'.replace(
                                        '.', '_'),
                                    action=action)

                    if execute_action:
                        action_frame = Frames.get_action_pose(
                            action_pose=action.pose,
                            image_pose=current_image_pose)

                        if Config.mode is Mode.Measure and Config.take_direct_images:
                            self.robot.move_cartesian(
                                Frames.camera,
                                Affine(z=0.308) * Affine(b=0.0, c=0.0) *
                                action_frame)
                            self.saver.save_image(
                                self.take_images(current_bin=current_bin),
                                current_episode.id,
                                action_id,
                                'direct',
                                action=action)

                        if action.type == 'grasp':
                            self.grasp(current_episode, action_id, action,
                                       action_frame, current_image_pose)

                            if Config.use_goal_images and self.last_after_images and not place_action_in_other_bin:  # Next action is Place
                                place_action_id = action_id + 1
                                # Use current gripper width for safety analysis
                                actions[place_action_id].pose.d = self.gripper.width()
                                self.agent.converter.calculate_pose(
                                    actions[place_action_id],
                                    self.last_after_images)

                        elif action.type == 'shift':
                            old_reward_around_action = 0.0
                            self.shift(current_episode, action_id, action,
                                       action_frame, current_image_pose)
                            new_reward_around_action = 0.0

                            action.reward = new_reward_around_action - old_reward_around_action

                        elif action.type == 'place':
                            self.place(current_episode,
                                       action_id,
                                       action,
                                       action_frame,
                                       current_image_pose,
                                       place_bin=place_bin)
                            action.reward = actions[action_id - 1].reward

                    else:
                        if Config.take_after_images:
                            self.robot.move_cartesian(Frames.camera,
                                                      current_image_pose,
                                                      self.md)
                            self.saver.save_image(
                                self.take_images(current_bin=current_bin),
                                current_episode.id,
                                action_id,
                                'after',
                                action=action)

                    action.execution_time = time.time() - start
                    logger.info(
                        f'Time for action: {action.execution_time:0.3f} [s]')

                    if action.save:
                        current_episode.actions.append(action)
                        self.history.append(current_episode)
                    else:
                        break

                    logger.info(
                        f'Episodes (reward / done / total): {self.history.total_reward(action_type="grasp")} / {self.history.total()} / {sum(e.number_episodes for e in Config.epochs)}'
                    )
                    logger.info(
                        f'Last success: {self.history.failed_grasps_since_last_success_in_bin(self.current_bin)} cycles ago.'
                    )

                    # history.save_grasp_rate_prediction_step_evaluation(Config.evaluation_path)

                # Switch bin
                should_change_bin_for_evaluation = (
                    Config.mode is Mode.Evaluate
                    and self.history.successful_grasps_in_bin(self.current_bin)
                    == Config.change_bin_at_number_of_success_grasps)
                should_change_bin = (
                    Config.mode is not Mode.Evaluate
                    and (self.history.failed_grasps_since_last_success_in_bin(
                        self.current_bin) >=
                         Config.change_bin_at_number_of_failed_grasps
                         or action.type == 'bin_empty'))
                if should_change_bin_for_evaluation or (Config.change_bins
                                                        and should_change_bin):
                    if Config.mode is Mode.Evaluate:
                        pass
                        # history.save_grasp_rate_prediction_step_evaluation(Config.evaluation_path)

                    self.current_bin = Frames.get_next_bin(self.current_bin)
                    self.agent.reinfer_next_time = True
                    current_bin_episode = None
                    logger.info('Switch to other bin.')

                    if Config.mode is not Mode.Perform and Config.home_gripper:
                        self.gripper.homing()

                if Config.mode is Mode.Measure and current_episode.actions and current_episode.save:
                    logger.info(f'Save episode {current_episode.id}.')
                    self.saver.save_episode(current_episode)

                # Retrain model
                if (Config.train_model and self.history.total() > 0
                        and not self.history.total() % Config.train_model_every_number_cycles):
                    logger.warning('Retrain model!')
                    self.retrain_model()

        logger.info('Finished cleanly.')
Example #18
    def grasp(self, current_episode: Episode, action_id: int, action: Action,
              action_frame: Affine, image_frame: Affine):
        md_approach_down = MotionData().with_dynamics(
            0.3).with_z_force_condition(7.0)
        md_approach_up = MotionData().with_dynamics(
            1.0).with_z_force_condition(20.0)

        action_approch_affine = Affine(z=Config.approach_distance_from_pose)
        action_approach_frame = action_frame * action_approch_affine

        try:
            process_gripper = Process(target=self.gripper.move,
                                      args=(action.pose.d, ))
            process_gripper.start()

            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

            process_gripper.join()
        except OSError:
            self.gripper.move(0.08)
            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

        self.robot.move_relative_cartesian(Frames.gripper,
                                           action_approch_affine.inverse(),
                                           md_approach_down)

        if md_approach_down.did_break:
            self.robot.recover_from_errors()
            action.collision = True
            self.robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001),
                                               md_approach_up)

        action.final_pose = RobotPose(
            affine=(image_frame.inverse() *
                    self.robot.current_pose(Frames.gripper)))

        first_grasp_successful = self.gripper.clamp()
        if first_grasp_successful:
            logger.info('Grasp successful at first.')
            self.robot.recover_from_errors()

            action_approch_affine = Affine(
                z=Config.approach_distance_from_pose)

            move_up_success = self.robot.move_relative_cartesian(
                Frames.gripper, action_approch_affine, md_approach_up)
            if move_up_success and not md_approach_up.did_break:
                if Config.mode is Mode.Measure and Config.take_after_images and not Config.release_in_other_bin:
                    self.robot.move_cartesian(Frames.camera, image_frame,
                                              self.md)
                    self.last_after_images = self.take_images()
                    self.saver.save_image(self.last_after_images,
                                          current_episode.id,
                                          action_id,
                                          'after',
                                          action=action)

                if Config.release_during_grasp_action:
                    move_to_release_success = self.move_to_release(self.md)
                    if move_to_release_success:
                        if self.gripper.is_grasping():
                            action.reward = 1.0
                            action.final_pose.d = self.gripper.width()

                        if Config.mode is Mode.Perform:
                            self.gripper.release(action.final_pose.d +
                                                 0.005)  # [m]
                        else:
                            self.gripper.release(action.pose.d + 0.005)  # [m]
                            self.move_to_safety(md_approach_up)

                        if Config.mode is Mode.Measure and Config.take_after_images and Config.release_in_other_bin:
                            self.robot.move_cartesian(Frames.camera,
                                                      image_frame, self.md)
                            self.last_after_images = self.take_images()
                            self.saver.save_image(self.last_after_images,
                                                  current_episode.id,
                                                  action_id,
                                                  'after',
                                                  action=action)
                else:
                    if Config.mode is not Mode.Perform:
                        self.move_to_safety(md_approach_up)

                    if self.gripper.is_grasping():
                        action.reward = 1.0
                        action.final_pose.d = self.gripper.width()

                    if Config.mode is Mode.Measure and Config.take_after_images:
                        self.robot.move_cartesian(Frames.camera, image_frame,
                                                  self.md)
                        self.last_after_images = self.take_images()
                        self.saver.save_image(self.last_after_images,
                                              current_episode.id,
                                              action_id,
                                              'after',
                                              action=action)

            else:
                self.gripper.release(action.pose.d + 0.002)  # [m]

                self.robot.recover_from_errors()
                self.robot.move_relative_cartesian(Frames.gripper,
                                                   action_approch_affine,
                                                   md_approach_up)
                move_to_safety_success = self.move_to_safety(md_approach_up)
                if not move_to_safety_success:
                    self.robot.recover_from_errors()
                    self.robot.recover_from_errors()
                    self.move_to_safety(md_approach_up)

                self.gripper.move(self.gripper.max_width)

                self.move_to_safety(md_approach_up)
                if Config.mode is Mode.Measure and Config.take_after_images:
                    self.robot.move_cartesian(Frames.camera, image_frame,
                                              self.md)
                    self.last_after_images = self.take_images()
                    self.saver.save_image(self.last_after_images,
                                          current_episode.id,
                                          action_id,
                                          'after',
                                          action=action)

        else:
            logger.info('Grasp not successful.')
            self.gripper.release(self.gripper.width() + 0.002)  # [m]

            self.robot.recover_from_errors()
            move_up_successful = self.robot.move_relative_cartesian(
                Frames.gripper, action_approch_affine, md_approach_up)

            if md_approach_up.did_break or not move_up_successful:
                self.gripper.release(action.pose.d)  # [m]

                self.robot.recover_from_errors()
                self.robot.move_relative_cartesian(Frames.gripper,
                                                   action_approch_affine,
                                                   md_approach_up)
                self.move_to_safety(md_approach_up)

            if Config.mode is Mode.Measure and Config.take_after_images:
                self.robot.move_cartesian(Frames.camera, image_frame, self.md)
                self.last_after_images = self.take_images()
                self.saver.save_image(self.last_after_images,
                                      current_episode.id,
                                      action_id,
                                      'after',
                                      action=action)
Example #19
    def infer(
        self,
        images: List[OrthographicImage],
        goal_images: List[OrthographicImage],
        method: SelectionMethod,
        verbose=1,
        place_images: List[OrthographicImage] = None,
    ) -> List[Action]:

        start = time.time()

        if method == SelectionMethod.Random:
            grasp_action = Action(action_type='grasp')
            grasp_action.index = np.random.choice(range(3))
            grasp_action.pose = RobotPose(affine=Affine(
                x=np.random.uniform(self.lower_random_pose[0],
                                    self.upper_random_pose[0]),  # [m]
                y=np.random.uniform(self.lower_random_pose[1],
                                    self.upper_random_pose[1]),  # [m]
                a=np.random.uniform(self.lower_random_pose[3],
                                    self.upper_random_pose[3]),  # [rad]
            ))
            grasp_action.estimated_reward = -1
            grasp_action.estimated_reward_std = 0.0
            grasp_action.method = method
            grasp_action.step = 0

            place_action = Action(action_type='place')
            place_action.index = np.random.choice(range(3))
            place_action.pose = RobotPose(affine=Affine(
                x=np.random.uniform(self.lower_random_pose[0],
                                    self.upper_random_pose[0]),  # [m]
                y=np.random.uniform(self.lower_random_pose[1],
                                    self.upper_random_pose[1]),  # [m]
                a=np.random.uniform(self.lower_random_pose[3],
                                    self.upper_random_pose[3]),  # [rad]
            ))
            place_action.estimated_reward = -1
            place_action.estimated_reward_std = 0.0
            place_action.method = method
            place_action.step = 0

            return [grasp_action, place_action]

        input_images = [self.get_images(i) for i in images]
        goal_input_images = [self.get_images(i) for i in goal_images]

        if self.network_type == '2' and not place_images:
            raise Exception(
                f'Place images are missing for network type {self.network_type}'
            )
        elif place_images:
            place_input_images = [self.get_images(i) for i in place_images]

        grasp_input = input_images + goal_input_images if self.network_type == '1' else input_images
        place_input = input_images + goal_input_images if self.network_type == '1' else place_input_images + goal_input_images

        m_reward, m_z = self.grasp_model.predict(grasp_input, batch_size=128)
        # m_reward, *m_z_list = self.grasp_model.predict(grasp_input, batch_size=128)
        # m_z_list = tuple(np.expand_dims(m_zi, axis=3) for m_zi in m_z_list)
        # m_z = np.concatenate(m_z_list, axis=3)

        p_reward, p_z = self.place_model.predict(place_input, batch_size=128)

        if self.keep_indixes:
            self.keep_array_at_last_indixes(m_reward, self.keep_indixes)

        first_method = SelectionMethod.PowerProb if method in [
            SelectionMethod.Top5, SelectionMethod.Max
        ] else method
        # first_method = SelectionMethod.Top5
        filter_lambda_n_grasp = self.get_filter_n(first_method,
                                                  self.number_top_grasp)
        filter_lambda_n_place = self.get_filter_n(first_method,
                                                  self.number_top_place)

        m_top_index = filter_lambda_n_grasp(m_reward)
        p_top_index = filter_lambda_n_place(p_reward)

        m_top_index_unraveled = np.transpose(
            np.asarray(np.unravel_index(m_top_index, m_reward.shape)))
        p_top_index_unraveled = np.transpose(
            np.asarray(np.unravel_index(p_top_index, p_reward.shape)))

        # print(m_top_index_unraveled.tolist())
        # print(p_top_index_unraveled.tolist())

        m_top_z = m_z[m_top_index_unraveled[:, 0], m_top_index_unraveled[:, 1],
                      m_top_index_unraveled[:, 2]]
        # m_top_z = m_z[m_top_index_unraveled[:, 0], m_top_index_unraveled[:, 1], m_top_index_unraveled[:, 2], m_top_index_unraveled[:, 3]]
        p_top_z = p_z[p_top_index_unraveled[:, 0], p_top_index_unraveled[:, 1],
                      p_top_index_unraveled[:, 2]]

        reward = self.merge_model.predict([m_top_z, p_top_z], batch_size=2**12)

        m_top_reward = m_reward[m_top_index_unraveled[:, 0],
                                m_top_index_unraveled[:, 1],
                                m_top_index_unraveled[:, 2],
                                m_top_index_unraveled[:, 3]]
        # p_top_reward = p_reward[p_top_index_unraveled[:, 0], p_top_index_unraveled[:, 1], p_top_index_unraveled[:, 2]]
        m_top_reward_repeated = np.repeat(np.expand_dims(np.expand_dims(
            m_top_reward, axis=1),
                                                         axis=1),
                                          self.number_top_place,
                                          axis=1)

        filter_measure = reward * m_top_reward_repeated

        filter_lambda = self.get_filter(method)
        index_raveled = filter_lambda(filter_measure)

        index_unraveled = np.unravel_index(index_raveled, reward.shape)
        m_index = m_top_index_unraveled[index_unraveled[0]]
        p_index = p_top_index_unraveled[index_unraveled[1]]

        grasp_action = Action(action_type='grasp')
        grasp_action.index = m_index[3]
        grasp_action.pose = self.pose_from_index(m_index, m_reward.shape,
                                                 images[0])
        grasp_action.estimated_reward = m_reward[tuple(m_index)]
        grasp_action.method = method
        grasp_action.step = 0

        place_action = Action(action_type='place')
        place_action.index = p_index[3]
        place_action.pose = self.pose_from_index(p_index,
                                                 p_reward.shape,
                                                 images[0],
                                                 resolution_factor=1.0)
        place_action.estimated_reward = reward[
            index_unraveled]  # reward[index_raveled, 0]  # p_reward[tuple(p_index)]
        place_action.method = method
        place_action.step = 0

        if verbose:
            logger.info(f'NN inference time [s]: {time.time() - start:.3}')
        return [grasp_action, place_action]
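A compact numpy sketch of the candidate-merge selection above, under stated assumptions: the reward maps are dense arrays, the merge model scores every top-grasp / top-place pair (pair_reward is a random stand-in for merge_model.predict on the latent pairs), and the pair score is weighted by the grasp reward before the final argmax (the Max selection case).

import numpy as np

rng = np.random.default_rng(0)
m_reward = rng.random((1, 40, 40, 3))   # grasp reward map
p_reward = rng.random((1, 40, 40, 3))   # place reward map
number_top_grasp, number_top_place = 5, 4

m_top_index = np.argsort(m_reward.ravel())[-number_top_grasp:]
p_top_index = np.argsort(p_reward.ravel())[-number_top_place:]

# Hypothetical stand-in for merge_model.predict on the top-candidate latents.
pair_reward = rng.random((number_top_grasp, number_top_place))

m_top_reward = m_reward.ravel()[m_top_index]
filter_measure = pair_reward * m_top_reward[:, None]  # weight pairs by grasp reward

gi, pi = np.unravel_index(np.argmax(filter_measure), filter_measure.shape)
grasp_index = np.unravel_index(m_top_index[gi], m_reward.shape)
place_index = np.unravel_index(p_top_index[pi], p_reward.shape)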