Example 1
    def test_shift_conversion(self):
        conv = Converter(shift_z_offset=0.0, box=self.box)

        image = OrthographicImage(
            self.read_image(self.file_path / self.image_name), 2000.0, 0.22,
            0.4, '', Config.default_image_pose)

        action = Action()
        action.type = 'shift'
        action.pose = RobotPose()
        action.pose.x = -0.02
        action.pose.y = 0.105
        action.pose.a = 1.52
        action.pose.d = 0.078
        action.index = 1
        action.shift_motion = [0.03, 0.0]

        draw_pose(image, action.pose, convert_to_rgb=True)
        draw_around_box(image, box=self.box, draw_lines=True)
        imageio.imwrite(str(self.file_path / 'gen-shift-draw-pose.png'),
                        image.mat)

        self.assertTrue(conv.shift_check_safety(action, [image]))

        conv.calculate_pose(action, [image])
        self.assertLess(action.pose.z, 0.0)
Example 2
    def infer_grasp(self, req: InferGraspRequest) -> InferGraspResponse:
        image_res = self.get_images()
        cv_image = self.bridge.imgmsg_to_cv2(image_res.image, 'mono16')
        image = OrthographicImage(cv_image, image_res.pixel_size,
                                  image_res.min_depth, image_res.max_depth)

        images = [image]

        action = self.inference.infer(images, req.method)

        draw_image = clone(image)
        draw_image.mat = cv2.cvtColor(draw_image.mat, cv2.COLOR_GRAY2RGB)
        if self.draw_on_image:
            draw_around_box(draw_image, box=self.box, draw_lines=True)
            draw_pose(draw_image, action)

        if self.save_tmp_image:
            cv2.imwrite('/tmp/graspro/current-image.png', draw_image.mat)

        draw_image_msg = self.bridge.cv2_to_imgmsg(draw_image.mat, 'rgb8')
        self.image_publisher.publish(draw_image_msg)

        if self.publish_heatmap:
            heatmap = self.heatmapper.render(image, alpha=0.5)
            heatmap_msg = self.bridge.cv2_to_imgmsg(heatmap, 'rgb8')
            self.heatmap_publisher.publish(heatmap_msg)

        return InferGraspResponse(action=action)
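A minimal sketch of wiring this handler into a ROS node. The node name, service name, and the `InferGrasp` service definition (and its `graspro.srv` package) are assumptions, not shown in the snippet:

import rospy
from graspro.srv import InferGrasp  # assumed service definition

# GraspServer is assumed to be the class that owns infer_grasp() above.
rospy.init_node('grasp_inference')
server = GraspServer()
rospy.Service('infer_grasp', InferGrasp, server.infer_grasp)
rospy.spin()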
Example 3
def api_image(episode_id):
    def send_image(image):
        _, image_encoded = cv2.imencode('.jpg', image)
        return flask.send_file(io.BytesIO(image_encoded),
                               mimetype='image/jpeg')

    def send_empty_image():
        empty = np.zeros((480, 752, 1))
        cv2.putText(empty,
                    '?', (310, 300),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    6,
                    100,
                    thickness=6)
        return send_image(empty)

    database_name = flask.request.values.get('database')
    suffix = flask.request.values.get('suffix')

    if flask.request.values.get('pose'):
        action = Action(data=json.loads(flask.request.values.get('pose')))
        image = Loader.get_image(database_name,
                                 episode_id,
                                 suffix,
                                 images=action.images)
    else:
        action, image = Loader.get_action(database_name, episode_id, suffix)

    if not action or suffix not in action.images or not image:
        return send_empty_image()

    draw_pose(image,
              action.pose,
              convert_to_rgb=True,
              reference_pose=action.images['ed-v']['pose'])

    if int(flask.request.values.get('box', default=0)):
        draw_around_box(image, box=Config.box, draw_lines=True)

    return send_image(image.mat / 255)
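For context, a minimal sketch of registering this view with a Flask app; the URL rule and the app setup are assumptions:

import flask

app = flask.Flask(__name__)
app.add_url_rule('/api/<episode_id>/image', view_func=api_image)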
Example 4
    def test_grasp_conversion(self):
        conv = Converter(grasp_z_offset=0.0, box=self.box)

        image = OrthographicImage(
            self.read_image(self.file_path / self.image_name), 2000.0, 0.22,
            0.4, '', Config.default_image_pose)

        action = Action()
        action.type = 'grasp'
        action.pose.x = -0.06
        action.pose.y = 0.099
        action.pose.a = 0.523
        action.pose.d = 0.078
        action.index = 1

        draw_pose(image, action.pose, convert_to_rgb=True)
        draw_around_box(image, box=self.box, draw_lines=True)
        imageio.imwrite(str(self.file_path / 'gen-grasp-draw-pose.png'),
                        image.mat)

        self.assertTrue(conv.grasp_check_safety(action, [image]))

        conv.calculate_pose(action, [image])
        self.assertLess(action.pose.z, 0.0)
Example 5
    # Construct the inference model so that `inference` used below is defined
    # (this construction was left commented out in the original snippet):
    inference = InferencePlanarPose(
        Loader.get_model('shifting', 'model-3'),
        box=Config.box,
    )

    _, image = Loader.get_action('cylinder-cube-mc-1',
                                 '2019-07-02-13-27-22-246', 'ed-v')

    indexer = GraspIndexer(gripper_classes=Config.gripper_classes)

    converter = Converter(grasp_z_offset=Config.grasp_z_offset, box=Config.box)

    times = []
    for i in range(1):
        start = time.time()
        action = inference.infer([image], SelectionMethod.Top5, verbose=1)
        indexer.to_action(action)

        end = time.time()
        times.append(end - start)

    converter.calculate_pose(action, [image])
    print(action)
    # Skip the first (warm-up) measurement when more than one sample exists;
    # with a single iteration, times[1:] would be empty and the mean undefined.
    timings = times[1:] if len(times) > 1 else times
    print(f'Mean inference time [ms]: {np.mean(timings) * 1000:0.5f}')

    draw_pose(image, action.pose, convert_to_rgb=True)
    cv2.imshow('image', image.mat)
    cv2.waitKey(1500)
Example 6
    def predict_actions(
        self,
        images: List[OrthographicImage],
        method: SelectionMethod,
        N=1,
        verbose=True,
    ) -> List[Action]:

        actions: List[Action] = []

        # Start from all-zero uncertainty maps that mirror each input image's
        # geometry and camera parameters.
        uncertainty_images = [
            OrthographicImage(
                np.zeros(i.mat.shape, dtype=np.uint16),
                i.pixel_size, i.min_depth, i.max_depth, i.camera, i.pose,
            ) for i in images
        ]

        for i in range(N):
            for image in images:
                draw_around_box(image, box=Config.box)

            grasp = self.grasp_inference.infer(
                images, method, uncertainty_images=uncertainty_images)
            self.grasp_indexer.to_action(grasp)

            # Shift actions
            if Config.shift_objects and grasp.estimated_reward < Config.grasp_shift_threshold:
                shift = self.shift_inference.infer(images, method)
                self.shift_indexer.to_action(shift)

                bin_empty = shift.estimated_reward < Config.shift_empty_threshold

                if bin_empty:
                    actions.append(Action('bin_empty', safe=1))
                else:
                    self.converter.calculate_pose(shift, images)
                    actions.append(shift)

            # Grasp actions
            else:
                bin_empty = (grasp.estimated_reward < Config.bin_empty_at_max_probability
                             and Epoch.selection_method_should_be_high(method))

                if bin_empty:
                    actions.append(Action('bin_empty', safe=1))
                elif grasp.estimated_reward_std > 0.9:  # default=0.25
                    actions.append(Action('new_image', safe=1))
                else:
                    self.converter.calculate_pose(grasp, images)
                    actions.append(grasp)

            actions[-1].step = i
            action = actions[-1]
            logger.info(f'{i}: {action}')

            if verbose:
                image_copy = clone(images[0])
                uncertainty_image_copy = clone(uncertainty_images[0])

                draw_pose(image_copy, action.pose, convert_to_rgb=True)
                draw_pose(uncertainty_image_copy,
                          action.pose,
                          convert_to_rgb=True)

                cv2.imwrite(str(self.output_path / f'result-{i}.png'),
                            image_copy.mat)
                cv2.imwrite(str(self.output_path / f'uncertainty-{i}.png'),
                            uncertainty_image_copy.mat)

            if action.type in ('bin_empty', 'new_image'):
                break

            # Predict next image: grasp rewards are binarized against the
            # bin-empty threshold, while shift rewards pass through unchanged.
            reward = (action.estimated_reward > Config.bin_empty_at_max_probability
                      if action.type == 'grasp' else action.estimated_reward)
            action_type = self.grasp_shift_indexer.from_action(action)
            images = self.predict_images_after_action(
                images,
                action,
                reward=reward,
                action_type=action_type,
                uncertainty_images=uncertainty_images,
            )

            if isinstance(images, tuple):
                images, uncertainty_images = images
            else:
                uncertainty_images = None

        if verbose:
            cv2.imwrite(str(self.output_path / f'result-{i+1}.png'),
                        images[0].mat)
            if uncertainty_images:  # may be None if the predictor returned images only
                cv2.imwrite(str(self.output_path / f'uncertainty-{i+1}.png'),
                            uncertainty_images[0].mat)

        return actions
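A minimal sketch of a call site for the greedy rollout above; the owning instance and its construction are assumptions:

# Hypothetical call site: `agent` is assumed to be an instance of the class
# that defines predict_actions().
actions = agent.predict_actions([image], SelectionMethod.Top5, N=3, verbose=True)
for action in actions:
    print(action.type, action.estimated_reward)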
Example 7
    def plan_actions(
        self,
        images: List[OrthographicImage],
        method: SelectionMethod,
        depth=1,
        leaves=1,
        verbose=False,
    ) -> List[Action]:

        # Start from all-zero uncertainty maps that mirror each input image's
        # geometry and camera parameters.
        uncertainty_images = [
            OrthographicImage(
                np.zeros(i.mat.shape, dtype=np.uint16),
                i.pixel_size, i.min_depth, i.max_depth, i.camera, i.pose,
            ) for i in images
        ]

        tree = PlanningTree(images, uncertainty_images)

        for node, i in tree.fill_nodes(leaves=leaves, depth=depth):
            # Visited actions: node.actions

            for image in node.images:
                draw_around_box(image, box=Config.box)

            grasp = self.grasp_inference.infer(
                node.images,
                method,
                uncertainty_images=node.uncertainty_images)
            self.grasp_indexer.to_action(grasp)

            # Shift actions
            if Config.shift_objects and grasp.estimated_reward < Config.grasp_shift_threshold:
                shift = self.shift_inference.infer(node.images, method)
                self.shift_indexer.to_action(shift)

                bin_empty = shift.estimated_reward < Config.shift_empty_threshold

                if bin_empty:
                    action = Action('bin_empty', safe=1)
                else:
                    self.converter.calculate_pose(shift, node.images)
                    action = shift

            # Grasp actions
            else:
                bin_empty = (grasp.estimated_reward < Config.bin_empty_at_max_probability
                             and Epoch.selection_method_should_be_high(method))

                if bin_empty:
                    action = Action('bin_empty', safe=1)
                elif grasp.estimated_reward_std > 0.9:  # default=0.25
                    action = Action('new_image', safe=1)
                else:
                    self.converter.calculate_pose(grasp, node.images)
                    action = grasp
            logger.info(f'{i}: {action}')

            if verbose:
                image_copy = clone(images[0])
                uncertainty_image_copy = clone(uncertainty_images[0])

                draw_pose(image_copy, action.pose, convert_to_rgb=True)
                draw_pose(uncertainty_image_copy,
                          action.pose,
                          convert_to_rgb=True)

                cv2.imwrite(str(self.output_path / f'result-{i}.png'),
                            image_copy.mat)
                cv2.imwrite(str(self.output_path / f'uncertainty-{i}.png'),
                            uncertainty_image_copy.mat)

            if action.type in ('bin_empty', 'new_image'):
                break

            # Predict next image: grasp rewards are binarized against the
            # bin-empty threshold, while shift rewards pass through unchanged.
            reward = (action.estimated_reward > Config.bin_empty_at_max_probability
                      if action.type == 'grasp' else action.estimated_reward)
            action_type = self.grasp_shift_indexer.from_action(action)
            images = self.predict_images_after_action(
                node.images,
                action,
                reward=reward,
                action_type=action_type,
                uncertainty_images=node.uncertainty_images,
            )

            if isinstance(images, tuple):
                images, uncertainty_images = images
            else:
                uncertainty_images = None

            node.add_action(action, images, uncertainty_images)

        if verbose:
            cv2.imwrite(str(self.output_path / f'result-{i+1}.png'),
                        node.images[0].mat)
            if node.uncertainty_images:  # may be None for nodes added without uncertainty maps
                cv2.imwrite(str(self.output_path / f'uncertainty-{i+1}.png'),
                            node.uncertainty_images[0].mat)

        actions, max_reward, mean_reward = tree.get_actions_maximize_reward(
            max_depth=depth)
        print(
            f'Max reward: {max_reward:0.3f}, Mean reward: {mean_reward:0.3f}, Length: {len(actions)}'
        )

        # actions, max_steps, mean_steps = tree.get_actions_minimize_steps()
        return actions
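Unlike predict_actions in Example 6, plan_actions expands a small search tree (up to `leaves` children per node, `depth` levels deep) and then extracts the reward-maximizing action sequence. A minimal sketch of a call site, with the owning instance assumed:

# Hypothetical call site: `agent` is assumed to be an instance of the class
# that defines plan_actions().
actions = agent.plan_actions([image], SelectionMethod.Top5, depth=3, leaves=2, verbose=False)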