def get_rect(size: List[float], center=np.array([0.0, 0.0, 0.0])) -> List[Affine]:
    return [
        Affine(center[0] + size[0] / 2, center[1] + size[1] / 2, size[2]),
        Affine(center[0] + size[0] / 2, center[1] - size[1] / 2, size[2]),
        Affine(center[0] - size[0] / 2, center[1] - size[1] / 2, size[2]),
        Affine(center[0] - size[0] / 2, center[1] + size[1] / 2, size[2]),
    ]
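
A minimal usage sketch (not part of the original source), assuming get_rect and the cfrankr Affine type from the examples below are in scope: the four returned corners trace the border of a 0.2 x 0.3 m box around center at height size[2].

corners = get_rect(size=[0.2, 0.3, 0.05], center=np.array([0.4, 0.0, 0.0]))
for corner in corners:
    # First corner: x = 0.5, y = 0.15, z = 0.05
    print(corner.x, corner.y, corner.z)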
Example #2
    def is_pose_inside_box(self, pose: RobotPose) -> bool:
        gripper_one_side_size = 0.5 * (pose.d + 0.002)  # [m]
        gripper_b1 = (pose * Affine(y=gripper_one_side_size)).translation()[0:2]
        gripper_b2 = (pose * Affine(y=-gripper_one_side_size)).translation()[0:2]

        rect = (
            (np.array(self.box_center) - np.array(self.box_size) / 2)[0:2],
            (np.array(self.box_center) + np.array(self.box_size) / 2)[0:2],
        )

        jaw1_inside_box = (rect[0] < gripper_b1).all() and (gripper_b1 < rect[1]).all()
        jaw2_inside_box = (rect[0] < gripper_b2).all() and (gripper_b2 < rect[1]).all()

        if (pose.b != 0.0 or pose.c != 0.0) and np.isfinite(pose.z):
            # Start-point check for tilted poses is currently disabled:
            # start_point = (Affine(z=0.14) * pose).translation()
            # start_point_inside_box = (rect[0] < start_point).all() and (start_point < rect[1]).all()
            start_point_inside_box = True
        else:
            start_point_inside_box = True

        return jaw1_inside_box and jaw2_inside_box and start_point_inside_box
Example #3
 def get_action_pose(cls, action_pose: RobotPose,
                     image_pose: Affine) -> RobotPose:
     action_translation = Affine(action_pose.x, action_pose.y,
                                 action_pose.z)
     action_rotation = Affine(a=action_pose.a,
                              b=action_pose.b,
                              c=action_pose.c)
     affine = image_pose * action_translation * action_rotation
     return RobotPose(affine=affine, d=action_pose.d)
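
Frames.get_action_pose (shown in full in Example #29) maps an action pose given in image coordinates back into the robot's base frame by left-multiplying with the image pose. A short usage sketch with hypothetical values:

image_pose = Affine(0.48, -0.125, 0.35, np.pi / 2)  # hypothetical image pose
action = RobotPose(affine=Affine(x=0.02, y=0.01, z=-0.30, a=0.1), d=0.05)  # d: gripper width [m]
pose_in_base = Frames.get_action_pose(action_pose=action, image_pose=image_pose)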
Example #4
    def place(self,
              current_episode: Episode,
              action_id: int,
              action: Action,
              action_frame: Affine,
              image_frame: Affine,
              place_bin=None):
        place_bin = place_bin if place_bin else self.current_bin

        self.move_to_safety(self.md)

        md_approach_down = MotionData().with_dynamics(0.22).with_z_force_condition(7.0)
        md_approach_up = MotionData().with_dynamics(1.0).with_z_force_condition(20.0)

        action_approach_affine = Affine(z=Config.approach_distance_from_pose)
        action_approach_frame = action_frame * action_approach_affine

        if Config.release_in_other_bin:
            self.move_to_release(self.md, target=action_approach_frame)
        else:
            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

        self.robot.move_relative_cartesian(Frames.gripper, action_approach_affine.inverse(), md_approach_down)

        if md_approach_down.did_break:
            self.robot.recover_from_errors()
            action.collision = True
            self.robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001),
                                               md_approach_up)

        action.final_pose = RobotPose(
            affine=(image_frame.inverse() *
                    self.robot.current_pose(Frames.gripper)))

        action.pose.d = self.gripper.width()
        self.gripper.release(action.pose.d + 0.01)  # [m]

        if Config.mode is not Mode.Perform:
            self.move_to_safety(md_approach_up)

        if Config.mode is Mode.Measure and Config.take_after_images:
            self.robot.move_cartesian(Frames.camera, image_frame, self.md)
            self.last_after_images = self.take_images(current_bin=place_bin)
            self.saver.save_image(self.last_after_images,
                                  current_episode.id,
                                  action_id,
                                  'after',
                                  action=action)
Example #5
 def get_camera_frame(cls,
                      current_bin: Bin,
                      a=0.0,
                      b=0.0,
                      c=0.0,
                      reference_pose=None) -> Affine:
     reference_pose = reference_pose or Affine()
     lateral = Affine(b=b, c=c) * Affine(a=a).inverse()
     return (cls.bin_frames[current_bin] * Affine(b=lateral.b, c=lateral.c)
             * Affine(b=reference_pose.b, c=reference_pose.c) * Config.default_image_pose.inverse())
Example #6
    def get_pose_in_image(cls,
                          action_pose: RobotPose,
                          image_pose: Affine,
                          reference_pose: Affine = None) -> RobotPose:
        if np.isnan(action_pose.z):
            action_pose.z = -0.35  # [m]

        z_offset = -0.022  # [m]

        image_translation = Affine(image_pose.x, image_pose.y,
                                   image_pose.z + z_offset)
        image_rotation = Affine(a=image_pose.a, b=image_pose.b, c=image_pose.c)
        action_translation = Affine(action_pose.x, action_pose.y,
                                    action_pose.z)
        action_rotation = Affine(b=action_pose.b,
                                 c=action_pose.c) * Affine(a=action_pose.a)
        affine = image_rotation * image_translation.inverse() * action_translation * action_rotation

        if reference_pose:
            reference_rotation = Affine(b=reference_pose.b,
                                        c=reference_pose.c).inverse()
            affine = reference_rotation * affine

        return RobotPose(affine=affine, d=action_pose.d)
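Example #7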
def get_area_of_interest_new(
        image: OrthographicImage,
        pose: Affine,
        size_cropped: Tuple[float, float] = None,
        size_result: Tuple[float, float] = None,
        border_color=None,
        project_3d=True,
        flags=cv2.INTER_LINEAR,
) -> OrthographicImage:
    size_input = (image.mat.shape[1], image.mat.shape[0])
    center_image = (size_input[0] / 2, size_input[1] / 2)

    action_pose = Frames.get_pose_in_image(action_pose=pose, image_pose=Affine(image.pose)) if project_3d else pose

    angle_a = action_pose.a
    if abs(action_pose.b) > np.pi - 0.1 and abs(action_pose.c) > np.pi - 0.1:
        angle_a = action_pose.a - np.pi

    if size_result and size_cropped:
        scale = size_result[0] / size_cropped[0]
        assert scale == (size_result[1] / size_cropped[1])
    elif size_result:
        scale = size_result[0] / size_input[0]
        assert scale == (size_result[1] / size_input[1])
    else:
        scale = 1.0

    size_final = size_result or size_cropped or size_input

    trans = get_transformation(
        image.pixel_size * action_pose.y,
        image.pixel_size * action_pose.x,
        -angle_a,
        center_image,
        scale=scale,
        cropped=size_cropped,
    )
    mat_result = cv2.warpAffine(image.mat, trans, size_final, flags=flags, borderValue=border_color)  # INTERPOLATION_METHOD

    image_pose = Affine(x=action_pose.x, y=action_pose.y, a=-action_pose.a) * Affine(image.pose)

    return OrthographicImage(
        mat_result,
        scale * image.pixel_size,
        image.min_depth,
        image.max_depth,
        image.camera,
        image_pose.to_array(),
    )
Example #8
def draw_around_box(image: OrthographicImage,
                    box: Dict[str, List[float]],
                    draw_lines=False) -> None:
    bin_border = get_rect(size=box['size'], center=box['center'])
    image_border = get_rect(size=[10.0, 10.0, box['size'][2]])

    image_pose = Affine(image.pose.x, image.pose.y, -image.pose.z,
                        image.pose.a, image.pose.b, image.pose.c)

    bin_border_projection = [image.project(image_pose * p) for p in bin_border]
    color = [
        max(
            image.value_from_depth((image_pose * border.inverse()).z)
            for border in bin_border)
    ] * 3

    if draw_lines:
        color_direction = (0, 255 * 255, 0)  # Green

        for i in range(len(bin_border)):
            cv2.line(image.mat, tuple(bin_border_projection[i]),
                     tuple(bin_border_projection[(i + 1) % len(bin_border)]),
                     color_direction, 1)
    else:
        image_border_projection = [
            image.project(image_pose * p) for p in image_border
        ]
        cv2.fillPoly(
            image.mat,
            np.array([image_border_projection, bin_border_projection]), color)
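Example #9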
def draw_around_box(image: OrthographicImage, box: Dict[str, List[float]], draw_lines=False) -> None:
    bin_border = get_rect(size=box['size'], center=box['center'])
    image_border = get_rect(size=[10.0, 10.0, box['size'][2]])

    image_pose = Affine(image.pose)
    color_divisor = 255 * 255 if image.mat.dtype == np.float32 else 1

    bin_border_projection = [image.project((image_pose * p).to_array()) for p in bin_border]

    if draw_lines:
        color_direction = np.array([0, 255 * 255, 0]) / color_divisor  # Green

        for i in range(len(bin_border)):
            cv2.line(
                image.mat, tuple(bin_border_projection[i]),
                tuple(bin_border_projection[(i + 1) % len(bin_border)]),
                color_direction, 1
            )
    else:
        color = np.array([
            max(image.value_from_depth((image_pose * border.inverse()).z) for border in bin_border)
        ] * 3) / color_divisor

        image_border_projection = [image.project((image_pose * p).to_array()) for p in image_border]
        cv2.fillPoly(image.mat, np.array([image_border_projection, bin_border_projection]), color)
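Example #10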
    def test_image_transformation(self):
        image = OrthographicImage(self.read_image(self.file_path / self.image_name), 2000.0, 0.22, 0.4, '', Config.default_image_pose)

        pose = RobotPose(affine=Affine(0.02, 0.0, 0.0))
        area_image = get_area_of_interest(image, pose, border_color=(0))
        imageio.imwrite(str(self.file_path / 'gen-x20-b.png'), area_image.mat)

        pose = RobotPose(affine=Affine(0.03, 0.03, 0.0))
        area_image = get_area_of_interest(image, pose, border_color=(0))
        imageio.imwrite(str(self.file_path / 'gen-x30-y30-b.png'), area_image.mat)

        pose = RobotPose(affine=Affine(0.01, 0.04, 0.0, 0.4))
        area_image = get_area_of_interest(image, pose, border_color=(0))
        imageio.imwrite(str(self.file_path / 'gen-x10-y40-a04-b.png'), area_image.mat)

        image = image.translate([0.0, 0.0, 0.05])
        image = image.rotate_x(-0.3, [0.0, 0.25])
        imageio.imwrite(str(self.file_path / 'gen-rot0_3.png'), image.mat)
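Example #11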
    def to_action(self, action: Action) -> None:
        gripper_index = action.index // len(self.angles)
        angle_index = action.index % len(self.angles)

        aff = Affine(b=self.angles[angle_index][0], c=self.angles[angle_index][1]) * action.pose
        action.pose.b = aff.b
        action.pose.c = aff.c
        action.pose.d = self.gripper_classes[gripper_index]
        action.type = 'grasp'
Example #12
def take_images(current_bin: Bin, camera: Camera, robot: Robot, image_frame: Affine = None):
    images = camera.take_images()
    if not image_frame:
        image_frame = robot.current_pose(Frames.camera)
    pose = RobotPose(affine=(image_frame.inverse() * Frames.get_frame(current_bin)))

    for image in images:
        image.pose = pose

    return images
Example #13
    def shift_check_safety(self, action: Action,
                           images: List[OrthographicImage]) -> bool:
        start_pose_inside_box = self.is_pose_inside_box(action.pose)

        end_pose = RobotPose(affine=(action.pose * Affine(
            x=action.shift_motion[0] * 0.2, y=action.shift_motion[1] * 0.2)))
        end_pose.d = action.pose.d

        end_pose_inside_box = self.is_pose_inside_box(end_pose)
        return start_pose_inside_box and end_pose_inside_box
Example #14
    def shift(self, current_episode: Episode, action_id: int, action: Action,
              action_frame: Affine, image_frame: Affine):
        md_approach_down = MotionData().with_dynamics(0.15).with_z_force_condition(6.0)
        md_approach_up = MotionData().with_dynamics(0.6).with_z_force_condition(20.0)
        md_shift = MotionData().with_dynamics(0.1).with_xy_force_condition(10.0)

        action_approach_affine = Affine(z=Config.approach_distance_from_pose)
        action_approach_frame = action_approach_affine * action_frame

        try:
            process_gripper = Process(target=self.gripper.move,
                                      args=(action.pose.d, ))
            process_gripper.start()

            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

            process_gripper.join()
        except OSError:
            self.gripper.move(0.08)
            self.robot.move_cartesian(Frames.gripper, action_approach_frame,
                                      self.md)

        self.robot.move_relative_cartesian(Frames.gripper, action_approach_affine.inverse(), md_approach_down)

        if md_approach_down.did_break:
            self.robot.recover_from_errors()
            action.collision = True
            self.robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001),
                                               md_approach_up)

        self.robot.move_relative_cartesian(
            Frames.gripper,
            Affine(x=action.shift_motion[0], y=action.shift_motion[1]),
            md_shift)
        self.robot.move_relative_cartesian(Frames.gripper, action_approach_affine, md_approach_up)
Example #15
def place(
        robot: Robot,
        gripper: Gripper,
        current_episode: Episode,
        current_bin: Bin,
        action: Action,
        action_frame: Affine,
        grasp_action: Action,
        image_frame: Affine,
        camera: Camera,
        saver: Saver,
        md: MotionData
    ) -> None:

    move_to_safety(robot, md)

    md_approach_down = MotionData().with_dynamics(0.3).with_z_force_condition(7.0)
    md_approach_up = MotionData().with_dynamics(1.0).with_z_force_condition(20.0)

    action_approach_affine = Affine(z=Config.approach_distance_from_pose)
    action_approach_frame = action_frame * action_approach_affine

    robot.move_cartesian(Frames.gripper, action_approach_frame, md)
    robot.move_relative_cartesian(Frames.gripper, action_approach_affine.inverse(), md_approach_down)

    if md_approach_down.did_break:
        robot.recover_from_errors()
        action.collision = True
        robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001), md_approach_up)

    action.final_pose = RobotPose(affine=(image_frame.inverse() * robot.current_pose(Frames.gripper)))

    gripper.release(grasp_action.final_pose.d + 0.006)  # [m]

    if Config.mode is not Mode.Perform:
        move_to_safety(robot, md_approach_up)

    if Config.mode is Mode.Measure and Config.take_after_images and Config.release_in_other_bin:
        robot.move_cartesian(Frames.camera, image_frame, md)
        saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)
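Example #16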
    def test_affine(self):
        a = Affine(1.2, 0.4, 0.6, -0.8, 0.0, 0.4)
        self.assertAlmostEqual(a.x, 1.2)
        self.assertAlmostEqual(a.y, 0.4)
        self.assertAlmostEqual(a.z, 0.6)
        self.assertAlmostEqual(a.a, -0.8)
        self.assertAlmostEqual(a.b, 0.0)
        self.assertAlmostEqual(a.c, 0.4)

        a.x = 1.5
        a.a = 0.3
        a.c = 0.11
        self.assertAlmostEqual(a.x, 1.5)
        self.assertAlmostEqual(a.y, 0.4)
        self.assertAlmostEqual(a.z, 0.6)
        self.assertAlmostEqual(a.a, 0.3)
        self.assertAlmostEqual(a.b, 0.0)
        self.assertAlmostEqual(a.c, 0.11)

        b = Affine(0.1, 0.2, c=0.3)
        self.assertAlmostEqual(b.x, 0.1)
        self.assertAlmostEqual(b.y, 0.2)
        self.assertAlmostEqual(b.z, 0.0)
        self.assertAlmostEqual(b.a, 0.0)
        self.assertAlmostEqual(b.b, 0.0)
        self.assertAlmostEqual(b.c, 0.3)
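
A short follow-up sketch (not part of the original test), relying only on the * and inverse() operations used throughout these examples: composing an Affine with its inverse recovers the identity transform.

a = Affine(1.2, 0.4, 0.6, -0.8, 0.0, 0.4)
identity = a * a.inverse()
for value in (identity.x, identity.y, identity.z, identity.a, identity.b, identity.c):
    assert abs(value) < 1e-9  # all translations and Euler angles vanish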
Example #17
def draw_line(
        image: OrthographicImage,
        action_pose: Affine,
        pt1: Affine,
        pt2: Affine,
        color,
        thickness=1,
        reference_pose=None,
    ) -> None:
    action_pose = Frames.get_pose_in_image(action_pose=action_pose, image_pose=Affine(image.pose), reference_pose=reference_pose)
    pt1_projection = tuple(image.project((action_pose * pt1).to_array()))
    pt2_projection = tuple(image.project((action_pose * pt2).to_array()))
    cv2.line(image.mat, pt1_projection, pt2_projection, color, thickness, lineType=cv2.LINE_AA)
Example #18
def shift(
        robot: Robot,
        gripper: Gripper,
        current_episode: Episode,
        current_bin: Bin,
        action: Action,
        action_frame: Affine,
        image_frame: Affine,
        camera: Camera,
        saver: Saver,
        md: MotionData
    ) -> None:
    md_approach_down = MotionData().with_dynamics(0.15).with_z_force_condition(6.0)
    md_approach_up = MotionData().with_dynamics(0.6).with_z_force_condition(20.0)
    md_shift = MotionData().with_dynamics(0.1).with_xy_force_condition(10.0)

    action_approach_affine = Affine(z=Config.approach_distance_from_pose)
    action_approach_frame = action_approach_affine * action_frame

    try:
        process_gripper = Process(target=gripper.move, args=(action.pose.d, ))
        process_gripper.start()

        robot.move_cartesian(Frames.gripper, action_approach_frame, md)

        process_gripper.join()
    except OSError:
        gripper.move(0.08)
        robot.move_cartesian(Frames.gripper, action_approach_frame, md)

    robot.move_relative_cartesian(Frames.gripper, action_approach_affine.inverse(), md_approach_down)

    if md_approach_down.did_break:
        robot.recover_from_errors()
        action.collision = True
        robot.move_relative_cartesian(Frames.gripper, Affine(z=0.001), md_approach_up)

    robot.move_relative_cartesian(Frames.gripper, Affine(x=action.shift_motion[0], y=action.shift_motion[1]), md_shift)
    robot.move_relative_cartesian(Frames.gripper, action_approach_affine, md_approach_up)
Example #19
    def take_images(self,
                    image_frame: Affine = None,
                    current_bin=None) -> List[OrthographicImage]:
        current_bin = current_bin if current_bin else self.current_bin

        images = self.camera.take_images()
        if not image_frame:
            image_frame = self.robot.current_pose(Frames.camera)
        pose = RobotPose(affine=(image_frame.inverse() *
                                 Frames.get_frame(current_bin)))

        for image in images:
            image.pose = pose.to_array()

        return images
Example #20
def get_area_of_interest(
    image: OrthographicImage,
    pose: Affine,
    size_cropped: Tuple[float, float] = None,
    size_result: Tuple[float, float] = None,
    border_color=None,
    project_3d=True,
) -> OrthographicImage:
    size_input = (image.mat.shape[1], image.mat.shape[0])
    center_image = (size_input[0] / 2, size_input[1] / 2)

    action_pose = Frames.get_pose_in_image(
        action_pose=pose, image_pose=image.pose) if project_3d else pose

    trans = get_transformation(image.pixel_size * action_pose.y,
                               image.pixel_size * action_pose.x,
                               -action_pose.a, center_image)
    mat_result = cv2.warpAffine(image.mat,
                                trans,
                                size_input,
                                borderValue=border_color)

    new_pixel_size = image.pixel_size

    if size_cropped:
        mat_result = crop(mat_result, size_output=size_cropped)

    if size_result:
        mat_result = cv2.resize(mat_result, size_result)
        new_pixel_size *= size_result[0] / size_cropped[0] if size_cropped else size_result[0] / size_input[0]

    return OrthographicImage(
        mat_result,
        new_pixel_size,
        image.min_depth,
        image.max_depth,
        image.camera,
        Affine(x=action_pose.x, y=action_pose.y, a=-action_pose.a) *
        image.pose,
    )
Example #21
    def move_to_release(self,
                        md: MotionData,
                        direct=False,
                        target=None) -> bool:
        possible_random_affine = Affine()
        if Config.random_pose_before_release:
            possible_random_affine = Config.max_random_affine_before_release.get_inner_random()

        target = target if target else Frames.get_release_frame(Frames.get_next_bin(self.current_bin)) * possible_random_affine

        self.robot.recover_from_errors()

        if Config.mode is Mode.Measure:
            self.move_to_safety(md)

        if Config.release_in_other_bin:
            if Config.release_as_fast_as_possible:
                waypoints = [
                    Waypoint(Frames.release_fastest,
                             Waypoint.ReferenceType.ABSOLUTE)
                ]
            else:
                waypoints = [Waypoint(target, Waypoint.ReferenceType.ABSOLUTE)]

                if not direct:
                    waypoints.insert(
                        0,
                        Waypoint(Frames.release_midway,
                                 Waypoint.ReferenceType.ABSOLUTE))

            return self.robot.move_waypoints_cartesian(Frames.gripper,
                                                       waypoints, MotionData())

        return self.robot.move_cartesian(
            Frames.gripper,
            Frames.get_release_frame(self.current_bin) *
            possible_random_affine, MotionData())
Example #22
def move_to_release(robot: Robot, current_bin: Bin, md: MotionData) -> bool:
    possible_random_affine = Affine()
    if Config.random_pose_before_release:
        possible_random_affine = Config.max_random_affine_before_release.get_inner_random()

    robot.recover_from_errors()

    if Config.mode is Mode.Measure:
        move_to_safety(robot, md)

    if Config.release_in_other_bin:
        if Config.release_as_fast_as_possible:
            waypoints = [
                Waypoint(
                    Frames.release_fastest,
                    Waypoint.ReferenceType.ABSOLUTE
                )
            ]
        else:
            waypoints = [
                Waypoint(
                    Frames.release_midway,
                    Waypoint.ReferenceType.ABSOLUTE
                ),
                Waypoint(
                    Frames.get_release_frame(Frames.get_next_bin(current_bin)) * possible_random_affine,
                    Waypoint.ReferenceType.ABSOLUTE
                )
            ]
        return robot.move_waypoints_cartesian(Frames.gripper, waypoints, MotionData())

    return robot.move_cartesian(
        Frames.gripper,
        Frames.get_release_frame(current_bin) * possible_random_affine,
        MotionData()
    )
Example #23
    def save_image(self,
                   images: List[OrthographicImage],
                   episode_id: str,
                   action_id: int,
                   suffix: str,
                   action=None):
        for image in images:
            image_data = cv2.imencode('.png', image.mat)[1].tobytes()
            suffix_final = f'{image.camera}-{suffix}'

            if action:
                action.images[suffix_final] = {
                    'info': {
                        'pixel_size': image.pixel_size,
                        'min_depth': image.min_depth,
                        'max_depth': image.max_depth,
                    },
                    'pose': Affine(image.pose),
                }

            try:
                requests.post(self.url + 'upload-image',
                              files={
                                  'file':
                                  ('image.png', image_data, 'image/png', {
                                      'Expires': '0'
                                  })
                              },
                              data={
                                  'collection': self.collection,
                                  'episode_id': episode_id,
                                  'action_id': action_id,
                                  'suffix': suffix_final,
                              })
            except requests.exceptions.RequestException:
                raise Exception('Could not save image!')
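Example #24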
def print_before_after_image(episode_id: str):
    action, before_image, after_image = Loader.get_action(
        'shifting', episode_id, ['ed-v', 'ed-after'])

    area_before_image = get_area_of_interest(before_image,
                                             action.pose,
                                             size_cropped=(300, 300),
                                             size_result=(150, 150))
    area_after_image = get_area_of_interest(after_image,
                                            action.pose,
                                            size_cropped=(300, 300))

    white = [255 * 255] * 3
    draw_line(area_before_image,
              action.pose,
              Affine(0, 0),
              Affine(0.036, 0),
              color=white,
              thickness=2)
    draw_line(area_before_image,
              action.pose,
              Affine(0.036, 0.0),
              Affine(0.026, -0.008),
              color=white,
              thickness=2)
    draw_line(area_before_image,
              action.pose,
              Affine(0.036, 0.0),
              Affine(0.026, 0.008),
              color=white,
              thickness=2)

    cv2.imwrite(
        str(Path.home() / 'Desktop' / f'example-{episode_id}-before.png'),
        area_before_image.mat)
    cv2.imwrite(
        str(Path.home() / 'Desktop' / f'example-{episode_id}-after.png'),
        area_after_image.mat)

    print('---')
    print(episode_id)
Example #25
import numpy as np

from argparse import ArgumentParser

from cfrankr import Robot, Affine, MotionData

if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--host',
                        default='panda_arm',
                        help='name of the robot')
    args = parser.parse_args()

    # Setup Robot
    general_dynamics_rel = 0.32
    robot = Robot(args.host, general_dynamics_rel)
    #robot.recover_from_errors()

    # Move down
    md_approach_down = MotionData().with_dynamics(0.3).with_z_force_condition(7.0)  # dynamics scales both velocity and acceleration of the robot (here 0.3)
    tcp = Affine(0.0, 0.0, 0.18, -np.pi / 4, 0.0, -np.pi)
    approach_distance_from_pose = 0.120
    action_approach_affine = Affine(z=approach_distance_from_pose)
    robot.move_relative_cartesian(tcp, action_approach_affine.inverse(), md_approach_down)
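Example #26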
    def infer(
        self,
        images: List[OrthographicImage],
        method: SelectionMethod,
        verbose=1,
        uncertainty_images: List[OrthographicImage] = None,
    ) -> Action:

        start = time.time()

        if method == SelectionMethod.Random:
            action = Action()
            action.index = np.random.choice(range(3))
            action.pose = RobotPose(affine=Affine(
                x=np.random.uniform(self.lower_random_pose[0],
                                    self.upper_random_pose[0]),  # [m]
                y=np.random.uniform(self.lower_random_pose[1],
                                    self.upper_random_pose[1]),  # [m]
                a=np.random.uniform(self.lower_random_pose[3],
                                    self.upper_random_pose[3]),  # [rad]
            ))
            action.estimated_reward = -1
            action.estimated_reward_std = 0.0
            action.method = method
            action.step = 0
            return action

        input_images = [self.get_images(i) for i in images]
        result = self.model.predict(input_images)

        if self.with_types:
            estimated_reward = result[0]
            types = result[1]
        else:
            estimated_reward = result

        estimated_reward_std = np.zeros(estimated_reward.shape)

        filter_method = method
        filter_measure = estimated_reward

        # Calculate model uncertainty
        if self.monte_carlo:
            rewards_sampling = [
                self.model.predict(input_images)
                for i in range(self.monte_carlo)
            ]
            estimated_reward = np.mean(rewards_sampling, axis=0)
            estimated_reward_std += self.mutual_information(rewards_sampling)

            if verbose:
                logger.info(f'Current monte carlo s: {self.current_s}')

        # Calculate input uncertainty
        if self.input_uncertainty:
            input_uncertainty_images = [
                self.get_images(i) for i in uncertainty_images
            ]

            array_estimated_unc = tkb.get_session().run(
                self.propagation_input_uncertainty,
                feed_dict={
                    self.model.input: input_images[0],
                    self.uncertainty_placeholder: input_uncertainty_images[0]
                })
            estimated_input_uncertainty = np.concatenate(array_estimated_unc,
                                                         axis=3)
            estimated_reward_std += 0.0025 * estimated_input_uncertainty

        # Adapt filter measure for more fancy SelectionMethods
        if method == SelectionMethod.Top5LowerBound:
            filter_measure = estimated_reward - estimated_reward_std
            filter_method = SelectionMethod.Top5
        elif method == SelectionMethod.BayesTop:
            filter_measure = self.probability_in_policy(
                estimated_reward, s=self.current_s) * estimated_reward_std
            filter_method = SelectionMethod.Top5
        elif method == SelectionMethod.BayesProb:
            filter_measure = self.probability_in_policy(
                estimated_reward, s=self.current_s) * estimated_reward_std
            filter_method = SelectionMethod.Prob

        filter_lambda = self.get_filter(filter_method)

        # Set some action (indices) to zero
        if self.keep_indixes:
            self.keep_array_at_last_indixes(filter_measure, self.keep_indixes)

        # Grasp specific types
        if self.with_types and self.current_type > -1:
            alpha = 0.7
            factor_current_type = np.tile(
                np.expand_dims(types[:, :, :, self.current_type], axis=-1),
                reps=(1, 1, 1, estimated_reward.shape[-1]))
            factor_alt_type = np.tile(np.expand_dims(types[:, :, :, 1],
                                                     axis=-1),
                                      reps=(1, 1, 1,
                                            estimated_reward.shape[-1]))

            filter_measure = estimated_reward * (alpha * factor_current_type +
                                                 (1 - alpha) * factor_alt_type)

        # Find the index and corresponding action using the selection method
        index_raveled = filter_lambda(filter_measure)
        index = np.unravel_index(index_raveled, filter_measure.shape)

        action = Action()
        action.index = index[3]
        action.pose = self.pose_from_index(index, filter_measure.shape,
                                           images[0])
        action.estimated_reward = estimated_reward[index]
        action.estimated_reward_std = estimated_reward_std[index]
        action.method = method
        action.step = 0  # Default value

        if verbose:
            logger.info(f'NN inference time [s]: {time.time() - start:.3}')
        return action
Example #27
def draw_pose(image: OrthographicImage, action_pose: RobotPose, convert_to_rgb=False, reference_pose=None) -> None:
    if convert_to_rgb and image.mat.ndim == 2:
        image.mat = cv2.cvtColor(image.mat, cv2.COLOR_GRAY2RGB)

    color_rect = (255 * 255, 0, 0)  # Blue
    color_lines = (0, 0, 255 * 255)  # Red
    color_direction = (0, 255 * 255, 0)  # Green

    rect = get_rect([200.0 / image.pixel_size, 200.0 / image.pixel_size, 0.0])
    for i, r in enumerate(rect):
        draw_line(image, action_pose, r, rect[(i + 1) % len(rect)], color_rect, 2, reference_pose)

    draw_line(image, action_pose, Affine(90 / image.pixel_size, 0), Affine(100 / image.pixel_size, 0), color_rect, 2, reference_pose)
    draw_line(image, action_pose, Affine(0.012, action_pose.d / 2), Affine(-0.012, action_pose.d / 2), color_lines, 1, reference_pose)
    draw_line(image, action_pose, Affine(0.012, -action_pose.d / 2), Affine(-0.012, -action_pose.d / 2), color_lines, 1, reference_pose)
    draw_line(image, action_pose, Affine(0, action_pose.d / 2), Affine(0, -action_pose.d / 2), color_lines, 1, reference_pose)
    draw_line(image, action_pose, Affine(0.006, 0), Affine(-0.006, 0), color_lines, 1, reference_pose)

    if not isinstance(action_pose.z, str) and np.isfinite(action_pose.z):
        draw_line(image, action_pose, Affine(z=0.14), Affine(), color_direction, 1, reference_pose)
Example #28
def get_distance_to_box(image: OrthographicImage, box: Dict[str, List[float]]) -> float:
    bin_border = get_rect(size=box['size'], center=box['center'])
    image_pose = Affine(image.pose)
    return min((image_pose * border.inverse()).z for border in bin_border)
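
A hedged usage sketch, assuming an OrthographicImage such as those returned by take_images (Examples #12 and #19); the box geometry below is hypothetical:

box = {'center': [0.48, -0.125, 0.0], 'size': [0.17, 0.28, 0.07]}  # [m]
distance = get_distance_to_box(image, box)  # minimum z-offset between image pose and bin corners [m]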
Example #29
class Frames:
    camera = Affine(-0.079, -0.0005, 0.011, -np.pi / 4, 0.0, -np.pi)
    gripper = Affine(0.0, 0.0, 0.18, -np.pi / 4, 0.0, -np.pi)

    bin_frames = {
        Bin.Left: Affine(0.480, -0.125, 0.011, np.pi / 2),
        Bin.Right: Affine(0.480, 0.125, 0.011, np.pi / 2),
    }

    bin_joint_values = {
        Bin.Left: [
            -1.8119446041276, 1.1791089121678, 1.7571002245448,
            -2.141621800118, -1.143369391372, 1.633046061666, -0.432171664388
        ],
        Bin.Right: [
            -1.4637412426804, 1.0494154046592, 1.7926908288289,
            -2.283032105735, -1.035444400130, 1.752863485400, 0.04325164650034
        ],
    }

    release_fastest = Affine(0.480, -0.06, 0.180, np.pi / 2)
    release_midway = Affine(0.480, 0.0, 0.240, np.pi / 2)

    @classmethod
    def get_frame(cls, current_bin: Bin) -> Affine:
        return cls.bin_frames[current_bin]

    @classmethod
    def get_camera_frame(cls,
                         current_bin: Bin,
                         a=0.0,
                         b=0.0,
                         c=0.0,
                         reference_pose=None) -> Affine:
        reference_pose = reference_pose or Affine()
        lateral = Affine(b=b, c=c) * Affine(a=a).inverse()
        return (cls.bin_frames[current_bin] * Affine(b=lateral.b, c=lateral.c)
                * Affine(b=reference_pose.b, c=reference_pose.c) * Config.default_image_pose.inverse())

    @classmethod
    def get_release_frame(cls, current_bin: Bin) -> Affine:
        return Affine(z=Config.move_down_distance_for_release) * cls.bin_frames[current_bin]

    @classmethod
    def get_next_bin(cls, current_bin: Bin) -> Bin:
        return {Bin.Left: Bin.Right, Bin.Right: Bin.Left}[current_bin]

    @classmethod
    def get_pose_in_image(cls,
                          action_pose: RobotPose,
                          image_pose: Affine,
                          reference_pose: Affine = None) -> RobotPose:
        if np.isnan(action_pose.z):
            action_pose.z = -0.35  # [m]

        z_offset = -0.022  # [m]

        image_translation = Affine(image_pose.x, image_pose.y,
                                   image_pose.z + z_offset)
        image_rotation = Affine(a=image_pose.a, b=image_pose.b, c=image_pose.c)
        action_translation = Affine(action_pose.x, action_pose.y,
                                    action_pose.z)
        action_rotation = Affine(b=action_pose.b,
                                 c=action_pose.c) * Affine(a=action_pose.a)
        affine = image_rotation * image_translation.inverse() * action_translation * action_rotation

        if reference_pose:
            reference_rotation = Affine(b=reference_pose.b,
                                        c=reference_pose.c).inverse()
            affine = reference_rotation * affine

        return RobotPose(affine=affine, d=action_pose.d)

    @classmethod
    def get_action_pose(cls, action_pose: RobotPose,
                        image_pose: Affine) -> RobotPose:
        action_translation = Affine(action_pose.x, action_pose.y,
                                    action_pose.z)
        action_rotation = Affine(a=action_pose.a,
                                 b=action_pose.b,
                                 c=action_pose.c)
        affine = image_pose * action_translation * action_rotation
        return RobotPose(affine=affine, d=action_pose.d)

    @classmethod
    def is_gripper_frame_safe(cls, current_pose: Affine) -> bool:
        return current_pose.z > 0.16  # [m]

    @classmethod
    def is_camera_frame_safe(cls, camera_frame) -> bool:
        def find_nearest(obj, k):
            return list(obj.values())[np.argmin(
                np.abs(np.array(list(obj.keys())) - k))]

        lower_x_for_y = {  # [m]
            -0.2: 0.29,
            -0.15: 0.31,
            -0.10: 0.32,
            -0.05: 0.35,
            0.0: 0.37,
            0.05: 0.38,
            0.1: 0.38,
            0.15: 0.35,
        }
        upper_x_for_y = {  # [m]
            -0.2: 0.64,
            0.0: 0.68,
            0.2: 0.64,
        }
        lower_x = find_nearest(lower_x_for_y, camera_frame.y)
        upper_x = find_nearest(upper_x_for_y, camera_frame.y)
        return (lower_x < camera_frame.x < upper_x) and (
            0.28 < camera_frame.z < 0.6)  # [m]
Example #30
 def get_release_frame(cls, current_bin: Bin) -> Affine:
     return Affine(z=Config.move_down_distance_for_release) * cls.bin_frames[current_bin]