Example #1
def load_camera(recorder: BaseEnv):
    camera_pose = sapyen.Pose([-0.5, -1.5, 2.5],
                              transforms3d.euler.euler2quat(0, 0.7, -0.9))
    recorder.add_camera("left_view", camera_pose, width=960, height=540)
    camera_pose = sapyen.Pose([0.7, -3, 2.5],
                              transforms3d.euler.euler2quat(0, 1.3, 3.14))
    recorder.add_camera("front_view", camera_pose, width=960, height=540)
    camera_pose = sapyen.Pose([0.5, -3, 3],
                              transforms3d.euler.euler2quat(0, 1.57, 0))
    recorder.add_camera("bird_view", camera_pose, width=960, height=540)
Example #2
    def add_camera(self, name, camera_pose: np.ndarray, width: int, height: int, fov=1.1, near=0.01, far=100):
        actor = self.builder.build(False, True, "{}".format(name), True)
        self.mount_actor_list.append(actor)
        self.camera_name_list.append(name)

        pose = sapyen.Pose(camera_pose[:3, 3])
        pose.set_q(transforms3d.quaternions.mat2quat(camera_pose[:3, :3]))
        self.sim.add_mounted_camera(name, actor, sapyen.Pose([0, 0, 0], [1, 0, 0, 0]), width, height, fov, fov,
                                    near, far)
        actor.set_global_pose(pose)

        camera = self.renderer.get_camera(len(self.cam_list))
        self.cam_list.append(camera)
        self.build_camera_mapping(height, width, camera.get_camera_matrix())
        self.depth_lambda_list.append(lambda depth: 1 / (depth * (1 / far - 1 / near) + 1 / near))
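
The depth lambda appended above maps a normalized depth value of 0 to near and 1 to far, i.e. it recovers a metric depth between the near and far planes. A minimal standalone check of that formula (NumPy only; buffer_to_metric_depth is a hypothetical helper name, and near/far are the defaults from the signature above):

import numpy as np

near, far = 0.01, 100  # default values from the add_camera signature above

def buffer_to_metric_depth(depth):
    # Same expression as the stored lambda: inverts the depth buffer value to metric depth.
    return 1 / (depth * (1 / far - 1 / near) + 1 / near)

assert np.isclose(buffer_to_metric_depth(0.0), near)  # a buffer value of 0 maps to the near plane
assert np.isclose(buffer_to_metric_depth(1.0), far)   # a buffer value of 1 maps to the far plane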
Example #3
    def calculate_pose_in_front_of_object_link(self, link_index: int, category: Optional[str], horizontal_offset: float,
                                               vertical_offset: float) -> sapyen.Pose:
        """
        Get a heuristic pose located in front of an object link
        :param link_index: Index of a link in the object link list
        :param category: Category of the object
        :param horizontal_offset: How far the target pose should be in front of the semantic part
        :param vertical_offset: How far the target pose should be above the semantic part
        :return: Target pose
        """
        object_pose: sapyen.Pose = self.object_links[0].get_global_pose()
        semantic_local_pose: sapyen.Pose = object_pose.inv().transform(
            self.object_links[link_index].get_global_mass_center())
        if semantic_local_pose.p[0] > 0:
            target_x = semantic_local_pose.p[0] + horizontal_offset
            target_direction_quaternion = [0, 0, 0, 1]
        else:
            target_x = semantic_local_pose.p[0] - horizontal_offset
            target_direction_quaternion = [1, 0, 0, 0]

        target_pos = semantic_local_pose.p
        target_pos[0] = target_x
        target_pos[2] += vertical_offset
        target_pose = sapyen.Pose(target_pos, target_direction_quaternion)
        return object_pose.transform(target_pose)
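
A hedged usage sketch of this heuristic; the env instance, link index, and offset values below are illustrative assumptions rather than part of the original code:

# "env" is assumed to be an environment instance exposing the methods in these examples.
target_pose = env.calculate_pose_in_front_of_object_link(
    link_index=1, category="rotation_door", horizontal_offset=0.6, vertical_offset=0.1)
env.set_robot_base_pose(target_pose)  # set_robot_base_pose is shown in a later example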
Example #4
    def calculate_grasp_pose_from_handle_cloud(
            point_cloud: np.ndarray) -> sapyen.Pose:
        # Calculate center and generic pose of gripper
        assert point_cloud.shape[1] == 3, "Point Cloud must be in (n, 3) shape"
        pc = open3d.geometry.PointCloud()
        pc.points = open3d.utility.Vector3dVector(point_cloud)
        bounding_box: open3d.geometry.AxisAlignedBoundingBox = pc.get_axis_aligned_bounding_box()
        center = bounding_box.get_center()
        box_min = bounding_box.get_min_bound()
        box_max = bounding_box.get_max_bound()
        scale = box_max - box_min
        gripper_pose = sapyen.Pose(center)
        z_euler = 1.57 * np.sign(center[0]) + 1.57
        if scale[1] > scale[2]:
            print("Horizontal Handle Detected")
            gripper_pose.set_q(
                transforms3d.euler.euler2quat(1.57, 0, z_euler, "rxyz"))
        else:
            print("Vertical Handle Detected")
            gripper_pose.set_q(
                transforms3d.euler.euler2quat(0, 0, z_euler, "rxyz"))

        # Add offset for Gripper
        position = gripper_pose.p
        position[0] -= 0.05
        gripper_pose.set_p(position)
        return gripper_pose
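
A runnable sketch of the same axis-aligned bounding-box heuristic on a synthetic handle cloud; the point cloud below is fabricated purely to exercise the horizontal/vertical branch and is not from the original code:

import numpy as np
import open3d

# Fabricated handle cloud: points spread mostly along y, i.e. a horizontal handle.
rng = np.random.default_rng(0)
handle_cloud = rng.uniform([-0.01, -0.08, -0.01], [0.01, 0.08, 0.01], size=(500, 3))

pc = open3d.geometry.PointCloud()
pc.points = open3d.utility.Vector3dVector(handle_cloud)
box = pc.get_axis_aligned_bounding_box()
scale = box.get_max_bound() - box.get_min_bound()

# Same test as above: the larger of the y/z extents decides the handle orientation.
print("Horizontal Handle Detected" if scale[1] > scale[2] else "Vertical Handle Detected")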
Example #5
def load_scene(sim: sapyen.Simulation):
    builder: sapyen.ActorBuilder = sim.create_actor_builder()
    global_scale = 1
    builder.add_multiple_shapes("../assets/object/walls/wall_scene.obj",
                                sapyen.Pose([0, 0, 0]),
                                [global_scale, global_scale, global_scale])
    builder.add_obj_visual("../assets/object/walls/wall_scene.obj",
                           sapyen.Pose([0, 0, 0]),
                           [global_scale, global_scale, global_scale])
    builder.build(True, False, "scene")

    loader = sim.create_urdf_loader()
    loader.fix_loaded_object = True
    loader.scale = global_scale * 1.5
    obj8: sapyen.ArticulationWrapper = loader.load(
        "/home/sim/mobility_dataset/mobility_v1_alpha5/8867/mobility.urdf")
    obj8.set_root_pose(np.array([-3.127, 2.281, 1.481]) * global_scale)
Example #6
    def set_robot_base_pose(
            self, pose: Union[np.ndarray, List, sapyen.Pose]) -> None:
        if isinstance(pose, sapyen.Pose):
            self.manger.move_base(pose)
        else:
            assert len(pose) == 7, \
                "Pose should be in Position: x y z, Quaternion: w x y z format"
            self.manger.move_base(sapyen.Pose(pose[:3], pose[3:]))
Example #7
    def calculate_edge_grasp_pose(self, return_size_info=False):
        obj_segmentation_list = []
        for cam_id in range(len(self.cam_list)):
            part_cloud = self.get_global_part_point_cloud_with_seg_id(
                cam_id, "door",
                self.object_link_segmentation_ids[self.target_link_index])
            obj_segmentation_list.append(part_cloud.reshape(-1, 3))
        all_part_cloud = np.concatenate(obj_segmentation_list, axis=0)

        pc = open3d.geometry.PointCloud()
        pc.points = open3d.utility.Vector3dVector(all_part_cloud)
        bounding_box: open3d.geometry.OrientedBoundingBox = pc.get_oriented_bounding_box()
        eight_points = np.asarray(bounding_box.get_box_points())
        front_four_index = np.argsort(np.abs(eight_points[:, 0]))[4:]
        back_four_index = np.argsort(np.abs(eight_points[:, 0]))[:4]

        edge_points = eight_points[front_four_index]
        center = np.mean(edge_points, axis=0)
        direction = center - np.mean(eight_points[back_four_index, :], axis=0)
        door_length = np.linalg.norm(direction)
        direction /= door_length

        gripper_position = center + direction * 0.2
        reference_axis = np.array([np.sign(center[0]), 0, 0])
        angle = np.arccos(np.sum(reference_axis * direction)) * np.sign(
            np.cross(reference_axis, direction)[2])
        gripper_quaternion = transforms3d.euler.euler2quat(0, 0, angle, "rxyz")
        if return_size_info:
            return sapyen.Pose(
                gripper_position,
                gripper_quaternion), -direction, door_length, center
        else:
            return sapyen.Pose(gripper_position,
                               gripper_quaternion), -direction
Example #8
    def move_to_target_pose_policy(self, pose: sapyen.Pose):
        target_position = pose.p
        target_quat = pose.q

        # First step pose
        target_position[0] += 0.5 * np.sign(target_position[0])
        first_target_pose = sapyen.Pose(target_position, target_quat)
        self.arrive_pose_with_closed_loop(first_target_pose, loop_step=1)

        # Second step pose
        self.arrive_pose_with_closed_loop(pose, loop_step=1)

        # Rest and close the gripper
        for _ in range(100):
            self.hold_and_step()
Example #9
    def add_camera(self, name: str, camera_pose: Union[np.ndarray, sapyen.Pose], width: int, height: int, fov=1.1,
                   near=0.01, far=100) -> None:
        """
        Add a custom mounted camera to the scene. These cameras have the same properties as the urdf-defined cameras
        :param name: Name of the camera, used to retrieve the camera and as the topic namespace if ROS is enabled
        :param camera_pose: (4, 4) transformation matrix or sapyen.Pose defining the pose of the camera. The camera
            points forward along the positive x-axis; the y-axis and z-axis account for the width and height of the
            image captured by the camera
        :param width: The width of the camera image, e.g. 1920 in a (1920, 1080) hd image
        :param height: The height of the camera image, e.g. 1080 in a (1920, 1080) hd image
        :param fov: Field of view angle in radians. Note that fov is the full angle of view, not the half angle.
            Currently, different fov values for the x and y axes are not supported, so only square pixels are rendered
        :param near: Minimum distance the camera can observe; it affects all texture channels
        :param far: Maximum distance the camera can observe; it affects all texture channels
        """
        actor = self.builder.build(False, True, "{}".format(name), True)
        self.mount_actor_list.append(actor)
        self.camera_name_list.append(name)

        if isinstance(camera_pose, np.ndarray):
            assert camera_pose.shape == (4, 4), "Camera pose matrix must be (4, 4)"
            pose = mat2transform(camera_pose)
        elif isinstance(camera_pose, sapyen.Pose):
            pose = camera_pose
            camera_pose = transform2mat(pose)
        else:
            raise RuntimeError("Unknown format of camera pose: {}".format(type(camera_pose)))

        self.sim.add_mounted_camera(name, actor, sapyen.Pose([0, 0, 0], [1, 0, 0, 0]), width, height, fov, fov,
                                    near, far)
        actor.set_global_pose(pose)

        camera = self.renderer.get_camera(len(self.cam_list))
        self.cam_list.append(camera)
        self.__build_camera_mapping(height, width, camera.get_camera_matrix())
        self.depth_lambda_list.append(lambda depth: 1 / (depth * (1 / far - 1 / near) + 1 / near))
        self.camera_frame_id.append("/base_link")
        self.camera_pose.append((camera_pose @ CAMERA_TO_LINK).astype(np.float32))

        # Build OpenGL camera mapping
        width_points = np.repeat(np.linspace(1 / (2 * width), 1 - 1 / (2 * width), width).reshape([1, -1]), height,
                                 axis=0)
        height_points = np.repeat(np.linspace(1 / (2 * height), 1 - 1 / (2 * height), height)[::-1].reshape([-1, 1]),
                                  width, axis=1)

        points = np.stack([width_points, height_points], 2) * 2 - 1
        homo_padding = np.ones_like(width_points) * 2 - 1
        self.__gl_camera_mapping.append((points, homo_padding[:, :, np.newaxis]))
        self._gl_projection_inv.append(np.linalg.inv(camera.get_projection_mat()))
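
A hedged usage sketch of the two accepted camera_pose formats; the recorder instance (env) is a hypothetical assumption, and the pose values are taken from the bird_view camera in the first example:

import numpy as np
import sapyen
import transforms3d

# Format 1: a sapyen.Pose, as used in the first example.
pose = sapyen.Pose([0.5, -3, 3], transforms3d.euler.euler2quat(0, 1.57, 0))

# Format 2: a (4, 4) homogeneous matrix; the camera looks along its local +x axis.
mat = np.eye(4)
mat[:3, :3] = transforms3d.euler.euler2mat(0, 1.57, 0)
mat[:3, 3] = [0.5, -3, 3]

# "env" stands for a hypothetical recorder/environment instance owning add_camera.
env.add_camera("bird_view_pose", pose, width=960, height=540)
env.add_camera("bird_view_matrix", mat, width=960, height=540)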
Example #10
    def calculate_grasp_pose_from_handle_cloud(
            self, point_cloud: np.ndarray) -> sapyen.Pose:
        # Calculate center and generic pose of gripper
        assert point_cloud.shape[1] == 3, "Point Cloud must be in (n, 3) shape"
        pc = open3d.geometry.PointCloud()
        pc.points = open3d.utility.Vector3dVector(point_cloud)
        bounding_box: open3d.geometry.AxisAlignedBoundingBox = pc.get_axis_aligned_bounding_box()
        center = bounding_box.get_center()
        box_min = bounding_box.get_min_bound()
        box_max = bounding_box.get_max_bound()
        scale = box_max - box_min
        gripper_pose = sapyen.Pose(center)
        if scale[1] > scale[2]:
            print("Horizontal Handle Detected")
            gripper_pose.set_q(
                transforms3d.euler.euler2quat(0, 1.57, 1.57, "rxyz"))
        else:
            print("Vertical Handle Detected")
            gripper_pose.set_q(
                transforms3d.euler.euler2quat(0, 1.57, 3.14, "rxyz"))

        # Move robot to the right place
        self.object.set_qpos(np.ones(self.object.dof()) * 0.01)
        robot_target_pose = self.calculate_pose_in_front_of_semantics(
            "rotation_door")
        target_position = robot_target_pose.p
        target_position[1:3] = [
            np.mean([target_position[1], center[1]]), center[2] - 0.5
        ]
        robot_target_pose.set_p(target_position)
        self.set_robot_base_pose(robot_target_pose)
        self.object.set_qpos(np.ones(self.object.dof()) * 0.01)
        self.sim._step()

        # Add offset for XArm
        gripper_target = self.robot_global_pose.inv().transform(gripper_pose)
        position = gripper_target.p
        position[0] -= 0.16
        gripper_target.set_p(position)
        return gripper_target
Example #11
def rpy2transform(rpy: np.ndarray) -> sapyen.Pose:
    assert rpy.shape == (3,)
    pose = sapyen.Pose(np.zeros(3), transforms3d.euler.euler2quat(*rpy))
    return pose
Example #12
def rot2transform(rot: np.ndarray) -> sapyen.Pose:
    assert rot.shape == (3, 3)
    pose = sapyen.Pose(np.zeros(3), transforms3d.quaternions.mat2quat(rot))
    return pose
Example #13
def mat2transform(mat: np.ndarray) -> sapyen.Pose:
    assert mat.shape == (4, 4)
    pose = sapyen.Pose(mat[:3, 3], transforms3d.quaternions.mat2quat(mat[:3, :3]))
    return pose
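
A short consistency check for the three converters above; it only assumes that sapyen.Pose exposes the q attribute used elsewhere in these examples, and it compares quaternions up to sign:

import numpy as np
import transforms3d

rpy = np.array([0.0, 0.3, 1.2])
rot = transforms3d.euler.euler2mat(*rpy)
mat = np.eye(4)
mat[:3, :3] = rot

q_from_rpy = rpy2transform(rpy).q
q_from_rot = rot2transform(rot).q
q_from_mat = mat2transform(mat).q

def same_rotation(q1, q2):
    # Quaternions q and -q describe the same rotation, so compare via the absolute dot product.
    return abs(float(np.dot(q1, q2))) > 1 - 1e-6

assert same_rotation(q_from_rpy, q_from_rot)
assert same_rotation(q_from_rot, q_from_mat)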
Example #14
def load_scene(sim: sapyen.Simulation):
    builder: sapyen.ActorBuilder = sim.create_actor_builder()
    global_scale = 1
    builder.add_multiple_shapes("../assets/object/walls/wall_scene.obj",
                                sapyen.Pose([0, 0, 0]),
                                [global_scale, global_scale, global_scale])
    builder.add_obj_visual("../assets/object/walls/wall_scene.obj",
                           sapyen.Pose([0, 0, 0]),
                           [global_scale, global_scale, global_scale])
    builder.build(True, False, "scene")
    loader: sapyen.URDFLoader = sim.create_urdf_loader()
    loader.fix_loaded_object = True

    loader.scale = global_scale
    obj1: sapyen.ArticulationWrapper = loader.load(
        "/home/sim/mobility_dataset/mobility_v1_alpha5/102044/mobility.urdf")
    obj1.set_root_pose(
        np.array([1.221, 1.244, 0.614]) * global_scale, [1, 0, 0, 0])

    loader.scale = global_scale * 1.4
    obj2: sapyen.ArticulationWrapper = loader.load(
        "/home/sim/mobility_dataset/mobility_v1_alpha5/10905/mobility.urdf")
    obj2.set_root_pose(
        np.array([0.9, -0.954, 0.622]) * global_scale, [1, 0, 0, 0])

    loader.scale = global_scale * 0.8
    obj3: sapyen.ArticulationWrapper = loader.load(
        "/home/sim/mobility_dataset/mobility_v1_alpha5/12065/mobility.urdf")
    obj3.set_root_pose(
        np.array([1.12, 0.109, 0.582]) * global_scale, [1, 0, 0, 0])
    obj3.set_pd(20000, 3000, 2000, [1])

    loader.scale = global_scale * 1
    obj4: sapyen.ArticulationWrapper = loader.load(
        "/home/sim/mobility_dataset/mobility_v1_alpha5/23511/mobility.urdf")
    obj4.set_root_pose(
        np.array([-2.246, -3.518, 0.910]) * global_scale,
        transforms3d.quaternions.axangle2quat([0, 0, 1], 3.14159))

    loader.scale = global_scale * 1
    obj5: sapyen.ArticulationWrapper = loader.load(
        "/home/sim/mobility_dataset/mobility_v1_alpha5/45594/mobility.urdf")
    obj5.set_root_pose(np.array([1.271, 2.393, 0.946]) * global_scale)

    loader.scale = global_scale * 1
    obj6: sapyen.ArticulationWrapper = loader.load(
        "/home/sim/mobility_dataset/mobility_v1_alpha5/46037/mobility.urdf")
    obj6.set_root_pose(
        np.array([0.597, -3.789, 0.774]) * global_scale,
        transforms3d.quaternions.axangle2quat([0, 0, 1], -1.5708))

    loader.scale = global_scale * 0.4
    obj7: sapyen.ArticulationWrapper = loader.load(
        "/home/sim/mobility_dataset/mobility_v1_alpha5/7310/mobility.urdf")
    obj7.set_root_pose(
        np.array([1.195, 0.847, 1.259]) * global_scale,
        transforms3d.quaternions.axangle2quat([0, 0, 1], -0.1))

    loader.scale = global_scale * 1.5
    obj8: sapyen.ArticulationWrapper = loader.load(
        "/home/sim/mobility_dataset/mobility_v1_alpha5/8867/mobility.urdf")
    obj8.set_root_pose(np.array([-3.127, 2.281, 1.481]) * global_scale)