Example #1
def __init__(self, fps, image_size=(270, 270)):
    #: Frame rate (in fps) of the recorded video.
    self.fps = fps
    #: Size of the rendered camera images.
    self.image_size = image_size
    #: Size of the resulting video frame (initialized later).
    self.frame_size = None
    #: The three simulated TriFinger cameras used for rendering.
    self.cameras = camera.TriFingerCameras(image_size=image_size)
    self._add_new_camera()
    #: Captured frames are collected here.
    self.frames = []
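
Only the constructor of this video-recorder class is shown above. As a rough sketch of how the rest might look, a capture method could render one image per camera and stack them into a single video frame. The method name capture_frame and the horizontal stacking are assumptions for illustration, not part of the original example:

def capture_frame(self):
    # Render one image per camera and stack them side by side into a
    # single video frame (assumes numpy is imported as np).
    images = self.cameras.get_images()
    frame = np.hstack(images)
    if self.frame_size is None:
        self.frame_size = frame.shape[:2]
    self.frames.append(frame)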
Example #2

import cv2
import numpy as np

# assuming these modules come from the trifinger_simulation package
from trifinger_simulation import camera, sample, sim_finger


def main():
    time_step = 0.004
    finger = sim_finger.SimFinger(
        finger_type="trifingerone",
        time_step=time_step,
        enable_visualization=True,
    )

    # Important: The cameras need to be created _after_ the simulation is
    # initialized.
    cameras = camera.TriFingerCameras()

    # Move the fingers to random positions
    while True:
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            ))
        finger_action = finger.Action(position=goal)

        for _ in range(50):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)

            images = cameras.get_images()
            # images are rgb --> convert to bgr for opencv
            images = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in images]
            cv2.imshow("camera60", images[0])
            cv2.imshow("camera180", images[1])
            cv2.imshow("camera300", images[2])
            cv2.waitKey(int(time_step * 1000))
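
The cv2.imshow windows above require a display. On a headless machine, a minimal alternative (an illustration, not part of the original example) is to write the images to disk instead of showing them:

import cv2

# replacement for the cv2.imshow/cv2.waitKey calls in the inner loop
for name, img in zip(("camera60", "camera180", "camera300"), images):
    cv2.imwrite("/tmp/{}.png".format(name), img)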
Example #3

def __init__(self, env):
    super().__init__(env)
    #: Cameras used for rendering "rgb_array" frames of the wrapped env.
    self.cameras = camera.TriFingerCameras(image_size=(360, 270))
    self.metadata = {"render.modes": ["rgb_array"]}
    self._initial_reset = True
    self._accum_reward = 0
    self._reward_at_step = 0
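
This fragment is the constructor of a gym wrapper; the "rgb_array" entry in metadata suggests the class also implements a render method. A plausible sketch of such a method, assuming it returns the image of the first camera (an illustration, not the original code):

def render(self, mode="rgb_array"):
    # Return an RGB image rendered by one of the simulated cameras.
    assert mode == "rgb_array", "only rgb_array mode is supported"
    return self.cameras.get_images()[0]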
Example #4
import pybullet

# assuming the camera module comes from the trifinger_simulation package
from trifinger_simulation import camera as sim_camera


def test_trifingercameras():
    pybullet.connect(pybullet.DIRECT)

    width = 150
    height = 100
    expected_shape = (height, width, 3)

    tricamera = sim_camera.TriFingerCameras(image_size=(width, height))

    images = tricamera.get_images()
    assert len(images) == 3
    assert images[0].shape == expected_shape
    assert images[1].shape == expected_shape
    assert images[2].shape == expected_shape

    pybullet.disconnect()
Example #5

import argparse
import pathlib

import cv2
import numpy as np

# assuming these modules come from the trifinger_simulation package
from trifinger_simulation import camera, sample, sim_finger


def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--robot",
        "-r",
        choices=["trifingerone", "trifingerpro"],
        default="trifingerone",
        help="Which robot to use.  Default: %(default)s",
    )
    parser.add_argument(
        "--config-dir",
        "-c",
        type=pathlib.Path,
        help="""Path to the directory containing camera calibration files. This
            is optional, if not specified, some default values will be used.
        """,
    )
    parser.add_argument(
        "--param-file-format",
        type=str,
        default="camera{id}.yml",
        help="""Format of the camera parameter files. '{id}' is replaced with
            the camera id.  Default: %(default)s
        """,
    )
    args = parser.parse_args()

    time_step = 0.004
    finger = sim_finger.SimFinger(
        finger_type=args.robot,
        time_step=time_step,
        enable_visualization=True,
    )

    # Important: The cameras need to be created _after_ the simulation is
    # initialized.
    if args.config_dir:
        cameras = camera.create_trifinger_camera_array_from_config(
            args.config_dir, calib_filename_pattern=args.param_file_format)
    else:
        cameras = camera.TriFingerCameras()

    # Move the fingers to random positions
    while True:
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            ))
        finger_action = finger.Action(position=goal)

        for _ in range(50):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)

            images = cameras.get_images()
            # images are rgb --> convert to bgr for opencv
            images = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in images]
            cv2.imshow("camera60", images[0])
            cv2.imshow("camera180", images[1])
            cv2.imshow("camera300", images[2])
            key = cv2.waitKey(int(time_step * 1000))
            if key == ord("q"):
                return
Example #6

    def __init__(
        self,
        visualization=False,
        initial_robot_position=None,
        initial_object_pose=None,
        enable_cameras=False,
    ):
        """Initialize.

        Args:
            visualization (bool):  Set to true to run visualization.
            initial_robot_position: Initial robot joint angles
            initial_object_pose:  Initial pose for the manipulation object.
                Can be any object with attributes ``position`` (x, y, z) and
                ``orientation`` (x, y, z, w).  This is optional; if not set, a
                random pose is sampled.
            enable_cameras (bool):  Set to true to enable camera observations.
                By default this is disabled, as rendering images is
                computationally expensive, so the cameras should only be
                enabled if the images are actually used.
        """
        #: Camera rate in frames per second.  Camera and object pose
        #: observations are only updated at this rate.
        #: NOTE: This is currently not used!
        self._camera_rate_fps = 30

        #: Set to true to render camera observations
        self.enable_cameras = enable_cameras

        #: Simulation time step
        self._time_step = 0.004

        # first camera update in the first step
        self._next_camera_update_step = 0

        # Initialize robot, object and cameras
        # ====================================

        self.simfinger = SimFinger(
            finger_type="trifingerpro",
            time_step=self._time_step,
            enable_visualization=visualization,
        )

        if initial_robot_position is None:
            initial_robot_position = self.spaces.robot_position.default

        self.simfinger.reset_finger_positions_and_velocities(
            initial_robot_position)

        if initial_object_pose is None:
            initial_object_pose = move_cube.Pose(
                position=self.spaces.object_position.default,
                orientation=self.spaces.object_orientation.default,
            )
        self.cube = collision_objects.Block(
            initial_object_pose.position,
            initial_object_pose.orientation,
            mass=0.020,
        )

        self.tricamera = camera.TriFingerCameras()

        # Forward some methods for convenience
        # ====================================
        # forward "RobotFrontend" methods directly to simfinger
        self.Action = self.simfinger.Action
        self.get_desired_action = self.simfinger.get_desired_action
        self.get_applied_action = self.simfinger.get_applied_action
        self.get_timestamp_ms = self.simfinger.get_timestamp_ms
        self.get_current_timeindex = self.simfinger.get_current_timeindex
        self.get_robot_observation = self.simfinger.get_observation

        # forward kinematics directly to simfinger
        self.forward_kinematics = (
            self.simfinger.pinocchio_utils.forward_kinematics)

        # Initialize log
        # ==============
        self._action_log = {
            "initial_robot_position": initial_robot_position.tolist(),
            "initial_object_pose": {
                "position": initial_object_pose.position.tolist(),
                "orientation": initial_object_pose.orientation.tolist(),
            },
            "actions": [],
        }
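
Only the constructor is shown, so the class name does not appear in this fragment. As a hedged usage sketch, assuming the class is called TriFingerPlatform (as in the trifinger_simulation package) and that it also exposes an append_desired_action method alongside the forwarded frontend methods above:

# hypothetical usage; class name and append_desired_action are assumptions
platform = TriFingerPlatform(visualization=True, enable_cameras=True)

for _ in range(100):
    # Hold the default joint position.
    action = platform.Action(position=platform.spaces.robot_position.default)
    t = platform.append_desired_action(action)
    observation = platform.get_robot_observation(t)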