Code Example #1
File: caging.py  Project: yanweiw/rrc
    def __init__(self):

        self.time_step = 0.004
        self.finger = sim_finger.SimFinger(finger_type=Cage.args.finger_type,
                                           time_step=self.time_step,
                                           enable_visualization=True,)
        self.num_fingers = self.finger.number_of_fingers
        _kwargs = {"physicsClientId": self.finger._pybullet_client_id}

        if Cage.args.control_mode == "position":
            self.position_goals = visual_objects.Marker(number_of_goals=self.num_fingers)

        self.initial_robot_position = np.array(
            [0.0, np.deg2rad(-60), np.deg2rad(-60)] * 3, dtype=np.float32)
        self.finger.reset_finger_positions_and_velocities(self.initial_robot_position)
        self.initial_object_pose = move_cube.Pose(position=[0, 0, move_cube._min_height],
                                                  orientation=move_cube.Pose().orientation)
        self.cube = collision_objects.Block(self.initial_object_pose.position, 
                                            self.initial_object_pose.orientation, mass=0.020, **_kwargs)
        self.cube_z = 0.0325
        self.workspace_radius = 0.18
        self.tricamera = camera.TriFingerCameras(**_kwargs)

        self.initial_marker_position = np.array(
            [[0.0, 0.17856407, self.cube_z],
             [0.15464102, -0.08928203, self.cube_z],
             [-0.15464102, -0.08928203, self.cube_z]])
        if Cage.args.control_mode == "position":
            # the goal markers only exist in position control mode (see above)
            self.position_goals.set_state(self.initial_marker_position)
        self.marker_position = self.initial_marker_position

        self.ik_module = geometric_ik.GeometricIK()
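
The `__init__` above reads its configuration from a class attribute `Cage.args` and relies on rrc_simulation modules that are not shown in the excerpt. As a rough sketch of the setup it seems to assume (module paths and argument names are inferred from the snippet, not confirmed by the project):

# Hypothetical setup for Cage.__init__ above; module paths are inferred
# from the identifiers used in the snippet and may differ in yanweiw/rrc.
import argparse
import numpy as np

from rrc_simulation import sim_finger, visual_objects, collision_objects, camera
from rrc_simulation.tasks import move_cube
# geometric_ik: assumed to be a project-local module providing GeometricIK
import geometric_ik


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--control-mode", default="position",
                        choices=["position", "torque"])
    parser.add_argument("--finger-type", default="tri")
    return parser.parse_args()


# the snippet expects the parsed arguments as a class attribute before the
# class is instantiated, e.g.:
#   Cage.args = parse_args()
#   cage = Cage()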
Code Example #2
def main():
    time_step = 0.004
    finger = sim_finger.SimFinger(
        finger_type="trifingerone",
        time_step=time_step,
        enable_visualization=True,
    )

    # Important: The cameras need to be created _after_ the simulation is
    # initialized.
    cameras = camera.TriFingerCameras()

    # Move the fingers to random positions
    while True:
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            ))
        finger_action = finger.Action(position=goal)

        for _ in range(50):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)

            images = cameras.get_images()
            # images are rgb --> convert to bgr for opencv
            images = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in images]
            cv2.imshow("camera60", images[0])
            cv2.imshow("camera180", images[1])
            cv2.imshow("camera300", images[2])
            cv2.waitKey(int(time_step * 1000))
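
This demo omits its imports. A minimal header that would make it runnable could look as follows; the `sim_finger` import matches Code Example #7, while the `camera` and `sample` module paths are assumptions based on the package layout suggested by the other examples.

import cv2          # OpenCV, used to display the camera images
import numpy as np

from rrc_simulation import sim_finger      # as in Code Example #7
from rrc_simulation import camera, sample  # assumed module paths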
Code Example #3
def main():
    """Sample random joint positions to illustrate the end-effector workspace."""
    time_step = 0.004

    finger = sim_finger.SimFinger(
        finger_type='fingerone',
        time_step=time_step,
        enable_visualization=True,
    )
    num_fingers = finger.number_of_fingers

    position_goals = visual_objects.Marker(number_of_goals=num_fingers)

    for _ in range(500):
        # sample a random joint configuration for the single finger
        jpos = np.array(sample.random_joint_positions(number_of_fingers=1))
        # semi-transparent red sphere used to mark the reached fingertip position
        marker_id = pybullet.createVisualShape(shapeType=pybullet.GEOM_SPHERE,
                                               radius=0.015,
                                               rgbaColor=[1, 0, 0, 0.5])
        goal_id = pybullet.createMultiBody(baseVisualShapeIndex=marker_id,
                                           basePosition=[0.18, 0.18, 0.08],
                                           baseOrientation=[0, 0, 0, 1])
        # move the sphere to the fingertip position given by forward kinematics
        pybullet.resetBasePositionAndOrientation(
            goal_id,
            finger.pinocchio_utils.forward_kinematics(jpos)[0], [0, 0, 0, 1])
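
The loop above registers a new, identical sphere shape in every iteration. A small variation on the same idea (a sketch, using only the pybullet calls already shown) creates the visual shape once and spawns the markers directly at the fingertip positions:

    # create the semi-transparent red sphere shape once, outside the loop
    marker_id = pybullet.createVisualShape(shapeType=pybullet.GEOM_SPHERE,
                                           radius=0.015,
                                           rgbaColor=[1, 0, 0, 0.5])
    for _ in range(500):
        jpos = np.array(sample.random_joint_positions(number_of_fingers=1))
        # spawn a marker body directly at the fingertip position for this sample
        pybullet.createMultiBody(
            baseVisualShapeIndex=marker_id,
            basePosition=finger.pinocchio_utils.forward_kinematics(jpos)[0],
            baseOrientation=[0, 0, 0, 1])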
Code Example #4
def main():

    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument(
        "--control-mode",
        default="position",
        choices=["position", "torque"],
        help="Specify position or torque as the control mode.",
    )
    argparser.add_argument(
        "--finger-type",
        default="tri",
        choices=finger_types_data.get_valid_finger_types(),
        help="Specify type of finger as single or tri.",
    )
    args = argparser.parse_args()
    time_step = 0.004

    finger = sim_finger.SimFinger(
        finger_type=args.finger_type,
        time_step=time_step,
        enable_visualization=True,
    )
    num_fingers = finger.number_of_fingers

    if args.control_mode == "position":
        position_goals = visual_objects.Marker(number_of_goals=num_fingers)

    while True:

        if args.control_mode == "position":
            desired_joint_positions = np.array(
                sample.random_joint_positions(number_of_fingers=num_fingers)
            )
            finger_action = finger.Action(position=desired_joint_positions)
            # visualize the goal position of the finger tip
            position_goals.set_state(
                finger.pinocchio_utils.forward_kinematics(
                    desired_joint_positions
                )
            )
        if args.control_mode == "torque":
            desired_joint_torques = [random.random()] * 3 * num_fingers
            finger_action = finger.Action(torque=desired_joint_torques)

        # pursue this goal for one second
        for _ in range(int(1 / time_step)):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)
            time.sleep(time_step)
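
In torque mode, `[random.random()] * 3 * num_fingers` repeats a single random value, so every joint receives the same torque. A hedged variation that samples an independent torque per joint (the ±0.2 range is an arbitrary choice for illustration, not a value from the demo):

        if args.control_mode == "torque":
            # independent random torque per joint; bounds chosen for illustration
            desired_joint_torques = [
                random.uniform(-0.2, 0.2) for _ in range(3 * num_fingers)
            ]
            finger_action = finger.Action(torque=desired_joint_torques)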
Code Example #5
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--save-poses",
        type=str,
        metavar="FILENAME",
        help="""If set, the demo stops when the history of the object pose time
            series is full and stores the buffered poses to the specified file.
        """,
    )
    parser.add_argument(
        "--history-size",
        type=int,
        default=1000,
        help="""Size of the object pose time series.  Default: %(default)d""",
    )
    parser.add_argument(
        "--non-real-time",
        action="store_true",
        help="""Do not run the simulation in real-time but as fast as
            possible.
        """,
    )
    args = parser.parse_args()

    real_time = not args.non_real_time
    time_step = 0.004
    finger = sim_finger.SimFinger(
        finger_type="trifingerone",
        time_step=time_step,
        enable_visualization=True,
    )

    # Object and Object Tracker Interface
    # ===================================
    #
    # Important:  These objects need to be created _after_ the simulation is
    # initialized (i.e. after the SimFinger instance is created).

    # spawn a cube in the arena
    cube = collision_objects.Block()

    # initialize the object tracker interface
    object_tracker_data = object_tracker.Data("object_tracker", True,
                                              args.history_size)
    object_tracker_backend = object_tracker.SimulationBackend(
        object_tracker_data, cube, real_time)
    object_tracker_frontend = object_tracker.Frontend(object_tracker_data)

    # Move the fingers to random positions so that the cube is kicked around
    # (and thus its position changes).
    while True:
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            ))
        finger_action = finger.Action(position=goal)

        for _ in range(50):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)
            if real_time:
                time.sleep(time_step)

        # get the latest pose estimate of the cube and print it
        t_obj = object_tracker_frontend.get_current_timeindex()
        cube_pose = object_tracker_frontend.get_pose(t_obj)
        print("Cube Pose: %s" % cube_pose)

        # if --save-poses is set, stop when the buffer is full and write it to
        # a file
        if args.save_poses and t_obj > args.history_size:
            # the backend needs to be stopped before saving the buffered poses
            object_tracker_backend.stop()
            object_tracker_backend.store_buffered_data(args.save_poses)
            break
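
The finger-motion block in this demo repeats the one from Code Example #2. If it were factored into a helper, a sketch using only the calls already shown could look like this:

def kick_cube_with_random_motion(finger, time_step, real_time=True, steps=50):
    """Drive the fingers to a random position for `steps` control steps."""
    goal = np.array(
        sample.random_joint_positions(
            number_of_fingers=3,
            lower_bounds=[-1, -1, -2],
            upper_bounds=[1, 1, 2],
        ))
    action = finger.Action(position=goal)
    for _ in range(steps):
        t = finger.append_desired_action(action)
        finger.get_observation(t)
        if real_time:
            time.sleep(time_step)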
Code Example #6
def main():

    argparser = argparse.ArgumentParser(description=__doc__)

    argparser.add_argument(
        "--demo_trajectory",
        default="phase_walking",
        choices=["phase_walking", "circle_drawing", "setpoint"],
        help="Choose which demo trajectory to run.",
    )

    args = argparser.parse_args()

    time_step = 0.004

    finger = sim_finger.SimFinger(
        finger_type="tri",
        time_step=time_step,
        enable_visualization=True,
    )
    num_fingers = finger.number_of_fingers
    ik_module = geometric_ik.GeometricIK()

    #position_goals = visual_objects.Marker(number_of_goals=num_fingers)

    # First, move to a preconfigured joint configuration so that the IK module
    # does not start near a singularity.
    desired_joint_positions = np.array(
        [0.0, -0.5, -0.5, 0.0, -0.5, -0.5, 0.0, -0.5, -0.5])
    finger_action = finger.Action(position=desired_joint_positions)

    # Give the simulation one second to reach this configuration.
    for _ in range(int(1 / time_step)):
        t = finger.append_desired_action(finger_action)
        finger.get_observation(t)
        time.sleep(time_step)

    while True:

        # Time to dance!
        current_time = time.time()

        if (args.demo_trajectory == "phase_walking"):
            desired_ee_trajectory = np.array([0.1 * np.sin(0),
                                              0.1 * np.cos(0),
                                              0.1 + 0.05 * np.sin(current_time), # phase 0
                                              0.1 * np.sin((2./3.) * np.pi),
                                              0.1 * np.cos((2./3.) * np.pi),
                                              0.1 + 0.05 * np.sin(
                                                  current_time + (2./3.) * np.pi),
                                              0.1 * np.sin((4./3.) * np.pi),
                                              0.1 * np.cos((4./3.) * np.pi),
                                              0.1 + 0.05 * np.sin(
                                                  current_time + (4./3.) * np.pi)])
        elif (args.demo_trajectory == "circle_drawing"):
            desired_ee_trajectory = np.array([0.1 * np.sin(0) + 0.05 * np.cos(current_time),
                                              0.1 * np.cos(0) + 0.05 * np.sin(current_time),
                                              0.1,
                                              0.1 * np.sin((2./3.) * np.pi) +
                                                0.05 * np.cos(current_time),
                                              0.1 * np.cos((2./3.) * np.pi) +
                                                0.05 * np.sin(current_time),
                                              0.1,
                                              0.1 * np.sin((4./3.) * np.pi) +
                                                0.05 * np.cos(current_time),
                                              0.1 * np.cos((4./3.) * np.pi) +
                                                0.05 * np.sin(current_time),
                                              0.1])
            
        elif (args.demo_trajectory == "setpoint"):
            desired_ee_trajectory = np.array([0., 0.1, 0.15, 0.1, 0.0, 0.15, -0.1, 0.0, 0.15])
                                              
        desired_joint_positions = ik_module.compute_ik(desired_ee_trajectory)
        finger_action = finger.Action(position=desired_joint_positions)

        t = finger.append_desired_action(finger_action)
        finger.get_observation(t)
        time.sleep(time_step)
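
For reference, the phase_walking branch differs between fingers only by a phase offset of 2/3 pi, so the same 9-element target vector can be built more compactly (same math as above, just refactored; assumes `numpy as np` as in the demo):

def phase_walking_targets(current_time):
    # x/y fixed on a circle of radius 0.1; z oscillates with a per-finger phase
    targets = []
    for phase in (0.0, (2. / 3.) * np.pi, (4. / 3.) * np.pi):
        targets += [0.1 * np.sin(phase),
                    0.1 * np.cos(phase),
                    0.1 + 0.05 * np.sin(current_time + phase)]
    return np.array(targets)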
Code Example #7
#!/usr/bin/env python3
"""Demonstrate how to run the simulated finger with torque control."""
import time
import numpy as np

from rrc_simulation import sim_finger


if __name__ == "__main__":
    time_step = 0.001

    finger = sim_finger.SimFinger(
        finger_type="fingerone",
        time_step=time_step,
        enable_visualization=True,
    )
    # set the finger to a reasonable start position
    finger.reset_finger_positions_and_velocities([0, -0.7, -1.5])

    # Send a constant torque to the joints, switching direction periodically.
    torque = np.array([0.0, 0.3, 0.3])
    while True:
        time.sleep(time_step)

        action = finger.Action(torque=torque)
        t = finger.append_desired_action(action)
        observation = finger.get_observation(t)

        # invert the direction of the command every 100 steps
        if t % 100 == 0:
            torque *= -1
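
A small variation of the same loop, using only the API calls shown above, applies a smooth sinusoidal torque profile instead of flipping the sign every 100 steps (the 0.5 s period is an arbitrary choice for illustration):

    torque_amplitude = np.array([0.0, 0.3, 0.3])
    period = 0.5  # seconds; arbitrary choice for illustration
    step = 0
    while True:
        time.sleep(time_step)

        # scale the torque by a sinusoid so the direction changes smoothly
        torque = torque_amplitude * np.sin(2 * np.pi * step * time_step / period)
        action = finger.Action(torque=torque)
        t = finger.append_desired_action(action)
        observation = finger.get_observation(t)
        step += 1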