Example #1
def main():
    time_step = 0.004
    finger = sim_finger.SimFinger(
        finger_type="trifingerone",
        time_step=time_step,
        enable_visualization=True,
    )

    # Important: The cameras need to be created _after_ the simulation is
    # initialized.
    cameras = camera.TriFingerCameras()

    # Move the fingers to random positions
    while True:
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            ))
        finger_action = finger.Action(position=goal)

        for _ in range(50):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)

            images = cameras.get_images()
            # images are rgb --> convert to bgr for opencv
            images = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in images]
            cv2.imshow("camera60", images[0])
            cv2.imshow("camera180", images[1])
            cv2.imshow("camera300", images[2])
            cv2.waitKey(int(time_step * 1000))
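The pattern above is the core SimFinger stepping loop: append_desired_action(action) applies the action for one simulation step and returns a time index t, and get_observation(t) fetches the observation belonging to that step. A minimal standalone sketch of just that loop, without the cameras; that the observation exposes a position field is inferred from the log-replay example further down:

import numpy as np

from trifinger_simulation import sim_finger

finger = sim_finger.SimFinger(finger_type="trifingerone", time_step=0.004)

# hold a single target and read back the measured joint positions
action = finger.Action(position=np.array([0.0, -0.7, -1.5] * 3))
for _ in range(100):
    t = finger.append_desired_action(action)  # steps the simulation
    observation = finger.get_observation(t)

print(observation.position)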
Example #2
def main():
    # argparser = argparse.ArgumentParser(description=__doc__)
    # args = argparser.parse_args()

    time_step = 0.001

    finger = sim_finger.SimFinger(
        finger_type="trifingerpro",
        time_step=time_step,
        enable_visualization=True,
    )

    markers = []
    while True:
        # action = finger.Action(torque=[0.3, 0.3, -0.2] * 3)
        action = finger.Action(position=[0.0, 0.9, -1.7] * 3)
        t = finger.append_desired_action(action)
        finger.get_observation(t)

        # replace the old markers (their visual objects are cleaned up once
        # all references to them are dropped)
        markers = visualize_collisions(finger)

        time.sleep(time_step)
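visualize_collisions is a helper defined elsewhere in this demo, not part of the library. A rough sketch of what such a helper could look like, built on pybullet.getContactPoints; the finger_id attribute and the marker handling are assumptions, so the real demo may differ:

import pybullet

def visualize_collisions(finger):
    # Sketch: collect the robot's current contact points (assumed helper;
    # finger_id is assumed to be the robot's pybullet body id).
    markers = []
    for contact in pybullet.getContactPoints(bodyA=finger.finger_id):
        position_on_robot = contact[5]  # world position of the contact
        # the real demo presumably spawns a small visual marker here
        markers.append(position_on_robot)
    return markers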
Example #3
def main():
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument(
        "--control-mode",
        default="position",
        choices=["position", "torque"],
        help="Specify position or torque as the control mode.",
    )
    argparser.add_argument(
        "--finger-type",
        default="trifingerone",
        choices=finger_types_data.get_valid_finger_types(),
        help="Specify a valid finger type",
    )
    args = argparser.parse_args()
    time_step = 0.004

    finger = sim_finger.SimFinger(
        finger_type=args.finger_type,
        time_step=time_step,
        enable_visualization=True,
    )
    num_fingers = finger.number_of_fingers

    if args.control_mode == "position":
        position_goals = visual_objects.Marker(number_of_goals=num_fingers)

    while True:

        if args.control_mode == "position":
            desired_joint_positions = np.array(
                sample.random_joint_positions(number_of_fingers=num_fingers))
            finger_action = finger.Action(position=desired_joint_positions)
            # visualize the goal position of the finger tip
            position_goals.set_state(
                finger.pinocchio_utils.forward_kinematics(
                    desired_joint_positions))
        if args.control_mode == "torque":
            desired_joint_torques = [random.random()] * 3 * num_fingers
            finger_action = finger.Action(torque=desired_joint_torques)

        # pursue this goal for one second
        for _ in range(int(1 / time_step)):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)
            time.sleep(time_step)
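Example #3 places the goal markers at the fingertip positions obtained from finger.pinocchio_utils.forward_kinematics. A standalone sketch of that call, assuming (as the marker usage above suggests) that it returns one Cartesian tip position per finger:

import numpy as np

from trifinger_simulation import sim_finger, sample

finger = sim_finger.SimFinger(finger_type="trifingerone", time_step=0.004)

joint_positions = np.array(
    sample.random_joint_positions(number_of_fingers=3)
)
# one (x, y, z) tip position per finger
tip_positions = finger.pinocchio_utils.forward_kinematics(joint_positions)
for i, tip in enumerate(tip_positions):
    print("finger %d tip: %s" % (i, tip))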
Example #4
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--save-poses",
        type=str,
        metavar="FILENAME",
        help="""If set, the demo stops when the history of the object pose time
            series is full and stores the buffered poses to the specified file.
        """,
    )
    parser.add_argument(
        "--history-size",
        type=int,
        default=1000,
        help="""Size of the object pose time series.  Default: %(default)d""",
    )
    parser.add_argument(
        "--non-real-time",
        action="store_true",
        help="""Do not run the simulation in real-time but as fast as
            possible.
        """,
    )
    args = parser.parse_args()

    real_time = not args.non_real_time
    time_step = 0.004
    finger = sim_finger.SimFinger(
        finger_type="trifingerone",
        time_step=time_step,
        enable_visualization=True,
    )

    # Object and Object Tracker Interface
    # ===================================
    #
    # Important:  These objects need to be created _after_ the simulation is
    # initialized (i.e. after the SimFinger instance is created).

    # spawn a cube in the arena
    cube = collision_objects.Block()

    # initialize the object tracker interface
    object_tracker_data = object_tracker.Data("object_tracker", True,
                                              args.history_size)
    object_tracker_backend = object_tracker.SimulationBackend(
        object_tracker_data, cube, real_time)
    object_tracker_frontend = object_tracker.Frontend(object_tracker_data)

    # Move the fingers to random positions so that the cube is kicked around
    # (and thus its position changes).
    while True:
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            ))
        finger_action = finger.Action(position=goal)

        for _ in range(50):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)
            if real_time:
                time.sleep(time_step)

        # get the latest pose estimate of the cube and print it
        t_obj = object_tracker_frontend.get_current_timeindex()
        cube_pose = object_tracker_frontend.get_pose(t_obj)
        print("Cube Pose: %s" % cube_pose)

        # if --save-poses is set, stop when the buffer is full and write it to
        # a file
        if args.save_poses and t_obj > args.history_size:
            # the backend needs to be stopped before saving the buffered poses
            object_tracker_backend.stop()
            object_tracker_backend.store_buffered_data(args.save_poses)
            break
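The tracker interface above splits into a Data buffer, a SimulationBackend that fills it with poses of the simulated cube, and a Frontend for reading. A minimal read-side sketch as a continuation of the example, reusing the object_tracker_frontend created above (get_current_timeindex and get_pose are the calls used in the demo itself):

# read-side sketch, assuming the tracker triple is set up as above
t_obj = object_tracker_frontend.get_current_timeindex()
for t in range(max(0, t_obj - 10), t_obj + 1):
    # poses of the last few buffered time steps
    print(object_tracker_frontend.get_pose(t))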
Example #5
import argparse

import matplotlib.pyplot as plt
import numpy as np

from trifinger_simulation import sim_finger, sample


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--time-step", "-t", type=float, required=True)
    parser.add_argument("--plot", action="store_true")
    args = parser.parse_args()

    time_step = args.time_step

    finger = sim_finger.SimFinger(
        time_step=time_step,
        enable_visualization=True,
        finger_type="fingerone",
    )
    # set the finger to a reasonable start position
    finger.reset_finger_positions_and_velocities([0, -0.7, -1.5])

    errors = []
    for _ in range(100):
        target_position = np.array(
            sample.random_joint_positions(number_of_fingers=1)
        )

        action = finger.Action(position=target_position)
        positions = []
        for _ in range(500):
            t = finger.append_desired_action(action)
            observation = finger.get_observation(t)
            positions.append(observation.position)

        # error between the target and the position reached at the end
        errors.append(np.linalg.norm(target_position - positions[-1]))

    if args.plot:
        plt.plot(errors)
        plt.show()
Example #6
import argparse

import numpy as np

from trifinger_simulation import sim_finger, sample


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--time-step", "-t", type=float, required=True)
    parser.add_argument("--plot", type=int)
    args = parser.parse_args()

    time_step = args.time_step

    finger = sim_finger.SimFinger(
        time_step=time_step,
        enable_visualization=True,
        finger_type="fingerpro",
        # spawn robot higher up to avoid collisions with the table
        robot_position_offset=(0, 0, 0.5),
    )

    errors = []
    for _ in range(100):
        target_position = np.array(
            sample.random_joint_positions(
                number_of_fingers=1,
                lower_bounds=[-0.33, 0.0, -2.7],
                upper_bounds=[1.0, 1.57, 0.0],
            )
        )

        action = finger.Action(position=target_position)
        positions = []
        for _ in range(500):
            t = finger.append_desired_action(action)
            observation = finger.get_observation(t)
            positions.append(observation.position)

        # error between the target and the position reached at the end
        errors.append(np.linalg.norm(target_position - positions[-1]))
Example #7
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("log_dir",
                        type=pathlib.Path,
                        help="Path to the log files.")
    parser.add_argument("--rate",
                        type=int,
                        default=1,
                        help="Time in ms per step.")
    args = parser.parse_args()

    robot_log_file = str(args.log_dir / "robot_data.dat")
    camera_log_file = str(args.log_dir / "camera_data.dat")

    log = robot_fingers.TriFingerPlatformLog(robot_log_file, camera_log_file)

    simulation = sim_finger.SimFinger(
        finger_type="trifingerpro",
        enable_visualization=True,
    )
    cameras = camera.create_trifinger_camera_array_from_config(args.log_dir)

    cube_urdf_file = (pathlib.Path(trifinger_simulation.__file__).parent /
                      "data/cube_v2/cube_v2.urdf")
    cube = pybullet.loadURDF(fileName=str(cube_urdf_file))
    assert cube >= 0, "Failed to load cube model."

    pybullet.configureDebugVisualizer(lightPosition=(0, 0, 1.0))
    pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_SHADOWS, 1)
    pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_RGB_BUFFER_PREVIEW,
                                      0)
    pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_DEPTH_BUFFER_PREVIEW,
                                      0)
    # yes, pybullet really spells it segmentation "mark", not "mask"
    pybullet.configureDebugVisualizer(
        pybullet.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 0)

    for t in range(log.get_first_timeindex(),
                   log.get_last_timeindex() + 1, 100):
        robot_observation = log.get_robot_observation(t)
        simulation.reset_finger_positions_and_velocities(
            robot_observation.position)

        # get rendered images from simulation
        sim_images = cameras.get_images()
        # images are rgb --> convert to bgr for opencv
        sim_images = [
            cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in sim_images
        ]
        sim_images = np.hstack(sim_images)

        # get real images from cameras
        try:
            camera_observation = log.get_camera_observation(t)
            # set cube pose
            pybullet.resetBasePositionAndOrientation(
                cube,
                camera_observation.object_pose.position,
                camera_observation.object_pose.orientation,
            )

            real_images = [
                utils.convert_image(cam.image)
                for cam in camera_observation.cameras
            ]
            real_images = np.hstack(real_images)
        except Exception as e:
            print(e)
            real_images = np.zeros_like(sim_images)

        # display images
        image = np.vstack((sim_images, real_images))
        cv2.imshow("cameras", image)
        key = cv2.waitKey(args.rate)
        # exit on "q"
        if key == ord("q"):
            return
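TriFingerPlatformLog bundles the robot and camera log files behind one time-indexed interface, which is why the loop above can step through both with a single index t. A minimal sketch that walks only the robot data (all method names are taken from the example itself; the file names are placeholders):

import robot_fingers

# placeholder file names; use the paths of an actual log directory
log = robot_fingers.TriFingerPlatformLog("robot_data.dat", "camera_data.dat")

# print every 1000th measured joint position
for t in range(log.get_first_timeindex(), log.get_last_timeindex() + 1, 1000):
    robot_observation = log.get_robot_observation(t)
    print(t, robot_observation.position)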
Example #8
import argparse
import typing

from trifinger_simulation import (
    finger_types_data,
    sim_finger,
)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "finger_type",
        choices=finger_types_data.get_valid_finger_types(),
    )
    args = parser.parse_args()

    time_step = 0.001

    finger = sim_finger.SimFinger(
        finger_type=args.finger_type,
        time_step=time_step,
        enable_visualization=True,
    )

    _config: typing.Dict[str, typing.Dict[str, typing.Sequence[float]]] = {
        "fingerone": {
            "joint_pos": (0, -0.7, -1.5),
            "cube_pos": (0.05, -0.08, 0.04),
        },
        "trifingerone": {
            "joint_pos": (0, -0.7, -1.5) * 3,
            "cube_pos": (0.05, 0.08, 0.04),
        },
        "fingeredu": {
            "joint_pos": (0, 0.7, -1.5),
            "cube_pos": (0.08, 0.05, 0.04),
Example #9
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--robot",
        "-r",
        choices=["trifingerone", "trifingerpro"],
        default="trifingerone",
        help="Which robot to use.  Default: %(default)s",
    )
    parser.add_argument(
        "--config-dir",
        "-c",
        type=pathlib.Path,
        help="""Path to the directory containing camera calibration files. This
            is optional, if not specified, some default values will be used.
        """,
    )
    parser.add_argument(
        "--param-file-format",
        type=str,
        default="camera{id}.yml",
        help="""Format of the camera parameter files. '{id}' is replaced with
            the camera id.  Default: %(default)s
        """,
    )
    args = parser.parse_args()

    time_step = 0.004
    finger = sim_finger.SimFinger(
        finger_type=args.robot,
        time_step=time_step,
        enable_visualization=True,
    )

    # Important: The cameras need to be created _after_ the simulation is
    # initialized.
    if args.config_dir:
        cameras = camera.create_trifinger_camera_array_from_config(
            args.config_dir, calib_filename_pattern=args.param_file_format)
    else:
        cameras = camera.TriFingerCameras()

    # Move the fingers to random positions
    while True:
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            ))
        finger_action = finger.Action(position=goal)

        for _ in range(50):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)

            images = cameras.get_images()
            # images are rgb --> convert to bgr for opencv
            images = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in images]
            cv2.imshow("camera60", images[0])
            cv2.imshow("camera180", images[1])
            cv2.imshow("camera300", images[2])
            key = cv2.waitKey(int(time_step * 1000))
            if key == ord("q"):
                return
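The '{id}' placeholder in --param-file-format is substituted with each camera id; judging by the window names above, these are 60, 180 and 300. A hedged sketch of the directory layout and call this implies (the path is a placeholder):

import pathlib

from trifinger_simulation import camera

# assumed layout of the calibration directory (placeholder path):
#   /path/to/config_dir/camera60.yml
#   /path/to/config_dir/camera180.yml
#   /path/to/config_dir/camera300.yml
cameras = camera.create_trifinger_camera_array_from_config(
    pathlib.Path("/path/to/config_dir"),
    calib_filename_pattern="camera{id}.yml",
)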