def execute_random_motion(*, finger_name, nb_timesteps, enable_visualization):
    finger = SimFinger(
        finger_type=finger_name,
        enable_visualization=enable_visualization,
    )
    cube = collision_objects.Block()

    for t in range(nb_timesteps):
        if t % 50 == 0:
            torque = np.random.uniform(low=-0.36, high=0.36, size=9)
        finger.append_desired_action(finger.Action(torque=torque))
    del finger
    del cube
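# Hypothetical usage sketch (not part of the original example): running the
# random-torque demo above.  The finger type and number of timesteps are
# illustrative values only.
if __name__ == "__main__":
    execute_random_motion(
        finger_name="trifingerone",
        nb_timesteps=1000,
        enable_visualization=False,
    )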
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--save-poses",
        type=str,
        metavar="FILENAME",
        help="""If set, the demo stops when the history of the object pose time
            series is full and stores the buffered poses to the specified file.
        """,
    )
    parser.add_argument(
        "--history-size",
        type=int,
        default=1000,
        help="""Size of the object pose time series.  Default: %(default)d""",
    )
    parser.add_argument(
        "--non-real-time",
        action="store_true",
        help="""Do not run the simulation in real-time but as fast as
            possible.
        """,
    )
    args = parser.parse_args()

    real_time = not args.non_real_time
    time_step = 0.004
    finger = sim_finger.SimFinger(
        finger_type="trifingerone",
        time_step=time_step,
        enable_visualization=True,
    )

    # Object and Object Tracker Interface
    # ===================================
    #
    # Important:  These objects need to be created _after_ the simulation is
    # initialized (i.e. after the SimFinger instance is created).

    # spawn a cube in the arena
    cube = collision_objects.Block()

    # initialize the object tracker interface
    object_tracker_data = object_tracker.Data("object_tracker", True,
                                              args.history_size)
    object_tracker_backend = object_tracker.SimulationBackend(
        object_tracker_data, cube, real_time)
    object_tracker_frontend = object_tracker.Frontend(object_tracker_data)

    # Move the fingers to random positions so that the cube is kicked around
    # (and thus its position changes).
    while True:
        goal = np.array(
            sample.random_joint_positions(
                number_of_fingers=3,
                lower_bounds=[-1, -1, -2],
                upper_bounds=[1, 1, 2],
            ))
        finger_action = finger.Action(position=goal)

        for _ in range(50):
            t = finger.append_desired_action(finger_action)
            finger.get_observation(t)
            if real_time:
                time.sleep(time_step)

        # get the latest pose estimate of the cube and print it
        t_obj = object_tracker_frontend.get_current_timeindex()
        cube_pose = object_tracker_frontend.get_pose(t_obj)
        print("Cube Pose: %s" % cube_pose)

        # if --save-poses is set, stop when the buffer is full and write it to
        # a file
        if args.save_poses and t_obj > args.history_size:
            # the backend needs to be stopped before saving the buffered poses
            object_tracker_backend.stop()
            object_tracker_backend.store_buffered_data(args.save_poses)
            break
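# Minimal entry-point sketch (assumption: the original demo script calls
# main() in this way when run directly).
if __name__ == "__main__":
    main()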
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--finger-type",
        choices=finger_types_data.get_valid_finger_types(),
        required=True,
        help="""Pass a valid finger type.""",
    )
    parser.add_argument(
        "--real-time-mode",
        "-r",
        action="store_true",
        help="""Run simulation in real time.  If not set, the simulation runs
            as fast as possible.
        """,
    )
    parser.add_argument(
        "--first-action-timeout",
        "-t",
        type=float,
        default=math.inf,
        help="""Timeout (in seconds) for reception of first action after
            starting the backend.  If not set, the timeout is disabled.
        """,
    )
    parser.add_argument(
        "--max-number-of-actions",
        "-a",
        type=int,
        default=0,
        help="""Maximum numbers of actions that are processed.  After this the
            backend shuts down automatically.
        """,
    )
    parser.add_argument(
        "--robot-logfile",
        "-l",
        type=str,
        help="""Path to a file to which the robot data log is written.  If not
            specified, no log is generated.
        """,
    )
    parser.add_argument(
        "--camera-logfile",
        type=str,
        help="""Path to a file to which the camera data log is written.  If not
            specified, no log is generated.
        """,
    )
    parser.add_argument(
        "--ready-indicator",
        type=str,
        metavar="READY_INDICATOR_FILE",
        help="""Path to a file that will be created once the backend is ready
            and will be deleted again when it stops (before storing the logs).
        """,
    )
    parser.add_argument(
        "--add-cube",
        action="store_true",
        help="""Spawn a cube and run the object tracker backend.""",
    )
    parser.add_argument(
        "--cameras",
        action="store_true",
        help="""Run camera backend using rendered images.""",
    )
    parser.add_argument(
        "--visualize",
        "-v",
        action="store_true",
        help="Run pyBullet's GUI for visualization.",
    )
    args = parser.parse_args()

    # configure the logging module
    log_handler = logging.StreamHandler(sys.stdout)
    logging.basicConfig(
        format="[SIM_TRIFINGER_BACKEND %(levelname)s %(asctime)s] %(message)s",
        level=logging.DEBUG,
        handlers=[log_handler],
    )

    # select the correct types/functions based on which robot is used
    num_fingers = finger_types_data.get_number_of_fingers(args.finger_type)
    if num_fingers == 1:
        shared_memory_id = "finger"
        finger_types = robot_interfaces.finger
        create_backend = drivers.create_single_finger_backend
    elif num_fingers == 3:
        shared_memory_id = "trifinger"
        finger_types = robot_interfaces.trifinger
        create_backend = drivers.create_trifinger_backend

    # If max_number_of_actions is set, choose the history size of the time
    # series such that the whole episode fits in (+1 for the status message
    # containing the "limit exceeded" error).
    if args.max_number_of_actions:
        history_size = args.max_number_of_actions + 1
    else:
        history_size = 1000

    robot_data = finger_types.MultiProcessData(
        shared_memory_id, True, history_size=history_size
    )

    robot_logger = finger_types.Logger(robot_data)

    backend = create_backend(
        robot_data,
        args.real_time_mode,
        args.visualize,
        args.first_action_timeout,
        args.max_number_of_actions,
    )
    backend.initialize()

    #
    # Camera and Object Tracker Interface
    # Important:  These objects need to be created _after_ the simulation is
    # initialized (i.e. after the SimFinger instance is created).
    #
    if args.cameras and not args.add_cube:
        # If cameras are enabled but not the object, use the normal
        # PyBulletTriCameraDriver.
        from trifinger_cameras import tricamera

        camera_data = tricamera.MultiProcessData("tricamera", True, 10)
        camera_driver = tricamera.PyBulletTriCameraDriver()
        camera_backend = tricamera.Backend(camera_driver, camera_data)  # noqa

    elif args.add_cube:
        # If the cube is enabled, use the PyBulletTriCameraObjectTrackerDriver.
        # In case the cameras are not requested, disable rendering of the
        # images to save time.
        import trifinger_object_tracking.py_tricamera_types as tricamera

        # spawn a cube in the centre of the arena
        cube = collision_objects.Block(position=[0.0, 0.0, 0.035])
        render_images = args.cameras

        camera_data = tricamera.MultiProcessData("tricamera", True, 10)
        camera_driver = tricamera.PyBulletTriCameraObjectTrackerDriver(
            cube, robot_data, render_images
        )
        camera_backend = tricamera.Backend(camera_driver, camera_data)  # noqa

    #
    # Camera/Object Logger
    #
    camera_logger = None
    if args.camera_logfile:
        try:
            camera_data
        except NameError:
            logging.critical("Cannot create camera log camera is not running.")
            return

        # TODO
        camera_fps = 10
        robot_rate_hz = 1000
        buffer_length_factor = 1.5
        episode_length_s = args.max_number_of_actions / robot_rate_hz
        # Compute camera log size based on number of robot actions plus some
        # safety buffer
        log_size = int(camera_fps * episode_length_s * buffer_length_factor)

        logging.info("Initialize camera logger with buffer size %d", log_size)
        camera_logger = tricamera.Logger(camera_data, log_size)

    # if specified, create the "ready indicator" file to indicate that the
    # backend is ready
    if args.ready_indicator:
        pathlib.Path(args.ready_indicator).touch()

    backend.wait_until_first_action()

    if camera_logger:
        camera_logger.start()
        logging.info("Start camera logging")

    backend.wait_until_terminated()

    # delete the ready indicator file to indicate that the backend has shut
    # down
    if args.ready_indicator:
        pathlib.Path(args.ready_indicator).unlink()

    if camera_logger:
        logging.info(
            "Save recorded camera data to file %s", args.camera_logfile
        )
        camera_logger.stop_and_save(args.camera_logfile)

    if args.cameras or args.add_cube:
        # stop the camera backend (camera_data/camera_backend only exist if
        # one of these options was set)
        logging.info("Stop camera backend")
        camera_backend.shutdown()

    if args.robot_logfile:
        logging.info("Save robot data to file %s", args.robot_logfile)
        if args.max_number_of_actions:
            end_index = args.max_number_of_actions
        else:
            end_index = -1

        robot_logger.write_current_buffer_binary(
            args.robot_logfile, start_index=0, end_index=end_index
        )

    # cleanup stuff before the simulation (backend) is terminated
    if args.add_cube:
        del cube

    logging.info("Done.")
Example #4
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--finger-type",
        choices=finger_types_data.get_valid_finger_types(),
        required=True,
        help="""Pass a valid finger type.""",
    )
    parser.add_argument(
        "--real-time-mode",
        "-r",
        action="store_true",
        help="""Run simulation in real time.  If not set, the simulation runs
            as fast as possible.
        """,
    )
    parser.add_argument(
        "--first-action-timeout",
        "-t",
        type=float,
        default=math.inf,
        help="""Timeout (in seconds) for reception of first action after
            starting the backend.  If not set, the timeout is disabled.
        """,
    )
    parser.add_argument(
        "--max-number-of-actions",
        "-a",
        type=int,
        default=0,
        help="""Maximum numbers of actions that are processed.  After this the
            backend shuts down automatically.
        """,
    )
    parser.add_argument(
        "--logfile",
        "-l",
        type=str,
        help="""Path to a file to which the data log is written.  If not
            specified, no log is generated.
        """,
    )
    parser.add_argument(
        "--add-cube",
        action="store_true",
        help="""Spawn a cube and run the object tracker backend.""",
    )
    parser.add_argument(
        "--cameras",
        action="store_true",
        help="""Run camera backend using rendered images.""",
    )
    parser.add_argument(
        "--visualize",
        "-v",
        action="store_true",
        help="Run pyBullet's GUI for visualization.",
    )
    args = parser.parse_args()

    # select the correct types/functions based on which robot is used
    num_fingers = finger_types_data.get_number_of_fingers(args.finger_type)
    if num_fingers == 1:
        shared_memory_id = "finger"
        finger_types = robot_interfaces.finger
        create_backend = drivers.create_single_finger_backend
    elif num_fingers == 3:
        shared_memory_id = "trifinger"
        finger_types = robot_interfaces.trifinger
        create_backend = drivers.create_trifinger_backend

    # If max_number_of_actions is set, choose the history size of the time
    # series such that the whole episode fits in (+1 for the status message
    # containing the "limit exceeded" error).
    if args.max_number_of_actions:
        history_size = args.max_number_of_actions + 1
    else:
        history_size = 1000

    robot_data = finger_types.MultiProcessData(shared_memory_id,
                                               True,
                                               history_size=history_size)

    logger = finger_types.Logger(robot_data)

    backend = create_backend(
        robot_data,
        args.real_time_mode,
        args.visualize,
        args.first_action_timeout,
        args.max_number_of_actions,
    )
    backend.initialize()

    # Object and Object Tracker Interface
    # Important:  These objects need to be created _after_ the simulation is
    # initialized (i.e. after the SimFinger instance is created).
    if args.add_cube:
        # only import when really needed
        import trifinger_object_tracking.py_object_tracker as object_tracker

        # spawn a cube in the arena
        cube = collision_objects.Block()

        # initialize the object tracker interface
        object_tracker_data = object_tracker.Data("object_tracker", True)
        object_tracker_backend = object_tracker.SimulationBackend(  # noqa
            object_tracker_data, cube, args.real_time_mode)

    if args.cameras:
        from trifinger_cameras import tricamera

        camera_data = tricamera.MultiProcessData("tricamera", True, 10)
        camera_driver = tricamera.PyBulletTriCameraDriver()
        camera_backend = tricamera.Backend(camera_driver, camera_data)  # noqa

    backend.wait_until_terminated()

    if args.logfile:
        if args.max_number_of_actions:
            end_index = args.max_number_of_actions
        else:
            end_index = -1
        logger.write_current_buffer(args.logfile,
                                    start_index=0,
                                    end_index=end_index)
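# Hypothetical companion sketch (not part of the original example): when this
# backend is started with --add-cube, another process could attach to the
# object tracker data spawned above and query cube poses, mirroring the
# frontend calls used in the earlier demo.  The second argument to Data is
# assumed to mark this process as a non-master consumer of the shared data.
def _example_object_tracker_client():
    import trifinger_object_tracking.py_object_tracker as object_tracker

    data = object_tracker.Data("object_tracker", False)
    frontend = object_tracker.Frontend(data)
    t = frontend.get_current_timeindex()
    print("Cube Pose: %s" % frontend.get_pose(t))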
Example #5
    def __init__(
        self,
        control_rate_s,
        finger_type,
        enable_visualization,
    ):
        """Intializes the constituents of the pushing environment.

        The constructor sets up the finger robot depending on the finger type,
        the observation and action spaces, smoothing to reduce jitter on the
        robot, and a way to synchronize robots that are trained independently.


        Args:
            control_rate_s (float): the rate (in seconds) at which the step method of the env
                will run. The actual robot controller may run at a higher rate,
                so this is used to compute the number of robot control updates
                per environment step.
            finger_type (string): Name of the finger type.  In order to get
                a list of the valid finger types, call
                :meth:`.finger_types_data.get_valid_finger_types`
            enable_visualization (bool): if the simulation env is to be
                visualized
        """

        #: an instance of the simulated robot depending on the desired
        #: robot type
        self.finger = SimFinger(
            finger_type=finger_type,
            enable_visualization=enable_visualization,
        )

        self.num_fingers = finger_types_data.get_number_of_fingers(finger_type)

        #: the number of times the same action is to be applied to
        #: the robot in one step.
        self.steps_per_control = int(
            round(control_rate_s / self.finger.time_step_s))
        assert (abs(control_rate_s -
                    self.steps_per_control * self.finger.time_step_s) <=
                0.000001)
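        # For illustration (values assumed, not part of the original code):
        # with control_rate_s = 0.02 and the 0.004 s simulation time step used
        # elsewhere in these examples, steps_per_control comes out to 5.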

        #: the types of observations that should be a part of the environment's
        #: observed state
        self.observations_keys = [
            "joint_positions",
            "joint_velocities",
            "action_joint_positions",
            "goal_position",
            "object_position",
        ]
        #: the size of each of the observation type that is part of the
        #: observation keys (in the same order)
        self.observations_sizes = [
            3 * self.num_fingers,
            3 * self.num_fingers,
            3 * self.num_fingers,
            3,
            3,
        ]

        # Set up the observation and action spaces for the environment.  The
        # unscaled spaces use the custom bounds of each observation or action
        # type, whereas the scaled spaces normalize all values to the range
        # [-1, 1].
        self.spaces = FingerSpaces(
            num_fingers=self.num_fingers,
            observations_keys=self.observations_keys,
            observations_sizes=self.observations_sizes,
            separate_goals=False,
        )

        self.unscaled_observation_space = (
            self.spaces.get_unscaled_observation_space())
        self.unscaled_action_space = self.spaces.get_unscaled_action_space()

        self.observation_space = self.spaces.get_scaled_observation_space()
        self.action_space = self.spaces.get_scaled_action_space()

        #: a logger to enable logging of observations if desired
        self.logger = DataLogger()

        #: the object that has to be pushed
        self.block = collision_objects.Block()

        #: a marker to visualize where the target goal position for the episode
        #: is
        self.goal_marker = visual_objects.Marker(
            number_of_goals=1,
            goal_size=0.0325,
            initial_position=[0.19, 0.08, 0.0425],
        )

        self.reset()
    def __init__(
        self,
        visualization=False,
        initial_robot_position=None,
        initial_object_pose=None,
        enable_cameras=False,
    ):
        """Initialize.

        Args:
            visualization (bool):  Set to true to run visualization.
            initial_robot_position: Initial robot joint angles
            initial_object_pose:  Initial pose for the manipulation object.
                Can be any object with attributes ``position`` (x, y, z) and
                ``orientation`` (x, y, z, w).  This is optional, if not set, a
                random pose will be sampled.
            enable_cameras (bool):  Set to true to enable camera observations.
                By default this is disabled as rendering of images takes a lot
                of computational power.  Therefore the cameras should only be
                enabled if the images are actually used.
        """
        #: Camera rate in frames per second.  Observations of camera and
        #: object pose will only be updated at this rate.
        #: NOTE: This is currently not used!
        self._camera_rate_fps = 30

        #: Set to true to render camera observations
        self.enable_cameras = enable_cameras

        #: Simulation time step
        self._time_step = 0.004

        # first camera update in the first step
        self._next_camera_update_step = 0

        # Initialize robot, object and cameras
        # ====================================

        self.simfinger = SimFinger(
            finger_type="trifingerpro",
            time_step=self._time_step,
            enable_visualization=visualization,
        )

        if initial_robot_position is None:
            initial_robot_position = self.spaces.robot_position.default

        self.simfinger.reset_finger_positions_and_velocities(
            initial_robot_position)

        if initial_object_pose is None:
            initial_object_pose = move_cube.Pose(
                position=self.spaces.object_position.default,
                orientation=self.spaces.object_orientation.default,
            )
        self.cube = collision_objects.Block(
            initial_object_pose.position,
            initial_object_pose.orientation,
            mass=0.020,
        )

        self.tricamera = camera.TriFingerCameras()

        # Forward some methods for convenience
        # ====================================
        # forward "RobotFrontend" methods directly to simfinger
        self.Action = self.simfinger.Action
        self.get_desired_action = self.simfinger.get_desired_action
        self.get_applied_action = self.simfinger.get_applied_action
        self.get_timestamp_ms = self.simfinger.get_timestamp_ms
        self.get_current_timeindex = self.simfinger.get_current_timeindex
        self.get_robot_observation = self.simfinger.get_observation

        # forward kinematics directly to simfinger
        self.forward_kinematics = (
            self.simfinger.pinocchio_utils.forward_kinematics)

        # Initialize log
        # ==============
        self._action_log = {
            "initial_robot_position": initial_robot_position.tolist(),
            "initial_object_pose": {
                "position": initial_object_pose.position.tolist(),
                "orientation": initial_object_pose.orientation.tolist(),
            },
            "actions": [],
        }
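        # Hypothetical usage sketch (not part of the original code) for the
        # forwarded convenience methods above; ``platform`` stands for an
        # instance of the enclosing class, whose name is not shown here:
        #
        #     t = platform.get_current_timeindex()
        #     obs = platform.get_robot_observation(t)
        #     tip_positions = platform.forward_kinematics(obs.position)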