Example #1
    # NOTE(review): this fragment is the interior of a teleoperation setup
    # function whose `def` line is outside this excerpt; it is also cut off
    # mid-statement at the end. Code left byte-identical; comments only.
    # Create environment (on-screen renderer only; observations come from
    # proprioception, not cameras)
    env = suite.make(
        **config,
        has_renderer=True,
        has_offscreen_renderer=False,
        render_camera="agentview",
        ignore_done=True,
        use_camera_obs=False,
        reward_shaping=True,
        control_freq=20,
        hard_reset=False,
    )

    # Wrap this environment in a visualization wrapper
    env = VisualizationWrapper(env, indicator_configs=None)

    # Setup printing options for numbers (3 decimal places for floats)
    np.set_printoptions(formatter={"float": lambda x: "{0:0.3f}".format(x)})

    # initialize device: hook keyboard events into the viewer so key presses
    # drive the robot; key-repeat reuses the press handler
    if args.device == "keyboard":
        from robosuite.devices import Keyboard

        device = Keyboard(pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity)
        env.viewer.add_keypress_callback("any", device.on_press)
        env.viewer.add_keyup_callback("any", device.on_release)
        env.viewer.add_keyrepeat_callback("any", device.on_press)
    elif args.device == "spacemouse":
        from robosuite.devices import SpaceMouse
        # NOTE(review): the extraction appears to fuse two separate examples
        # here (a "Beispiel #2" header is missing) — the lines below restart
        # environment construction for a data-collection variant.
        config["env_configuration"] = args.config

    # Create environment
    env = suite.make(
        **config,
        has_renderer=True,
        has_offscreen_renderer=False,
        render_camera=args.camera,
        ignore_done=True,
        use_camera_obs=False,
        reward_shaping=True,
        control_freq=20,
    )

    # Wrap this with visualization wrapper
    env = VisualizationWrapper(env)

    # Grab reference to controller config and convert it to json-encoded string
    env_info = json.dumps(config)

    # wrap the environment with data collection wrapper; rollouts are dumped
    # under a unique /tmp directory derived from the current timestamp
    tmp_directory = "/tmp/{}".format(str(time.time()).replace(".", "_"))
    env = DataCollectionWrapper(env, tmp_directory)

    # initialize device (fragment is truncated mid-branch below)
    if args.device == "keyboard":
        from robosuite.devices import Keyboard

        device = Keyboard(pos_sensitivity=args.pos_sensitivity, rot_sensitivity=args.rot_sensitivity)
        env.viewer.add_keypress_callback("any", device.on_press)
        env.viewer.add_keyup_callback("any", device.on_release)
Example #3
    # NOTE(review): interior of a setup function whose `def` line is outside
    # this excerpt. Code left byte-identical; comments only.
    # Create environment (headless: offscreen rendering with camera + object
    # observations enabled)
    env = suite.make(
        **config,
        has_renderer=False,
        has_offscreen_renderer=True,
        ignore_done=True,
        camera_names=args.camera,
        camera_heights=args.height,
        camera_widths=args.width,
        use_camera_obs=True,
        use_object_obs=True,
        hard_reset=False,
    )

    # Wrap this environment in a visualization wrapper
    env = VisualizationWrapper(env, indicator_configs=None)

    # Set shared settings: observable attributes that will be modified in bulk
    attributes = ["corrupter", "delayer", "sampling_rate"]
    corruption_mode = 1     # 1 is corruption = ON, 0 is corruption = OFF
    obs_settings = {}

    # Function to easily modify observable on the fly
    def modify_obs(obs_name, attrs, mods):
        """Apply each (attribute, modifier) pair to the named observable.

        Args:
            obs_name (str): name of the observable to modify.
            attrs (list): attribute names to change on that observable.
            mods (list): modifier values, parallel to ``attrs``.
        """
        for attribute, modifier in zip(attrs, mods):
            env.modify_observable(observable_name=obs_name, attribute=attribute, modifier=modifier)
Example #4
    def __init__(
        self,
        demo,
        env_config=None,
        replay_from_actions=False,
        visualize_sites=False,
        camera="frontview",
        init_camera_pos=None,
        init_camera_quat=None,
        use_dr=False,
        dr_args=None,
    ):
        """Set up a demonstration-playback environment from an hdf5 demo file.

        Args:
            demo (str): path to an hdf5 demonstration file; its
                ``data`` group must carry an ``env_info`` attribute (JSON) and
                one subgroup per recorded episode.
            env_config (dict or None): extra env kwargs merged into the
                recorded env_info; must not overlap the defaults set here.
            replay_from_actions (bool): if True, replay by stepping recorded
                actions instead of setting states (consumed by subclass logic).
            visualize_sites (bool): if True, wrap the env in a
                VisualizationWrapper.
            camera (str): camera name used for rendering/observations.
            init_camera_pos / init_camera_quat: initial camera pose, forwarded
                to the superclass.
            use_dr (bool): if True, wrap the env in a
                DomainRandomizationWrapper.
            dr_args (dict or None): overrides for the default domain
                randomization settings.
        """
        # Store relevant values and initialize other values
        self.camera_id = None
        self.replay_from_actions = replay_from_actions
        self.states = None
        self.actions = None
        self.step = None
        self.n_steps = None
        self.current_ep = None
        self.started = False

        # Load the demo
        self.f = h5py.File(demo, "r")

        # Extract the env kwargs the demo was recorded with
        env_info = json.loads(self.f["data"].attrs["env_info"])

        # Construct default env arguments (headless offscreen rendering)
        default_args = {
            "has_renderer": False,
            "has_offscreen_renderer": True,
            "ignore_done": True,
            "use_camera_obs": True,
            "reward_shaping": True,
            "hard_reset": False,
            "camera_names": camera,
        }

        # If custom env_config is specified, make sure that there's no overlap with default args and merge with config
        if env_config is not None:
            for k in env_config.keys():
                assert k not in default_args, f"Key {k} cannot be specified in env_config!"
            env_info.update(env_config)

        # Merge in default args (defaults take precedence over recorded values)
        env_info.update(default_args)

        # Create env
        env = robosuite.make(**env_info)

        # Optionally wrap with visualization wrapper.
        # BUGFIX: wrap the freshly created local `env` — `self.env` does not
        # exist until super().__init__() runs below.
        if visualize_sites:
            env = VisualizationWrapper(env=env)

        # Optionally use domain randomization if specified
        self.use_dr = use_dr
        if self.use_dr:
            default_dr_args = {
                "seed": 1,
                "randomize_camera": False,
                "randomize_every_n_steps": 10,
            }
            # BUGFIX: dr_args defaults to None; updating a dict with None
            # raises TypeError, so only merge when overrides were given.
            if dr_args is not None:
                default_dr_args.update(dr_args)
            # BUGFIX: as above, wrap the local `env`, not the not-yet-set
            # `self.env` (also preserves any visualization wrapper applied).
            env = DomainRandomizationWrapper(
                env=env,
                **default_dr_args,
            )

        # list of all demonstrations episodes
        self.demos = list(self.f["data"].keys())

        # Run super init
        super().__init__(
            env=env,
            camera=camera,
            init_camera_pos=init_camera_pos,
            init_camera_quat=init_camera_quat,
        )

        # Load episode 0 by default
        self.load_episode_xml(demo_num=0)
Example #5
def run_sim_test(type_test):
    """Replay a recorded real Jaco trajectory in simulation and compare.

    Loads ``datasets/<type_test>.npz`` (real end-effector poses, joint
    positions, and actions), drives a simulated Jaco arm toward the recorded
    joint positions with a 2 Hz joint-position controller, and writes:

    - ``datasets/<type_test>.mp4``        — offscreen render of the rollout
    - ``datasets/<type_test>_eef.png``    — real vs. sim end-effector pose,
      in both the real-robot frame and the sim frame
    - ``datasets/<type_test>_joints.png`` — real vs. sim vs. goal joint angles

    Args:
        type_test (str): basename of the .npz dataset under ``datasets/``.
    """
    real_robot_joints = []
    sim_robot_joints = []
    target_robot_joints = []
    real_robot_eef_rframe = []
    real_robot_eef_sframe = []
    sim_robot_eef_rframe = []
    sim_robot_eef_sframe = []

    # Options passed to the env creation call
    options = {"env_name": "Lift", "robots": ["Jaco"]}

    # Camera used for the offscreen video
    camera = "frontview"

    write_path = os.path.join('datasets', type_test)

    # load data; latin1 encoding allows us to load python2 pickles
    real_robot = np.load(os.path.join('datasets', type_test + '.npz'),
                         allow_pickle=True,
                         encoding='latin1')
    real_eef_pos = real_robot['eef_pos']
    real_joint_pos = real_robot['joint_pos']
    real_actions = real_robot['actions']
    init_qpos = list(real_joint_pos[0][:7])

    # Load the desired joint-position controller config
    controller_file = "jaco_joint_position_5hz.json"
    controller_fpath = os.path.join(
        os.path.split(suite.__file__)[0], 'controllers', 'config',
        controller_file)
    print('loading controller from', controller_fpath)
    options["controller_configs"] = load_controller_config(
        custom_fpath=controller_fpath)

    control_freq = 2
    n_steps = len(real_actions)

    # initialize the task (headless, offscreen camera obs for the video)
    env = suite.make(
        **options,
        has_renderer=False,
        has_offscreen_renderer=True,
        ignore_done=True,
        use_camera_obs=True,
        control_freq=control_freq,
        camera_names=camera,
        camera_heights=512,
        camera_widths=512,
    )
    env = VisualizationWrapper(env)
    env.reset()

    # Start the sim arm at the first recorded real joint configuration
    env.robots[0].set_robot_joint_positions(init_qpos)
    env.robots[0].controller.update_initial_joints(init_qpos)

    video_writer = imageio.get_writer(write_path + '.mp4', fps=2)

    sim_joint_pos = env.sim.data.qpos[env.robots[0]._ref_joint_pos_indexes]
    for t in range(n_steps - 1):
        # Joint-position action: delta from current sim pose to recorded pose
        action = real_joint_pos[t, :7] - sim_joint_pos
        if len(action) == 7:
            # append a no-op gripper command
            action = np.hstack((action, [0]))

        obs, reward, done, _ = env.step(action)

        # frames come out vertically flipped; [::-1] restores orientation
        video_img = obs['%s_image' % camera][::-1]
        video_writer.append_data(video_img)

        # get simulator position and quaternion in real-robot and sim frames
        sim_eef_pos_rframe, sim_eef_quat_rframe = get_sim2real_posquat(env)
        sim_eef_pos_sframe, sim_eef_quat_sframe = get_sim_posquat(env)
        sim_joint_pos = env.sim.data.qpos[env.robots[0]._ref_joint_pos_indexes]
        sim_goal_joint_pos = env.robots[0].controller.goal_qpos

        sim_robot_eef_rframe.append(
            deepcopy(np.hstack((sim_eef_pos_rframe, sim_eef_quat_rframe))))
        sim_robot_eef_sframe.append(
            deepcopy(np.hstack((sim_eef_pos_sframe, sim_eef_quat_sframe))))
        sim_robot_joints.append(deepcopy(sim_joint_pos))
        target_robot_joints.append(deepcopy(sim_goal_joint_pos))

        real_eef_pos_sframe, real_eef_quat_sframe = get_real2sim_posquat(
            real_eef_pos[t, :3], real_eef_pos[t, 3:7])
        real_robot_eef_rframe.append(real_eef_pos[t])
        real_robot_eef_sframe.append(
            deepcopy(np.hstack((real_eef_pos_sframe, real_eef_quat_sframe))))
        real_robot_joints.append(real_joint_pos[t])

    # ---- End-effector comparison plot (x, y, z, qx, qy, qz, qw) ----
    real_robot_eef_rframe = np.array(real_robot_eef_rframe)
    real_robot_eef_sframe = np.array(real_robot_eef_sframe)
    sim_robot_eef_rframe = np.array(sim_robot_eef_rframe)
    sim_robot_eef_sframe = np.array(sim_robot_eef_sframe)
    y = np.arange(len(real_robot_eef_rframe))

    eef_series = [
        (real_robot_eef_rframe, 'r', 'robot_rframe'),
        (real_robot_eef_sframe, 'k', 'robot_sframe'),
        (sim_robot_eef_rframe, 'g', 'sim_rframe'),
        (sim_robot_eef_sframe, 'b', 'sim_sframe'),
    ]
    f, ax = plt.subplots(7, figsize=(10, 20))
    for i in range(7):
        # label only once so the legend has one entry per series
        for data, color, label in eef_series:
            ax[i].scatter(y, data[:, i], marker='o', s=4, c=color,
                          label=label if i == 0 else None)
        for data, color, _ in eef_series:
            ax[i].plot(data[:, i], c=color)

    # quaternion components live in [-pi, pi] for display purposes
    for i in range(4, 7):
        ax[i].set_ylim([-np.pi, np.pi])
    for i, title in enumerate(('x', 'y', 'z', 'qx', 'qy', 'qz', 'qw')):
        ax[i].set_title(title)
    ax[0].legend()
    plt.savefig(write_path + '_eef.png')
    plt.close()

    # ---- Joint-angle comparison plot (7 joints) ----
    real_robot_joints = np.array(real_robot_joints)
    sim_robot_joints = np.array(sim_robot_joints)
    target_robot_joints = np.array(target_robot_joints)
    joint_series = [
        (real_robot_joints, 'b', 'real'),
        (sim_robot_joints, 'k', 'sim'),
        (target_robot_joints, 'c', 'goal'),
    ]
    f, ax = plt.subplots(7, figsize=(10, 20))
    for i in range(7):
        ax[i].set_title(i)
        for data, color, label in joint_series:
            ax[i].plot(data[:, i], c=color, label=label if i == 0 else None)
        for data, color, _ in joint_series:
            ax[i].scatter(y, data[:, i], s=2, c=color)

    # joints may wind past 2*pi; allow +/- 4*pi of range
    for i in range(7):
        ax[i].set_ylim([-4 * np.pi, 4 * np.pi])
    ax[0].legend()
    plt.savefig(write_path + '_joints.png')
    plt.close()

    video_writer.close()
    # BUGFIX: report the actual video filename (with extension)
    print("Video saved to {}".format(write_path + '.mp4'))