コード例 #1
0
    def test_sizes_of_parsed_data(self):
        """Checks array shapes returned by parse_amc.convert for two timesteps.

        Verifies that the converted clip has the expected humanoid_CMU state
        dimensions (63 qpos, 62 qvel — qpos has one extra entry because the
        root quaternion uses 4 numbers vs. 3 angular velocities), that the
        time axis matches the frame axis, and that halving the control
        timestep doubles the number of frames.
        """
        # Instantiate the humanoid environment.
        env = humanoid_CMU.stand()

        # Parse and convert specified clip at the default control timestep.
        converted = parse_amc.convert(_TEST_AMC_PATH, env.physics,
                                      env.control_timestep())

        self.assertEqual(converted.qpos.shape[0], 63)
        self.assertEqual(converted.qvel.shape[0], 62)
        self.assertEqual(converted.time.shape[0], converted.qpos.shape[1])
        self.assertEqual(converted.qpos.shape[1], converted.qvel.shape[1] + 1)

        # Parse and convert the same clip -- WITH SMALLER TIMESTEP
        converted2 = parse_amc.convert(_TEST_AMC_PATH, env.physics,
                                       0.5 * env.control_timestep())

        self.assertEqual(converted2.qpos.shape[0], 63)
        self.assertEqual(converted2.qvel.shape[0], 62)
        self.assertEqual(converted2.time.shape[0], converted2.qpos.shape[1])
        # BUG FIX: the original re-asserted on `converted` here (copy-paste
        # slip), leaving converted2's qpos/qvel relationship unchecked.
        self.assertEqual(converted2.qpos.shape[1],
                         converted2.qvel.shape[1] + 1)

        # Compare sizes of parsed objects for different timesteps.
        self.assertEqual(converted.qpos.shape[1] * 2, converted2.qpos.shape[1])
コード例 #2
0
ファイル: mocap_demo.py プロジェクト: yangyi0318/dm_control
def main(unused_argv):
  """Renders a parsed AMC clip and plays it back with matplotlib.

  Converts the clip named by FLAGS.filename into qpos trajectories, renders
  each frame from two cameras side by side, then displays the frames at
  roughly 30 fps and waits for a button press before exiting.
  """
  env = humanoid_CMU.stand()

  # Convert the motion-capture clip into state trajectories.
  converted = parse_amc.convert(FLAGS.filename,
                                env.physics, env.control_timestep())

  max_frame = min(FLAGS.max_num_frames, converted.qpos.shape[1] - 1)

  width = 480
  height = 480
  video = np.zeros((max_frame, height, 2 * width, 3), dtype=np.uint8)

  # Drive the physics through each pose and capture both camera views.
  for frame_idx in range(max_frame):
    with env.physics.reset_context():
      env.physics.data.qpos[:] = converted.qpos[:, frame_idx]
    renders = [env.physics.render(height, width, camera_id=cam)
               for cam in (0, 1)]
    video[frame_idx] = np.hstack(renders)

  # Playback loop: reuse one AxesImage and pace frames toward ~30 fps.
  tic = time.time()
  img = None
  for frame in video:
    if img is None:
      img = plt.imshow(frame)
    else:
      img.set_data(frame)
    toc = time.time()
    clock_dt = toc - tic
    tic = time.time()
    # Real-time playback not always possible as clock_dt > .03
    plt.pause(max(0.01, 0.03 - clock_dt))  # Need min display time > 0.0.
    plt.draw()
  plt.waitforbuttonpress()
コード例 #3
0
def getNumFrames(filename):
    """Returns the number of usable frames in the AMC clip at `filename`.

    One frame is subtracted because the final qpos column has no matching
    qvel/transition.
    """
    env = humanoid_CMU.stand()
    # Parse and convert the specified clip at the env's control timestep.
    converted = parse_amc.convert(filename, env.physics, env.control_timestep())
    return converted.qpos.shape[1] - 1
コード例 #4
0
def main(filename):
    """Replays an AMC clip through the humanoid and collects observations.

    Args:
        filename: path to an .amc motion-capture file.

    Returns:
        A tuple of (observation trajectory array, qpos slice for the replayed
        frames, full qvel array, summed reward). `reward` is never appended
        to, so the last element is always 0.0 — kept to preserve the return
        interface.
    """
    env = humanoid_CMU.run()
    # Parse and convert specified clip.
    converted = parse_amc.convert(filename, env.physics,
                                  env.control_timestep())

    max_frame = converted.qvel.shape[1]
    trajectory = []
    reward = []

    print(converted.qpos.shape)
    print(converted.qvel.shape)

    for i in range(max_frame):
        p_i = converted.qpos[:, i]
        v_i = converted.qvel[:, i]
        with env.physics.reset_context():
            env.physics.data.qpos[:] = p_i
            env.physics.data.qvel[:] = v_i
        # BUG FIX: the original referenced an undefined name `obs` here,
        # raising NameError on the first iteration. Query the task for the
        # observation of the state just set. NOTE(review): assumes the task's
        # observation dict exposes an 'observations' key — confirm against
        # the humanoid_CMU task definition.
        obs = env.task.get_observation(env.physics)
        trajectory.append(obs['observations'])
    # (Dropped the unused `video`/`width`/`height` buffers the original
    # allocated but never read.)
    trajectory = np.asarray(trajectory)
    return trajectory, converted.qpos[:, 0:max_frame], converted.qvel, np.sum(
        reward)
コード例 #5
0
def load_state_dataset(data_dir_path, env, control_timestep):
    """Converts every .amc clip in a directory into a Dset of state arrays.

    Args:
        data_dir_path: directory containing .amc motion-capture files.
        env: a dm_control environment providing the physics model.
        control_timestep: sampling timestep passed to parse_amc.convert.

    Returns:
        A Dset built from one {"qpos", "qvel"} dict per clip.
    """
    from dm_control.suite.utils import parse_amc
    converted_data = []
    for file in os.listdir(data_dir_path):
        if not file.endswith(".amc"):
            continue
        # BUG FIX: plain `data_dir_path + file` concatenation produced a
        # broken path unless the caller included a trailing separator.
        file_path = os.path.join(data_dir_path, file)
        converted = parse_amc.convert(file_path, env.physics, control_timestep)  # 0.01 --> env.control_timestep()
        # (Dropped the dead `file_save_path` built from a machine-specific
        # hard-coded directory; it only fed a commented-out np.save.)
        converted_data.append({"qpos": converted.qpos, "qvel": converted.qvel})
    state_dataset = Dset(converted_data)
    return state_dataset
コード例 #6
0
def load_state_dataset(data_dir_path, env, control_timestep):
    """Loads every .amc clip under `data_dir_path` into a Dset.

    Each clip is converted with parse_amc at the given control timestep and
    stored as a {"qpos", "qvel"} dict; the collected dicts back the Dset.
    """
    from dm_control.suite.utils import parse_amc
    converted_data = []
    amc_names = [f for f in os.listdir(data_dir_path) if f.endswith(".amc")]
    for name in amc_names:
        clip = parse_amc.convert(
            data_dir_path + name, env.physics,
            control_timestep)  # 0.01 --> env.control_timestep()
        converted_data.append({
            "qpos": clip.qpos,
            "qvel": clip.qvel
        })
    return Dset(converted_data)
コード例 #7
0
def load_mocap_expert_trajectory(env,
                                 traj_data_dir,
                                 only_preprocess=False,
                                 force_repreprocess=False):
    """ @brief:
            load the mocap data from amc files. If the amc has been
            preprocessed, load the npy file that contains the qpos and qvel
            data.

        @param env: dm_control environment supplying physics and the task.
        @param traj_data_dir: a single .amc file path, or a directory that
            is globbed for '*.amc'.
        @param only_preprocess: accepted but not read in this body — kept
            for interface compatibility with callers.
        @param force_repreprocess: when True, re-convert even if a cached
            .npy exists.

        @return: (list of per-clip dicts with 'qvel', 'qpos', 'num_qpos',
            'observation' keys, list of processed file names).
    """
    from dm_control.suite.utils import parse_amc
    # get all the file names
    if traj_data_dir.endswith('.amc'):
        file_names = [traj_data_dir]
    else:
        assert os.path.exists(traj_data_dir)
        file_names = glob.glob(traj_data_dir + '/*.amc')

    mocap_data = []
    for i_amc in file_names:
        i_npy = i_amc.replace('.amc', '.npy')

        if os.path.exists(i_npy) and (not force_repreprocess):
            # The cached file stores a pickled dict wrapped in a 0-d object
            # array; .item() unwraps it.
            # BUG FIX: NumPy >= 1.16.3 defaults allow_pickle=False, which
            # makes loading the dict saved below raise ValueError; pass
            # allow_pickle=True explicitly. NOTE: only load .npy caches you
            # trust — unpickling is not safe on untrusted files.
            i_mocap_data = np.load(
                i_npy, encoding="latin1", allow_pickle=True).item()

        else:
            # if the npy has not been processed, convert the amc clip
            converted_data = \
                parse_amc.convert(i_amc, env.physics, env.control_timestep())
            qvel = converted_data.qvel.transpose()
            qpos = converted_data.qpos.transpose()
            # qpos has one more frame than qvel (velocities are transitions)
            assert qpos.shape[0] == qvel.shape[0] + 1
            num_qpos = len(qpos) - 1
            observation = []

            # get the other observation info for each frame
            for i_id in range(num_qpos):
                with env.physics.reset_context():
                    env.physics.data.qpos[:] = qpos[i_id]
                    env.physics.data.qvel[:] = qvel[i_id]
                env.physics.after_reset()
                i_observation_dict = env.task.get_observation(env.physics)
                # Concatenate root pose, vectorized task observation, the
                # frame index, and the center of mass position.
                i_observation = np.concatenate([
                    env.physics.data.qpos[:7],
                    env_util.vectorize_ob(i_observation_dict),
                    np.ones(1) * i_id,
                    env.physics.center_of_mass_position()
                ])

                observation.append(i_observation)
            i_mocap_data = {
                'qvel': qvel,
                'qpos': qpos,
                'num_qpos': num_qpos,
                'observation': np.array(observation)
            }
            np.save(i_npy, i_mocap_data)  # cache the converted data

        mocap_data.append(i_mocap_data)

    return mocap_data, file_names