Example #1
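All of the examples below build on the PyMO motion-capture toolkit plus a few standard libraries. A plausible shared preamble is sketched here; exact import paths and transformer signatures vary between PyMO forks, and a few examples additionally depend on external packages (naoqi, pybullet helpers) not shown:

import os
import time
import logging
import numpy as np
import joblib as jl
from sklearn.pipeline import Pipeline
from scipy.spatial.transform import Rotation as R
from pymo.parsers import BVHParser
from pymo.writers import BVHWriter
from pymo.preprocessing import (MocapParameterizer, JointSelector,
                                RootTransformer, ConstantsRemover,
                                Numpyfier, DownSampler, Mirror)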
def cut_bvh(filename, timespan, destpath, starttime=0.0, endtime=-1.0):
    print(f'Cutting BVH {filename} into intervals of {timespan}')

    p = BVHParser()
    bvh_data = p.parse(filename)
    if endtime <= 0:
        # PyMO's framerate field holds seconds per frame, so this is the total duration
        endtime = bvh_data.framerate * bvh_data.values.shape[0]

    writer = BVHWriter()
    suffix = 0
    while (starttime + timespan) <= endtime:
        out_basename = os.path.splitext(os.path.basename(filename))[0]
        bvh_outfile = os.path.join(
            destpath, out_basename + "_" + str(suffix).zfill(3) + '.bvh')
        start_idx = int(np.round(starttime / bvh_data.framerate))
        end_idx = int(np.round(
            (starttime + timespan) / bvh_data.framerate)) + 1
        if end_idx >= bvh_data.values.shape[0]:
            return

        with open(bvh_outfile, 'w') as f:
            writer.write(bvh_data, f, start=start_idx, stop=end_idx)

        starttime += timespan
        suffix += 1
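A hypothetical invocation (paths are placeholders), slicing a long take into consecutive 10-second clips named <take>_000.bvh, <take>_001.bvh, and so on:

cut_bvh('data/raw/take01.bvh', timespan=10.0, destpath='data/clips')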
Example #2
def process_bvh(gesture_filename):
    p = BVHParser()

    data_all = list()
    data_all.append(p.parse(gesture_filename))

    data_pipe = Pipeline([('dwnsampl', DownSampler(tgt_fps=20,
                                                   keep_all=False)),
                          ('root', RootTransformer('hip_centric')),
                          ('mir', Mirror(axis='X', append=True)),
                          ('jtsel',
                           JointSelector(target_joints, include_root=True)),
                          ('cnst', ConstantsRemover()), ('np', Numpyfier())])

    out_data = data_pipe.fit_transform(data_all)
    jl.dump(data_pipe, os.path.join('../resource', 'data_pipe.sav'))

    # euler -> rotation matrix
    out_data = out_data.reshape((out_data.shape[0], out_data.shape[1], -1, 3))
    out_matrix = np.zeros(
        (out_data.shape[0], out_data.shape[1], out_data.shape[2], 9))
    for i in range(out_data.shape[0]):  # mirror
        for j in range(out_data.shape[1]):  # frames
            r = R.from_euler('ZXY', out_data[i, j], degrees=True)
            out_matrix[i, j] = r.as_matrix().reshape(out_data.shape[2], 9)
    out_matrix = out_matrix.reshape((out_data.shape[0], out_data.shape[1], -1))

    return out_matrix[0], out_matrix[1]
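The function returns two (frames, joints * 9) arrays: the original clip and the mirrored copy that the Mirror step appends. A sketch of a call, assuming target_joints is defined in the surrounding module as the example implies:

orig_clip, mirrored_clip = process_bvh('data/gesture_take.bvh')
print(orig_clip.shape)  # (n_frames, n_joints * 9) flattened rotation matrices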
Example #3
def extract_hand_pos(bvh_dir, files, destpath, fps):

    p = BVHParser()

    data_all = list()
    for f in files:
        ff = os.path.join(bvh_dir, f + '.bvh')
        print(ff)
        data_all.append(p.parse(ff))
        
    data_pipe = Pipeline([
        ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
        ('root', RootTransformer('hip_centric')),
        ('pos', MocapParameterizer('position')), 
        ('jtsel', JointSelector(['RightHand','LeftHand'], include_root=False)),
        ('np', Numpyfier())
    ])

    out_data = data_pipe.fit_transform(data_all)
    
    assert len(out_data) == len(files)
    
    for fi, f in enumerate(files):
        ff = os.path.join(destpath, f)
        print(ff)
        np.save(ff + ".npy", out_data[fi])
Example #4
def get_bvh_positions(bvh_file: str):
    parser = BVHParser()
    parsed_data = parser.parse(bvh_file)
    mp = MocapParameterizer('position')
    positions = mp.fit_transform([parsed_data])
    # Returns a MocapData object; its .values attribute is a pandas DataFrame
    # with one '<Joint>_{X,Y,Z}position' column per joint and one row per frame
    return positions[0]
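A hedged usage sketch ('example.bvh' is a placeholder); PyMO exposes world positions as '<Joint>_Xposition' / '_Yposition' / '_Zposition' columns, as the later examples also rely on:

mocap = get_bvh_positions('example.bvh')
print(mocap.values['Hips_Yposition'].iloc[0])  # hip height in the first frame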
Example #5
def process_bvh(basepath, destpath, filename, start, stop):
    p = BVHParser()
    print(f'Processing BVH {filename} from: {start} to {stop}')
    outpath = os.path.join(destpath, 'bvh')
    if not os.path.exists(outpath):
        os.makedirs(outpath)
    # uniquify: project-local helper that de-duplicates output filenames
    outfile = uniquify(os.path.join(outpath, filename + '.bvh'))
    infile = os.path.join(basepath, 'bvh', filename + '.bvh')
    bvh_data = p.parse(infile, start=start, stop=stop)
    dwnsampl = DownSampler(tgt_fps=60)
    out_data = dwnsampl.fit_transform([bvh_data])
    writer = BVHWriter()
    with open(outfile, 'w') as f:
        writer.write(out_data[0], f)
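Here start and stop are frame indices forwarded to this fork's BVHParser.parse. A hypothetical call trimming the first 2400 frames of a take and resampling them to 60 fps:

process_bvh('raw_data', 'out_data', 'take01', start=0, stop=2400)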
Example #6
def extract_joint_angles(bvh_dir, files, destpath, fps, fullbody=False):
    p = BVHParser()

    data_all = list()
    print("Importing data...")
    for f in files:
        ff = os.path.join(bvh_dir, f + '.bvh')
        print(ff)
        data_all.append(p.parse(ff))

    if fullbody:
        data_pipe = Pipeline([
            ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
            ('mir', Mirror(axis='X', append=True)),
            ('jtsel', JointSelector([
                'Spine', 'Spine1', 'Neck', 'Head',
                'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand',
                'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand',
                'RightUpLeg', 'RightLeg', 'RightFoot', 'RightToeBase',
                'LeftUpLeg', 'LeftLeg', 'LeftFoot', 'LeftToeBase'
            ], include_root=True)),
            ('root', RootTransformer('pos_rot_deltas', position_smoothing=5,
                                     rotation_smoothing=10)),
            ('exp', MocapParameterizer('expmap')),
            ('cnst', ConstantsRemover()),
            ('np', Numpyfier())
        ])
    else:
        data_pipe = Pipeline([
            ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
            ('root', RootTransformer('hip_centric')),
            ('mir', Mirror(axis='X', append=True)),
            ('jtsel', JointSelector([
                'Spine', 'Spine1', 'Spine2', 'Spine3', 'Neck', 'Neck1', 'Head',
                'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand',
                'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand'
            ], include_root=True)),
            ('exp', MocapParameterizer('expmap')),
            ('cnst', ConstantsRemover()),
            ('np', Numpyfier())
        ])

    print("Processing...")
    out_data = data_pipe.fit_transform(data_all)
    
    # the datapipe will append the mirrored files to the end
    assert len(out_data) == 2*len(files)
    
    jl.dump(data_pipe, os.path.join(destpath, 'data_pipe.sav'))
        
    for fi, f in enumerate(files):
        ff = os.path.join(destpath, f)
        print(ff)
        np.savez(ff + ".npz", clips=out_data[fi])
        np.savez(ff + "_mirrored.npz", clips=out_data[len(files) + fi])
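The dumped pipeline is typically reloaded at synthesis time to map model output back into motion; a minimal sketch, assuming every transformer in this PyMO fork implements inverse_transform (the sklearn Pipeline then chains them in reverse):

pipe = jl.load('processed/data_pipe.sav')        # placeholder for the destpath above
recovered_clips = pipe.inverse_transform([out_data[0]])  # back to a list of MocapData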
Example #7
def main(src: str, dst: str):
    bvh_parser = BVHParser()
    data = bvh_parser.parse(src)

    target_fps = 20
    orig_fps = round(1.0 / data.framerate)  # framerate holds seconds per frame
    rate = orig_fps // target_fps
    print(orig_fps, rate)

    new_data = data.clone()
    new_data.values = data.values[::rate]      # keep every `rate`-th frame
    new_data.values = new_data.values[:1200]   # cap the clip at 1200 frames
    new_data.framerate = 1.0 / target_fps

    bvh_writer = BVHWriter()
    with open(dst, 'w') as f:
        bvh_writer.write(new_data, f)
Example #8
def extract_joint_angles(in_file, out_file, start_t, end_t, fps):
    p = BVHParser()

    print("Reading ...")
    data_all = list()
    data_all.append(p.parse(in_file))

    print("Cropping ...")
    cropped_data = data_all[0]
    # start_t / end_t are in seconds; multiply by fps to get frame indices
    if end_t == -1:
        cropped_data.values = cropped_data.values[start_t * fps:]
    else:
        cropped_data.values = cropped_data.values[start_t * fps:end_t * fps]

    print("Writing ...")
    writer = BVHWriter()
    with open(out_file, 'w') as f:
        writer.write(cropped_data, f)
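A hypothetical call cropping seconds 2-10 of a 120 fps recording (end_t == -1 would instead keep everything to the end of the file):

extract_joint_angles('take.bvh', 'take_cropped.bvh', start_t=2, end_t=10, fps=120)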
Example #9
def extract_joint_angles(bvh_dir, files, dest_dir, pipeline_dir, fps):
    p = BVHParser()

    if not os.path.exists(pipeline_dir):
        raise Exception("Pipeline dir for the motion processing ",
                        pipeline_dir,
                        " does not exist! Change -pipe flag value.")

    data_all = list()
    for f in files:
        ff = os.path.join(bvh_dir, f + '.bvh')
        print(ff)
        data_all.append(p.parse(ff))

    data_pipe = Pipeline([
        ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
        ('root', RootTransformer('hip_centric')),
        ('mir', Mirror(axis='X', append=True)),
        ('jtsel',
         JointSelector([
             'Spine', 'Spine1', 'Spine2', 'Spine3', 'Neck', 'Neck1', 'Head',
             'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand',
             'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand'
         ],
                       include_root=True)),
        ('exp', MocapParameterizer('expmap')), ('cnst', ConstantsRemover()),
        ('np', Numpyfier())
    ])

    out_data = data_pipe.fit_transform(data_all)

    # the datapipe will append the mirrored files to the end
    assert len(out_data) == 2 * len(files)

    jl.dump(data_pipe, os.path.join(pipeline_dir, 'data_pipe.sav'))

    for fi, f in enumerate(files):
        ff = os.path.join(dest_dir, f)
        print(ff)
        np.savez(ff + ".npz", clips=out_data[fi])
        np.savez(ff + "_mirrored.npz", clips=out_data[len(files) + fi])
Example #10
def get_foot_heights():
    parser = BVHParser()
    parsed_data = parser.parse('motion_data/69/69_01.bvh')

    mp = MocapParameterizer('position')
    positions = mp.fit_transform([parsed_data])

    # Per-frame Y (height) values of each toe joint, as plain Python lists
    Rfoot_height_list = positions[0].values['RightToe_Yposition'].tolist()
    Lfoot_height_list = positions[0].values['LeftToe_Yposition'].tolist()

    return Rfoot_height_list, Lfoot_height_list
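One hedged use of these height traces is thresholding them to estimate ground-contact frames; the 1.0 margin below is an arbitrary illustration, not part of the original:

r_heights, l_heights = get_foot_heights()
contact_threshold = min(r_heights) + 1.0  # hypothetical contact margin
right_contact_frames = [i for i, h in enumerate(r_heights) if h <= contact_threshold]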
Example #11
def process_folder(src_dir: str, dst_dir: str, pipeline_dir: str, fps: int = 20):
    bvh_parser = BVHParser()
    data = []
    bvh_names = listdir(src_dir)
    logging.info('Parsing BVH files...')
    for bvh_name in bvh_names:
        bvh_path = join(src_dir, bvh_name)
        logging.info(bvh_path)
        data.append(bvh_parser.parse(bvh_path))

    # pipeline from https://github.com/GestureGeneration/Speech_driven_gesture_generation_with_autoencoder
    data_pipe = Pipeline([
        ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
        ('root', RootTransformer('hip_centric')),
        # ('mir', Mirror(axis='X', append=True)),
        ('jtsel', JointSelector(
            ['Spine', 'Spine1', 'Spine2', 'Spine3', 'Neck', 'Neck1', 'Head', 'RightShoulder', 'RightArm',
             'RightForeArm', 'RightHand', 'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand'],
            include_root=True)),
        ('exp', MocapParameterizer('expmap')),
        ('cnst', ConstantsRemover()),
        ('np', Numpyfier())
    ])
    logging.info('Transforming data...')
    out_data = data_pipe.fit_transform(data)
    if not exists(pipeline_dir):
        mkdir(pipeline_dir)
    jl.dump(data_pipe, join(pipeline_dir, 'data_pipe.sav'))

    logging.info('Saving result...')
    if not exists(dst_dir):
        mkdir(dst_dir)
    for i, bvh_name in enumerate(bvh_names):
        name, _ = splitext(bvh_name)
        logging.info(name)
        np.save(join(dst_dir, name + ".npy"), out_data[i])
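A hypothetical invocation (all three directories are placeholders):

logging.basicConfig(level=logging.INFO)
process_folder('data/bvh', 'data/features', 'data/pipeline', fps=20)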
Example #12
class DataLoader:
    def __init__(self,
                 root_path='motion_data/',
                 target_path='preprocessed_data/raw_position'):
        self.total_path_list = []
        self.subject_names = os.listdir(root_path)
        for subject_num in self.subject_names:
            dir_path = os.path.join(root_path, subject_num)
            file_names = os.listdir(dir_path)
            for file_name in file_names:
                self.total_path_list.append(os.path.join(dir_path, file_name))
        self.target_path = target_path

        self.parser = BVHParser()
        self.mp = MocapParameterizer('position')
        self.joints_to_draw = ('Hips', 'RightKnee', 'RightToe', 'Chest2',
                               'Head', 'LeftShoulder', 'lhand',
                               'RightShoulder', 'rhand', 'LeftKnee', 'LeftToe')
        self.df = None
        self.min_frame = 100

    def get_pos_of_a_frame(self, frame):
        # [joint_num, channel_num(xyz)] = [11, 3]
        pos_batch = np.empty((11, 3), dtype=np.float32)

        for idx, joint in enumerate(self.joints_to_draw):
            parent_x = self.df['%s_Xposition' % joint][frame]
            parent_y = self.df['%s_Yposition' % joint][frame]
            parent_z = self.df['%s_Zposition' % joint][frame]

            pos_batch[idx, 0] = parent_x
            pos_batch[idx, 1] = parent_y
            pos_batch[idx, 2] = parent_z

        return pos_batch

    def get_pos_batch(self, pos_data, down_sample_scale):
        batch_len = pos_data[0].values.shape[0] // down_sample_scale

        # [frame_num, joint_num, channel_num(xyz)] = [batch_len, 11, 3]
        total_batch = np.empty((batch_len, 11, 3), dtype=np.float32)

        # self.joints_to_draw = pos_data[0].skeleton.keys()
        self.df = pos_data[0].values

        for i in range(batch_len):
            frame_batch = self.get_pos_of_a_frame(frame=i * down_sample_scale)

            total_batch[i] = frame_batch

        return total_batch

    def save_total_motion(self, down_sample_scale=1):
        total_len = len(self.total_path_list)
        progress = 0
        processed_motion_num = 0
        filtered_motion_num = 0

        for path in self.total_path_list:
            start_time = time.time()

            # Strip the root directory prefix and the '.bvh' extension
            # (os.sep keeps this portable instead of hard-coding '\\')
            start_idx = path.find(os.sep) + 1
            end_idx = path.find('.bvh')
            output_file_name = path[start_idx:end_idx] + '.npy'

            output_path = os.path.join(self.target_path, output_file_name)

            parsed_data = self.parser.parse(path)
            positions = self.mp.fit_transform([parsed_data])

            batch_size = positions[0].values.shape[0] // down_sample_scale
            if batch_size < self.min_frame:
                print(output_file_name + ' is too short: ', batch_size,
                      ' frames')
                filtered_motion_num = filtered_motion_num + 1
                continue
            else:
                processed_motion_num = processed_motion_num + 1

            single_motion_batch = self.get_pos_batch(positions,
                                                     down_sample_scale)

            np.save(output_path, single_motion_batch)

            progress = progress + 1
            ptime = time.time() - start_time
            print('[%d/%d] ptime = %.3f' % (progress, total_len, ptime))

        print('processed_motion_num = ', processed_motion_num,
              '/ filtered_motion_num = ', filtered_motion_num)
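A sketch of driving this loader (down_sample_scale=4 is an arbitrary choice, keeping every fourth frame):

loader = DataLoader(root_path='motion_data/', target_path='preprocessed_data/raw_position')
loader.save_total_motion(down_sample_scale=4)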
Example #13
def initialize_robot():
    # naoqi is the SoftBank/Aldebaran NAO SDK; IP_ROBOT and PORT_ROBOT are set in __main__ below
    proxy_motion = naoqi.ALProxy("ALMotion", IP_ROBOT, PORT_ROBOT)
    proxy_motion.wakeUp()


# Sanity checks for the degree/radian conversion helpers used when streaming angles to the robot
assert abs(rad2deg(deg2rad(12)) - 12) < 1e-6
assert abs(rad2deg(deg2rad(-12)) - (-12)) < 1e-6

if __name__ == '__main__':
    IP_ROBOT = "127.0.0.1"
    PORT_ROBOT = 9559
    IP_ME = "127.0.0.1"
    PORT_ME = 1234

    parser = BVHParser()
    parsed_data = parser.parse('rand_1.bvh')

    from pymo.viz_tools import print_skel
    print_skel(parsed_data)

    initialize_robot()

    mp_euler = MocapParameterizer('euler')
    rotations, = mp_euler.fit_transform([parsed_data])

    for frame_idx in range(rotations.values["RightArm_Xrotation"].size):
        time_begin = time.time()

        r_arm_x_rot = rotations.values["RightArm_Xrotation"].iloc[frame_idx]
        r_arm_y_rot = rotations.values["RightArm_Yrotation"].iloc[frame_idx]
        r_arm_z_rot = rotations.values["RightArm_Zrotation"].iloc[frame_idx]
Example #14
def read_mocap_data(bvh_file):
    parser = BVHParser()
    return parser.parse(bvh_file)
Example #15
# If USE_NORMALIZED_DF is False, the dataset contains the absolute position of every joint in global coordinates
# Visualized, the motion will appear to walk / jump through space, exactly as in Blender
USE_NORMALIZED_DF = False

if __name__ == "__main__":
    env = HumanoidBulletEnv(robot=CustomHumanoidRobot())
    for m in MOTION:
        sub = m["subject"]
        mot = m["motion_number"]
        start_frame = m["start_frame"]
        end_frame = m["end_frame"]

        print("Processing Motion {}_{}".format(sub, mot))
        parser = BVHParser()
        parsed_data = parser.parse(
            "{}/{}/{}_{}.bvh".format(BASE_DATASET_PATH, sub, sub, mot)
        )
        mp = MocapParameterizer("position")
        bvh_pos = mp.fit_transform([parsed_data])[0].values

        # Compute the hips -> joint vector for every endpoint
        normalized_data = np.zeros((1, len(ENDPOINT_LIST) * 3))
        for i in range(start_frame, end_frame):
            basePos = _getJointPos(bvh_pos, "Hips", i, JOINT_MULTIPLIER)
            tmp = []
            for ep in ENDPOINT_LIST:
                if USE_NORMALIZED_DF:
                    epPos = _getJointPos(bvh_pos, ep, i, JOINT_MULTIPLIER)
                    tmp = np.hstack((tmp, epPos - basePos))
                else:
                    epPos = getJointPos(bvh_pos, ep, i)
Example #16
def main():
    parser = BVHParser()

    parsed_data = parser.parse('walk-02-01-cmu.bvh')
    # parsed_data = parser.parse('walk-01-normal-azumi.bvh')

    mp = MocapParameterizer('position')

    positions = mp.fit_transform([parsed_data])

    body, joints, center_of_mass_joint_id = init_pybullet()
    compare_extremity_vectors(body, joints, center_of_mass_joint_id,
                              positions[0])

    # TODO write the cost function comparing the extremity vectors
    # and, to test it, use it to find the closest and furthest frames from the CMU positions[] data

    # vt.print_skel(parsed_data) # (See output below)
    # vt.draw_stickfigure(positions[0], frame=150)
    ax = draw_stickfigure3d(positions[0], frame=0, draw_names=True)

    # == This is a test to navigate the frames of the CMU recording
    # ax, fig = draw_stickfigure3d(positions[0], frame=g_frame)
    # plt.axis('equal')
    # ax.view_init(10, 40)
    # # plt.show()

    # def onclick(event):
    #     global g_frame
    #     global g_interval
    #     global g_forward
    #     # print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
    #     #       ('double' if event.dblclick else 'single', event.button,
    #     #        event.x, event.y, event.xdata, event.ydata))
    #     if event.button == 1:
    #         g_frame += g_interval if g_forward else -g_interval
    #         plt.cla()
    #         draw_stickfigure3d(positions[0], frame=g_frame, ax=ax)
    #         plt.draw()
    #     elif event.button == 2:
    #         g_forward = not g_forward
    #     elif event.button == 3:
    #         g_interval += 1 if g_forward else -1

    #     print('%s, %s, %r' % (g_frame, g_interval, g_forward))

    # fig.canvas.mpl_connect('button_press_event', onclick)
    # plt.show()

    # Find feet ground contact points
    # signal = create_foot_contact_signal(positions[0], 'RightFoot_Yposition')
    # plt.figure(figsize=(12, 6))
    # # plt.plot(signal, 'r')
    # plt.plot(positions[0].values['RightFoot_Yposition'].values, 'g')
    # # plot_foot_up_down(
    # #     positions[0], 'RightFoot_Yposition', t=0.00002, min_dist=100)
    # print(get_foot_contact_idxs(
    #     positions[0].values['RightFoot_Yposition'].values))
    # print(positions[0].values['RightFoot_Yposition'].values[50:100].argmin())

    # peaks = [75, 207]
    # plt.plot(peaks,
    #          positions[0].values['RightFoot_Yposition'].values[peaks], 'ro')
    # plt.show()

    # This is how to get the position of a single joint:
    # Note that positions[0] is a pandas dataframe
    # joint_name = 'Hips'
    # frame = 150
    # print(positions[0].values['%s_Xposition' % joint_name][frame])

    ax = draw_stickfigure3d_mujoco(body,
                                   joints,
                                   center_of_mass_joint_id,
                                   draw_names=True,
                                   ax=ax)

    plt.axis('equal')
    # ax.view_init(30, 80)
    ax.view_init(0, 0)  # Side view
    # ax.view_init(10, 40)  # Best view imo
    plt.show()
Example #17
class DataLoader:
    def __init__(self, root_path='motion_data/'):
        self.path_list = []
        self.total_path_list = []
        self.subject_names = os.listdir(root_path)
        for subject_num in self.subject_names:
            dir_path = os.path.join(root_path, subject_num)
            file_names = os.listdir(dir_path)
            for file_name in file_names:
                self.total_path_list.append(os.path.join(dir_path, file_name))

        self.parser = BVHParser()
        self.mp = MocapParameterizer('position')
        self.label_idx_from = None
        self.label_idx_to = None
        self.joints_to_draw = None
        self.df = None

    def set_bvh_data_path(self, is_train, train_rate=0.9):
        motion_list = []
        for path in self.total_path_list:
            if path.find('.bvh') == -1:
                continue
            motion_list.append(path)
        motion_len = len(motion_list)
        train_len = int(motion_len * train_rate)
        if is_train:
            self.path_list = motion_list[:train_len]
        else:
            self.path_list = motion_list[train_len:]

    def merge_velocity_data(self):
        motion_npy_list = []
        input_batch_list = []
        label_batch_list = []
        for path in self.total_path_list:
            is_npy = path.find('.npy')
            if is_npy == -1:
                continue
            else:
                motion_npy_list.append(path)

        total_len = len(motion_npy_list)
        progress = 0

        for path in motion_npy_list:
            start_time = time.time()

            motion = np.load(path)
            # Finite-difference velocity: v[i] = pos[i] - pos[i-1]
            pre_motion = np.pad(motion, ((1, 0), (0, 0)), 'constant')
            post_motion = np.pad(motion, ((0, 1), (0, 0)), 'constant')
            velocity_motion = (post_motion - pre_motion)[1:-1]
            motion_with_velocity = np.concatenate([motion[1:], velocity_motion], axis=1)
            is_input = path.find('input')
            if is_input == -1:
                label_batch_list.append(motion_with_velocity)
            else:
                input_batch_list.append(motion_with_velocity)

            progress = progress + 1
            ptime = time.time() - start_time
            print('[%d/%d] ptime = %.3f' % (progress, total_len, ptime))

        total_input_batch = np.concatenate(input_batch_list, axis=0)
        total_label_batch = np.concatenate(label_batch_list, axis=0)

        return total_input_batch, total_label_batch

    def get_pos_list(self, frame):
        pos_list = []

        for joint in self.joints_to_draw:
            parent_x = self.df['%s_Xposition' % joint][frame]
            parent_y = self.df['%s_Yposition' % joint][frame]
            parent_z = self.df['%s_Zposition' % joint][frame]

            pos_list.append(parent_x)
            pos_list.append(parent_y)
            pos_list.append(parent_z)

        return pos_list

    def get_pos_batch(self, pos_data, down_sample_scale):
        batch_size = pos_data[0].values.shape[0] // down_sample_scale
        label_len = self.label_idx_to - self.label_idx_from

        # 114 = total number of position channels (presumably 38 joints x 3 coordinates)
        input_batch = np.empty((batch_size, 114 - label_len), dtype=np.float32)
        label_batch = np.empty((batch_size, label_len), dtype=np.float32)

        self.joints_to_draw = pos_data[0].skeleton.keys()
        self.df = pos_data[0].values

        for i in range(batch_size):
            pos_list = self.get_pos_list(frame=i * down_sample_scale)

            label_batch[i] = np.asarray(pos_list[self.label_idx_from:self.label_idx_to])
            input_batch[i] = np.asarray(pos_list[:self.label_idx_from] + pos_list[self.label_idx_to:])

        return input_batch, label_batch

    def get_single_motion(self, motion_num, label_idx_from, label_idx_to, down_sample_scale=1):
        idx = motion_num
        self.label_idx_from = label_idx_from
        self.label_idx_to = label_idx_to

        path = self.path_list[idx]
        parsed_data = self.parser.parse(path)
        positions = self.mp.fit_transform([parsed_data])
        input_batch, label_batch = self.get_pos_batch(positions, down_sample_scale)
        return input_batch, label_batch

    def get_multi_motion(self, motion_list, label_idx_from, label_idx_to, down_sample_scale=1):
        self.label_idx_from = label_idx_from
        self.label_idx_to = label_idx_to

        input_batch_list = []
        label_batch_list = []

        for idx in motion_list:
            path = self.path_list[idx]
            parsed_data = self.parser.parse(path)
            positions = self.mp.fit_transform([parsed_data])
            input_batch, label_batch = self.get_pos_batch(positions, down_sample_scale)
            input_batch_list.append(input_batch)
            label_batch_list.append(label_batch)

        multi_input_batch = np.concatenate(input_batch_list, axis=0)
        multi_label_batch = np.concatenate(label_batch_list, axis=0)

        return multi_input_batch, multi_label_batch

    def get_total_motion(self, label_idx_from, label_idx_to, down_sample_scale=1):
        self.label_idx_from = label_idx_from
        self.label_idx_to = label_idx_to

        input_batch_list = []
        label_batch_list = []

        total_len = len(self.path_list)
        progress = 0
        for path in self.path_list:
            start_time = time.time()

            parsed_data = self.parser.parse(path)
            positions = self.mp.fit_transform([parsed_data])
            input_batch, label_batch = self.get_pos_batch(positions, down_sample_scale)
            input_batch_list.append(input_batch)
            label_batch_list.append(label_batch)

            progress = progress + 1
            ptime = time.time() - start_time
            print('[%d/%d] ptime = %.3f'%(progress, total_len, ptime))

        total_input_batch = np.concatenate(input_batch_list, axis=0)
        total_label_batch = np.concatenate(label_batch_list, axis=0)

        return total_input_batch, total_label_batch

    def save_total_motion(self, label_idx_from, label_idx_to, down_sample_scale=1):
        self.label_idx_from = label_idx_from
        self.label_idx_to = label_idx_to

        total_len = len(self.path_list)
        progress = 0
        for path in self.path_list:
            start_time = time.time()

            parsed_data = self.parser.parse(path)
            positions = self.mp.fit_transform([parsed_data])
            input_batch, label_batch = self.get_pos_batch(positions, down_sample_scale)
            input_data_path = path[:-4] + '_input.npy'
            label_data_path = path[:-4] + '_label.npy'
            np.save(input_data_path, input_batch)
            np.save(label_data_path, label_batch)

            progress = progress + 1
            ptime = time.time() - start_time
            print('[%d/%d] ptime = %.3f'%(progress, total_len, ptime))
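A hedged end-to-end sketch of this loader; the label slice below (channels 0-2, i.e. the first joint's XYZ) is purely illustrative:

loader = DataLoader(root_path='motion_data/')
loader.set_bvh_data_path(is_train=True, train_rate=0.9)
inputs, labels = loader.get_total_motion(label_idx_from=0, label_idx_to=3)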