def extract_hand_pos(bvh_dir, files, destpath, fps):
    p = BVHParser()

    data_all = list()
    for f in files:
        ff = os.path.join(bvh_dir, f + '.bvh')
        print(ff)
        data_all.append(p.parse(ff))

    data_pipe = Pipeline([
        ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
        ('root', RootTransformer('hip_centric')),
        ('pos', MocapParameterizer('position')),
        ('jtsel', JointSelector(['RightHand', 'LeftHand'], include_root=False)),
        ('np', Numpyfier())
    ])

    out_data = data_pipe.fit_transform(data_all)
    assert len(out_data) == len(files)

    fi = 0
    for f in files:
        ff = os.path.join(destpath, f)
        print(ff)
        np.save(ff + ".npy", out_data[fi])
        fi = fi + 1
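# Minimal usage sketch for extract_hand_pos; the folder layout and recording
# names are hypothetical, and ./hand_pos is assumed to already exist:
#
#   extract_hand_pos('./bvh', ['Recording_001', 'Recording_002'],
#                    './hand_pos', fps=20)
#
# With this pipeline each saved .npy should hold an array of shape
# (n_frames, 6): X/Y/Z world positions of RightHand and LeftHand per
# downsampled frame.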
def cut_bvh(filename, timespan, destpath, starttime=0.0, endtime=-1.0):
    print(f'Cutting BVH {filename} into intervals of {timespan}')
    p = BVHParser()
    bvh_data = p.parse(filename)

    # PyMO stores the frame time (seconds per frame) in `framerate`,
    # so total duration = frame time * number of frames.
    if endtime <= 0:
        endtime = bvh_data.framerate * bvh_data.values.shape[0]

    writer = BVHWriter()
    suffix = 0
    while (starttime + timespan) <= endtime:
        out_basename = os.path.splitext(os.path.basename(filename))[0]
        bvh_outfile = os.path.join(
            destpath, out_basename + "_" + str(suffix).zfill(3) + '.bvh')
        start_idx = int(np.round(starttime / bvh_data.framerate))
        end_idx = int(np.round((starttime + timespan) / bvh_data.framerate)) + 1
        if end_idx >= bvh_data.values.shape[0]:
            return
        with open(bvh_outfile, 'w') as f:
            writer.write(bvh_data, f, start=start_idx, stop=end_idx)
        starttime += timespan
        suffix += 1
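# Usage sketch for cut_bvh, with a hypothetical take ./takes/session1.bvh:
# split it into non-overlapping 10-second clips written to ./clips as
# session1_000.bvh, session1_001.bvh, ... (a trailing remainder shorter than
# `timespan` is dropped by the while condition above):
#
#   cut_bvh('./takes/session1.bvh', timespan=10.0, destpath='./clips')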
def get_bvh_positions(bvh_file: str):
    parser = BVHParser()
    parsed_data = parser.parse(bvh_file)
    mp = MocapParameterizer('position')
    positions = mp.fit_transform([parsed_data])
    # Returns a MocapData object; its `.values` attribute is a pandas
    # DataFrame with the world positions of every joint for all frames.
    return positions[0]
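# Example of reading a single joint trajectory out of the returned object,
# assuming a hypothetical walk.bvh whose skeleton has a standard 'Hips' root:
#
#   pos = get_bvh_positions('walk.bvh')
#   hips_y = pos.values['Hips_Yposition']  # pandas Series, one value per frame
#   print(len(hips_y), hips_y.iloc[0])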
def process_bvh(gesture_filename):
    p = BVHParser()

    data_all = list()
    data_all.append(p.parse(gesture_filename))

    data_pipe = Pipeline([
        ('dwnsampl', DownSampler(tgt_fps=20, keep_all=False)),
        ('root', RootTransformer('hip_centric')),
        ('mir', Mirror(axis='X', append=True)),
        ('jtsel', JointSelector(target_joints, include_root=True)),
        ('cnst', ConstantsRemover()),
        ('np', Numpyfier())
    ])

    out_data = data_pipe.fit_transform(data_all)
    jl.dump(data_pipe, os.path.join('../resource', 'data_pipe.sav'))

    # euler -> rotation matrix
    out_data = out_data.reshape((out_data.shape[0], out_data.shape[1], -1, 3))
    out_matrix = np.zeros(
        (out_data.shape[0], out_data.shape[1], out_data.shape[2], 9))
    for i in range(out_data.shape[0]):  # mirror
        for j in range(out_data.shape[1]):  # frames
            r = R.from_euler('ZXY', out_data[i, j], degrees=True)
            out_matrix[i, j] = r.as_matrix().reshape(out_data.shape[2], 9)
    out_matrix = out_matrix.reshape((out_data.shape[0], out_data.shape[1], -1))

    return out_matrix[0], out_matrix[1]
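# A standalone sketch of the euler -> rotation-matrix step above, so the
# reshape bookkeeping is easy to verify on toy data (2 frames, 3 joints):
#
#   import numpy as np
#   from scipy.spatial.transform import Rotation as R
#
#   eulers = np.random.uniform(-180, 180, size=(2, 3, 3))  # frames x joints x ZXY angles
#   mats = R.from_euler('ZXY', eulers.reshape(-1, 3), degrees=True).as_matrix()
#   flat = mats.reshape(2, 3, 9)  # each joint's 3x3 matrix flattened to 9 values
#   print(flat.shape)             # (2, 3, 9)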
def process_bvh(basepath, destpath, filename, start, stop):
    p = BVHParser()
    print(f'Processing BVH {filename} from: {start} to {stop}')

    outpath = os.path.join(destpath, 'bvh')
    if not os.path.exists(outpath):
        os.makedirs(outpath)

    outfile = uniquify(os.path.join(outpath, filename + '.bvh'))
    infile = os.path.join(os.path.join(basepath, 'bvh'), filename + '.bvh')

    bvh_data = p.parse(infile, start=start, stop=stop)
    dwnsampl = DownSampler(tgt_fps=60)
    out_data = dwnsampl.fit_transform([bvh_data])

    writer = BVHWriter()
    with open(outfile, 'w') as f:
        writer.write(out_data[0], f)
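# `uniquify` is not defined in this snippet; a minimal sketch of what it
# presumably does (return a non-clashing path by appending a counter) is:
#
#   def uniquify(path):
#       base, ext = os.path.splitext(path)
#       counter = 1
#       while os.path.exists(path):
#           path = f'{base}_{counter}{ext}'
#           counter += 1
#       return path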
def __init__(self, root_path='motion_data/'):
    self.path_list = []
    self.total_path_list = []
    self.subject_names = os.listdir(root_path)
    for subject_num in self.subject_names:
        dir_path = os.path.join(root_path, subject_num)
        file_names = os.listdir(dir_path)
        for file_name in file_names:
            self.total_path_list.append(os.path.join(dir_path, file_name))
    self.parser = BVHParser()
    self.mp = MocapParameterizer('position')
    self.label_idx_from = None
    self.label_idx_to = None
    self.joints_to_draw = None
    self.df = None
def extract_joint_angles(bvh_dir, files, destpath, fps, fullbody=False):
    p = BVHParser()

    data_all = list()
    print("Importing data...")
    for f in files:
        ff = os.path.join(bvh_dir, f + '.bvh')
        print(ff)
        data_all.append(p.parse(ff))

    if fullbody:
        data_pipe = Pipeline([
            ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
            ('mir', Mirror(axis='X', append=True)),
            ('jtsel', JointSelector(
                ['Spine', 'Spine1', 'Neck', 'Head',
                 'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand',
                 'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand',
                 'RightUpLeg', 'RightLeg', 'RightFoot', 'RightToeBase',
                 'LeftUpLeg', 'LeftLeg', 'LeftFoot', 'LeftToeBase'],
                include_root=True)),
            ('root', RootTransformer('pos_rot_deltas', position_smoothing=5,
                                     rotation_smoothing=10)),
            ('exp', MocapParameterizer('expmap')),
            ('cnst', ConstantsRemover()),
            ('npf', Numpyfier())
        ])
    else:
        data_pipe = Pipeline([
            ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
            ('root', RootTransformer('hip_centric')),
            ('mir', Mirror(axis='X', append=True)),
            ('jtsel', JointSelector(
                ['Spine', 'Spine1', 'Spine2', 'Spine3', 'Neck', 'Neck1', 'Head',
                 'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand',
                 'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand'],
                include_root=True)),
            ('exp', MocapParameterizer('expmap')),
            ('cnst', ConstantsRemover()),
            ('np', Numpyfier())
        ])

    print("Processing...")
    out_data = data_pipe.fit_transform(data_all)

    # the datapipe will append the mirrored files to the end
    assert len(out_data) == 2 * len(files)

    jl.dump(data_pipe, os.path.join(destpath, 'data_pipe.sav'))

    fi = 0
    for f in files:
        ff = os.path.join(destpath, f)
        print(ff)
        np.savez(ff + ".npz", clips=out_data[fi])
        np.savez(ff + "_mirrored.npz", clips=out_data[len(files) + fi])
        fi = fi + 1
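# Sketch of turning processed features back into BVH with the pipeline saved
# above. It assumes every step implements inverse_transform, which holds for
# these PyMO transformers in common versions; the paths are hypothetical:
#
#   import joblib as jl
#   import numpy as np
#
#   data_pipe = jl.load('processed/data_pipe.sav')
#   clips = np.load('processed/Recording_001.npz')['clips']
#   mocap = data_pipe.inverse_transform([clips])[0]
#   writer = BVHWriter()
#   with open('reconstructed.bvh', 'w') as f:
#       writer.write(mocap, f)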
def main(src: str, dst: str):
    bvh_parser = BVHParser()
    data = bvh_parser.parse(src)

    target_fps = 20
    # PyMO's `framerate` holds the frame time in seconds, hence the inversion.
    orig_fps = round(1.0 / data.framerate)
    rate = orig_fps // target_fps
    print(orig_fps, rate)

    new_data = data.clone()
    new_data.values = data.values[::rate]  # [0:-1:rate] would drop the last frame
    new_data.values = new_data.values[:1200]
    new_data.framerate = 1.0 / target_fps

    bvh_writer = BVHWriter()
    with open(dst, 'w') as f:
        bvh_writer.write(new_data, f)
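# Integer-stride downsampling only works when the source rate is a multiple
# of the target: e.g. 60 fps -> 20 fps keeps every 3rd frame (rate = 3),
# while 50 fps -> 20 fps gives rate = 50 // 20 = 2, i.e. 25 fps, not 20.
# A hypothetical guard:
#
#   assert orig_fps % target_fps == 0, \
#       f'{orig_fps} fps is not an integer multiple of {target_fps} fps'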
def __init__(self, root_path='motion_data/',
             target_path='preprocessed_data/raw_position'):
    self.total_path_list = []
    self.subject_names = os.listdir(root_path)
    for subject_num in self.subject_names:
        dir_path = os.path.join(root_path, subject_num)
        file_names = os.listdir(dir_path)
        for file_name in file_names:
            self.total_path_list.append(os.path.join(dir_path, file_name))
    self.target_path = target_path
    self.parser = BVHParser()
    self.mp = MocapParameterizer('position')
    self.joints_to_draw = ('Hips', 'RightKnee', 'RightToe', 'Chest2', 'Head',
                           'LeftShoulder', 'lhand', 'RightShoulder', 'rhand',
                           'LeftKnee', 'LeftToe')
    self.df = None
    self.min_frame = 100
def extract_joint_angles(bvh_dir, files, dest_dir, pipeline_dir, fps):
    p = BVHParser()

    if not os.path.exists(pipeline_dir):
        raise Exception(f"Pipeline dir for the motion processing {pipeline_dir} "
                        "does not exist! Change -pipe flag value.")

    data_all = list()
    for f in files:
        ff = os.path.join(bvh_dir, f + '.bvh')
        print(ff)
        data_all.append(p.parse(ff))

    data_pipe = Pipeline([
        ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
        ('root', RootTransformer('hip_centric')),
        ('mir', Mirror(axis='X', append=True)),
        ('jtsel', JointSelector([
            'Spine', 'Spine1', 'Spine2', 'Spine3', 'Neck', 'Neck1', 'Head',
            'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand',
            'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand'
        ], include_root=True)),
        ('exp', MocapParameterizer('expmap')),
        ('cnst', ConstantsRemover()),
        ('np', Numpyfier())
    ])

    out_data = data_pipe.fit_transform(data_all)

    # the datapipe will append the mirrored files to the end
    assert len(out_data) == 2 * len(files)

    # join the directory and file name properly instead of concatenating them
    jl.dump(data_pipe, os.path.join(pipeline_dir, 'data_pipe.sav'))

    fi = 0
    for f in files:
        ff = os.path.join(dest_dir, f)
        print(ff)
        np.savez(ff + ".npz", clips=out_data[fi])
        np.savez(ff + "_mirrored.npz", clips=out_data[len(files) + fi])
        fi = fi + 1
def extract_joint_angles(in_file, out_file, start_t, end_t, fps):
    p = BVHParser()

    print("Reading ...")
    data_all = list()
    data_all.append(p.parse(in_file))

    print("Cropping ...")
    cropped_data = data_all[0]
    # start_t / end_t are in seconds; convert to frame indices via fps.
    if end_t == -1:
        cropped_data.values = cropped_data.values[start_t * fps:]
    else:
        cropped_data.values = cropped_data.values[start_t * fps:end_t * fps]

    print("Writing ...")
    writer = BVHWriter()
    with open(out_file, 'w') as f:
        writer.write(cropped_data, f)
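# Usage sketch with a hypothetical 60 fps recording: keep seconds 5 to 15.
# start_t / end_t are whole seconds, so the slice lands on frame boundaries.
#
#   extract_joint_angles('full_take.bvh', 'cropped.bvh',
#                        start_t=5, end_t=15, fps=60)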
def get_foot_heights():
    Rfoot_height_list = []
    Lfoot_height_list = []

    parser = BVHParser()
    parsed_data = parser.parse('motion_data/69/69_01.bvh')
    mp = MocapParameterizer('position')
    positions = mp.fit_transform([parsed_data])

    # Collect the vertical (Y) world position of each toe joint per frame;
    # .iloc gives positional indexing on the time-indexed DataFrame.
    length = len(positions[0].values['RightToe_Yposition'])
    for i in range(length):
        Rfoot_height_list.append(positions[0].values['RightToe_Yposition'].iloc[i])
        Lfoot_height_list.append(positions[0].values['LeftToe_Yposition'].iloc[i])

    return Rfoot_height_list, Lfoot_height_list
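# A quick sketch of using these heights to flag ground contact, with a
# hand-picked threshold (units depend on the skeleton's scale, so both the
# percentile and the offset below are hypothetical):
#
#   import numpy as np
#
#   r_heights, l_heights = get_foot_heights()
#   threshold = np.percentile(r_heights, 10) + 0.5
#   r_contact = np.array(r_heights) < threshold  # boolean mask per frame
#   print(r_contact.sum(), 'right-foot contact frames')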
def process_folder(src_dir: str, dst_dir: str, pipeline_dir: str, fps: int = 20):
    bvh_parser = BVHParser()
    data = []
    bvh_names = listdir(src_dir)

    logging.info('Parsing BVH files...')
    for bvh_name in bvh_names:
        bvh_path = join(src_dir, bvh_name)
        logging.info(bvh_path)
        data.append(bvh_parser.parse(bvh_path))

    # pipeline from https://github.com/GestureGeneration/Speech_driven_gesture_generation_with_autoencoder
    data_pipe = Pipeline([
        ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
        ('root', RootTransformer('hip_centric')),
        # ('mir', Mirror(axis='X', append=True)),
        ('jtsel', JointSelector(
            ['Spine', 'Spine1', 'Spine2', 'Spine3', 'Neck', 'Neck1', 'Head',
             'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand',
             'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand'],
            include_root=True)),
        ('exp', MocapParameterizer('expmap')),
        ('cnst', ConstantsRemover()),
        ('np', Numpyfier())
    ])

    logging.info('Transforming data...')
    out_data = data_pipe.fit_transform(data)

    if not exists(pipeline_dir):
        mkdir(pipeline_dir)
    jl.dump(data_pipe, join(pipeline_dir, 'data_pipe.sav'))

    logging.info('Saving result...')
    if not exists(dst_dir):
        mkdir(dst_dir)
    for i, bvh_name in enumerate(bvh_names):
        name, _ = splitext(bvh_name)
        logging.info(name)
        np.save(join(dst_dir, name + ".npy"), out_data[i])
def read_mocap_data(bvh_file):
    parser = BVHParser()
    return parser.parse(bvh_file)
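# What the parsed object exposes, assuming a hypothetical sample.bvh; values,
# framerate, and skeleton are the usual PyMO MocapData attributes:
#
#   mocap = read_mocap_data('sample.bvh')
#   print(mocap.values.shape)        # (n_frames, n_channels) DataFrame
#   print(mocap.framerate)           # frame time in seconds
#   print(list(mocap.skeleton)[:5])  # first few joint names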
def main():
    parser = BVHParser()
    parsed_data = parser.parse('walk-02-01-cmu.bvh')
    # parsed_data = parser.parse('walk-01-normal-azumi.bvh')

    mp = MocapParameterizer('position')
    positions = mp.fit_transform([parsed_data])

    body, joints, center_of_mass_joint_id = init_pybullet()
    compare_extremity_vectors(body, joints, center_of_mass_joint_id,
                              positions[0])

    # TODO write the cost function comparing the extremity vectors
    # and, to test it, use it to find the closest and furthest frames
    # from the CMU positions[] data

    # vt.print_skel(parsed_data)  # (See output below)
    # vt.draw_stickfigure(positions[0], frame=150)
    ax = draw_stickfigure3d(positions[0], frame=0, draw_names=True)

    # == This is a test to navigate the frames of the CMU recording
    # ax, fig = draw_stickfigure3d(positions[0], frame=g_frame)
    # plt.axis('equal')
    # ax.view_init(10, 40)
    # # plt.show()

    # def onclick(event):
    #     global g_frame
    #     global g_interval
    #     global g_forward
    #     # print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
    #     #       ('double' if event.dblclick else 'single', event.button,
    #     #        event.x, event.y, event.xdata, event.ydata))
    #     if event.button == 1:
    #         g_frame += g_interval if g_forward else -g_interval
    #         plt.cla()
    #         draw_stickfigure3d(positions[0], frame=g_frame, ax=ax)
    #         plt.draw()
    #     elif event.button == 2:
    #         g_forward = not g_forward
    #     elif event.button == 3:
    #         g_interval += 1 if g_forward else -1
    #     print('%s, %s, %r' % (g_frame, g_interval, g_forward))
    # fig.canvas.mpl_connect('button_press_event', onclick)
    # plt.show()

    # Find feet ground contact points
    # signal = create_foot_contact_signal(positions[0], 'RightFoot_Yposition')
    # plt.figure(figsize=(12, 6))
    # plt.plot(signal, 'r')
    # plt.plot(positions[0].values['RightFoot_Yposition'].values, 'g')
    # # plot_foot_up_down(
    # #     positions[0], 'RightFoot_Yposition', t=0.00002, min_dist=100)
    # print(get_foot_contact_idxs(
    #     positions[0].values['RightFoot_Yposition'].values))
    # print(positions[0].values['RightFoot_Yposition'].values[50:100].argmin())
    # peaks = [75, 207]
    # plt.plot(peaks,
    #          positions[0].values['RightFoot_Yposition'].values[peaks], 'ro')
    # plt.show()

    # This is how to get the position of a single joint:
    # Note that positions[0].values is a pandas DataFrame
    # joint_name = 'Hips'
    # frame = 150
    # print(positions[0].values['%s_Xposition' % joint_name][frame])

    ax = draw_stickfigure3d_mujoco(body, joints, center_of_mass_joint_id,
                                   draw_names=True, ax=ax)
    plt.axis('equal')
    # ax.view_init(30, 80)
    ax.view_init(0, 0)  # Side view
    # ax.view_init(10, 40)  # Best view imo
    plt.show()
# If USE_NORMALIZED_DF is true, the jointvecfromhip dataset contains each
# joint's vector relative to the hips in every frame. Visualized, this looks
# like the motion being performed while the robot stays in place.
# If USE_NORMALIZED_DF is false, the dataset contains the absolute position of
# every joint in global coordinates. Visualized, the robot is seen walking /
# jumping, exactly as in the Blender program.
USE_NORMALIZED_DF = False

if __name__ == "__main__":
    env = HumanoidBulletEnv(robot=CustomHumanoidRobot())
    for m in MOTION:
        sub = m["subject"]
        mot = m["motion_number"]
        start_frame = m["start_frame"]
        end_frame = m["end_frame"]
        print("Processing Motion {}_{}".format(sub, mot))

        parser = BVHParser()
        parsed_data = parser.parse(
            "{}/{}/{}_{}.bvh".format(BASE_DATASET_PATH, sub, sub, mot)
        )
        mp = MocapParameterizer("position")
        bvh_pos = mp.fit_transform([parsed_data])[0].values

        # Compute the hips -> joint vector for every endpoint
        normalized_data = np.zeros((1, len(ENDPOINT_LIST) * 3))
        for i in range(start_frame, end_frame):
            basePos = _getJointPos(bvh_pos, "Hips", i, JOINT_MULTIPLIER)
            tmp = []
            for ep in ENDPOINT_LIST:
                epPos = _getJointPos(bvh_pos, ep, i, JOINT_MULTIPLIER)
                if USE_NORMALIZED_DF:
                    # hip-relative vector, per the description above
                    tmp = np.hstack((tmp, epPos - basePos))
                else:
                    # absolute global position (branch reconstructed from the
                    # USE_NORMALIZED_DF description above)
                    tmp = np.hstack((tmp, epPos))