def main(args):
    from utils.animation_data import AnimationData
    from utils.animation_2d_data import AnimationData2D

    data = load_output(args.file)
    total = len(data["trans"])
    content, style, foot_contact, trans, recon = (
        data["content"], data["style"], data["foot_contact"], data["trans"], data["recon"])
    content_meta, style_meta = data["content_meta"], data["style_meta"]
    selected = list(range(total))
    print(total)
    # for test, selected = [6, 12, 7, 11, 4]
    for i in selected:
        # fall back to the sample index when the meta entry was saved as 0
        if style_meta[i] == 0:
            style_meta[i] = {"style": [str(i)]}
        if content_meta[i] == 0:
            content_meta[i] = {"style": [str(i)]}
        vis_dict = {}
        cur_foot_contact = foot_contact[i].transpose(1, 0)
        if style[i].shape[0] == content[i].shape[0]:  # 3d style input
            cur_style = AnimationData.from_network_output(to_float(style[i])).get_global_positions()
        else:  # 2d style input
            cur_style = AnimationData2D.from_style2d(to_float(style[i])).get_projection()
        raws = [trans[i], recon[i], content[i]]
        cur_trans, cur_recon, cur_content = [
            AnimationData.from_network_output(to_float(raw)).get_global_positions()
            for raw in raws
        ]
        vis_dict[" ".join(("style", style_meta[i]["style"][0]))] = {"motion": cur_style, "foot_contact": None}
        vis_dict["trans"] = {"motion": cur_trans, "foot_contact": cur_foot_contact}
        vis_dict["recon"] = {"motion": cur_recon, "foot_contact": cur_foot_contact}
        vis_dict[" ".join(("content", content_meta[i]["style"][0]))] = {"motion": cur_content, "foot_contact": cur_foot_contact}
        visualize(vis_dict)
def nrot2anim(nrot):
    anim = AnimationData.from_network_output(nrot)
    bvh, names, ftime = anim.get_BVH()
    anim = AnimationData.from_rotations_and_root_positions(
        np.array(bvh.rotations), bvh.positions[:, 0, :])
    glb = anim.get_global_positions(trim=False)
    return (bvh, names, ftime), glb
def process_single_bvh(filename, config, norm_data_dir=None, downsample=4,
                       skel=None, to_batch=False, panda=False):
    def to_tensor(x):
        return torch.tensor(x).float().to(config.device)

    # TODO: CHANGED trim_scale was 4 and downsample was default 4
    if panda:
        anim = AnimationData.from_BVH(filename, downsample=1, skel=skel,
                                      trim_scale=None, mode="panda")
    else:
        anim = AnimationData.from_BVH(filename, downsample=1, skel=skel,
                                      trim_scale=None)
    # foot_contact = anim.get_foot_contact(transpose=True)  # [4, T]
    content = to_tensor(anim.get_content_input())
    style3d = to_tensor(anim.get_style3d_input())
    data = {
        "meta": {"style": "test", "content": filename.split('/')[-1]},
        # "foot_contact": to_tensor(foot_contact),
        "contentraw": content,
        "style3draw": style3d,
    }
    if norm_data_dir is None:
        norm_data_dir = config.extra_data_dir
    # normalize with the training-set statistics stored in train_{key}.npz
    for key, raw in zip(["content", "style3d"], [content, style3d]):
        norm_path = os.path.join(norm_data_dir, f'train_{key}.npz')
        norm = np.load(norm_path, allow_pickle=True)
        data[key] = normalize_motion(raw,
                                     to_tensor(norm['mean']).unsqueeze(-1),
                                     to_tensor(norm['std']).unsqueeze(-1),
                                     panda=panda)
        if key == "style3d":
            data[key] = to_tensor(np.nan_to_num(data[key]))
    if to_batch:
        data = single_to_batch(data)
    return data
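# Usage sketch (illustrative only, not part of the original source): turning a
# single BVH clip into normalized network inputs. The config object and the
# file path below are placeholders / assumptions.
#
#   config = get_config()                       # hypothetical config with .device / .extra_data_dir
#   data = process_single_bvh("demo/walk.bvh",  # placeholder path
#                             config, to_batch=True)
#   content, style3d = data["content"], data["style3d"]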
def __init__(self, config, subset_name, data_path=None, extra_data_dir=None):
    super(MotionNorm, self).__init__()
    np.random.seed(2020)
    self.skel = Skel()  # TODO: add config
    if data_path is None:
        data_path = config.data_path
    dataset = np.load(data_path, allow_pickle=True)[subset_name].item()
    motions, labels, metas = dataset["motion"], dataset["style"], dataset["meta"]
    self.label_i = labels
    self.len = len(self.label_i)
    self.metas = [{key: metas[key][i] for key in metas.keys()}
                  for i in range(self.len)]
    self.motion_i, self.foot_i = [], []
    content, style3d, style2d = [], [], []
    self.labels = []
    # data_dict maps each style label to the indices of the motions that carry it
    self.data_dict = {}
    self.diff_labels_dict = {}
    for i, motion in enumerate(motions):
        label = labels[i]
        anim = AnimationData(motion, skel=self.skel)
        if label not in self.labels:
            self.labels.append(label)
            self.data_dict[label] = []
        self.data_dict[label].append(i)
        self.motion_i.append(anim)
        self.foot_i.append(anim.get_foot_contact(transpose=True))  # [4, T]
        content.append(anim.get_content_input())
        style3d.append(anim.get_style3d_input())
        view_angles, scales = [], []
        for v in range(10):
            view_angles.append(self.random_view_angle())
            scales.append(self.random_scale())
        style2d.append(anim.get_projections(view_angles, scales))
    # calc diff labels
    for x in self.labels:
        self.diff_labels_dict[x] = [y for y in self.labels if y != x]
    if extra_data_dir is None:
        extra_data_dir = config.extra_data_dir
    norm_cfg = config.dataset_norm_config
    norm_data = []
    for key, raw in zip(["content", "style3d", "style2d"],
                        [content, style3d, style2d]):
        prefix = norm_cfg[subset_name][key]
        pre_computed = prefix is not None
        if prefix is None:
            prefix = subset_name
        norm_data.append(NormData(prefix + "_" + key, pre_computed, raw,
                                  config, extra_data_dir,
                                  keep_raw=(key != "style2d")))
    self.content, self.style3d, self.style2d = norm_data
    self.device = config.device
    self.rand = random.SystemRandom()
def bvh_to_motion_and_phase(filename, downsample, skel):
    anim = AnimationData.from_BVH(filename, downsample=downsample, skel=skel)
    full = anim.get_full()      # [T, xxx]
    phases = anim.get_phases()  # [T, 1]
    return np.concatenate((full, phases), axis=-1)
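# Illustrative sketch (assumption, not original code): bvh_to_motion_and_phase
# can be mapped over a list of clips to build a dataset array; the filenames
# and the Skel() construction below are placeholders.
#
#   skel = Skel()
#   clips = [bvh_to_motion_and_phase(f, downsample=4, skel=skel)
#            for f in ["walk.bvh", "run.bvh"]]   # placeholder filenames
#   # each entry is [T_i, D + 1]: the full motion features plus the phase channel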
def __init__(self, config, subset_name, data_path=None, extra_data_dir=None,
             panda=False):
    super(MotionNorm, self).__init__()
    np.random.seed(2020)
    self.skel = Skel()  # TD: add config
    if panda:
        self.skel = PandaSkel()
    if data_path is None:
        data_path = config.data_path
    dataset = np.load(data_path, allow_pickle=True)[subset_name].item()
    '''
    motions: arrays of T x 132
    labels: array of integer values to denote the 'style' of the motion
    metas:
        - style: array of string labels e.g. 'angry', 'childlike'
        - content: array of string labels e.g. 'walk'
        - phase: array of floats
    '''
    motions, labels, metas = dataset["motion"], dataset["style"], dataset["meta"]
    self.label_i = labels
    self.len = len(self.label_i)
    self.metas = [{key: metas[key][i] for key in metas.keys()}
                  for i in range(self.len)]
    self.motion_i = []
    # self.foot_i = []
    content, style3d, style2d = [], [], []
    self.labels = []
    '''data_dict contains mapping of style label to indices belonging to this label'''
    self.data_dict = {}
    self.diff_labels_dict = {}
    for i, motion in enumerate(motions):
        label = labels[i]
        anim = AnimationData(motion, skel=self.skel, panda=panda)
        if label not in self.labels:
            self.labels.append(label)
            self.data_dict[label] = []
        self.data_dict[label].append(i)
        self.motion_i.append(anim)
        # self.foot_i.append(anim.get_foot_contact(transpose=True))  # [4, T]
        content.append(anim.get_content_input())
        style3d.append(anim.get_style3d_input())
        # TODO: FIGURE OUT HOW TO CALCULATE ROOT ROTATION THEN CAN DO STYLE_2D
        # view_angles, scales = [], []
        # for v in range(10):
        #     view_angles.append(self.random_view_angle())
        #     scales.append(self.random_scale())
        # style2d.append(anim.get_projections(view_angles, scales))
    # calc diff labels
    for x in self.labels:
        self.diff_labels_dict[x] = [y for y in self.labels if y != x]
    if extra_data_dir is None:
        extra_data_dir = config.extra_data_dir
    norm_cfg = config.dataset_norm_config
    norm_data = []
    # for key, raw in zip(["content", "style3d", "style2d"], [content, style3d, style2d]):
    #     prefix = norm_cfg[subset_name][key]
    #     pre_computed = prefix is not None
    #     if prefix is None:
    #         prefix = subset_name
    #     norm_data.append(NormData(prefix + "_" + key, pre_computed, raw,
    #                               config, extra_data_dir, keep_raw=(key != "style2d")))
    # self.content, self.style3d, self.style2d = norm_data
    for key, raw in zip(["content", "style3d"], [content, style3d]):
        prefix = norm_cfg[subset_name][key]
        pre_computed = prefix is not None
        if prefix is None:
            prefix = subset_name
        norm_data.append(
            NormData(prefix + "_" + key, pre_computed, raw,
                     config, extra_data_dir, keep_raw=(key != "style2d")))
    self.content, self.style3d = norm_data
    self.device = config.device
    self.rand = random.SystemRandom()
def save_bvh_from_network_output(nrot, output_path):
    anim = AnimationData.from_network_output(nrot)
    bvh, names, ftime = anim.get_BVH()
    if not os.path.exists(os.path.dirname(output_path)):
        os.makedirs(os.path.dirname(output_path))
    BVH.save(output_path, bvh, names, ftime)
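# Round-trip sketch (assumption): given a network output `nrot` of shape [T, C],
# recover global joint positions with nrot2anim and write a BVH file with
# save_bvh_from_network_output. The output path below is a placeholder.
#
#   (bvh, names, ftime), glb = nrot2anim(nrot)
#   save_bvh_from_network_output(nrot, "output/demo/result.bvh")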