def __init__(self, args, datasets_groups, device=None):
    if device is None:
        device = torch.device(args.cuda_device if torch.cuda.is_available() else 'cpu')
    self.final_data = []
    self.length = 0
    self.offsets = []
    self.joint_topologies = []
    self.ee_ids = []
    self.means = []
    self.vars = []
    dataset_num = 0
    total_length = 100000  # sentinel; shrunk below to the shortest dataset
    all_datas = []
    for datasets in datasets_groups:
        offsets_group = []
        means_group = []
        vars_group = []
        dataset_num += len(datasets)
        tmp = []
        for i, dataset in enumerate(datasets):
            new_args = copy.copy(args)
            new_args.data_augment = 0
            new_args.dataset = dataset
            tmp.append(MotionData(new_args))
            means_group.append(tmp[-1].mean)
            vars_group.append(tmp[-1].var)
            file = BVH_file(get_std_bvh(dataset=dataset))
            if i == 0:
                self.joint_topologies.append(file.topology)
                self.ee_ids.append(file.get_ee_id())
            new_offset = file.offset
            new_offset = torch.tensor(new_offset, dtype=torch.float)
            new_offset = new_offset.reshape((1,) + new_offset.shape)
            offsets_group.append(new_offset)
            total_length = min(total_length, len(tmp[-1]))
        all_datas.append(tmp[0])  # tmp[0]: only the first skeleton of each group is supported here
        offsets_group = torch.cat(offsets_group, dim=0).to(device)
        means_group = torch.cat(means_group, dim=0).to(device)
        vars_group = torch.cat(vars_group, dim=0).to(device)
        self.offsets.append(offsets_group)
        self.means.append(means_group)
        self.vars.append(vars_group)
    self.all_data = all_datas
    self.length = total_length

def collect_bvh(data_path, character, files):
    print('begin {}'.format(character))
    motions = []
    for i, motion in enumerate(files):
        if not os.path.exists(data_path + character + '/' + motion):
            continue
        file = BVH_file(data_path + character + '/' + motion)
        new_motion = file.to_tensor().permute((1, 0)).numpy()
        motions.append(new_motion)
    save_file = data_path + character + '.npy'
    np.save(save_file, motions)
    print('Npy file saved at {}'.format(save_file))

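# Illustrative driver for collect_bvh (a minimal sketch, not part of the
# original source): the dataset root and the character names are assumptions
# used only for illustration.
def _example_collect_bvh():
    data_path = './datasets/Mixamo/'
    for character in ['Aj', 'BigVegas']:  # hypothetical character names
        files = sorted(os.listdir(data_path + character))
        collect_bvh(data_path, character, files)
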
def get_item(self, gid, pid, id):
    character = self.characters[gid][pid]
    path = './datasets/Mixamo/{}/'.format(character)
    if isinstance(id, int):
        file = path + self.file_list[id]
    elif isinstance(id, str):
        file = id
    else:
        raise Exception('Wrong input file type')
    if not os.path.exists(file):
        raise Exception('Cannot find file')
    file = BVH_file(file)
    motion = file.to_tensor(quater=self.args.rotation == 'quaternion')
    motion = motion[:, ::2]  # downsample frames by a factor of 2
    length = motion.shape[-1]
    length = length // 4 * 4  # truncate to a multiple of 4 frames
    return motion[..., :length].to(self.device)

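# Usage sketch for get_item (illustrative only; `test_set` stands for an
# instance of the surrounding test-dataset class, and the indices and path
# below are made up):
#
#   motion = test_set.get_item(0, 0, 5)  # 6th test clip, group 0, character 0
#   motion = test_set.get_item(0, 0, './datasets/Mixamo/Aj/clip.bvh')
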
def batch(char, suffix):
    input_path = './pretrained/results/bvh'
    all_err = []
    ref_file = get_std_bvh(dataset=char)
    ref_file = BVH_file(ref_file)
    height = ref_file.get_height()
    test_num = 0
    new_p = os.path.join(input_path, char)
    files = [
        f for f in os.listdir(new_p)
        if f.endswith('_{}.bvh'.format(suffix)) and not f.endswith('_gt.bvh')
        and 'fix' not in f and not f.endswith('_input.bvh')
    ]
    for file in files:
        file_full = os.path.join(new_p, file)
        anim, names, _ = BVH.load(file_full)
        test_num += 1
        index = []
        for i, name in enumerate(names):
            if 'virtual' in name:
                continue
            index.append(i)
        file_ref = file_full[:-6] + '_gt.bvh'
        anim_ref, _, _ = BVH.load(file_ref)
        pos = Animation.positions_global(anim)  # [T, J, 3]
        pos_ref = Animation.positions_global(anim_ref)
        pos = pos[:, index, :]
        pos_ref = pos_ref[:, index, :]
        err = (pos - pos_ref) * (pos - pos_ref)
        err /= height**2  # normalize by the squared character height
        err = np.mean(err)
        all_err.append(err)
    all_err = np.array(all_err)
    return all_err.mean()

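# Aggregation helper (a sketch, not from the source): averages the
# height-normalized joint-position error returned by batch() over a
# hypothetical list of characters.
def _mean_error_over_characters(characters, suffix):
    errs = [batch(char, suffix) for char in characters]
    return float(np.mean(errs))
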
def __init__(self, args, characters):
    self.characters = characters
    self.file_list = get_test_set()
    self.mean = []
    self.joint_topologies = []
    self.var = []
    self.offsets = []
    self.ee_ids = []
    self.args = args
    self.device = torch.device(args.cuda_device)
    for i, character_group in enumerate(characters):
        mean_group = []
        var_group = []
        offsets_group = []
        for j, character in enumerate(character_group):
            file = BVH_file(get_std_bvh(dataset=character))
            if j == 0:
                self.joint_topologies.append(file.topology)
                self.ee_ids.append(file.get_ee_id())
            new_offset = file.offset
            new_offset = torch.tensor(new_offset, dtype=torch.float)
            new_offset = new_offset.reshape((1,) + new_offset.shape)
            offsets_group.append(new_offset)
            mean = np.load('./datasets/Mixamo/mean_var/{}_mean.npy'.format(character))
            var = np.load('./datasets/Mixamo/mean_var/{}_var.npy'.format(character))
            mean = torch.tensor(mean)
            mean = mean.reshape((1,) + mean.shape)
            var = torch.tensor(var)
            var = var.reshape((1,) + var.shape)
            mean_group.append(mean)
            var_group.append(var)
        mean_group = torch.cat(mean_group, dim=0).to(self.device)
        var_group = torch.cat(var_group, dim=0).to(self.device)
        offsets_group = torch.cat(offsets_group, dim=0).to(self.device)
        self.mean.append(mean_group)
        self.var.append(var_group)
        self.offsets.append(offsets_group)

def __init__(self, args, joint_topology, origin_offsets: torch.Tensor,
             device, characters):
    self.args = args
    self.joint_topology = joint_topology
    self.edges = build_edge_topology(joint_topology,
                                     torch.zeros((len(joint_topology), 3)))
    self.fk = ForwardKinematics(args, self.edges)
    self.height = []  # used to normalize the end-effector loss
    self.real_height = []
    for char in characters:
        if args.use_sep_ee:
            h = BVH_file(get_std_bvh(dataset=char)).get_ee_length()
        else:
            h = BVH_file(get_std_bvh(dataset=char)).get_height()
        if args.ee_loss_fact == 'learn':
            h = torch.tensor(h, dtype=torch.float)
        else:
            h = torch.tensor(h, dtype=torch.float, requires_grad=False)
        self.real_height.append(BVH_file(get_std_bvh(dataset=char)).get_height())
        self.height.append(h.unsqueeze(0))
    self.real_height = torch.tensor(self.real_height, device=device)
    self.height = torch.cat(self.height, dim=0)
    self.height = self.height.to(device)
    if not args.use_sep_ee:
        self.height.unsqueeze_(-1)
    if args.ee_loss_fact == 'learn':
        self.height_para = [self.height]
    else:
        self.height_para = []
    if not args.simple_operator:
        self.auto_encoder = AE(args, topology=self.edges).to(device)
        self.discriminator = Discriminator(args, self.edges).to(device)
        self.static_encoder = StaticEncoder(args, self.edges).to(device)
    else:
        raise Exception('Conventional operator not yet implemented')

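# Shape note for the code above (derived from the code itself, not from
# external documentation): with args.use_sep_ee each per-character entry is a
# vector of end-effector lengths, so self.height ends up [n_characters, n_ee];
# otherwise each entry is a scalar height and the trailing unsqueeze_ yields
# [n_characters, 1], which broadcasts against per-end-effector loss terms.
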
def __init__(self, args, character_names, dataset):
    super(GAN_model, self).__init__(args)
    self.character_names = character_names
    self.dataset = dataset
    self.n_topology = len(character_names)
    self.models = []
    self.D_para = []
    self.G_para = []
    self.args = args

    for i in range(self.n_topology):
        model = IntegratedModel(args, dataset.joint_topologies[i], None,
                                self.device, character_names[i])
        self.models.append(model)
        self.D_para += model.D_parameters()
        self.G_para += model.G_parameters()

    if self.is_train:
        self.fake_pools = []
        self.optimizerD = optim.Adam(self.D_para, args.learning_rate,
                                     betas=(0.9, 0.999))
        self.optimizerG = optim.Adam(self.G_para, args.learning_rate,
                                     betas=(0.9, 0.999))
        self.optimizers = [self.optimizerD, self.optimizerG]
        self.criterion_rec = torch.nn.MSELoss()
        self.criterion_gan = GAN_loss(args.gan_mode).to(self.device)
        self.criterion_cycle = torch.nn.L1Loss()
        self.criterion_ee = Criterion_EE(args, torch.nn.MSELoss())
        for i in range(self.n_topology):
            self.fake_pools.append(ImagePool(args.pool_size))
    else:
        import option_parser
        self.err_crit = []
        for i in range(self.n_topology):
            self.err_crit.append(Eval_Criterion(dataset.joint_topologies[i]))
        self.id_test = 0
        self.bvh_path = os.path.join(args.save_dir, 'results/bvh')
        option_parser.try_mkdir(self.bvh_path)

    # Local imports hoisted out of the inner loop; behavior is unchanged.
    from datasets.bvh_writer import BVH_writer
    from datasets.bvh_parser import BVH_file
    import option_parser
    self.writer = []
    for i in range(self.n_topology):
        writer_group = []
        for _, char in enumerate(character_names[i]):
            file = BVH_file(option_parser.get_std_bvh(dataset=char))
            writer_group.append(BVH_writer(file.edges, file.names))
        self.writer.append(writer_group)

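# Construction sketch (illustrative; `MixedData` is assumed to be the name of
# the mixed-dataset class whose __init__ appears earlier in this section):
#
#   dataset = MixedData(args, character_names)
#   model = GAN_model(args, character_names, dataset)
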
def batch_split(source, dest):
    files = [f for f in os.listdir(source) if f.endswith('.bvh')]
    try:
        bvh_file = BVH_file(os.path.join(source, files[0]))
        if bvh_file.skeleton_type != 1:
            return
    except Exception:
        # Unreadable or absent first file: skip this directory.
        return
    print("Working on {}".format(os.path.split(source)[-1]))
    try_mkdir(dest)
    for file in tqdm(files, total=len(files)):
        in_file = os.path.join(source, file)
        out_file = os.path.join(dest, file)
        split_joint(in_file, out_file)

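# Illustrative walk over a dataset root (a sketch; the path and the '_split'
# destination suffix are assumptions): applies batch_split to every character
# directory.
def _example_batch_split(root='./datasets/Mixamo'):
    for char in os.listdir(root):
        src = os.path.join(root, char)
        if os.path.isdir(src):
            batch_split(src, src + '_split')
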
def __init__(self, args, datasets_groups):
    device = torch.device(args.cuda_device if torch.cuda.is_available() else 'cpu')
    self.final_data = []
    self.length = 0
    self.offsets = []
    self.joint_topologies = []
    self.ee_ids = []
    self.means = []
    self.vars = []
    dataset_num = 0
    seed = 19260817
    total_length = 10000000
    all_datas = []
    for datasets in datasets_groups:
        offsets_group = []
        means_group = []
        vars_group = []
        dataset_num += len(datasets)
        tmp = []
        for i, dataset in enumerate(datasets):
            new_args = copy.copy(args)
            new_args.data_augment = 0
            new_args.dataset = dataset
            tmp.append(MotionData(new_args))
            means_group.append(tmp[-1].mean)
            vars_group.append(tmp[-1].var)
            file = BVH_file(get_std_bvh(dataset=dataset))
            if i == 0:
                self.joint_topologies.append(file.topology)
                self.ee_ids.append(file.get_ee_id())
            new_offset = file.offset
            new_offset = torch.tensor(new_offset, dtype=torch.float)
            new_offset = new_offset.reshape((1,) + new_offset.shape)
            offsets_group.append(new_offset)
            total_length = min(total_length, len(tmp[-1]))
        all_datas.append(tmp)
        offsets_group = torch.cat(offsets_group, dim=0).to(device)
        means_group = torch.cat(means_group, dim=0).to(device)
        vars_group = torch.cat(vars_group, dim=0).to(device)
        self.offsets.append(offsets_group)
        self.means.append(means_group)
        self.vars.append(vars_group)

    length_per_skeleton = total_length // dataset_num
    for datasets in all_datas:
        pt = 0
        motions = []
        skeleton_idx = []
        for dataset in datasets:
            motions.append(dataset[pt * length_per_skeleton:(pt + 1) * length_per_skeleton])
            skeleton_idx += [pt] * length_per_skeleton
            pt += 1
        motions = torch.cat(motions, dim=0)
        if self.length != 0 and self.length != len(skeleton_idx):
            raise Exception('Not equal dataset size for different topologies')
        self.length = len(skeleton_idx)
        self.final_data.append(MixedData0(args, motions, skeleton_idx))

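# Worked example of the balancing above (made-up sizes): with one group of
# two skeletons holding 1200 and 800 windows, total_length becomes 800 and
# dataset_num is 2, so length_per_skeleton = 400. Skeleton 0 contributes
# windows [0:400] of its dataset and skeleton 1 windows [400:800] of its own,
# so the pt-based slicing keeps the per-skeleton index ranges disjoint while
# skeleton_idx records which skeleton produced each window.
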
def __init__(self, args, datasets_groups):
    device = torch.device(args.cuda_device if torch.cuda.is_available() else 'cpu')
    self.final_data = []
    self.length = 0
    self.offsets = []
    self.joint_topologies = []
    self.ee_ids = []
    self.means = []
    self.vars = []
    dataset_num = 0
    seed = 19260817
    total_length = 10000000
    all_datas = []
    for datasets in datasets_groups:
        offsets_group = []
        means_group = []
        vars_group = []
        dataset_num += len(datasets)
        tmp = []
        for i, dataset in enumerate(datasets):
            new_args = copy.copy(args)
            new_args.data_augment = 0
            new_args.dataset = dataset
            tmp.append(MotionData(new_args))
            mean = np.load('./datasets/Mixamo/mean_var/{}_mean.npy'.format(dataset))
            var = np.load('./datasets/Mixamo/mean_var/{}_var.npy'.format(dataset))
            mean = torch.tensor(mean)
            mean = mean.reshape((1,) + mean.shape)
            var = torch.tensor(var)
            var = var.reshape((1,) + var.shape)
            means_group.append(mean)
            vars_group.append(var)
            file = BVH_file(get_std_bvh(dataset=dataset))
            if i == 0:
                self.joint_topologies.append(file.topology)
                self.ee_ids.append(file.get_ee_id())
            new_offset = file.offset
            new_offset = torch.tensor(new_offset, dtype=torch.float)
            new_offset = new_offset.reshape((1,) + new_offset.shape)
            offsets_group.append(new_offset)
            total_length = min(total_length, len(tmp[-1]))
        all_datas.append(tmp)
        offsets_group = torch.cat(offsets_group, dim=0).to(device)
        means_group = torch.cat(means_group, dim=0).to(device)
        vars_group = torch.cat(vars_group, dim=0).to(device)
        self.offsets.append(offsets_group)
        self.means.append(means_group)
        self.vars.append(vars_group)

    for datasets in all_datas:
        pt = 0
        motions = []
        skeleton_idx = []
        for dataset in datasets:
            motions.append(dataset[:])
            skeleton_idx += [pt] * len(dataset)
            pt += 1
        motions = torch.cat(motions, dim=0)
        if self.length != 0 and self.length != len(skeleton_idx):
            self.length = min(self.length, len(skeleton_idx))
        else:
            self.length = len(skeleton_idx)
        self.final_data.append(MixedData0(args, motions, skeleton_idx))

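# Note on this variant versus the balanced one above (an observation from the
# code, not from external docs): here every window of every skeleton is kept
# (dataset[:]) and skeleton_idx grows with each dataset's full length, while
# self.length is clamped to the smallest group so iteration stays in range
# across topologies.
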
def copy_ref_file(src, dst):
    file = BVH_file(src)
    writer = BVH_writer(file.edges, file.names)
    # [..., ::2] halves the frame rate, matching the downsampling in get_item.
    writer.write_raw(file.to_tensor(quater=True)[..., ::2], 'quaternion', dst)

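# Usage sketch (illustrative paths): writes a downsampled quaternion copy of
# the source clip to the destination.
#
#   copy_ref_file('./datasets/Mixamo/Aj/clip.bvh', './results/clip_gt.bvh')
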
def get_height(file):
    file = BVH_file(file)
    return file.get_height()

def get_character_height(file_name):
    file = BVH_file(file_name)
    return file.get_height()

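# Note: get_height and get_character_height are equivalent thin wrappers
# around BVH_file.get_height(), presumably kept for their separate call
# sites. Usage sketch (illustrative path):
#
#   h = get_character_height('./datasets/Mixamo/Aj/clip.bvh')
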