def __init__(self, args, joint_topology, origin_offsets: torch.Tensor,
                 device, characters):
        """Build per-character networks and height statistics for ee_loss.

        Args:
            args: parsed options (uses use_sep_ee, ee_loss_fact,
                simple_operator, ...).
            joint_topology: parent-index list describing the skeleton.
            origin_offsets: unused here; kept for interface compatibility.
            device: torch device the tensors are moved to.
            characters: iterable of character names with standard BVH files.

        Raises:
            Exception: if args.simple_operator is set (not implemented).
        """
        self.args = args
        self.joint_topology = joint_topology
        # Zero offsets: only connectivity matters for the edge topology here.
        self.edges = build_edge_topology(joint_topology,
                                         torch.zeros((len(joint_topology), 3)))
        self.fk = ForwardKinematics(args, self.edges)

        self.height = []  # for normalize ee_loss
        self.real_height = []
        for char in characters:
            # Parse the standard BVH once per character (previously the same
            # file was loaded and parsed twice).
            bvh = BVH_file(get_std_bvh(dataset=char))
            if args.use_sep_ee:
                h = bvh.get_ee_length()
            else:
                h = bvh.get_height()
            # torch.tensor defaults to requires_grad=False in both branches;
            # the 'learn' case becomes trainable by being handed to an
            # optimizer via height_para below.
            h = torch.tensor(h, dtype=torch.float)
            self.real_height.append(bvh.get_height())
            self.height.append(h.unsqueeze(0))
        self.real_height = torch.tensor(self.real_height, device=device)
        self.height = torch.cat(self.height, dim=0)
        self.height = self.height.to(device)
        if not args.use_sep_ee:
            self.height.unsqueeze_(-1)
        # Only the learnable variant exposes height as an extra parameter.
        if args.ee_loss_fact == 'learn':
            self.height_para = [self.height]
        else:
            self.height_para = []

        if not args.simple_operator:
            self.auto_encoder = AE(args, topology=self.edges).to(device)
            self.discriminator = Discriminator(args, self.edges).to(device)
            self.static_encoder = StaticEncoder(args, self.edges).to(device)
        else:
            raise Exception('Conventional operator not yet implemented')
    def __init__(self, args, character_names, dataset):
        super(GAN_model, self).__init__(args)
        self.character_names = character_names
        self.dataset = dataset
        self.n_topology = len(character_names)
        self.models = []
        self.D_para = []
        self.G_para = []
        self.args = args

        for i in range(self.n_topology):
            model = IntegratedModel(args, dataset.joint_topologies[i], None,
                                    self.device, character_names[i])
            self.models.append(model)
            self.D_para += model.D_parameters()
            self.G_para += model.G_parameters()

        if self.is_train:
            self.fake_pools = []
            self.optimizerD = optim.Adam(self.D_para,
                                         args.learning_rate,
                                         betas=(0.9, 0.999))
            self.optimizerG = optim.Adam(self.G_para,
                                         args.learning_rate,
                                         betas=(0.9, 0.999))
            self.optimizers = [self.optimizerD, self.optimizerG]
            self.criterion_rec = torch.nn.MSELoss()
            self.criterion_gan = GAN_loss(args.gan_mode).to(self.device)
            self.criterion_cycle = torch.nn.L1Loss()
            self.criterion_ee = Criterion_EE(args, torch.nn.MSELoss())
            for i in range(self.n_topology):
                self.fake_pools.append(ImagePool(args.pool_size))
        else:
            import option_parser
            self.err_crit = []
            for i in range(self.n_topology):
                self.err_crit.append(
                    Eval_Criterion(dataset.joint_topologies[i]))
            self.id_test = 0
            self.bvh_path = os.path.join(args.save_dir, 'results/bvh')
            option_parser.try_mkdir(self.bvh_path)

            self.writer = []
            for i in range(self.n_topology):
                writer_group = []
                for _, char in enumerate(character_names[i]):
                    from datasets.bvh_writer import BVH_writer
                    from datasets.bvh_parser import BVH_file
                    import option_parser
                    file = BVH_file(option_parser.get_std_bvh(dataset=char))
                    writer_group.append(BVH_writer(file.edges, file.names))
                self.writer.append(writer_group)
    def __init__(self, args, datasets_groups, device=None):
        if device is None:
            device = torch.device(args.cuda_device if (
                torch.cuda.is_available()) else 'cpu')
        self.final_data = []
        self.length = 0
        self.offsets = []
        self.joint_topologies = []
        self.ee_ids = []
        self.means = []
        self.vars = []
        dataset_num = 0
        total_length = 100000
        all_datas = []
        for datasets in datasets_groups:
            offsets_group = []
            means_group = []
            vars_group = []
            dataset_num += len(datasets)
            tmp = []
            for i, dataset in enumerate(datasets):
                new_args = copy.copy(args)
                new_args.data_augment = 0
                new_args.dataset = dataset

                tmp.append(MotionData(new_args))

                means_group.append(tmp[-1].mean)
                vars_group.append(tmp[-1].var)

                file = BVH_file(get_std_bvh(dataset=dataset))
                if i == 0:
                    self.joint_topologies.append(file.topology)
                    self.ee_ids.append(file.get_ee_id())
                new_offset = file.offset
                new_offset = torch.tensor(new_offset, dtype=torch.float)
                new_offset = new_offset.reshape((1, ) + new_offset.shape)
                offsets_group.append(new_offset)

                total_length = min(total_length, len(tmp[-1]))
            all_datas.append(tmp[0])  # tmp[0] means only support one ske
            offsets_group = torch.cat(offsets_group, dim=0)
            offsets_group = offsets_group.to(device)
            means_group = torch.cat(means_group, dim=0).to(device)
            vars_group = torch.cat(vars_group, dim=0).to(device)
            self.offsets.append(offsets_group)
            self.means.append(means_group)
            self.vars.append(vars_group)

        self.all_data = all_datas
        self.length = total_length
# Beispiel #4 (scraper artifact: example separator, not executable code)
# 0
def batch(char, suffix, input_path='./pretrained/results/bvh'):
    """Mean normalized squared joint-position error for one character.

    Scores every '<clip>_<suffix>.bvh' under input_path/char against the
    corresponding '<clip>_gt.bvh' ground truth, with the squared error
    normalized by the squared height of the character's standard skeleton.

    Args:
        char: character directory name (also locates the std BVH).
        suffix: result-file suffix selecting which outputs to score.
        input_path: root directory of the result BVH files (new optional
            parameter; the default preserves the previous hard-coded path).

    Returns:
        Mean normalized squared error over all matched files.
    """
    ref_file = BVH_file(get_std_bvh(dataset=char))
    height = ref_file.get_height()

    new_p = os.path.join(input_path, char)

    files = [
        f for f in os.listdir(new_p)
        if f.endswith('_{}.bvh'.format(suffix)) and not f.endswith('_gt.bvh')
        and 'fix' not in f and not f.endswith('_input.bvh')
    ]

    all_err = []
    for file in files:
        file_full = os.path.join(new_p, file)
        anim, names, _ = BVH.load(file_full)
        # Keep only real joints; 'virtual' joints are pipeline helpers.
        index = [i for i, name in enumerate(names) if 'virtual' not in name]

        # NOTE(review): assumes the file name ends in a 6-char tail like
        # '_N.bvh' — verify for multi-character suffixes.
        file_ref = file_full[:-6] + '_gt.bvh'
        anim_ref, _, _ = BVH.load(file_ref)

        pos = Animation.positions_global(anim)  # [T, J, 3]
        pos_ref = Animation.positions_global(anim_ref)

        pos = pos[:, index, :]
        pos_ref = pos_ref[:, index, :]

        # Squared error normalized by squared character height.
        err = (pos - pos_ref) * (pos - pos_ref)
        err /= height**2
        all_err.append(np.mean(err))

    return np.array(all_err).mean()
# Beispiel #5 (scraper artifact: example separator, not executable code)
# 0
    def __init__(self, args):
        """Load a windowed Mixamo motion dataset from a .npy dump.

        Reads './datasets/Mixamo/<args.dataset>.npy', slices it into
        windows, optionally normalizes per channel, splits 95/5 into
        train/test, and builds a time-reversed copy of the training data.

        Args:
            args: parsed options (dataset, debug, normalization, ...).
        """
        super(MotionData, self).__init__()
        name = args.dataset
        file_path = './datasets/Mixamo/{}.npy'.format(name)

        # Debug mode swaps in the smaller '<name>_debug.npy' companion file.
        if args.debug:
            file_path = file_path[:-4] + '_debug' + file_path[-4:]

        print('load from file {}'.format(file_path))
        self.total_frame = 0
        self.std_bvh = get_std_bvh(args)
        self.args = args
        self.data = []
        self.motion_length = []
        motions = np.load(file_path, allow_pickle=True)
        motions = list(motions)
        # get_windows is defined elsewhere on this class; presumably returns
        # a tensor of fixed-size windows — TODO confirm.
        new_windows = self.get_windows(motions)
        self.data.append(new_windows)
        self.data = torch.cat(self.data)
        # Swap the last two dims, putting channels on dim 1 — assumes
        # windows come back as [N, T, C]; verify against get_windows.
        self.data = self.data.permute(0, 2, 1)

        if args.normalization == 1:
            # Per-channel statistics over the window and time dimensions.
            self.mean = torch.mean(self.data, (0, 2), keepdim=True)
            self.var = torch.var(self.data, (0, 2), keepdim=True)
            # Actually the std (sqrt of variance), kept under the name 'var'.
            self.var = self.var**(1 / 2)
            # Guard (near-)constant channels against division blow-up.
            idx = self.var < 1e-5
            self.var[idx] = 1
            self.data = (self.data - self.mean) / self.var
        else:
            # Identity normalization: zero mean, unit 'var' placeholders.
            self.mean = torch.mean(self.data, (0, 2), keepdim=True)
            self.mean.zero_()
            self.var = torch.ones_like(self.mean)

        # 95/5 train/test split along the window axis.
        train_len = self.data.shape[0] * 95 // 100
        self.test_set = self.data[train_len:, ...]
        self.data = self.data[:train_len, ...]
        # Time-reversed copy via numpy (torch tensors reject negative strides).
        self.data_reverse = torch.tensor(self.data.numpy()[..., ::-1].copy())

        self.reset_length_flag = 0
        self.virtual_length = 0
        print(
            'Window count: {}, total frame (without downsampling): {}'.format(
                len(self), self.total_frame))
    def __init__(self, args, characters):
        """Load per-character offsets and precomputed mean/var statistics.

        Args:
            args: parsed options (uses cuda_device).
            characters: list of character-name groups, one per topology.
        """
        self.characters = characters
        self.file_list = get_test_set()
        self.mean = []
        self.joint_topologies = []
        self.var = []
        self.offsets = []
        self.ee_ids = []
        self.args = args
        self.device = torch.device(args.cuda_device)

        for group in characters:
            group_means, group_vars, group_offsets = [], [], []
            for idx, name in enumerate(group):
                file = BVH_file(get_std_bvh(dataset=name))
                # Topology and end-effector ids are shared within a group,
                # so record them from the first character only.
                if idx == 0:
                    self.joint_topologies.append(file.topology)
                    self.ee_ids.append(file.get_ee_id())
                offset = torch.tensor(file.offset, dtype=torch.float)
                group_offsets.append(offset.reshape((1, ) + offset.shape))
                # Precomputed dataset statistics, stored per character.
                mean = torch.tensor(np.load(
                    './datasets/Mixamo/mean_var/{}_mean.npy'.format(name)))
                var = torch.tensor(np.load(
                    './datasets/Mixamo/mean_var/{}_var.npy'.format(name)))
                group_means.append(mean.reshape((1, ) + mean.shape))
                group_vars.append(var.reshape((1, ) + var.shape))

            self.mean.append(torch.cat(group_means, dim=0).to(self.device))
            self.var.append(torch.cat(group_vars, dim=0).to(self.device))
            self.offsets.append(
                torch.cat(group_offsets, dim=0).to(self.device))
# Beispiel #7 (scraper artifact: example separator, not executable code)
# 0
    def __init__(self, file_path=None, args=None, dataset=None, new_root=None):
        """Parse a BVH file and detect/simplify its skeleton.

        Loads the animation, matches the joint names against the known
        skeleton templates in corps_names, records the joints to drop
        ("details"), and builds the simplified joint/edge topology.

        Args:
            file_path: BVH file to load; when None, the dataset's standard
                BVH (get_std_bvh) is used.
            args: unused here; kept for interface compatibility.
            dataset: dataset name used to resolve the standard BVH path.
            new_root: optional joint index to re-root the skeleton at.

        Raises:
            Exception: if no known skeleton template matches, or if the
                matched template's joints cannot all be found in the file.
        """
        if file_path is None:
            file_path = get_std_bvh(dataset=dataset)
        self.anim, self._names, self.frametime = BVH.load(file_path)
        if new_root is not None:
            self.set_new_root(new_root)
        self.skeleton_type = -1
        self.edges = []
        self.edge_mat = []
        self.edge_num = 0
        self._topology = None
        self.ee_length = []

        # Strip namespace prefixes, e.g. 'mixamorig:Hips' -> 'Hips'.
        for i, name in enumerate(self._names):
            if ':' in name:
                name = name[name.find(':') + 1:]
                self._names[i] = name

        # full_fill[i] == 1 iff every joint name of template i is present.
        full_fill = [1] * len(corps_names)
        for i, ref_names in enumerate(corps_names):
            for ref_name in ref_names:
                if ref_name not in self._names:
                    full_fill[i] = 0
                    break

        # Template 3 takes priority; otherwise pick the first full match.
        if full_fill[3]:
            self.skeleton_type = 3
        else:
            for i, _ in enumerate(full_fill):
                if full_fill[i]:
                    self.skeleton_type = i
                    break

        if self.skeleton_type == 2 and full_fill[4]:
            self.skeleton_type = 4

        # Marker joints pin specific templates; note these overrides are
        # ordered, so later checks win over earlier ones.
        if 'Neck1' in self._names:
            self.skeleton_type = 5
        if 'Left_End' in self._names:
            self.skeleton_type = 6
        if 'Three_Arms_Hips' in self._names:
            self.skeleton_type = 7
        if 'Three_Arms_Hips_split' in self._names:
            self.skeleton_type = 8

        if 'LHipJoint' in self._names:
            self.skeleton_type = 3

        if 'HipsPrisoner' in self._names:
            self.skeleton_type = 9

        if 'Spine1_split' in self._names:
            self.skeleton_type = 10
        """
        4. 
        Here, you need to assign self.skeleton_type the corresponding index of your own dataset in corps_names or ee_names list.
        You can use self._names, which contains the joints name in original bvh file, to write your own if statement.
        """
        # if ...:
        #     self.skeleton_type = 11

        if self.skeleton_type == -1:
            print(self._names)
            raise Exception('Unknown skeleton')

        # Template 0 skeletons are re-rooted at joint index 1.
        if self.skeleton_type == 0:
            self.set_new_root(1)

        # 'details' = joints NOT in the chosen template; they get dropped.
        self.details = []
        for i, name in enumerate(self._names):
            if ':' in name: name = name[name.find(':') + 1:]
            if name not in corps_names[self.skeleton_type]:
                self.details.append(i)
        self.joint_num = self.anim.shape[1]
        self.corps = []
        self.simplified_name = []
        self.simplify_map = {}
        self.inverse_simplify_map = {}

        # corps: original joint index of each template joint, in template
        # order (first name match wins).
        for name in corps_names[self.skeleton_type]:
            for j in range(self.anim.shape[1]):
                if name == self._names[j]:
                    self.corps.append(j)
                    break

        # Every template joint must have been found, or the file is broken.
        if len(self.corps) != len(corps_names[self.skeleton_type]):
            for i in self.corps:
                print(self._names[i], end=' ')
            print(self.corps, self.skeleton_type, len(self.corps), sep='\n')
            raise Exception('Problem in file', file_path)

        # End-effector ids expressed as indices into the simplified skeleton.
        self.ee_id = []
        for i in ee_names[self.skeleton_type]:
            self.ee_id.append(corps_names[self.skeleton_type].index(i))

        # Bidirectional maps between original and simplified joint indices;
        # dropped joints map to -1.
        self.joint_num_simplify = len(self.corps)
        for i, j in enumerate(self.corps):
            self.simplify_map[j] = i
            self.inverse_simplify_map[i] = j
            self.simplified_name.append(self._names[j])
        self.inverse_simplify_map[0] = -1
        for i in range(self.anim.shape[1]):
            if i in self.details:
                self.simplify_map[i] = -1

        self.edges = build_edge_topology(self.topology, self.offset)
    def __init__(self, args, datasets_groups):
        """Build a balanced multi-topology dataset with equal-length slices.

        Each skeleton within a group contributes a disjoint, equally sized
        slice of windows so that every topology's final data has the same
        length.

        Args:
            args: parsed options; a copy per dataset is made with
                augmentation disabled.
            datasets_groups: list of groups of dataset names; each group
                shares one skeleton topology.

        Raises:
            Exception: if the groups end up with unequal total lengths.
        """
        device = torch.device(args.cuda_device if (
            torch.cuda.is_available()) else 'cpu')
        self.final_data = []
        self.length = 0
        self.offsets = []
        self.joint_topologies = []
        self.ee_ids = []
        self.means = []
        self.vars = []
        dataset_num = 0
        # The shortest dataset bounds the usable length across all groups.
        total_length = 10000000
        all_datas = []
        for datasets in datasets_groups:
            offsets_group = []
            means_group = []
            vars_group = []
            dataset_num += len(datasets)
            tmp = []
            for i, dataset in enumerate(datasets):
                # Copy args so augmentation can be disabled per dataset.
                new_args = copy.copy(args)
                new_args.data_augment = 0
                new_args.dataset = dataset

                tmp.append(MotionData(new_args))

                means_group.append(tmp[-1].mean)
                vars_group.append(tmp[-1].var)

                file = BVH_file(get_std_bvh(dataset=dataset))
                if i == 0:
                    # Topology/end-effectors are shared within a group.
                    self.joint_topologies.append(file.topology)
                    self.ee_ids.append(file.get_ee_id())
                new_offset = file.offset
                new_offset = torch.tensor(new_offset, dtype=torch.float)
                new_offset = new_offset.reshape((1, ) + new_offset.shape)
                offsets_group.append(new_offset)

                total_length = min(total_length, len(tmp[-1]))
            all_datas.append(tmp)
            offsets_group = torch.cat(offsets_group, dim=0)
            offsets_group = offsets_group.to(device)
            means_group = torch.cat(means_group, dim=0).to(device)
            vars_group = torch.cat(vars_group, dim=0).to(device)
            self.offsets.append(offsets_group)
            self.means.append(means_group)
            self.vars.append(vars_group)

        # Each skeleton contributes the same number of windows.
        length_per_skeleton = total_length // dataset_num

        for datasets in all_datas:
            motions = []
            skeleton_idx = []
            # Each dataset provides a disjoint slice, labeled by its index.
            for pt, dataset in enumerate(datasets):
                motions.append(dataset[pt * length_per_skeleton:(pt + 1) *
                                       length_per_skeleton])
                skeleton_idx += [pt] * length_per_skeleton
            motions = torch.cat(motions, dim=0)
            if self.length != 0 and self.length != len(skeleton_idx):
                raise Exception(
                    'Not equal dataset size for different topologies')
            self.length = len(skeleton_idx)
            self.final_data.append(MixedData0(args, motions, skeleton_idx))
# Beispiel #9 (scraper artifact: example separator, not executable code)
# 0
    def __init__(self, args, datasets_groups):
        """Build a multi-topology dataset using every available window.

        Unlike the length-balanced variant, each dataset contributes all of
        its windows; unequal group lengths are reconciled by taking the
        minimum. Mean/var statistics come from precomputed .npy files
        rather than from the MotionData objects.

        Args:
            args: parsed options; a copy per dataset is made with
                augmentation disabled.
            datasets_groups: list of groups of dataset names; each group
                shares one skeleton topology.
        """
        device = torch.device(args.cuda_device if (
            torch.cuda.is_available()) else 'cpu')
        self.final_data = []
        self.length = 0
        self.offsets = []
        self.joint_topologies = []
        self.ee_ids = []
        self.means = []
        self.vars = []
        # NOTE: the dead locals seed/dataset_num/total_length from the
        # balanced variant were dropped; this variant never reads them.
        all_datas = []
        for datasets in datasets_groups:
            offsets_group = []
            means_group = []
            vars_group = []
            tmp = []
            for i, dataset in enumerate(datasets):
                # Copy args so augmentation can be disabled per dataset.
                new_args = copy.copy(args)
                new_args.data_augment = 0
                new_args.dataset = dataset

                tmp.append(MotionData(new_args))

                # Precomputed statistics, stored per dataset on disk.
                mean = np.load(
                    './datasets/Mixamo/mean_var/{}_mean.npy'.format(dataset))
                var = np.load(
                    './datasets/Mixamo/mean_var/{}_var.npy'.format(dataset))
                mean = torch.tensor(mean)
                mean = mean.reshape((1, ) + mean.shape)
                var = torch.tensor(var)
                var = var.reshape((1, ) + var.shape)

                means_group.append(mean)
                vars_group.append(var)

                file = BVH_file(get_std_bvh(dataset=dataset))
                if i == 0:
                    # Topology/end-effectors are shared within a group.
                    self.joint_topologies.append(file.topology)
                    self.ee_ids.append(file.get_ee_id())
                new_offset = file.offset
                new_offset = torch.tensor(new_offset, dtype=torch.float)
                new_offset = new_offset.reshape((1, ) + new_offset.shape)
                offsets_group.append(new_offset)
            all_datas.append(tmp)
            offsets_group = torch.cat(offsets_group, dim=0)
            offsets_group = offsets_group.to(device)
            means_group = torch.cat(means_group, dim=0).to(device)
            vars_group = torch.cat(vars_group, dim=0).to(device)
            self.offsets.append(offsets_group)
            self.means.append(means_group)
            self.vars.append(vars_group)

        for datasets in all_datas:
            motions = []
            skeleton_idx = []
            # Every window of every dataset, labeled by its dataset index.
            for pt, dataset in enumerate(datasets):
                motions.append(dataset[:])
                skeleton_idx += [pt] * len(dataset)
            motions = torch.cat(motions, dim=0)
            if self.length != 0 and self.length != len(skeleton_idx):
                # Groups disagree: clamp to the shortest.
                self.length = min(self.length, len(skeleton_idx))
            else:
                self.length = len(skeleton_idx)
            self.final_data.append(MixedData0(args, motions, skeleton_idx))