Example #1
import os
import glob
import random

import numpy as np
from tqdm import tqdm

# preprocess_mixamo, rotate_motion_3d, localize_motion, get_change_of_basis
# and the view_angles array are assumed to come from the surrounding data module


def gen_meanpose(phase, config, n_samp=20000):

    data_dir = config.train_dir if phase == "train" else config.test_dir
    all_paths = glob.glob(os.path.join(data_dir, '*/*/motions/*.npy'))
    random.shuffle(all_paths)
    all_paths = all_paths[:n_samp]
    all_joints = []

    print("computing meanpose and stdpose")

    for path in tqdm(all_paths):
        try:
            motion = np.load(path)
        except Exception:
            # skip files that fail to load
            continue
        if motion.shape[1] == 3:  # 3D motion of shape (J, 3, T)
            basis = None
            if sum(config.rotation_axes) > 0:
                # candidate view angles per axis; axes that are not rotated get a single 0
                x_angles = view_angles if config.rotation_axes[0] else np.array([0])
                z_angles = view_angles if config.rotation_axes[1] else np.array([0])
                y_angles = view_angles if config.rotation_axes[2] else np.array([0])
                x_angles, z_angles, y_angles = np.meshgrid(x_angles, z_angles, y_angles)
                angles = np.stack(
                    [x_angles.flatten(), z_angles.flatten(), y_angles.flatten()], axis=1)
                # pick one random view and compute its change-of-basis matrix
                i = np.random.choice(len(angles))
                basis = get_change_of_basis(motion, angles[i])
            motion = preprocess_mixamo(motion)
            motion = rotate_motion_3d(motion, basis)
            motion = localize_motion(motion)
            all_joints.append(motion)
        else:
            # 2D motion: rescale by a fixed factor, then localize
            motion = motion * 128
            motion_proj = localize_motion(motion)
            all_joints.append(motion_proj)

    all_joints = np.concatenate(all_joints, axis=2)

    meanpose = np.mean(all_joints, axis=2)
    stdpose = np.std(all_joints, axis=2)
    stdpose[stdpose == 0] = 1e-9  # avoid division by zero during normalization

    return meanpose, stdpose
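A minimal sketch, not taken from the examples on this page, of how the returned meanpose and stdpose might be consumed. It assumes a normalize_motion helper that simply broadcasts the per-joint statistics over the time axis, matching the (J, C) shapes produced by gen_meanpose:

def normalize_motion(motion, meanpose, stdpose):
    # motion: (J, C, T); meanpose, stdpose: (J, C) as returned by gen_meanpose
    return (motion - meanpose[:, :, np.newaxis]) / stdpose[:, :, np.newaxis]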
Example #2
    def preprocessing(self, motion3d, view_angle=None, params=None):
        if self.aug:
            motion3d, params = self.augmentation(motion3d, params)

        basis = None
        if view_angle is not None:
            basis = get_change_of_basis(motion3d, view_angle)

        motion3d = preprocess_mixamo(motion3d)
        motion3d = rotate_motion_3d(motion3d, basis)
        # project to 2D by keeping the x and z coordinates
        motion2d = motion3d[:, [0, 2], :]
        motion2d_scale = limb_scale_motion_2d(motion2d, self.global_range,
                                              self.local_range)

        motion2d = localize_motion(motion2d)
        motion2d_scale = localize_motion(motion2d_scale)

        motion2d = normalize_motion(motion2d, self.meanpose, self.stdpose)
        motion2d_scale = normalize_motion(motion2d_scale, self.meanpose,
                                          self.stdpose)

        motion2d = motion2d.reshape((-1, motion2d.shape[-1]))
        motion2d_scale = motion2d_scale.reshape((-1, motion2d_scale.shape[-1]))
        motion2d = torch.from_numpy(motion2d).float()
        motion2d_scale = torch.from_numpy(motion2d_scale).float()

        return motion2d, motion2d_scale
Example #3
    def preprocessing(self, motion3d, view_angle=None, params=None):
        """
        :param item: filename built from self.build_tiem
        :return:
        """

        if self.aug:
            motion3d, params = self.augmentation(motion3d, params)

        basis = None
        if view_angle is not None:
            basis = get_change_of_basis(motion3d, view_angle)

        motion3d = preprocess_mixamo(motion3d)
        motion3d = rotate_motion_3d(motion3d, basis)
        motion3d = localize_motion(motion3d)
        motion3d = normalize_motion(motion3d, self.meanpose, self.stdpose)

        # project to 2D by keeping the x and z coordinates
        motion2d = motion3d[:, [0, 2], :]

        motion3d = motion3d.reshape([-1, motion3d.shape[-1]])
        motion2d = motion2d.reshape([-1, motion2d.shape[-1]])

        motion3d = torch.from_numpy(motion3d).float()
        motion2d = torch.from_numpy(motion2d).float()

        return motion3d, motion2d
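A hedged usage sketch for the preprocessing method above. The dataset instance, the file path, and the batching step are illustrative assumptions, not taken from the source; it only shows how the flattened (channels, time) tensors could be batched before entering a 1-D convolutional encoder:

raw = np.load("some_motion.npy")                    # hypothetical path; shape (J, 3, T)
motion3d, motion2d = dataset.preprocessing(raw, view_angle=np.array([0, 0, 0]))
motion3d = motion3d.unsqueeze(0)                    # (1, J*3, T): batch of one
motion2d = motion2d.unsqueeze(0)                    # (1, J*2, T)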
Example #4
def load_and_preprocess(path):

    motion3d = np.load(path)

    # length must be a multiple of 8 due to the convolutional architecture
    _, _, T = motion3d.shape
    T = (T // 8) * 8
    motion3d = motion3d[:, :, :T]

    # project to 2d
    motion_proj = motion3d[:, [0, 2], :]

    # reformat for mixamo data
    motion_proj = preprocess_mixamo(motion_proj, unit=1.0)

    return motion_proj
Example #5
def load_and_preprocess(path, config, mean_pose, std_pose):

    motion3d = np.load(path)

    # length must be a multiple of 8 due to the convolutional architecture
    _, _, T = motion3d.shape
    T = (T // 8) * 8
    motion3d = motion3d[:, :, :T]

    # project to 2d
    motion_proj = motion3d[:, [0, 2], :]

    # reformat for mixamo data
    motion_proj = preprocess_mixamo(motion_proj, unit=1.0)

    # preprocess for network input
    motion_proj, start = preprocess_test(motion_proj, mean_pose, std_pose,
                                         config.data.unit)
    motion_proj = motion_proj.reshape((-1, motion_proj.shape[-1]))
    motion_proj = torch.from_numpy(motion_proj).float()

    return motion_proj, start
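A brief usage sketch under the same assumptions as the snippet above (config, mean_pose, and std_pose come from the surrounding project; the file path is hypothetical). The start value returned by preprocess_test is presumably the original global position, kept so the translation of the output motion can be restored after inference:

motion, start = load_and_preprocess("input_motion.npy", config, mean_pose, std_pose)
motion = motion.unsqueeze(0)  # (1, C, T): add a batch dimension for the network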