Example #1
def reconstruct_gait(pose, differentials, V):
    num_frames = differentials.shape[1]
    gait = np.zeros((num_frames + 1, pose.shape[0]))
    gait[0, :] = pose
    if differentials.shape[-1] == V * 7:
        del_pos_available = True
    else:
        del_pos_available = False
    for tidx in range(num_frames):
        if del_pos_available:
            differentials_curr = np.reshape(differentials[:, tidx, :], (-1, 7))
            del_pos = differentials_curr[:, 4:].flatten()
        else:
            differentials_curr = np.reshape(differentials[:, tidx, :], (-1, 4))
            del_pos = np.zeros((differentials_curr.shape[0] * 3))
        del_orientation = differentials_curr[:, :4]
        theta, axis_normalized = Quaternions.angle_axis(
            Quaternions(del_orientation))
        theta = np.remainder(theta + np.pi, 2 * np.pi) - np.pi
        axis_normalized = axis_normalized / np.linalg.norm(axis_normalized,
                                                           axis=1)[:, None]
        # theta = np.linalg.norm(del_orientation, axis=1)
        # axis_normalized = del_orientation / theta[:, None]
        gait[tidx + 1, :] = get_rotated_points(axis_normalized, theta,
                                               gait[tidx, :]) + del_pos
        # gait[tidx + 1, :] = gait[tidx, :]
    return gait
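
The layout reconstruct_gait expects is implicit in the V * 7 check above: per joint and frame, the differentials hold a 4-component delta quaternion, optionally followed by a 3-D positional delta. A minimal sketch of that layout, with a hypothetical joint count V and purely illustrative arrays:

import numpy as np

V = 21                                      # hypothetical joint count
num_frames = 100
# del_pos_available == False: 4 values (delta quaternion) per joint
differentials_quat_only = np.zeros((1, num_frames, V * 4))
# del_pos_available == True: 4 quaternion values + 3 positional deltas per joint
differentials_with_pos = np.zeros((1, num_frames, V * 7))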
 def unravel(cls, anim, shape, parents):
     nf, nj = shape
     rotations = anim[nf*nj*0:nf*nj*3]
     positions = anim[nf*nj*3:nf*nj*6]
     orients   = anim[nf*nj*6+nj*0:nf*nj*6+nj*3]
     offsets   = anim[nf*nj*6+nj*3:nf*nj*6+nj*6]
     return cls(
         Quaternions.exp(rotations), positions,
         Quaternions.exp(orients), offsets,
         parents.copy())
    def get_positions_and_transformations(self, raw_data, mirrored=False):
        data = np.swapaxes(np.squeeze(raw_data), -1, 0)
        if data.shape[-1] == 73:
            positions, root_x, root_z, root_r = data[:, 3:-7], data[:, -7], data[:, -6], data[:, -5]
        elif data.shape[-1] == 66:
            positions, root_x, root_z, root_r = data[:, :-3], data[:, -3], data[:, -2], data[:, -1]
        else:
            raise AssertionError('Input data format not understood')
        num_frames = len(positions)
        positions_local = positions.reshape((num_frames, -1, 3))
        if mirrored:
            positions_local[:, self.joints_left], positions_local[:, self.joints_right] = \
            positions_local[:, self.joints_right], positions_local[:, self.joints_left]
            positions_local[:, :, [0, 2]] = -positions_local[:, :, [0, 2]]
        positions_world = np.zeros_like(positions_local)
        num_joints = positions_world.shape[1]

        trajectory = np.empty((num_frames, 3))
        orientations = np.empty(num_frames)
        rotations = np.zeros((num_frames, num_joints - 1, 4))
        cum_rotations = np.zeros((num_frames, 4))
        rotations_euler = np.zeros((num_frames, num_joints - 1, 3))
        cum_rotations_euler = np.zeros((num_frames, 3))
        translations = np.zeros((num_frames, num_joints, 3))
        cum_translations = np.zeros((num_frames, 3))
        offsets = []

        for t in range(num_frames):
            positions_world[t, :, :] = (Quaternions(cum_rotations[t - 1]) if t > 0 else Quaternions.id(1)) * \
                                       positions_local[t]
            positions_world[t, :, 0] = positions_world[t, :, 0] + (cum_translations[t - 1, 0] if t > 0 else 0)
            positions_world[t, :, 2] = positions_world[t, :, 2] + (cum_translations[t - 1, 2] if t > 0 else 0)
            trajectory[t] = positions_world[t, 0]
            # if t > 0:
            #     rotations[t, 1:] = Quaternions.between(positions_world[t - 1, 1:], positions_world[t, 1:]).qs
            # else:
            #     rotations[t, 1:] = Quaternions.id(positions_world.shape[1] - 1).qs
            limbs = positions_world[t, 1:] - positions_world[t, self.joint_parents[1:]]
            rotations[t] = Quaternions.between(self.joint_offsets[1:], limbs)
            # limb_recons = Quaternions(rotations[t, 1:]) * self._offsets[1:]
            # test_limbs = np.setdiff1d(np.arange(20), [12, 16])
            # if np.max(np.abs(limb_recons[test_limbs] - limbs[test_limbs])) > 1e-6:
            #     temp = 1
            rotations_euler[t] = Quaternions(rotations[t]).euler('yzx')
            orientations[t] = -root_r[t]
            # rotations[t, 0] = Quaternions.from_angle_axis(-root_r[t], np.array([0, 1, 0])).qs
            # rotations_euler[t, 0] = Quaternions(rotations[t, 0]).euler('yzx')
            cum_rotations[t] = (Quaternions.from_angle_axis(orientations[t], np.array([0, 1, 0])) *
                                (Quaternions(cum_rotations[t - 1]) if t > 0 else Quaternions.id(1))).qs
            # cum_rotations[t] = (Quaternions(rotations[t, 0]) *
            #                     (Quaternions(cum_rotations[t - 1]) if t > 0 else Quaternions.id(1))).qs
            cum_rotations_euler[t] = Quaternions(cum_rotations[t]).euler('yzx')
            offsets.append(Quaternions(cum_rotations[t]) * np.array([0, 0, 1]))
            translations[t, 0] = Quaternions(cum_rotations[t]) * np.array([root_x[t], 0, root_z[t]])
            cum_translations[t] = (cum_translations[t - 1] if t > 0 else np.zeros((1, 3))) + translations[t, 0]
        # limb_lengths = np.zeros((num_frames, 20))
        return positions_local, positions_world, trajectory, orientations, rotations, rotations_euler, \
               translations, cum_rotations, cum_rotations_euler, cum_translations, offsets
 def write(self, rotations, positions, order, path, frametime=1.0/30):
     # position in shape [frame, 3]
     # rotation in shape [frame, J-1, 4]
     if order == 'quaternion':
         # normalize to unit quaternions before converting to Euler angles
         norm = np.sqrt(rotations[:, :, 0] ** 2 + rotations[:, :, 1] ** 2 +
                        rotations[:, :, 2] ** 2 + rotations[:, :, 3] ** 2)
         norm = np.repeat(norm[:, :, np.newaxis], 4, axis=2)
         rotations /= norm
         rotations = Quaternions(rotations)
         rotations = np.degrees(rotations.euler())
         order = 'xyz'
     # convert the rotation, [frame, J, 3]
     rotations, parent_of_joint = self.edge_rotation_to_joint_rotation(rotations)
     return write_bvh(parent_of_joint, self.offset, rotations, positions, self.names, frametime, order, path)
def rotations_global(anim):
    """
    Global Animation Rotations
    
    This relies on joint ordering
    being incremental. That means a joint
    J1 must not be an ancestor of J0 if
    J0 appears before J1 in the joint
    ordering.
    
    Parameters
    ----------
    
    anim : Animation
        Input animation
        
    Returns
    -------
    
    points : (F, J) Quaternions
        global rotations for every frame F 
        and joint J
    """

    joints  = np.arange(anim.shape[1])
    parents = np.arange(anim.shape[1])
    locals  = anim.rotations
    globals = Quaternions.id(anim.shape)
    
    globals[:,0] = locals[:,0]
    
    for i in range(1, anim.shape[1]):
        globals[:,i] = globals[:,anim.parents[i]] * locals[:,i]
        
    return globals
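
A small self-contained sketch (assumptions: quaternions stored as (w, x, y, z), a made-up three-joint chain) of the same accumulation done on plain NumPy arrays; it illustrates why the docstring requires every parent to appear before its children in the joint ordering:

import numpy as np

def quat_mul(q1, q2):
    # Hamilton product of two quaternions stored as (w, x, y, z).
    w1, x1, y1, z1 = q1
    w2, x2, y2, z2 = q2
    return np.array([w1*w2 - x1*x2 - y1*y2 - z1*z2,
                     w1*x2 + x1*w2 + y1*z2 - z1*y2,
                     w1*y2 - x1*z2 + y1*w2 + z1*x2,
                     w1*z2 + x1*y2 - y1*x2 + z1*w2])

# Toy chain of three joints; parents[i] < i, as the ordering requirement demands.
parents = [-1, 0, 1]
local_rots = [np.array([1.0, 0.0, 0.0, 0.0])] * 3   # identity local rotations
global_rots = [None] * 3
global_rots[0] = local_rots[0]
for i in range(1, 3):
    global_rots[i] = quat_mul(global_rots[parents[i]], local_rots[i])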
Example #6
 def rotate(self, theta, axis):
     q = Quaternions(
         np.hstack((np.cos(theta / 2), np.sin(theta / 2) * axis)))
     position = self.anim.positions[:, 0, :].copy()
     rotation = self.anim.rotations[:, 0, :]
     position[1:, ...] -= position[0:-1, ...]
     q_position = Quaternions(
         np.hstack((np.zeros((position.shape[0], 1)), position)))
     q_rotation = Quaternions.from_euler(np.radians(rotation))
     q_rotation = q * q_rotation
     q_position = q * q_position * (-q)
     self.anim.rotations[:, 0, :] = np.degrees(q_rotation.euler())
     position = q_position.imaginaries
     for i in range(1, position.shape[0]):
         position[i] += position[i - 1]
     self.anim.positions[:, 0, :] = position
Example #7
    def set_new_root(self, new_root):
        euler = torch.tensor(self.anim.rotations[:, 0, :], dtype=torch.float)
        transform = ForwardKinematics.transform_from_euler(euler, 'xyz')
        offset = torch.tensor(self.anim.offsets[new_root], dtype=torch.float)
        new_pos = torch.matmul(transform, offset)
        new_pos = new_pos.numpy() + self.anim.positions[:, 0, :]
        self.anim.offsets[0] = -self.anim.offsets[new_root]
        self.anim.offsets[new_root] = np.zeros((3, ))
        self.anim.positions[:, new_root, :] = new_pos
        rot0 = Quaternions.from_euler(np.radians(self.anim.rotations[:, 0, :]),
                                      order='xyz')
        rot1 = Quaternions.from_euler(np.radians(
            self.anim.rotations[:, new_root, :]),
                                      order='xyz')
        new_rot1 = rot0 * rot1
        new_rot0 = (-rot1)
        new_rot0 = np.degrees(new_rot0.euler())
        new_rot1 = np.degrees(new_rot1.euler())
        self.anim.rotations[:, 0, :] = new_rot0
        self.anim.rotations[:, new_root, :] = new_rot1

        new_seq = []
        vis = [0] * self.anim.rotations.shape[1]
        new_idx = [-1] * len(vis)
        new_parent = [0] * len(vis)

        def relabel(x):
            nonlocal new_seq, vis, new_idx, new_parent
            new_idx[x] = len(new_seq)
            new_seq.append(x)
            vis[x] = 1
            for y in range(len(vis)):
                if not vis[y] and (self.anim.parents[x] == y
                                   or self.anim.parents[y] == x):
                    relabel(y)
                    new_parent[new_idx[y]] = new_idx[x]

        relabel(new_root)
        self.anim.rotations = self.anim.rotations[:, new_seq, :]
        self.anim.offsets = self.anim.offsets[new_seq]
        names = self._names.copy()
        for i, j in enumerate(new_seq):
            self._names[i] = names[j]
        self.anim.parents = np.array(new_parent, dtype=int)
Example #8
 def get_root_rotation(self):
     """
     Get the rotation of the root joint
     :return: A numpy Array in shape [frame, 1, 4]
     """
     # rotation in shape[frame, 1, 4]
     rotation = self.anim.rotations[:, self.corps, :]
     rotation = rotation[:, 0:1, :]
     # convert to quaternion format
     rotation = Quaternions.from_euler(np.radians(rotation)).qs
     return rotation
Example #9
def get_del_pos_and_orientation(pos1, pos2, dim):
    # del_orientation, axis, theta = get_del_orientation(pos1, pos2, dim)
    # pos1_rotated = get_rotated_points(axis, theta, pos1)
    axis, theta = get_del_orientation(pos1, pos2, dim)
    quats = Quaternions.from_angle_axis(theta, axis).qs
    axis_norm = np.sum(axis ** 2, axis=-1) ** 0.5
    axis_normalized = axis / axis_norm[:, None]
    pos1_rotated = get_rotated_points(axis_normalized, theta, pos1)
    del_pos = np.reshape(pos2 - pos1_rotated, (-1, dim))
    # return np.append(del_pos, del_orientation, axis=1).flatten()
    return quats.flatten(), del_pos.flatten()
Example #10
def get_joints_from_mocap_data(data, apply_transformations=True):
    data = np.moveaxis(np.squeeze(data), -1, 0)
    if not apply_transformations:
        joints = np.copy(data)
    else:
        if data.shape[-1] == 73:
            joints, root_x, root_z, root_r = data[:, 3:-7], data[:, -7], data[:, -6], data[:, -5]
        elif data.shape[-1] == 66:
            joints, root_x, root_z, root_r = data[:, :-3], data[:, -3], data[:, -2], data[:, -1]
        else:
            raise AssertionError('Input data format not understood')
    joints = joints.reshape((len(joints), -1, 3))

    rotations = np.empty((len(joints), 4))
    translations = np.empty((len(joints), 3))
    rotations[0, :] = Quaternions.id(1).qs
    offsets = []
    translations[0, :] = np.array([[0, 0, 0]])

    if apply_transformations:
        for i in range(len(joints)):
            joints[i, :, :] = Quaternions(rotations[i, :]) * joints[i]
            joints[i, :, 0] = joints[i, :, 0] + translations[i, 0]
            joints[i, :, 2] = joints[i, :, 2] + translations[i, 2]
            if i + 1 < len(joints):
                rotations[i + 1, :] = (Quaternions.from_angle_axis(-root_r[i], np.array([0, 1, 0])) *
                                       Quaternions(rotations[i, :])).qs
                offsets.append(Quaternions(rotations[i + 1, :]) * np.array([0, 0, 1]))
                translations[i + 1, :] = translations[i, :] +\
                                         Quaternions(rotations[i + 1, :]) * np.array([root_x[i], 0, root_z[i]])
    return joints, rotations, translations
def orients_global(anim):

    joints  = np.arange(anim.shape[1])
    parents = np.arange(anim.shape[1])
    locals  = anim.orients
    globals = Quaternions.id(anim.shape[1])
    
    globals[0] = locals[0]

    for i in range(1, anim.shape[1]):
        globals[i] = globals[anim.parents[i]] * locals[i]
        
    return globals
Example #12
 def convert_eular_to_quaternions(self, rotations):
     rotations_reshape = rotations.view((-1, 3))
     batch = rotations_reshape.shape[0]
     c1 = torch.cos(rotations_reshape[:, 0]).view(batch, 1)  # batch*1
     s1 = torch.sin(rotations_reshape[:, 0]).view(batch, 1)  # batch*1
     c2 = torch.cos(rotations_reshape[:, 2]).view(batch, 1)  # batch*1
     s2 = torch.sin(rotations_reshape[:, 2]).view(batch, 1)  # batch*1
     c3 = torch.cos(rotations_reshape[:, 1]).view(batch, 1)  # batch*1
     s3 = torch.sin(rotations_reshape[:, 1]).view(batch, 1)  # batch*1
     row1 = torch.cat((c2 * c3, -s2, c2 * s3), 1).view(-1, 1, 3)  # batch*1*3
     row2 = torch.cat((c1 * s2 * c3 + s1 * s3, c1 * c2, c1 * s2 * s3 - s1 * c3), 1).view(-1, 1, 3)  # batch*1*3
     row3 = torch.cat((s1 * s2 * c3 - c1 * s3, s1 * c2, s1 * s2 * s3 + c1 * c3), 1).view(-1, 1, 3)  # batch*1*3
     matrices = torch.cat((row1, row2, row3), 1).cpu().numpy()
     q = Quaternions.from_transforms(matrices)
     return q.qs
Example #13
    def convert_6d_to_quaternions(self, rotations):
        rotations_reshape = rotations.view((-1, 6))
        x_raw = rotations_reshape[:, 0:3]  # batch*3
        y_raw = rotations_reshape[:, 3:6]  # batch*3

        x = self.normalize_vector(x_raw)  # batch*3
        z = self.cross_product(x, y_raw)  # batch*3
        z = self.normalize_vector(z)  # batch*3
        y = self.cross_product(z, x)  # batch*3

        x = x.view(-1, 3, 1)
        y = y.view(-1, 3, 1)
        z = z.view(-1, 3, 1)
        matrices = torch.cat((x, y, z), 2).cpu().numpy()
        q = Quaternions.from_transforms(matrices)
        return q.qs
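
The normalize_vector and cross_product helpers are not shown in this example; a minimal sketch of what such helpers might look like in PyTorch (an assumption for illustration, not the actual implementation from the source class):

import torch

def normalize_vector(v, eps=1e-8):
    # Normalize each row vector; eps guards against division by zero.
    return v / torch.clamp(v.norm(dim=-1, keepdim=True), min=eps)

def cross_product(u, v):
    # Row-wise 3-D cross product for (batch, 3) tensors.
    return torch.cross(u, v, dim=-1)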
Example #14
def load_ewalk_data(_path, coords, joints, upsample=1):

    file_feature = os.path.join(_path, 'features' + '_ewalk.h5')
    ff = h5py.File(file_feature, 'r')
    file_label = os.path.join(_path, 'labels' + '_ewalk.h5')
    fl = h5py.File(file_label, 'r')

    data_list = []
    num_samples = len(ff.keys())
    time_steps = 0
    labels = np.empty(num_samples)
    for si in range(num_samples):
        ff_group_key = list(ff.keys())[si]
        data_list.append(list(ff[ff_group_key]))  # Get the data
        time_steps_curr = len(ff[ff_group_key])
        if time_steps_curr > time_steps:
            time_steps = time_steps_curr
        labels[si] = fl[list(fl.keys())[si]][()]

    data = np.zeros((num_samples, time_steps * upsample, joints * coords))
    num_frames = np.empty(num_samples)
    for si in range(num_samples):
        data_list_curr_arr = np.array(data_list[si])
        tsteps_curr = len(data_list[si]) * upsample
        for lidx in range(data_list_curr_arr.shape[1]):
            data[si, :tsteps_curr,
                 lidx] = signal.resample(data_list_curr_arr[:, lidx],
                                         tsteps_curr)
            if lidx > 0 and lidx % coords == 0:
                temp = np.copy(data[si, :tsteps_curr, lidx - 1])
                data[si, :tsteps_curr,
                     lidx - 1] = np.copy(-data[si, :tsteps_curr, lidx - 2])
                data[si, :tsteps_curr, lidx - 2] = temp
                rotation = Quaternions.from_angle_axis(np.pi / 2.,
                                                       np.array([1, 0, 0]))
                for t in range(tsteps_curr):
                    data[si, t,
                         lidx - 3:lidx] = rotation * data[si, t, lidx - 3:lidx]
        num_frames[si] = tsteps_curr
    poses, differentials, affective_features = common.get_ewalk_differentials_with_padding(
        data, num_frames, coords)
    return train_test_split(poses,
                            differentials,
                            affective_features,
                            num_frames,
                            labels,
                            test_size=0.1)
Example #15
 def get_rotation(self):
     """
     Get the rotation of each joint's parent except the root joint
     :return: A numpy Array in shape [frame, simple_joint_num - 1, 4]
     """
     # rotation in shape[frame, simple_joint_num, 3]
     rotations = self.anim.rotations[:, self.corps, :]
     # transform euler degree into radians then into quaternion
     # in shape [frame, simple_joint_num, 4]
     rotations = Quaternions.from_euler(np.radians(rotations)).qs
     # Information for every joint except the root is stored in edges.
     # In a .bvh file, each joint's rotation is the rotation applied to its child joint,
     # whereas the simplified skeleton needs, for every bone, the rotation of that bone's
     # parent in the full skeleton, so those rotations are replaced with the rotation
     # of the corresponding parent in the simplified skeleton.
     # Since edge information is used, this can also be read as how much each edge itself rotates.
     index = []
     for e in self.edges:
         index.append(e[0])
     # now rotation is in shape[frame, simple_joint_num - 1, 4]
     rotations = rotations[:, index, :]
     return rotations
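
A hedged illustration of the index gathering above: if self.edges held (parent, child) pairs such as the hypothetical list below, each edge keeps the rotation stored at its parent joint:

# Hypothetical edge list; the real self.edges comes from the simplified skeleton.
edges = [(0, 1), (1, 2), (2, 3)]
index = [e[0] for e in edges]
assert index == [0, 1, 2]   # rotation of each edge = rotation of its parent joint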
Example #16
def get_edin_differentials_with_padding(gaits, num_frames, coords):
    dataset = 'edin'
    affs_dim = 18
    num_samples = gaits.shape[0]
    num_frames_max = gaits.shape[1]
    num_coords = gaits.shape[2]
    num_joints = int(num_coords / coords)
    poses = np.zeros((num_samples, num_coords))
    rotations = np.zeros((num_samples, num_frames_max, num_joints * 4))
    translations = np.zeros((num_samples, num_frames_max, num_joints * 3))
    affective_features = np.zeros((num_samples, num_frames_max, affs_dim))
    for sidx in range(num_samples):
        for tidx in range(int(num_frames[sidx])):
            affective_features[sidx, tidx, :] = \
                get_edin_affective_features(gaits[sidx, tidx, :], coords, affs_dim)
            if tidx == 0:
                poses[sidx, :] = gaits[sidx, tidx, :]
                rotations[sidx, tidx, :] = Quaternions.id(num_joints).qs.flatten()
            else:
                rotations[sidx, tidx, :], translations[sidx, tidx, :] = \
                    get_del_pos_and_orientation(gaits[sidx, tidx - 1, :],
                                                gaits[sidx, tidx, :], coords)
    return poses, rotations, translations, affective_features
Example #17
 def convert_eular_to_quaternions(self, rotations):
     rotations_reshape = rotations.view((-1, 3))
     q = Quaternions.from_euler(rotations_reshape)
     return q.qs
Example #18
def load(filename, start=None, end=None, order=None, world=False):
    """
    Reads a BVH file and constructs an animation

    Parameters
    ----------
    filename: str
        File to be opened

    start : int
        Optional Starting Frame

    end : int
        Optional Ending Frame

    order : str
        Optional specifier for the Euler rotation order,
        given as a string, e.g. 'xyz', 'zxy'

    world : bool
        If set to true, Euler angles are applied
        in world space rather than local space

    Returns
    -------

    (animation, joint_names, frametime)
        Tuple of the loaded animation, joint names, and frame time
    """

    f = open(filename, "r")

    i = 0
    active = -1
    end_site = False

    names = []
    orients = Quaternions.id(0)
    offsets = np.array([]).reshape((0, 3))
    parents = np.array([], dtype=int)

    for line in f:

        if "HIERARCHY" in line: continue
        if "MOTION" in line: continue

        rmatch = re.match(r"ROOT (\w+)", line)
        if rmatch:
            names.append(rmatch.group(1))
            offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
            orients.qs = np.append(orients.qs,
                                   np.array([[1, 0, 0, 0]]),
                                   axis=0)
            parents = np.append(parents, active)
            active = (len(parents) - 1)
            continue

        if "{" in line: continue

        if "}" in line:
            if end_site:
                end_site = False
            else:
                active = parents[active]
            continue

        offmatch = re.match(
            r"\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)", line)
        if offmatch:
            if not end_site:
                offsets[active] = np.array(
                    [list(map(float, offmatch.groups()))])
            continue

        chanmatch = re.match(r"\s*CHANNELS\s+(\d+)", line)
        if chanmatch:
            channels = int(chanmatch.group(1))
            if order is None:
                channelis = 0 if channels == 3 else 3
                channelie = 3 if channels == 3 else 6
                parts = line.split()[2 + channelis:2 + channelie]
                if any([p not in channelmap for p in parts]):
                    continue
                order = "".join([channelmap[p] for p in parts])
            continue

        jmatch = re.match(r"\s*JOINT\s+(\w+)", line)
        if jmatch:
            names.append(jmatch.group(1))
            offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
            orients.qs = np.append(orients.qs,
                                   np.array([[1, 0, 0, 0]]),
                                   axis=0)
            parents = np.append(parents, active)
            active = (len(parents) - 1)
            continue

        if "End Site" in line:
            end_site = True
            continue

        fmatch = re.match(r"\s*Frames:\s+(\d+)", line)
        if fmatch:
            if start and end:
                fnum = (end - start) - 1
            else:
                fnum = int(fmatch.group(1))
            jnum = len(parents)
            positions = offsets[np.newaxis].repeat(fnum, axis=0)
            rotations = np.zeros((fnum, len(orients), 3))
            continue

        fmatch = re.match(r"\s*Frame Time:\s+([\d\.]+)", line)
        if fmatch:
            frametime = float(fmatch.group(1))
            continue

        if (start and end) and (i < start or i >= end - 1):
            i += 1
            continue

        dmatch = line.strip().split(' ')
        if dmatch:
            data_block = np.array(list(map(float, dmatch)))
            N = len(parents)
            fi = i - start if start else i
            if channels == 3:
                positions[fi, 0:1] = data_block[0:3]
                rotations[fi, :] = data_block[3:].reshape(N, 3)
            elif channels == 6:
                data_block = data_block.reshape(N, 6)
                positions[fi, :] = data_block[:, 0:3]
                rotations[fi, :] = data_block[:, 3:6]
            elif channels == 9:
                positions[fi, 0] = data_block[0:3]
                data_block = data_block[3:].reshape(N - 1, 9)
                rotations[fi, 1:] = data_block[:, 3:6]
                positions[fi, 1:] += data_block[:, 0:3] * data_block[:, 6:9]
            else:
                raise Exception("Too many channels! %i" % channels)

            i += 1

    f.close()

    rotations = Quaternions.from_euler(np.radians(rotations),
                                       order=order,
                                       world=world)

    return Animation(rotations, positions, orients, offsets,
                     parents), names, frametime
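
A hedged usage sketch for the loader above; the file name is a placeholder, and Animation and Quaternions come from the same motion library:

# 'walk.bvh' is a hypothetical path; order and world keep their defaults.
anim, joint_names, frametime = load('walk.bvh')
print(anim.shape)             # (frames, joints)
print(anim.positions.shape)   # (frames, joints, 3)
print(joint_names, frametime)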
Example #19
    def get_positions_and_transformations(self, raw_data, mirrored=False):
        data = np.swapaxes(np.squeeze(raw_data), -1, 0)
        if data.shape[-1] == 73:
            positions, root_x, root_z, root_r = data[:, 3:-7], data[:, -7], data[:, -6], data[:, -5]
        elif data.shape[-1] == 66:
            positions, root_x, root_z, root_r = data[:, :-3], data[:, -3], data[:, -2], data[:, -1]
        else:
            raise AssertionError('Input data format not understood')
        num_frames = len(positions)
        positions_local = positions.reshape((num_frames, -1, 3))
        # positions_local[:, 11:13, [0, 2]] += 2. * (positions_local[:, 10:11, [0, 2]] - positions_local[:, 11:12, [0, 2]])
        # positions_local[:, 12, [0, 2]] = 2. * positions_local[:, 11, [0, 2]] - positions_local[:, 12, [0, 2]]
        # positions_local[:, 13:17, 0] += \
        #     np.sqrt(
        #         np.sum(np.square(positions_local[:, 13:14, [0, 2]] -
        #                          positions_local[:, 10:11, [0, 2]]), axis=-1) -\
        #         np.square(0.8)
        #     ) + positions_local[:, 10:11, 0] - positions_local[:, 13:14, 0]
        # positions_local[:, 13:17, 2] += positions_local[:, 10:11, 2] + 0.8 - positions_local[:, 13:14, 2]
        # positions_local[:, 17:, 0] += \
        #     positions_local[:, 10:11, 0] - \
        #     np.sqrt(
        #         np.sum(np.square(positions_local[:, 17:18, [0, 2]] -
        #                          positions_local[:, 10:11, [0, 2]]), axis=-1) -\
        #         np.square(0.5)
        #     ) - positions_local[:, 17:18, 0]
        # positions_local[:, 17:, 2] += positions_local[:, 10:11, 2] + 0.5 - positions_local[:, 17:18, 2]
        if mirrored:
            positions_local[:, self.joints_left], positions_local[:, self.joints_right] = \
            positions_local[:, self.joints_right], positions_local[:, self.joints_left]
            positions_local[:, :, [0, 2]] = -positions_local[:, :, [0, 2]]
        positions_world = np.zeros_like(positions_local)
        num_joints = positions_world.shape[1]

        trajectory = np.empty((num_frames, 3))
        orientations = np.empty(num_frames)
        rotations = np.zeros((num_frames, num_joints - 1, 4))
        cum_rotations = np.zeros((num_frames, 4))
        rotations_euler = np.zeros((num_frames, num_joints - 1, 3))
        cum_rotations_euler = np.zeros((num_frames, 3))
        translations = np.zeros((num_frames, num_joints, 3))
        cum_translations = np.zeros((num_frames, 3))
        offsets = []
        limbs_all = []

        for t in range(num_frames):
            positions_world[t, :, :] = (Quaternions(cum_rotations[t - 1]) if t > 0 else Quaternions.id(1)) * \
                                       positions_local[t]
            positions_world[t, :, 0] = positions_world[t, :, 0] + (
                cum_translations[t - 1, 0] if t > 0 else 0)
            positions_world[t, :, 2] = positions_world[t, :, 2] + (
                cum_translations[t - 1, 2] if t > 0 else 0)
            trajectory[t] = positions_world[t, 0]
            # if t > 0:
            #     rotations[t, 1:] = Quaternions.between(positions_world[t - 1, 1:], positions_world[t, 1:]).qs
            # else:
            #     rotations[t, 1:] = Quaternions.id(positions_world.shape[1] - 1).qs
            limbs = positions_world[t, 1:] - positions_world[
                t, self.joint_parents[1:]]
            rotations[t] = Quaternions.between(self.joint_offsets[1:], limbs)
            limbs_all.append(limbs)
            # limb_recons = Quaternions(rotations[t, 1:]) * self._offsets[1:]
            # test_limbs = np.setdiff1d(np.arange(20), [12, 16])
            # if np.max(np.abs(limb_recons[test_limbs] - limbs[test_limbs])) > 1e-6:
            #     temp = 1
            rotations_euler[t] = Quaternions(rotations[t]).euler('yzx')
            orientations[t] = -root_r[t]
            # rotations[t, 0] = Quaternions.from_angle_axis(-root_r[t], np.array([0, 1, 0])).qs
            # rotations_euler[t, 0] = Quaternions(rotations[t, 0]).euler('yzx')
            cum_rotations[t] = (Quaternions.from_angle_axis(
                orientations[t], np.array([0, 1, 0])) * (Quaternions(
                    cum_rotations[t - 1]) if t > 0 else Quaternions.id(1))).qs
            # cum_rotations[t] = (Quaternions(rotations[t, 0]) *
            #                     (Quaternions(cum_rotations[t - 1]) if t > 0 else Quaternions.id(1))).qs
            cum_rotations_euler[t] = Quaternions(cum_rotations[t]).euler('yzx')
            offsets.append(Quaternions(cum_rotations[t]) * np.array([0, 0, 1]))
            translations[t, 0] = Quaternions(cum_rotations[t]) * np.array(
                [root_x[t], 0, root_z[t]])
            cum_translations[t] = (cum_translations[t - 1] if t > 0 else
                                   np.zeros((1, 3))) + translations[t, 0]
        # limb_lengths =
        limbs_all = np.stack(limbs_all)
        self.save_as_bvh(np.expand_dims(positions_world[:, 0], 0),
                         np.expand_dims(orientations.reshape(-1, 1), 0),
                         np.expand_dims(rotations, 0),
                         dataset_name='edin',
                         subset_name='test')
        return positions_local, positions_world, trajectory, orientations, rotations, rotations_euler, \
               translations, cum_rotations, cum_rotations_euler, cum_translations, offsets
def load_from_maya(root, start, end):
    """
    Load Animation Object from Maya Joint Skeleton    
    
    Parameters
    ----------
    
    root : PyNode
        Root Joint of Maya Skeleton
        
    start, end : int, int
        Start and End frame index of Maya Animation
    
    Returns
    -------
    
    animation : Animation
        Loaded animation from maya
        
    names : [str]
        Joint names from maya   
    """

    import pymel.core as pm
    
    original_time = pm.currentTime(q=True)
    pm.currentTime(start)
    
    """ Build Structure """
    
    names, parents = AnimationStructure.load_from_maya(root)
    descendants = AnimationStructure.descendants_list(parents)
    orients = Quaternions.id(len(names))
    offsets = np.array([pm.xform(j, q=True, translation=True) for j in names])
    
    for j, name in enumerate(names):
        scale = pm.xform(pm.PyNode(name), q=True, scale=True, relative=True)
        if len(descendants[j]) == 0: continue
        offsets[descendants[j]] *= scale
    
    """ Load Animation """

    eulers    = np.zeros((end-start, len(names), 3))
    positions = np.zeros((end-start, len(names), 3))
    rotations = Quaternions.id((end-start, len(names)))
    
    for i in range(end-start):
        
        pm.currentTime(start+i+1, u=True)
        
        scales = {}
        
        for j, name, parent in zip(range(len(names)), names, parents):
            
            node = pm.PyNode(name)
            
            if i == 0 and pm.hasAttr(node, 'jointOrient'):
                ort = node.getOrientation()
                orients[j] = Quaternions(np.array([ort[3], ort[0], ort[1], ort[2]]))
            
            if pm.hasAttr(node, 'rotate'):    eulers[i,j]    = np.radians(pm.xform(node, q=True, rotation=True))
            if pm.hasAttr(node, 'translate'): positions[i,j] = pm.xform(node, q=True, translation=True)
            if pm.hasAttr(node, 'scale'):     scales[j]      = pm.xform(node, q=True, scale=True, relative=True)

        for j in scales:
            if len(descendants[j]) == 0: continue
            positions[i,descendants[j]] *= scales[j] 
        
        positions[i,0] = pm.xform(root, q=True, translation=True, worldSpace=True)
    
    rotations = orients[np.newaxis] * Quaternions.from_euler(eulers, order='xyz', world=True)
    
    """ Done """
    
    pm.currentTime(original_time)
    
    return Animation(rotations, positions, orients, offsets, parents), names
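
A hedged usage sketch; this only runs inside a Maya session with pymel available, and 'Hips' is a placeholder root joint name:

import pymel.core as pm

# Hypothetical root joint and frame range.
anim, names = load_from_maya(pm.PyNode('Hips'), start=1, end=120)
print(anim.rotations.shape, names)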
def load_to_maya(anim, names=None, radius=0.5):
    """
    Load Animation Object into Maya as Joint Skeleton
    loads each frame as a new keyframe in Maya.
    
    If the animation plays too slow or too fast, the framerate
    may need adjusting before loading so that it matches
    the Maya scene framerate.
    
    
    Parameters
    ----------
    
    anim : Animation
        Animation to load into Scene
        
    names : [str]
        Optional list of Joint names for Skeleton
    
    Returns
    -------
    
    List of Maya Joint Nodes loaded into scene
    """
    
    import pymel.core as pm
    
    joints = []
    frames = range(1, len(anim)+1)
    
    if names is None: names = ["joint_" + str(i) for i in range(len(anim.parents))]
    
    for i, offset, orient, parent, name in zip(range(len(anim.offsets)), anim.offsets, anim.orients, anim.parents, names):
    
        if parent < 0:
            pm.select(d=True)
        else:
            pm.select(joints[parent])
        
        joint = pm.joint(n=name, p=offset, relative=True, radius=radius)
        joint.setOrientation([orient[1], orient[2], orient[3], orient[0]])
        
        curvex = pm.nodetypes.AnimCurveTA(n=name + "_rotateX")
        curvey = pm.nodetypes.AnimCurveTA(n=name + "_rotateY")
        curvez = pm.nodetypes.AnimCurveTA(n=name + "_rotateZ")  
        
        jrotations = (-Quaternions(orient[np.newaxis]) * anim.rotations[:,i]).euler()
        curvex.addKeys(frames, jrotations[:,0])
        curvey.addKeys(frames, jrotations[:,1])
        curvez.addKeys(frames, jrotations[:,2])
        
        pm.connectAttr(curvex.output, joint.rotateX)
        pm.connectAttr(curvey.output, joint.rotateY)
        pm.connectAttr(curvez.output, joint.rotateZ)
        
        offsetx = pm.nodetypes.AnimCurveTU(n=name + "_translateX")
        offsety = pm.nodetypes.AnimCurveTU(n=name + "_translateY")
        offsetz = pm.nodetypes.AnimCurveTU(n=name + "_translateZ")
        
        offsetx.addKeys(frames, anim.positions[:,i,0])
        offsety.addKeys(frames, anim.positions[:,i,1])
        offsetz.addKeys(frames, anim.positions[:,i,2])
        
        pm.connectAttr(offsetx.output, joint.translateX)
        pm.connectAttr(offsety.output, joint.translateY)
        pm.connectAttr(offsetz.output, joint.translateZ)
        
        joints.append(joint)
    
    return joints
Example #22
    def load_bvh(file_name,
                 channel_map=None,
                 start=None,
                 end=None,
                 order=None,
                 world=False):
        '''
        Reads a BVH file and constructs an animation

        Parameters
        ----------
        file_name: str
            File to be opened

        channel_map: Dict
            Mapping between the rotation channel names in the BVH file
            (Xrotation, Yrotation, Zrotation) and the axis letters x, y, z

        start : int
            Optional Starting Frame

        end : int
            Optional Ending Frame

        order : str
            Optional specifier for the Euler rotation order,
            given as a string, e.g. 'xyz', 'zxy'

        world : bool
            If set to true, Euler angles are applied
            in world space rather than local space

        Returns
        -------

        (animation, joint_names, frame_time)
            Tuple of the loaded animation, joint names, and frame time
        '''

        if channel_map is None:
            channel_map = {
                'Xrotation': 'x',
                'Yrotation': 'y',
                'Zrotation': 'z'
            }
        f = open(file_name, 'r')

        i = 0
        active = -1
        end_site = False

        names = []
        orients = Quaternions.id(0)
        offsets = np.array([]).reshape((0, 3))
        parents = np.array([], dtype=int)
        prev_data_match = []
        for line in f:

            if 'HIERARCHY' in line:
                continue
            if 'MOTION' in line:
                continue

            root_match = re.match(r'ROOT (\w+)', line)
            if root_match:
                names.append(root_match.group(1))
                offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
                orients.qs = np.append(orients.qs,
                                       np.array([[1, 0, 0, 0]]),
                                       axis=0)
                parents = np.append(parents, active)
                active = (len(parents) - 1)
                continue

            if '{' in line:
                continue

            if '}' in line:
                if end_site:
                    end_site = False
                else:
                    active = parents[active]
                continue

            offset_match = re.match(
                r'\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)',
                line)
            if offset_match:
                if not end_site:
                    offsets[active] = np.array(
                        [list(map(float, offset_match.groups()))])
                continue

            channel_match = re.match(r'\s*CHANNELS\s+(\d+)', line)
            if channel_match:
                channels = int(channel_match.group(1))
                if order is None:
                    channel_is = 0 if channels == 3 else 3
                    channel_ie = 3 if channels == 3 else 6
                    parts = line.split()[2 + channel_is:2 + channel_ie]
                    if any([p not in channel_map for p in parts]):
                        continue
                    order = ''.join([channel_map[p] for p in parts])
                continue

            joint_match = re.match(r'\s*JOINT\s+(\w+)', line)
            if joint_match:
                names.append(joint_match.group(1))
                offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
                orients.qs = np.append(orients.qs,
                                       np.array([[1, 0, 0, 0]]),
                                       axis=0)
                parents = np.append(parents, active)
                active = (len(parents) - 1)
                continue

            if 'End Site' in line:
                end_site = True
                continue

            frame_match = re.match(r'\s*Frames:\s+(\d+)', line)
            if frame_match:
                if start and end:
                    frame_num = (end - start) - 1
                else:
                    frame_num = int(frame_match.group(1))
                joint_num = len(parents)
                positions = offsets[np.newaxis].repeat(frame_num, axis=0)
                rotations = np.zeros((frame_num, len(orients), 3))
                continue

            frame_match = re.match(r'\s*Frame Time:\s+([\d\.]+)', line)
            if frame_match:
                frame_time = float(frame_match.group(1))
                continue

            if (start and end) and (i < start or i >= end - 1):
                i += 1
                continue
            if '#IND' in line:
                new_data_match = line.strip().split(' ')
                for k in range(len(new_data_match)):
                    if '#IND' in new_data_match[k]:
                        new_data_match[k] = prev_data_match[k]
                line = ' '.join(new_data_match)

            data_match = line.strip().split(' ')
            prev_data_match = data_match
            if data_match:

                data_block = np.array(list(map(float, data_match)))
                N = len(parents)
                fi = i - start if start else i
                if channels == 3:
                    positions[fi, 0:1] = data_block[0:3]
                    rotations[fi, :] = data_block[3:].reshape(N, 3)
                elif channels == 6:
                    data_block = data_block.reshape(N, 6)
                    positions[fi, :] = data_block[:, 0:3]
                    rotations[fi, :] = data_block[:, 3:6]
                elif channels == 9:
                    positions[fi, 0] = data_block[0:3]
                    data_block = data_block[3:].reshape(N - 1, 9)
                    rotations[fi, 1:] = data_block[:, 3:6]
                    positions[fi,
                              1:] += data_block[:, 0:3] * data_block[:, 6:9]
                else:
                    raise Exception('Too many channels! {}'.format(channels))

                i += 1

        f.close()

        rotations = qfix(
            Quaternions.from_euler(np.radians(rotations),
                                   order=order,
                                   world=world).qs)
        positions = MocapDataset.forward_kinematics(
            torch.from_numpy(rotations).cuda().float().unsqueeze(0),
            torch.from_numpy(positions[:, 0]).cuda().float().unsqueeze(0),
            parents,
            torch.from_numpy(offsets).cuda().float()).squeeze().cpu().numpy()
        orientations, _ = Quaternions(rotations[:, 0]).angle_axis()
        return names, parents, offsets, positions, rotations
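
A hedged usage sketch; MocapDataset is assumed to be the class this loader belongs to (it already calls MocapDataset.forward_kinematics internally), a CUDA device is required by that call, and the file path is a placeholder:

# 'take_001.bvh' is a hypothetical path.
names, parents, offsets, positions, rotations = MocapDataset.load_bvh('take_001.bvh')
print(len(names), positions.shape, rotations.shape)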
Example #23
    def generate_motion(self, load_saved_model=True, samples_to_generate=1534, max_steps=300, randomized=True):

        if load_saved_model:
            self.load_best_model()
        self.model.eval()
        test_loader = self.data_loader['test']

        pos, quat, orient, z_mean, z_dev, \
        root_speed, affs, spline, labels = self.return_batch([samples_to_generate], test_loader, randomized=randomized)
        pos = torch.from_numpy(pos).cuda()
        traj = pos[:, :, 0, [0, 2]].clone()
        orient = torch.from_numpy(orient).cuda()
        quat = torch.from_numpy(quat).cuda()
        z_mean = torch.from_numpy(z_mean).cuda()
        z_dev = torch.from_numpy(z_dev).cuda()
        root_speed = torch.from_numpy(root_speed).cuda()
        affs = torch.from_numpy(affs).cuda()
        spline = torch.from_numpy(spline).cuda()
        z_rs = torch.cat((z_dev, root_speed), dim=-1)
        quat_all = torch.cat((orient[:, self.prefix_length - 1:], quat[:, self.prefix_length - 1:]), dim=-1)
        labels = np.tile(labels, (1, max_steps + self.prefix_length, 1))

        # Begin for transition
        # traj[:, self.prefix_length - 2] = torch.tensor([-0.208, 4.8]).cuda().float()
        # traj[:, self.prefix_length - 1] = torch.tensor([-0.204, 5.1]).cuda().float()
        # final_emo_idx = int(max_steps/2)
        # labels[:, final_emo_idx:] = np.array([1., 0., 0., 0.])
        # labels[:, :final_emo_idx + 1] = np.linspace(labels[:, 0], labels[:, final_emo_idx],
        #                                             num=final_emo_idx + 1, axis=1)
        # End for transition
        labels = torch.from_numpy(labels).cuda()

        # traj_np = traj_markers.detach().cpu().numpy()
        # import matplotlib.pyplot as plt
        # plt.plot(traj_np[6, :, 0], traj_np[6, :, 1])
        # plt.show()

        happy_idx = [25, 295, 390, 667, 1196]
        sad_idx = [169, 184, 258, 948, 974]
        angry_idx = [89, 93, 96, 112, 289, 290, 978]
        neutral_idx = [72, 106, 143, 237, 532, 747, 1177]
        sample_idx = np.squeeze(np.concatenate((happy_idx, sad_idx, angry_idx, neutral_idx)))

        ## CHANGE HERE
        # scene_corners = torch.tensor([[149.862, 50.833],
        #                               [149.862, 36.81],
        #                               [161.599, 36.81],
        #                               [161.599, 50.833]]).cuda().float()
        # character_heights = torch.tensor([0.95, 0.88, 0.86, 0.90, 0.95, 0.82]).cuda().float()
        # num_characters_per_side = torch.tensor([2, 3, 2, 3]).cuda().int()
        # traj_markers, traj_offsets, character_scale =\
        #     generate_trajectories(scene_corners, z_mean, character_heights,
        #                           num_characters_per_side, traj[:, :self.prefix_length])
        # num_characters_per_side = torch.tensor([4, 0, 0, 0]).cuda().int()
        # traj_markers, traj_offsets, character_scale =\
        #     generate_simple_trajectories(scene_corners, z_mean[:4], z_mean[:4],
        #                                  num_characters_per_side, traj[sample_idx, :self.prefix_length])
        # traj_markers, traj_offsets, character_scale =\
        #     generate_rvo_trajectories(scene_corners, z_mean[:4], z_mean[:4],
        #                               num_characters_per_side, traj[sample_idx, :self.prefix_length])

        # traj[sample_idx, :self.prefix_length] += traj_offsets
        # pos_sampled = pos[sample_idx].clone()
        # pos_sampled[:, :self.prefix_length, :, [0, 2]] += traj_offsets.unsqueeze(2).repeat(1, 1, self.V, 1)
        # pos[sample_idx] = pos_sampled
        # traj_markers = self.generate_linear_trajectory(traj)

        pos_pred = self.copy_prefix(pos)
        traj_pred = self.copy_prefix(traj)
        orient_pred = self.copy_prefix(orient)
        quat_pred = self.copy_prefix(quat)
        z_rs_pred = self.copy_prefix(z_rs)
        affs_pred = self.copy_prefix(affs)
        spline_pred = self.copy_prefix(spline)
        labels_pred = self.copy_prefix(labels, prefix_length=max_steps + self.prefix_length)

        # forward
        elapsed_time = np.zeros(len(sample_idx))
        for counter, s in enumerate(sample_idx):  # range(samples_to_generate):
            start_time = time.time()
            orient_h_copy = self.orient_h.clone()
            quat_h_copy = self.quat_h.clone()
            ## CHANGE HERE
            num_markers = max_steps + self.prefix_length + 1
            # num_markers = traj_markers[s].shape[0]
            marker_idx = 0
            t = -1
            with torch.no_grad():
                while marker_idx < num_markers:
                    t += 1
                    if t > max_steps:
                        print('Sample: {}. Did not reach end in {} steps.'.format(s, max_steps), end='')
                        break
                    pos_pred[s] = torch.cat((pos_pred[s], torch.zeros_like(pos_pred[s][:, -1:])), dim=1)
                    traj_pred[s] = torch.cat((traj_pred[s], torch.zeros_like(traj_pred[s][:, -1:])), dim=1)
                    orient_pred[s] = torch.cat((orient_pred[s], torch.zeros_like(orient_pred[s][:, -1:])), dim=1)
                    quat_pred[s] = torch.cat((quat_pred[s], torch.zeros_like(quat_pred[s][:, -1:])), dim=1)
                    z_rs_pred[s] = torch.cat((z_rs_pred[s], torch.zeros_like(z_rs_pred[s][:, -1:])), dim=1)
                    affs_pred[s] = torch.cat((affs_pred[s], torch.zeros_like(affs_pred[s][:, -1:])), dim=1)
                    spline_pred[s] = torch.cat((spline_pred[s], torch.zeros_like(spline_pred[s][:, -1:])), dim=1)

                    orient_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1], \
                    quat_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1], \
                    z_rs_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1], \
                    orient_h_copy, quat_h_copy = \
                        self.model(
                            orient_pred[s][:, t:self.prefix_length + t],
                            quat_pred[s][:, t:self.prefix_length + t],
                            z_rs_pred[s][:, t:self.prefix_length + t],
                            affs_pred[s][:, t:self.prefix_length + t],
                            spline_pred[s][:, t:self.prefix_length + t],
                            labels_pred[s][:, t:self.prefix_length + t],
                            orient_h=None if t == 0 else orient_h_copy,
                            quat_h=None if t == 0 else quat_h_copy, return_prenorm=False)

                    traj_curr = traj_pred[s][:, self.prefix_length + t - 1:self.prefix_length + t].clone()
                    # root_speed = torch.norm(
                    #     pos_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1, 0] - \
                    #     pos_pred[s][:, self.prefix_length + t - 1:self.prefix_length + t, 0], dim=-1)

                    ## CHANGE HERE
                    # traj_next = \
                    #     self.compute_next_traj_point(
                    #         traj_curr,
                    #         traj_markers[s, marker_idx],
                    #         o_z_rs_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1, 2])
                    try:
                        traj_next = traj[s, self.prefix_length + t]
                    except IndexError:
                        traj_next = \
                            self.compute_next_traj_point_sans_markers(
                                pos_pred[s][:, self.prefix_length + t - 1:self.prefix_length + t],
                                quat_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1],
                                z_rs_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1, 0],
                                z_rs_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1, 1])

                    pos_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1], \
                    affs_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1], \
                    spline_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1] = \
                        self.mocap.get_predicted_features(
                            pos_pred[s][:, :self.prefix_length + t],
                            traj_next,
                            z_rs_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1, 0] + z_mean[s:s + 1],
                            orient_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1],
                            quat_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1])

                    # min_speed_pred = torch.min(torch.cat((lf_speed_pred.unsqueeze(-1),
                    #                                        rf_speed_pred.unsqueeze(-1)), dim=-1), dim=-1)[0]
                    # if min_speed_pred - diff_speeds_mean[s] - diff_speeds_std[s] < 0.:
                    #     root_speed_pred = 0.
                    # else:
                    #     root_speed_pred = o_z_rs_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1, 2]
                    #

                    ## CHANGE HERE
                    # traj_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1] = \
                    #     self.compute_next_traj_point(
                    #         traj_curr,
                    #         traj_markers[s, marker_idx],
                    #         root_speed_pred)
                    # if torch.norm(traj_next - traj_curr, dim=-1).squeeze() >= \
                    #         torch.norm(traj_markers[s, marker_idx] - traj_curr, dim=-1).squeeze():
                    #     marker_idx += 1
                    traj_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1] = traj_next
                    marker_idx += 1
                    pos_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1], \
                    affs_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1], \
                    spline_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1] = \
                        self.mocap.get_predicted_features(
                            pos_pred[s][:, :self.prefix_length + t],
                            pos_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1, 0, [0, 2]],
                            z_rs_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1, 0] + z_mean[s:s + 1],
                            orient_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1],
                            quat_pred[s][:, self.prefix_length + t:self.prefix_length + t + 1])
                    print('Sample: {}. Steps: {}'.format(s, t), end='\r')
            print()

            # shift = torch.zeros((1, scene_corners.shape[1] + 1)).cuda().float()
            # shift[..., [0, 2]] = scene_corners[0]
            # pos_pred[s] = (pos_pred[s] - shift) / character_scale + shift
            # pos_pred_np = pos_pred[s].contiguous().view(pos_pred[s].shape[0],
            #                                             pos_pred[s].shape[1], -1).permute(0, 2, 1).\
            #     detach().cpu().numpy()
            # display_animations(pos_pred_np, self.V, self.C, self.mocap.joint_parents, save=True,
            #                    dataset_name=self.dataset, subset_name='epoch_' + str(self.best_loss_epoch),
            #                    save_file_names=[str(s).zfill(6)],
            #                    overwrite=True)

            # plt.cla()
            # fig, (ax1, ax2) = plt.subplots(2, 1)
            # ax1.plot(root_speeds[s])
            # ax1.plot(lf_speeds[s])
            # ax1.plot(rf_speeds[s])
            # ax1.plot(min_speeds[s] - root_speeds[s])
            # ax1.legend(['root', 'left', 'right', 'diff'])
            # ax2.plot(root_speeds_pred)
            # ax2.plot(lf_speeds_pred)
            # ax2.plot(rf_speeds_pred)
            # ax2.plot(min_speeds_pred - root_speeds_pred)
            # ax2.legend(['root', 'left', 'right', 'diff'])
            # plt.show()

            head_tilt = np.tile(np.array([0., 0., 0.]), (1, quat_pred[s].shape[1], 1))
            l_shoulder_slouch = np.tile(np.array([0., 0., 0.]), (1, quat_pred[s].shape[1], 1))
            r_shoulder_slouch = np.tile(np.array([0., 0., 0.]), (1, quat_pred[s].shape[1], 1))
            head_tilt = Quaternions.from_euler(head_tilt, order='xyz').qs
            l_shoulder_slouch = Quaternions.from_euler(l_shoulder_slouch, order='xyz').qs
            r_shoulder_slouch = Quaternions.from_euler(r_shoulder_slouch, order='xyz').qs

            # Begin for aligning facing direction to trajectory
            axis_diff, angle_diff = self.get_diff_from_traj(pos_pred, traj_pred, s)
            angle_thres = 0.3
            # angle_thres = torch.max(angle_diff[:, 1:self.prefix_length])
            angle_diff[angle_diff <= angle_thres] = 0.
            angle_diff[:, self.prefix_length] = 0.
            # End for aligning facing direction to trajectory
            # pos_copy, quat_copy = self.rotate_gaits(pos_pred, quat_pred, quat_diff,
            #                                         head_tilt, l_shoulder_slouch, r_shoulder_slouch, s)
            # pos_pred[s] = pos_copy.clone()
            # angle_diff_intermediate = self.get_diff_from_traj(pos_pred, traj_pred, s)
            # if torch.max(angle_diff_intermediate[:, self.prefix_length:]) > np.pi / 2.:
            #     quat_diff = Quaternions.from_angle_axis(-angle_diff.cpu().numpy(), np.array([0, 1, 0])).qs
            #     pos_copy, quat_copy = self.rotate_gaits(pos_pred, quat_pred, quat_diff,
            #                                         head_tilt, l_shoulder_slouch, r_shoulder_slouch, s)
            # pos_pred[s] = pos_copy.clone()
            # axis_diff = torch.zeros_like(axis_diff)
            # axis_diff[..., 1] = 1.
            # angle_diff = torch.zeros_like(angle_diff)
            quat_diff = torch.from_numpy(Quaternions.from_angle_axis(
                angle_diff.cpu().numpy(), axis_diff.cpu().numpy()).qs).cuda().float()
            orient_pred[s], quat_pred[s] = self.rotate_gaits(orient_pred[s], quat_pred[s],
                                                             quat_diff, head_tilt,
                                                             l_shoulder_slouch, r_shoulder_slouch)

            if labels_pred[s][:, 0, 0] > 0.5:
                label_dir = 'happy'
            elif labels_pred[s][:, 0, 1] > 0.5:
                label_dir = 'sad'
            elif labels_pred[s][:, 0, 2] > 0.5:
                label_dir = 'angry'
            else:
                label_dir = 'neutral'

            ## CHANGE HERE
            # pos_pred[s] = pos_pred[s][:, self.prefix_length + 5:]
            # o_z_rs_pred[s] = o_z_rs_pred[s][:, self.prefix_length + 5:]
            # quat_pred[s] = quat_pred[s][:, self.prefix_length + 5:]

            traj_pred_np = pos_pred[s][0, :, 0].cpu().numpy()

            save_file_name = '{:06}_{:.2f}_{:.2f}_{:.2f}_{:.2f}'.format(s,
                                                                        labels_pred[s][0, 0, 0],
                                                                        labels_pred[s][0, 0, 1],
                                                                        labels_pred[s][0, 0, 2],
                                                                        labels_pred[s][0, 0, 3])

            animation_pred = {
                'joint_names': self.joint_names,
                'joint_offsets': torch.from_numpy(self.joint_offsets[1:]).float().unsqueeze(0).repeat(
                    len(pos_pred), 1, 1),
                'joint_parents': self.joint_parents,
                'positions': pos_pred[s],
                'rotations': torch.cat((orient_pred[s], quat_pred[s]), dim=-1)
            }
            self.mocap.save_as_bvh(animation_pred,
                                   dataset_name=self.dataset,
                                   # subset_name='epoch_' + str(self.best_loss_epoch),
                                   # save_file_names=[str(s).zfill(6)])
                                   subset_name=os.path.join('no_aff_epoch_' + str(self.best_loss_epoch),
                                                            str(counter).zfill(2) + '_' + label_dir),
                                   save_file_names=['root'])
            end_time = time.time()
            elapsed_time[counter] = end_time - start_time
            print('Elapsed Time: {}'.format(elapsed_time[counter]))

            # display_animations(pos_pred_np, self.V, self.C, self.mocap.joint_parents, save=True,
            #                    dataset_name=self.dataset,
            #                    # subset_name='epoch_' + str(self.best_loss_epoch),
            #                    # save_file_names=[str(s).zfill(6)],
            #                    subset_name=os.path.join('epoch_' + str(self.best_loss_epoch), label_dir),
            #                    save_file_names=[save_file_name],
            #                    overwrite=True)
        print('Mean Elapsed Time: {}'.format(np.mean(elapsed_time)))
Example #24
0
    @staticmethod
    def save_as_bvh(animations,
                    dataset_name=None,
                    subset_name=None,
                    save_file_names=None,
                    fill=6,
                    frame_time=0.032):
        '''
        Saves animations as BVH files

        Parameters
        ----------
        animations: Dict containing the joint names, offsets, parents, positions, and rotations
            Animation to be saved.

        dataset_name: str
            Name of the dataset, e.g., mpi.

        subset_name: str
            Name of the subset, e.g., gt, epoch_200.

        save_file_names: list of str
            Names of the files to be saved. If the files exist, they are overwritten.
            If this is None, the files are saved in numerical order 0, 1, 2, ...

        fill: int
            Zero padding for file name, if save_file_names is None. Otherwise, it is not used.

        frame_time: float
            Time duration of each frame.
        '''

        if not os.path.exists('render'):
            os.makedirs('render')
        dir_name = os.path.join('render', 'bvh')
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
        dir_name = os.path.join(dir_name, dataset_name)
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
        if subset_name is not None:
            dir_name = os.path.join(dir_name, subset_name)
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)

        num_samples = animations['rotations'].shape[0]
        num_frames = animations['rotations'].shape[1]
        num_joints = len(animations['joint_parents'])
        save_quats = animations['rotations'].contiguous().view(
            num_samples, num_frames, num_joints, -1).detach().cpu().numpy()
        for s in range(num_samples):
            trajectory = animations['positions'][s, :,
                                                 0].detach().cpu().numpy()
            save_file_name = os.path.join(
                dir_name, (save_file_names[s] if save_file_names is not None
                           else str(s).zfill(fill)) + '.bvh')
            hierarchy = [[] for _ in range(len(animations['joint_parents']))]
            for j in range(len(animations['joint_parents'])):
                if not animations['joint_parents'][j] == -1:
                    hierarchy[animations['joint_parents'][j]].append(j)
            string = ''
            tabs = ''
            joint = 0
            rot_string = 'Xrotation Yrotation Zrotation'
            joint_offsets = animations['joint_offsets'][s].detach().cpu(
            ).numpy()
            joint_offsets = np.concatenate(
                (np.zeros_like(joint_offsets[0:1]), joint_offsets), axis=0)
            with open(save_file_name, 'w') as f:
                f.write('{}HIERARCHY\n'.format(tabs))
                f.write('{}ROOT {}\n{{\n'.format(
                    tabs, animations['joint_names'][joint]))
                tabs += '\t'
                f.write('{}OFFSET {:.6f} {:.6f} {:.6f}\n'.format(
                    tabs, joint_offsets[joint][0], joint_offsets[joint][1],
                    joint_offsets[joint][2]))
                f.write(
                    '{}CHANNELS 6 Xposition Yposition Zposition {}\n'.format(
                        tabs, rot_string))
                string, tabs = MocapDataset.traverse_hierarchy(
                    hierarchy, animations['joint_names'], joint_offsets,
                    animations['joint_parents'], joint, string, tabs,
                    rot_string)
                f.write(string)
                f.write('MOTION\nFrames: {}\nFrame Time: {}\n'.format(
                    num_frames + 1, frame_time))
                string = str(trajectory[0, 0]) + ' ' +\
                    str(trajectory[0, 1]) + ' ' + \
                    str(trajectory[0, 2])
                for j in range(num_joints * 3):
                    string += ' ' + '{:.6f}'.format(0)
                f.write(string + '\n')
                for t in range(num_frames):
                    string = str(trajectory[t, 0]) + ' ' + \
                             str(trajectory[t, 1]) + ' ' + \
                             str(trajectory[t, 2])
                    for j in range(num_joints):
                        eulers = np.degrees(
                            Quaternions(save_quats[s, t,
                                                   j]).euler(order='xyz'))[0]
                        string += ' ' + '{:.6f}'.format(eulers[0]) + \
                                  ' ' + '{:.6f}'.format(eulers[1]) + \
                                  ' ' + '{:.6f}'.format(eulers[2])
                    f.write(string + '\n')
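# Hypothetical usage sketch for the static save_as_bvh above (the joint names,
# shapes and the MocapDataset class access are assumptions based on the docstring
# and the call site earlier in this listing): one sample, 4 frames, a 3-joint
# chain with identity rotations, written to render/bvh/demo/sketch/example.bvh.
import torch

num_frames, num_joints = 4, 3
demo_animation = {
    'joint_names': ['Hips', 'Spine', 'Head'],                 # hypothetical joint names
    'joint_parents': [-1, 0, 1],
    'joint_offsets': torch.tensor([[[0., 1., 0.],             # offsets of the non-root joints;
                                    [0., 1., 0.]]]),          # the root offset is prepended inside
    'positions': torch.zeros(1, num_frames, num_joints, 3),   # joint 0 supplies the root trajectory
    'rotations': torch.tensor([1., 0., 0., 0.]).repeat(1, num_frames, num_joints),  # identity quaternions, flattened per frame
}
MocapDataset.save_as_bvh(demo_animation, dataset_name='demo', subset_name='sketch',
                         save_file_names=['example'])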
def rotations_parents_global(anim):
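    # For every frame, gather each joint's parent's world-space rotation;
    # the root, which has no parent, is assigned the identity.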
    rotations = rotations_global(anim)
    rotations = rotations[:,anim.parents]
    rotations[:,0] = Quaternions.id(len(anim))
    return rotations
Example #26
0
    def __init__(self, config, is_train=True):

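        # Per-sequence accumulators: root-relative 3-D poses, joint rotations,
        # bone lengths, per-sequence scale factors (alphas), foot contacts and
        # the root joint's depth (z) projection over time.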
        poses_3d_root, rotations, bones, alphas, contacts, projections = [], [], [], [], [], []
        self.frames = []
        self.config = config
        self.rotation_number = ROTATION_NUMBERS.get(config.arch.rotation_type)

        datasets = ['bvh']  # add 'h36m' to also load the Human3.6M annotations below
        if 'h36m' in datasets:
            dim_to_use_3d = h36m_utils.dimension_reducer(
                3, config.arch.predict_joints)
            subjects = h36m_utils.TRAIN_SUBJECTS if is_train else h36m_utils.TEST_SUBJECTS
            actions = h36m_utils.define_actions('All')
            self.cameras = h36m_utils.load_cameras(config.trainer.data_path)
            for subject in subjects:
                for action in actions:
                    for subaction in range(1, 3):
                        data_file = h5py.File(
                            '%s/S%s/%s-%s/annot.h5' %
                            (config.trainer.data_path, subject, action,
                             subaction), 'r')
                        data_size = data_file['frame'].size / 4
                        data_set = np.array(data_file['pose/3d']).reshape(
                            (-1, 96))[:, dim_to_use_3d]
                        for i in range(4):
                            camera_name = data_file['camera'][int(data_size *
                                                                  i)]
                            R, T, f, c, k, p, res_w, res_h = self.cameras[(
                                subject, str(camera_name))]
                            set_3d = data_set[int(data_size *
                                                  i):int(data_size *
                                                         (i + 1))].copy()
                            set_3d_world = h36m_utils.camera_to_world_frame(
                                set_3d.reshape((-1, 3)), R, T)
                            # set_3d_world[:, [1, 2]] = set_3d_world[:, [2, 1]]
                            # set_3d_world[:, [2]] *= -1
                            # set_3d_world = set_3d_world.reshape((-1, config.arch.predict_joints * 3))
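                            # Make the pose root-relative by subtracting the (tiled)
                            # root joint position from every joint.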
                            set_3d_root = set_3d_world - np.tile(
                                set_3d_world[:, :3],
                                [1, int(set_3d_world.shape[-1] / 3)])

                            set_bones = self.get_bones(
                                set_3d_root, config.arch.predict_joints)
                            set_alphas = np.mean(set_bones, axis=1)

                            self.frames.append(set_3d_root.shape[0])
                            poses_3d_root.append(
                                set_3d_root /
                                np.expand_dims(set_alphas, axis=-1))
                            rotations.append(
                                np.zeros((set_3d_root.shape[0],
                                          int(set_3d_root.shape[1] / 3 *
                                              self.rotation_number))))
                            bones.append(set_bones /
                                         np.expand_dims(set_alphas, axis=-1))
                            alphas.append(set_alphas)
                            contacts.append(
                                self.get_contact(set_3d_world,
                                                 config.arch.predict_joints))
                            projections.append(
                                (set_3d_world.copy() /
                                 np.expand_dims(set_alphas, axis=-1)).reshape(
                                     (set_3d_world.shape[0], -1, 3))[:, 0, 2])

        if 'bvh' in datasets:
            to_keep = [
                0, 7, 8, 9, 2, 3, 4, 12, 15, 18, 19, 20, 25, 26, 27
            ] if config.arch.predict_joints == 15 else [
                0, 7, 8, 9, 2, 3, 4, 12, 13, 15, 16, 18, 19, 20, 25, 26, 27
            ]
            parents = [
                -1, 0, 1, 2, 0, 4, 5, 0, 7, 7, 9, 10, 7, 12, 13
            ] if config.arch.predict_joints == 15 else [
                -1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 9, 8, 11, 12, 8, 14, 15
            ]

            bvh_files = util.make_dataset(['/mnt/dataset/test_bvh'],
                                          phase='bvh',
                                          data_split=1)
            bvh_files = bvh_files[:int(len(bvh_files) *
                                       0.8)] if is_train else bvh_files[
                                           int(len(bvh_files) * 0.8):]
            for bvh_file in bvh_files:
                original_anim, joint_names, frame_rate = BVH.load(bvh_file)
                set_skel_in = original_anim.positions[:, to_keep, :]
                set_rotations = original_anim.rotations.qs[:, to_keep, :]
                anim = Animation.Animation(
                    Quaternions(set_rotations), set_skel_in,
                    original_anim.orients.qs[to_keep, :], set_skel_in,
                    np.array(parents))
                set_3d_world = Animation.positions_global(anim).reshape(
                    set_rotations.shape[0], -1)
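                # Replace the root position with the midpoint of re-indexed joints 1
                # and 4 (presumably the two hips of the retained skeleton).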
                set_3d_world[:, 0:3] = (set_3d_world[:, 3:6] +
                                        set_3d_world[:, 12:15]) / 2
                set_3d_root = set_3d_world - np.tile(
                    set_3d_world[:, :3],
                    [1, int(set_3d_world.shape[-1] / 3)])

                set_bones = self.get_bones(set_3d_root,
                                           config.arch.predict_joints)
                set_alphas = np.mean(set_bones, axis=1)

                self.frames.append(set_3d_root.shape[0])
                poses_3d_root.append(set_3d_root /
                                     np.expand_dims(set_alphas, axis=-1))
                rotations.append(
                    np.zeros((set_3d_root.shape[0],
                              int(set_3d_root.shape[1] / 3 *
                                  self.rotation_number))))
                bones.append(set_bones / np.expand_dims(set_alphas, axis=-1))
                alphas.append(set_alphas)
                contacts.append(
                    self.get_contact(set_3d_world, config.arch.predict_joints))
                projections.append(
                    (set_3d_world.copy() /
                     np.expand_dims(set_alphas, axis=-1)).reshape(
                         (set_3d_world.shape[0], -1, 3))[:, 0, 2])

        self.poses_3d = np.concatenate(poses_3d_root, axis=0)
        self.rotations = np.concatenate(rotations, axis=0)
        self.bones = np.concatenate(bones, axis=0)
        self.alphas = np.concatenate(alphas, axis=0)
        self.contacts = np.concatenate(contacts, axis=0)
        self.projections = np.concatenate(projections, axis=0)

        posed_3d_flip = self.get_flipping(self.poses_3d, 3,
                                          config.arch.predict_joints)
        if config.trainer.data_aug_flip and is_train:
            self.poses_3d = np.concatenate([self.poses_3d, posed_3d_flip],
                                           axis=0)

        self.poses_2d = self.get_projection(self.poses_3d)
        self.poses_2d_root = (self.poses_2d -
                              self.poses_2d[:, 0, None]).reshape(
                                  (self.poses_3d.shape[0], -1))

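        # Quick visual sanity check: plot the first sample's root-relative 3-D pose
        # next to its 2-D projection and save the figure to ./images/2d_3d/.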
        import matplotlib.pyplot as plt
        import matplotlib.gridspec as gridspec
        from utils import visualization
        fig = plt.figure()
        gs = gridspec.GridSpec(1, 2)
        for i in range(1):
            ax1 = plt.subplot(gs[0], projection='3d')
            visualization.show3Dpose(self.poses_3d[i], ax1, radius=5)

            ax2 = plt.subplot(gs[1])
            visualization.show2Dpose(self.poses_2d_root[i] * 1000 + 500,
                                     ax2,
                                     radius=1000)

            fig.savefig('./images/2d_3d/_%d.png' % i)
            fig.clear()

        self.update_sequence_index()
    def save_as_bvh(self, trajectory, orientations, quaternions, save_path,
                    save_file_names=None, frame_time=0.032):

        quaternions = np.concatenate((Quaternions.from_angle_axis(-orientations, np.array([0., 1., 0.])).qs,
                                      quaternions), axis=-2)
        num_joints = len(self.joint_parents_all)
        num_samples = quaternions.shape[0]
        for s in range(num_samples):
            num_frames = quaternions[s].shape[0]
            positions = np.tile(self.joint_offsets_all, (num_frames, 1, 1))
            positions[:, 0] = trajectory[s]
            orients = Quaternions.id(num_joints)
            save_file_name = os.path.join(
                save_path, save_file_names[s] if save_file_names is not None else str(s).zfill(6) + '.bvh')
            save_quats = np.zeros((num_frames, num_joints, quaternions.shape[-1]))
            save_quats[..., 0] = 1.
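            # Each skeleton joint's local rotation is the conjugate of its parent's
            # world-space rotation times its own world-space rotation; the hard-coded
            # indices below map the 21 predicted joints onto the BVH skeleton, while
            # unassigned end-site joints keep the identity set above.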
            save_quats[:, 1] = Quaternions(quaternions[s, :, 0]) * \
                               Quaternions(quaternions[s, :, 1])
            save_quats[:, 2] = Quaternions(quaternions[s, :, 1]).__neg__() * \
                               Quaternions(quaternions[s, :, 2])
            save_quats[:, 3] = Quaternions(quaternions[s, :, 2]).__neg__() * \
                               Quaternions(quaternions[s, :, 3])
            save_quats[:, 4] = Quaternions(quaternions[s, :, 3]).__neg__() * \
                               Quaternions(quaternions[s, :, 4])
            save_quats[:, 6] = Quaternions(quaternions[s, :, 0]) * \
                               Quaternions(quaternions[s, :, 5])
            save_quats[:, 7] = Quaternions(quaternions[s, :, 5]).__neg__() * \
                               Quaternions(quaternions[s, :, 6])
            save_quats[:, 8] = Quaternions(quaternions[s, :, 6]).__neg__() * \
                               Quaternions(quaternions[s, :, 7])
            save_quats[:, 9] = Quaternions(quaternions[s, :, 7]).__neg__() * \
                               Quaternions(quaternions[s, :, 8])
            save_quats[:, 11] = Quaternions(quaternions[s, :, 0]) * \
                                Quaternions(quaternions[s, :, 9])
            save_quats[:, 12] = Quaternions(quaternions[s, :, 9]).__neg__() * \
                                Quaternions(quaternions[s, :, 10])
            save_quats[:, 13] = Quaternions(quaternions[s, :, 10]).__neg__() * \
                                Quaternions(quaternions[s, :, 11])
            save_quats[:, 15] = Quaternions(quaternions[s, :, 11]).__neg__() * \
                                Quaternions(quaternions[s, :, 12])
            save_quats[:, 17] = Quaternions(quaternions[s, :, 11]).__neg__() * \
                                Quaternions(quaternions[s, :, 13])
            save_quats[:, 18] = Quaternions(quaternions[s, :, 13]).__neg__() * \
                                Quaternions(quaternions[s, :, 14])
            save_quats[:, 19] = Quaternions(quaternions[s, :, 14]).__neg__() * \
                                Quaternions(quaternions[s, :, 15])
            save_quats[:, 20] = Quaternions(quaternions[s, :, 15]).__neg__() * \
                                Quaternions(quaternions[s, :, 16])
            save_quats[:, 22] = Quaternions(quaternions[s, :, 11]).__neg__() * \
                                Quaternions(quaternions[s, :, 17])
            save_quats[:, 23] = Quaternions(quaternions[s, :, 17]).__neg__() * \
                                Quaternions(quaternions[s, :, 18])
            save_quats[:, 24] = Quaternions(quaternions[s, :, 18]).__neg__() * \
                                Quaternions(quaternions[s, :, 19])
            save_quats[:, 25] = Quaternions(quaternions[s, :, 19]).__neg__() * \
                                Quaternions(quaternions[s, :, 20])
            # counter = 1
            # j = 2
            # while j < num_joints:
            #     # save_quats[:, counter] = Quaternions(save_quats[:, self.joint_parents[j]]).__neg__() * \
            #     #                          Quaternions(quaternions[s, :, j])
            #     save_quats[:, counter] = Quaternions(quaternions[s, :, self.joint_parents[j]]).__neg__() * \
            #                              Quaternions(quaternions[s, :, j])
            #     counter += 1 if self.has_children_all[j] else 2
            #     j += 1 if self.has_children_all[j] else 2
            BVH.save(save_file_name,
                     Animation(Quaternions(save_quats), positions, orients,
                               self.joint_offsets_all, self.joint_parents_all),
                     names=self.joint_names_all, frame_time=frame_time)
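# A hedged, generic version of the hard-coded parent-relative conversion above,
# hinted at by the commented-out loop: for unit quaternions, a joint's local
# rotation is the conjugate of its parent's world rotation times its own world
# rotation. The function name and the (frames, joints, 4) layout are assumptions,
# not code from the original repository; `Quaternions` is the same helper class
# used throughout these examples and is assumed to be importable in this scope.
import numpy as np

def world_to_local(quats_world, parents):
    # quats_world: (num_frames, num_joints, 4) world-space unit quaternions
    # parents: per-joint parent index, -1 for the root
    quats_local = np.empty_like(quats_world)
    for j, p in enumerate(parents):
        if p < 0:
            quats_local[:, j] = quats_world[:, j]  # the root keeps its world rotation
        else:
            quats_local[:, j] = (-Quaternions(quats_world[:, p]) *
                                 Quaternions(quats_world[:, j])).qs
    return quats_local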