Example no. 1
def scale_and_round_pose(poses,
                         img_width,
                         img_height,
                         target_width,
                         target_height,
                         verbose=True):
    """
    Converts the input OpenPose coordinates to integer pixel numbers on a resized image,
    keeping only the Common14 joints.

    Parameters:
        poses: (nFrames, nPoses, 25, 3[x, y, score])
        img_width, img_height: width and height of the original image
        target_width, target_height: width and height of the resized image
    """
    assert_shape(poses, (None, None, 25, 3))

    poses = poses[:, :, mupots_3d.OPENPOSE25_TO_COMMON2D14, :2]
    poses[:, :, :, 0] *= target_width / float(img_width)
    poses[:, :, :, 1] *= target_height / float(img_height)
    poses = np.around(poses)

    if verbose:
        if np.any(poses < -1):
            print("too small value")
        if np.any(poses[:, :, :, 0] > target_width):
            print("too large width")
        if np.any(poses[:, :, :, 1] > target_height):
            print("too large height")

    poses[:, :, :, 0] = np.clip(poses[:, :, :, 0], 0, target_width - 1)
    poses[:, :, :, 1] = np.clip(poses[:, :, :, 1], 0, target_height - 1)
    poses = poses.astype('int64')

    return poses
Example no. 2
def preprocess_3d(data, add_root, log_root_z, joint_set, root_name):
    """
    3D preprocessing:
        1. Removes the root joint
        2. If add_root is True, appends the root joint at the end of the pose.
           The logarithm of the z coordinate of the root is taken.
        3. Flattens the data.

    :param data: ndarray(nFrames, [nPoses], nJoints, 3[x, y, z]) 3D coordinates in MuPoTS order
    :param add_root: True if the absolute coordinates of the hip should be included in the output
    :param log_root_z: if True, the log of the z coordinate of the root is used
    :param root_name: name of the root joint, must be a MuPoTS joint
    :return: ndarray(nPoses, 3*nJoints|3*(nJoints-1)), 3*nJoints if add_root is true otherwise 3*(nJoints-1)
    """
    assert_shape(data, ("*", joint_set.NUM_JOINTS, 3))

    root_ind = joint_set.index_of(root_name)
    root3d = data[..., root_ind, :].copy()
    if log_root_z:
        root3d[..., 2] = np.log(root3d[..., 2])
    data = remove_root(data, root_ind)  # (nFrames, [nPoses], nJoints-1, 3)
    data = data.reshape(data.shape[:-2] + (-1,))  # (nFrames, [nPoses], (nJoints-1)*3)
    if add_root:
        data = np.concatenate([data, root3d], axis=-1)  # (nFrames, [nPoses], nJoints*3)

    return data.astype('float32')
Example no. 3
def combine_pose_and_trans(data3d, std3d, mean3d, joint_set, root_name):
    """
    3D result postprocess: unnormalizes data3d and reconstructs the absolute pose from relative + absolute split.

    Parameters:
        data3d: output of the PyTorch model, ndarray(nPoses, 3*nJoints), in the format created by preprocess_3d
        std3d: normalization standard deviations
        mean3d: normalization means
        root_name: name of the root joint

    Returns:
        ndarray(nPoses, nJoints, 3)
    """
    assert_shape(data3d, (None, joint_set.NUM_JOINTS * 3))

    data3d = data3d * std3d + mean3d
    root = data3d[:, -3:]
    rel_pose = data3d[:, :-3].reshape((len(data3d), joint_set.NUM_JOINTS - 1, 3))

    root[:, 2] = np.exp(root[:, 2])

    rel_pose += root[:, np.newaxis, :]

    result = np.zeros((len(data3d), joint_set.NUM_JOINTS, 3), dtype='float32')
    root_ind = joint_set.index_of(root_name)
    result[:, :root_ind, :] = rel_pose[:, :root_ind, :]
    result[:, root_ind, :] = root
    result[:, root_ind + 1:, :] = rel_pose[:, root_ind:, :]

    return result
Example no. 4
def preprocess_2d(data, fx, cx, fy, cy, joint_set, root_name):
    """
    2D data preprocessing, performing the following:
        1. Keeps only COMMON14 joints
        2. Normalizes coordinates by multiplying with the inverse of the calibration matrix
        3. Converts coordinates into a root-relative form
        4. Invisible joints are replaced by a single value
        5. Converts data to float32

    :param data: (nPoses, 25, 3[x, y, scores]) - OpenPose detected coordinates
    :param fx: ndarray(nPoses) or float, horizontal focal length
    :param cx: ndarray(nPoses) or float, horizontal principal point
    :param fy: ndarray(nPoses) or float, vertical focal length
    :param cy: ndarray(nPoses) or float, vertical principal point
    :param joint_set: the JointSet object describing the order of joints
    :param root_name: name of the root joint, must be a COMMON14 joint
    :return: ndarray(nPoses, 42), the first 39 numbers are the non-root joints, the last 3 are the root joint
    """
    assert_shape(data, ("*", None, joint_set.NUM_JOINTS, 3))
    assert not isinstance(fx, np.ndarray) or len(fx) == len(data)
    assert not isinstance(fy, np.ndarray) or len(fy) == len(data)

    if isinstance(fx, np.ndarray):
        N = len(data)
        shape = [1] * (data.ndim - 1)
        shape[0] = N
        fx = fx.reshape(shape)
        fy = fy.reshape(shape)
        cx = cx.reshape(shape)
        cy = cy.reshape(shape)

    data = data[..., joint_set.TO_COMMON14, :]

    data[..., :, 0] -= cx
    data[..., :, 1] -= cy
    data[..., :, 0] /= fx
    data[..., :, 1] /= fy

    root_ind = np.where(Common14Joints.NAMES == root_name)[0][0]
    root2d = data[..., root_ind, :].copy()
    data = remove_root_keepscore(data, root_ind)  # (nPoses, 13, 3), modifies data

    bad_frames = data[..., 2] < 0.1

    # replace joints having low scores with 1700/focus
    # this is to prevent leaking cx/cy
    if isinstance(fx, np.ndarray):
        fx = np.tile(fx, (1,) + data.shape[1:-1])
        fy = np.tile(fy, (1,) + data.shape[1:-1])
        data[bad_frames, 0] = -1700 / fx[bad_frames]
        data[bad_frames, 1] = -1700 / fy[bad_frames]
    else:
        data[bad_frames, 0] = -1700 / fx
        data[bad_frames, 1] = -1700 / fy

    # stack root next to the pose
    data = data.reshape(data.shape[:-2] + (-1,))  # (nPoses, 13*3)
    data = np.concatenate([data, root2d], axis=-1)  # (nPoses, 14*3)
    return data.astype('float32')
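
Step 2 of the docstring above ("normalizes coordinates by multiplying with the inverse of the calibration matrix") is implemented as a per-coordinate subtraction of the principal point followed by division by the focal length. A self-contained sketch (not from the repository, with made-up intrinsics) showing that this is the same as applying the inverse calibration matrix K^-1 to homogeneous pixel coordinates:

import numpy as np

fx, fy, cx, cy = 1500.0, 1498.0, 960.0, 540.0
K = np.array([[fx, 0.0, cx],
              [0.0, fy, cy],
              [0.0, 0.0, 1.0]])

pix = np.array([1100.0, 620.0])                         # one detected 2D joint, in pixels
norm_a = (pix - [cx, cy]) / [fx, fy]                    # what preprocess_2d does
norm_b = (np.linalg.inv(K) @ np.append(pix, 1.0))[:2]   # K^-1 applied to homogeneous coords

assert np.allclose(norm_a, norm_b)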
Example no. 5
def abs_to_hiprel(poses, joint_set):
    """ Converts an absolute pose into [hi]+relative_pose. """
    assert_shape(poses, (None, joint_set.NUM_JOINTS, 3))

    root = poses[:, [joint_set.index_of('hip')]].copy()
    rel = remove_root(poses, joint_set.index_of('hip'))

    return np.concatenate([root, rel], axis=-2)
Example no. 6
def mrpe(pred, gt, joint_set):
    """ Mean Roo Position Error. """
    assert_shape(pred, ('*', None, 3))
    assert pred.shape == gt.shape
    hip_ind = joint_set.index_of('hip')

    assert gt[..., hip_ind, :].shape[-1] == 3
    return np.nanmean(
        np.linalg.norm(gt[..., hip_ind, :] - pred[..., hip_ind, :], axis=-1))
Example no. 7
    def flip(self, data):
        """ Flips a dataset """
        assert_shape(data, ('*', self.NUM_JOINTS, None))

        data = data.copy()
        data[..., self.JOINTS_LEFT +
             self.JOINTS_RIGHT, :] = data[..., self.JOINTS_RIGHT +
                                          self.JOINTS_LEFT, :]
        return data
Example no. 8
def keep_hrnet_c14(data):
    """
    Keeps only COMMON-14 joints from hrnet.
    data - ndarray(..., 19); along the last dimension, each slice corresponds to a joint in CocoEx joint order.
    """
    assert_shape(data, ('*', None, CocoExJoints.NUM_JOINTS))
    data = data[..., CocoExJoints.TO_COMMON14]

    return data
Example no. 9
def add_back_hip(poses, joint_set):
    """ Inverse of abs_to_hiprel """
    assert_shape(poses, (None, joint_set.NUM_JOINTS, 3))
    root = poses[:, [0]].copy()

    hip_ind = joint_set.index_of('hip')
    result = insert_zero_joint(poses[:, 1:], hip_ind)
    result += root

    return result
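
add_back_hip is documented as the inverse of abs_to_hiprel (Example no. 5). A hedged round-trip check, assuming abs_to_hiprel, add_back_hip and a MuPoTSJoints instance are in scope and that remove_root returns root-relative joints, as the docstrings suggest:

import numpy as np

joint_set = MuPoTSJoints()
poses = 100.0 * np.random.randn(8, joint_set.NUM_JOINTS, 3).astype('float32')

hiprel = abs_to_hiprel(poses, joint_set)      # root first, then root-relative joints
restored = add_back_hip(hiprel, joint_set)    # should reproduce the absolute poses

assert np.allclose(restored, poses, atol=1e-3)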
Example no. 10
def extend_hrnet_raw(raw):
    assert_shape(raw, (None, 17, 3))
    js = CocoExJoints()

    result = np.zeros((len(raw), 19, 3), dtype='float32')
    result[:, :17, :] = raw
    _combine(result, js.index_of('hip'), js.index_of('left_hip'), js.index_of('right_hip'))
    _combine(result, js.index_of('neck'), js.index_of('left_shoulder'), js.index_of('right_shoulder'))

    return result
Example no. 11
    def flip(self, data):  # TODO Torchify
        """ Flips a dataset """
        assert_shape(data, ('*', self.NUM_JOINTS, None))

        if type(data) != torch.Tensor:
            data = data.copy()
        data[..., self.JOINTS_LEFT +
             self.JOINTS_RIGHT, :] = data[..., self.JOINTS_RIGHT +
                                          self.JOINTS_LEFT, :]
        return data
Example no. 12
def n_mpjpe(pred, gt):
    """
    Based on https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning, losses/poses.py

    """
    assert pred.shape == gt.shape
    assert_shape(pred, ('*', None, 3))

    s_opt = optimal_scaling(pred, gt)

    return mpjpe(pred * s_opt[..., np.newaxis, np.newaxis], gt)
Example no. 13
def abs_to_hiprel(poses, joint_set):
    """ Converts an absolute pose into [hi]+relative_pose. """
    assert_shape(poses, (None, joint_set.NUM_JOINTS, 3))

    if isinstance(poses, torch.Tensor):
        root = poses[:, [joint_set.index_of('hip')]]
        rel = remove_root(poses, joint_set.index_of('hip'))
        return torch.cat([root, rel], dim=-2)
    else:
        root = poses[:, [joint_set.index_of('hip')]].copy()
        rel = remove_root(poses, joint_set.index_of('hip'))
        return np.concatenate([root, rel], axis=-2)
Example no. 14
def create_prediction_viz_frames(gt, pred, valid, out_path, draw_func=None):
    """
    Creates frames showing the GT and predicted poses.

    Parameters:
        gt: ndarray(nFrames, nPoses, 17, 3) - ground-truth 3D coordinates using MuPoTS joints
        pred: ndarray(nFrames, nPoses, 17, 3) - predicted 3D coordinates using MuPoTS joints
        valid: ndarray(nFrames, nPoses) - true if the given pose was detected
        out_path: path template of the output frames, must contain a %d format specifier for the frame index.
        draw_func: optional function that draws the 3D plot. It receives the gt poses, the predicted poses, the valid-pose mask and a 3D matplotlib axes.
                    If None, gt skeletons are drawn in dark colors and predicted skeletons in light colors.
    """
    assert_shape(gt, (None, None, 17, 3))
    assert_shape(pred, (None, None, 17, 3))

    assert gt.shape[:2] == valid.shape
    assert gt.shape[:2] == pred.shape[:2]

    plt.ioff()

    RADIUS = 1500
    plt.clf()
    ax = get_3d_axes()
    xroot, yroot, zroot = np.mean(gt[0][:, 14, :], axis=0)
    bottom = np.max(gt[0][:, :, 1])

    def default_drawer(gts, preds, valid, ax):
        show3Dpose(gts,
                   MuPoTSJoints(),
                   ax=ax,
                   invert_vertical=True,
                   show_numbers=False,
                   lcolor="#911f1f",
                   rcolor="#874924",
                   ccolor="#1b4882")
        for p in preds[valid]:
            add3Dpose(p, ax, MuPoTSJoints())

        ax.set_xlim3d([-RADIUS - 400 + xroot, RADIUS + xroot + 600])
        ax.set_ylim3d([-RADIUS + zroot - 200, RADIUS + zroot + 100])
        ax.set_zlim3d([bottom + 10, bottom - 2500])

    if draw_func is None:
        draw_func = default_drawer

    for i in range(len(gt)):
        plt.cla()
        draw_func(gt[i], pred[i], valid[i], ax)
        plt.savefig(out_path % i)

    plt.ion()
Example no. 15
def rn_mpjpe(pred, gt, root_ind):
    """
    N-MPJPE, when optimal scaling factor is calculated on relative pose.
    This should be a good comparison to height-based scaling.
    Based on https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning, losses/poses.py

    """
    assert pred.shape == gt.shape
    assert_shape(pred, ('*', None, 3))

    s_opt = optimal_scaling(remove_root(pred, root_ind),
                            remove_root(gt, root_ind))

    return mpjpe(pred * s_opt[..., np.newaxis, np.newaxis], gt)
Example no. 16
def combine_pose_and_trans(data3d,
                           std3d,
                           mean3d,
                           joint_set,
                           root_name,
                           log_root_z=True):  # TODO Torchify
    """
    3D result postprocess: unnormalizes data3d and reconstructs the absolute pose from relative + absolute split.

    Parameters:
        data3d: output of the PyTorch model, ndarray(nPoses, 3*nJoints), in the format created by preprocess_3d
        std3d: normalization standard deviations
        mean3d: normalization means
        root_name: name of the root joint
        log_root_z: if True, the z coordinate of the root was stored as its logarithm and is exponentiated back here

    Returns:
        ndarray(nPoses, nJoints, 3)
    """
    assert_shape(data3d, (None, joint_set.NUM_JOINTS * 3))
    if type(data3d) == torch.Tensor:
        data3d = data3d * torch.from_numpy(std3d).cuda() + torch.from_numpy(
            mean3d).cuda()
    else:
        data3d = data3d * std3d + mean3d
    root = data3d[:, -3:]  # (nPoses, 3)
    rel_pose = data3d[:, :-3].reshape(
        (len(data3d), joint_set.NUM_JOINTS - 1, 3))  # (nPoses, nJoints-1, 3)

    if log_root_z:
        if type(data3d) == torch.Tensor:
            root[:, 2] = torch.exp(root[:, 2])
        else:
            root[:, 2] = np.exp(root[:, 2])

    rel_pose += root[:, np.newaxis, :]

    if type(data3d) == torch.Tensor:
        result = torch.zeros((len(data3d), joint_set.NUM_JOINTS, 3)).cuda()
    else:
        result = np.zeros((len(data3d), joint_set.NUM_JOINTS, 3),
                          dtype='float32')

    root_ind = joint_set.index_of(root_name)
    result[:, :root_ind, :] = rel_pose[:, :root_ind, :]
    result[:, root_ind, :] = root
    result[:, root_ind + 1:, :] = rel_pose[:, root_ind:, :]

    return result
Example no. 17
def extend_hrnet_raw(raw):
    """
    Adds the hip and neck to a Coco skeleton by averaging left/right hips and shoulders.
    The score will be the harmonic mean of the two.
    """
    assert_shape(raw, (None, 17, 3))
    js = CocoExJoints()

    result = np.zeros((len(raw), 19, 3), dtype='float32')
    result[:, :17, :] = raw
    _combine(result, js.index_of('hip'), js.index_of('left_hip'),
             js.index_of('right_hip'))
    _combine(result, js.index_of('neck'), js.index_of('left_shoulder'),
             js.index_of('right_shoulder'))

    return result
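
_combine is not shown in these snippets. A minimal sketch consistent with the docstring (average of the two joint positions, harmonic mean of the two scores); the actual implementation in the repository may differ:

import numpy as np

def _combine(data, target, left, right):
    # data: ndarray(nPoses, nJoints, 3[x, y, score]); writes the combined joint in place
    data[:, target, :2] = (data[:, left, :2] + data[:, right, :2]) / 2.0
    s1, s2 = data[:, left, 2], data[:, right, 2]
    data[:, target, 2] = 2.0 * s1 * s2 / (s1 + s2 + 1e-8)  # harmonic mean of the scores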
Example no. 18
def show_result(image_path, poses):
    assert_shape(poses, (None, MuPoTSJoints.NUM_JOINTS, 3))

    # import here so it's not needed for prediction
    import matplotlib.pyplot as plt
    from util import viz

    img = cv2.imread(image_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # imread returns BGR; convert for matplotlib display

    plt.figure(figsize=(9, 4.5))
    plt.subplot(1, 2, 1)
    plt.imshow(img)
    ax = viz.subplot(1, 2, 2)
    viz.show3Dpose(poses, MuPoTSJoints(), ax, invert_vertical=True)
    plt.show()
Example no. 19
def _calc_limb_length(poses, joint_set, bones):
    """
    Calculates the length of a limb that consists of multiple bones.
    :param bones: list of (joint1, joint2) pairs, where joint1 and joint2 determine the bone.

    :return: For each pose, the sum of the lengths of the bones in `bones`
    """
    assert_shape(poses, ('*', joint_set.NUM_JOINTS, 3))
    bone_inds = [[joint_set.index_of(j) for j in b] for b in bones]

    height = np.zeros(poses.shape[:-2], dtype='float32')
    for bone in bone_inds:
        diff = poses[..., bone[0], :] - poses[..., bone[1], :]  # (shapePose, 3)
        height += np.linalg.norm(diff, axis=-1)  # (shapePose)

    return height
Example no. 20
def preprocess_3d(data, add_hip):
    """

    :param data:
    :param add_hip: True if the absolute coordinates of the hip should be included in the output
    :return:
    """
    assert_shape(data, ("*", 17, 3))

    hip3d = data[..., 14, :].copy()
    hip3d[..., 2] = np.log(hip3d[..., 2])
    data = remove_root(data, 14)  # (nFrames[*nPoses], 16, 3)
    data = data.reshape(data.shape[:-2] + (-1, ))  # (nFrames[*nPoses], 16*3)
    if add_hip:
        data = np.concatenate([data, hip3d],
                              axis=-1)  # (nFrames[*nPoses], 17*3)

    return data.astype('float32')
Example no. 21
def _scale_to_gt(pred_poses, gt_poses):
    """ Scales bone lengths in pred_poses to match gt_poses. Corresponds to ``mpii_map_to_gt_bone_lengths.m``."""
    assert_shape(pred_poses, (None, 17, 3))
    assert_shape(gt_poses, (None, 17, 3))

    rescaled_pred_poses = pred_poses.copy()

    for ind in _TRAVERSAL_ORDER:
        parent = _JOINT_PARENTS[ind]
        gt_bone_length = np.linalg.norm(gt_poses[:, ind] - gt_poses[:, parent],
                                        axis=1)  # (nPoses,)
        pred_bone = pred_poses[:, ind] - pred_poses[:, parent]  # (nPoses, 3)
        pred_bone = pred_bone * gt_bone_length[:, np.newaxis] / \
                    (np.linalg.norm(pred_bone, axis=1, keepdims=True) + 1e-8)
        rescaled_pred_poses[:,
                            ind] = rescaled_pred_poses[:, parent] + pred_bone

    return rescaled_pred_poses
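
_TRAVERSAL_ORDER and _JOINT_PARENTS are module-level constants not shown here; they presumably encode a root-to-leaf traversal of the skeleton and each joint's parent. The same rescaling idea on a made-up three-joint chain (toy values, not the repository's skeleton):

import numpy as np

toy_parents = {1: 0, 2: 1}   # joint 0 is the root, 1 hangs off 0, 2 hangs off 1
toy_order = [1, 2]           # parents are always visited before their children

gt   = np.array([[[0, 0, 0], [0, 100, 0], [0, 200, 0]]], dtype='float32')
pred = np.array([[[0, 0, 0], [0,  80, 0], [0, 150, 0]]], dtype='float32')

rescaled = pred.copy()
for ind in toy_order:
    parent = toy_parents[ind]
    gt_len = np.linalg.norm(gt[:, ind] - gt[:, parent], axis=1)   # (nPoses,)
    bone = pred[:, ind] - pred[:, parent]                         # (nPoses, 3)
    bone = bone * gt_len[:, np.newaxis] / (np.linalg.norm(bone, axis=1, keepdims=True) + 1e-8)
    rescaled[:, ind] = rescaled[:, parent] + bone

# bone lengths now match gt while bone directions are taken from pred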
Example no. 22
def preprocess_2d(data, fx, cx, fy, cy):
    assert_shape(data, ("*", 25, 3))
    assert not isinstance(fx, np.ndarray) or len(fx) == len(data)
    assert not isinstance(fy, np.ndarray) or len(fy) == len(data)

    if isinstance(fx, np.ndarray):
        N = len(data)
        shape = [1] * (data.ndim - 1)
        shape[0] = N
        fx = fx.reshape(shape)
        fy = fy.reshape(shape)
        cx = cx.reshape(shape)
        cy = cy.reshape(shape)

    root_ind = 0  # 0-common14, 8-openpose
    data = data[..., mupots_3d.OPENPOSE25_TO_COMMON2D14, :]

    data[..., :, 0] -= cx
    data[..., :, 1] -= cy
    data[..., :, 0] /= fx
    data[..., :, 1] /= fy

    hip2d = data[..., root_ind, :].copy()
    data = remove_openpose_root(data, root_ind)  # (nPoses, 13, 3)

    bad_frames = data[..., 2] < 0.1

    # replace joints having low scores with 1700/focus
    # this is to prevent leaking cx/cy
    if isinstance(fx, np.ndarray):
        fx = np.tile(fx, (1, ) + data.shape[1:-1])
        fy = np.tile(fy, (1, ) + data.shape[1:-1])
        data[bad_frames, 0] = -1700 / fx[bad_frames]
        data[bad_frames, 1] = -1700 / fy[bad_frames]
    else:
        data[bad_frames, 0] = -1700 / fx
        data[bad_frames, 1] = -1700 / fy

    # stack hip next to pose
    data = data.reshape(data.shape[:-2] + (-1, ))  # (nPoses, 13*3)
    data = np.concatenate([data, hip2d], axis=-1)  # (nPoses, 14*3)
    return data.astype('float32')
Example no. 23
def optimal_scaling(pred, gt):
    """
    Calculates optimal scaling factor for a given set of points. Optimal scaling is the scalar s,
    with which the pred points scaled become the closest to gt points, in L2 sense.

    :param pred: array(nFrames, nPoints, 3)
    :param gt: array(nFrames, nPoints, 3)
    :return: array(nFrames,), the optimal scaling factor for each frame
    """
    assert pred.shape == gt.shape
    assert_shape(pred, ('*', None, 3))

    # Optimal scale transform
    # (nShape,); torch equivalent: torch.sum(torch.mul(pred, pred), 1, keepdim=True)
    dot_pose_pose = np.sum(pred * pred, axis=(-1, -2))
    dot_pose_gt = np.sum(pred * gt, axis=(-1, -2))

    return dot_pose_gt / dot_pose_pose  # (nShape), the optimal scaling factor s
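
The closed form above is the least-squares solution of min_s ||s * pred - gt||^2. A quick self-contained check (not from the repository) against numpy's generic least-squares solver:

import numpy as np

pred = np.random.randn(4, 17, 3)
gt = 1.7 * pred + 0.05 * np.random.randn(4, 17, 3)   # roughly a scaled copy of pred

s_closed = np.sum(pred * gt, axis=(-1, -2)) / np.sum(pred * pred, axis=(-1, -2))

# the same one-parameter least-squares problem, solved per frame with lstsq
s_lstsq = np.array([
    np.linalg.lstsq(p.reshape(-1, 1), g.reshape(-1), rcond=None)[0][0]
    for p, g in zip(pred, gt)
])

assert np.allclose(s_closed, s_lstsq)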
Example no. 24
def procrustes_depth(coords2d, coords3d, focal_length, verbose=False, approximate=False):
    """
    Absolute depth prediction based on Mehta et al. (https://arxiv.org/pdf/1611.09813.pdf) .

    Parameters:
        coords3d: ndarray(nJoints, 3[x, y, z]), the relative 3D coordinates
        coords2d: ndarray(nJoints, 2[x, y]), the 2D coordinates, relative to the center point (principal point) of the camera
        focal_length: scalar, focal length
        approximate: if True, uses the formula in https://arxiv.org/pdf/1611.09813.pdf, otherwise uses the solution without
                     any approximation. The latter gives better results.
    Returns:
        ndarray(3,), the optimal translation vector
    """
    assert len(coords2d) == len(coords3d)
    assert coords2d.ndim == 2
    assert coords3d.ndim == 2
    assert coords3d.shape[1] == 3

    coords3d = coords3d[:, :2]
    mean2d = np.mean(coords2d, axis=0, keepdims=True)
    mean3d = np.mean(coords3d, axis=0, keepdims=True)

    assert_shape(mean2d, (1, 2))
    assert_shape(mean3d, (1, 2))

    # orig method using an approximation (does not provide any visible speedup)
    if approximate:
        numer = np.sqrt(np.sum(np.square(coords3d - mean3d)))
        denom = np.sqrt(np.sum(np.square(coords2d - mean2d)))
    else:
        # no cos approximation
        numer = np.sum(np.square(coords3d - mean3d))
        denom = np.trace(np.dot((coords2d - mean2d), (coords3d - mean3d).T))

    if verbose:
        print "proc: %f / %f" % (numer, denom)
    return numer / denom * np.array([mean2d[0, 0], mean2d[0, 1], focal_length]) - np.array([mean3d[0, 0], mean3d[0, 1], 0])
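
A self-contained sanity check with made-up numbers (assuming procrustes_depth is in scope): place a root-relative pose at a known translation, project it with a pinhole camera whose principal point has already been subtracted, and recover the translation. The recovery is only approximate, since the method relies on a weak-perspective assumption:

import numpy as np

f = 1500.0
rel3d = np.random.uniform(-300, 300, size=(17, 3))   # root-relative 3D pose, in mm
rel3d -= rel3d.mean(axis=0)
t_true = np.array([150.0, -80.0, 4000.0])            # true camera-space translation

abs3d = rel3d + t_true
coords2d = f * abs3d[:, :2] / abs3d[:, [2]]          # 2D coords relative to the principal point

t_est = procrustes_depth(coords2d, rel3d, f)
print(t_true, t_est)                                 # roughly recovers t_true (exact only under weak perspective)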
Example no. 25
def eval_results(pred3d,
                 gt3d,
                 joint_set,
                 verbose=True,
                 pck_threshold=150,
                 pctiles=[99]):
    """
    Evaluates the results by printing various statistics. Also returns those results.
    Poses can be represented either in a hipless 16-joint format or a 17-joint format that includes the hip.
    Order is MuPo-TS order in all cases.

    Parameters:
        pred3d: dictionary of predictions in mm, seqname -> (nSample, [16|17], 3)
        gt3d: dictionary of ground truth in mm, seqname -> (nSample, [16|17], 3)
        joint_set: JointSet instance describing the order of joints
        verbose: if True, a table of the results is printed
        pctiles: list of percentiles of the errors to calculate
    Returns:
        sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles
    """

    has_hip = list(
        pred3d.values()
    )[0].shape[1] == joint_set.NUM_JOINTS  # whether it contains the hip or not

    if has_hip:
        common14_joints = joint_set.TO_COMMON14
    else:
        common14_joints = np.array(
            joint_set.TO_COMMON14[1:]).copy()  # a copy of original list
        hip_ind = joint_set.index_of('hip')
        common14_joints[common14_joints > hip_ind] -= 1

    sequence_mpjpes = {}
    sequence_pcks = {}
    sequence_aucs = {}
    sequence_common14_pcks = {}  # PCK for the common 14 joints (used by Mehta et al.)
    sequence_pctiles = {}
    all_errs = []

    for k in sorted(pred3d.keys()):
        pred = pred3d[k]
        gt = gt3d[k]

        assert pred.shape == gt.shape, "Pred shape:%s, gt shape:%s" % (
            pred.shape, gt.shape)
        assert (not has_hip and pred.shape[1:] == (joint_set.NUM_JOINTS - 1, 3)) or \
               (has_hip and pred.shape[1:] == (joint_set.NUM_JOINTS, 3)), \
            "Unexpected shape:" + str(pred.shape)

        errs = np.linalg.norm(pred - gt, axis=2, ord=2)  # (nSample, nJoints)

        sequence_pctiles[k] = np.nanpercentile(errs, pctiles)
        sequence_pcks[k] = np.nanmean(
            (errs < pck_threshold).astype(np.float64))
        pck_curve = []
        for t in range(0, 151, 5):  # go from 0 to 150, 150 inclusive
            pck_curve.append(np.mean((errs < t).astype(np.float64)))
        sequence_aucs[k] = np.mean(pck_curve)

        sequence_common14_pcks[k] = np.nanmean(
            (errs[:, common14_joints] < pck_threshold).astype(np.float64))
        sequence_mpjpes[k] = np.nanmean(errs)

        # Adjusting results for missing hip
        if not has_hip:
            N = float(joint_set.NUM_JOINTS)
            sequence_pcks[k] = sequence_pcks[k] * ((N - 1) / N) + 1. / N
            sequence_aucs[k] = sequence_aucs[k] * ((N - 1) / N) + 1. / N
            sequence_common14_pcks[k] = sequence_common14_pcks[k] * (
                (N - 1) / N) + 1. / N
            sequence_mpjpes[k] = sequence_mpjpes[k] * ((N - 1) / N)

        all_errs.append(errs)

    all_errs = np.concatenate(all_errs)  # errors per joint, (nPoses, nJoints)
    joint_mpjpes = np.nanmean(all_errs, axis=0)
    joint_pctiles = np.nanpercentile(all_errs, pctiles, axis=0)

    num_joints = joint_set.NUM_JOINTS if has_hip else joint_set.NUM_JOINTS - 1
    assert_shape(all_errs, (None, num_joints))
    assert_shape(joint_mpjpes, (num_joints, ))
    assert_shape(joint_pctiles, (len(pctiles), num_joints))

    if verbose:
        joint_names = joint_set.NAMES.copy()
        if not has_hip:
            joint_names = np.delete(joint_names,
                                    joint_set.index_of('hip'))  # remove root

        # Index of the percentile that will be printed. If 99 is calculated it is selected,
        # otherwise the last one
        pctile_ind = len(pctiles) - 1
        if 99 in pctiles:
            pctile_ind = pctiles.index(99)

        print(
            " ----- Per sequence and joint errors in millimeter on the validation set ----- "
        )
        print(" %s       %6s      %5s   %6s   \t %22s  %6s     %6s" %
              ('Sequence', 'Avg', 'PCK', str(pctiles[pctile_ind]) + '%', '',
               'Avg', str(pctiles[pctile_ind]) + '%'))
        for seq, joint_id in zip_longest(sorted(pred3d.keys()),
                                         range(num_joints)):
            if seq is not None:
                seq_str = " %-8s:   %6.2f mm   %4.1f%%   %6.2f mm\t " \
                          % (str(seq), sequence_mpjpes[seq], sequence_pcks[seq] * 100, sequence_pctiles[seq][pctile_ind])
            else:
                seq_str = " " * 49

            if joint_id is not None:
                print('%s%15s (#%2d):  %6.2f mm   %6.2f mm ' %
                      (seq_str, joint_names[joint_id], joint_id,
                       joint_mpjpes[joint_id], joint_pctiles[pctile_ind,
                                                             joint_id]))
            else:
                print(seq_str)

        mean_sequence_err = np.mean(list(sequence_mpjpes.values()))
        print("\nMean absolute error: %6.2f mm" % mean_sequence_err)

    return sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_mpjpes, joint_pctiles
Example no. 26
def eval_results(pred3d,
                 gt3d,
                 joint_set,
                 verbose=True,
                 pck_threshold=150,
                 pctiles=[99]):
    """
    Evaluates the results by printing various statistics. Also returns those results.
    Poses can be represented either in a hipless 16-joint format or a 17-joint format that includes the hip.
    Order is MuPo-TS order in all cases.

    Parameters:
        pred3d: dictionary of predictions in mm, seqname -> (nSample, [16|17], 3)
        gt3d: dictionary of ground truth in mm, seqname -> (nSample, [16|17], 3)
        joint_set: JointSet instance describing the order of joints
        verbose: if True, a table of the results is printed
        pctiles: list of percentiles of the errors to calculate
    Returns:
        sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_means, joint_pctiles
    """

    has_hip = (list(pred3d.values())[0].shape[1] == joint_set.NUM_JOINTS
               )  # whether it contains the hip or not

    sequence_mpjpes = {}
    sequence_pcks = {}
    sequence_pctiles = {}
    all_errs = []

    for k in sorted(pred3d.keys()):
        pred = pred3d[k]
        gt = gt3d[k]

        assert pred.shape == gt.shape, "Pred shape:%s, gt shape:%s" % (
            pred.shape,
            gt.shape,
        )
        assert (not has_hip and pred.shape[1:] == (joint_set.NUM_JOINTS - 1, 3)
                ) or (has_hip and pred.shape[1:] == (joint_set.NUM_JOINTS, 3)
                      ), "Unexpected shape:" + str(pred.shape)

        errs = np.linalg.norm(pred - gt, axis=2, ord=2)  # (nSample, nJoints)

        sequence_pctiles[k] = np.nanpercentile(errs, pctiles)
        sequence_pcks[k] = np.nanmean(
            (errs < pck_threshold).astype(np.float64))
        sequence_mpjpes[k] = np.nanmean(errs)

        # Adjusting results for missing hip
        if not has_hip:
            N = float(joint_set.NUM_JOINTS)
            sequence_pcks[k] = sequence_pcks[k] * ((N - 1) / N) + 1.0 / N
            sequence_mpjpes[k] = sequence_mpjpes[k] * ((N - 1) / N)

        all_errs.append(errs)

    all_errs = np.concatenate(all_errs)  # errors per joint, (nPoses, nJoints)
    joint_mpjpes = np.nanmean(all_errs, axis=0)
    joint_pctiles = np.nanpercentile(all_errs, pctiles, axis=0)

    num_joints = joint_set.NUM_JOINTS if has_hip else joint_set.NUM_JOINTS - 1
    assert_shape(all_errs, (None, num_joints))
    assert_shape(joint_mpjpes, (num_joints, ))
    assert_shape(joint_pctiles, (len(pctiles), num_joints))

    if verbose:
        joint_names = joint_set.NAMES.copy()
        if not has_hip:
            joint_names = np.delete(joint_names,
                                    joint_set.index_of("hip"))  # remove root

        # Index of the percentile that will be printed. If 99 is calculated it is selected,
        # otherwise the last one
        pctile_ind = len(pctiles) - 1
        if 99 in pctiles:
            pctile_ind = pctiles.index(99)

        print(
            "----- Per sequence and joint errors in millimeter on the validation set ----- "
        )
        print("%s       %6s      %5s   %6s   \t %22s  %6s     %6s" % (
            "Sequence",
            "Avg",
            "PCK",
            str(pctiles[pctile_ind]) + "%",
            "",
            "Avg",
            str(pctiles[pctile_ind]) + "%",
        ))
        for seq, joint_id in zip_longest(sorted(pred3d.keys()),
                                         range(num_joints)):
            if seq is not None:
                seq_str = "%-8s:   %6.2f mm   %4.1f%%   %6.2f mm\t " % (
                    str(seq),
                    sequence_mpjpes[seq],
                    sequence_pcks[seq] * 100,
                    sequence_pctiles[seq][pctile_ind],
                )
            else:
                seq_str = " " * 49

            if joint_id is not None:
                print("%s%15s (#%2d):  %6.2f mm   %6.2f mm " % (
                    seq_str,
                    joint_names[joint_id],
                    joint_id,
                    joint_mpjpes[joint_id],
                    joint_pctiles[pctile_ind, joint_id],
                ))
            else:
                print(seq_str)

        mean_sequence_err = np.mean(
            np.asarray(list(sequence_mpjpes.values()), dtype=np.float32))
        print("\nMean sequence error (Absolute MPJPE) is %6.2f mm" %
              mean_sequence_err)
        print(
            "---------------------------------------------------------------- "
        )
        print(
            "MRPE: %.1f" %
            np.mean([mrpe(pred3d[k], gt3d[k], joint_set)
                     for k in gt3d.keys()]))

    return sequence_mpjpes, sequence_pcks, sequence_pctiles, joint_mpjpes, joint_pctiles
Example no. 27
def mpjpe(pred, gt):
    assert_shape(pred, ('*', None, 3))
    assert pred.shape == gt.shape

    return np.mean(np.linalg.norm(gt - pred, axis=-1))
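
A quick self-contained sanity check of the metric (assuming mpjpe and its assert_shape dependency are in scope): shifting every joint by a constant vector gives an MPJPE equal to that vector's length.

import numpy as np

gt = np.random.randn(10, 17, 3)
pred = gt + np.array([30.0, 0.0, 40.0])   # constant offset of length 50

assert np.isclose(mpjpe(pred, gt), 50.0)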