Example #1
def _calculate_univ_scale_factor(annot3, univ_annot3, skel_desc):
    rel_annot3 = absolute_to_root_relative(torch.as_tensor(annot3),
                                           skel_desc.root_joint_id)
    rel_univ = absolute_to_root_relative(torch.as_tensor(univ_annot3),
                                         skel_desc.root_joint_id)

    # NOTE: annot3 and univ_annot3 are not congruent for the revised release of TS6. The
    #       discrepancies appear for the knee and ankle joints only. The universal
    #       annotations appear to be the incorrect ones, since annot3 projects to annot2
    #       correctly.
    exclude = {
        'pelvis', 'left_knee', 'left_ankle', 'right_knee', 'right_ankle'
    }
    include_indices = [
        i for i, name in enumerate(skel_desc.joint_names)
        if name not in exclude
    ]
    rel_annot3 = rel_annot3[..., include_indices, :]
    rel_univ = rel_univ[..., include_indices, :]

    non_zero = rel_univ.abs().gt(1e-6)
    ratio = (rel_annot3 / rel_univ).masked_select(non_zero)

    # Use the median ratio as the scale, then verify that dividing annot3 by it
    # reconstructs the universal annotations for all of the included joints.
    scale = float(ratio.median())
    rel_univ_recons = rel_annot3 / scale
    err_count = (rel_univ_recons - rel_univ).abs().gt(1e-6).sum()
    assert err_count == 0

    return scale
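Every example on this page calls absolute_to_root_relative, whose definition is not shown here. Judging from the unit test in Example #3 below, it simply subtracts the root joint's position from every joint. A minimal sketch consistent with that test, assuming joints has shape (..., n_joints, 3); the actual margipose implementation may differ:

import torch

def absolute_to_root_relative(joints, root_joint_id):
    # Select the root joint's position, keeping the joint axis for broadcasting.
    root = joints[..., root_joint_id:root_joint_id + 1, :]
    # Subtracting the root makes every joint location relative to the root joint.
    return joints - root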
Example #2
def _add_annotation_metadata(f, annot, n_frames):
    ds = f.create_dataset(
        'joints3d',
        (Constants['n_cameras'], n_frames, 28, 3),
        dtype='f8'
    )
    ds[:] = annot.annot3[:, :n_frames]

    ds = f.create_dataset(
        'scale',
        (1,),
        dtype='f8'
    )
    # The universal annotations are a uniformly scaled copy of annot3, so the ratio
    # between the two sets of root-relative coordinates must be (nearly) constant.
    root_index = Constants['root_joint']
    rel_annot3 = absolute_to_root_relative(torch.from_numpy(annot.annot3), root_index)
    rel_univ = absolute_to_root_relative(torch.from_numpy(annot.univ_annot3), root_index)
    non_zero = rel_univ.abs().gt(1e-6)
    ratio = (rel_annot3 / rel_univ).masked_select(non_zero)
    assert ratio.std().item() < 1e-6
    ds[:] = ratio.mean().item()
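Example #1 established that the universal annotations satisfy rel_univ = rel_annot3 / scale, so a reader of this metadata file can reconstruct them from the stored values. A hypothetical reader-side helper (read_universal_joints and the file layout are illustrative; absolute_to_root_relative and Constants are assumed importable from the surrounding codebase):

import h5py
import torch

def read_universal_joints(metadata_path):
    # Load the stored absolute joints and the scalar scale factor, then
    # reconstruct root-relative universal coordinates as rel_annot3 / scale.
    with h5py.File(metadata_path, 'r') as f:
        joints3d = torch.from_numpy(f['joints3d'][...])
        scale = float(f['scale'][0])
    return absolute_to_root_relative(joints3d, Constants['root_joint']) / scale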
Example #3
def test_absolute_to_root_relative():
    joints = torch.Tensor([
        [1, 1, 1],
        [1, 2, 1],
        [1, 2, 2],
    ])
    root_joint = 0
    expected = torch.Tensor([
        [0, 0, 0],
        [0, 1, 0],
        [0, 1, 1],
    ])
    actual = absolute_to_root_relative(joints, root_joint)
    assert_allclose(actual, expected)
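The other examples apply absolute_to_root_relative to batched tensors of shape (cameras, frames, joints, 3), so the root subtraction presumably broadcasts over leading dimensions. A hedged batched variant of the same test (not part of the original test suite):

def test_absolute_to_root_relative_batched():
    # Fake (cameras, frames, joints, 3) batch built by repeating the same pose.
    joints = torch.Tensor([
        [1, 1, 1],
        [1, 2, 1],
        [1, 2, 2],
    ]).expand(2, 4, 3, 3)
    expected = torch.Tensor([
        [0, 0, 0],
        [0, 1, 0],
        [0, 1, 1],
    ]).expand(2, 4, 3, 3)
    actual = absolute_to_root_relative(joints, 0)
    assert_allclose(actual, expected)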
Example #4
def prepare_for_3d_evaluation(original_skel,
                              norm_pred,
                              dataset,
                              camera_intrinsics,
                              transform_opts,
                              known_depth=False):
    """Process predictions and ground truth into root-relative original skeleton space.

    Args:
        original_skel (torch.Tensor): Ground truth skeleton joint locations in the original
                                      coordinate space.
        norm_pred (torch.Tensor): Normalised predictions for skeleton joints.
        dataset: Dataset object providing the denormalisation and untransform methods.
        camera_intrinsics: Intrinsic parameters of the camera that captured the input.
        transform_opts: Options describing the transforms that were applied to the input.
        known_depth (bool): If true, use the ground truth depth of the root joint. If false,
                            infer depth from an assumed knee-to-neck skeleton height of 92 cm.

    Returns:
        Expected and actual skeletons in original coordinate space.
    """
    if known_depth:
        z_ref = original_skel[dataset.skeleton_desc.root_joint_id][2]
        denorm_skel = dataset.denormalise_with_depth(norm_pred, z_ref,
                                                     camera_intrinsics)
    else:
        denorm_skel = dataset.denormalise_with_skeleton_height(
            norm_pred, camera_intrinsics, transform_opts)
    pred_skel = dataset.untransform_skeleton(denorm_skel, transform_opts)
    actual = absolute_to_root_relative(
        dataset.to_canonical_skeleton(ensure_cartesian(pred_skel, d=3)),
        CanonicalSkeletonDesc.root_joint_id)
    expected = absolute_to_root_relative(
        dataset.to_canonical_skeleton(ensure_cartesian(original_skel, d=3)),
        CanonicalSkeletonDesc.root_joint_id)
    return expected, actual
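The expected/actual pair returned here is in root-relative canonical coordinates, ready for a joint position error metric. A hedged usage sketch; mpjpe below is a hypothetical helper, not a function from this codebase:

def mpjpe(expected, actual):
    # Mean per-joint position error: average Euclidean distance over joints.
    return float((expected - actual).norm(dim=-1).mean())

# expected, actual = prepare_for_3d_evaluation(
#     original_skel, norm_pred, dataset, camera_intrinsics, transform_opts)
# print('MPJPE:', mpjpe(expected, actual))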
Example #5
def root_relative(skel):
    return absolute_to_root_relative(
        ensure_cartesian(skel, d=3),
        CanonicalSkeletonDesc.root_joint_id
    )
Example #6
def preprocess_test_data(src_dir, dest_dir):
    from margipose.data.mpi_inf_3dhp.raw import RawMpiTestDataset, RawMpiTestSeqDataset

    for seq_id in _progress(RawMpiTestDataset.SEQ_IDS, 'Sequences'):
        dataset = RawMpiTestSeqDataset(src_dir, seq_id, valid_only=True)

        out_dir = path.join(dest_dir, seq_id.replace('TS', 'S'), 'Seq1')
        image_out_dir = path.join(out_dir, 'imageSequence', 'video_0')
        os.makedirs(image_out_dir, exist_ok=True)

        image_width = image_height = -1
        for example in _progress(dataset, 'Images'):
            image = PIL.Image.open(example['image_file'])
            image_width, image_height = image.size
            image = image.resize((int(image_width * 768 / image_height), 768), PIL.Image.LANCZOS)
            image.save(path.join(image_out_dir, 'img_%06d.jpg' % (example['frame_index'] + 1)))

        copy(dataset.annot_file, path.join(out_dir, 'annot_data.mat'))

        with h5py.File(path.join(out_dir, 'metadata.h5'), 'w') as f:
            with h5py.File(dataset.annot_file, 'r') as annot:
                n_frames = len(annot['annot3'])
                annot3 = np.array(annot['annot3']).reshape(1, n_frames, 17, 3)
                univ_annot3 = np.array(annot['univ_annot3']).reshape(1, n_frames, 17, 3)
                annot2 = np.array(annot['annot2']).reshape(1, n_frames, 17, 2)

                # Infer camera intrinsics by linear least squares. The pinhole model
                # u = fx * X / Z + cx implies u * Z = fx * X + cx * Z, which is linear in
                # the unknowns (fx, cx); the y axis is handled in the same way.
                x3d = np.stack([annot3[0, :, :, 0], annot3[0, :, :, 2]], axis=-1).reshape(n_frames * 17, 2)
                x2d = (annot2[0, :, :, 0] * annot3[0, :, :, 2]).reshape(n_frames * 17, 1)
                fx, cx = np.linalg.lstsq(x3d, x2d, rcond=None)[0].flatten()
                y3d = np.stack([annot3[0, :, :, 1], annot3[0, :, :, 2]], axis=-1).reshape(n_frames * 17, 2)
                y2d = (annot2[0, :, :, 1] * annot3[0, :, :, 2]).reshape(n_frames * 17, 1)
                fy, cy = np.linalg.lstsq(y3d, y2d, rcond=None)[0].flatten()

                with open(path.join(out_dir, 'camera.calibration'), 'w') as cam_file:
                    lines = [
                        'Fake Camera Calibration File',
                        'name          0',
                        '  size        {:d} {:d}'.format(image_width, image_height),
                        '  intrinsic   {:0.3f} 0 {:0.3f} 0 0 {:0.3f} {:0.3f} 0 0 0 1 0 0 0 0 1'
                            .format(fx, cx, fy, cy),
                        '  extrinsic   1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1',
                    ]
                    for line in lines:
                        cam_file.write(line + '\n')

                ds = f.create_dataset('joints3d', (1, n_frames, 17, 3), dtype='f8')
                ds[:] = annot3

                # The universal annotations are a uniformly scaled copy of annot3, so the
                # ratio of root-relative coordinates is a constant; store it as the scale.
                root_index = Constants['root_joint']
                rel_annot3 = absolute_to_root_relative(torch.from_numpy(annot3), root_index)
                rel_univ = absolute_to_root_relative(torch.from_numpy(univ_annot3), root_index)
                non_zero = rel_univ.abs().gt(1e-6)
                ratio = (rel_annot3 / rel_univ).masked_select(non_zero)
                assert ratio.std().item() < 1e-6
                ds = f.create_dataset('scale', (1,), dtype='f8')
                ds[:] = ratio.mean().item()

                indices = []
                for frame_index, is_valid in enumerate(np.array(annot['valid_frame']).flatten()):
                    if is_valid == 1:
                        indices.append(frame_index)
                ds = f.create_dataset('interesting_frames/camera0', (len(indices),), dtype='i8')
                ds[:] = np.array(indices)
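The least-squares step above works because the pinhole model u = fx * X / Z + cx rearranges to u * Z = fx * X + cx * Z, which is linear in (fx, cx). A self-contained sanity check on synthetic values (fx_true and cx_true are made up for illustration):

import numpy as np

rng = np.random.default_rng(0)
fx_true, cx_true = 1500.0, 1024.0               # made-up intrinsics
X = rng.standard_normal(100)                    # 3D x coordinates
Z = rng.uniform(2.0, 5.0, size=100)             # positive depths
u = fx_true * X / Z + cx_true                   # projected pixel x coordinates
A = np.stack([X, Z], axis=-1)                   # design matrix with columns X and Z
b = (u * Z).reshape(-1, 1)                      # targets: u * Z
fx_est, cx_est = np.linalg.lstsq(A, b, rcond=None)[0].flatten()
assert np.allclose([fx_est, cx_est], [fx_true, cx_true])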