Example #1
def calculate_knee_neck_height(skel, joint_names):
    """Calculate skeleton height from left knee to neck via the spine joint.

    This function is based on a code snippet provided courtesy of Dushyant Mehta.

    Args:
        skel (torch.Tensor): The skeleton.
        joint_names (list): List of joint names for the skeleton.

    Returns:
        The knee-neck height of the skeleton.
    """

    left_knee = joint_names.index('left_knee')
    left_hip = joint_names.index('left_hip')
    spine = joint_names.index('spine')
    pelvis = joint_names.index('pelvis')
    neck = joint_names.index('neck')

    skel = ensure_cartesian(skel, d=3)

    return sum([
        (skel[left_knee] - skel[left_hip]).norm(2).item(),
        (skel[spine] - skel[pelvis]).norm(2).item(),
        (skel[neck] - skel[spine]).norm(2).item(),
    ])
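A minimal usage sketch for the function above, assuming ensure_cartesian can be imported from pose3d_utils.coords and passes a (J, 3) Cartesian tensor through unchanged; the five-joint skeleton and its coordinates are made up for illustration:

import torch
from pose3d_utils.coords import ensure_cartesian  # assumed import path

# Hypothetical five-joint skeleton, one (x, y, z) row per joint, in metres
joint_names = ['pelvis', 'spine', 'neck', 'left_hip', 'left_knee']
skel = torch.tensor([
    [0.00, 0.00, 0.0],   # pelvis
    [0.00, 0.25, 0.0],   # spine
    [0.00, 0.50, 0.0],   # neck
    [0.12, 0.00, 0.0],   # left_hip
    [0.12, -0.45, 0.0],  # left_knee
], dtype=torch.float64)

print(calculate_knee_neck_height(skel, joint_names))  # 0.45 + 0.25 + 0.25 = 0.95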
Example #2
def main(argv, common_opts):
    args = parse_args(argv)
    seed_all(12345)
    init_algorithms(deterministic=True)
    torch.set_grad_enabled(False)

    device = common_opts['device']

    assert not args.multicrop, 'TODO: Implement multi-crop for single image inference.'

    model = load_model(args.model).to(device).eval()

    input_specs: ImageSpecs = model.data_specs.input_specs

    image: PIL.Image.Image = PIL.Image.open(args.image, 'r')
    image.thumbnail((input_specs.width, input_specs.height))
    inp = input_specs.convert(image).to(device, torch.float32)

    output = model(inp[None, ...])[0]

    norm_skel3d = ensure_cartesian(output.to(CPU, torch.float64), d=3)

    fig = plt.figure(figsize=(16, 8))
    ax1 = fig.add_subplot(1, 2, 1)
    ax2: Axes3D = fig.add_subplot(1, 2, 2, projection='3d')

    ax1.imshow(input_specs.unconvert(inp.to(CPU)))
    plot_skeleton_on_axes3d(norm_skel3d, CanonicalSkeletonDesc, ax2, invert=True)

    plt.show()
Example #3
def make_eval_scale_bone_lengths(skel_desc, untransform, ref_skel):
    """Build a closure that evaluates a test skeleton's scale relative to a reference.

    Reference bone lengths are computed once from `ref_skel`; the returned function
    untransforms a test skeleton and returns its relative scale against those lengths.
    """
    joint_tree = skel_desc.joint_tree
    ref_bone_lengths = cartesian_to_spherical(
        absolute_to_parent_relative(ensure_cartesian(ref_skel, d=3), joint_tree)
    )[:, 0]
    def eval_scale(test_skel):
        skel = untransform(test_skel)
        return calc_relative_scale(skel, ref_bone_lengths, joint_tree)
    return eval_scale
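A hedged sketch of how the returned closure might be used; skel_desc, ref_skel and test_skel stand in for a real skeleton descriptor and real tensors, and the identity untransform is an assumption made only for this illustration:

# Hypothetical usage: score how much a test skeleton must be scaled to match the reference.
identity_untransform = lambda skel: skel  # assume predictions need no untransform

eval_scale = make_eval_scale_bone_lengths(skel_desc, identity_untransform, ref_skel)
scale = eval_scale(test_skel)  # median ratio of reference to test bone lengths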
Example #4
def infer_joints(model, image):
    # Obtain the input specs from the model and resize the image to match them
    input_specs: ImageSpecs = model.data_specs.input_specs
    # Accept either a file path or an already-opened PIL image
    if not isinstance(image, PIL.Image.Image):
        image = PIL.Image.open(image, 'r')
    # Centre-crop to a square so the aspect ratio matches the network input
    if image.width != image.height:
        crop_size = min(image.width, image.height)
        left = (image.width - crop_size) // 2
        top = (image.height - crop_size) // 2
        image = image.crop((left, top, left + crop_size, top + crop_size))

    # Upscale small images first, since thumbnail() below only ever shrinks
    if image.width < 256:
        image = image.resize((256, 256), PIL.Image.LANCZOS)  # ANTIALIAS was renamed to LANCZOS in newer Pillow

    image.thumbnail((input_specs.width, input_specs.height))
    input_image = input_specs.convert(image).to(CPU, torch.float32)

    # Make inference
    output = model(input_image[None, ...])[0]
    
    # Convert the output to a normalised 3D skeleton in Cartesian coordinates
    norm_skel3d = ensure_cartesian(output.to(CPU, torch.float64), d=3)
    coords = norm_skel3d.cpu().numpy()
    coords_raw = coords
    # Map normalised coordinates from [-1, 1] into the [0, 255] pixel range
    coords_img = np.rint((1 + coords) * 255 / 2)[:, :3]
    coords_img = coords_img.astype(int)
    img = input_specs.unconvert(input_image.to(CPU))
       
    # create visualization of normalized skeleton
    fig = plt.figure(1)
    plt_3d: Axes3D = fig.add_subplot(1, 1, 1, projection='3d')
    plot_skeleton_on_axes3d(norm_skel3d, CanonicalSkeletonDesc, plt_3d, invert=True)

    # Render the figure to an RGB array (avoids the private _renderer attribute)
    fig.canvas.draw()
    fig_img = np.array(fig.canvas.buffer_rgba(), dtype=np.uint8)[:, :, :3]
    plt.close(fig)

    return (coords_img, coords_raw, img, fig_img)
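A short driver sketch for infer_joints; the load_model helper is borrowed from Example #2, and the checkpoint and image paths are placeholders:

import numpy as np
import PIL.Image

model = load_model('margipose_model.pth').eval()  # hypothetical checkpoint path
coords_img, coords_raw, img, fig_img = infer_joints(model, 'person.jpg')

# fig_img is an RGB uint8 array of the rendered 3D-skeleton figure
PIL.Image.fromarray(fig_img).save('skeleton_3d.png')
print(coords_raw.shape)  # one row of (x, y, z, ...) values per predicted joint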
Example #5
def prepare_for_3d_evaluation(original_skel,
                              norm_pred,
                              dataset,
                              camera_intrinsics,
                              transform_opts,
                              known_depth=False):
    """Process predictions and ground truth into root-relative original skeleton space.

    Args:
        original_skel (torch.Tensor): Ground truth skeleton joint locations in the original
                                      coordinate space.
        norm_pred (torch.Tensor): Normalised predictions for skeleton joints.
        dataset: Dataset object providing the denormalisation and untransform helpers.
        camera_intrinsics: Camera intrinsics used to denormalise the prediction.
        transform_opts: Options describing the transforms applied to the input image.
        known_depth (bool): If true, use the ground truth depth of the root joint. If false,
                            assume a knee-neck skeleton height of 92 cm to infer the depth.

    Returns:
        Expected and actual skeletons in original coordinate space.
    """
    if known_depth:
        z_ref = original_skel[dataset.skeleton_desc.root_joint_id][2]
        denorm_skel = dataset.denormalise_with_depth(norm_pred, z_ref,
                                                     camera_intrinsics)
    else:
        denorm_skel = dataset.denormalise_with_skeleton_height(
            norm_pred, camera_intrinsics, transform_opts)
    pred_skel = dataset.untransform_skeleton(denorm_skel, transform_opts)
    actual = absolute_to_root_relative(
        dataset.to_canonical_skeleton(ensure_cartesian(pred_skel, d=3)),
        CanonicalSkeletonDesc.root_joint_id)
    expected = absolute_to_root_relative(
        dataset.to_canonical_skeleton(ensure_cartesian(original_skel, d=3)),
        CanonicalSkeletonDesc.root_joint_id)
    return expected, actual
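Once the expected and actual skeletons share the same root-relative space, the usual next step is the mean per-joint position error (MPJPE); a minimal sketch, assuming both tensors have shape (num_joints, 3):

import torch

def mean_per_joint_position_error(expected, actual):
    # Average Euclidean distance between corresponding joints
    return (expected - actual).norm(p=2, dim=-1).mean().item()

# expected, actual = prepare_for_3d_evaluation(original_skel, norm_pred, dataset,
#                                              camera_intrinsics, transform_opts)
# print(mean_per_joint_position_error(expected, actual))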
Example #6
def calc_relative_scale(skeleton, ref_bone_lengths, joint_tree) -> float:
    """Calculate the factor by which the reference is larger than the query skeleton.

    Args:
        skeleton (torch.DoubleTensor): The query skeleton.
        ref_bone_lengths (torch.DoubleTensor): The reference skeleton bone lengths.
        joint_tree (list of int): Parent joint index for each joint in the skeleton.

    Returns:
        The median scale factor (0 if the query skeleton has no non-zero bone lengths).
    """

    bone_lengths = cartesian_to_spherical(
        absolute_to_parent_relative(ensure_cartesian(skeleton, d=3), joint_tree)
    )[:, 0]

    non_zero = bone_lengths.gt(1e-6)
    if non_zero.sum() == 0: return 0
    ratio = (ref_bone_lengths / bone_lengths).masked_select(non_zero)

    return ratio.median().item()
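A brief sketch of applying the returned factor, assuming query_skel is a root-relative Cartesian skeleton tensor; the tensors themselves are placeholders:

scale = calc_relative_scale(query_skel, ref_bone_lengths, joint_tree)
if scale > 0:
    # The reference is `scale` times larger, so scaling the query by it matches sizes
    query_rescaled = query_skel * scale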
Example #7
def project_cartesian(self, coords):
    coords = ensure_homogeneous(coords, d=3)
    return ensure_cartesian(self.project(coords), d=2)
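For context, a minimal sketch of the kind of camera object this method could belong to; the SimpleCamera class, its 3x4 pinhole matrix, and the pose3d_utils.coords import path are all assumptions made for illustration:

import torch
from pose3d_utils.coords import ensure_homogeneous, ensure_cartesian  # assumed path

class SimpleCamera:
    def __init__(self, fx, fy, cx, cy):
        # 3x4 pinhole projection matrix (no distortion)
        self.matrix = torch.tensor([
            [fx, 0.0, cx, 0.0],
            [0.0, fy, cy, 0.0],
            [0.0, 0.0, 1.0, 0.0],
        ], dtype=torch.float64)

    def project(self, coords):
        # Homogeneous 3D points (N, 4) -> homogeneous 2D points (N, 3)
        return coords @ self.matrix.t()

    def project_cartesian(self, coords):
        coords = ensure_homogeneous(coords, d=3)
        return ensure_cartesian(self.project(coords), d=2)

camera = SimpleCamera(fx=1500.0, fy=1500.0, cx=512.0, cy=512.0)
print(camera.project_cartesian(torch.tensor([[0.1, -0.2, 3.0]], dtype=torch.float64)))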
Example #8
def root_relative(skel):
    return absolute_to_root_relative(
        ensure_cartesian(skel, d=3),
        CanonicalSkeletonDesc.root_joint_id
    )
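A tiny usage sketch: after the call, the root joint sits at the origin; skel here is a placeholder (num_joints, 3) tensor in the canonical joint order:

# Hypothetical usage
centred = root_relative(skel)
root = CanonicalSkeletonDesc.root_joint_id
assert torch.allclose(centred[root], torch.zeros_like(centred[root]))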