Example #1
def single_person(model_features, input_features, normalise=True):

    # Filter the undetected features and mirror them in the other pose
    (input_features_copy, model_features_copy) = prepocessing.handle_undetected_points(input_features, model_features)

    if normalise:
        model_features_copy = normalising.feature_scaling(model_features_copy)
        input_features_copy = normalising.feature_scaling(input_features_copy)

    # Split the features in three parts: face, torso and legs
    (model_face, model_torso, model_legs) = prepocessing.split_in_face_legs_torso(model_features_copy)
    (input_face, input_torso, input_legs) = prepocessing.split_in_face_legs_torso(input_features_copy)

    # Find the transformation that maps the input onto the model;
    # returns the transformation matrix plus the image of the input mapped onto the model
    (input_transformed_face, transformation_matrix_face) = affine_transformation.find_transformation(model_face, input_face)
    (input_transformed_torso, transformation_matrix_torso) = affine_transformation.find_transformation(model_torso, input_torso)
    (input_transformed_legs, transformation_matrix_legs) = affine_transformation.find_transformation(model_legs, input_legs)

    # Wrap the transformed input parts back into one whole pose
    input_transformation = prepocessing.unsplit(input_transformed_face, input_transformed_torso, input_transformed_legs)

    
    # In case of no normalisation, return here (e.g. for plotting).
    # Without normalisation the thresholds are meaningless, so the comparison is skipped.
    if not normalise:
        result = MatchResult(None,
                             error_score=0,
                             input_transformation=input_transformation)
        return result, None  # keep the return shape consistent with the final return below

    max_euclidean_error_face = pose_comparison.max_euclidean_distance(model_face, input_transformed_face)
    max_euclidean_error_torso = pose_comparison.max_euclidean_distance(model_torso, input_transformed_torso)
    max_euclidean_error_legs = pose_comparison.max_euclidean_distance(model_legs, input_transformed_legs)

    max_euclidean_error_shoulders = pose_comparison.max_euclidean_distance_shoulders(model_torso, input_transformed_torso)


    ######### THE THRESHOLDS #######
    eucl_dis_tresh_torso = 0.11  # 0.065 or 0.11?
    rotation_tresh_torso = 40
    eucl_dis_tresh_legs = 0.055
    rotation_tresh_legs = 40

    eucld_dis_shoulders_tresh = 0.063
    ################################

    result_torso, torso_value = pose_comparison.decide_torso_shoulders_incl(max_euclidean_error_torso, transformation_matrix_torso,
                                                eucl_dis_tresh_torso, rotation_tresh_torso,
                                                max_euclidean_error_shoulders, eucld_dis_shoulders_tresh)

    result_legs = pose_comparison.decide_legs(max_euclidean_error_legs, transformation_matrix_legs,
                                              eucl_dis_tresh_legs, rotation_tresh_legs)

    #TODO: construct a solid score algorithm
    error_score = (max_euclidean_error_torso + max_euclidean_error_legs)/2.0

    result = MatchResult((result_torso and result_legs),
                         error_score=error_score,
                         input_transformation=input_transformation)
    return result, torso_value
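
For context, a hypothetical invocation of this variant. The JSON helper and the trap7/trap9 files follow Example #7 below; the MatchResult field name used in the print is an assumption.

# Hypothetical usage sketch (paths and MatchResult field names are assumptions):
model_features = parse_openpose_json.parse_JSON_single_person('data/json_data/trap7.json')
input_features = parse_openpose_json.parse_JSON_single_person('data/json_data/trap9.json')

match, torso_value = single_person(model_features, input_features, normalise=True)
print(match.error_score, torso_value)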
Example #2
def single_person_zonder_split(model_features, input_features, normalise=True):

    # Filtering the undetected features is disabled in this variant:
    #(input_features_copy, model_features_copy) = prepocessing.handle_undetected_points(input_features, model_features)

    if normalise:
        model_features = normalising.feature_scaling(model_features)
        input_features = normalising.feature_scaling(input_features)

    # Find the transformation that maps the input onto the model;
    # returns the transformation matrix plus the image of the input mapped onto the model
    (input_transformed,
     transformation_matrix) = affine_transformation.find_transformation(
         model_features, input_features)

    max_euclidean = pose_comparison.max_euclidean_distance(
        model_features, input_transformed)

    result = MatchResult(True,
                         error_score=max_euclidean,
                         input_transformation=input_transformed)
    return result
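
MatchResult itself is not shown in these examples. A minimal sketch of a compatible container; the keyword names come from the calls above, but the name of the first (positional) field is a guess.

from collections import namedtuple

# Assumed container shape: error_score and input_transformation match the
# keyword arguments used in the examples; match_bool is a guessed name.
MatchResult = namedtuple('MatchResult',
                         ['match_bool', 'error_score', 'input_transformation'])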
Example #3
def single_person(model_features, input_features, normalise=True):

    # Filter the undetected features and mirror them in the other pose
    (input_features_copy,
     model_features_copy) = prepocessing.handle_undetected_points(
         input_features, model_features)

    if normalise:
        model_features_copy = normalising.feature_scaling(model_features_copy)
        input_features_copy = normalising.feature_scaling(input_features_copy)

    # Split the features in three parts: face, torso and legs
    (model_face, model_torso,
     model_legs) = prepocessing.split_in_face_legs_torso(model_features_copy)
    (input_face, input_torso,
     input_legs) = prepocessing.split_in_face_legs_torso(input_features_copy)

    # Find the transformation that maps the input onto the model;
    # returns the transformation matrix plus the image of the input mapped onto the model
    (input_transformed_face,
     transformation_matrix_face) = affine_transformation.find_transformation(
         model_face, input_face)
    (input_transformed_torso,
     transformation_matrix_torso) = affine_transformation.find_transformation(
         model_torso, input_torso)
    (input_transformed_legs,
     transformation_matrix_legs) = affine_transformation.find_transformation(
         model_legs, input_legs)

    # Wrap the transformed input parts back into one whole pose
    input_transformation = prepocessing.unsplit(input_transformed_face,
                                                input_transformed_torso,
                                                input_transformed_legs)

    # In case of no normalisation, return here (e.g. for plotting).
    # Without normalisation the thresholds are meaningless,
    #   so the comparison would be useless.
    if not normalise:
        result = MatchResult(None,
                             error_score=0,
                             input_transformation=input_transformation)
        return result

    max_euclidean_error_face = pose_comparison.max_euclidean_distance(
        model_face, input_transformed_face)
    max_euclidean_error_torso = pose_comparison.max_euclidean_distance(
        model_torso, input_transformed_torso)
    max_euclidean_error_legs = pose_comparison.max_euclidean_distance(
        model_legs, input_transformed_legs)

    max_euclidean_error_shoulders = pose_comparison.max_euclidean_distance_shoulders(
        model_torso, input_transformed_torso)

    ######### THE THRESHOLDS #######
    eucl_dis_tresh_torso = 0.11  # 0.065 or 0.11?
    rotation_tresh_torso = 40
    eucl_dis_tresh_legs = 0.055
    rotation_tresh_legs = 40

    eucld_dis_shoulders_tresh = 0.063
    ################################

    result_torso = pose_comparison.decide_torso_shoulders_incl(
        max_euclidean_error_torso, transformation_matrix_torso,
        eucl_dis_tresh_torso, rotation_tresh_torso,
        max_euclidean_error_shoulders, eucld_dis_shoulders_tresh)

    result_legs = pose_comparison.decide_legs(max_euclidean_error_legs,
                                              transformation_matrix_legs,
                                              eucl_dis_tresh_legs,
                                              rotation_tresh_legs)

    #TODO: construct a solid score algorithm
    error_score = (max_euclidean_error_torso + max_euclidean_error_legs) / 2.0

    result = MatchResult((result_torso and result_legs),
                         error_score=error_score,
                         input_transformation=input_transformation)
    return result
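
normalising.feature_scaling is used throughout but never shown. A plausible min-max implementation (an assumption, not the library's actual code), which would make the Euclidean thresholds above resolution-independent:

import numpy as np

def feature_scaling(features):
    # Min-max rescaling of an (N, 2) array of keypoint coordinates to [0, 1].
    # Assumed behaviour of normalising.feature_scaling; the real helper may
    # instead scale both axes by a common factor to preserve aspect ratio.
    features = np.asarray(features, dtype=float)
    mins = features.min(axis=0)
    maxs = features.max(axis=0)
    return (features - mins) / (maxs - mins)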
Example #4
def multi_person2(model_poses,
                  input_poses,
                  model_image_name,
                  input_image_name,
                  normalise=True):
    # Find for each model_pose the best matching input_pose.
    # Returns a list of best matches  !! WITH normalisation !!
    # TODO: fine-tune the return tuple
    result = multi_person(model_poses, input_poses, model_image_name,
                          input_image_name)

    if result is False:
        # At least one model pose could not be matched with an input pose
        logger.error("Multi-person step1 match failed!")
        return False

    # The new input_transformed: all poses wrapped in one total pose.
    # It is built by superimposing every model pose on its corresponding input pose.
    input_transformed_combined = []

    updated_models_combined = []

    # Loop over the best matches
    #       [model pose 1 -> input pose x ; model pose 2 -> input pose y; ...]
    logger.info("-- multi_pose2(): looping over best-matches for procrustes:")
    for best_match in result:
        # First check for undetected body parts. If present, make the corresponding
        # point in the model (0,0) as well. We can then strip them from both poses,
        # because split() is not used here for the affine transformation.
        # TODO: this cleaned updated_model_pose is in fact already computed in single_pose()
        #   -> looping over it again here is quite redundant

        # Collect the indices of the undetected points
        indexes_undetected_points = []
        if np.any(best_match.input_features[:] == [0, 0]):
            for counter, feature in enumerate(best_match.input_features):
                if feature[0] == 0 and feature[1] == 0:  # undetected -> (0, 0)
                    indexes_undetected_points.append(counter)
                    #logger.warning(" Undetected body part in input: index(%d) %s", counter, prepocessing.get_bodypart(counter))
                    best_match.model_features[counter][0] = 0
                    best_match.model_features[counter][1] = 0

        best_match.input_features = best_match.input_features[
            (best_match.input_features[:, 0] != 0)
            & (best_match.input_features[:, 1] != 0)]
        best_match.model_features = best_match.model_features[
            (best_match.model_features[:, 0] != 0)
            & (best_match.model_features[:, 1] != 0)]
        # Note 1: the input_transformed from single_pose() is not used!
        input_transformed = proc_do_it.superimpose(best_match.input_features,
                                                   best_match.model_features,
                                                   input_image_name,
                                                   model_image_name)

        input_transformed_combined.append(np.array(input_transformed))
        updated_models_combined.append(np.array(best_match.model_features))

        #logger.info("inputtt %s", str(input_transformed))
        #logger.info("modeelll %s ", str(best_match.model_features))

    assert len(input_transformed_combined) == len(model_poses)

    # TODO: some refactoring/optimisation is still needed here ...

    # Reshape the lists of poses into one matrix each. np.vstack accepts the
    # whole list, so the indices no longer need to be hard-coded.
    input_transformed_combined = np.vstack(input_transformed_combined)
    model_poses = np.vstack(updated_models_combined)

    # Redundant, only used for plotting
    input_poses = np.vstack(input_poses)
    logger.debug("-------trans: %s", input_transformed_combined.shape)
    if normalise:
        input_transformed_combined = normalising.feature_scaling(
            input_transformed_combined)
        model_poses = normalising.feature_scaling(model_poses)

    # Calc the affine trans of the whole
    (full_transformation,
     A_matrix) = affine_transformation.find_transformation(
         model_poses, input_transformed_combined)

    # TODO return True in case of match
    if normalise:
        max_eucl_distance = pose_comparison.max_euclidean_distance(
            model_poses, input_transformed_combined)
        logger.info("--->Max eucl distance: %s  (thresh ca. 0.13)",
                    str(max_eucl_distance))  # torso thresh is 0.11

        markersize = 2

        f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(14, 6))
        ax1.set_title('input transformed (model superimposed on input)')
        ax1.plot(*zip(*input_transformed_combined),
                 marker='o',
                 color='r',
                 ls='',
                 label='input transformed',
                 ms=markersize)

        ax2.set_title('model')
        ax2.plot(*zip(*model_poses),
                 marker='o',
                 color='r',
                 ls='',
                 label='model',
                 ms=markersize)

        ax3.set_title('affine transformation (red) and model (blue)')
        ax3.plot(*zip(*full_transformation),
                 marker='o',
                 color='r',
                 ls='',
                 label='affine transformation',
                 ms=markersize)
        ax3.plot(*zip(*model_poses),
                 marker='o',
                 color='b',
                 ls='',
                 label='model',
                 ms=markersize)
        ax = plt.gca()
        ax.invert_yaxis()
        #plt.show()
        plt.draw()

    else:
        logger.info("-- multi_pose2(): procrustes plotjes incoming ")
        plot_multi_pose(model_poses, input_poses, full_transformation,
                        model_image_name, input_image_name, "input poses",
                        "full procrustes")
        plot_multi_pose(model_poses, input_transformed_combined,
                        full_transformation, model_image_name,
                        input_image_name, "superimposed model on input",
                        "full procrustes")

    # Block until the plots are closed
    plt.show()
    return True
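
proc_do_it.superimpose is external. One way to realise the same idea with SciPy's ordinary Procrustes analysis; this is only a sketch, and the image-name parameters of the real helper (presumably used for plotting) are ignored here.

import numpy as np
from scipy.spatial import procrustes

def superimpose(input_features, model_features, *_image_names):
    # Ordinary Procrustes analysis: translate, scale and rotate the input so
    # that it best overlaps the model in the least-squares sense.
    # scipy.spatial.procrustes returns (standardised model, transformed input,
    # disparity); only the transformed input is kept here.
    _, input_transformed, _disparity = procrustes(np.asarray(model_features, float),
                                                  np.asarray(input_features, float))
    return input_transformed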
Example #5
def plot_single_person_zonder_split(
        model_features,
        input_features,
        model_image_name,
        input_image_name,
        input_title="input",
        model_title="model",
        transformation_title="transformed input -excl. split()"):

    # plot vars
    markersize = 3

    # Find the transformation that maps the input onto the model;
    # returns the transformation matrix plus the image of the input mapped onto the model
    (input_transformed,
     transformation_matrix) = affine_transformation.find_transformation(
         model_features, input_features)

    # Load the images and draw the detected poses on them
    model_image = plt.imread(model_image_name)
    input_image = plt.imread(input_image_name)
    model_image = draw_humans.draw_humans(model_image, model_features)
    input_image = draw_humans.draw_humans(input_image, input_features, True)

    input_trans_image = draw_humans.draw_square(plt.imread(model_image_name),
                                                model_features)
    input_trans_image = draw_humans.draw_humans(input_trans_image,
                                                input_transformed, True)

    f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(14, 6))
    plt.axis('off')
    ax1.imshow(model_image)
    ax1.axis('off')
    #ax1.set_title(model_image_name + ' (model)')
    ax1.set_title(model_title)
    #ax1.plot(*zip(*model_features), marker='o', color='magenta', ls='', label='model', ms=markersize)  # ms = markersize
    # red_patch = mpatches.Patch(color='magenta', label='model')
    # ax1.legend(handles=[red_patch])

    #ax2.set_title(input_image_name + ' (input)')
    ax2.set_title(input_title)
    ax2.axis('off')
    ax2.imshow(input_image)
    # ax2.plot(*zip(*input_features), marker='o', color='r', ls='', ms=markersize)
    # ax2.legend(handles=[mpatches.Patch(color='red', label='input')])

    ax3.set_title(transformation_title)
    ax3.axis('off')
    ax3.imshow(input_trans_image)
    # ax3.plot(*zip(*input_transformed), marker='s', color='y', ls='', ms=4, )
    # ax3.plot(*zip(*model_features), marker='o', color='magenta', ls='', label='model', ms=markersize)  # ms = markersize
    #
    # ax3.legend(handles=[mpatches.Patch(color='y', label='transformed input'), mpatches.Patch(color='magenta', label='model')])

    plot_name = model_image_name.split("/")[-1] + "_" + input_image_name.split(
        "/")[-1]
    plt.savefig('./plots/' + plot_name + '.png', bbox_inches='tight')
    plt.show(block=False)
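
pose_comparison.max_euclidean_distance is also external. Assuming both poses are (N, 2) arrays, it is presumably the largest per-keypoint distance:

import numpy as np

def max_euclidean_distance(model, input_transformed):
    # Largest per-keypoint Euclidean distance between two poses of equal
    # shape (N, 2). Assumed to match pose_comparison.max_euclidean_distance.
    diffs = np.asarray(model, float) - np.asarray(input_transformed, float)
    return np.max(np.linalg.norm(diffs, axis=1))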
Example #6
def plot_single_person(
        model_features,
        input_features,
        model_image_name,
        input_image_name,
        input_title="input",
        model_title="model",
        transformation_title="transformed input -incl. split()"):

    # Filter the undetected features and mirror them in the other pose
    (input_features_copy,
     model_features_copy) = prepocessing.handle_undetected_points(
         input_features, model_features)

    # plot vars
    markersize = 3


    # Split the features in three parts: face, torso and legs
    (model_face, model_torso,
     model_legs) = prepocessing.split_in_face_legs_torso(model_features_copy)
    (input_face, input_torso,
     input_legs) = prepocessing.split_in_face_legs_torso(input_features_copy)

    # Find the transformation that maps the input onto the model;
    # returns the transformation matrix plus the image of the input mapped onto the model
    (input_transformed_face,
     transformation_matrix_face) = affine_transformation.find_transformation(
         model_face, input_face)
    (input_transformed_torso,
     transformation_matrix_torso) = affine_transformation.find_transformation(
         model_torso, input_torso)
    (input_transformed_legs,
     transformation_matrix_legs) = affine_transformation.find_transformation(
         model_legs, input_legs)

    whole_input_transform = prepocessing.unsplit(input_transformed_face,
                                                 input_transformed_torso,
                                                 input_transformed_legs)

    # Load the images and draw the poses on them
    model_image = plt.imread(model_image_name)
    input_image = plt.imread(input_image_name)

    model_image = draw_humans.draw_humans(model_image, model_features, True)
    input_image = draw_humans.draw_humans(input_image, input_features, True)

    input_trans_image = draw_humans.draw_square(plt.imread(model_image_name),
                                                model_features)
    input_trans_image = draw_humans.draw_humans(input_trans_image,
                                                whole_input_transform, True)

    f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(14, 6))
    ax1.imshow(model_image)
    plt.axis('off')
    #ax1.set_title(model_image_name + ' (model)')
    ax1.set_title(model_title)
    ax1.axis('off')
    # ax1.plot(*zip(*model_features_copy), marker='o', color='magenta', ls='', label='model', ms=markersize)  # ms = markersize
    # red_patch = mpatches.Patch(color='magenta', label='model')
    # ax1.legend(handles=[red_patch])

    #ax2.set_title(input_image_name + ' (input)')
    ax2.set_title(input_title)
    ax2.axis('off')
    ax2.imshow(input_image)
    # ax2.plot(*zip(*input_features_copy), marker='o', color='r', ls='', ms=markersize)
    # ax2.legend(handles=[mpatches.Patch(color='red', label='input')])

    ax3.set_title(transformation_title)
    ax3.axis('off')
    ax3.imshow(input_trans_image)
    # ax3.plot(*zip(*model_features_copy), marker='o', color='magenta', ls='', label='model', ms=markersize)  # ms = markersize
    # ax3.plot(*zip(*whole_input_transform), marker='o', color='b', ls='', ms=markersize)
    # ax3.legend(handles=[mpatches.Patch(color='blue', label='transformed input'), mpatches.Patch(color='magenta', label='model')])

    plot_name = model_image_name.split("/")[-1] + "_" + input_image_name.split(
        "/")[-1]
    plt.savefig(plot_name + '.png', bbox_inches='tight')
    plt.show(block=False)
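
prepocessing.split_in_face_legs_torso and unsplit are never shown. With OpenPose's 18-keypoint COCO layout, a plausible split looks like the sketch below; the exact index grouping is an assumption, not the module's actual code.

import numpy as np

# OpenPose COCO-18 layout: 0 nose, 1 neck, 2-7 shoulders/arms,
# 8-13 hips/legs, 14-17 eyes and ears.
FACE_IDS = [0, 14, 15, 16, 17]
TORSO_IDS = [1, 2, 3, 4, 5, 6, 7]
LEGS_IDS = [8, 9, 10, 11, 12, 13]

def split_in_face_legs_torso(features):
    # Guessed grouping; the real prepocessing module may, for instance,
    # assign the hips (8 and 11) to the torso instead.
    features = np.asarray(features)
    return features[FACE_IDS], features[TORSO_IDS], features[LEGS_IDS]

def unsplit(face, torso, legs):
    # Inverse of the split above: scatter the parts back to their indices.
    whole = np.zeros((18, 2))
    whole[FACE_IDS], whole[TORSO_IDS], whole[LEGS_IDS] = face, torso, legs
    return whole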
Example #7
import parse_openpose_json
import prepocessing
import affine_transformation
import pose_match
'''
Some plots for a blog
'''

json_data_path = 'data/json_data/'
images_data_path = 'data/image_data/'

model = "trap7"
input = "trap9"
model_json = json_data_path + model + '.json'
input_json = json_data_path + input + '.json'

model_image = images_data_path + model + '.jpg'
input_image = images_data_path + input + '.jpg'

model_features = parse_openpose_json.parse_JSON_single_person(model_json)
input_features = parse_openpose_json.parse_JSON_single_person(input_json)

input_features = prepocessing.unpad(input_features)
model_features = prepocessing.unpad(model_features)

(input_trans, A) = affine_transformation.find_transformation(model_features, input_features)
print(input_trans)
pose_match.plot_match(model_features, input_features, input_trans, model_image, input_image)
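
affine_transformation.find_transformation is the remaining black box. A common implementation is a least-squares affine fit on homogeneous coordinates, which also fits the pad/unpad calls above; a sketch under that assumption, not the library's actual code:

import numpy as np

def find_transformation(model, input_pts):
    # Pad both (N, 2) point sets with a ones column and solve X @ A = Y in the
    # least-squares sense, so A is a full 2-D affine map (rotation, scale,
    # shear and translation). Returns the transformed input and A.
    pad = lambda p: np.hstack([np.asarray(p, float), np.ones((len(p), 1))])
    X, Y = pad(input_pts), pad(model)
    A, _, _, _ = np.linalg.lstsq(X, Y, rcond=None)
    return (X @ A)[:, :2], A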