def inspect_number_of_occurrences():
    """Plot running min/max/mean of record counts over repeated random runs.

    Runs ``inspect_final_data_set_without_labels`` in parallel with distinct
    seeds, flattens the per-run true/false record counts, and for each of the
    two series plots how the running minimum, maximum and mean evolve as more
    repetitions are accumulated.
    """
    # Get image paths in the training dataset; only the index list is needed.
    _, training_image_index_list = prepare_data.get_image_paths_in_training_dataset()

    # Draw distinct seeds so each parallel repetition uses a different split.
    repeated_num = 20
    seed_array = np.random.choice(range(repeated_num), size=repeated_num, replace=False)
    records_list = Parallel(n_jobs=-1)(
        delayed(inspect_final_data_set_without_labels)(training_image_index_list, seed) for seed in seed_array
    )

    # Flatten the per-run (true, false) count lists into two flat lists.
    true_records_num_list = []
    false_records_num_list = []
    for single_true_records_num_list, single_false_records_num_list in records_list:
        true_records_num_list.extend(single_true_records_num_list)
        false_records_num_list.extend(single_false_records_num_list)

    for single_list in [true_records_num_list, false_records_num_list]:
        # Vectorized running statistics in O(n). The original recomputed
        # np.min/np.max/np.mean over a growing prefix on every iteration,
        # which is O(n^2) overall; accumulate/cumsum give identical values.
        values = np.asarray(single_list)
        repeated_times_list = np.arange(1, values.size + 1)
        min_value_list = np.minimum.accumulate(values)
        max_value_list = np.maximum.accumulate(values)
        mean_value_list = np.cumsum(values, dtype=np.float64) / repeated_times_list

        pylab.figure()
        pylab.plot(repeated_times_list, min_value_list, color="yellowgreen", label="Minimum")
        pylab.plot(repeated_times_list, max_value_list, color="lightskyblue", label="Maximum")
        pylab.plot(repeated_times_list, mean_value_list, color="darkorange", label="Mean")
        pylab.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc=3, ncol=3, mode="expand", borderaxespad=0.0)
        pylab.xlabel("Repeated Times", fontsize="large")
        pylab.ylabel("Number of Occurrences", fontsize="large")
        pylab.grid()
        pylab.show()
def load_feature(facial_image_extension, feature_extension):
    """Load feature.

    :param facial_image_extension: the extension of the facial images
    :type facial_image_extension: string
    :param feature_extension: the extension of the feature files
    :type feature_extension: string
    :return: valid_training_image_feature_list refers to the features of the training images,
        valid_training_image_index_list refers to the indexes of the training images,
        testing_image_feature_dict refers to the features of the testing images which is saved in a dict.
    :rtype: tuple
    """

    print("\nLoading feature ...")

    # Resolve the image paths for both datasets.
    image_paths_in_training_dataset, training_image_index_list = \
        prepare_data.get_image_paths_in_training_dataset()
    image_paths_in_testing_dataset = prepare_data.get_image_paths_in_testing_dataset()

    # Read the per-image features from disk.
    training_image_feature_list = load_feature_from_file(
        image_paths_in_training_dataset, facial_image_extension, feature_extension)
    testing_image_feature_list = load_feature_from_file(
        image_paths_in_testing_dataset, facial_image_extension, feature_extension)

    # Keep only the (feature, index) pairs whose feature could be loaded.
    valid_pairs = [
        (feature, index)
        for feature, index in zip(training_image_feature_list, training_image_index_list)
        if feature is not None
    ]
    valid_training_image_feature_list = [feature for feature, _ in valid_pairs]
    valid_training_image_index_list = [index for _, index in valid_pairs]

    # Map each testing image's file name to its feature.
    testing_image_feature_dict = {
        os.path.basename(testing_image_path): testing_image_feature
        for testing_image_feature, testing_image_path in zip(
            testing_image_feature_list, image_paths_in_testing_dataset)
    }

    print("Feature loaded successfully.\n")
    return (valid_training_image_feature_list, valid_training_image_index_list,
            testing_image_feature_dict)
def inspect_number_of_images():
    """Plot a histogram of how many training images each person contributes.

    Counts the number of training images per unique person index and shows
    the distribution as a histogram with integer-centered bins.
    """
    # Get image paths in the training dataset; only the index list is needed.
    _, training_image_index_list = prepare_data.get_image_paths_in_training_dataset()

    # Single-pass counting: np.unique with return_counts yields the count per
    # unique index in sorted order — identical to the original loop, which
    # re-scanned the whole array once per unique value (O(n * k)).
    _, images_number_list = np.unique(training_image_index_list, return_counts=True)

    # Integer-centered bins so each distinct count value gets its own bar.
    bins = np.arange(np.min(images_number_list), np.max(images_number_list) + 2) - 0.5
    _, _, patches = pylab.hist(images_number_list, bins=bins)
    pylab.setp(patches, "facecolor", "yellowgreen", "alpha", 0.75)
    pylab.xlim([bins[0], bins[-1]])
    pylab.xticks(np.arange(np.min(images_number_list), np.max(images_number_list) + 1))
    pylab.xlabel("Number of Images from the Same Person", fontsize="large")
    pylab.ylabel("Number of Occurrences", fontsize="large")
    pylab.title("Histogram of Number of Images from the Same Person")
    pylab.show()
def load_feature(facial_image_extension, feature_extension):
    """Load feature.

    :param facial_image_extension: the extension of the facial images
    :type facial_image_extension: string
    :param feature_extension: the extension of the feature files
    :type feature_extension: string
    :return: valid_training_image_feature_list refers to the features of the training images,
        valid_training_image_index_list refers to the indexes of the training images,
        testing_image_feature_dict refers to the features of the testing images which is saved in a dict.
    :rtype: tuple
    """

    print("\nLoading feature ...")

    # Collect the image paths for both datasets up front.
    training_paths, training_indexes = prepare_data.get_image_paths_in_training_dataset()
    testing_paths = prepare_data.get_image_paths_in_testing_dataset()

    # Read the features from disk for every image.
    training_features = load_feature_from_file(training_paths, facial_image_extension, feature_extension)
    testing_features = load_feature_from_file(testing_paths, facial_image_extension, feature_extension)

    # Drop training entries whose feature failed to load (None placeholders).
    valid_training_image_feature_list = []
    valid_training_image_index_list = []
    for feature, index in zip(training_features, training_indexes):
        if feature is None:
            continue
        valid_training_image_feature_list.append(feature)
        valid_training_image_index_list.append(index)

    # Key each testing feature by its image file name.
    testing_image_feature_dict = {}
    for feature, path in zip(testing_features, testing_paths):
        testing_image_feature_dict[os.path.basename(path)] = feature

    print("Feature loaded successfully.\n")
    return (valid_training_image_feature_list, valid_training_image_index_list, testing_image_feature_dict)
# Example #5 (score: 0)
def inspect_number_of_images():
    """Show how many training images each person index contributes.

    Builds the per-person image counts and renders them as a histogram with
    one integer-centered bin per distinct count value.
    """
    # Only the index list from the training dataset is needed here.
    _, training_image_index_list = prepare_data.get_image_paths_in_training_dataset()

    # np.unique(..., return_counts=True) counts every index in one pass and
    # returns the counts in sorted-unique order — exactly what the original
    # produced with one full np.sum scan per unique value (O(n * k)).
    _, images_number_list = np.unique(training_image_index_list, return_counts=True)

    # Bins offset by 0.5 so each integer count sits in the middle of a bar.
    bins = np.arange(np.min(images_number_list),
                     np.max(images_number_list) + 2) - 0.5
    _, _, patches = pylab.hist(images_number_list, bins=bins)
    pylab.setp(patches, "facecolor", "yellowgreen", "alpha", 0.75)
    pylab.xlim([bins[0], bins[-1]])
    pylab.xticks(
        np.arange(np.min(images_number_list),
                  np.max(images_number_list) + 1))
    pylab.xlabel("Number of Images from the Same Person", fontsize="large")
    pylab.ylabel("Number of Occurrences", fontsize="large")
    pylab.title("Histogram of Number of Images from the Same Person")
    pylab.show()
import os
import prepare_data
import solution_basic
import cv2

facial_image_extension = "_open_face.jpg"
feature_extension = "_open_face.csv"

# Get image paths in the training and testing datasets.
image_paths_in_training_dataset, training_image_index_list = \
    prepare_data.get_image_paths_in_training_dataset()

# Load feature from file.
training_image_feature_list = solution_basic.load_feature_from_file(
    image_paths_in_training_dataset, facial_image_extension, feature_extension)

# Each feature file path is the image path with both extensions appended.
feature_file_paths = [
    image_path + facial_image_extension + feature_extension
    for image_path in image_paths_in_training_dataset
]

# Collect the feature files whose features failed to load (None entries).
invalid_feature_file_path_list = [
    feature_file_path
    for training_image_feature, feature_file_path in zip(
        training_image_feature_list, feature_file_paths)
    if training_image_feature is None
]

for invalid_feature_file_path in sorted(invalid_feature_file_path_list):
    # Strip the appended extensions to recover the original image path.
    found_index = invalid_feature_file_path.find(facial_image_extension)
    invalid_image_file_path = invalid_feature_file_path[0:found_index]

    # NOTE(review): the basename is computed but never used below — the
    # snippet looks truncated here; the computation is kept for parity.
    invalid_image_file_name = os.path.basename(invalid_image_file_path)
# Example #7 (score: 0)
def inspect_number_of_occurrences():
    """Visualize running min/max/mean of record counts across repetitions.

    Executes ``inspect_final_data_set_without_labels`` in parallel with
    distinct seeds, flattens the per-run true/false record counts, and plots
    the running minimum, maximum and mean of each series against the number
    of accumulated repetitions.
    """
    # Only the training image index list is needed here.
    _, training_image_index_list = prepare_data.get_image_paths_in_training_dataset()

    # Distinct seeds so every parallel repetition differs.
    repeated_num = 20
    seed_array = np.random.choice(range(repeated_num),
                                  size=repeated_num,
                                  replace=False)
    records_list = Parallel(n_jobs=-1)(
        delayed(inspect_final_data_set_without_labels)(training_image_index_list, seed)
        for seed in seed_array)

    # Flatten the per-run (true, false) count lists.
    true_records_num_list = []
    false_records_num_list = []
    for single_true_counts, single_false_counts in records_list:
        true_records_num_list.extend(single_true_counts)
        false_records_num_list.extend(single_false_counts)

    for single_list in [true_records_num_list, false_records_num_list]:
        # O(n) running statistics. The original recomputed np.min/np.max/
        # np.mean over a growing prefix each iteration (O(n^2) in total);
        # the accumulate/cumsum forms produce the same values.
        data = np.asarray(single_list)
        repeated_times = np.arange(1, data.size + 1)
        running_min = np.minimum.accumulate(data)
        running_max = np.maximum.accumulate(data)
        running_mean = np.cumsum(data, dtype=np.float64) / repeated_times

        pylab.figure()
        pylab.plot(repeated_times,
                   running_min,
                   color="yellowgreen",
                   label="Minimum")
        pylab.plot(repeated_times,
                   running_max,
                   color="lightskyblue",
                   label="Maximum")
        pylab.plot(repeated_times,
                   running_mean,
                   color="darkorange",
                   label="Mean")
        pylab.legend(bbox_to_anchor=(0., 1.02, 1., .102),
                     loc=3,
                     ncol=3,
                     mode="expand",
                     borderaxespad=0.)
        pylab.xlabel("Repeated Times", fontsize="large")
        pylab.ylabel("Number of Occurrences", fontsize="large")
        pylab.grid()
        pylab.show()