Example #1
import numpy as np

from clean_utils import array_from_csv


def find_norm_parameters(database_dir):
    '''
    Find normalization parameters for the images, tilt values and pan values from the training subset, and store them
    in a .csv file in the dataset directory.

    Arguments:
        database_dir: Directory containing the preprocessed dataset.
    '''

    # Get image, tilt and pan arrays from the preprocessed dataset.
    img, tilt, pan = array_from_csv(database_dir + 'train.csv', database_dir)

    # Calculate normalization parameters for the images.
    i_mean = np.mean(img / 255.0)
    i_std = np.std(img / 255.0)

    # Calculate normalization parameters for the tilt values.
    t_mean = np.mean(tilt / 90.0)
    t_std = np.std(tilt / 90.0)

    # Calculate normalization parameters for the pan values.
    p_mean = np.mean(pan / 90.0)
    p_std = np.std(pan / 90.0)

    # Store normalization values in a .csv file.
    # Store the normalization values in a .csv file; the context manager closes the file even if a write fails.
    with open(database_dir + 'norm.csv', 'w') as csv_file:
        csv_file.write('img_mean,img_std,t_mean,t_std,p_mean,p_std\n')
        csv_file.write('%f,%f,%f,%f,%f,%f\n' % (i_mean, i_std, t_mean, t_std, p_mean, p_std))
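
# Usage sketch: the stored parameters can be read back with pandas when normalizing inputs later. The dataset path
# below is an assumption borrowed from the script further down, not something this function requires.
import pandas as pd

find_norm_parameters('clean/aflw_pointing04/')
norm = pd.read_csv('clean/aflw_pointing04/norm.csv')
i_mean, i_std = norm['img_mean'][0], norm['img_std'][0]
# An image array loaded elsewhere would then be normalized as (img / 255.0 - i_mean) / i_std.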
Example #2
import numpy as np

from clean_utils import array_from_csv


def store_dataset_arrays(database_dir):
    '''
    Store preprocessed dataset images as a collection of .npy files.

    Arguments:
        database_dir: Directory containing the preprocessed dataset.
    '''

    # Load dataset as Numpy arrays.
    tr_img, tr_tilt, tr_pan = array_from_csv(database_dir + 'train.csv', database_dir)
    v_img, v_tilt, v_pan = array_from_csv(database_dir + 'validation.csv', database_dir)
    t_img, t_tilt, t_pan = array_from_csv(database_dir + 'test.csv', database_dir)

    # Store images as .npy arrays.
    np.save(database_dir + 'train_img.npy', tr_img)
    np.save(database_dir + 'validation_img.npy', v_img)
    np.save(database_dir + 'test_img.npy', t_img)
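
# Usage sketch (hypothetical path): once stored, the arrays can be loaded back with np.load, optionally memory-mapped,
# which avoids re-decoding every image from disk on each run.
store_dataset_arrays('clean/aflw_pointing04/')
tr_img = np.load('clean/aflw_pointing04/train_img.npy', mmap_mode='r')
print(tr_img.shape)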
Example #3
sec_df = pd.read_csv(csv_path_2)

# Sort both datasets.

ref_df.sort_values(by=['tilt', 'pan'], inplace=True)
sec_df.sort_values(by=['tilt', 'pan'], inplace=True)

# Create the pandas dataframe that will hold the labels of those pictures from the second dataset that have a
# correspondence in the reference dataset.

destination_df = pd.DataFrame(columns=ref_df.columns.values)

# Load pictures and pose values from the second dataset.

img = array_from_csv(csv_path_2, source_dir_2)[0]

# List that will contain the pictures from the second dataset with a correspondence in the reference dataset.

destination_list = []

# Naively search for matching pictures between the two datasets, using the pose values as the matching criterion.

start = 0

for row in sec_df.itertuples():

    found = False
    start_old = start

    for i in range(start, len(ref_df.index)):
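
# The example above is cut off inside the inner search loop. Below is a minimal, self-contained sketch of the same
# naive pose-matching idea, assuming both dataframes carry 'file', 'tilt' and 'pan' columns; the helper name, the
# tolerance and the plain linear scan (without the 'start' pointer optimization used above) are all assumptions.
import pandas as pd


def match_by_pose(ref_df, sec_df, tol=1e-6):
    '''Return the rows of sec_df whose (tilt, pan) pair also appears in ref_df.'''
    matched_rows = []
    for row in sec_df.itertuples():
        # Naive linear scan over the reference labels.
        for ref_row in ref_df.itertuples():
            if abs(row.tilt - ref_row.tilt) < tol and abs(row.pan - ref_row.pan) < tol:
                matched_rows.append({'file': row.file, 'tilt': row.tilt, 'pan': row.pan})
                break
    return pd.DataFrame(matched_rows, columns=['file', 'tilt', 'pan'])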
Example #4
import numpy as np
import pandas as pd

from clean_utils import array_from_csv

dataset_dir = 'clean/aflw_pointing04/'

source_csv_file = 'test.csv'
destination_csv_file = 'test_aflw.csv'

destination_npy_file = 'test_aflw_img.npy'

source_csv = dataset_dir + source_csv_file

destination_csv = dataset_dir + destination_csv_file
destination_npy = dataset_dir + destination_npy_file

df = pd.read_csv(source_csv)

# Keep only the rows whose encoded numeric index is below 44782 (presumably the AFLW portion of the combined
# aflw_pointing04 set); the slice [4:-4] strips a four-character prefix and the file extension from the file name.
with open(destination_csv, 'w') as csv_file:
    csv_file.write('file,tilt,pan\n')

    for index, row in df.iterrows():
        if int(row['file'][4:-4]) < 44782:
            csv_file.write(row['file'] + ',' + str(row['tilt']) + ',' +
                           str(row['pan']) + '\n')

img_array, _, _ = array_from_csv(destination_csv, dataset_dir)

np.save(destination_npy, img_array)
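
# Sanity check (not in the original script): the number of images saved to the .npy file should match the number of
# rows written to the filtered .csv file.
check_df = pd.read_csv(destination_csv)
assert len(check_df.index) == img_array.shape[0]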
Example #5

dense_layer_size = 512

# Normalization parameters (image, tilt and pan mean/std, as produced by find_norm_parameters in Example #1).

mean = 0.407335
std = 0.236271

t_mean = -0.022308
t_std = 0.324841

p_mean = 0.000171
p_std = 0.518044

# Load image, tilt and pan arrays for the dataset.

img, tilt, pan = array_from_csv(dataset_csv, dataset_dir)

# Estimator model.

pose_estimator = mpatacchiola_generic(in_size_estimator, num_conv_blocks,
                                      num_filters_start, num_dense_layers,
                                      dense_layer_size)
pose_estimator.load_weights(estimator_path)

# Get score for the dataset (tilt, pan and global error).

pred = pose_estimator.predict((img / 255.0 - mean) / std)

mean_tilt_error = np.mean(np.abs(tilt -
                                 ((pred[:, 0] * t_std + t_mean) * 90.0)))
mean_pan_error = np.mean(np.abs(pan - ((pred[:, 1] * p_std + p_mean) * 90.0)))
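
# The prediction round-trip above inverts the normalization used at training time: angles are divided by 90.0 and
# standardized with the dataset mean/std, so predictions are de-standardized and scaled back to degrees. A minimal
# sketch of the two directions (helper names are hypothetical):
def normalize_angle(angle_deg, mean, std):
    # Degrees -> standardized network target, matching find_norm_parameters.
    return (angle_deg / 90.0 - mean) / std


def denormalize_angle(pred, mean, std):
    # Standardized network output -> degrees, as used for the errors above.
    return (pred * std + mean) * 90.0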
Example #6

sec_df = pd.read_csv(dockerface_poses_file)

# Sort both datasets.

ref_df.sort_values(by=['tilt', 'pan'], inplace=True)
sec_df.sort_values(by=['tilt', 'pan'], inplace=True)

# Create the pandas dataframe that will hold the labels of those pictures from the second dataset that have a
# correspondence in the reference dataset.

destination_df = pd.DataFrame(columns=['file', 'tilt', 'pan'])

# Load pictures from the Dockerface detections.

img = array_from_csv(dockerface_poses_file, aflw_dir)[0]

# List that will contain the pictures from the Dockerface detections with a correspondence in the reference dataset.

destination_list = []

# Grayscale version (for testing with RealHePoNet).
destination_list_grayscale = []

# Naively search for matching pictures between the two datasets, using the pose values as the matching criterion.

start = 0

for row in sec_df.itertuples():

    found = False
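
# The final loop is truncated before any match handling runs. For the grayscale copies mentioned above, each matched
# picture would typically be converted before being appended; a sketch under the assumption that the pictures are
# uint8 BGR arrays and that OpenCV is available (cv2 is not imported in the original snippet):
import cv2


def to_grayscale(pictures):
    '''Convert a list of BGR pictures to single-channel grayscale arrays.'''
    return [cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY) for pic in pictures]

# destination_list_grayscale = to_grayscale(destination_list)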