def generate_labeled_image_list(self):
    """Scan every folder under self.db_base and build self.examples_all.

    For each image found, a Training_example is created holding the image
    path, the matching xyz-map path (same file name with 'isomap' replaced
    by 'xyzmap'), the class label derived from the folder name, and — when
    NUMBER_ALPHAS > 0 — the alpha coefficients read from the folder's
    fitting log (shared by all images of that folder).
    """
    self.examples_all = []
    for folder in glob.glob(self.db_base + '/*'):
        # The first 7 characters of the folder name identify the subject.
        label = self.id_class_mapping[os.path.basename(folder)[:7]]
        image_paths = glob.glob(folder + IMAGE_FILE_ENDING)
        if NUMBER_ALPHAS > 0:
            # One fitting log per folder; its alphas apply to every image.
            alphas, _ = oal.read_fitting_log(folder + '/fitting.log')
        for image_path in image_paths:
            example = Training_example()
            example.images.append(image_path)
            example.xyz.append(image_path.replace('isomap', 'xyzmap'))
            example.label = label
            if NUMBER_ALPHAS > 0:
                example.alphas.append(alphas)
            self.examples_all.append(example)
            # NOTE(review): orphaned fragment — this `if` belongs to a loop that
            # is not visible in this file; `experiment`, `num_imgs` and
            # `fitting_log` are defined elsewhere.
            if experiment[0] == num_imgs:
                experiment.append(fitting_log)

    #sort experiments by number of images
    experiments.sort(key=lambda experiment: experiment[0])

    # Report how many fitting logs were collected per image count.
    for experiment in experiments:
        print('for experiment with', experiment[0], 'images',
              len(experiment) - 1, 'fittings have been found')

    # now calculate the std deviation
    # Each experiment is [num_images, fitting_log, fitting_log, ...]: stack the
    # 63 alpha coefficients of every fitting, then take the per-coefficient
    # standard deviation across fittings.
    experiment_std_devs = []
    for experiment in experiments:
        alphas = np.empty(((len(experiment) - 1), 63))
        for idx, fitting in enumerate(experiment[1:]):
            alphas[idx, :], _ = oal.read_fitting_log(fitting)
        std_devs = np.std(alphas, axis=0)
        experiment_std_devs.append(std_devs)
    # Collect this video's per-experiment std devs (enclosing per-video loop
    # is not visible in this excerpt).
    all_std_devs.append(experiment_std_devs)
    #break

#print (len(all_std_devs), ",", len(all_std_devs[0]), ",", all_std_devs[0][0].shape )
#x_coordinates = [i[0] for i in experiments]

# make sure number of image sets is equal for all videos
min_num_sets = min([len(i) for i in all_std_devs
                    ])  #if len(all_std_devs)>1 else len(all_std_devs[0])
#print([len(i) for i in all_std_devs])
#print (min_num_sets)
# NOTE(review): this loop body appears to be spliced together from two
# different scripts — it truncates all_std_devs, then re-runs a fitting-log
# scan that duplicates the code further below, and it ends in a stray ')'
# (a syntax error left over from the splice). Left byte-identical.
for video_idx in range(len(all_std_devs)):
    # Truncate every video to the common minimum number of image sets.
    all_std_devs[video_idx] = all_std_devs[video_idx][:min_num_sets]
    # assemble all fitting results we find for this video and load the alphas
    experiment_alphas = [[] for i in range(len(categories))]

    #fitting_dirs = glob.glob(id_and_expression_dir+'/00[3,4,5,7]*')
    fitting_dirs = glob.glob(id_and_expression_dir + '/pose_exp_*')
    for fitting_dir in fitting_dirs:
        # Directory names seem to end in two 2-digit fields:
        # ...<pose_exp><iteration> — presumably zero-padded; TODO confirm
        # against the actual data layout.
        pose_exp = int(fitting_dir.split('/')[-1][-5:-3])
        iteration = int(fitting_dir.split('/')[-1][-2:])
        fitting_log = fitting_dir + '/fitting.log'
        if not os.path.exists(fitting_log):
            print(
                "ERROR: There is no fitting log file where there should be one!!",
                fitting_dir)
            exit(0)

        alphas, angles = oal.read_fitting_log(fitting_log)
        angles = np.array(angles)

        # add the alphas to the specific experiment_alphas
        experiment_alphas[pose_exp].append(alphas)

    # print some information
    for i in range(len(categories)):
        print('for experiment with', categories[i], len(experiment_alphas[i]),
              'fittings have been found that match the criteria')

    # now calculate the std deviation
    experiment_std_devs = []
    for i in range(len(experiment_alphas)):
        alphas = np.array(experiment_alphas[i])
        std_devs = np.std(alphas, axis=0)
)
# NOTE(review): fragment — id_and_expression_dirs, GT_ALPHA_DIR and
# GT_ALPHA_EXP_FITTING_LOG are defined outside this excerpt, and the loop
# below is cut off right after the error print (the exit(0) present in the
# similar code above is missing here).
if len(id_and_expression_dirs) == 0:
    print("ERROR: no videos found!!")
    exit(0)

#categories = ['yaw<20', 'yaw>-20', '-30<yaw<30', '-40<yaw<40', 'yaw<-10 or yaw>10', 'all', '<-20 and +-20 and >20']
# One category per expression id 0..9.
categories = ['exp ' + format(i, '01d') for i in range(10)]

all_errors = []

for id_and_expression_dir in id_and_expression_dirs:
    print('id and expression dir: ', id_and_expression_dir)

    # load "ground truth" alphas
    gt_alphas, _ = oal.read_fitting_log(
        GT_ALPHA_DIR + id_and_expression_dir.split('/')[-2] + "/" +
        id_and_expression_dir.split('/')[-1][0:-5] + GT_ALPHA_EXP_FITTING_LOG)

    # assemble all fitting results we find for this video and load the alphas
    experiment_alphas = [[] for i in range(len(categories))]

    #fitting_dirs = glob.glob(id_and_expression_dir+'/00[3,4,5,7]*')
    fitting_dirs = glob.glob(id_and_expression_dir + '/pose_exp_*')
    for fitting_dir in fitting_dirs:
        pose_exp = int(fitting_dir.split('/')[-1][-5:-3])
        iteration = int(fitting_dir.split('/')[-1][-2:])
        fitting_log = fitting_dir + '/fitting.log'
        if not os.path.exists(fitting_log):
            print(
                "ERROR: There is no fitting log file where there should be one!!",
                fitting_dir)
def main(argv=None):  # pylint: disable=unused-argument
    """Select the best (or an explicitly requested) checkpoint, run the test
    pass over image_list on the GPU, and dump one feature vector per image
    to test_log.

    Relies on module-level configuration: experiment_dir, train_dir,
    eval_dir, eval_log, take_iter, NUMBER_ALPHAS, image_list and test_log.
    """
    # Guard clauses: every required directory/file must exist up front.
    for required_path, message in ((experiment_dir, 'no experiment dir found!'),
                                   (train_dir, 'no training dir found!'),
                                   (eval_dir, 'no eval dir found!'),
                                   (eval_log, 'no log file found!')):
        if not os.path.exists(required_path):
            print(message)
            exit()

    if take_iter:
        best_net_checkpoint = train_dir + '/model.ckpt-' + str(take_iter)
    else:
        # Scan the eval log ("<iteration> <accuracy>" per line) for the
        # iteration with the highest accuracy; first maximum wins on ties.
        best_accuracy, best_iter = 0.0, 0
        with open(eval_log, 'r') as log:
            for line in log:
                iter_, accuracy = (float(token) for token in line.split())
                if accuracy > best_accuracy:
                    best_accuracy, best_iter = accuracy, int(iter_)

        best_net_checkpoint = train_dir + '/model.ckpt-' + str(best_iter)
        print('best network is', best_net_checkpoint)
        # double check if we have this network checkpoint
        if not os.path.exists(best_net_checkpoint + '.meta'):
            print('shit! this checkpoint went missing... exiting ...')
            exit(0)

    with tf.device('/gpu:0'):

        if NUMBER_ALPHAS == 0:
            vectors = test(best_net_checkpoint, image_list)
        else:
            print('reading fitting logs for alphas now')
            # One alpha vector per image; cache per folder so each
            # fitting.log is only read once for consecutive images.
            all_alphas = []
            cached_alphas = []
            cached_folder = ''
            for image in image_list:
                folder = os.path.dirname(image)
                if folder != cached_folder:
                    cached_alphas, _ = oal.read_fitting_log(folder +
                                                            '/fitting.log')
                    cached_folder = folder
                all_alphas.append(cached_alphas)
            vectors = test(best_net_checkpoint, image_list, alphas=all_alphas)

        # Write one line per image: the path followed by every vector
        # component, space-separated.
        with open(test_log, 'w') as log:
            for row, image in enumerate(image_list):
                log.write(image + ' ')
                for col in range(vectors.shape[1]):
                    log.write(str(vectors[row, col]) + ' ')
                log.write('\n')
# Example #6
# 0
# NOTE(review): fragment — query_file and ET (presumably
# xml.etree.ElementTree) are defined/imported outside this excerpt.
OUTPUT_FILE = '/user/HS204/m09113/my_project_folder/PaSC/multi_fit_CCR_iter75_reg30_only_10_alphas_control_without_fte.csv'

# When True, the target database is the query database itself (see below).
QUERY_AND_TARGET_SAME = True

# Parse the query sigset XML; each child of the root is one video entry.
query = ET.parse(query_file)
query_root = query.getroot()

#print (query_root[0][0])
# Build the query database: one [id_name, alphas] entry per query video.
# alphas is a numpy array, or None when the fitting log is missing/unreadable.
query_db = []
for i, query_video in enumerate(query_root):
    query_video_name = query_video[0].attrib['file-name']
    # BUGFIX: the original initialized `alphas = None` but appended
    # `query_alphas`, so a failed read on the first video raised NameError
    # and later failures silently reused the previous video's alphas.
    query_alphas = None
    try:
        query_alphas, _ = oal.read_fitting_log(FITTING_RESULTS_BASE +
                                               query_video_name[:-4] +
                                               '/fitting.log')
        query_alphas = np.array(query_alphas)
        #query_alphas = query_alphas/np.linalg.norm(query_alphas)
    except oal.OalException:
        print("No alphas found in", query_video_name)
    except FileNotFoundError:
        print("Video not found", query_video_name)
    query_id_name = query_video.attrib['name']
    query_db.append([query_id_name, query_alphas])
    if i % 100 == 0:
        print('loaded', i, 'of', len(query_root))

print('measuring distance...')
# Query and target sigsets are identical here, so reuse the loaded query db;
# the else branch (loading a separate target sigset) lies outside this excerpt.
if QUERY_AND_TARGET_SAME:
    target_db = query_db