Example #1
import logging
import os

# collect_triple_dir, wrap_execute_parallel, export_visual_set_scale
# and NB_THREADS are assumed to be provided by the surrounding project.
def main(path_landmarks,
         path_dataset,
         path_output,
         scales,
         nb_jobs=NB_THREADS):
    assert path_landmarks != path_output, \
        'this folder "%s" cannot be used as output' % path_output
    assert path_dataset != path_output, \
        'this folder "%s" cannot be used as output' % path_output

    coll_dirs, _ = collect_triple_dir([path_landmarks],
                                      path_dataset,
                                      path_output,
                                      scales=scales)
    # filter existing
    coll_dirs = [
        d for d in coll_dirs
        if os.path.isdir(d['images']) and os.path.isdir(d['landmarks'])
    ]
    if not coll_dirs:
        logging.info('No sub-folders collected.')
        return 0
    lnds_dirs = sorted([cd['landmarks'] for cd in coll_dirs])
    logging.info('Collected %i sub-folders: \n%s', len(coll_dirs),
                 '\n'.join(lnds_dirs))

    counts = list(
        wrap_execute_parallel(export_visual_set_scale,
                              coll_dirs,
                              nb_jobs=nb_jobs,
                              desc='visualise'))
    logging.info('Performed %i visualisations', sum(counts))
    return counts
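
A minimal invocation sketch for this example; the paths and scale values below are hypothetical, and logging is configured only so the progress messages are visible:

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # hypothetical paths and scales; adjust to the actual dataset layout
    main(path_landmarks='data/landmarks',
         path_dataset='data/images',
         path_output='output/visualisations',
         scales=[5, 10, 25, 50])
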
Example #2
import logging
from functools import partial

# collect_triple_dir, parse_path_user_scale, evaluate_user,
# wrap_execute_parallel and NB_THREADS are assumed to be provided
# by the surrounding project.
def main(path_annots,
         path_dataset,
         path_output,
         nb_jobs=NB_THREADS,
         visual=False):
    coll_dirs, _ = collect_triple_dir([path_annots], '', '', with_user=True)
    logging.info('Collected sub-folders: %i', len(coll_dirs))
    user_names = sorted(
        {parse_path_user_scale(d['landmarks'])[0]
         for d in coll_dirs})
    logging.info('Found users: %r', user_names)
    if len(user_names) < 2:
        logging.info('Not enough user annotations.')
        # nothing to compare against, so exit early (as in Example #1)
        return 0

    _evaluate_user = partial(evaluate_user,
                             path_annots=path_annots,
                             path_dataset=path_dataset,
                             path_out=path_output,
                             visual=visual)
    counts = list(
        wrap_execute_parallel(_evaluate_user,
                              user_names,
                              nb_jobs=nb_jobs,
                              desc='evaluate'))
    logging.info('Created %i statistics.', sum(counts))
    return counts
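
As above, a hedged usage sketch; the annotation and dataset paths are hypothetical, and one sub-folder per user is expected under path_annots:

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # hypothetical paths; per-user statistics are written under path_output
    main(path_annots='data/annotations',
         path_dataset='data/images',
         path_output='output/user-stats',
         visual=True)
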
Example #3
import logging
import os
from functools import partial

import pandas as pd

# collect_triple_dir, parse_path_user_scale, evaluate_user,
# iterate_mproc_map and NB_THREADS are assumed to be provided
# by the surrounding project.
def main(path_annots,
         path_dataset,
         path_output,
         consensus='mean',
         visual=False,
         nb_jobs=NB_THREADS):
    coll_dirs, _ = collect_triple_dir([path_annots], '', '', with_user=True)
    logging.info('Collected sub-folders: %i', len(coll_dirs))
    user_names = sorted(
        {parse_path_user_scale(d['landmarks'])[0]
         for d in coll_dirs})
    logging.info('Found users: %r', user_names)
    if len(user_names) < 2:
        logging.info('Not enough user annotations.')
        # nothing to compare against, so exit early (as in Example #1)
        return 0

    _evaluate_user = partial(evaluate_user,
                             path_annots=path_annots,
                             path_dataset=path_dataset,
                             path_out=path_output,
                             tp_consensus=consensus,
                             visual=visual)
    dfs = list(
        iterate_mproc_map(_evaluate_user,
                          user_names,
                          nb_workers=nb_jobs,
                          desc='evaluate'))

    # aggregate results
    df_all = pd.concat(dfs, sort=False)
    df_all.to_csv(os.path.join(path_output, 'STATISTIC__partial.csv'))
    rows = []
    for user, dfg in df_all.groupby('user'):
        # summarise the distribution of per-case median rTRE for this user
        stat = dfg['rTRE median'].describe()[['mean', 'std', 'max']].to_dict()
        stat = {'%s [median rTRE]' % k: v for k, v in stat.items()}
        stat.update({
            'user': user,
            'count': len(dfg),
        })
        rows.append(stat)
    # DataFrame.append was removed in pandas 2.0, so collect rows first
    df_short = pd.DataFrame(rows).set_index('user')
    logging.info('OVERALL \n%s \n%s', '=' * 10, df_short)
    df_short.to_csv(os.path.join(path_output, 'STATISTIC__overview.csv'))

    logging.info('Created %i statistics.', len(df_all))
    return len(df_all)
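
The per-user aggregation at the end of this example can be exercised in isolation; the following self-contained sketch uses made-up values for the 'rTRE median' column purely to illustrate the groupby/describe pattern:

import pandas as pd

# toy stand-in for df_all; the numbers are made up for illustration
df_all = pd.DataFrame({
    'user': ['anna', 'anna', 'ben', 'ben'],
    'rTRE median': [0.02, 0.03, 0.05, 0.04],
})
rows = []
for user, dfg in df_all.groupby('user'):
    stat = dfg['rTRE median'].describe()[['mean', 'std', 'max']].to_dict()
    stat = {'%s [median rTRE]' % k: v for k, v in stat.items()}
    stat.update({'user': user, 'count': len(dfg)})
    rows.append(stat)
print(pd.DataFrame(rows).set_index('user'))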