コード例 #1
0
def main(params):
    """ PIPELINE candidate clustering

    Pair matching image/segmentation/centre files, run the clustering on
    each triple (optionally in parallel) and export the table of result
    paths as CSV into the experiment folder.

    :param dict params: configuration, expects keys 'path_output', 'name',
        'path_images', 'path_segms', 'path_centers' and 'nb_workers'
    """
    params['path_expt'] = os.path.join(params['path_output'], FOLDER_EXPERIMENT % params['name'])
    tl_expt.save_config_yaml(os.path.join(params['path_expt'], NAME_YAML_PARAMS), params)
    tl_expt.create_subfolders(params['path_expt'], LIST_SUBDIRS)

    list_paths = [params[k] for k in ['path_images', 'path_segms', 'path_centers']]
    df_paths = tl_data.find_files_match_names_across_dirs(list_paths)
    df_paths.columns = ['path_image', 'path_segm', 'path_points']
    df_paths.index = range(1, len(df_paths) + 1)
    path_cover = os.path.join(params['path_expt'], run_train.NAME_CSV_TRIPLES)
    df_paths.to_csv(path_cover)

    logging.info('run clustering...')
    _wrapper_clustering = partial(cluster_points_draw_export, params=params, path_out=params['path_expt'])
    rows = (dict(row) for idx, row in df_paths.iterrows())
    iterate = tl_expt.WrapExecuteSequence(_wrapper_clustering, rows, nb_workers=params['nb_workers'])
    # DataFrame.append was removed in pandas 2.x (and is quadratic in a loop);
    # collect the per-image result dicts and build the frame in one pass
    df_paths_new = pd.DataFrame(list(iterate))

    df_paths_new.set_index('image', inplace=True)
    df_paths_new.to_csv(path_cover)
コード例 #2
0
def main(dict_paths,
         padding=0,
         use_mask=False,
         bg_color=None,
         nb_workers=NB_WORKERS):
    """ the main executable

    Pair annotation and image files by name and export a cut-out per object
    into the output folder, creating that folder when needed.

    :param dict_paths: dictionary with 'annot', 'image' and 'output' paths
    :param int padding: extra margin passed to the export of each object
    :param bool use_mask: forwarded to `export_cut_objects`
    :param bg_color: forwarded to `export_cut_objects`
    :param int nb_workers: number of jobs running in parallel
    """
    out_dir = dict_paths['output']
    if not os.path.isdir(out_dir):
        # only create one missing level; the parent has to exist already
        if not os.path.isdir(os.path.dirname(out_dir)):
            raise NotADirectoryError('"%s" should be folder' % dict_paths['output'])
        logging.debug('creating dir: %s', dict_paths['output'])
        os.mkdir(out_dir)

    df_paths = tl_data.find_files_match_names_across_dirs(
        [dict_paths['annot'], dict_paths['image']])

    logging.info('start cutting images')
    _wrapper_cutting = partial(
        export_cut_objects,
        path_out=out_dir,
        padding=padding,
        use_mask=use_mask,
        bg_color=bg_color,
    )
    sequence = tl_expt.WrapExecuteSequence(
        _wrapper_cutting,
        (row for _, row in df_paths.iterrows()),
        nb_workers=nb_workers,
    )
    # drain the (lazy) sequence so every export actually runs
    for _ in sequence:
        pass
コード例 #3
0
def main(dict_paths,
         visual=True,
         drop_labels=None,
         relabel=True,
         segm_alpha=1.,
         nb_workers=NB_THREADS):
    """ main evaluation

    Pair annotations with segmentations (and images when given), compute a
    statistic per image (optionally exporting visualisations) and write the
    per-image table plus an overall summary (describe + median) as CSV.

    :param dict dict_paths: {str: str} with 'annot', 'segm', 'output'
        and optionally 'image' paths
    :param bool visual: whether to export visualisations
    :param drop_labels: labels forwarded to `stat_single_set` to be ignored
    :param bool relabel: whether relabel segmentation as sequential
    :param float segm_alpha: overlay transparency forwarded to `stat_single_set`
    :param int nb_workers: number of threads running in parallel
    """
    if not os.path.isdir(dict_paths['output']):
        assert os.path.isdir(os.path.dirname(dict_paths['output'])), \
            'missing folder: %s' % dict_paths['output']
        os.mkdir(dict_paths['output'])

    name = os.path.basename(os.path.dirname(dict_paths['segm']))
    list_dirs = [dict_paths['annot'], dict_paths['segm']]
    if dict_paths.get('image', '') != '':
        list_dirs.append(dict_paths['image'])
    df_paths = tl_data.find_files_match_names_across_dirs(list_dirs)
    path_csv = os.path.join(dict_paths['output'], NAME_CVS_PER_IMAGE % name)
    logging.info('found %i pairs', len(df_paths))
    df_paths.to_csv(path_csv)

    assert not df_paths.empty, 'nothing to compare'

    name_seg_dir = os.path.basename(os.path.dirname(dict_paths['segm']))
    path_visu = os.path.join(dict_paths['output'],
                             name_seg_dir + SUFFIX_VISUAL)
    if visual and not os.path.isdir(path_visu):
        os.mkdir(path_visu)
    elif not visual:
        # empty path signals "no visualisation" to the workers
        path_visu = ''

    logging.info('compute statistic per image')
    _wrapper_stat = partial(stat_single_set,
                            drop_labels=drop_labels,
                            relabel=relabel,
                            path_visu=path_visu,
                            segm_alpha=segm_alpha)
    iterate = tl_expt.WrapExecuteSequence(_wrapper_stat,
                                          df_paths.iterrows(),
                                          desc='compute statistic',
                                          nb_workers=nb_workers)
    list_stats = list(iterate)
    df_stat = pd.DataFrame(list_stats)

    path_csv = os.path.join(dict_paths['output'], NAME_CVS_PER_IMAGE % name)
    logging.debug('export to "%s"', path_csv)
    df_stat.to_csv(path_csv)

    logging.info('summarise statistic')
    path_csv = os.path.join(dict_paths['output'], NAME_CVS_OVERALL % name)
    logging.debug('export to "%s"', path_csv)
    df_desc = df_stat.describe()
    # DataFrame.append was removed in pandas 2.x; add the median as a new row.
    # numeric_only=True keeps non-numeric stat columns from raising in pandas 2.x
    df_desc.loc['median'] = df_stat.median(numeric_only=True)
    logging.info(df_desc.T[['count', 'mean', 'std', 'median']])
    df_desc.to_csv(path_csv)
コード例 #4
0
def main(params):
    """ compute the distance among segmented superpixels and given annotation

    :param dict params: configuration with 'path_images', 'path_segms',
        'path_out', 'nb_workers' and SLIC parameters
        ('slic_size', 'slic_regul', 'slico')
    """
    # BUG FIX: the condition was inverted — warn when the output dir is MISSING
    if not os.path.isdir(params['path_out']):
        logging.info('Missing output dir -> no visual export & results table.')

    list_paths = [params['path_images'], params['path_segms']]
    df_paths = tl_data.find_files_match_names_across_dirs(list_paths)
    df_paths.columns = ['path_image', 'path_segm']

    _wrapper_eval = partial(compute_boundary_distance,
                            params=params,
                            path_out=params['path_out'])
    iterate = tl_expt.WrapExecuteSequence(_wrapper_eval,
                                          df_paths.iterrows(),
                                          nb_workers=params['nb_workers'],
                                          desc='evaluate SLIC')
    # DataFrame.append was removed in pandas 2.x (and is quadratic in a loop);
    # collect all rows first and build the frame once
    rows = [{'name': name, 'mean boundary distance': dist}
            for name, dist in iterate]
    df_dist = pd.DataFrame(rows)
    df_dist.set_index('name', inplace=True)

    if os.path.isdir(params['path_out']):
        csv_name = NAME_CSV_DISTANCES % (params['slic_size'],
                                         params['slic_regul'], params['slico'])
        df_dist.to_csv(os.path.join(params['path_out'], csv_name))
    logging.info('STATISTIC:')
    logging.info(df_dist.describe())
コード例 #5
0
def evaluate_folder(path_dir, dict_paths, export_visual=EXPORT_VUSIALISATION):
    """ take a single folder with segmentation and compute statistic
    against annotation and export some visualisations, return computed stat.

    :param str path_dir: folder with segmentation results (*.png) to evaluate
    :param dict dict_paths: {str: str} with 'images', 'annots', 'segments',
        'centers' and 'results' paths
    :param bool export_visual: whether to export visualisations
    :return dict: {str: float} summary (mean/std/median per metric) for folder
    """
    logging.info('evaluate folder: %s', path_dir)
    name = os.path.basename(path_dir)

    list_paths = [dict_paths['images'], dict_paths['annots'],
                  dict_paths['segments'], dict_paths['centers'],
                  os.path.join(path_dir, '*.png')]
    df_paths = tl_data.find_files_match_names_across_dirs(list_paths)

    if df_paths.empty:
        return {'method': name, 'count': 0}

    if dict_paths['annots'] is not None:
        df_paths.columns = ['path_image', 'path_annot', 'path_in-segm',
                            'path_centers', 'path_egg-segm']
    else:
        df_paths.columns = ['path_image', 'path_in-segm',
                            'path_centers', 'path_egg-segm']
    df_paths.index = range(1, len(df_paths) + 1)
    df_paths.to_csv(os.path.join(dict_paths['results'], NAME_CSV_STAT % name))

    if export_visual:
        for _, row in df_paths.iterrows():
            expert_visual(row, name, path_out=dict_paths['results'])

    if not dict_paths['annots']:
        logging.info('no Annotation given')
        return {'method': name, 'count': 0}

    # DataFrame.append was removed in pandas 2.x (and is quadratic in a loop);
    # compute all per-row metric dicts and build the frame in one pass
    df_eval = pd.DataFrame([compute_metrics(row)
                            for _, row in df_paths.iterrows()])

    df_eval.set_index(['name'], inplace=True)
    df_eval.to_csv(os.path.join(dict_paths['results'], NAME_CSV_STAT % name))

    df_summary = df_eval.describe()
    cols = df_eval.columns.tolist()
    dict_eval = {'method': name, 'count': len(df_eval)}
    for n in ['mean', 'std']:
        names = ['%s (%s)' % (c, n) for c in cols]
        dict_eval.update(zip(names, df_summary.T[n].values.tolist()))
    dict_eval.update(zip(['%s (median)' % c for c in cols],
                         df_eval.median(axis=0).values.tolist()))

    return dict_eval
コード例 #6
0
def main(params):
    """ pair image/segmentation/centre files and export a figure per triple

    :param dict params: configuration with 'path_images', 'path_segms',
        'path_centers', 'path_output', 'path_infofile', 'stages'
        and 'nb_workers'
    """
    input_patterns = [params['path_images'],
                      params['path_segms'],
                      params['path_centers']]
    df_paths = tl_data.find_files_match_names_across_dirs(input_patterns)
    df_paths.columns = ['path_image', 'path_segm', 'path_centers']
    df_paths.index = range(1, len(df_paths) + 1)

    path_out = params['path_output']
    if not os.path.exists(path_out):
        # only create one missing level; the parent has to exist already
        assert os.path.exists(os.path.dirname(path_out)), \
            'missing folder: "%s"' % os.path.dirname(params['path_output'])
        os.mkdir(path_out)

    df_slices_info = seg_annot.load_info_group_by_slices(
        params['path_infofile'], params['stages'])
    _wrapper_export = partial(export_figure,
                              df_slices_info=df_slices_info,
                              path_out=params['path_output'])
    sequence = tl_expt.WrapExecuteSequence(
        _wrapper_export, df_paths.iterrows(),
        nb_workers=params['nb_workers'])
    # drain the (lazy) sequence so every figure is exported
    for _ in sequence:
        pass
コード例 #7
0
def find_match_images_segms_centers(path_pattern_imgs, path_pattern_segms,
                                    path_pattern_center=None):
    """ pair images with segmentations (and centres when given) by file name
    NOTE: only paths are collected, no image data is loaded

    :param str path_pattern_imgs: path pattern for images
    :param str path_pattern_segms: path pattern for segmentations
    :param str path_pattern_center: optional path pattern for centre files
    :return DF: DF<path_img, path_segm, path_center>
    """
    logging.info('find match images-segms-centres...')
    df_paths = tl_data.find_files_match_names_across_dirs(
        [path_pattern_imgs, path_pattern_segms, path_pattern_center])

    if path_pattern_center:
        df_paths.columns = ['path_image', 'path_segm', 'path_centers']
    else:
        df_paths.columns = ['path_image', 'path_segm']
        # keep the centres column present (empty) so downstream code can rely on it
        df_paths['path_centers'] = ''
    df_paths.index = range(1, len(df_paths) + 1)
    return df_paths