# Imports inferred from how the functions below use them; the project-specific
# modules follow the pyImSegm (imsegm) layout and may need adjusting to your
# checkout. Constants such as NAME_CSV_SEGM_STAT_SLIC_ANNOT, NAME_CVS_PER_IMAGE,
# NAME_CVS_OVERALL, SUFFIX_VISUAL and NB_THREADS, as well as the helpers
# visu_histogram_labels and export_visual, are defined elsewhere in the
# original experiment script.
import os
import logging
from functools import partial

import numpy as np
import pandas as pd
from sklearn import metrics
from skimage.segmentation import relabel_sequential

import imsegm.classification as seg_clf          # assumed module path
import imsegm.labeling as seg_label              # assumed module path
import imsegm.utilities.data_io as tl_data       # assumed module path
import imsegm.utilities.experiments as tl_expt   # assumed module path


def eval_segment_with_annot(params, dict_annot, dict_segm, dict_label_hist=None,
                            name_csv=NAME_CSV_SEGM_STAT_SLIC_ANNOT, nb_jobs=1):
    """ Evaluate the segmentation results against the given annotation.

    :param {str: ...} params: experiment parameters (must contain 'path_exp')
    :param {str: ndarray} dict_annot: annotations indexed by image name
    :param {str: ndarray} dict_segm: segmentations indexed by image name
    :param {str: ndarray} dict_label_hist: optional label histograms
    :param str name_csv: name of the exported CSV file
    :param int nb_jobs: number of jobs running in parallel
    :return: DataFrame with per-image statistics
    """
    if dict_label_hist is not None:
        visu_histogram_labels(params, dict_label_hist)
    assert sorted(dict_annot.keys()) == sorted(dict_segm.keys()), \
        'mismatch in dictionary keys: \n%s \n%s' % (sorted(dict_annot.keys()),
                                                    sorted(dict_segm.keys()))
    list_annot = [dict_annot[n] for n in dict_annot]
    list_segm = [dict_segm[n] for n in dict_annot]
    df_stat = seg_clf.compute_stat_per_image(list_segm, list_annot,
                                             list(dict_annot), nb_jobs)

    path_csv = os.path.join(params['path_exp'], name_csv)
    logging.info('STAT on seg_pipe and annot (%s):', name_csv)
    df_stat.to_csv(path_csv)

    logging.info(metrics.classification_report(
        seg_label.convert_segms_2_list(list_segm),
        seg_label.convert_segms_2_list(list_annot), digits=4))
    logging.debug(repr(df_stat))
    return df_stat
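

# A minimal usage sketch (not part of the original script): it evaluates one
# tiny synthetic annotation/segmentation pair and writes the statistics CSV
# into a temporary experiment folder. Only the 'path_exp' key of `params` is
# needed here, and the image name and CSV name are illustrative placeholders.
def _demo_eval_segment_with_annot():
    import tempfile
    annot = np.array([[0, 0, 1, 1], [0, 0, 1, 1]])
    segm = np.array([[0, 0, 1, 0], [0, 1, 1, 1]])
    params = {'path_exp': tempfile.mkdtemp()}
    return eval_segment_with_annot(params, {'img_0': annot}, {'img_0': segm},
                                   name_csv='demo_stat.csv')
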
def main(dict_paths, nb_jobs=NB_THREADS, relabel=True):
    """ Main evaluation.

    :param {str: str} dict_paths: paths to 'annot', 'segm', 'image' and 'output'
    :param int nb_jobs: number of threads running in parallel
    :param bool relabel: whether to relabel segmentations sequentially
    """
    logging.info('running...')
    if not os.path.isdir(dict_paths['output']):
        assert os.path.isdir(os.path.dirname(dict_paths['output'])), \
            'missing folder: %s' % dict_paths['output']
        os.mkdir(dict_paths['output'])

    name = os.path.basename(os.path.dirname(dict_paths['segm']))
    list_dirs = [dict_paths['annot'], dict_paths['segm']]
    if dict_paths['image'] != '':
        list_dirs.append(dict_paths['image'])
    df_paths = tl_data.find_files_match_names_across_dirs(list_dirs)
    path_csv = os.path.join(dict_paths['output'], NAME_CVS_PER_IMAGE % name)
    df_paths.to_csv(path_csv)

    annots, _ = tl_data.load_images_list(df_paths['path_1'].values.tolist())
    segms, names = tl_data.load_images_list(df_paths['path_2'].values.tolist())
    logging.info('loaded %i annots and %i segms', len(annots), len(segms))

    if relabel:
        annots = [relabel_sequential(annot)[0] for annot in annots]
        segms = list(map(wrapper_relabel_segm, zip(annots, segms)))

    path_csv = os.path.join(dict_paths['output'], NAME_CVS_PER_IMAGE % name)
    logging.debug('export to "%s"', path_csv)
    df_stat = seg_clf.compute_stat_per_image(segms, annots, names, nb_jobs)
    df_stat.to_csv(path_csv)

    path_csv = os.path.join(dict_paths['output'], NAME_CVS_OVERALL % name)
    logging.debug('export to "%s"', path_csv)
    df_desc = df_stat.describe()
    logging.info(df_desc.T[['count', 'mean', 'std']])
    df_desc.to_csv(path_csv)

    path_visu = os.path.join(dict_paths['output'], '%s__visual' % name)
    if not os.path.isdir(path_visu):
        os.mkdir(path_visu)
    # for idx, row in df_paths.iterrows():
    #     export_visual(row, path_visu)
    wrapper_visual = partial(export_visual, path_out=path_visu)
    iterate = tl_expt.WrapExecuteSequence(
        wrapper_visual, (row for idx, row in df_paths.iterrows()),
        nb_jobs=nb_jobs)
    list(iterate)

    logging.info('DONE')
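

# The helper `wrapper_relabel_segm` used above is defined elsewhere in the
# original script; the sketch below shows a plausible implementation. It
# unpacks one (annotation, segmentation) pair and relabels the segmentation so
# its labels agree with the overlapping annotation labels. The routine
# `seg_label.relabel_max_overlap_unique` is an assumption about the imsegm
# API; swap in whichever relabelling function your version provides.
def wrapper_relabel_segm(annot_segm):
    annot, segm = annot_segm
    # relabel `segm` by maximal overlap with `annot` (assumed imsegm helper)
    return seg_label.relabel_max_overlap_unique(annot, segm)
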
def eval_segment_with_annot(
    params,
    dict_annot,
    dict_segm,
    dict_label_hist=None,
    name_csv='statistic___.csv',
    drop_labels=None,
    nb_workers=1,
):
    """ evaluate the segmentation results according given annotation

    :param dict params:
    :param {str: ndarray} dict_annot:
    :param {str: ndarray} dict_segm:
    :param {str: ndarray} dict_label_hist:
    :param str name_csv:
    :param int nb_workers:
    :return:
    """
    if dict_label_hist is not None:
        visu_histogram_labels(params, dict_label_hist)
    if sorted(dict_annot) != sorted(dict_segm):
        raise ValueError('mismatch in dictionary keys: \n%s \n%s' %
                         (sorted(dict_annot.keys()), sorted(dict_segm.keys())))
    list_annot = [dict_annot[n] for n in dict_annot]
    list_segm = [dict_segm[n] for n in dict_annot]
    df_stat = seg_clf.compute_stat_per_image(list_segm,
                                             list_annot,
                                             list(dict_annot),
                                             nb_workers,
                                             drop_labels=drop_labels)

    path_csv = os.path.join(params['path_exp'], name_csv)
    logging.info('STATISTIC on segm and annot (%s):', name_csv)
    df_stat.to_csv(path_csv)

    logging.info(
        metrics.classification_report(
            seg_label.convert_segms_2_list(list_segm),
            seg_label.convert_segms_2_list(list_annot),
            digits=4))
    logging.debug('%r', df_stat)
    return df_stat


def main(dict_paths,
         visual=True,
         drop_labels=None,
         relabel=True,
         segm_alpha=1.,
         nb_jobs=NB_THREADS):
    """ main evaluation

    :param {str: str} dict_paths:
    :param int nb_jobs: number of thred running in parallel
    :param bool relabel: whether relabel segmentation as sequential
    """
    if not os.path.isdir(dict_paths['output']):
        assert os.path.isdir(os.path.dirname(dict_paths['output'])), \
            'missing folder: %s' % dict_paths['output']
        os.mkdir(dict_paths['output'])

    name = os.path.basename(os.path.dirname(dict_paths['segm']))
    list_dirs = [dict_paths['annot'], dict_paths['segm']]
    if dict_paths.get('image', '') != '':
        list_dirs.append(dict_paths['image'])
    df_paths = tl_data.find_files_match_names_across_dirs(list_dirs)
    path_csv = os.path.join(dict_paths['output'], NAME_CVS_PER_IMAGE % name)
    df_paths.to_csv(path_csv)

    assert len(df_paths) > 0, 'nothing to compare'

    annots, _ = tl_data.load_images_list(df_paths['path_1'].values.tolist())
    segms, names = tl_data.load_images_list(df_paths['path_2'].values.tolist())
    logging.info('loaded %i annots and %i segms', len(annots), len(segms))

    if drop_labels is not None:
        # mark dropped labels as NaN so they can be mapped to -1 below
        annots = [np.array(annot, dtype=float) for annot in annots]
        for lb in drop_labels:
            for i, annot in enumerate(annots):
                annots[i][annot == lb] = np.nan
    # shift labels by +1, replace NaN by 0, shift back -> dropped labels become -1
    annots = [np.nan_to_num(annot + 1).astype(int) - 1 for annot in annots]
    segms = [seg.astype(int) for seg in segms]

    if relabel:
        logging.info('relabel annotations and segmentations')
        if drop_labels is None:
            annots = [relabel_sequential(annot)[0] for annot in annots]
        iterate = tl_expt.WrapExecuteSequence(wrapper_relabel_segm,
                                              zip(annots, segms),
                                              nb_jobs=nb_jobs,
                                              ordered=True,
                                              desc='relabeling')
        segms = list(iterate)

    logging.info('compute statistic per image')
    path_csv = os.path.join(dict_paths['output'], NAME_CVS_PER_IMAGE % name)
    logging.debug('export to "%s"', path_csv)
    df_stat = seg_clf.compute_stat_per_image(segms,
                                             annots,
                                             names,
                                             nb_jobs,
                                             drop_labels=[-1])
    df_stat.to_csv(path_csv)

    logging.info('summarise statistic')
    path_csv = os.path.join(dict_paths['output'], NAME_CVS_OVERALL % name)
    logging.debug('export to "%s"', path_csv)
    df_desc = df_stat.describe()
    # DataFrame.append was removed in pandas 2.0; concatenate the median row instead
    df_desc = pd.concat([df_desc,
                         pd.Series(df_stat.median(), name='median').to_frame().T])
    logging.info(df_desc.T[['count', 'mean', 'std', 'median']])
    df_desc.to_csv(path_csv)

    if visual:
        images = [None] * len(annots)
        if 'path_3' in df_paths:
            images, _ = tl_data.load_images_list(df_paths['path_3'].values)
        path_visu = os.path.join(dict_paths['output'],
                                 '%s%s' % (name, SUFFIX_VISUAL))
        if not os.path.isdir(path_visu):
            os.mkdir(path_visu)
        # for idx, row in df_paths.iterrows():
        #     export_visual(row, path_visu)
        _wrapper_visual = partial(export_visual,
                                  path_out=path_visu,
                                  segm_alpha=segm_alpha)
        it_values = zip(names, annots, segms, images)
        iterate = tl_expt.WrapExecuteSequence(_wrapper_visual,
                                              it_values,
                                              desc='visualisations',
                                              nb_jobs=nb_jobs)
        list(iterate)
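

# A minimal usage sketch (not part of the original script): the folder names
# below are illustrative placeholders. Visualisation is disabled so the call
# only relies on the functions shown above plus the module-level constants.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    _dict_paths = {
        'annot': './data/annotations',       # ground-truth label images
        'segm': './results/segmentation',    # predicted segmentations
        'image': '',                         # optional raw images for visuals
        'output': './results/evaluation',    # folder for exported CSV files
    }
    main(_dict_paths, visual=False, drop_labels=None, relabel=True, nb_jobs=1)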