def visual_coannotation(lnds_user, lnds_refs, path_dataset, path_user,
                        img_name, path_visu):
    """ visualise the co-annotation

    show consensus annotation and use annotation

    :param DF lnds_user: loaded DataFrame
    :param DF lnds_refs: loaded DataFrame
    :param path_dataset: path to the image dataset (root)
    :param name_set: name of the image set
    :param name_user_scale: annotation folder containing user name and used scale
    :param img_name: particular image/annotation/stain name
    :param str|None path_visu: path to output
    :return str: figure path
    """
    name_set, name_user_scale = path_user.split(os.sep)[-2:]
    user, scale = parse_path_user_scale(name_user_scale)
    folder_scale = TEMPLATE_FOLDER_SCALE % scale
    image = None
    if path_dataset is not None and os.path.isdir(path_dataset):
        path_dir = os.path.join(path_dataset, name_set, folder_scale)
        paths_image = find_images(path_dir, img_name)
        image = load_image(paths_image[0]) if paths_image else None

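    # both landmark sets are expected at the base 100% scale,
    # so rescale them to match the scale of the loaded image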
    lnds_user = lnds_user * (scale / 100.)
    lnds_refs = lnds_refs * (scale / 100.)
    fig = figure_image_landmarks(lnds_refs, image, lnds_user, lnds2_name=user)

    fig_name = NAME_FIGURE_COANNOT % img_name
    path_dir = os.path.join(path_visu, name_set, name_user_scale)
    create_folder_path(path_dir)
    path_fig = os.path.join(path_dir, fig_name)
    fig.savefig(path_fig)
    plt.close(fig)
    return path_fig
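

# Hypothetical usage sketch (not part of the original module); the dataset root,
# annotation folder and image name below are illustrative only. Both landmark
# tables are assumed to be stored at the base 100% scale, since the function
# rescales them to the scale parsed from the annotation folder name:
#
#   lnds_user = pd.read_csv(
#       '/data/annotations/lung-lesion_1/user1_scale-50pc/scan_1.csv', index_col=0)
#   lnds_refs = pd.read_csv(
#       '/data/dataset/lung-lesion_1/scale-100pc/scan_1.csv', index_col=0)
#   path_fig = visual_coannotation(
#       lnds_user, lnds_refs, path_dataset='/data/dataset',
#       path_user='/data/annotations/lung-lesion_1/user1_scale-50pc',
#       img_name='scan_1', path_visu='/data/figures')

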
def scale_set_landmarks(path_set, scales=SCALES):
    """ scale given set with landmarks to particular scales

    :param str path_set: path to image/landmark set
    :param [int] scales: selected output scales
    :return {str: int}: collection of lengths
    """
    logging.debug('> processing: %s', path_set)
    path_scale100 = os.path.join(path_set, TEMPLATE_FOLDER_SCALE % 100)
    if not os.path.isdir(path_scale100):
        logging.error('missing base scale 100pc in "%s"', path_scale100)
        return
    list_csv = glob.glob(os.path.join(path_scale100, '*.csv'))
    logging.debug('>> found landmarks: %i', len(list_csv))
    dict_lnds = {
        os.path.basename(p): pd.read_csv(p, index_col=0)
        for p in list_csv
    }
    set_scales = {}
    for sc in (s for s in scales if s != 100):  # skip the base scale
        path_scale = os.path.join(path_set, TEMPLATE_FOLDER_SCALE % sc)
        create_folder_path(path_scale)
        for name, lnds in dict_lnds.items():
            lnds_scale = lnds * (sc / 100.)
            lnds_scale.to_csv(os.path.join(path_scale, name))
        set_scales[sc] = len(dict_lnds)
    return {os.path.basename(path_set): set_scales}
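

# Hypothetical usage sketch (not part of the original module); the path and the
# scales are illustrative. The set is expected to contain a 'scale-100pc' folder
# holding the base-scale landmark CSV files:
#
#   counts = scale_set_landmarks('/data/dataset/lung-lesion_1', scales=[25, 50])
#   # counts -> {'lung-lesion_1': {25: <nb CSV files>, 50: <nb CSV files>}}

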
def generate_consensus_landmarks(path_set, path_dataset):
    """ generate consensus landmarks for a particular image/landmark set

    :param str path_set: path to the set with annotations
    :param str path_dataset: output dataset path (root)
    :return {str: int}:
    """
    path_annots = list_sub_folders(path_set, '*_scale-*pc')
    logging.debug('>> found annotations: %i', len(path_annots))

    dict_lnds, dict_lens = create_consensus_landmarks(path_annots)

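    # export the consensus landmarks at the base 100% scale into the dataset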
    path_scale = os.path.join(path_dataset, os.path.basename(path_set),
                              TEMPLATE_FOLDER_SCALE % 100)
    create_folder_path(path_scale)
    for name in dict_lnds:
        dict_lnds[name].to_csv(os.path.join(path_scale, name))

    return {os.path.basename(path_set): dict_lens}
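

# Hypothetical usage sketch (not part of the original module); the paths are
# illustrative. The annotation set is expected to contain one
# '<user>_scale-<scale>pc' folder per annotator, and the consensus landmarks are
# exported into '<path_dataset>/<set-name>/scale-100pc':
#
#   lens = generate_consensus_landmarks('/data/annotations/lung-lesion_1',
#                                       path_dataset='/data/dataset')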