Example #1
def parse_experiments(params):
    """ with specific input parameters wal over result folder and parse it

    :param dict params:
    :return: DF<nb_experiments, nb_info>
    """
    logging.info('running parse Experiments results')
    logging.info(string_dict(params, desc='ARGUMENTS:'))
    assert os.path.isdir(params['path']), 'missing "%s"' % params['path']
    nb_workers = params.get('nb_workers', NB_WORKERS)

    df_all = pd.DataFrame()
    path_dirs = [
        p for p in glob.glob(os.path.join(params['path'], '*'))
        if os.path.isdir(p)
    ]
    logging.info('found experiments: %i', len(path_dirs))

    _wrapper_parse_folder = partial(parse_experiment_folder, params=params)
    for df_folder in WrapExecuteSequence(_wrapper_parse_folder, path_dirs,
                                         nb_workers):
        df_all = append_df_folder(df_all, df_folder)

    if isinstance(params['name_results'], list):
        name_results = '_'.join(
            os.path.splitext(n)[0] for n in params['name_results'])
    else:
        name_results = os.path.splitext(params['name_results'])[0]

    df_all.reset_index(inplace=True)
    path_csv = os.path.join(params['path'],
                            TEMPLATE_NAME_OVERALL_RESULT % name_results)
    logging.info('export results as %s', path_csv)
    df_all.to_csv(path_csv, index=False)
    return df_all
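
A minimal usage sketch for the example above; the folder path and result-file name are hypothetical, and NB_WORKERS is assumed to be defined by the surrounding module:

# hypothetical invocation; every value below is illustrative
params = {
    'path': '/path/to/experiments',   # folder with one sub-folder per experiment
    'name_results': 'results.csv',    # per-experiment result file to parse
    'nb_workers': 4,                  # optional, falls back to NB_WORKERS
}
df_all = parse_experiments(params)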
Example #2
def estim_model_classes_group(list_images, nb_classes, dict_features,
                              sp_size=30, sp_regul=0.2,
                              use_scaler=True, pca_coef=None, model_type='GMM',
                              nb_workers=NB_THREADS):
    """ estimate a model from sequence of input images and return it as result

    :param [ndarray] list_images:
    :param int nb_classes: number of classes
    :param int sp_size: initial size of a superpixel (meaning edge length)
    :param float sp_regul: regularisation in range (0, 1) where "0" gives elastic
        and "1" nearly square SLIC segments
    :param {str: [str]} dict_features: list of features to be extracted
    :param float pca_coef: range (0, 1) or None
    :param bool use_scaler: whether to use a scaler
    :param str model_type: model type
    :param int nb_workers: number of jobs running in parallel
    :return: estimated model and list of superpixel features per image
    """
    list_slic, list_features = list(), list()
    _wrapper_compute = partial(compute_color2d_superpixels_features,
                               sp_size=sp_size, sp_regul=sp_regul,
                               dict_features=dict_features)
    iterate = WrapExecuteSequence(_wrapper_compute, list_images,
                                  desc='compute SLIC & features',
                                  nb_workers=nb_workers)
    for slic, features in iterate:
        list_slic.append(slic)
        list_features.append(features)

    features = np.concatenate(tuple(list_features), axis=0)
    features = np.nan_to_num(features)

    model = estim_class_model(features, nb_classes, model_type, pca_coef, use_scaler)

    return model, list_features
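
A minimal usage sketch; the feature specification below is a hypothetical instance of the {str: [str]} format mentioned in the docstring, and list_images is assumed to be a list of already loaded 2D color images:

# hypothetical call; feature names and class count are illustrative
dict_features = {'color': ['mean', 'std', 'median']}
model, list_features = estim_model_classes_group(
    list_images, nb_classes=3, dict_features=dict_features,
    sp_size=30, sp_regul=0.2, model_type='GMM', nb_workers=2)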
Example #3
def main(path_pattern_in, path_out, nb_workers=NB_WORKERS):
    assert os.path.isdir(os.path.dirname(path_pattern_in)), \
        'missing: %s' % path_pattern_in
    assert os.path.isdir(os.path.dirname(path_out)), \
        'missing: %s' % os.path.dirname(path_out)

    if not os.path.isdir(path_out):
        logging.info('create dir: %s', path_out)
        os.mkdir(path_out)

    list_img_paths = glob.glob(path_pattern_in)
    logging.info('found images: %i', len(list_img_paths))

    # create partial subsets with image paths
    list_img_paths_partial = [
        list_img_paths[i::nb_workers * LOAD_SUBSET_COEF]
        for i in range(nb_workers * LOAD_SUBSET_COEF)
    ]
    list_img_paths_partial = [ls for ls in list_img_paths_partial if ls]
    mean_imgs = list(
        WrapExecuteSequence(load_mean_image,
                            list_img_paths_partial,
                            nb_workers=nb_workers,
                            desc='loading mean images'))
    # imgs, im_names = tl_data.dataset_load_images(list_img_paths, nb_workers=1)
    img_mean = np.mean(np.asarray(mean_imgs), axis=0)
    export_image(path_out, img_mean, 'mean_image')

    logging.info('original image size: %r', img_mean.shape)
    # bbox = find_min_bbox_cumul_sum(img_mean, params['threshold'])
    # NOTE: 'params' is expected to be a module-level dict (e.g. parsed CLI arguments)
    if params['thr_method'] == 'line-grad':
        bbox = find_min_bbox_grad(img_mean)
    elif params['threshold'] == 0:
        bbox = [0] * 4
    elif params['thr_method'] == 'line-sum':
        bbox = find_min_bbox_line_sum(img_mean, params['threshold'])
    else:
        bbox = find_min_bbox_cumul_sum(img_mean, params['threshold'])
    d_bbox = export_bbox_json(path_out, bbox)
    logging.info('found BBox: %r', d_bbox)

    _cut_export = partial(export_cut_image, d_bbox=d_bbox, path_out=path_out)
    list(
        WrapExecuteSequence(_cut_export,
                            list_img_paths,
                            nb_workers,
                            desc='exporting cut images'))
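
A minimal usage sketch; note that this main also reads a module-level params dict (keys 'thr_method' and 'threshold'), which is assumed to be filled in beforehand, e.g. from CLI arguments. All values below are illustrative:

# hypothetical module-level configuration read inside main()
params = {'thr_method': 'line-sum', 'threshold': 0.001}
main('/path/to/images/*.png', '/path/to/output', nb_workers=4)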
Example #4
def main(path_pattern_in, path_out, nb_workers=NB_WORKERS):
    assert os.path.isdir(os.path.dirname(path_pattern_in)), \
        'missing: %s' % path_pattern_in
    assert os.path.isdir(os.path.dirname(path_out)), \
        'missing: %s' % os.path.dirname(path_out)

    if not os.path.isdir(path_out):
        logging.info('create dir: %s', path_out)
        os.mkdir(path_out)

    list_img_paths = glob.glob(path_pattern_in)
    logging.info('found images: %i', len(list_img_paths))

    _wrapper_extract = partial(extract_activation, path_out=path_out)
    list(WrapExecuteSequence(_wrapper_extract, list_img_paths, nb_workers))
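
A minimal usage sketch with hypothetical paths:

main('/path/to/images/*.png', '/path/to/activations', nb_workers=4)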
Example #5
    def _perform(self):
        """ perform experiment as sequence of iterated configurations """
        if is_list_like(self.iter_params):
            logging.info('iterate over %i configurations',
                         len(self.iter_params))
            nb_workers = self.params.get('nb_workers', 1)

            for detail in WrapExecuteSequence(self._perform_once,
                                              self.iter_params,
                                              nb_workers,
                                              desc='experiments'):
                self.df_results = self.df_results.append(detail,
                                                         ignore_index=True)
                logging.debug('partial results: %r', detail)
        else:
            logging.debug('perform single configuration')
            detail = self._perform_once({})
            self.df_results = pd.DataFrame([detail])
Example #6
def parse_experiments(params):
    """ with specific input parameters walk over the result folder and parse it

    :param dict params:
    """
    logging.info('running recompute Experiments results')
    logging.info(string_dict(params, desc='ARGUMENTS:'))
    assert os.path.exists(params['path']), 'missing "%s"' % params['path']
    nb_workers = params.get('nb_workers', NB_WORKERS)

    path_dirs = [
        p for p in glob.glob(os.path.join(params['path'], '*'))
        if os.path.isdir(p)
    ]
    logging.info('found experiments: %i', len(path_dirs))

    _wrapper_parse_folder = partial(parse_experiment_folder, params=params)
    list(WrapExecuteSequence(_wrapper_parse_folder, path_dirs, nb_workers))
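
As with Example #1, a hypothetical invocation; this variant only re-parses each experiment folder and does not aggregate an overall CSV:

parse_experiments({'path': '/path/to/experiments', 'nb_workers': 4})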
Example #7
def warp2d_images_deformations(list_images,
                               list_deforms,
                               method='linear',
                               inverse=False,
                               nb_workers=NB_WORKERS):
    """ deform whole set of images to expected image domain

    :param list(ndarray) list_images:
    :param list(ndarray) list_deforms:
    :param int nb_workers:
    :return: [ndarray]

    >>> img = np.zeros((5, 9), dtype=int)
    >>> img[:3, 1:5] = 1
    >>> deform = register_demons_sym_diffeom(img, img, smooth_sigma=10.)
    >>> imgs = warp2d_images_deformations([img], [deform], method='nearest')
    >>> imgs  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    [array([[0, 1, 1, 1, 1, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0]]...)]
    """
    assert len(list_images) == len(list_deforms), \
        'number of images (%i) and deformations (%i) have to match' \
        % (len(list_images), len(list_deforms))
    list_deforms = list(list_deforms)

    _wrap_deform = partial(wrapper_warp2d_transform_image,
                           method=method,
                           inverse=inverse)
    list_imgs_wrap = [None] * len(list_images)
    list_items = zip(range(len(list_images)), list_images, list_deforms)
    for idx, img_w in WrapExecuteSequence(_wrap_deform,
                                          list_items,
                                          nb_workers,
                                          desc=None):
        list_imgs_wrap[idx] = img_w

    return list_imgs_wrap
Example #8
def dataset_add_noise(path_in,
                      path_out,
                      noise_level,
                      img_pattern=IMAGE_PATTERN,
                      nb_workers=NB_WORKERS):
    """

    :param str path_in:
    :param str path_out:
    :param float noise_level:
    :param str img_pattern:
    :param int nb_workers:
    """
    logging.info('starting adding noise %f', noise_level)
    assert os.path.exists(path_in), 'missing: %s' % path_in
    assert os.path.exists(path_out), 'missing: %s' % path_out

    path_imgs = sorted(glob.glob(os.path.join(path_in, img_pattern)))
    name_imgs = [os.path.basename(p) for p in path_imgs]
    logging.info('found images: %i', len(name_imgs))

    dir_in = os.path.basename(path_in)
    dir_out = dir_in + DIR_POSIX % noise_level
    path_out = os.path.join(path_out, dir_out)
    logging.debug('creating dir: %s', path_out)
    if not os.path.exists(path_out):
        os.mkdir(path_out)
    else:
        logging.warning('the output dir already exists')

    _wrapper_noise = partial(add_noise_image,
                             path_in=path_in,
                             path_out=path_out,
                             noise_level=noise_level)
    list(WrapExecuteSequence(_wrapper_noise, name_imgs, nb_workers))

    logging.info('DONE')
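
A minimal usage sketch; paths, pattern and noise level are hypothetical:

# hypothetical call with noise level 0.1; the image pattern is illustrative
dataset_add_noise('/path/to/images', '/path/to/output',
                  noise_level=0.1, img_pattern='*.png', nb_workers=4)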
Example #9
def process_expt_reconstruction(name_expt, path_expt, path_dataset=None,
                                path_imgs=None, nb_workers=NB_WORKERS, visual=False):
    atlas, df_weights, dict_deforms, segms, images = load_experiment(
        path_expt, name_expt, path_dataset, path_imgs, nb_workers)
    df_weights.set_index('image', inplace=True)

    path_out = os.path.join(path_expt, BASE_NAME_RECONST + name_expt)
    if not os.path.isdir(path_out):
        logging.debug('create folder: %s', path_out)
        os.mkdir(path_out)

    if visual:
        path_visu = os.path.join(path_expt, BASE_NAME_VISUAL + name_expt)
        if not os.path.isdir(path_visu):
            logging.debug('create folder: %s', path_visu)
            os.mkdir(path_visu)
    else:
        path_visu = None

    if dict_deforms is not None:
        deforms = [dict_deforms[n] for n in df_weights.index]
    else:
        deforms = [None] * len(df_weights)
    segms = [None] * len(df_weights) if segms is None else segms
    images = [None] * len(df_weights) if images is None else images

    _reconst = partial(perform_reconstruction, atlas=atlas,
                       path_out=path_out, path_visu=path_visu)
    iterate = zip(df_weights.index, df_weights.values, segms, images, deforms)
    list_diffs = []
    for n, diff in WrapExecuteSequence(_reconst, iterate, nb_workers=nb_workers):
        list_diffs.append({'image': n, 'reconstruction diff.': diff})

    df_diff = pd.DataFrame(list_diffs)
    df_diff.set_index('image', inplace=True)
    df_diff.to_csv(os.path.join(path_out, CSV_RECONT_DIFF))
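
A minimal usage sketch; the experiment name and paths below are hypothetical and must point to an experiment produced earlier:

process_expt_reconstruction('atlas_experiment', '/path/to/experiments',
                            path_dataset='/path/to/dataset',
                            path_imgs='/path/to/images',
                            nb_workers=4, visual=True)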
Example #10
def train_classif_color2d_slic_features(list_images,
                                        list_annots,
                                        dict_features,
                                        sp_size=30,
                                        sp_regul=0.2,
                                        clf_name=CLASSIF_NAME,
                                        label_purity=0.9,
                                        feature_balance='unique',
                                        pca_coef=None,
                                        nb_classif_search=1,
                                        nb_hold_out=CROSS_VAL_LEAVE_OUT,
                                        nb_workers=1):
    """ train classifier on list of annotated images

    :param [ndarray] list_images:
    :param [ndarray] list_annots:
    :param int sp_size: initial size of a superpixel (meaning edge length)
    :param float sp_regul: regularisation in range (0, 1) where "0" gives elastic
        and "1" nearly square segments
    :param dict(list(str)) dict_features: list of features to be extracted
    :param str clf_name: select the used classifier
    :param float label_purity: set the sample-labels purity for training
    :param str feature_balance: set how to balance datasets
    :param float pca_coef: select PCA coef or None
    :param int nb_classif_search: number of tries for hyper-parameters search
    :param int nb_hold_out: cross-val leave out
    :param int nb_workers: parallelism
    :return: trained classifier with the computed SLIC, features and labels per image
    """
    logging.info('TRAIN Superpixels-Features-Classifier')
    assert len(list_images) == len(list_annots), \
        'size of images (%i) and annotations (%i) should match' \
        % (len(list_images), len(list_annots))

    list_slic, list_features, list_labels = list(), list(), list()
    _wrapper_compute = partial(wrapper_compute_color2d_slic_features_labels,
                               sp_size=sp_size,
                               sp_regul=sp_regul,
                               dict_features=dict_features,
                               label_purity=label_purity)
    list_imgs_annot = zip(list_images, list_annots)
    iterate = WrapExecuteSequence(_wrapper_compute,
                                  list_imgs_annot,
                                  desc='compute SLIC & features & labels',
                                  nb_workers=nb_workers)
    for slic, fts, lbs in iterate:
        list_slic.append(slic)
        list_features.append(fts)
        list_labels.append(lbs)

    logging.debug('concatenate features...')
    # concatenate features and labels
    features, labels, sizes = convert_set_features_labels_2_dataset(
        dict(zip(range(len(list_features)), list_features)),
        dict(zip(range(len(list_labels)), list_labels)),
        balance_type=feature_balance,
        drop_labels=[-1])
    # drop the "do not care" labels, which are -1
    features = np.nan_to_num(features)

    logging.debug('train classifier...')
    # clf_pipeline = seg_clf.create_clf_pipeline(clf_name, pca_coef)
    # clf_pipeline.fit(np.array(features), np.array(labels, dtype=int))

    if len(sizes) > (nb_hold_out * 5):
        cv = CrossValidateGroups(sizes, nb_hold_out=nb_hold_out)
    # for a small number of training images this does not make sense
    else:
        cv = 10

    classif, _ = create_classif_search_train_export(
        clf_name,
        features,
        labels,
        pca_coef=pca_coef,
        cross_val=cv,
        nb_search_iter=nb_classif_search,
        nb_workers=nb_workers)

    return classif, list_slic, list_features, list_labels
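
A minimal usage sketch; images, annotations and the feature specification are hypothetical placeholders for already loaded data:

# hypothetical training call; feature names and parameters are illustrative
dict_features = {'color': ['mean', 'std']}
classif, list_slic, list_features, list_labels = \
    train_classif_color2d_slic_features(list_images, list_annots, dict_features,
                                        sp_size=30, sp_regul=0.2,
                                        nb_classif_search=5, nb_workers=2)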
Example #11
def register_images_to_atlas_demons(list_images,
                                    atlas,
                                    list_weights,
                                    smooth_coef=1.,
                                    params=None,
                                    interp_method='linear',
                                    inverse=False,
                                    rm_mean=True,
                                    nb_workers=NB_WORKERS):
    """ register whole set of images to estimated atlas and weights
    IDEA: think about parallel registration per sets as for loading images

    :param list(ndarray) list_images:
    :param ndarray atlas:
    :param ndarray list_weights:
    :param float smooth_coef:
    :param dict params:
    :param str interp_method:
    :param bool inverse:
    :param bool rm_mean: whether to remove the mean transform
    :param int nb_workers:
    :return: [ndarray], [ndarray]

    >>> import bpdl.pattern_atlas as ptn_atlas
    >>> np.random.seed(42)
    >>> atlas = np.zeros((8, 12), dtype=int)
    >>> atlas[:3, 1:5] = 1
    >>> atlas[3:7, 6:12] = 2
    >>> w_bins = np.array([[0, 0], [0, 1], [1, 1]], dtype=bool)
    >>> imgs = ptn_atlas.reconstruct_samples(atlas, w_bins)
    >>> deform = np.ones(atlas.shape + (2,))
    >>> imgs[1] = warp2d_apply_deform_field(imgs[1], deform * -2)
    >>> np.round(imgs[1]).astype(int)
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> _, _ = register_images_to_atlas_demons(imgs, atlas, w_bins, nb_workers=1)
    >>> imgs_w, deforms = register_images_to_atlas_demons(imgs, atlas, w_bins,
    ...                     smooth_coef=20., interp_method='nearest', nb_workers=2)
    >>> np.sum(imgs_w[0])
    0
    >>> imgs_w[1]  # doctest: +SKIP
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0],
           [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    >>> sorted(deforms[1].keys())
    ['mapping', 'mapping-inv', 'package']
    """
    assert len(list_images) == len(list_weights), \
        'number of images (%i) and weights (%i) have to match' \
        % (len(list_images), len(list_weights))
    atlas = np.asarray(atlas, dtype=int)
    list_weights = list(list_weights)

    list_imgs_wrap = [None] * len(list_images)
    list_deform = [None] * len(list_weights)
    iterations = zip(range(len(list_images)), list_images, list_weights)
    _wrapper_register = partial(wrapper_register_demons_image_weights,
                                atlas=atlas,
                                smooth_coef=smooth_coef,
                                params=params,
                                interp_method=interp_method,
                                inverse=inverse)
    for idx, deform in WrapExecuteSequence(_wrapper_register,
                                           iterations,
                                           nb_workers,
                                           desc=None):
        list_deform[idx] = deform

    # remove mean transform
    if rm_mean:
        for name in ['mapping', 'mapping-inv']:
            list_deform = subtract_mean_deform(list_deform, name)

    _wrapper_warp = partial(wrapper_warp2d_transform_image,
                            method='linear',
                            inverse=False)
    iterations = zip(range(len(list_images)), list_images, list_deform)
    for idx, img_w in WrapExecuteSequence(_wrapper_warp,
                                          iterations,
                                          nb_workers,
                                          desc=None):
        list_imgs_wrap[idx] = img_w

    return list_imgs_wrap, list_deform