def retrain_loo_segment_image(imgs_idx_path, path_classif, path_dump,
                              path_out, path_visu):
    """ load the classifier, and dumped data, subtract the image,
    retrain the classif. without it and do the segmentation

    :param str path_img: path to input image
    :param str path_classif: path to saved classifier
    :param str path_dump: path to dumped data
    :param, str path_out: path to segmentation outputs
    :return (str, ndarray, ndarray):
    """
    idx, path_img = parse_imgs_idx_path(imgs_idx_path)
    dict_imgs, _, _, dict_features, dict_labels, _, _ = \
        load_dump_data(path_dump)
    dict_classif = seg_clf.load_classifier(path_classif)
    classif = dict_classif['clf_pipeline']
    params = dict_classif['params']

    idx_name = get_idx_name(idx, path_img)
    for d in [dict_features, dict_labels]:
        _ = d.pop(idx_name, None)
    assert (len(dict_imgs) - len(dict_features)) == 1, \
        'the selected image was not dropped from the training set'

    features, labels, _ = seg_clf.convert_set_features_labels_2_dataset(
        dict_features, dict_labels, balance_type=params['balance'], drop_labels=[-1])
    classif.fit(features, labels)

    idx_name, segm, segm_gc = segment_image(imgs_idx_path, params, classif,
                                            path_out, path_visu)
    # gc.collect(), time.sleep(1)
    return idx_name, segm, segm_gc
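
# Hypothetical usage sketch (not part of the original script): the paths below
# are placeholders, and the (index, path) item mirrors the format documented
# for segment_image further down.
def _example_retrain_loo():
    idx_name, segm, segm_gc = retrain_loo_segment_image(
        (0, 'data/images/img_00.png'),   # assumed (index, path) item
        'results/classifier.pkl',        # assumed saved classifier
        'results/dump_data.npz',         # assumed dumped features/labels
        'results/segmentation',          # folder for exported segmentations
        'results/segmentation_visu')     # folder for exported visualisations
    return idx_name, segm, segm_gc
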
Example #2
def load_image_annot_compute_features_labels(idx_row,
                                             params,
                                             show_debug_imgs=SHOW_DEBUG_IMAGES
                                             ):
    """ load image and annotation, and compute superpixel features and labels

    :param (int, {...}) idx_row: row from table with paths
    :param {str: ...} params: segmentation parameters
    :param bool show_debug_imgs: whether show debug images
    :return (...):
    """
    def _path_out_img(params, dir_name, name):
        return os.path.join(params['path_exp'], dir_name, name + '.png')

    idx, row = idx_row
    idx_name = get_idx_name(idx, row['path_image'])
    img = load_image(row['path_image'], params['img_type'])
    annot = load_image(row['path_annot'], '2d_segm')
    logging.debug('.. processing: %s', idx_name)
    assert img.shape[:2] == annot.shape[:2], \
        'mismatching size of image %r and annotation %r for "%s" - "%s"' % \
        (img.shape, annot.shape, row['path_image'], row['path_annot'])
    if show_debug_imgs:
        plt.imsave(_path_out_img(params, FOLDER_IMAGE, idx_name),
                   img,
                   cmap=plt.cm.gray)
        plt.imsave(_path_out_img(params, FOLDER_ANNOT, idx_name), annot)

    # duplicate the grayscale band to mimic an RGB image
    # if img.ndim == 2:
    #     img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
    slic = seg_spx.segment_slic_img2d(img,
                                      sp_size=params['slic_size'],
                                      rltv_compact=params['slic_regul'])
    img = tl_data.convert_img_color_from_rgb(img,
                                             params.get('clr_space', 'rgb'))
    logging.debug('computed SLIC with %i labels', slic.max())
    if show_debug_imgs:
        img_rgb = use_rgb_image(img)
        img_slic = segmentation.mark_boundaries(img_rgb,
                                                slic,
                                                color=(1, 0, 0),
                                                mode='subpixel')
        plt.imsave(_path_out_img(params, FOLDER_SLIC, idx_name),
                   np.clip(img_slic, 0, 1))
    slic_label_hist = seg_label.histogram_regions_labels_norm(slic, annot)
    labels = np.argmax(slic_label_hist, axis=1)
    slic_annot = labels[slic]
    if show_debug_imgs:
        plt.imsave(_path_out_img(params, FOLDER_SLIC_ANNOT, idx_name),
                   np.clip(slic_annot, 0, slic_annot.max()))

    features, feature_names = seg_fts.compute_selected_features_img2d(
        img, slic, params['features'])
    return idx_name, img, annot, slic, features, labels, \
           slic_label_hist, feature_names
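
# Hypothetical usage sketch: load_image_annot_compute_features_labels expects
# one (index, row) item, e.g. from pandas.DataFrame.iterrows() over a table
# with 'path_image' / 'path_annot' columns; all parameter values below are
# illustrative assumptions, not the project defaults.
def _example_compute_features(df_paths):
    params = {
        'path_exp': 'results/experiment',         # assumed experiment folder
        'img_type': '2d_rgb',                     # assumed image-type flag
        'slic_size': 30, 'slic_regul': 0.2,       # assumed SLIC parameters
        'features': {'color': ['mean', 'std']},   # assumed feature config
    }
    return [load_image_annot_compute_features_labels(idx_row, params,
                                                     show_debug_imgs=False)
            for idx_row in df_paths.iterrows()]
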
def retrain_lpo_segment_image(list_imgs_idx_path,
                              path_classif,
                              path_dump,
                              path_out,
                              path_visu,
                              show_debug_imgs=SHOW_DEBUG_IMAGES):
    """ load the classifier, and dumped data, subtract the image,
    retrain the classif without it and do the segmentation

    :param list(str) list_imgs_idx_path: path to input image
    :param str path_classif: path to saved classifier
    :param str path_dump: path to dumped data
    :param, str path_out: path to segmentation outputs
    :param bool show_debug_imgs: whether show debug images
    :return (str, ndarray, ndarray):
    """
    dict_imgs, _, _, dict_features, dict_labels, _, _ = load_dump_data(
        path_dump)
    dict_classif = seg_clf.load_classifier(path_classif)
    classif = dict_classif['clf_pipeline']
    params = dict_classif['params']

    for idx, path_img in list_imgs_idx_path:
        idx_name = get_idx_name(idx, path_img)
        _ = dict_features.pop(idx_name, None)
        _ = dict_labels.pop(idx_name, None)
    if (len(dict_imgs) - len(dict_features)) != len(list_imgs_idx_path):
        raise ValueError(
            'subset of %i images was not dropped, training set %i from total %i'
            % (len(list_imgs_idx_path), len(dict_features), len(dict_imgs)))

    features, labels, _ = seg_clf.convert_set_features_labels_2_dataset(
        dict_features,
        dict_labels,
        balance_type=params['balance'],
        drop_labels=[-1, np.nan] + params.get('drop_labels', []))
    classif.fit(features, labels)

    dict_segm, dict_segm_gc = {}, {}
    for imgs_idx_path in list_imgs_idx_path:
        idx_name, segm, segm_gc = segment_image(
            imgs_idx_path,
            params,
            classif,
            path_out,
            path_visu,
            show_debug_imgs=show_debug_imgs)
        dict_segm[idx_name] = segm
        dict_segm_gc[idx_name] = segm_gc
    gc.collect()
    time.sleep(1)
    return dict_segm, dict_segm_gc
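
# Hypothetical usage sketch for the leave-p-out variant: drop a small subset of
# images from the training set at once (all paths below are placeholders).
def _example_retrain_lpo():
    subset = [(0, 'data/images/img_00.png'),      # assumed (index, path) pairs
              (1, 'data/images/img_01.png')]
    dict_segm, dict_segm_gc = retrain_lpo_segment_image(
        subset, 'results/classifier.pkl', 'results/dump_data.npz',
        'results/segmentation', 'results/segmentation_visu',
        show_debug_imgs=False)
    return dict_segm, dict_segm_gc
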
Example #4
def retrain_lpo_segment_image(list_imgs_idx_path, path_classif, path_dump,
                              path_out, path_visu):
    """ load the classifier, and dumped data, subtract the image,
    retrain the classif without it and do the segmentation

    :param str path_img: path to input image
    :param str path_classif: path to saved classifier
    :param str path_dump: path to dumped data
    :param, str path_out: path to segmentation outputs
    :return str, ndarray, ndarray:
    """
    dict_imgs, dict_annot, dict_slics, dict_features, dict_labels, \
        _, feature_names = load_dump_data(path_dump)
    dict_classif = seg_clf.load_classifier(path_classif)
    classif = dict_classif['clf_pipeline']
    params = dict_classif['params']

    for idx, path_img in list_imgs_idx_path:
        idx_name = get_idx_name(idx, path_img)
        for d in [dict_features, dict_labels]:
            _ = d.pop(idx_name, None)
    assert (len(dict_imgs) - len(dict_features)) == len(list_imgs_idx_path), \
        'subset of %i images was not dropped, training set %i from total %i' \
        % (len(list_imgs_idx_path), len(dict_features), len(dict_imgs))

    features, labels, _ = seg_clf.convert_set_features_labels_2_dataset(
        dict_features,
        dict_labels,
        balance_type=params['balance'],
        drop_labels=[-1])
    classif.fit(features, labels)

    dict_segm, dict_segm_gc = {}, {}
    for imgs_idx_path in list_imgs_idx_path:
        idx_name, segm, segm_gc = segment_image(imgs_idx_path, params, classif,
                                                path_out, path_visu)
        dict_segm[idx_name] = segm
        dict_segm_gc[idx_name] = segm_gc
    # gc.collect(), time.sleep(1)
    return dict_segm, dict_segm_gc
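
# Rough illustration (not the library implementation) of what the conversion
# from per-image feature/label dicts to a single training dataset amounts to:
# stack the per-image blocks and drop superpixels carrying an ignore label.
def _example_stack_features(dict_features, dict_labels, drop_labels=(-1,)):
    names = sorted(dict_features)
    features = np.concatenate([dict_features[name] for name in names])
    labels = np.concatenate([dict_labels[name] for name in names])
    keep = ~np.isin(labels, drop_labels)          # mask out ignored labels
    return features[keep], labels[keep]
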
Example #5
def segment_image(imgs_idx_path,
                  params,
                  classif,
                  path_out,
                  path_visu=None,
                  show_debug_imgs=SHOW_DEBUG_IMAGES):
    """ perform image segmentation on input image with given paramters
    and trained classifier, and save results

    :param (int, str) imgs_idx_path:
    :param {str: ...} params: segmentation parameters
    :param obj classif: trained classifier
    :param str path_out: path for output
    :param str path_visu: the existing patch means export also visualisation
    :param bool show_debug_imgs: whether show debug images
    :return (str, ndarray, ndarray):
    """
    idx, path_img = parse_imgs_idx_path(imgs_idx_path)
    logging.debug('segmenting image: "%s"', path_img)
    idx_name = get_idx_name(idx, path_img)
    img = load_image(path_img, params['img_type'])

    debug_visual = dict() if show_debug_imgs else None

    gc_regul = params['gc_regul']
    if params['gc_use_trans']:
        label_penalty = seg_gc.compute_pairwise_cost_from_transitions(
            params['label_transitions'])
        gc_regul = (gc_regul * label_penalty)

    segm_gc, segm_soft = seg_pipe.segment_color2d_slic_features_model_graphcut(
        img,
        classif,
        sp_size=params['slic_size'],
        sp_regul=params['slic_regul'],
        dict_features=params['features'],
        gc_regul=gc_regul,
        gc_edge_type=params['gc_edge_type'],
        debug_visual=debug_visual)
    segm_map = np.argmax(segm_soft, axis=-1)

    for segm, suffix in [(segm_gc, ''), (segm_map, '_MAP')]:
        path_img = os.path.join(path_out, idx_name + suffix + '.png')
        logging.debug('export segmentation: %s', path_img)
        if np.max(segm) <= 1:
            img_seg = Image.fromarray((segm * 255).astype(np.uint8))
        else:
            img_seg = Image.fromarray(segm.astype(np.uint8))
        img_seg.convert('L').save(path_img)
        # io.imsave(path_img, segm_gc)

    path_npz = os.path.join(path_out, idx_name + '.npz')
    np.savez_compressed(path_npz, segm_soft)

    # plt.imsave(os.path.join(path_out, idx_name + '_rgb.png'), seg_pipe)
    if params.get('visual', False) and path_visu is not None \
            and os.path.isdir(path_visu):
        export_draw_image_segm_contour(img, segm_gc, path_visu, idx_name,
                                       '_GC')
        export_draw_image_segm_contour(img, segm_map, path_visu, idx_name,
                                       '_MAP')
        if show_debug_imgs and debug_visual is not None:
            path_fig = os.path.join(path_visu, str(idx_name) + '_debug.png')
            logging.debug('exporting (debug) visualization: %s', path_fig)
            fig = tl_visu.figure_segm_graphcut_debug(debug_visual)
            fig.savefig(path_fig, bbox_inches='tight', pad_inches=0.1)
            plt.close(fig)
    gc.collect()
    time.sleep(1)
    return idx_name, segm_map, segm_gc
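
# Small sketch of the regularisation scaling used above: with transition
# statistics available, the scalar 'gc_regul' is broadcast over a pairwise
# label-penalty matrix, giving one cost per label pair. The 3x3 matrix below
# is a made-up stand-in for the output of compute_pairwise_cost_from_transitions.
def _example_gc_regul_matrix():
    label_penalty = np.array([[0.0, 1.0, 2.0],
                              [1.0, 0.0, 1.0],
                              [2.0, 1.0, 0.0]])
    gc_regul = 0.2 * label_penalty                # same scaling as in segment_image
    return gc_regul
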
def segment_image(imgs_idx_path, params, classif, path_out, path_visu=None,
                  show_debug_imgs=SHOW_DEBUG_IMAGES):
    """ perform image segmentation on input image with given paramters
    and trained classifier, and save results

    :param (int, str) imgs_idx_path:
    :param {str: ...} params: segmentation parameters
    :param obj classif: trained classifier
    :param str path_out: path for output
    :param str path_visu: the existing patch means export also visualisation
    :return (str, ndarray, ndarray):
    """
    idx, path_img = parse_imgs_idx_path(imgs_idx_path)
    logging.debug('segmenting image: "%s"', path_img)
    idx_name = get_idx_name(idx, path_img)
    img = load_image(path_img, params['img_type'])
    slic = seg_spx.segment_slic_img2d(img, sp_size=params['slic_size'],
                                      rltv_compact=params['slic_regul'])
    img = seg_pipe.convert_img_color_space(img, params.get('clr_space', 'rgb'))
    features, _ = seg_fts.compute_selected_features_img2d(img, slic,
                                                          params['features'])
    labels = classif.predict(features)
    segm = labels[slic]
    path_img = os.path.join(path_out, idx_name + '.png')
    logging.debug('export segmentation: %s', path_img)
    img_seg = Image.fromarray(segm.astype(np.uint8))
    img_seg.convert('L').save(path_img)
    # io.imsave(path_img, segm)

    # plt.imsave(os.path.join(path_out, idx_name + '_rgb.png'), seg_pipe)
    if path_visu is not None and os.path.isdir(path_visu):
        export_draw_image_segm_contour(img, segm, path_visu, idx_name)

    try:  # in case some classifiers do not support predict_proba
        proba = classif.predict_proba(features)
        segm_soft = proba[slic]
        path_npz = os.path.join(path_out, idx_name + '.npz')
        np.savez_compressed(path_npz, segm_soft)
    except Exception:
        logging.warning('classifier %s does not support predict_proba()',
                        repr(classif))
        proba = None
        segm_soft = None

    # skip the GraphCut step if probabilities were not estimated or GC regularisation is zero
    if proba is not None and params['gc_regul'] > 0:
        gc_regul = params['gc_regul']
        if params['gc_use_trans']:
            label_penalty = seg_gc.compute_pairwise_cost_from_transitions(
                                                params['label_transitions'])
            gc_regul = (gc_regul * label_penalty)
        labels_gc = seg_gc.segment_graph_cut_general(
            slic, proba, img, features, gc_regul,
            edge_type=params['gc_edge_type'])
        # labels_gc = seg_gc.segment_graph_cut_simple(slic, proba, gc_regul)
        segm_gc = labels_gc[slic]
        # relabel according to the classifier classes
        segm_gc = classif.classes_[segm_gc]

        path_img = os.path.join(path_out, idx_name + '_gc.png')
        logging.debug('export segmentation: %s', path_img)
        img_seg_gc = Image.fromarray(segm_gc.astype(np.uint8))
        img_seg_gc.convert('L').save(path_img)
        # io.imsave(path_img, segm_gc)

        if path_visu is not None and os.path.isdir(path_visu):
            export_draw_image_segm_contour(img, segm_gc, path_visu,
                                           idx_name, '_gc')

            if show_debug_imgs:
                labels_map = np.argmax(proba, axis=1)
                plt.imsave(os.path.join(path_visu, idx_name + '_map.png'),
                           labels_map[slic])
                if segm_soft is not None:
                    for lb in range(segm_soft.shape[2]):
                        uc_name = idx_name + '_gc_unary-lb%i.png' % lb
                        plt.imsave(os.path.join(path_visu, uc_name),
                                   segm_soft[:, :, lb], vmin=0., vmax=1.,
                                   cmap=plt.cm.Greens)
    else:
        segm_gc = np.zeros(segm.shape)
    # gc.collect(), time.sleep(1)
    return idx_name, segm, segm_gc
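
# Side note on the relabelling step above: 'classif.classes_' is the array of
# class values seen by a scikit-learn classifier, so indexing it with an array
# of label indices maps GraphCut indices back to the original class values.
def _example_relabel_classes():
    classes_ = np.array([0, 2, 5])                # e.g. the classifier knows labels 0, 2, 5
    labels_gc = np.array([[0, 1], [2, 1]])        # GraphCut label indices
    return classes_[labels_gc]                    # -> [[0, 2], [5, 2]]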