Example #1
def compute_color2d_superpixels_features(image,
                                         dict_features,
                                         sp_size=30,
                                         sp_regul=0.2):
    """ segment image into superpixels and estimate features per superpixel

    :param ndarray image: input RGB image
    :param dict(list(str)) dict_features: list of features to be extracted
    :param int sp_size: initial size of a superpixel (meaning edge length)
    :param float sp_regul: regularisation in range (0, 1) where "0" gives elastic
           and "1" nearly square segments
    :return list(list(int)), [[floats]]: superpixels and related features
    """
    assert sp_regul > 0., 'SLIC regularisation must be positive'
    logging.debug('run Superpixel clustering.')
    slic = segment_slic_img2d(image,
                              sp_size=sp_size,
                              relative_compact=sp_regul)
    # plt.figure(), plt.imshow(slic)

    logging.debug('extract slic/superpixels features.')
    features, _ = compute_selected_features_img2d(image, slic, dict_features)
    logging.debug('list of features RAW: %r', features.shape)
    features[np.isnan(features)] = 0

    # if fts_norm:
    #     logging.debug('norm all features.')
    #     features, _ = seg_fts.norm_features(features)
    #     logging.debug('list of features NORM: %s', repr(features.shape))
    return slic, features
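A hedged usage sketch for the function above; the synthetic image and the feature specification {'color': ['mean', 'std']} are illustrative assumptions, not taken from the original listing:

import numpy as np

image = np.random.random((64, 64, 3))       # synthetic RGB image, illustrative only
feats = {'color': ['mean', 'std']}          # assumed feature-dict convention
slic, features = compute_color2d_superpixels_features(
    image, feats, sp_size=20, sp_regul=0.2)
# `features` holds one row per superpixel label appearing in `slic`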
Example #2
def prepare_boundary_points_close(seg,
                                  centers,
                                  sp_size=25,
                                  relative_compact=0.3):
    """ extract some point around foreground boundaries

    :param ndarray seg: input segmentation
    :param [(int, int)] centers: list of centers
    :param int sp_size: superpixel size
    :param float relative_compact: relative SLIC compactness / regularisation
    :return [ndarray]: boundary points grouped by the nearest centre

    >>> seg = np.zeros((100, 200), dtype=int)
    >>> ell_params = 50, 100, 40, 60, np.deg2rad(30)
    >>> seg = add_overlap_ellipse(seg, ell_params, 1)
    >>> pts = prepare_boundary_points_close(seg, [(40, 90)])
    >>> pts  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    [array([[  6,  85],
            [  8, 150],
            ...
            [ 92, 118]])]
    """
    slic = segment_slic_img2d(seg / float(seg.max()),
                              sp_size=sp_size,
                              relative_compact=relative_compact)
    points_all = filter_boundary_points(seg, slic)

    dists = spatial.distance.cdist(points_all, centers, metric='euclidean')
    close_center = np.argmin(dists, axis=1)

    points_centers = []
    for i in range(int(close_center.max() + 1)):
        points = points_all[close_center == i]
        points_centers.append(points)
    return points_centers
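A minimal standalone sketch of the nearest-centre grouping step used above, on synthetic points and centres:

import numpy as np
from scipy import spatial

points = np.array([[0, 0], [9, 9], [10, 10]])
centers = np.array([[1, 1], [10, 10]])
dists = spatial.distance.cdist(points, centers, metric='euclidean')
closest = np.argmin(dists, axis=1)            # nearest centre index per point
groups = [points[closest == i] for i in range(len(centers))]
# groups[0] -> points closest to (1, 1); groups[1] -> points closest to (10, 10)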
Example #3
def prepare_boundary_points_close(seg, centers, sp_size=25, rltv_compact=0.3):
    """ extract some point around foreground boundaries

    :param ndarray seg: input segmentation
    :param [(int, int)] centers: list of centers
    :return [ndarray]:

    >>> seg = np.zeros((100, 200), dtype=int)
    >>> ell_params = 50, 100, 40, 60, np.deg2rad(30)
    >>> seg = add_overlap_ellipse(seg, ell_params, 1)
    >>> pts = prepare_boundary_points_close(seg, [(40, 90)])
    >>> sorted(np.round(pts).tolist())  # doctest: +NORMALIZE_WHITESPACE
    [[[6, 85], [8, 150], [16, 109], [27, 139], [32, 77], [36, 41], [34, 177],
    [59, 161], [54, 135], [67, 62], [64, 33], [84, 150], [91, 48], [92, 118]]]
    """
    slic = seg_spx.segment_slic_img2d(seg / float(seg.max()),
                                      sp_size=sp_size,
                                      rltv_compact=rltv_compact)
    points_all = filter_boundary_points(seg, slic)

    dists = spatial.distance.cdist(points_all, centers, metric='euclidean')
    close_center = np.argmin(dists, axis=1)

    points_centers = []
    for i in range(int(close_center.max() + 1)):
        points = points_all[close_center == i]
        points_centers.append(points)
    return points_centers
Example #4
def compute_boundary_distance(idx_row, params, path_out=''):
    """ compute nearest distance between two segmentation contours

    :param (int, dict) idx_row: index and table row with image/segmentation paths
    :param dict params: segmentation parameters
    :param str path_out: path to output folder for visualisations
    :return (str, float): image name and mean boundary distance
    """
    _, row = idx_row
    name = os.path.splitext(os.path.basename(row['path_image']))[0]
    img = load_image(row['path_image'], params['img_type'])
    segm = load_image(row['path_segm'], 'segm')

    logging.debug('segment SLIC...')
    slic = seg_spx.segment_slic_img2d(img,
                                      params['slic_size'],
                                      params['slic_regul'],
                                      params['slico'])
    _, dists = seg_lbs.compute_boundary_distances(segm, slic)

    if os.path.isdir(path_out):
        logging.debug('visualise results...')
        fig = tl_visu.figure_segm_boundary_dist(segm, slic)
        fig.savefig(os.path.join(path_out, name + '.jpg'))

    return name, np.mean(dists)
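A hypothetical driver for the function above, assuming a table with 'path_image' and 'path_segm' columns and illustrative parameter values (the 'img_type' value is an assumption and depends on what load_image accepts):

import pandas as pd

df_paths = pd.DataFrame([{'path_image': 'images/sample.jpg',
                          'path_segm': 'segments/sample.png'}])
params = {'img_type': '2d_rgb',       # assumed type key accepted by load_image
          'slic_size': 20, 'slic_regul': 0.2, 'slico': False}
for idx_row in df_paths.iterrows():
    name, mean_dist = compute_boundary_distance(idx_row, params, path_out='')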
Example #5
def load_inputs(name):
    img, _ = load_image_2d(os.path.join(PATH_IMAGE, name + '.jpg'))
    seg, _ = load_image_2d(os.path.join(PATH_SEGM, name + '.png'))
    annot, _ = load_image_2d(os.path.join(PATH_ANNOT, name + '.png'))
    centers = pd.read_csv(os.path.join(PATH_CENTRE, name + '.csv'), index_col=0).values
    centers[:, [0, 1]] = centers[:, [1, 0]]

    slic = segment_slic_img2d(img, sp_size=25, relative_compact=0.3)
    return img, seg, slic, centers, annot
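The column swap above converts (x, y) coordinates as read from the CSV into (row, col) image ordering; a tiny standalone illustration with made-up values:

import numpy as np

centers = np.array([[100, 40], [150, 60]])   # (x, y) as loaded from the CSV
centers[:, [0, 1]] = centers[:, [1, 0]]      # now (row, col) == (y, x)
# centers -> [[40, 100], [60, 150]]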
Example #6
def load_image_annot_compute_features_labels(idx_row,
                                             params,
                                             show_debug_imgs=SHOW_DEBUG_IMAGES
                                             ):
    """ load image and annotation, and compute superpixel features and labels

    :param (int, {...}) idx_row: row from table with paths
    :param {str: ...} params: segmentation parameters
    :param bool show_debug_imgs: whether to export debug images
    :return (...):
    """
    def _path_out_img(params, dir_name, name):
        return os.path.join(params['path_exp'], dir_name, name + '.png')

    idx, row = idx_row
    idx_name = get_idx_name(idx, row['path_image'])
    img = load_image(row['path_image'], params['img_type'])
    annot = load_image(row['path_annot'], '2d_segm')
    logging.debug('.. processing: %s', idx_name)
    assert img.shape[:2] == annot.shape[:2], \
        'mismatched sizes of image %r and annotation %r for "%s" - "%s"' % \
        (img.shape, annot.shape, row['path_image'], row['path_annot'])
    if show_debug_imgs:
        plt.imsave(_path_out_img(params, FOLDER_IMAGE, idx_name),
                   img,
                   cmap=plt.cm.gray)
        plt.imsave(_path_out_img(params, FOLDER_ANNOT, idx_name), annot)

    # duplicate the gray band to get an RGB-like image
    # if img.ndim == 2:
    #     img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
    slic = seg_spx.segment_slic_img2d(img,
                                      sp_size=params['slic_size'],
                                      rltv_compact=params['slic_regul'])
    img = tl_data.convert_img_color_from_rgb(img,
                                             params.get('clr_space', 'rgb'))
    logging.debug('computed SLIC with %i labels', slic.max())
    if show_debug_imgs:
        img_rgb = use_rgb_image(img)
        img_slic = segmentation.mark_boundaries(img_rgb,
                                                slic,
                                                color=(1, 0, 0),
                                                mode='subpixel')
        plt.imsave(_path_out_img(params, FOLDER_SLIC, idx_name),
                   np.clip(img_slic, 0, 1))
    slic_label_hist = seg_label.histogram_regions_labels_norm(slic, annot)
    labels = np.argmax(slic_label_hist, axis=1)
    slic_annot = labels[slic]
    if show_debug_imgs:
        plt.imsave(_path_out_img(params, FOLDER_SLIC_ANNOT, idx_name),
                   np.clip(slic_annot, 0, slic_annot.max()))

    features, feature_names = seg_fts.compute_selected_features_img2d(
        img, slic, params['features'])
    return idx_name, img, annot, slic, features, labels, \
           slic_label_hist, feature_names
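The expression `labels[slic]` above relies on NumPy fancy indexing to broadcast one label per superpixel back into a per-pixel map; a minimal synthetic illustration:

import numpy as np

slic = np.array([[0, 0, 1],
                 [2, 2, 1]])                  # superpixel index per pixel
labels = np.array([3, 5, 7])                  # one class label per superpixel
pixel_map = labels[slic]                      # shape (2, 3), label per pixel
# pixel_map -> [[3, 3, 5], [7, 7, 5]]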
Example #7
    def test_region_growing_graphcut(self, name='insitu7545'):
        """    """
        if not os.path.exists(PATH_PKL_MODEL):
            self.test_shape_modeling()

        # file_model = pickle.load(open(PATH_PKL_MODEL, 'r'))
        npz_file = np.load(PATH_PKL_MODEL, allow_pickle=True)
        file_model = dict(npz_file[npz_file.files[0]].tolist())
        logging.info('loaded model: %r', file_model.keys())
        list_mean_cdf = file_model['cdfs']
        model = file_model['mix_model']

        img, _ = load_image_2d(os.path.join(PATH_IMAGE, name + '.jpg'))
        seg, _ = load_image_2d(os.path.join(PATH_SEGM, name + '.png'))
        annot, _ = load_image_2d(os.path.join(PATH_ANNOT, name + '.png'))
        centers = pd.read_csv(os.path.join(PATH_CENTRE, name + '.csv'), index_col=0).values
        centers[:, [0, 1]] = centers[:, [1, 0]]

        slic = segment_slic_img2d(img, sp_size=25, relative_compact=0.3)
        slic_prob_fg = compute_segm_prob_fg(slic, seg, LABELS_FG_PROB)

        dict_debug = {}
        labels_gc = region_growing_shape_slic_graphcut(
            slic,
            slic_prob_fg,
            centers, (model, list_mean_cdf),
            'set_cdfs',
            coef_shape=5.,
            coef_pairwise=15.,
            prob_label_trans=[0.1, 0.03],
            optim_global=False,
            nb_iter=65,
            allow_obj_swap=False,
            dict_thresholds=DEFAULT_RG2SP_THRESHOLDS,
            debug_history=dict_debug
        )

        segm_obj = labels_gc[slic]
        logging.info('show debug: %r', dict_debug.keys())

        for i in np.linspace(0, len(dict_debug['criteria']) - 1, 5):
            fig = figure_rg2sp_debug_complete(seg, slic, dict_debug, int(i), max_size=5)
            fig_name = 'RG2Sp_graph-cut_%s_debug-%03d.pdf' % (name, i)
            fig.savefig(os.path.join(PATH_OUTPUT, fig_name), bbox_inches='tight', pad_inches=0)
            plt.close(fig)

        _ = adjusted_rand_score(annot.ravel(), segm_obj.ravel())
        # self.assertGreaterEqual(score, 0.5)

        expert_segm(name, img, seg, segm_obj, annot, str_type='RG2Sp_graph-cut')
Example #8
    def test_general(self):
        slic = segment_slic_img2d(self.img, sp_size=15, relative_compact=0.2)

        logging.debug(np.max(slic))

        vertices, edges = make_graph_segm_connect_grid2d_conn4(slic)
        logging.debug('vertices: %r', vertices)
        logging.debug(len(edges))
        logging.debug('edges: %r', edges)
        fig, axarr = plt.subplots(ncols=2)
        axarr[0].imshow(self.img)
        axarr[1].imshow(slic, cmap=plt.cm.jet)
        fig.savefig(os.path.join(PATH_OUTPUT, 'temp_superpixels.png'))
        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
            plt.show()
        plt.close(fig)
Example #9
def get_slic_points_labels(segm, img=None, slic_size=20, slic_regul=0.1):
    """ run SLIC on image or supepixels and return superpixels, their centers
    and also lebels (label from segmentation in position of superpixel centre)

    :param ndarray segm: segmentation
    :param ndarray img: input image
    :param int slic_size: superpixel size
    :param float slic_regul: regularisation in range (0, 1)
    :return tuple:
    """
    if img is None:
        img = segm / float(segm.max())
    slic = segment_slic_img2d(img, sp_size=slic_size, relative_compact=slic_regul)
    slic_centers = np.array(superpixel_centers(slic)).astype(int)
    labels = segm[slic_centers[:, 0], slic_centers[:, 1]]
    return slic, slic_centers, labels
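A short usage sketch for the helper above on a synthetic segmentation (no raw image supplied, so SLIC runs on the normalised segmentation); the array values are illustrative:

import numpy as np

segm = np.zeros((60, 80), dtype=int)
segm[20:40, 30:60] = 1                        # a single foreground rectangle
slic, centers, labels = get_slic_points_labels(segm, slic_size=15, slic_regul=0.2)
# `labels[i]` is the segmentation label under the centre of superpixel i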
Example #10
    def test_show_image_features_clr2d(self):
        img = load_sample_image(IMAGE_LENNA)
        img = transform.resize(img, (128, 128))
        slic = segment_slic_img2d(img, sp_size=10, relative_compact=0.2)

        features, names = compute_selected_features_color2d(
            img, slic, FEATURES_SET_ALL)

        path_dir = os.path.join(PATH_OUTPUT, 'temp_image-rgb2d-features')
        if not os.path.isdir(path_dir):
            os.mkdir(path_dir)

        for i in range(features.shape[1]):
            fts = features[:, i]
            im_fts = fts[slic]
            p_fig = os.path.join(path_dir, names[i] + '.png')
            plt.imsave(p_fig, im_fts)
            self.assertTrue(os.path.isfile(p_fig))
Example #11
    def test_count_transitions_segment(self):
        img = self.img[:, :, 0]
        annot = self.annot.astype(int)

        slic = segment_slic_img2d(img, sp_size=15, relative_compact=0.2)
        label_hist = histogram_regions_labels_norm(slic, annot)
        labels = np.argmax(label_hist, axis=1)
        trans = count_label_transitions_connected_segments({'a': slic}, {'a': labels})
        path_csv = os.path.join(PATH_OUTPUT, 'labels_transitions.csv')
        pd.DataFrame(trans).to_csv(path_csv)
        gc_regul = compute_pairwise_cost_from_transitions(trans, 10.)

        np.random.seed(0)
        features = np.tile(labels, (5, 1)).T.astype(float)
        features += np.random.random(features.shape) - 0.5

        gmm = estim_class_model_gmm(features, 4)
        proba = gmm.predict_proba(features)

        segment_graph_cut_general(slic, proba, gc_regul)
Example #12
def estim_points_compute_features(name, img, segm, params):
    """ determine points (center candidates) using slic
    and for each compute feature vector with their names

    :param str name:
    :param ndarray img:
    :param ndarray segm:
    :param {str: any} params:
    :return (str, ndarray, [(int, int)], [[float]], [str]):
    """
    # superpixels on image
    assert img.shape[:2] == segm.shape[:2], \
        'not matching shapes: %r : %r' % (img.shape, segm.shape)
    slic = seg_spx.segment_slic_img2d(img, params['slic_size'], params['slic_regul'])
    slic_centers = seg_spx.superpixel_centers(slic)
    # slic_edges = seg_spx.make_graph_segm_connect_grid2d_conn4(slic)

    features, feature_names = compute_points_features(segm, slic_centers,
                                                      params)

    return name, slic, slic_centers, features, feature_names
Example #13
def compute_color2d_superpixels_features(image,
                                         clr_space='rgb',
                                         sp_size=30,
                                         sp_regul=0.2,
                                         dict_features=FTS_SET_SIMPLE,
                                         fts_norm=True):
    """ segment image into superpixels and estimate features per superpixel

    :param ndarray image: input RGB image
    :param str clr_space: choose the color space
    :param int sp_size: initial size of a superpixel (meaning edge length)
    :param float sp_regul: regularisation in range (0, 1) where "0" gives elastic
           and "1" nearly square segments
    :param {str: [str]} dict_features: list of features to be extracted
    :param bool fts_norm: whether to normalise features
    :return [[int]], [[floats]]: superpixels and related features
    """
    assert sp_regul > 0., 'SLIC regularisation must be positive'
    logging.debug('run Superpixel clustering.')
    slic = seg_sp.segment_slic_img2d(image,
                                     sp_size=sp_size,
                                     rltv_compact=sp_regul)
    # plt.figure(), plt.imshow(slic)

    logging.debug('extract slic/superpixels features.')
    image = convert_img_color_space(image, clr_space)
    features, _ = seg_fts.compute_selected_features_img2d(
        image, slic, dict_features)
    logging.debug('list of features RAW: %s', repr(features.shape))
    features[np.isnan(features)] = 0

    if fts_norm:
        logging.debug('norm all features.')
        features, _ = seg_fts.norm_features(features)
        logging.debug('list of features NORM: %s', repr(features.shape))
    return slic, features
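A hedged call sketch for this variant; FTS_SET_SIMPLE is taken as the default defined elsewhere in the project, and the synthetic image is only illustrative:

import numpy as np

image = np.random.random((96, 96, 3))        # synthetic RGB image
slic, features = compute_color2d_superpixels_features(
    image, clr_space='rgb', sp_size=20, sp_regul=0.2, fts_norm=True)
# with fts_norm=True the feature columns are normalised before being returned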
Example #14
def image_segmentation(idx_row, params, debug_export=DEBUG_EXPORT):
    """ image segmentation which prepare inputs (imsegm, centres)
    and perform segmentation of various imsegm methods

    :param (int, dict) idx_row: index and row with paths to the input image and centres
    :param dict params: segmentation parameters
    :return str: image name
    """
    _, row_path = idx_row
    for k in dict(row_path):
        if isinstance(k, str) and k.startswith('path_'):
            row_path[k] = tl_data.update_path(row_path[k], absolute=True)
    logging.debug('segmenting image: "%s"', row_path['path_image'])
    name = os.path.splitext(os.path.basename(row_path['path_image']))[0]

    img = load_image(row_path['path_image'])
    # replicate the gray channel to get an RGB-like image
    img_rgb = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
    seg = load_image(row_path['path_segm'], 'segm')
    assert img_rgb.shape[:2] == seg.shape, \
        'image %r and segm %r do not match' % (img_rgb.shape[:2], seg.shape)
    if not os.path.isfile(row_path['path_centers']):
        logging.warning('no center was detected for "%s"', name)
        return name
    centers = tl_data.load_landmarks_csv(row_path['path_centers'])
    centers = tl_data.swap_coord_x_y(centers)
    if not list(centers):
        logging.warning('no center was detected for "%s"', name)
        return name
    # img = seg / float(seg.max())
    slic = seg_spx.segment_slic_img2d(img_rgb,
                                      sp_size=params['slic_size'],
                                      relative_compact=params['slic_regul'])

    path_segm = os.path.join(params['path_exp'], 'input', name + '.png')
    export_draw_image_segm(path_segm, img_rgb, segm_obj=seg, centers=centers)

    seg_simple = simplify_segm_3cls(seg)
    path_segm = os.path.join(params['path_exp'], 'simple', name + '.png')
    export_draw_image_segm(path_segm, seg_simple - 1.)

    dict_segment = create_dict_segmentation(params, slic, seg, img, centers)

    image_name = name + '.png'
    centre_name = name + '.csv'

    # iterate over segmentation methods and perform segmentation on this image
    for method in dict_segment:
        (fn, args) = dict_segment[method]
        logging.debug(' -> %s on "%s"', method, name)
        path_dir = os.path.join(params['path_exp'], method)  # n.split('_')[0]
        path_segm = os.path.join(path_dir, image_name)
        path_centre = os.path.join(path_dir + DIR_CENTRE_POSIX, centre_name)
        path_fig = os.path.join(path_dir + DIR_VISUAL_POSIX, image_name)
        path_debug = os.path.join(path_dir + DIR_DEBUG_POSIX, name)
        # assuming that segmentation may fail
        try:
            t = time.time()
            if debug_export and 'rg2sp' in method:
                os.mkdir(path_debug)
                segm_obj, centers, dict_export = fn(*args,
                                                    debug_export=path_debug)
            else:
                segm_obj, centers, dict_export = fn(*args)

            # also export ellipse params here or inside the segm fn
            if dict_export is not None:
                for k in dict_export:
                    export_partial(k, dict_export[k], path_dir, name)

            logging.info('running time of %r on image "%s" is %d s',
                         fn.__name__, image_name,
                         time.time() - t)
            tl_data.io_imsave(path_segm, segm_obj.astype(np.uint8))
            export_draw_image_segm(path_fig, img_rgb, seg, segm_obj, centers)
            # export also centers
            centers = tl_data.swap_coord_x_y(centers)
            tl_data.save_landmarks_csv(path_centre, centers)
        except Exception:
            logging.exception('segment fail for "%s" via %s', name, method)

    return name
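The gray-to-RGB replication above (np.tile followed by np.rollaxis) can be checked in isolation on a tiny synthetic array:

import numpy as np

gray = np.arange(6.).reshape(2, 3)
rgb_like = np.rollaxis(np.tile(gray, (3, 1, 1)), 0, 3)
# rgb_like.shape -> (2, 3, 3); all three channels carry the same gray values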
def segment_image(imgs_idx_path, params, classif, path_out, path_visu=None,
                  show_debug_imgs=SHOW_DEBUG_IMAGES):
    """ perform image segmentation on input image with given paramters
    and trained classifier, and save results

    :param (int, str) imgs_idx_path:
    :param {str: ...} params: segmentation parameters
    :param obj classif: trained classifier
    :param str path_out: path for output
    :param str path_visu: if this existing path is given, also export visualisations
    :return (str, ndarray, ndarray):
    """
    idx, path_img = parse_imgs_idx_path(imgs_idx_path)
    logging.debug('segmenting image: "%s"', path_img)
    idx_name = get_idx_name(idx, path_img)
    img = load_image(path_img, params['img_type'])
    slic = seg_spx.segment_slic_img2d(img, sp_size=params['slic_size'],
                                      rltv_compact=params['slic_regul'])
    img = seg_pipe.convert_img_color_space(img, params.get('clr_space', 'rgb'))
    features, _ = seg_fts.compute_selected_features_img2d(img, slic,
                                                          params['features'])
    labels = classif.predict(features)
    segm = labels[slic]
    path_img = os.path.join(path_out, idx_name + '.png')
    logging.debug('export segmentation: %s', path_img)
    img_seg = Image.fromarray(segm.astype(np.uint8))
    img_seg.convert('L').save(path_img)
    # io.imsave(path_img, segm)

    # plt.imsave(os.path.join(path_out, idx_name + '_rgb.png'), seg_pipe)
    if path_visu is not None and os.path.isdir(path_visu):
        export_draw_image_segm_contour(img, segm, path_visu, idx_name)

    try:  # in case some classifiers do not support predict_proba
        proba = classif.predict_proba(features)
        segm_soft = proba[slic]
        path_npz = os.path.join(path_out, idx_name + '.npz')
        np.savez_compressed(path_npz, segm_soft)
    except Exception:
        logging.warning('classifier %r does not support predict_proba(.)',
                        classif)
        proba = None
        segm_soft = None

    # if probabilities were not estimated or GC regul. is zero
    if proba is not None and params['gc_regul'] > 0:
        gc_regul = params['gc_regul']
        if params['gc_use_trans']:
            label_penalty = seg_gc.compute_pairwise_cost_from_transitions(
                params['label_transitions'])
            gc_regul = (gc_regul * label_penalty)
        labels_gc = seg_gc.segment_graph_cut_general(
            slic, proba, img, features, gc_regul,
            edge_type=params['gc_edge_type'])
        # labels_gc = seg_gc.segment_graph_cut_simple(slic, proba, gc_regul)
        segm_gc = labels_gc[slic]
        # relabel according to classifier classes
        segm_gc = classif.classes_[segm_gc]

        path_img = os.path.join(path_out, idx_name + '_gc.png')
        logging.debug('export segmentation: %s', path_img)
        img_seg_gc = Image.fromarray(segm_gc.astype(np.uint8))
        img_seg_gc.convert('L').save(path_img)
        # io.imsave(path_img, segm_gc)

        if path_visu is not None and os.path.isdir(path_visu):
            export_draw_image_segm_contour(img, segm_gc, path_visu,
                                           idx_name, '_gc')

            if show_debug_imgs:
                labels_map = np.argmax(proba, axis=1)
                plt.imsave(os.path.join(path_visu, idx_name + '_map.png'),
                           labels_map[slic])
                if segm_soft is not None:
                    for lb in range(segm_soft.shape[2]):
                        uc_name = idx_name + '_gc_unary-lb%i.png' % lb
                        plt.imsave(os.path.join(path_visu, uc_name),
                                   segm_soft[:, :, lb], vmin=0., vmax=1.,
                                   cmap=plt.cm.Greens)
    else:
        segm_gc = np.zeros(segm.shape)
    # gc.collect(), time.sleep(1)
    return idx_name, segm, segm_gc
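The soft segmentation above is produced the same way as the hard one, by indexing per-superpixel class probabilities with the SLIC map; a minimal synthetic illustration:

import numpy as np

slic = np.array([[0, 1],
                 [2, 2]])
proba = np.array([[0.9, 0.1],
                  [0.2, 0.8],
                  [0.5, 0.5]])                # one probability row per superpixel
segm_soft = proba[slic]                       # shape (2, 2, 2): per-pixel class probs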