Example #1
def prepare_boundary_points_close(seg, centers, sp_size=25, rltv_compact=0.3):
    """ extract some point around foreground boundaries

    :param ndarray seg: input segmentation
    :param [(int, int)] centers: list of centers
    :return [ndarray]:

    >>> seg = np.zeros((100, 200), dtype=int)
    >>> ell_params = 50, 100, 40, 60, np.deg2rad(30)
    >>> seg = add_overlap_ellipse(seg, ell_params, 1)
    >>> pts = prepare_boundary_points_close(seg, [(40, 90)])
    >>> sorted(np.round(pts).tolist())  # doctest: +NORMALIZE_WHITESPACE
    [[[6, 85], [8, 150], [16, 109], [27, 139], [32, 77], [36, 41], [34, 177],
    [59, 161], [54, 135], [67, 62], [64, 33], [84, 150], [91, 48], [92, 118]]]
    """
    slic = seg_spx.segment_slic_img2d(seg / float(seg.max()), sp_size=sp_size,
                                      rltv_compact=rltv_compact)
    points_all = filter_boundary_points(seg, slic)

    dists = spatial.distance.cdist(points_all, centers, metric='euclidean')
    close_center = np.argmin(dists, axis=1)

    points_centers = []
    for i in range(int(close_center.max() + 1)):
        points = points_all[close_center == i]
        points_centers.append(points)
    return points_centers
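
A minimal usage sketch mirroring the doctest above; it assumes numpy is imported as np and that add_overlap_ellipse is available from the surrounding project, exactly as in the doctest.

import numpy as np

# synthetic segmentation with a single elliptical object, as in the doctest
seg = np.zeros((100, 200), dtype=int)
seg = add_overlap_ellipse(seg, (50, 100, 40, 60, np.deg2rad(30)), 1)
# boundary points grouped by the closest of the given centers
points_per_center = prepare_boundary_points_close(seg, [(40, 90)])
print(len(points_per_center), points_per_center[0].shape)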
Example #2
def compute_boundary_distance(idx_row, params, path_out=''):
    """ compute nearest distance between two segmentation contours

    :param (int, str) idx_row:
    :param {} params:
    :param str path_out:
    :return (str, float):
    """
    _, row = idx_row
    name = os.path.splitext(os.path.basename(row['path_image']))[0]
    img = load_image(row['path_image'], params['img_type'])
    segm = load_image(row['path_segm'], 'segm')

    logging.debug('segment SLIC...')
    slic = seg_spx.segment_slic_img2d(img, params['slic_size'],
                                      params['slic_regul'],
                                      bool(params['slico']))
    _, dists = seg_lbs.compute_boundary_distances(segm, slic)

    if os.path.isdir(path_out):
        logging.debug('visualise results...')
        fig = tl_visu.figure_segm_boundary_dist(segm, slic)
        fig.savefig(os.path.join(path_out, name + '.jpg'))

    return name, np.mean(dists)
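
An illustrative call with hedged inputs: the paths and parameter values below are placeholders rather than values from the project, and pandas is assumed to be imported as pd.

import pandas as pd

# hypothetical paths and parameter values, for illustration only
row = pd.Series({'path_image': 'images/sample.png',
                 'path_segm': 'segms/sample.png'})
params = {'img_type': '2d_gray', 'slic_size': 20, 'slic_regul': 0.2,
          'slico': False}
name, mean_dist = compute_boundary_distance((0, row), params, path_out='')
print(name, mean_dist)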
Example #3
def load_inputs(name):
    img, _ = tl_io.load_image_2d(os.path.join(PATH_IMAGE, name + '.jpg'))
    seg, _ = tl_io.load_image_2d(os.path.join(PATH_SEGM, name + '.png'))
    annot, _ = tl_io.load_image_2d(os.path.join(PATH_ANNOT, name + '.png'))
    centers = pd.read_csv(os.path.join(PATH_CENTRE, name + '.csv'),
                          index_col=0).values
    centers[:, [0, 1]] = centers[:, [1, 0]]

    slic = seg_spx.segment_slic_img2d(img, sp_size=25, rltv_compact=0.3)
    return img, seg, slic, centers, annot
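
Usage is a single call; the sample name below reuses the one from Example #12 and assumes the PATH_* constants point at existing sample data.

# assumes PATH_IMAGE / PATH_SEGM / PATH_ANNOT / PATH_CENTRE contain this sample
img, seg, slic, centers, annot = load_inputs('insitu7545')
print(img.shape, slic.max() + 1, len(centers))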
Example #4
def load_image_annot_compute_features_labels(idx_row,
                                             params,
                                             show_debug_imgs=SHOW_DEBUG_IMAGES
                                             ):
    """ load image and annotation, and compute superpixel features and labels

    :param (int, {...}) idx_row: row from table with paths
    :param {str: ...} params: segmentation parameters
    :param bool show_debug_imgs: whether to show debug images
    :return (...):
    """
    def path_out_img(params, dir_name, name):
        return os.path.join(params['path_exp'], dir_name, name + '.png')

    idx, row = idx_row
    idx_name = get_idx_name(idx, row['path_image'])
    img = load_image(row['path_image'], params['img_type'])
    annot = load_image(row['path_annot'], 'segm')
    logging.debug('.. processing: %s', idx_name)
    assert img.shape[:2] == annot.shape[:2], \
        'size mismatch between image %s and annotation %s for "%s" - "%s"' % \
        (repr(img.shape), repr(annot.shape), row['path_image'],
         row['path_annot'])
    if show_debug_imgs:
        plt.imsave(path_out_img(params, FOLDER_IMAGE, idx_name),
                   img,
                   cmap=plt.cm.gray)
        plt.imsave(path_out_img(params, FOLDER_ANNOT, idx_name), annot)

    # duplicate gray band to be as rgb
    # if img.ndim == 2:
    #     img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
    slic = seg_spx.segment_slic_img2d(img,
                                      sp_size=params['slic_size'],
                                      rltv_compact=params['slic_regul'])
    img = seg_pipe.convert_img_color_space(img, params.get('clr_space', 'rgb'))
    logging.debug('computed SLIC with %i labels', slic.max())
    if show_debug_imgs:
        img_slic = segmentation.mark_boundaries(img / float(img.max()),
                                                slic,
                                                color=(1, 0, 0),
                                                mode='subpixel')
        plt.imsave(path_out_img(params, FOLDER_SLIC, idx_name), img_slic)
    features, ft_names = seg_fts.compute_selected_features_img2d(
        img, slic, params['features'])

    label_hist = seg_label.histogram_regions_labels_norm(slic, annot)
    labels = np.argmax(label_hist, axis=1)
    slic_annot = labels[slic]
    if show_debug_imgs:
        plt.imsave(path_out_img(params, FOLDER_SLIC_ANNOT, idx_name),
                   slic_annot)
    return idx_name, img, annot, slic, features, labels, label_hist, ft_names
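
A hedged usage sketch: the row paths are placeholders and the feature dictionary is an assumption (reusing the FTS_SET_SIMPLE set referenced in Example #10); debug images are disabled so no output folders are needed.

# hypothetical row and parameters, for illustration only
row = {'path_image': 'images/sample.png', 'path_annot': 'annots/sample.png'}
params = {'img_type': '2d_gray', 'slic_size': 30, 'slic_regul': 0.2,
          'features': FTS_SET_SIMPLE}  # assumed feature set, see Example #10
outputs = load_image_annot_compute_features_labels((0, row), params,
                                                   show_debug_imgs=False)
idx_name, img, annot, slic, features, labels, label_hist, ft_names = outputs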
Example #5
    def test_show_image_features_clr2d(self):
        img = d_spl.load_sample_image(d_spl.IMAGE_LENNA)
        img = transform.resize(img, (128, 128))
        slic = seg_spx.segment_slic_img2d(img, sp_size=10, rltv_compact=0.2)

        features, names = seg_fts.compute_selected_features_color2d(
            img, slic, seg_fts.FEATURES_SET_ALL)

        path_dir = os.path.join(PATH_OUTPUT, 'test_image_rgb2d_features')
        if not os.path.exists(path_dir):
            os.mkdir(path_dir)

        for i in range(features.shape[1]):
            fts = features[:, i]
            im_fts = fts[slic]
            plt.imsave(os.path.join(path_dir, names[i] + '.png'), im_fts)
Example #6
def get_slic_points_labels(segm, img=None, slic_size=20, slic_regul=0.1):
    """ run SLIC on image or supepixels and return superpixels, their centers
    and also lebels (label from segmentation in position of superpixel centre)

    :param ndarray segm:
    :param ndarray img:
    :param int slic_size: superpixel size
    :param float slic_regul: regularisation in range (0, 1)
    :return:
    """
    if img is None:
        img = segm / float(segm.max())
    slic = seg_spx.segment_slic_img2d(img, sp_size=slic_size,
                                      rltv_compact=slic_regul)
    slic_centers = np.array(seg_spx.superpixel_centers(slic)).astype(int)
    labels = segm[slic_centers[:, 0], slic_centers[:, 1]]
    return slic, slic_centers, labels
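
A self-contained sketch on a synthetic segmentation; only numpy (imported as np) is assumed besides the function itself.

import numpy as np

# synthetic two-class segmentation; with img=None the normalised segmentation
# itself is fed to SLIC
segm = np.zeros((80, 120), dtype=int)
segm[20:60, 30:90] = 1
slic, slic_centers, labels = get_slic_points_labels(segm, slic_size=15,
                                                    slic_regul=0.2)
print(slic.max() + 1, slic_centers.shape, np.bincount(labels))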
Example #7
    def test_general(self):
        slic = seg_spx.segment_slic_img2d(self.img,
                                          sp_size=15,
                                          rltv_compact=0.2)

        logging.debug(np.max(slic))

        vertices, edges = seg_spx.make_graph_segm_connect2d_conn4(slic)
        logging.debug(repr(vertices))
        logging.debug(len(edges))
        logging.debug(repr(edges))

        fig, axarr = plt.subplots(ncols=2)
        axarr[0].imshow(self.img)
        axarr[1].imshow(slic, cmap=plt.cm.jet)
        fig.savefig(os.path.join(PATH_OUTPUT, 'test_superpixels.png'))
        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
            plt.show()
        plt.close(fig)
Example #8
def estim_points_compute_features(name, img, segm, params):
    """ determine points (center candidates) using SLIC
    and compute a feature vector (with feature names) for each

    :param str name:
    :param ndarray img:
    :param ndarray segm:
    :param {str: any} params:
    :return str, np.array, [(int, int)], [[float]], [str]:
    """
    # superpixels on image
    assert img.shape[:2] == segm.shape[:2], \
        'shapes: %s : %s' % (repr(img.shape), repr(segm.shape))
    slic = seg_spx.segment_slic_img2d(img, params['slic_size'],
                                      params['slic_regul'])
    slic_centers = seg_spx.superpixel_centers(slic)
    # slic_edges = seg_spx.make_graph_segm_connect2d_conn4(slic)

    features, feature_names = compute_points_features(segm, slic_centers,
                                                      params)

    return name, slic, slic_centers, features, feature_names
Example #9
    def test_count_transitions_segment(self):
        img = d_spl.load_sample_image(d_spl.IMAGE_DROSOPHILA_OVARY_2D)[:, :, 0]

        annot = d_spl.load_sample_image(d_spl.ANNOT_DROSOPHILA_OVARY_2D)
        annot = annot.astype(int)

        slic = seg_spx.segment_slic_img2d(img, sp_size=15, rltv_compact=0.2)
        label_hist = seg_lb.histogram_regions_labels_norm(slic, annot)
        labels = np.argmax(label_hist, axis=1)
        trans = seg_gc.count_label_transitions_connected_segments({'a': slic},
                                                                  {'a': labels})
        path_csv = os.path.join(PATH_OUTPUT, 'labels_transitions.csv')
        pd.DataFrame(trans).to_csv(path_csv)
        gc_regul = seg_gc.compute_pairwise_cost_from_transitions(trans, 10.)

        np.random.seed(0)
        features = np.tile(labels, (5, 1)).T.astype(float)
        features += np.random.random(features.shape) - 0.5

        gmm = seg_gc.estim_class_model_gmm(features, 4)
        proba = gmm.predict_proba(features)

        seg_gc.segment_graph_cut_general(slic, proba, gc_regul)
Example #10
def compute_color2d_superpixels_features(image,
                                         clr_space='rgb',
                                         sp_size=30,
                                         sp_regul=0.2,
                                         dict_features=FTS_SET_SIMPLE,
                                         fts_norm=True):
    """ segment image into superpixels and estimate features per superpixel

    :param ndarray image: input RGB image
    :param str clr_space: chose the color space
    :param int sp_size: initial size of a superpixel(meaning edge lenght)
    :param float sp_regul: regularisation in range(0;1) where "0" gives elastic
           and "1" nearly square segments
    :param {str: [str]} dict_features: list of features to be extracted
    :param bool fts_norm: weather nomalise features
    :return [[int]], [[floats]]: superpixels and related of features
    """
    assert sp_regul > 0., 'slic. regularisation must be positive'
    logging.debug('run Superpixel clustering.')
    slic = seg_sp.segment_slic_img2d(image,
                                     sp_size=sp_size,
                                     rltv_compact=sp_regul)
    # plt.figure(), plt.imshow(slic)

    logging.debug('extract slic/superpixels features.')
    image = convert_img_color_space(image, clr_space)
    features, _ = seg_fts.compute_selected_features_img2d(
        image, slic, dict_features)
    logging.debug('list of features RAW: %s', repr(features.shape))
    features[np.isnan(features)] = 0

    if fts_norm:
        logging.debug('norm all features.')
        features, _ = seg_fts.norm_features(features)
        logging.debug('list of features NORM: %s', repr(features.shape))
    return slic, features
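
A quick sketch on a random RGB image using the default feature set from the signature; numpy is assumed to be imported as np.

import numpy as np

# random RGB image, for illustration only
image = np.random.random((128, 128, 3))
slic, features = compute_color2d_superpixels_features(image, clr_space='rgb',
                                                      sp_size=20, sp_regul=0.2)
print('superpixels: %i, features: %s' % (slic.max() + 1, repr(features.shape)))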
Example #11
def image_segmentation(idx_row, params, debug_export=DEBUG_EXPORT):
    """ prepare inputs (segmentation, centres) for an image and perform
    segmentation with each of the configured segmentation methods

    :param (int, str) idx_row: input image and centres
    :param {str: ...} params: segmentation parameters
    :return str: image name
    """
    _, row_path = idx_row
    for k in dict(row_path):
        if isinstance(k, str) and k.startswith('path_'):
            row_path[k] = tl_data.update_path(row_path[k], absolute=True)
    logging.debug('segmenting image: "%s"', row_path['path_image'])
    name = os.path.splitext(os.path.basename(row_path['path_image']))[0]

    img = load_image(row_path['path_image'])
    # make the image like RGB
    img_rgb = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
    seg = load_image(row_path['path_segm'], 'segm')
    assert img_rgb.shape[:2] == seg.shape, \
        'image %s and segm %s do not match' \
         % (repr(img_rgb.shape[:2]), repr(seg.shape))
    if not os.path.isfile(row_path['path_centers']):
        logging.warning('no center was detected for "%s"', name)
        return name
    centers = tl_data.load_landmarks_csv(row_path['path_centers'])
    centers = tl_data.swap_coord_x_y(centers)
    if len(centers) == 0:
        logging.warning('no center was detected for "%s"', name)
        return name
    # img = seg / float(seg.max())
    slic = seg_spx.segment_slic_img2d(img_rgb,
                                      sp_size=params['slic_size'],
                                      rltv_compact=params['slic_regul'])

    path_segm = os.path.join(params['path_exp'], 'input', name + '.png')
    export_draw_image_segm(path_segm, img_rgb, segm_obj=seg, centers=centers)

    seg_simple = simplify_segm_3cls(seg)
    path_segm = os.path.join(params['path_exp'], 'simple', name + '.png')
    export_draw_image_segm(path_segm, seg_simple - 1.)

    dict_segment = create_dict_segmentation(params, slic, seg, img, centers)

    image_name = name + '.png'
    centre_name = name + '.csv'

    # iterate over segmentation methods and perform segmentation on this image
    for method in dict_segment:
        (fn, args) = dict_segment[method]
        logging.debug(' -> %s on "%s"', method, name)
        path_dir = os.path.join(params['path_exp'], method)  # n.split('_')[0]
        path_segm = os.path.join(path_dir, image_name)
        path_centre = os.path.join(path_dir + DIR_CENTRE_POSIX, centre_name)
        path_fig = os.path.join(path_dir + DIR_VISUAL_POSIX, image_name)
        path_debug = os.path.join(path_dir + DIR_DEBUG_POSIX, name)
        # assuming that segmentation may fail
        try:
            t = time.time()
            if debug_export and 'rg2sp' in method:
                os.mkdir(path_debug)
                segm_obj, centers, dict_export = fn(*args,
                                                    debug_export=path_debug)
            else:
                segm_obj, centers, dict_export = fn(*args)

            # also export ellipse params here or inside the segm fn
            if dict_export is not None:
                for k in dict_export:
                    export_partial(k, dict_export[k], path_dir, name)

            logging.info('running time of %s on image "%s" is %d s',
                         repr(fn.__name__), image_name,
                         time.time() - t)
            Image.fromarray(segm_obj.astype(np.uint8)).save(path_segm)
            export_draw_image_segm(path_fig, img_rgb, seg, segm_obj, centers)
            # export also centers
            centers = tl_data.swap_coord_x_y(centers)
            tl_data.save_landmarks_csv(path_centre, centers)
        except Exception:
            logging.error('segment fail for "%s" via %s with \n %s', name,
                          method, traceback.format_exc())

    return name
Example #12
    def test_region_growing_graphcut(self, name='insitu7545'):
        """    """
        if not os.path.exists(PATH_PKL_MODEL):
            self.test_shape_modeling()

        # file_model = pickle.load(open(PATH_PKL_MODEL, 'r'))
        npz_file = np.load(PATH_PKL_MODEL)
        file_model = dict(npz_file[npz_file.files[0]].tolist())
        logging.info('loaded model: %s', repr(file_model.keys()))
        list_mean_cdf = file_model['cdfs']
        model = file_model['mix_model']

        img, _ = tl_io.load_image_2d(os.path.join(PATH_IMAGE, name + '.jpg'))
        seg, _ = tl_io.load_image_2d(os.path.join(PATH_SEGM, name + '.png'))
        annot, _ = tl_io.load_image_2d(os.path.join(PATH_ANNOT, name + '.png'))
        centers = pd.read_csv(
            os.path.join(PATH_CENTRE, name + '.csv'), index_col=0).values
        centers[:, [0, 1]] = centers[:, [1, 0]]

        slic = seg_spx.segment_slic_img2d(img, sp_size=25, rltv_compact=0.3)

        dict_debug = {}
        labels_gc = seg_rg.region_growing_shape_slic_graphcut(
            seg,
            slic,
            centers, (model, list_mean_cdf),
            'set_cdfs',
            LABELS_FG_PROB,
            coef_shape=5.,
            coef_pairwise=15.,
            prob_label_trans=[0.1, 0.03],
            optim_global=False,
            nb_iter=65,
            allow_obj_swap=False,
            dict_thresholds=DEFAULT_RG2SP_THRESHOLDS,
            dict_debug_history=dict_debug)

        segm_obj = labels_gc[slic]
        logging.info('debug: %s', repr(dict_debug.keys()))

        for i in np.linspace(0, len(dict_debug['energy']) - 1, 5):
            fig = tl_visu.figure_rg2sp_debug_complete(seg,
                                                      slic,
                                                      dict_debug,
                                                      int(i),
                                                      max_size=5)
            fig_name = 'RG2Sp_graph-cut_%s_debug-%03d.pdf' % (name, i)
            fig.savefig(os.path.join(PATH_OUTPUT, fig_name),
                        bbox_inches='tight',
                        pad_inches=0)
            plt.close(fig)

        score = adjusted_rand_score(annot.ravel(), segm_obj.ravel())
        self.assertGreaterEqual(score, 0.5)

        expert_segm(name,
                    img,
                    seg,
                    segm_obj,
                    annot,
                    str_type='RG2Sp_graph-cut')
Example #13
def segment_image(imgs_idx_path, params, classif, path_out, path_visu=None):
    """ perform image segmentation on input image with given paramters
    and trained classifier, and save results

    :param (int, str) imgs_idx_path:
    :param {str: ...} params: segmentation parameters
    :param obj classif: trained classifier
    :param str path_out: path for output
    :param str path_visu: the existing patch means export also visualisation
    :return str, ndarray, ndarray:
    """
    idx, path_img = parse_imgs_idx_path(imgs_idx_path)
    logging.debug('segmenting image: "%s"', path_img)
    idx_name = get_idx_name(idx, path_img)
    img = load_image(path_img, params['img_type'])
    slic = seg_spx.segment_slic_img2d(img,
                                      sp_size=params['slic_size'],
                                      rltv_compact=params['slic_regul'])
    img = seg_pipe.convert_img_color_space(img, params.get('clr_space', 'rgb'))
    features, _ = seg_fts.compute_selected_features_img2d(
        img, slic, params['features'])
    labels = classif.predict(features)
    segm = labels[slic]
    path_img = os.path.join(path_out, idx_name + '.png')
    logging.debug('export segmentation: %s', path_img)
    img_seg = Image.fromarray(segm.astype(np.uint8))
    img_seg.convert('L').save(path_img)
    # io.imsave(path_img, segm)

    # plt.imsave(os.path.join(path_out, idx_name + '_rgb.png'), seg_pipe)
    if path_visu is not None and os.path.isdir(path_visu):
        export_draw_image_segm_contour(img, segm, path_visu, idx_name)

    try:  # in case some classifiers do not support predict_proba
        proba = classif.predict_proba(features)
        segm_soft = proba[slic]
        path_npz = os.path.join(path_out, idx_name + '.npz')
        np.savez_compressed(path_npz, segm_soft)
    except Exception:
        logging.warning('classifier %s does not support predict_proba(.)',
                        repr(classif))
        proba = None
        segm_soft = None

    # if probabilities were not estimated or GC regularisation is zero
    if proba is not None and params['gc_regul'] > 0:
        gc_regul = params['gc_regul']
        if params['gc_use_trans']:
            label_penalty = seg_gc.compute_pairwise_cost_from_transitions(
                params['label_transitions'])
            gc_regul = (gc_regul * label_penalty)
        labels_gc = seg_gc.segment_graph_cut_general(
            slic,
            proba,
            img,
            features,
            gc_regul,
            edge_type=params['gc_edge_type'])
        # labels_gc = seg_gc.segment_graph_cut_simple(slic, proba, gc_regul)
        segm_gc = labels_gc[slic]
        # relabel according to classifier classes
        segm_gc = classif.classes_[segm_gc]

        path_img = os.path.join(path_out, idx_name + '_gc.png')
        logging.debug('export segmentation: %s', path_img)
        img_seg_gc = Image.fromarray(segm_gc.astype(np.uint8))
        img_seg_gc.convert('L').save(path_img)
        # io.imsave(path_img, segm_gc)

        if path_visu is not None and os.path.isdir(path_visu):
            export_draw_image_segm_contour(img, segm_gc, path_visu, idx_name,
                                           '_gc')

            if SHOW_DEBUG_IMAGES:
                labels_map = np.argmax(proba, axis=1)
                plt.imsave(os.path.join(path_visu, idx_name + '_map.png'),
                           labels_map[slic])
                if segm_soft is not None:
                    for lb in range(segm_soft.shape[2]):
                        uc_name = idx_name + '_gc_unary-lb%i.png' % lb
                        plt.imsave(os.path.join(path_visu, uc_name),
                                   segm_soft[:, :, lb],
                                   vmin=0.,
                                   vmax=1.,
                                   cmap=plt.cm.Greens)
    else:
        segm_gc = np.zeros(segm.shape)
    # gc.collect(), time.sleep(1)
    return idx_name, segm, segm_gc