Example #1
def load_image(path_img, img_type=TYPES_LOAD_IMAGE[0]):
    """ load image and annotation according chosen type

    :param str path_img:
    :param str img_type:
    :return ndarray:
    """
    path_img = tl_data.update_path(path_img)
    if not os.path.isfile(path_img):
        raise FileNotFoundError('missing: "%s"' % path_img)
    if img_type == '2d_split':
        img, _ = tl_data.load_img_double_band_split(path_img)
        if img.ndim != 2:
            raise ImageDimensionError('image dims: %r' % img.shape)
        # img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
        # if img.max() > 1:
        #     img = (img / 255.)
    elif img_type == '2d_rgb':
        img, _ = tl_data.load_image_2d(path_img)
        # if img.max() > 1:
        #     img = (img / 255.)
    elif img_type == '2d_segm':
        img, _ = tl_data.load_image_2d(path_img)
        if img.ndim == 3:
            img = img[:, :, 0]
        if ANNOT_RELABEL_SEQUENCE:
            img, _, _ = segmentation.relabel_sequential(img)
    else:
        logging.error('not supported loading img_type: %s', img_type)
        img = None
    return img
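A minimal usage sketch for load_image; the sample path is hypothetical and it assumes the module-level TYPES_LOAD_IMAGE tuple includes the '2d_rgb' mode handled above.

# hypothetical sample path; any RGB image on disk would do
path_sample = 'data_images/insitu7545.jpg'
img_rgb = load_image(path_sample, img_type='2d_rgb')
if img_rgb is not None:
    logging.info('loaded RGB image with shape %r', img_rgb.shape)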
Example #2
def export_cut_objects(df_row,
                       path_out,
                       padding,
                       use_mask=True,
                       bg_color=None):
    """ cut and expert objects in image according given segmentation

    :param df_row:
    :param str path_out: path for exporting image
    :param int padding: set padding around segmented object
    """
    annot, _ = tl_data.load_image_2d(df_row['path_1'])
    img, name = tl_data.load_image_2d(df_row['path_2'])
    if annot.shape[:2] != img.shape[:2]:
        raise ImageDimensionError('image sizes do not match %r vs %r' %
                                  (annot.shape, img.shape))

    uq_objects = np.unique(annot)
    if len(uq_objects) == 1:
        return

    for idx in uq_objects[1:]:
        img_new = tl_data.cut_object(img, annot == idx, padding, use_mask,
                                     bg_color)
        path_img = os.path.join(path_out, '%s_%i.png' % (name, idx))
        logging.debug('saving image "%s"', path_img)
        tl_data.io_imsave(path_img, img_new)
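A hedged usage sketch for export_cut_objects, assuming a small table with 'path_1' (annotation) and 'path_2' (image) columns; the listed files and the output folder are hypothetical.

import pandas as pd

# hypothetical input paths and output folder
df_paths = pd.DataFrame([{
    'path_1': 'annot/insitu7545.png',
    'path_2': 'image/insitu7545.jpg',
}])
for _, row in df_paths.iterrows():
    export_cut_objects(row, path_out='output/cut_objects', padding=20, use_mask=True)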
Example #3
def load_inputs(name):
    img, _ = load_image_2d(os.path.join(PATH_IMAGE, name + '.jpg'))
    seg, _ = load_image_2d(os.path.join(PATH_SEGM, name + '.png'))
    annot, _ = load_image_2d(os.path.join(PATH_ANNOT, name + '.png'))
    centers = pd.read_csv(os.path.join(PATH_CENTRE, name + '.csv'), index_col=0).values
    centers[:, [0, 1]] = centers[:, [1, 0]]

    slic = segment_slic_img2d(img, sp_size=25, relative_compact=0.3)
    return img, seg, slic, centers, annot
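A brief usage sketch, assuming the PATH_* constants point to existing folders and that 'insitu7545' (a sample name used in the tests below) is available in each of them.

# load all inputs for one sample; the name is assumed to exist in every folder
img, seg, slic, centers, annot = load_inputs('insitu7545')
print('image %r, %i superpixels, %i centres' % (img.shape, slic.max() + 1, len(centers)))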
Example #4
    def test_ellipse_fitting(self,
                             name='insitu7545',
                             table_prob=TABLE_FB_PROBA):
        """    """
        img, _ = tl_data.load_image_2d(os.path.join(PATH_IMAGES,
                                                    name + '.jpg'))
        seg, _ = tl_data.load_image_2d(os.path.join(PATH_SEGM, name + '.png'))
        annot, _ = tl_data.load_image_2d(
            os.path.join(PATH_ANNOT, name + '.png'))
        path_center = os.path.join(PATH_CENTRE, name + '.csv')
        centers = pd.read_csv(path_center, index_col=0).values[:, [1, 0]]

        slic, points_all, labels = seg_fit.get_slic_points_labels(
            seg, slic_size=20, slic_regul=0.3)
        weights = np.bincount(slic.ravel())
        points_centers = seg_fit.prepare_boundary_points_ray_edge(
            seg, centers, close_points=5)

        segm = np.zeros(seg.shape)
        ellipses, crits = [], []
        for i, points in enumerate(points_centers):
            model, _ = seg_fit.ransac_segm(points,
                                           seg_fit.EllipseModelSegm,
                                           points_all,
                                           weights,
                                           labels,
                                           table_prob,
                                           min_samples=0.6,
                                           residual_threshold=15,
                                           max_trials=50)
            if model is None:
                continue
            ellipses.append(model.params)
            crit = model.criterion(points_all, weights, labels, table_prob)
            crits.append(np.round(crit))
            logging.info('model params: %s', repr(model.params))
            logging.info('-> crit: %f', crit)
            c1, c2, h, w, phi = model.params
            rr, cc = tl_visu.ellipse(int(c1), int(c2), int(h), int(w), phi,
                                     segm.shape)
            segm[rr, cc] = (i + 1)

        if img.ndim == 3:
            img = img[:, :, 0]
        fig = tl_visu.figure_ellipse_fitting(img, seg, ellipses, centers,
                                             crits)
        fig_name = 'ellipse-fitting_%s.pdf' % name
        fig.savefig(os.path.join(PATH_OUTPUT, fig_name),
                    bbox_inches='tight',
                    pad_inches=0)
        plt.close(fig)

        score = adjusted_rand_score(annot.ravel(), segm.ravel())
        self.assertGreaterEqual(score, 0.5)
Example #5
def expert_visual(row, method_name, path_out, max_fig_size=10):
    """ export several visualisation segmentation and annotation

    :param dict row:
    :param str method_name:
    :param str path_out:
    :param int max_fig_size:
    """
    im_name = os.path.splitext(os.path.basename(row['path_image']))[0]
    img, _ = tl_data.load_image_2d(row['path_image'])
    # annot = tl_data.load_image(row['path_annot'])
    egg_segm, _ = tl_data.load_image_2d(row['path_egg-segm'])
    in_segm, _ = tl_data.load_image_2d(row['path_in-segm'])
    centers = tl_data.load_landmarks_csv(row['path_centers'])
    centers = np.array(tl_data.swap_coord_x_y(centers))

    fig_size = max_fig_size * np.array(img.shape[:2]) / float(np.max(
        img.shape))
    fig_name = '%s_%s.jpg' % (im_name, method_name)

    fig, ax = plt.subplots(figsize=fig_size[::-1])
    ax.imshow(img[:, :, 0], cmap=plt.cm.gray)
    ax.imshow(egg_segm, alpha=0.15)
    ax.contour(egg_segm, levels=np.unique(egg_segm), linewidths=(3, ))
    ax.plot(centers[:, 1], centers[:, 0], 'ob')
    tl_visu.figure_image_adjustment(fig, img.shape)
    path_fig = os.path.join(path_out, NAME_DIR_VISUAL_1, fig_name)
    fig.savefig(path_fig, bbox_inches='tight', pad_inches=0)
    plt.close(fig)

    fig, ax = plt.subplots(figsize=fig_size[::-1])
    # ax.imshow(np.max(in_segm) - in_segm, cmap=plt.cm.gray)
    ax.imshow(LUT_COLOR[in_segm], vmin=0., vmax=1., alpha=0.5)
    ax.contour(in_segm, levels=np.unique(in_segm), colors='k')
    ax.imshow(egg_segm, alpha=0.3)
    ax.contour(egg_segm, levels=np.unique(egg_segm), linewidths=(5, ))
    ax.plot(centers[:, 1], centers[:, 0], 'or')
    tl_visu.figure_image_adjustment(fig, img.shape)
    path_fig = os.path.join(path_out, NAME_DIR_VISUAL_2, fig_name)
    fig.savefig(path_fig, bbox_inches='tight', pad_inches=0)
    plt.close(fig)

    fig, ax = plt.subplots(figsize=fig_size[::-1])
    ax.imshow(img[:, :, 0], cmap=plt.cm.gray, alpha=1.)
    ax.contour(in_segm, levels=np.unique(in_segm), colors='w')
    ax.imshow(egg_segm, alpha=0.3)
    ax.contour(egg_segm, levels=np.unique(egg_segm), linewidths=(5, ))
    ax.plot(centers[:, 1], centers[:, 0], 'og')
    tl_visu.figure_image_adjustment(fig, img.shape)
    path_fig = os.path.join(path_out, NAME_DIR_VISUAL_3, fig_name)
    fig.savefig(path_fig, bbox_inches='tight', pad_inches=0)
    plt.close(fig)
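A hedged usage sketch for expert_visual; the row keys mirror those read above, all paths are hypothetical samples, and the NAME_DIR_VISUAL_* sub-folders are assumed to already exist under path_out.

# hypothetical input row; the referenced files are assumed to exist
row = {
    'path_image': 'image/insitu7545.jpg',
    'path_egg-segm': 'segm_eggs/insitu7545.png',
    'path_in-segm': 'segm/insitu7545.png',
    'path_centers': 'centers/insitu7545.csv',
}
expert_visual(row, method_name='RG2Sp', path_out='output/visual', max_fig_size=10)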
Example #6
    def test_region_growing_graphcut(self, name='insitu7545'):
        """    """
        if not os.path.exists(PATH_PKL_MODEL):
            self.test_shape_modeling()

        # file_model = pickle.load(open(PATH_PKL_MODEL, 'r'))
        npz_file = np.load(PATH_PKL_MODEL, allow_pickle=True)
        file_model = dict(npz_file[npz_file.files[0]].tolist())
        logging.info('loaded model: %r', file_model.keys())
        list_mean_cdf = file_model['cdfs']
        model = file_model['mix_model']

        img, _ = load_image_2d(os.path.join(PATH_IMAGE, name + '.jpg'))
        seg, _ = load_image_2d(os.path.join(PATH_SEGM, name + '.png'))
        annot, _ = load_image_2d(os.path.join(PATH_ANNOT, name + '.png'))
        centers = pd.read_csv(os.path.join(PATH_CENTRE, name + '.csv'), index_col=0).values
        centers[:, [0, 1]] = centers[:, [1, 0]]

        slic = segment_slic_img2d(img, sp_size=25, relative_compact=0.3)
        slic_prob_fg = compute_segm_prob_fg(slic, seg, LABELS_FG_PROB)

        dict_debug = {}
        labels_gc = region_growing_shape_slic_graphcut(
            slic,
            slic_prob_fg,
            centers, (model, list_mean_cdf),
            'set_cdfs',
            coef_shape=5.,
            coef_pairwise=15.,
            prob_label_trans=[0.1, 0.03],
            optim_global=False,
            nb_iter=65,
            allow_obj_swap=False,
            dict_thresholds=DEFAULT_RG2SP_THRESHOLDS,
            debug_history=dict_debug
        )

        segm_obj = labels_gc[slic]
        logging.info('show debug: %r', dict_debug.keys())

        for i in np.linspace(0, len(dict_debug['criteria']) - 1, 5):
            fig = figure_rg2sp_debug_complete(seg, slic, dict_debug, int(i), max_size=5)
            fig_name = 'RG2Sp_graph-cut_%s_debug-%03d.pdf' % (name, i)
            fig.savefig(os.path.join(PATH_OUTPUT, fig_name), bbox_inches='tight', pad_inches=0)
            plt.close(fig)

        _ = adjusted_rand_score(annot.ravel(), segm_obj.ravel())
        # self.assertGreaterEqual(score, 0.5)

        expert_segm(name, img, seg, segm_obj, annot, str_type='RG2Sp_graph-cut')
Example #7
def perform_orientation_swap(path_img,
                             path_out,
                             img_template,
                             swap_type=SWAP_CONDITION):
    """ compute the density in front adn back part of the egg rotate eventually
    we split the egg into thirds instead half because the middle part variate

    :param str path_img: path to input image
    :param str path_out: path to output folder
    :param ndarray img_template: template / mean image
    :param str swap_type: used swap condition
    """
    img, _ = tl_data.load_image_2d(path_img)
    # cut the same image
    img_size = img_template.shape
    img = img[:img_size[0], :img_size[1]]

    if swap_type == 'cc':
        b_swap = condition_swap_correl(img, img_template)
    else:
        b_swap = condition_swap_density(img)

    if b_swap:
        img = img[::-1, ::-1, :]

    path_img = os.path.join(path_out, os.path.basename(path_img))
    tl_data.export_image(path_img, img)
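A minimal usage sketch, assuming a mean/template image was computed in an earlier step and saved to the hypothetical path below.

# hypothetical template, e.g. the mean of already aligned egg images
img_mean, _ = tl_data.load_image_2d('output/mean_image.png')
perform_orientation_swap('image/insitu7545.jpg', 'output/oriented',
                         img_template=img_mean, swap_type='cc')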
Example #8
def extract_ellipse_object(idx_row, path_images, path_out, norm_size):
    """ cut the image selection according ellipse parameters
    and scale it into given size to have all image in the end the same sizes

    :param (int, row) idx_row: index and row with ellipse parameters
    :param str path_images: path to the image folder
    :param str path_out: path to output folder
    :param (int, int) norm_size: output image size
    """
    _, row = idx_row
    # select image with this name and any extension
    list_imgs = glob.glob(os.path.join(path_images, row['image_name'] + '.*'))
    path_img = sorted(list_imgs)[0]
    img, _ = tl_data.load_image_2d(path_img)

    # create mask according to chosen ellipse
    ell_params = row[COLUMNS_ELLIPSE].tolist()
    mask = ell_fit.add_overlap_ellipse(np.zeros(img.shape[:2], dtype=int),
                                       ell_params, 1)

    # cut the particular image
    img_cut = tl_data.cut_object(img, mask, 0, use_mask=True, bg_color=None)

    # scaling according to the normal size
    img_norm = transform.resize(img_cut, norm_size)

    path_img = os.path.join(path_out, os.path.basename(path_img))
    tl_data.export_image(path_img, img_norm)
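A hedged usage sketch, assuming a CSV of fitted ellipses with an 'image_name' column plus the COLUMNS_ELLIPSE parameters, which is the table layout implied by the function above.

import pandas as pd

# hypothetical CSV produced by a previous ellipse-fitting step
df_ellipses = pd.read_csv('output/ellipse_fitting.csv', index_col=0)
for idx_row in df_ellipses.iterrows():
    extract_ellipse_object(idx_row, path_images='image',
                           path_out='output/ellipse_cut', norm_size=(300, 200))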
Example #9
def visualise_overlap(
    path_img,
    path_seg,
    path_out,
    b_img_scale=BOOL_IMAGE_RESCALE_INTENSITY,
    b_img_contour=BOOL_SAVE_IMAGE_CONTOUR,
    b_relabel=BOOL_ANNOT_RELABEL,
    segm_alpha=MIDDLE_ALPHA_OVERLAP,
):
    img, _ = tl_data.load_image_2d(path_img)
    seg, _ = tl_data.load_image_2d(path_seg)

    # normalise alpha in range (0, 1)
    segm_alpha = tl_visu.norm_aplha(segm_alpha)

    if b_relabel:
        seg, _, _ = segmentation.relabel_sequential(seg.copy())

    if img.ndim == 2:  # for gray images of ovary
        img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)

    if b_img_scale:
        p_low, p_high = np.percentile(img, q=(3, 98))
        # plt.imshow(255 - img, cmap='Greys')
        img = exposure.rescale_intensity(img,
                                         in_range=(p_low, p_high),
                                         out_range='uint8')

    if b_img_contour:
        path_im_visu = os.path.splitext(path_out)[0] + '_contour.png'
        img_contour = segmentation.mark_boundaries(img[:, :, :3],
                                                   seg,
                                                   color=COLOR_CONTOUR,
                                                   mode='subpixel')
        plt.imsave(path_im_visu, img_contour)
    # else:  # for colour images of disc
    #     mask = (np.sum(img, axis=2) == 0)
    #     img[mask] = [255, 255, 255]

    fig = tl_visu.figure_image_segm_results(img,
                                            seg,
                                            SIZE_SUB_FIGURE,
                                            mid_labels_alpha=segm_alpha,
                                            mid_image_gray=MIDDLE_IMAGE_GRAY)
    fig.savefig(path_out)
    plt.close(fig)
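A brief usage sketch with hypothetical input paths; the module-level defaults for rescaling, contour export and relabelling are kept.

# hypothetical image / segmentation pair; both files are assumed to exist
visualise_overlap('image/insitu7545.jpg',
                  'segm/insitu7545.png',
                  'output/overlap/insitu7545.png')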
Example #10
    def test_shape_modeling(self, dir_annot=PATH_ANNOT):
        """    """
        list_paths = sorted(glob.glob(os.path.join(dir_annot, '*.png')))
        logging.info('nb images: %i SAMPLES: %r', len(list_paths),
                     [os.path.basename(p) for p in list_paths[:5]])
        list_segms = []
        for path_seg in list_paths:
            seg, _ = load_image_2d(path_seg)
            list_segms.append(seg)

        list_rays, _ = compute_object_shapes(list_segms,
                                             ray_step=25,
                                             smooth_coef=1,
                                             interp_order='spline')
        logging.info('nb eggs: %i nb rays: %i', len(list_rays),
                     len(list_rays[0]))

        model, list_mean_cdf = transform_rays_model_sets_mean_cdf_mixture(
            list_rays, 2)

        np.savez(PATH_PKL_MODEL,
                 data={
                     'name': 'set_cdfs',
                     'cdfs': list_mean_cdf,
                     'mix_model': model
                 })
        # with open(PATH_PKL_MODEL, 'w') as fp:
        #     pickle.dump({'name': 'set_cdfs',
        #                  'cdfs': list_mean_cdf,
        #                  'mix_model': model}, fp)
        self.assertTrue(os.path.exists(PATH_PKL_MODEL))

        max_len = max([np.asarray(mc[1]).shape[1] for mc in list_mean_cdf])

        fig, axarr = plt.subplots(nrows=len(list_mean_cdf),
                                  ncols=2,
                                  figsize=(12, 3.5 * len(list_mean_cdf)))
        for i, (_, list_cdf) in enumerate(list_mean_cdf):
            cdist = np.zeros((len(list_cdf), max_len))
            cdist[:, :len(list_cdf[0])] = np.array(list_cdf)
            axarr[i, 0].imshow(cdist, aspect='auto')
            axarr[i, 0].set(title='Inverse cumulative distribution',
                            ylabel='Ray steps',
                            xlabel='Distance [px]',
                            xlim=[0, max_len])
            axarr[i, 1].set_title('Reconstructions')
            axarr[i, 1].imshow(compute_prior_map(cdist, step=10))

        fig.savefig(os.path.join(PATH_OUTPUT, 'RG2Sp_shape-modeling.pdf'),
                    bbox_inches='tight',
                    pad_inches=0)
        plt.close(fig)
Example #11
def compute_metrics(row):
    """ load segmentation and compute similarity metrics

    :param dict row: row with 'path_annot' and 'path_egg-segm' paths
    :return {str: float}: dictionary of evaluation metrics
    """
    logging.debug('loading annot "%s"\n and segm "%s"', row['path_annot'],
                  row['path_egg-segm'])
    annot, _ = tl_data.load_image_2d(row['path_annot'])
    segm, _ = tl_data.load_image_2d(row['path_egg-segm'])
    if annot.shape != segm.shape:
        raise ImageDimensionError('dimensions do not match %r - %r' %
                                  (annot.shape, segm.shape))
    jacobs = []
    segm = seg_lbs.relabel_max_overlap_unique(annot, segm, keep_bg=True)
    for lb in np.unique(annot)[1:]:
        annot_obj = (annot == lb)
        segm_obj = (segm == lb)
        # label_hist = seg_lb.histogram_regions_labels_counts(segm, annot_obj)
        # segm_obj = np.argmax(label_hist, axis=1)[segm]
        sum_or = np.sum(np.logical_or(annot_obj, segm_obj))
        jaccoby = np.sum(np.logical_and(annot_obj, segm_obj)) / float(sum_or)
        jacobs.append(jaccoby)
    if not jacobs:
        jacobs.append(0)

    # avg_weight = 'samples' if len(np.unique(annot)) > 2 else 'binary'
    y_true, y_pred = annot.ravel(), segm.ravel()
    dict_eval = {
        'name': os.path.basename(row['path_annot']),
        'ARS': metrics.adjusted_rand_score(y_true, y_pred),
        'Jaccard': np.mean(jacobs),
        'f1': metrics.f1_score(y_true, y_pred, average='micro'),
        'accuracy': metrics.accuracy_score(y_true, y_pred),
        'precision': metrics.precision_score(y_true, y_pred, average='micro'),
        'recall': metrics.recall_score(y_true, y_pred, average='micro'),
    }

    return dict_eval
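A hedged usage sketch that evaluates a (hypothetical) results table with the 'path_annot' and 'path_egg-segm' columns read above and aggregates the metrics.

import pandas as pd

# hypothetical results table; the listed files are assumed to exist
df_results = pd.DataFrame([{
    'path_annot': 'annot/insitu7545.png',
    'path_egg-segm': 'segm_eggs/insitu7545.png',
}])
df_eval = pd.DataFrame([compute_metrics(row) for _, row in df_results.iterrows()])
print(df_eval[['ARS', 'Jaccard', 'f1', 'accuracy']].mean())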