def load_image(path_img, img_type=TYPES_LOAD_IMAGE[0]):
    """ load image and annotation according chosen type

    :param str path_img:
    :param str img_type:
    :return ndarray:
    """
    path_img = tl_data.update_path(path_img)
    assert os.path.isfile(path_img), 'missing: "%s"' % path_img
    if img_type == '2d_split':
        img, _ = tl_data.load_img_double_band_split(path_img)
        assert img.ndim == 2, 'image dims: %s' % repr(img.shape)
        # img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
        # if img.max() > 1:
        #     img = (img / 255.)
    elif img_type == '2d_rgb':
        img, _ = tl_data.load_image_2d(path_img)
        # if img.max() > 1:
        #     img = (img / 255.)
    elif img_type == '2d_segm':
        img, _ = tl_data.load_image_2d(path_img)
        if img.ndim == 3:
            img = img[:, :, 0]
        if ANNOT_RELABEL_SEQUENCE:
            img, _, _ = segmentation.relabel_sequential(img)
    else:
        logging.error('not supported loading img_type: %s', img_type)
        img = None
    return img
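A minimal usage sketch of load_image; the file paths below are illustrative only and assume the helpers above are importable in the same module:

img_rgb = load_image('images/sample_egg.jpg', img_type='2d_rgb')
img_lbl = load_image('annotations/sample_egg.png', img_type='2d_segm')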
Example No. 2
def load_inputs(name):
    img, _ = tl_data.load_image_2d(os.path.join(PATH_IMAGE, name + '.jpg'))
    seg, _ = tl_data.load_image_2d(os.path.join(PATH_SEGM, name + '.png'))
    annot, _ = tl_data.load_image_2d(os.path.join(PATH_ANNOT, name + '.png'))
    centers = pd.read_csv(os.path.join(PATH_CENTRE, name + '.csv'),
                          index_col=0).values
    centers[:, [0, 1]] = centers[:, [1, 0]]

    slic = seg_spx.segment_slic_img2d(img, sp_size=25, rltv_compact=0.3)
    return img, seg, slic, centers, annot
    def test_ellipse_fitting(self,
                             name='insitu7545',
                             table_prob=TABLE_FB_PROBA):
        """    """
        img, _ = tl_data.load_image_2d(os.path.join(PATH_IMAGES,
                                                    name + '.jpg'))
        seg, _ = tl_data.load_image_2d(os.path.join(PATH_SEGM, name + '.png'))
        annot, _ = tl_data.load_image_2d(
            os.path.join(PATH_ANNOT, name + '.png'))
        path_center = os.path.join(PATH_CENTRE, name + '.csv')
        centers = pd.read_csv(path_center, index_col=0).values[:, [1, 0]]

        slic, points_all, labels = seg_fit.get_slic_points_labels(
            seg, slic_size=20, slic_regul=0.3)
        weights = np.bincount(slic.ravel())
        points_centers = seg_fit.prepare_boundary_points_ray_edge(
            seg, centers, close_points=5)

        segm = np.zeros(seg.shape)
        ellipses, crits = [], []
        for i, points in enumerate(points_centers):
            model, _ = seg_fit.ransac_segm(points,
                                           seg_fit.EllipseModelSegm,
                                           points_all,
                                           weights,
                                           labels,
                                           table_prob,
                                           min_samples=0.6,
                                           residual_threshold=15,
                                           max_trials=50)
            if model is None:
                continue
            ellipses.append(model.params)
            crit = model.criterion(points_all, weights, labels, table_prob)
            crits.append(np.round(crit))
            logging.info('model params: %s', repr(model.params))
            logging.info('-> crit: %f', crit)
            c1, c2, h, w, phi = model.params
            rr, cc = tl_visu.ellipse(int(c1), int(c2), int(h), int(w), phi,
                                     segm.shape)
            segm[rr, cc] = (i + 1)

        if img.ndim == 3:
            img = img[:, :, 0]
        fig = tl_visu.figure_ellipse_fitting(img, seg, ellipses, centers,
                                             crits)
        fig_name = 'ellipse-fitting_%s.pdf' % name
        fig.savefig(os.path.join(PATH_OUTPUT, fig_name),
                    bbox_inches='tight',
                    pad_inches=0)
        plt.close(fig)

        score = adjusted_rand_score(annot.ravel(), segm.ravel())
        self.assertGreaterEqual(score, 0.5)
Example No. 4
def expert_visual(row, method_name, path_out, max_fig_size=10):
    """ export several visualisation segmentation and annotation

    :param {str: ...} row:
    :param str method_name:
    :param str path_out:
    :param int max_fig_size:
    """
    im_name = os.path.splitext(os.path.basename(row['path_image']))[0]
    img, _ = tl_data.load_image_2d(row['path_image'])
    # annot = tl_data.load_image(row['path_annot'])
    egg_segm, _ = tl_data.load_image_2d(row['path_egg-segm'])
    in_segm, _ = tl_data.load_image_2d(row['path_in-segm'])
    centers = tl_data.load_landmarks_csv(row['path_centers'])
    centers = np.array(tl_data.swap_coord_x_y(centers))

    fig_size = max_fig_size * np.array(img.shape[:2]) / float(np.max(
        img.shape))
    fig_name = '%s_%s.jpg' % (im_name, method_name)

    fig, ax = plt.subplots(figsize=fig_size[::-1])
    ax.imshow(img[:, :, 0], cmap=plt.cm.gray)
    ax.imshow(egg_segm, alpha=0.15)
    ax.contour(egg_segm, levels=np.unique(egg_segm), linewidths=(3, ))
    ax.plot(centers[:, 1], centers[:, 0], 'ob')
    tl_visu.figure_image_adjustment(fig, img.shape)
    path_fig = os.path.join(path_out, NAME_DIR_VISUAL_1, fig_name)
    fig.savefig(path_fig, bbox_inches='tight', pad_inches=0)
    plt.close(fig)

    fig, ax = plt.subplots(figsize=fig_size[::-1])
    # ax.imshow(np.max(in_segm) - in_segm, cmap=plt.cm.gray)
    ax.imshow(LUT_COLOR[in_segm], vmin=0., vmax=1., alpha=0.5)
    ax.contour(in_segm, levels=np.unique(in_segm), colors='k')
    ax.imshow(egg_segm, alpha=0.3)
    ax.contour(egg_segm, levels=np.unique(egg_segm), linewidths=(5, ))
    ax.plot(centers[:, 1], centers[:, 0], 'or')
    tl_visu.figure_image_adjustment(fig, img.shape)
    path_fig = os.path.join(path_out, NAME_DIR_VISUAL_2, fig_name)
    fig.savefig(path_fig, bbox_inches='tight', pad_inches=0)
    plt.close(fig)

    fig, ax = plt.subplots(figsize=fig_size[::-1])
    ax.imshow(img[:, :, 0], cmap=plt.cm.gray, alpha=1.)
    ax.contour(in_segm, levels=np.unique(in_segm), colors='w')
    ax.imshow(egg_segm, alpha=0.3)
    ax.contour(egg_segm, levels=np.unique(egg_segm), linewidths=(5, ))
    ax.plot(centers[:, 1], centers[:, 0], 'og')
    tl_visu.figure_image_adjustment(fig, img.shape)
    path_fig = os.path.join(path_out, NAME_DIR_VISUAL_3, fig_name)
    fig.savefig(path_fig, bbox_inches='tight', pad_inches=0)
    plt.close(fig)
Example No. 5
def extract_ellipse_object(idx_row, path_images, path_out, norm_size):
    """ cut the image selection according ellipse parameters
    and scale it into given size to have all image in the end the same sizes

    :param (int, row) idx_row: index and row with ellipse parameters
    :param str path_images: path to the image folder
    :param str path_out: path to output folder
    :param (int, int) norm_size: output image size
    """
    _, row = idx_row
    # select image with this name and any extension
    list_imgs = glob.glob(os.path.join(path_images, row['image_name'] + '.*'))
    path_img = sorted(list_imgs)[0]
    img, _ = tl_data.load_image_2d(path_img)

    # create mask according to chosen ellipse
    ell_params = row[COLUMNS_ELLIPSE].tolist()
    mask = ell_fit.add_overlap_ellipse(np.zeros(img.shape[:2], dtype=int),
                                       ell_params, 1)

    # cut the particular image
    img_cut = tl_data.cut_object(img, mask, 0, use_mask=True, bg_color=None)

    # scaling according to the normal size
    img_norm = transform.resize(img_cut, norm_size)

    path_img = os.path.join(path_out, os.path.basename(path_img))
    tl_data.export_image(path_img, img_norm)
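For reference, a single-ellipse mask can also be rasterised with plain scikit-image; this is only a sketch of the idea behind ell_fit.add_overlap_ellipse and assumes the ellipse parameters are (centre row, centre column, vertical half-axis, horizontal half-axis, rotation in radians):

import numpy as np
from skimage.draw import ellipse as draw_ellipse

def ellipse_mask(shape, c_row, c_col, r_radius, c_radius, rotation=0.):
    """ rasterise one ellipse into a binary mask of the given image shape """
    mask = np.zeros(shape, dtype=int)
    rr, cc = draw_ellipse(int(c_row), int(c_col), int(r_radius), int(c_radius),
                          rotation=rotation, shape=shape)
    mask[rr, cc] = 1
    return mask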
def perform_orientation_swap(path_img, path_out, img_template,
                             swap_type=SWAP_CONDITION):
    """ compute the density in front adn back part of the egg rotate eventually
    we split the egg into thirds instead half because the middle part variate

    :param str path_img: path to input image
    :param str path_out: path to output folder
    :param ndarray img_template: template / mean image
    :param str swap_type: used swap condition
    """
    img, _ = tl_data.load_image_2d(path_img)
    # crop to the same size as the template image
    img_size = img_template.shape
    img = img[:img_size[0], :img_size[1]]

    if swap_type == 'cc':
        b_swap = condition_swap_correl(img, img_template)
    else:
        b_swap = condition_swap_density(img)

    if b_swap:
        img = img[::-1, ::-1, :]

    path_img = os.path.join(path_out, os.path.basename(path_img))
    tl_data.export_image(path_img, img)
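The swap conditions condition_swap_correl and condition_swap_density are defined elsewhere in the repository; as a purely illustrative sketch, a density-based condition could look roughly like this (assuming a single intensity channel and the thirds split described in the docstring):

import numpy as np

def _swap_by_density_sketch(img, channel=0):
    """ compare bright-pixel counts in the first and last third of the egg;
    suggest a swap when the front part is denser than the back (illustrative only) """
    part = img.shape[1] // 3
    band = img[..., channel]
    norm_val = np.mean(band[band > band.min()])
    val_left = np.sum(band[:, :part] > norm_val)
    val_right = np.sum(band[:, -part:] > norm_val)
    return val_left > val_right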
def export_visual(df_row, path_out, relabel=True):
    """ given visualisation of segmented image and annotation

    :param {str: ...} df_row:
    :param str path_out: path to the visualisation directory
    :param bool relabel: whether relabel segmentation as sequential
    """
    annot, _ = tl_data.load_image_2d(df_row['path_1'])
    segm, _ = tl_data.load_image_2d(df_row['path_2'])
    img = None
    if 'path_3' in df_row:
        img, _ = tl_data.load_image_2d(df_row['path_3'])
    if relabel:
        annot = relabel_sequential(annot)[0]
        segm = seg_lbs.relabel_max_overlap_unique(annot, segm)
    fig = seg_visu.figure_overlap_annot_segm_image(annot, segm, img)
    name = os.path.splitext(os.path.basename(df_row['path_1']))[0]
    logging.debug('>> exporting -> %s', name)
    fig.savefig(os.path.join(path_out, '%s.png' % name))
Example No. 8
def export_cut_objects(df_row, path_out, padding, use_mask=True, bg_color=None):
    """ cut and expert objects in image according given segmentation

    :param df_row:
    :param str path_out: path for exporting image
    :param int padding: set padding around segmented object
    """
    annot, _ = tl_data.load_image_2d(df_row['path_1'])
    img, name = tl_data.load_image_2d(df_row['path_2'])
    assert annot.shape[:2] == img.shape[:2], \
        'image sizes not match %s vs %s' % (repr(annot.shape), repr(img.shape))

    uq_objects = np.unique(annot)
    if len(uq_objects) == 1:
        return

    for idx in uq_objects[1:]:
        img_new = tl_data.cut_object(img, annot == idx, padding, use_mask, bg_color)
        path_img = os.path.join(path_out, '%s_%i.png' % (name, idx))
        logging.debug('saving image "%s"', path_img)
        tl_data.io_imsave(path_img, img_new)
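A hedged usage sketch for export_cut_objects; the dataframe columns follow the 'path_1' (annotation) and 'path_2' (image) convention used above, while the file paths themselves are placeholders:

import pandas as pd

df_paths = pd.DataFrame([{'path_1': 'annot/sample.png', 'path_2': 'images/sample.jpg'}])
for _, df_row in df_paths.iterrows():
    export_cut_objects(df_row, path_out='output/cut_objects', padding=20)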
Example No. 9
    def test_shape_modeling(self, dir_annot=PATH_ANNOT):
        """    """
        list_paths = sorted(glob.glob(os.path.join(dir_annot, '*.png')))
        logging.info('nb images: %i SAMPLES: %s', len(list_paths),
                     repr([os.path.basename(p) for p in list_paths[:5]]))
        list_segms = []
        for path_seg in list_paths:
            seg, _ = tl_data.load_image_2d(path_seg)
            list_segms.append(seg)

        list_rays, _ = seg_rg.compute_object_shapes(list_segms,
                                                    ray_step=25,
                                                    smooth_coef=1,
                                                    interp_order='spline')
        logging.info('nb eggs: %i nb rays: %i', len(list_rays),
                     len(list_rays[0]))

        model, list_mean_cdf = seg_rg.transform_rays_model_sets_mean_cdf_mixture(
            list_rays, 2)

        np.savez(PATH_PKL_MODEL,
                 data={
                     'name': 'set_cdfs',
                     'cdfs': list_mean_cdf,
                     'mix_model': model
                 })
        # with open(PATH_PKL_MODEL, 'w') as fp:
        #     pickle.dump({'name': 'set_cdfs',
        #                  'cdfs': list_mean_cdf,
        #                  'mix_model': model}, fp)
        self.assertTrue(os.path.exists(PATH_PKL_MODEL))

        max_len = max(
            [np.asarray(l_cdf).shape[1] for _, l_cdf in list_mean_cdf])

        fig, axarr = plt.subplots(nrows=len(list_mean_cdf),
                                  ncols=2,
                                  figsize=(12, 3.5 * len(list_mean_cdf)))
        for i, (_, list_cdf) in enumerate(list_mean_cdf):
            cdist = np.zeros((len(list_cdf), max_len))
            cdist[:, :len(list_cdf[0])] = np.array(list_cdf)
            axarr[i, 0].set_title('Inverse cumulative distribution')
            axarr[i, 0].imshow(cdist, aspect='auto')
            axarr[i, 0].set_xlim([0, max_len])
            axarr[i, 0].set_ylabel('Ray steps')
            axarr[i, 0].set_xlabel('Distance [px]')
            axarr[i, 1].set_title('Reconstructions')
            axarr[i, 1].imshow(compute_prior_map(cdist, step=10))

        fig.savefig(os.path.join(PATH_OUTPUT, 'RG2Sp_shape-modeling.pdf'),
                    bbox_inches='tight',
                    pad_inches=0)
Example No. 10
def compute_metrics(row):
    """ load segmentation and compute similarity metrics

    :param {str: ...} row: row with paths to the annotation and egg segmentation
    :return {str: float}: dictionary of metric names and values
    """
    logging.debug('loading annot "%s"\n and segm "%s"', row['path_annot'],
                  row['path_egg-segm'])
    annot, _ = tl_data.load_image_2d(row['path_annot'])
    segm, _ = tl_data.load_image_2d(row['path_egg-segm'])
    assert annot.shape == segm.shape, 'dimensions do not match %s - %s' % \
                                      (repr(annot.shape), repr(segm.shape))
    list_jacob = []
    segm = seg_lbs.relabel_max_overlap_unique(annot, segm, keep_bg=True)
    for lb in np.unique(annot)[1:]:
        annot_obj = (annot == lb)
        segm_obj = (segm == lb)
        # label_hist = seg_lb.histogram_regions_labels_counts(segm, annot_obj)
        # segm_obj = np.argmax(label_hist, axis=1)[segm]
        jaccard = np.sum(np.logical_and(annot_obj, segm_obj)) \
                  / float(np.sum(np.logical_or(annot_obj, segm_obj)))
        list_jacob.append(jaccard)
    if len(list_jacob) == 0:
        list_jacob.append(0)

    # avg_weight = 'samples' if len(np.unique(annot)) > 2 else 'binary'
    y_true, y_pred = annot.ravel(), segm.ravel()
    dict_eval = {
        'name': os.path.basename(row['path_annot']),
        'ARS': metrics.adjusted_rand_score(y_true, y_pred),
        'Jaccard': np.mean(list_jacob),
        'f1': metrics.f1_score(y_true, y_pred, average='micro'),
        'accuracy': metrics.accuracy_score(y_true, y_pred),
        'precision': metrics.precision_score(y_true, y_pred, average='micro'),
        'recall': metrics.recall_score(y_true, y_pred, average='micro'),
    }

    return dict_eval
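A hedged driver sketch for compute_metrics; the column names match the row fields used above and the file paths are placeholders:

import pandas as pd

df_paths = pd.DataFrame([
    {'path_annot': 'annot/sample.png', 'path_egg-segm': 'segm/sample.png'},
])
df_eval = pd.DataFrame([compute_metrics(row) for _, row in df_paths.iterrows()])
print(df_eval[['name', 'ARS', 'Jaccard', 'f1']])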
def visualise_overlap(path_img, path_seg, path_out,
                      b_img_scale=BOOL_IMAGE_RESCALE_INTENSITY,
                      b_img_contour=BOOL_SAVE_IMAGE_CONTOUR,
                      b_relabel=BOOL_ANNOT_RELABEL,
                      segm_alpha=MIDDLE_ALPHA_OVERLAP):
    img, _ = tl_data.load_image_2d(path_img)
    seg, _ = tl_data.load_image_2d(path_seg)

    # normalise alpha in range (0, 1)
    segm_alpha = tl_visu.norm_aplha(segm_alpha)

    if b_relabel:
        seg, _, _ = segmentation.relabel_sequential(seg)

    if img.ndim == 2:  # for gray images of ovary
        img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)

    if b_img_scale:
        p_low, p_high = np.percentile(img, q=(3, 98))
        # plt.imshow(255 - img, cmap='Greys')
        img = exposure.rescale_intensity(img, in_range=(p_low, p_high),
                                         out_range='uint8')

    if b_img_contour:
        path_im_visu = os.path.splitext(path_out)[0] + '_contour.png'
        img_contour = segmentation.mark_boundaries(img[:, :, :3], seg,
                                                   color=COLOR_CONTOUR,
                                                   mode='subpixel')
        plt.imsave(path_im_visu, img_contour)
    # else:  # for colour images of disc
    #     mask = (np.sum(img, axis=2) == 0)
    #     img[mask] = [255, 255, 255]

    fig = tl_visu.figure_image_segm_results(img, seg, SIZE_SUB_FIGURE,
                                            mid_labels_alpha=segm_alpha,
                                            mid_image_gray=MIDDLE_IMAGE_GRAY)
    fig.savefig(path_out)
    plt.close(fig)
Example No. 12
def perform_orientation_swap(path_img, path_out):
    """ compute the density in front adn back part of the egg rotate eventually
    we split the egg into thirds instead half because the middle part variate

    :param str path_img:
    :param str path_out:
    """
    img, _ = tl_data.load_image_2d(path_img)

    part = int(img.shape[1] / 3)
    sel_mask = img[:, :, IMAGE_CHANNEL] > np.min(img[:, :, IMAGE_CHANNEL])
    norm_val = np.mean(img[sel_mask, IMAGE_CHANNEL])
    val_left = np.sum(img[:, :part, IMAGE_CHANNEL] > norm_val)
    val_right = np.sum(img[:, -part:, IMAGE_CHANNEL] > norm_val)
    ratio = val_left / float(val_right)
    # ratio = STAT_FUNC(img[:, :half, IMAGE_CHANNEL]) \
    #         / float(STAT_FUNC(img[:, half:, IMAGE_CHANNEL]))

    if ratio > 1.:
        img = img[::-1, ::-1, :]

    path_img = os.path.join(path_out, os.path.basename(path_img))
    tl_data.export_image(path_img, img)
Example No. 13
    def test_region_growing_graphcut(self, name='insitu7545'):
        """    """
        if not os.path.exists(PATH_PKL_MODEL):
            self.test_shape_modeling()

        # file_model = pickle.load(open(PATH_PKL_MODEL, 'r'))
        npz_file = np.load(PATH_PKL_MODEL)
        file_model = dict(npz_file[npz_file.files[0]].tolist())
        logging.info('loaded model: %s', repr(file_model.keys()))
        list_mean_cdf = file_model['cdfs']
        model = file_model['mix_model']

        img, _ = tl_data.load_image_2d(os.path.join(PATH_IMAGE, name + '.jpg'))
        seg, _ = tl_data.load_image_2d(os.path.join(PATH_SEGM, name + '.png'))
        annot, _ = tl_data.load_image_2d(
            os.path.join(PATH_ANNOT, name + '.png'))
        centers = pd.read_csv(os.path.join(PATH_CENTRE, name + '.csv'),
                              index_col=0).values
        centers[:, [0, 1]] = centers[:, [1, 0]]

        slic = seg_spx.segment_slic_img2d(img, sp_size=25, rltv_compact=0.3)

        dict_debug = {}
        labels_gc = seg_rg.region_growing_shape_slic_graphcut(
            seg,
            slic,
            centers, (model, list_mean_cdf),
            'set_cdfs',
            LABELS_FG_PROB,
            coef_shape=5.,
            coef_pairwise=15.,
            prob_label_trans=[0.1, 0.03],
            optim_global=False,
            nb_iter=65,
            allow_obj_swap=False,
            dict_thresholds=DEFAULT_RG2SP_THRESHOLDS,
            dict_debug_history=dict_debug)

        segm_obj = labels_gc[slic]
        logging.info('debug: %s', repr(dict_debug.keys()))

        for i in np.linspace(0, len(dict_debug['energy']) - 1, 5):
            fig = tl_visu.figure_rg2sp_debug_complete(seg,
                                                      slic,
                                                      dict_debug,
                                                      int(i),
                                                      max_size=5)
            fig_name = 'RG2Sp_graph-cut_%s_debug-%03d.pdf' % (name, i)
            fig.savefig(os.path.join(PATH_OUTPUT, fig_name),
                        bbox_inches='tight',
                        pad_inches=0)
            plt.close(fig)

        score = adjusted_rand_score(annot.ravel(), segm_obj.ravel())
        self.assertGreaterEqual(score, 0.5)

        expert_segm(name,
                    img,
                    seg,
                    segm_obj,
                    annot,
                    str_type='RG2Sp_graph-cut')