def wrapper_relabel_segm(annot_segm):
    """ relabel a segmentation to best match the annotation labels

    :param (ndarray, ndarray) annot_segm: tuple of annotation and segmentation
    :return ndarray: relabelled segmentation
    """
    annot, segm = annot_segm
    try:
        segm = seg_lbs.relabel_max_overlap_unique(annot, segm)
    except Exception:
        # log the failure but return the original segmentation unchanged
        logging.error(traceback.format_exc())
    return segm
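# Usage sketch (illustrative, not part of the original module): the single
# tuple argument suggests this wrapper is meant for pool mapping; assuming
# `annots` and `segms` are equally long lists of label images, relabelling
# them in parallel might look like this:
def _example_relabel_in_parallel(annots, segms, nb_workers=4):
    """ hypothetical helper: relabel many segmentations in parallel """
    import multiprocessing as mp
    with mp.Pool(processes=nb_workers) as pool:
        return pool.map(wrapper_relabel_segm, zip(annots, segms))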
def export_visual(df_row, path_out, relabel=True):
    """ export visualisation of the overlap of segmented image and annotation

    :param {str: ...} df_row: table row with image paths
    :param str path_out: path to the visualisation directory
    :param bool relabel: whether to relabel the segmentation as sequential
    """
    annot, _ = tl_data.load_image_2d(df_row['path_1'])
    segm, _ = tl_data.load_image_2d(df_row['path_2'])
    img = None
    if 'path_3' in df_row:
        img, _ = tl_data.load_image_2d(df_row['path_3'])
    if relabel:
        annot = relabel_sequential(annot)[0]
        segm = seg_lbs.relabel_max_overlap_unique(annot, segm)
    fig = seg_visu.figure_overlap_annot_segm_image(annot, segm, img)
    name = os.path.splitext(os.path.basename(df_row['path_1']))[0]
    logging.debug('>> exporting -> %s', name)
    fig.savefig(os.path.join(path_out, '%s.png' % name))
    # close the figure to avoid memory build-up when exporting many images
    # (local import, since module-level imports are outside this excerpt)
    from matplotlib import pyplot as plt
    plt.close(fig)
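# Usage sketch (illustrative; the CSV input, output directory and pandas
# driver are assumptions, only the column names path_1 / path_2 / path_3
# follow the keys used above):
def _example_export_all(path_csv, path_out='./visual'):
    """ hypothetical driver: export an overlap figure for every table row """
    import pandas as pd
    df_paths = pd.read_csv(path_csv)  # expects columns path_1, path_2 (+ optional path_3)
    if not os.path.isdir(path_out):
        os.makedirs(path_out)
    for _, df_row in df_paths.iterrows():
        export_visual(df_row, path_out, relabel=True)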
def compute_metrics(row):
    """ load annotation and segmentation and compute similarity metrics

    :param dict row: row with paths to the annotation and segmentation
    :return {str: float}: dictionary of metric names and values
    """
    logging.debug('loading annot "%s"\n and segm "%s"', row['path_annot'], row['path_egg-segm'])
    annot, _ = tl_data.load_image_2d(row['path_annot'])
    segm, _ = tl_data.load_image_2d(row['path_egg-segm'])
    if annot.shape != segm.shape:
        raise ImageDimensionError('dimensions do not match %r - %r' % (annot.shape, segm.shape))
    jaccards = []
    segm = seg_lbs.relabel_max_overlap_unique(annot, segm, keep_bg=True)
    # Jaccard index per object label, skipping the background label
    for lb in np.unique(annot)[1:]:
        annot_obj = (annot == lb)
        segm_obj = (segm == lb)
        # label_hist = seg_lb.histogram_regions_labels_counts(segm, annot_obj)
        # segm_obj = np.argmax(label_hist, axis=1)[segm]
        sum_or = np.sum(np.logical_or(annot_obj, segm_obj))
        jaccard = np.sum(np.logical_and(annot_obj, segm_obj)) / float(sum_or)
        jaccards.append(jaccard)
    if not jaccards:
        jaccards.append(0)
    # avg_weight = 'samples' if len(np.unique(annot)) > 2 else 'binary'
    y_true, y_pred = annot.ravel(), segm.ravel()
    dict_eval = {
        'name': os.path.basename(row['path_annot']),
        'ARS': metrics.adjusted_rand_score(y_true, y_pred),
        'Jaccard': np.mean(jaccards),
        'f1': metrics.f1_score(y_true, y_pred, average='micro'),
        'accuracy': metrics.accuracy_score(y_true, y_pred),
        'precision': metrics.precision_score(y_true, y_pred, average='micro'),
        'recall': metrics.recall_score(y_true, y_pred, average='micro'),
    }
    return dict_eval
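# Usage sketch (illustrative; the CSV with columns `path_annot` and
# `path_egg-segm` and the pandas driver are assumptions matching the keys
# used above):
def _example_evaluate_all(path_csv):
    """ hypothetical driver: compute metrics per image pair, return a table """
    import pandas as pd
    df_paths = pd.read_csv(path_csv)
    df_eval = pd.DataFrame([compute_metrics(row) for _, row in df_paths.iterrows()])
    logging.info('mean Jaccard: %f', df_eval['Jaccard'].mean())
    return df_eval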