def aparse_params(dict_paths=PATHS):
    """Build the CLI parser, read the options and validate the input paths.

    SEE: https://docs.python.org/3/library/argparse.html
    :return: {str: str}, int
    """
    cli = argparse.ArgumentParser()
    # register the four string-path options from one specification table
    _options = [
        ('-annot', '--path_annot',
         'path to directory with annotations & name pattern',
         dict_paths['annot']),
        ('-segm', '--path_segm',
         'path to directory & name pattern for segmentation',
         dict_paths['segm']),
        ('-imgs', '--path_image',
         'path to directory & name pattern for images',
         dict_paths['image']),
        ('-out', '--path_out',
         'path to the output directory',
         dict_paths['output']),
    ]
    for short, full, msg, dflt in _options:
        cli.add_argument(short, full, type=str, required=False,
                         help=msg, default=dflt)
    args = cli.parse_args()
    logging.info('ARG PARAMETERS: \n %s', repr(args))
    dict_paths = {
        'annot': tl_io.update_path(args.path_annot),
        'segm': tl_io.update_path(args.path_segm),
        'image': '',
        'output': tl_io.update_path(args.path_out),
    }
    # the image path is optional; 'none' (any case) disables it
    img_path = args.path_image
    if isinstance(img_path, str) and img_path.lower() != 'none':
        dict_paths['image'] = tl_io.update_path(img_path)
    # every non-empty input path must exist; for glob patterns only the
    # parent directory is checked, the output dir may be created later
    for key, path in dict_paths.items():
        if key == 'output' or path == '':
            continue
        target = os.path.dirname(path) if '*' in path else path
        assert os.path.exists(target), 'missing (%s) "%s"' % (key, target)
    return dict_paths, args
def arg_parse_params(dict_paths=PATHS):
    """Parse command-line options and validate the given input paths.

    SEE: https://docs.python.org/3/library/argparse.html
    :return: {str: str}, int
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('-annot', '--path_annot', type=str, required=False,
                     help='annotations', default=dict_paths['annot'])
    cli.add_argument('-imgs', '--path_image', type=str, required=False,
                     help='path to directory & name pattern for images',
                     default=dict_paths['image'])
    cli.add_argument('-out', '--path_out', type=str, required=False,
                     help='path to the output directory',
                     default=dict_paths['output'])
    cli.add_argument('--padding', type=int, required=False,
                     help='crop padding [px]', default=25)
    cli.add_argument('--nb_jobs', type=int, required=False,
                     default=NB_THREADS,
                     help='number of processes in parallel')
    args = cli.parse_args()
    logging.info('ARG PARAMETERS: \n %s', repr(args))
    dict_paths = {
        'annot': tl_io.update_path(args.path_annot),
        'image': tl_io.update_path(args.path_image),
        'output': tl_io.update_path(args.path_out),
    }
    # each non-empty input path has to exist; for glob patterns only the
    # parent directory is checked, the output directory is not required yet
    for name, path in dict_paths.items():
        if name == 'output' or path == '':
            continue
        existing = os.path.dirname(path) if '*' in path else path
        assert os.path.exists(existing), 'missing (%s) "%s"' % (name, existing)
    return dict_paths, args
def arg_parse_params(params=CENTER_PARAMS):
    """Parse the command line, validate every path option and merge an
    optional JSON configuration into ``params``.

    ``params`` (the mutable default ``CENTER_PARAMS``) is updated in place,
    first with the parsed CLI values, then with the config file content and
    finally with the resolved absolute paths.

    SEE: https://docs.python.org/3/library/argparse.html
    :return: {str: str}, int
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-list', '--path_list', type=str, required=False,
                        help='path to the list of input files',
                        default=params['path_list'])
    parser.add_argument('-imgs', '--path_images', type=str, required=False,
                        help='path to directory & name pattern for images',
                        default=params['path_images'])
    parser.add_argument('-segs', '--path_segms', type=str, required=False,
                        help='path to directory & name pattern for segmentation',
                        default=params['path_segms'])
    parser.add_argument('-centers', '--path_centers', type=str, required=False,
                        help='path to directory & name pattern for centres',
                        default=params['path_centers'])
    parser.add_argument('-info', '--path_infofile', type=str, required=False,
                        help='path to the global information file',
                        default=params['path_infofile'])
    parser.add_argument('-out', '--path_output', type=str, required=False,
                        help='path to the output directory',
                        default=params['path_output'])
    parser.add_argument('-n', '--name', type=str, required=False,
                        help='name of the experiment', default='ovary')
    parser.add_argument('-config', '--path_config', type=str, required=False,
                        help='path to the configuration', default=None)
    parser.add_argument('--nb_jobs', type=int, required=False,
                        default=NB_THREADS,
                        help='number of processes in parallel')
    params.update(vars(parser.parse_args()))
    # resolve every *path* option; empty / 'none' values become ''
    paths = {}
    for k in (k for k in params if 'path' in k):
        if not isinstance(params[k], str) or params[k].lower() == 'none':
            paths[k] = ''
            continue
        if '*' in params[k] or k == 'path_expt':
            # glob pattern (or experiment dir): resolve only the directory
            # part and keep the pattern / basename attached
            p_dir = tl_io.update_path(os.path.dirname(params[k]))
            paths[k] = os.path.join(p_dir, os.path.basename(params[k]))
        else:
            paths[k] = tl_io.update_path(params[k], absolute=True)
            p_dir = paths[k]
        assert os.path.exists(p_dir), 'missing (%s) %s' % (k, p_dir)
    # load saved configuration (JSON only) and overlay it on the params;
    # the resolved paths are applied last so they always win
    if params['path_config'] is not None:
        assert os.path.splitext(params['path_config'])[-1] == '.json', \
            'wrong extension for %s' % params['path_config']
        with open(params['path_config'], 'r') as fd:
            data = json.load(fd)
        params.update(data)
    params.update(paths)
    logging.info('ARG PARAMETERS: \n %s', repr(params))
    return params
def parse_arg_params():
    """Create a simple argument parser (input images, labels, nb. jobs).

    NOTE: ``--path_images`` is required, so its former ``default=PATH_IMAGES``
    was dead code (argparse never applies a default to a required option);
    the default was dropped to stop suggesting the option may be omitted.

    :return dict: parsed arguments
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', '--path_images', type=str, required=True,
                        help='path to dir with annot')
    parser.add_argument('--label', type=int, required=False, nargs='+',
                        help='labels to be replaced', default=[-1])
    parser.add_argument('--nb_jobs', type=int, required=False,
                        help='number of jobs in parallel', default=NB_THREADS)
    args = vars(parser.parse_args())
    # resolve the directory part only; the basename may be a glob pattern
    # and is re-attached after the existence check
    p_dir = tl_io.update_path(os.path.dirname(args['path_images']))
    assert os.path.isdir(p_dir), '%s' % args['path_images']
    args['path_images'] = os.path.join(p_dir,
                                       os.path.basename(args['path_images']))
    logging.info(tl_expt.string_dict(args, desc='ARG PARAMETERS'))
    return args
def load_image(path_img, img_type=TYPES_LOAD_IMAGE[0]):
    """Load an image or annotation according to the chosen type.

    :param str path_img: path to the image file
    :param str img_type: one of the supported loading modes
    :return ndarray: loaded image, or None for an unsupported type
    """
    path_img = tl_data.update_path(path_img)
    assert os.path.isfile(path_img), 'missing "%s"' % path_img
    # reject unknown modes up front
    if img_type not in ('2d_gray', '2d_rgb', 'segm'):
        logging.error('not supported loading img_type: %s', img_type)
        return None
    if img_type == '2d_gray':
        image, _ = tl_data.load_img_double_band_split(path_img)
        assert image.ndim == 2, 'image dims: %s' % repr(image.shape)
        return image
    # '2d_rgb' and 'segm' share the same 2D loader
    image, _ = tl_data.load_image_2d(path_img)
    if img_type == 'segm' and ANNOT_RELABEL_SEQUENCE:
        # compress annotation labels to a consecutive sequence
        image, _, _ = segmentation.relabel_sequential(image)
    return image
def arg_parse_params(params=PARAMS):
    """Parse command-line options and fold them into ``params``.

    SEE: https://docs.python.org/3/library/argparse.html
    :return: {str: str}, int
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('-imgs', '--path_images', type=str, required=False,
                     help='path to directory & name pattern for images',
                     default=params['path_images'])
    cli.add_argument('-segs', '--path_segms', type=str, required=False,
                     help='path to directory & name pattern for segmentation',
                     default=params['path_segms'])
    cli.add_argument('-centers', '--path_centers', type=str, required=False,
                     help='path to directory & name pattern for centres',
                     default=params['path_centers'])
    cli.add_argument('-info', '--path_infofile', type=str, required=False,
                     help='path to the global information file',
                     default=params['path_infofile'])
    cli.add_argument('--stages', type=int, required=False, nargs='+',
                     help='stage indexes', default=[1, 2, 3, 4, 5])
    cli.add_argument('-out', '--path_output', type=str, required=False,
                     help='path to the output directory',
                     default=params['path_output'])
    cli.add_argument('--nb_jobs', type=int, required=False,
                     default=NB_THREADS,
                     help='number of processes in parallel')
    params.update(vars(cli.parse_args()))
    # make every *path* entry absolute
    for key in [k for k in params if 'path' in k]:
        params[key] = tl_io.update_path(params[key], absolute=True)
    logging.info('ARG PARAMETERS: \n %s', repr(params))
    return params
def arg_parse_params(paths=PATHS):
    """Parse CLI options, resolve the given paths and verify they exist.

    SEE: https://docs.python.org/3/library/argparse.html
    :return: {str: str}, bool, int
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--images', type=str, required=False,
                     help='path to directory & name pattern for images',
                     default=paths['images'])
    cli.add_argument('--annots', type=str, required=False,
                     help='path to directory & name pattern for annotation',
                     default=paths['annots'])
    cli.add_argument('--segments', type=str, required=False,
                     help='path to directory & name pattern for segmentation',
                     default=paths['segments'])
    cli.add_argument('--centers', type=str, required=False,
                     help='path to directory & name pattern for centres',
                     default=paths['centers'])
    cli.add_argument('--results', type=str, required=False,
                     help='path to the result directory',
                     default=paths['results'])
    cli.add_argument('--nb_jobs', type=int, required=False,
                     default=NB_THREADS,
                     help='number of processes in parallel')
    cli.add_argument('--visual', type=int, required=False,
                     default=EXPORT_VUSIALISATION,
                     help='export visualisations')
    arg_params = vars(cli.parse_args())
    export_visual = bool(arg_params['visual'])
    for key, value in arg_params.items():
        # the two non-path options are returned separately
        if key in ('nb_jobs', 'visual'):
            continue
        if not isinstance(value, str) or value.lower() == 'none':
            paths[key] = None
            continue
        paths[key] = tl_io.update_path(value, absolute=True)
        # for glob patterns only the parent directory is checked
        target = os.path.dirname(paths[key]) if '*' in paths[key] \
            else paths[key]
        assert os.path.exists(target), '%s' % target
    logging.info('ARG PARAMETERS: \n %s', repr(paths))
    return paths, export_visual, arg_params['nb_jobs']
def load_image_segm_center(idx_row, path_out=None, dict_relabel=None):
    """Load the image and segmentation for one table row, and the egg
    centres whether they exist; if ``path_out`` is given, draw a
    visualisation of the loaded inputs there.

    :param (int, DF:row) idx_row: index and row holding the input paths
    :param str path_out: path to output directory
    :param {} dict_relabel: optional relabelling look-up applied to the segm
    :return: str, np.array, np.array, [[int, int]] or np.array
    """
    idx, row_path = idx_row
    # resolve all input paths and make sure each exists
    for k in ['path_image', 'path_segm', 'path_centers']:
        row_path[k] = tl_io.update_path(row_path[k])
        assert os.path.exists(row_path[k]), 'missing %s' % row_path[k]
    idx_name = get_idx_name(idx, row_path['path_image'])
    img_struc, img_gene = tl_io.load_img_double_band_split(
        row_path['path_image'], im_range=None)
    # img_rgb = np.array(Image.open(row_path['path_img']))
    img_rgb = tl_io.merge_image_channels(img_struc, img_gene)
    # normalise the merged image into the [0, 1] range
    if np.max(img_rgb) > 1:
        img_rgb = img_rgb / float(np.max(img_rgb))

    seg_ext = os.path.splitext(os.path.basename(row_path['path_segm']))[-1]
    if seg_ext == '.npz':
        # compressed archive: take the first stored array as segmentation
        with np.load(row_path['path_segm']) as npzfile:
            segm = npzfile[npzfile.files[0]]
        if dict_relabel is not None:
            segm = seg_lbs.merge_probab_labeling_2d(segm, dict_relabel)
    else:
        segm = np.array(Image.open(row_path['path_segm']))
        if dict_relabel is not None:
            segm = seg_lbs.relabel_by_dict(segm, dict_relabel)

    if row_path['path_centers'] is not None \
            and os.path.isfile(row_path['path_centers']):
        posix = os.path.splitext(
            os.path.basename(row_path['path_centers']))[-1]
        if posix == '.csv':
            # landmark file: load and swap to (row, col) ordering
            centers = tl_io.load_landmarks_csv(row_path['path_centers'])
            centers = tl_io.swap_coord_x_y(centers)
        elif posix == '.png':
            centers = np.array(Image.open(row_path['path_centers']))
            # relabel loaded segm into relevant one
            centers = np.array(LUT_ANNOT_CENTER_RELABEL)[centers]
        else:
            # NOTE(review): on an unsupported extension 'centers' stays
            # unbound and the return below raises NameError — confirm
            # whether this branch can be reached with real data
            logging.warning('not supported file format %s', posix)
    else:
        centers = None

    if is_drawing(path_out):
        export_visual_input_image_segm(path_out, idx_name, img_rgb, segm,
                                       centers)

    return idx_name, img_rgb, segm, centers
def parse_arg_params():
    """Create a simple argument parser (input images, segms, output).

    :return: {str: str} resolved paths, int number of jobs
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('-imgs', '--path_images', type=str, required=True,
                     help='path to the input images')
    cli.add_argument('-segs', '--path_segms', type=str, required=True,
                     help='path to the input segms')
    cli.add_argument('-out', '--path_output', type=str, required=True,
                     help='path to the output')
    cli.add_argument('--nb_jobs', type=int, required=False,
                     help='number of jobs in parallel', default=NB_THREADS)
    args = cli.parse_args()
    paths = {
        'images': args.path_images,
        'segms': args.path_segms,
        'output': args.path_output,
    }
    for name in paths:
        if '*' in paths[name] or name == 'output':
            # resolve only the directory part and keep the (glob) basename
            folder = tl_io.update_path(os.path.dirname(paths[name]))
            paths[name] = os.path.join(folder,
                                       os.path.basename(paths[name]))
        else:
            paths[name] = tl_io.update_path(paths[name])
            folder = paths[name]
        assert os.path.exists(folder), '%s' % paths[name]
    return paths, args.nb_jobs
def parse_arg_params():
    """Create a simple argument parser (input images, output dir, colours).

    NOTE: the former ``default=PATH_INPUT`` / ``default=PATH_OUTPUT`` on the
    two required options were dead code (argparse never applies a default to
    a required option); they were removed to make the CLI contract explicit.

    :return dict: parsed arguments
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', '--path_images', type=str, required=True,
                        help='path to dir with images')
    parser.add_argument('-out', '--path_out', type=str, required=True,
                        help='path to output dir')
    parser.add_argument('-clrs', '--path_colors', type=str, required=False,
                        help='json with colour-label dict', default=None)
    parser.add_argument('--nb_jobs', type=int, required=False,
                        help='number of jobs in parallel', default=NB_THREADS)
    args = vars(parser.parse_args())
    for n in ['path_images', 'path_out']:
        # resolve the directory part and keep the (possibly glob) basename
        p_dir = tl_io.update_path(os.path.dirname(args[n]))
        assert os.path.isdir(p_dir), 'missing: %s' % args[n]
        args[n] = os.path.join(p_dir, os.path.basename(args[n]))
    if args['path_colors'] is not None:
        args['path_colors'] = tl_io.update_path(args['path_colors'])
    logging.info(tl_expt.string_dict(args, desc='ARG PARAMETERS'))
    return args
def get_image_path(name_img, path_base=PATH_IMAGES):
    """Join a sample image name onto the base image directory.

    :param str name_img: relative image name
    :param str path_base: base directory with the sample images
    :return str: resolved image path

    >>> p = get_image_path(IMAGE_LENNA)
    >>> os.path.isfile(p)
    True
    >>> os.path.basename(p)
    'lena.png'
    """
    return tl_io.update_path(os.path.join(path_base, name_img))
def arg_parse_params(params=SEGM_PARAMS):
    """Argument parser from cmd.

    FIX: the 'none' sentinel check is now case-insensitive
    (``.lower() == 'none'``) to match the convention used by the other
    parsers in this project, and it no longer assumes the value is a string.

    SEE: https://docs.python.org/3/library/argparse.html
    :return: {str: any}
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-list', '--path_train_list', type=str, required=False,
                        help='path to the list of image',
                        default=params['path_train_list'])
    parser.add_argument('-imgs', '--path_predict_imgs', type=str,
                        help='path to folder & name pattern with new image',
                        required=False, default=params['path_predict_imgs'])
    parser.add_argument('-out', '--path_out', type=str, required=False,
                        help='path to the output directory',
                        default=params['path_out'])
    parser.add_argument('-n', '--name', type=str, required=False,
                        help='name of the experiment', default=params['name'])
    parser.add_argument('--path_config', type=str, required=False, default='',
                        help='path to the segmentation config')
    parser.add_argument('--img_type', type=str, required=False,
                        default=params['img_type'], choices=TYPES_LOAD_IMAGE,
                        help='type of image to be loaded')
    parser.add_argument('--nb_classes', type=int, required=False,
                        help='number of classes for segmentation',
                        default=params['nb_classes'])
    parser.add_argument('--nb_jobs', type=int, required=False,
                        default=NB_THREADS,
                        help='number of processes in parallel')
    parser.add_argument('--visual', type=int, required=False, default=False,
                        help='export debug visualisations')
    args = vars(parser.parse_args())
    logging.info('ARG PARAMETERS: \n %s', repr(args))
    for k in (k for k in args if 'path' in k):
        # skip unset paths; 'none' in any letter case disables an option
        if not isinstance(args[k], str) or args[k].lower() in ('', 'none'):
            continue
        args[k] = tl_data.update_path(args[k])
        # for glob patterns only the parent directory is checked
        p = os.path.dirname(args[k]) if '*' in args[k] else args[k]
        assert os.path.exists(p), 'missing (%s) "%s"' % (k, p)
    args['visual'] = bool(args['visual'])
    # if the config path is set, load it; otherwise keep the defaults
    if os.path.isfile(args['path_config']):
        with open(args['path_config'], 'r') as fd:
            config = json.load(fd)
        params.update(config)
    params.update(args)
    return params
def parse_arg_params():
    """Create a simple argument parser (input images, method, threshold).

    NOTE: ``--path_images`` is required, so its former ``default=PATH_IMAGES``
    was dead code (argparse never applies a default to a required option);
    the default was dropped to stop suggesting the option may be omitted.

    :return dict: parsed arguments
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', '--path_images', type=str, required=True,
                        help='path to dir with images')
    parser.add_argument('-m', '--method', type=str, required=False,
                        help='method for quantisation color/position',
                        default='color', choices=['color', 'position'])
    parser.add_argument('-thr', '--px_threshold', type=float, required=False,
                        help='percentage of pixels of a color to be removed',
                        default=THRESHOLD_INVALID_PIXELS)
    parser.add_argument('--nb_jobs', type=int, required=False,
                        help='number of jobs in parallel', default=NB_THREADS)
    args = vars(parser.parse_args())
    # resolve the directory part only; the basename may be a glob pattern
    p_dir = tl_io.update_path(os.path.dirname(args['path_images']))
    assert os.path.isdir(p_dir), '%s' % args['path_images']
    args['path_images'] = os.path.join(p_dir,
                                       os.path.basename(args['path_images']))
    logging.info(tl_expt.string_dict(args, desc='ARG PARAMETERS'))
    return args
def arg_parse_params(params):
    """Parse the command line, merge an optional JSON configuration and
    resolve every path option; ``params`` is updated in place.

    SEE: https://docs.python.org/3/library/argparse.html
    :return: {str: str}, int
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-list', '--path_list', type=str, required=False,
                        help='path to the list of image',
                        default=params['path_list'])
    parser.add_argument('-out', '--path_out', type=str, required=False,
                        help='path to the output directory',
                        default=params['path_out'])
    parser.add_argument('-n', '--name', type=str, required=False,
                        help='name of the experiment', default='ovary')
    parser.add_argument('-config', '--path_config', type=str, required=False,
                        help='path to the configuration', default=None)
    parser.add_argument('--nb_jobs', type=int, required=False,
                        default=NB_THREADS,
                        help='number of processes in parallel')
    arg_params = vars(parser.parse_args())
    params.update(arg_params)
    # load saved configuration: a missing / 'none' config is normalised to
    # an empty string, otherwise the JSON file is overlaid onto params
    if not isinstance(arg_params['path_config'], str) \
            or arg_params['path_config'].lower() == 'none':
        params['path_config'] = ''
    else:
        assert os.path.isfile(params['path_config']), \
            '%s' % params['path_config']
        assert os.path.splitext(params['path_config'])[-1] == '.json'
        with open(params['path_config'], 'r') as fd:
            data = json.load(fd)
        params.update(data)
    # resolve the CLI path options last so they override config values;
    # for glob patterns only the parent directory is checked
    for k in (k for k in arg_params if 'path' in k):
        if arg_params[k] is None:
            continue
        params[k] = tl_data.update_path(arg_params[k], absolute=True)
        p = os.path.dirname(params[k]) if '*' in params[k] else params[k]
        assert os.path.exists(p), '%s' % p
    logging.info('ARG PARAMETERS: \n %s', repr(params))
    return params
def arg_parse_params(params=PARAMS):
    """Parse the command line and validate all path-like options.

    SEE: https://docs.python.org/3/library/argparse.html
    :return: {str: any}
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('-imgs', '--path_images', type=str, required=False,
                     help='path to directory & name pattern for image',
                     default=params['path_images'])
    cli.add_argument('-segm', '--path_segms', type=str, required=False,
                     help='path to directory & name pattern for annotation',
                     default=params['path_segms'])
    cli.add_argument('-out', '--path_out', type=str, required=False,
                     help='path to the output directory',
                     default=params['path_out'])
    cli.add_argument('--img_type', type=str, required=False,
                     default=params['img_type'], choices=TYPES_LOAD_IMAGE,
                     help='type of image to be loaded')
    cli.add_argument('--slic_size', type=int, required=False, default=20,
                     help='superpixels size')
    cli.add_argument('--slic_regul', type=float, required=False, default=0.25,
                     help='superpixel regularization')
    cli.add_argument('--slico', type=int, required=False, default=0,
                     help='using SLICO (ASLIC)')
    cli.add_argument('--nb_jobs', type=int, required=False,
                     default=NB_THREADS,
                     help='number of processes in parallel')
    params = vars(cli.parse_args())
    logging.info('ARG PARAMETERS: \n %s', repr(params))
    for key in [k for k in params if 'path' in k]:
        params[key] = tl_io.update_path(params[key])
        # a missing output dir is tolerated and marked by an empty string
        if key == 'path_out' and not os.path.isdir(params[key]):
            params[key] = ''
            continue
        # for glob patterns only the parent directory is checked
        target = os.path.dirname(params[key]) if '*' in params[key] \
            else params[key]
        assert os.path.exists(target), 'missing (%s) "%s"' % (key, target)
    return params
'name': 'imgDisk', 'nb_classes': 3, 'clr_space': 'rgb', 'img_type': '2d_rgb', 'slic_size': 35, 'slic_regul': 0.2, # 'spacing': (12, 1, 1), 'features': FEATURES_SET_COLOR, 'prob_type': 'GMM', 'pca_coef': None, 'gc_regul': 2.0, 'gc_edge_type': 'model', 'gc_use_trans': False, 'estimate': TYPE_GMM[0], } PATH_IMAGES = os.path.join(tl_data.update_path('images'), 'drosophila_disc') # PATH_IMAGES = tl_io.update_path(os.path.join('images', 'langerhans_islets')) PATH_RESULTS = tl_data.update_path('results', absolute=True) NAME_EXPERIMENT = 'experiment_segm-unSupervised' SEGM_PARAMS.update({ 'path_train_list': os.path.join(PATH_IMAGES, 'list_imaginal-disks_short.csv'), 'path_predict_imgs': os.path.join(PATH_IMAGES, 'image', '*.jpg'), 'path_out': PATH_RESULTS, }) def arg_parse_params(params=SEGM_PARAMS): """ argument parser from cmd
""" import os import logging from PIL import Image import numpy as np import segmentation.utils.data_io as tl_io SAMPLE_SEG_SIZE_2D_SMALL = (20, 10) SAMPLE_SEG_SIZE_2D_NORM = (150, 100) SAMPLE_SEG_NB_CLASSES = 3 SAMPLE_SEG_SIZE_3D_SMALL = (10, 5, 6) PATH_IMAGES = tl_io.update_path('images') IMAGE_LENNA = os.path.join('others', 'lena.png') IMAGE_OBJECTS = os.path.join('synthetic', 'reference.jpg') IMAGE_3CLS = os.path.join('textures', 'sample_rgb_3cls.jpg') IMAGE_STAR_1 = os.path.join('see_starfish', 'star_nb1-b.jpg') IMAGE_STAR_2 = os.path.join('see_starfish', 'stars_nb2.jpg') IMAGE_HISTOL_CIMA = \ os.path.join('histology_CIMA', '29-041-Izd2-w35-CD31-3-les1.jpg') IMAGE_HISTOL_FLAGSHIP = \ os.path.join('histology_Flagship', 'Case001_Cytokeratin.jpg') IMAGE_DROSOPHILA_DISC = \ os.path.join('drosophila_disc', 'image', 'img_6.jpg') ANNOT_DROSOPHILA_DISC = \ os.path.join('drosophila_disc', 'annot', 'img_6.png') IMAGE_DROSOPHILA_OVARY_2D = \ os.path.join('drosophila_ovary_slice', 'image', 'insitu7545.jpg')
import matplotlib if os.environ.get('DISPLAY', '') == '': logging.warning('No display found. Using non-interactive Agg backend') matplotlib.use('Agg') import tqdm import numpy as np from PIL import Image from skimage import measure from scipy import ndimage sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root import segmentation.utils.data_io as tl_io NB_THREADS = max(1, int(mproc.cpu_count() * 0.9)) PATH_IMAGES = tl_io.update_path(os.path.join('images', 'drosophila_ovary_slice')) PATH_RESULTS = tl_io.update_path('results', absolute=True) PATHS = { 'annot': os.path.join(PATH_IMAGES, 'annot_eggs', '*.png'), 'image': os.path.join(PATH_IMAGES, 'image', '*.jpg'), 'output': os.path.join(PATH_RESULTS, 'cut_images'), } def arg_parse_params(dict_paths=PATHS): """ SEE: https://docs.python.org/3/library/argparse.html :return: {str: str}, int """ parser = argparse.ArgumentParser() parser.add_argument('-annot', '--path_annot', type=str, required=False,
import logging import os import sys import unittest import matplotlib.pyplot as plt from scipy.misc import imresize sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root import segmentation.utils.data_samples as d_spl import segmentation.utils.data_io as tl_io import segmentation.utils.drawing as tl_visu import segmentation.pipelines as pipelines import segmentation.descriptors as seg_fts PATH_OUTPUT = tl_io.update_path(os.path.join('output')) # set default feature extracted from image FEATURES_TEXTURE = seg_fts.FEATURES_SET_TEXTURE_SHORT seg_fts.USE_CYTHON = False def show_segm_results_2d(img, seg, path_dir, fig_name='test_segm_.png'): """ show and expert segmentation results :param ndarray img: input image :param ndarray seg: resulting segmentation :param str path_dir: path to the visualisations :param str fig_name: figure name """ fig = tl_visu.figure_image_segm_results(img, seg) path_fig = os.path.join(path_dir, fig_name)
import numpy as np from skimage import draw, transform import matplotlib.pyplot as plt sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root import segmentation.utils.data_samples as d_spl import segmentation.utils.data_io as tl_io import segmentation.utils.drawing as tl_visu import segmentation.descriptors as seg_fts import segmentation.superpixels as seg_spx # angular step for Ray features ANGULAR_STEP = 15 # size of subfigure for visualise the Filter bank SUBPLOT_SIZE_FILTER_BANK = 3 PATH_OUTPUT = os.path.abspath(tl_io.update_path('output')) PATH_FIGURES_RAY = os.path.join(PATH_OUTPUT, 'test_ray_features') # create the folder for visualisations if not os.path.exists(PATH_FIGURES_RAY): os.mkdir(PATH_FIGURES_RAY) def export_ray_results(seg, center, points, ray_dist_raw, ray_dist, name): """ export result from Ray features extractions :param ndarray seg: segmentation :param (int, int) center: center of the Ray features :param [[int, int]] points: list of reconstructed points :param [[int]] ray_dist_raw: list of raw Ray distances in regular step :param [[int]] ray_dist: list of normalised Ray distances in regular step :param str name: name of particular figure
def image_segmentation(idx_row, params, debug_export=DEBUG_EXPORT):
    """Prepare the inputs (image, segmentation, centres) for one table row
    and run every configured segmentation method on it.

    FIX: the per-method guard used a bare ``except:`` which also swallows
    ``SystemExit`` / ``KeyboardInterrupt``; it now catches ``Exception``.

    :param (int, str) idx_row: input image and centres
    :param {str: ...} params: segmentation parameters
    :param bool debug_export: export debug data for the RG2SP methods
    :return str: image name
    """
    _, row_path = idx_row
    # resolve every path-like column; iterate a copy since we mutate the row
    for k in dict(row_path):
        if isinstance(k, str) and k.startswith('path_'):
            row_path[k] = tl_data.update_path(row_path[k], absolute=True)
    logging.debug('segmenting image: "%s"', row_path['path_image'])
    name = os.path.splitext(os.path.basename(row_path['path_image']))[0]

    img = load_image(row_path['path_image'])
    # make the image like RGB
    img_rgb = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
    seg = load_image(row_path['path_segm'], 'segm')
    assert img_rgb.shape[:2] == seg.shape, \
        'image %s and segm %s do not match' \
        % (repr(img_rgb.shape[:2]), repr(seg.shape))
    if not os.path.isfile(row_path['path_centers']):
        logging.warning('no center was detected for "%s"', name)
        return name
    centers = tl_data.load_landmarks_csv(row_path['path_centers'])
    centers = tl_data.swap_coord_x_y(centers)
    if len(centers) == 0:
        logging.warning('no center was detected for "%s"', name)
        return name
    # img = seg / float(seg.max())
    slic = seg_spx.segment_slic_img2d(img_rgb, sp_size=params['slic_size'],
                                      rltv_compact=params['slic_regul'])
    # export the visualisations of the raw and the simplified inputs
    path_segm = os.path.join(params['path_exp'], 'input', name + '.png')
    export_draw_image_segm(path_segm, img_rgb, segm_obj=seg, centers=centers)
    seg_simple = simplify_segm_3cls(seg)
    path_segm = os.path.join(params['path_exp'], 'simple', name + '.png')
    export_draw_image_segm(path_segm, seg_simple - 1.)

    dict_segment = create_dict_segmentation(params, slic, seg, img, centers)
    image_name = name + '.png'
    centre_name = name + '.csv'
    # iterate over segmentation methods and perform segmentation on this image
    for method in dict_segment:
        (fn, args) = dict_segment[method]
        logging.debug(' -> %s on "%s"', method, name)
        path_dir = os.path.join(params['path_exp'], method)  # n.split('_')[0]
        path_segm = os.path.join(path_dir, image_name)
        path_centre = os.path.join(path_dir + DIR_CENTRE_POSIX, centre_name)
        path_fig = os.path.join(path_dir + DIR_VISUAL_POSIX, image_name)
        path_debug = os.path.join(path_dir + DIR_DEBUG_POSIX, name)
        # assuming that segmentation may fail
        try:
            t = time.time()
            if debug_export and 'rg2sp' in method:
                os.mkdir(path_debug)
                segm_obj, centers, dict_export = fn(*args,
                                                    debug_export=path_debug)
            else:
                segm_obj, centers, dict_export = fn(*args)
            # also export ellipse params here or inside the segm fn
            if dict_export is not None:
                for k in dict_export:
                    export_partial(k, dict_export[k], path_dir, name)
            logging.info('running time of %s on image "%s" is %d s',
                         repr(fn.__name__), image_name, time.time() - t)
            Image.fromarray(segm_obj.astype(np.uint8)).save(path_segm)
            export_draw_image_segm(path_fig, img_rgb, seg, segm_obj, centers)
            # export also centers
            centers = tl_data.swap_coord_x_y(centers)
            tl_data.save_landmarks_csv(path_centre, centers)
        except Exception:
            # a failing method must not abort the remaining methods
            logging.error('segment fail for "%s" via %s with \n %s',
                          name, method, traceback.format_exc())
    return name
# threshold if two segmentation overlap more, keep just one of them SEGM_OVERLAP = 0.5 # paramters for SLIC segmentation SLIC_SIZE = 40 SLIC_REGUL = 0.3 # Region Growing configuration DEBUG_EXPORT = False RG2SP_THRESHOLDS = { # thresholds for updating between iterations 'centre': 20, 'shift': 10, 'volume': 0.05, 'centre_init': 50 } PATH_DATA = tl_data.update_path('data', absolute=True) PATH_IMAGES = os.path.join(tl_data.update_path('images'), 'drosophila_ovary_slice') # sample segmentation methods LIST_SAMPLE_METHODS = ('ellipse_moments', 'ellipse_ransac_mmt', 'ellipse_ransac_crit', 'GC_pixels-large', 'GC_pixels-shape', 'GC_slic-large', 'GC_slic-shape', 'rg2sp_greedy-mixture', 'rg2sp_GC-mixture', 'watershed_morph') # default segmentation configuration SEGM_PARAMS = { # ovary labels: background, funicular cells, nurse cells, cytoplasm 'tab-proba_ellipse': [0.01, 0.95, 0.95, 0.85], 'tab-proba_graphcut': [0.01, 0.6, 0.99, 0.75], 'tab-proba_RG2SP': [0.01, 0.6, 0.95, 0.75], 'path_single-model':
import glob import unittest # import pickle import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.metrics import adjusted_rand_score sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root import segmentation.utils.data_io as tl_io import segmentation.utils.drawing as tl_visu import segmentation.superpixels as seg_spx import segmentation.region_growing as seg_rg PATH_BASE = tl_io.update_path(os.path.join('images', 'drosophila_ovary_slice'), absolute=True) PATH_IMAGE = os.path.join(PATH_BASE, 'image') PATH_SEGM = os.path.join(PATH_BASE, 'segm') PATH_ANNOT = os.path.join(PATH_BASE, 'annot_eggs') PATH_CENTRE = os.path.join(PATH_BASE, 'center_levels') PATH_OUTPUT = tl_io.update_path('output', absolute=True) NAME_RG2SP_MODEL = 'RG2SP_multi-model_mixture.npz' PATH_PKL_MODEL = os.path.join(PATH_OUTPUT, NAME_RG2SP_MODEL) LABELS_FG_PROB = (0.05, 0.7, 0.9, 0.9) DEFAULT_RG2SP_THRESHOLDS = seg_rg.RG2SP_THRESHOLDS FIG_SIZE = 12. def compute_prior_map(cdist, size=(500, 800), step=5): prior_map = np.zeros(size) centre = np.array(size) / 2
import os import sys import logging import unittest import numpy as np import pandas as pd from sklearn.metrics import adjusted_rand_score sys.path.append(os.path.abspath(os.path.join('..', '..'))) # Add path to root import segmentation.utils.data_io as tl_io import segmentation.utils.drawing as tl_visu import segmentation.ellipse_fitting as seg_fit # set some default paths PATH_OUTPUT = tl_io.update_path(os.path.join('output')) PATH_BASE = tl_io.update_path(os.path.join('images', 'drosophila_ovary_slice')) PATH_IMAGES = os.path.join(PATH_BASE, 'image') PATH_SEGM = os.path.join(PATH_BASE, 'segm') PATH_ANNOT = os.path.join(PATH_BASE, 'annot_eggs') PATH_CENTRE = os.path.join(PATH_BASE, 'center_levels') # color spaces for visualisations COLORS = 'bgrmyck' # set probability to be foreground / background TABLE_FB_PROBA = [[0.01, 0.7, 0.95, 0.8], [0.99, 0.3, 0.05, 0.2]] MAX_FIGURE_SEIZE = 10 class TestEllipseFitting(unittest.TestCase): def test_ellipse_fitting(self, name='insitu7545',
'fts_ray_types': [('up', [0])], # 'fts_ray_types': [('up', [0]), ('down', [1])], 'fts_ray_closer': True, 'fts_ray_smooth': 0, 'pca_coef': None, # 'pca_coef': 0.99, 'balance': 'unique', 'classif': 'RandForest', # 'classif': 'SVM', 'nb_classif_search': 50, 'dict_relabel': None, # 'dict_relabel': {0: [0], 1: [1], 2: [2, 3]}, 'center_dist_thr': 50, # distance to from annotated center as a point } PATH_IMAGES = os.path.join(tl_io.update_path('images'), 'drosophila_ovary_slice') PATH_RESULTS = tl_io.update_path('results', absolute=True) CENTER_PARAMS.update({ 'path_list': os.path.join(PATH_IMAGES, 'list_imgs-segm-center-levels_short.csv'), 'path_images': '', 'path_segms': '', 'path_centers': '', # 'path_images': os.path.join(PATH_IMAGES, 'image', '*.jpg'), # 'path_segms': os.path.join(PATH_IMAGES, 'segm', '*.png'), # 'path_centers': os.path.join(PATH_IMAGES, 'center_levels', '*.png'), 'path_infofile': '', 'path_output': PATH_RESULTS, 'name': 'ovary', })