def arg_parse_params(params):
    """Parse CLI arguments, resolve/validate paths and merge an optional YAML config.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict params: default parameter values used as argparse defaults
    :return dict: updated parameters
    :raises FileNotFoundError: when a given path does not exist
    :raises RuntimeError: when the config file has a non-YAML extension
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-list', '--path_list', type=str, required=False,
                        help='path to the list of input files',
                        default=params['path_list'])
    parser.add_argument('-imgs', '--path_images', type=str, required=False,
                        help='path to directory & name pattern for images',
                        default=params['path_images'])
    parser.add_argument('-segs', '--path_segms', type=str, required=False,
                        help='path to directory & name pattern for segmentation',
                        default=params['path_segms'])
    parser.add_argument('-centers', '--path_centers', type=str, required=False,
                        help='path to directory & name pattern for centres',
                        default=params['path_centers'])
    parser.add_argument('-info', '--path_infofile', type=str, required=False,
                        help='path to the global information file',
                        default=params['path_infofile'])
    parser.add_argument('-out', '--path_output', type=str, required=False,
                        help='path to the output directory',
                        default=params['path_output'])
    parser.add_argument('-n', '--name', type=str, required=False,
                        help='name of the experiment', default='ovary')
    parser.add_argument('-cfg', '--path_config', type=str, required=False,
                        help='path to the configuration', default=None)
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_THREADS,
                        help='number of processes in parallel')
    params.update(vars(parser.parse_args()))
    paths = {}
    for k in (k for k in params if 'path' in k):
        # missing / textual "none" values resolve to an empty path
        if not isinstance(params[k], str) or params[k].lower() == 'none':
            paths[k] = ''
            continue
        if k in ['path_images', 'path_segms', 'path_centers', 'path_expt']:
            # these hold a directory + glob pattern; only the dir must exist
            p_dir = tl_data.update_path(os.path.dirname(params[k]))
            paths[k] = os.path.join(p_dir, os.path.basename(params[k]))
        else:
            paths[k] = tl_data.update_path(params[k], absolute=True)
            p_dir = paths[k]
        if not os.path.exists(p_dir):
            raise FileNotFoundError('missing (%s) %s' % (k, p_dir))
    # load saved configuration
    if params['path_config'] is not None:
        ext = os.path.splitext(params['path_config'])[-1]
        if ext not in ('.yaml', '.yml'):
            raise RuntimeError('wrong extension for %s' % params['path_config'])
        with open(params['path_config'], 'r') as fd:
            # safe_load: never run arbitrary constructors from a config file
            data = yaml.safe_load(fd)
        params.update(data)
    params.update(paths)
    logging.info('ARG PARAMETERS: \n %r', params)
    return params
def parse_arg_params(parser):
    """Parse basic arguments from a prepared parser and return them as a dict.

    :param obj parser: argparse.ArgumentParser instance
    :return dict: parsed parameters (unset options removed)
    :raises FileNotFoundError: when a given path does not exist
    """
    args = vars(parser.parse_args())
    # remove parameters the user did not fill in
    args = {k: args[k] for k in args if args[k] is not None}
    for n in (k for k in args if k.startswith('path_')):
        args[n] = update_path(args[n])
        if not os.path.exists(args[n]):
            raise FileNotFoundError('%s' % args[n])
    # normalise scalar flags to lists so downstream code can iterate them
    for flag in ['nb_patterns', 'method']:
        if flag in args and not isinstance(args[flag], list):
            args[flag] = [args[flag]]
    # derive number of labels as patterns + one background label
    if 'nb_patterns' in args:
        if is_list_like(args['nb_patterns']):
            args['nb_labels'] = [lb + 1 for lb in args['nb_patterns']]
        else:
            args['nb_labels'] = args['nb_patterns'] + 1
        del args['nb_patterns']
    return args
def parse_arg_params():
    """Build a minimal CLI parser (input images, method, threshold) and parse it.

    :return obj: argparse
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', '--path_images', type=str, required=True,
                        default=PATH_IMAGES, help='path to dir with images')
    parser.add_argument('-m', '--method', type=str, required=False,
                        default='color', choices=['color', 'position'],
                        help='method for quantisation color/position')
    parser.add_argument('-thr', '--px_threshold', type=float, required=False,
                        default=THRESHOLD_INVALID_PIXELS,
                        help='percentage of pixels of a color to be removed')
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_WORKERS, help='number of jobs in parallel')
    args = vars(parser.parse_args())
    # the images argument is a directory + name pattern; validate the directory
    folder = tl_data.update_path(os.path.dirname(args['path_images']))
    if not os.path.isdir(folder):
        raise FileNotFoundError('missing folder: %s' % args['path_images'])
    args['path_images'] = os.path.join(folder,
                                       os.path.basename(args['path_images']))
    logging.info(tl_expt.string_dict(args, desc='ARG PARAMETERS'))
    return args
def parse_arg_params():
    """Create a simple argument parser with default values and validate inputs.

    :return obj: argparse
    :raises FileNotFoundError: when an input/output folder is missing
    :raises ValueError: when the old/new label lists differ in length
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', '--path_images', type=str, required=True,
                        help='path to images', default=PATH_IMAGES)
    parser.add_argument('-out', '--path_output', type=str, required=True,
                        help='path to output dir', default=PATH_OUTPUT)
    parser.add_argument('--label_old', type=int, required=False, nargs='+',
                        help='labels to be replaced', default=[0])
    parser.add_argument('--label_new', type=int, required=False, nargs='+',
                        help='new labels after replacing', default=[0])
    parser.add_argument('--nb_workers', type=int, required=False,
                        help='number of jobs in parallel', default=NB_WORKERS)
    args = vars(parser.parse_args())
    for k in ['path_images', 'path_output']:
        # arguments hold a directory + name pattern; only the dir must exist
        p_dir = tl_data.update_path(os.path.dirname(args[k]))
        if not os.path.isdir(p_dir):
            raise FileNotFoundError('missing folder: %s' % args[k])
        args[k] = os.path.join(p_dir, os.path.basename(args[k]))
    # old and new labels are paired element-wise, so lengths must agree
    if len(args['label_old']) != len(args['label_new']):
        raise ValueError('length of old (%i) and new (%i) labels should be same'
                         % (len(args['label_old']), len(args['label_new'])))
    logging.info(tl_expt.string_dict(args, desc='ARG PARAMETERS'))
    return args
def arg_parse_params():
    """Parse command-line options for annotations, output and mixture size.

    SEE: https://docs.python.org/3/library/argparse.html

    :return {str: str}: parsed parameters
    :raises FileNotFoundError: when a given path does not exist
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-annot', '--path_annot', type=str, required=False,
                        help='path to directory & name pattern for annotations',
                        default=PATH_ANNOT)
    parser.add_argument('-out', '--path_out', type=str, required=False,
                        help='path to the output directory', default=PATH_DATA)
    parser.add_argument('-nb', '--nb_comp', type=int, required=False,
                        help='number of component in Mixture model', default=2)
    params = vars(parser.parse_args())
    for k in (k for k in params if 'path' in k):
        params[k] = tl_data.update_path(params[k], absolute=True)
        # annotations are a dir + glob pattern, so check only the directory
        p = os.path.dirname(params[k]) if k == 'path_annot' else params[k]
        if not os.path.exists(p):
            raise FileNotFoundError('missing: %s' % p)
    # load saved configuration
    logging.info('ARG PARAMETERS: \n %r', params)
    return params
def args_parser():
    """Create a simple argument parser (experiments path, datasets, noise).

    :return obj: argparse
    :raises FileNotFoundError: when the experiments folder is missing
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--path', type=str, required=True,
                        help='path to set of experiments')
    parser.add_argument('-d', '--datasets', type=str, required=False,
                        nargs='+', help='result file name',
                        default=LIST_DATASETS)
    parser.add_argument('-s', '--sigma', type=str, required=False, nargs='+',
                        help='Gaussian sigma of additive noise',
                        default=NOISE_RANGE)
    args = vars(parser.parse_args())
    args['path'] = update_path(args['path'])
    if not os.path.isdir(args['path']):
        raise FileNotFoundError('missing: %s' % args['path'])
    return args
def test_experiments_postprocessing():
    """ testing experiment postprocessing """
    logging.getLogger().setLevel(logging.INFO)
    # default post-processing parameters pointing at the shared results folder
    params = {
        'res_cols': None,
        'func_stat': 'none',
        'type': 'synth',
        'name_results': [RESULTS_CSV],
        'name_config': CONFIG_YAML,
        'nb_workers': 2,
        'path': update_path('results')
    }
    dir_expts = glob.glob(os.path.join(params['path'], '*'))
    # in case the postprocessing is called before the experiments themselves,
    # first run the synthetic experiments so there is something to parse
    if not [p for p in dir_expts if os.path.isdir(p)]:
        test_experiments_soa_synth()
    r_parse.parse_experiments(params)
    # r_recomp expects a single result name (a string), not a list
    params.update({'name_results': RESULTS_CSV})
    r_recomp.parse_experiments(params)
    # re-run parsing on the recomputed "_NEW" results, single-threaded
    name_res = os.path.splitext(RESULTS_CSV)[0]
    params.update({'name_results': [name_res + '_NEW.csv'], 'nb_workers': 1})
    r_parse.parse_experiments(params)
def arg_parse_params(params):
    """Parse command-line options and make every path parameter absolute.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict params: default parameter values used by the parser
    :return ({str: str}, int): updated parameters
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', '--path_images', type=str, required=False,
                        default=params['path_images'],
                        help='path to directory & name pattern for images')
    parser.add_argument('-segs', '--path_segms', type=str, required=False,
                        default=params['path_segms'],
                        help='path to directory & name pattern for segmentation')
    parser.add_argument('-centers', '--path_centers', type=str, required=False,
                        default=params['path_centers'],
                        help='path to directory & name pattern for centres')
    parser.add_argument('-info', '--path_infofile', type=str, required=False,
                        default=params['path_infofile'],
                        help='path to the global information file')
    parser.add_argument('--stages', type=int, required=False, nargs='+',
                        default=[1, 2, 3, 4, 5], help='stage indexes')
    parser.add_argument('-out', '--path_output', type=str, required=False,
                        default=params['path_output'],
                        help='path to the output directory')
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_THREADS,
                        help='number of processes in parallel')
    args = vars(parser.parse_args())
    params.update(args)
    # resolve all path-like parameters to absolute project paths
    for key in [k for k in params if 'path' in k]:
        params[key] = tl_data.update_path(params[key], absolute=True)
    logging.info('ARG PARAMETERS: \n %r', params)
    return params
def parse_arg_params():
    """Create a simple argument parser with default values and validate inputs.

    :return obj: argparse
    :raises FileNotFoundError: when the annotation folder is missing
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', '--path_images', type=str, required=True,
                        help='path to dir with annot', default=PATH_IMAGES)
    parser.add_argument('--label', type=int, required=False, nargs='+',
                        help='labels to be replaced', default=[-1])
    parser.add_argument('--nb_workers', type=int, required=False,
                        help='number of jobs in parallel', default=NB_WORKERS)
    args = vars(parser.parse_args())
    # the images argument is a directory + name pattern; only the dir must exist
    p_dir = tl_data.update_path(os.path.dirname(args['path_images']))
    if not os.path.isdir(p_dir):
        raise FileNotFoundError('missing folder: %s' % args['path_images'])
    args['path_images'] = os.path.join(p_dir,
                                       os.path.basename(args['path_images']))
    logging.info(tl_expt.string_dict(args, desc='ARG PARAMETERS'))
    return args
def arg_parse_params(dict_paths):
    """Parse command-line options and resolve the given paths.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict dict_paths: default paths (annot, image, output)
    :return ({str: str}, {str: ...}): resolved paths and all parsed arguments
    :raises FileNotFoundError: when a parent folder of a path is missing
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-annot', '--path_annot', type=str, required=False,
                        help='annotations', default=dict_paths['annot'])
    parser.add_argument('-imgs', '--path_image', type=str, required=False,
                        help='path to directory & name pattern for images',
                        default=dict_paths['image'])
    parser.add_argument('-out', '--path_output', type=str, required=False,
                        help='path to the output directory',
                        default=dict_paths['output'])
    parser.add_argument('--padding', type=int, required=False,
                        help='crop padding [px]', default=25)
    parser.add_argument('--mask', type=int, required=False,
                        help='mask by the segmentation', default=1)
    parser.add_argument('-bg', '--background', type=int, required=False,
                        help='using background color', default=None, nargs='+')
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_WORKERS,
                        help='number of processes in parallel')
    args = vars(parser.parse_args())
    logging.info('ARG PARAMETERS: \n %r', args)

    def _resolve(k):
        # resolve the directory part and re-attach the file-name pattern
        p_dir = tl_data.update_path(os.path.dirname(args[k]))
        return os.path.join(p_dir, os.path.basename(args[k]))

    dict_paths = {k.split('_')[-1]: _resolve(k)
                  for k in args if k.startswith('path_')}
    for k in dict_paths:
        p_dir = os.path.dirname(dict_paths[k])
        if not os.path.exists(p_dir):
            raise FileNotFoundError('missing (%s) "%s"' % (k, p_dir))
    return dict_paths, args
def load_image(path_img, img_type=TYPES_LOAD_IMAGE[0]):
    """Load an image or annotation according to the chosen type.

    :param str path_img: path to the image file
    :param str img_type: one of TYPES_LOAD_IMAGE
    :return ndarray: loaded image, or None for an unsupported type
    """
    path_img = tl_data.update_path(path_img)
    if not os.path.isfile(path_img):
        raise FileNotFoundError('missing: "%s"' % path_img)

    if img_type == '2d_split':
        img, _ = tl_data.load_img_double_band_split(path_img)
        if img.ndim != 2:
            raise ImageDimensionError('image dims: %r' % img.shape)
    elif img_type == '2d_rgb':
        img, _ = tl_data.load_image_2d(path_img)
    elif img_type == '2d_segm':
        img, _ = tl_data.load_image_2d(path_img)
        if img.ndim == 3:
            # keep only the first channel of a colour-coded segmentation
            img = img[:, :, 0]
        if ANNOT_RELABEL_SEQUENCE:
            img, _, _ = segmentation.relabel_sequential(img)
    else:
        logging.error('not supported loading img_type: %s', img_type)
        img = None
    return img
def arg_parse_params(params):
    """Parse command-line options, merge an optional YAML config, resolve paths.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict params: default values used as argparse defaults
    :return {str: str}: updated parameters
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-list', '--path_list', type=str, required=False,
                        default=params['path_list'],
                        help='path to the list of image')
    parser.add_argument('-out', '--path_out', type=str, required=False,
                        default=params['path_out'],
                        help='path to the output directory')
    parser.add_argument('-n', '--name', type=str, required=False,
                        default='ovary', help='name of the experiment')
    parser.add_argument('-cfg', '--path_config', type=str, required=False,
                        default=None, help='path to the configuration')
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_WORKERS,
                        help='number of processes in parallel')
    parser.add_argument('-m', '--methods', type=str, required=False, nargs='+',
                        default=None, help='list of segment. methods')
    args = vars(parser.parse_args())
    params.update(args)

    cfg = args['path_config']
    if not isinstance(cfg, str) or cfg.lower() == 'none':
        params['path_config'] = ''
    else:
        cfg = tl_data.update_path(cfg)
        params['path_config'] = cfg
        if not os.path.isfile(cfg):
            raise FileNotFoundError('missing file: %s' % cfg)
        _, ext = os.path.splitext(cfg)
        if ext not in ('.yaml', '.yml'):
            raise RuntimeError('"%s" should be YAML file'
                               % os.path.basename(cfg))
        data = tl_expt.load_config_yaml(cfg)
        params.update(data)
        # command-line values win over config-file values
        params.update(args)

    for key in (k for k in args if 'path' in k):
        if not args[key]:
            continue
        params[key] = tl_data.update_path(args[key], absolute=True)
        if not os.path.exists(params[key]):
            raise FileNotFoundError('missing: %s' % params[key])
    # load saved configuration
    logging.info('ARG PARAMETERS: \n %r', params)
    return params
def arg_parse_params(params):
    """Parse command-line options and merge an optional JSON configuration.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict params: default values used as argparse defaults
    :return {str: str}: updated parameters
    :raises FileNotFoundError: when a given path does not exist
    :raises RuntimeError: when the config file is not a JSON file
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-list', '--path_list', type=str, required=False,
                        help='path to the list of image',
                        default=params['path_list'])
    parser.add_argument('-out', '--path_out', type=str, required=False,
                        help='path to the output directory',
                        default=params['path_out'])
    parser.add_argument('-n', '--name', type=str, required=False,
                        help='name of the experiment', default='ovary')
    parser.add_argument('-cfg', '--path_config', type=str, required=False,
                        help='path to the configuration', default=None)
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_THREADS,
                        help='number of processes in parallel')
    parser.add_argument('-m', '--methods', type=str, required=False, nargs='+',
                        help='list of segment. methods', default=None)
    arg_params = vars(parser.parse_args())
    params.update(arg_params)
    if not isinstance(arg_params['path_config'], str) \
            or arg_params['path_config'].lower() == 'none':
        params['path_config'] = ''
    else:
        params['path_config'] = tl_data.update_path(params['path_config'])
        if not os.path.isfile(params['path_config']):
            raise FileNotFoundError('missing file: %s' % params['path_config'])
        if os.path.splitext(params['path_config'])[-1] != '.json':
            raise RuntimeError('"%s" should be JSON file'
                               % params['path_config'])
        with open(params['path_config'], 'r') as fd:
            data = json.load(fd)
        params.update(data)
        # command-line values win over config-file values
        params.update(arg_params)
    for k in (k for k in arg_params if 'path' in k):
        if not arg_params[k]:
            continue
        params[k] = tl_data.update_path(arg_params[k], absolute=True)
        if not os.path.exists(params[k]):
            raise FileNotFoundError('missing: %s' % params[k])
    # load saved configuration
    logging.info('ARG PARAMETERS: \n %r', params)
    return params
def load_image_segm_center(idx_row, path_out=None, dict_relabel=None):
    """ by paths load images and segmentation and whether centers exist,
    load them; if the path out is given redraw visualisation of inputs

    :param (int, DF:row) idx_row: tuple of index and row
    :param str path_out: path to output directory
    :param dict dict_relabel: look-up table for relabeling
    :return(str, ndarray, ndarray, [[int, int]]):
        idx_name, img_rgb, segm, centers
    """
    idx, row_path = idx_row
    # resolve all input paths and make sure they exist
    for k in ['path_image', 'path_segm', 'path_centers']:
        row_path[k] = tl_data.update_path(row_path[k])
        if not os.path.exists(row_path[k]):
            raise FileNotFoundError('missing %s' % row_path[k])
    idx_name = get_idx_name(idx, row_path['path_image'])
    # the source image carries two bands (structure / gene expression)
    img_struc, img_gene = tl_data.load_img_double_band_split(
        row_path['path_image'], im_range=None)
    # img_rgb = np.array(Image.open(row_path['path_img']))
    img_rgb = tl_data.merge_image_channels(img_struc, img_gene)
    # normalise intensities into [0, 1] when needed
    if np.max(img_rgb) > 1:
        img_rgb = img_rgb / float(np.max(img_rgb))
    seg_ext = os.path.splitext(os.path.basename(row_path['path_segm']))[-1]
    if seg_ext == '.npz':
        # NPZ holds a (probabilistic) segmentation in its first array
        with np.load(row_path['path_segm'], allow_pickle=True) as npzfile:
            segm = npzfile[npzfile.files[0]]
        if dict_relabel is not None:
            segm = seg_lbs.merge_probab_labeling_2d(segm, dict_relabel)
    else:
        segm = tl_data.io_imread(row_path['path_segm'])
        if dict_relabel is not None:
            segm = seg_lbs.relabel_by_dict(segm, dict_relabel)
    # centres are optional; accept CSV landmarks or a PNG label image
    if row_path['path_centers'] is not None \
            and os.path.isfile(row_path['path_centers']):
        ext = os.path.splitext(os.path.basename(row_path['path_centers']))[-1]
        if ext == '.csv':
            centers = tl_data.load_landmarks_csv(row_path['path_centers'])
            # CSV landmarks come as (x, y); internal convention is (row, col)
            centers = tl_data.swap_coord_x_y(centers)
        elif ext == '.png':
            centers = tl_data.io_imread(row_path['path_centers'])
            # relabel loaded segm into relevant one
            centers = np.array(LUT_ANNOT_CENTER_RELABEL)[centers]
        else:
            logging.warning('not supported file format %s', ext)
            centers = None
    else:
        centers = None
    # optionally export a visual overlay of the loaded inputs
    if is_drawing(path_out):
        export_visual_input_image_segm(path_out, idx_name, img_rgb, segm,
                                       centers)
    return idx_name, img_rgb, segm, centers
def arg_parse_params(paths):
    """Parse command-line options for the input/result paths and run flags.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict paths: default paths used as argparse defaults
    :return ({str: ...}, bool, int): resolved paths, visual flag, nb. workers
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--images', type=str, required=False,
                        default=paths['images'],
                        help='path to directory & name pattern for images')
    parser.add_argument('--annots', type=str, required=False,
                        default=paths['annots'],
                        help='path to directory & name pattern for annotation')
    parser.add_argument('--segments', type=str, required=False,
                        default=paths['segments'],
                        help='path to directory & name pattern for segmentation')
    parser.add_argument('--centers', type=str, required=False,
                        default=paths['centers'],
                        help='path to directory & name pattern for centres')
    parser.add_argument('--results', type=str, required=False,
                        default=paths['results'],
                        help='path to the result directory')
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_WORKERS,
                        help='number of processes in parallel')
    parser.add_argument('--visual', required=False, action='store_true',
                        default=False, help='export visualisations')
    args = vars(parser.parse_args())
    export_visual = args['visual']
    for name in args:
        if name in ('nb_workers', 'visual'):
            continue
        value = args[name]
        # textual "none" (or a non-string) means the input is not used
        if not isinstance(value, str) or value.lower() == 'none':
            paths[name] = None
            continue
        paths[name] = tl_data.update_path(value, absolute=True)
        # the results entry is a directory itself; the others are glob patterns
        check = paths[name] if name == 'results' \
            else os.path.dirname(paths[name])
        if not os.path.exists(check):
            raise FileNotFoundError('missing: %s' % check)
    logging.info('ARG PARAMETERS: \n %s', paths)
    return paths, export_visual, args['nb_workers']
def parse_arg_params():
    """Create a simple argument parser and validate the provided paths.

    :return obj: argparse
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', '--path_images', type=str, required=True,
                        default=PATH_INPUT, help='path to dir with images')
    parser.add_argument('-out', '--path_out', type=str, required=True,
                        default=PATH_OUTPUT, help='path to output dir')
    parser.add_argument('-clrs', '--path_colors', type=str, required=False,
                        default=None, help='json with colour-label dict')
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_WORKERS, help='number of jobs in parallel')
    args = vars(parser.parse_args())
    for key in ('path_images', 'path_out'):
        # these hold a directory + name pattern; only the dir must exist
        folder = tl_data.update_path(os.path.dirname(args[key]))
        if not os.path.isdir(folder):
            raise FileNotFoundError('missing: %s' % args[key])
        args[key] = os.path.join(folder, os.path.basename(args[key]))
    if args['path_colors'] is not None:
        args['path_colors'] = tl_data.update_path(args['path_colors'])
    logging.info(tl_expt.string_dict(args, desc='ARG PARAMETERS'))
    return args
def arg_parse_params(params):
    """Parse command-line options for images, segmentation and SLIC settings.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict params: default values used as argparse defaults
    :return dict: parsed parameters
    :raises FileNotFoundError: when an input path does not exist
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', '--path_images', type=str, required=False,
                        help='path to directory & name pattern for image',
                        default=params['path_images'])
    parser.add_argument('-segm', '--path_segms', type=str, required=False,
                        help='path to directory & name pattern for annotation',
                        default=params['path_segms'])
    parser.add_argument('-out', '--path_out', type=str, required=False,
                        help='path to the output directory',
                        default=params['path_out'])
    parser.add_argument('--img_type', type=str, required=False,
                        default=params['img_type'], choices=TYPES_LOAD_IMAGE,
                        help='type of image to be loaded')
    parser.add_argument('--slic_size', type=int, required=False,
                        default=20, help='superpixels size')
    parser.add_argument('--slic_regul', type=float, required=False,
                        default=0.25, help='superpixel regularization')
    parser.add_argument('--slico', action='store_true', required=False,
                        default=False, help='using SLICO (ASLIC)')
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_WORKERS,
                        help='number of processes in parallel')
    params = vars(parser.parse_args())
    logging.info('ARG PARAMETERS: \n %r', params)
    for k in (k for k in params if 'path' in k):
        params[k] = tl_data.update_path(params[k])
        # a missing output dir is allowed; reset it and let the caller create it
        if k == 'path_out' and not os.path.isdir(params[k]):
            params[k] = ''
            continue
        # for glob patterns only the parent directory must exist
        p = os.path.dirname(params[k]) if '*' in params[k] else params[k]
        if not os.path.exists(p):
            raise FileNotFoundError('missing: (%s) "%s"' % (k, p))
    # if the config path is set load the it otherwise use default
    return params
def arg_parse_params(params):
    """Parse command-line arguments and merge an optional JSON configuration.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict params: default parameter values
    :return {str: ...}: updated parameters
    :raises FileNotFoundError: when a given input path does not exist
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--path_train_list', type=str, required=False,
                        help='path to the list of image',
                        default=params['path_train_list'])
    parser.add_argument('-i', '--path_predict_imgs', type=str, required=False,
                        help='path to folder & name pattern with new image',
                        default=params['path_predict_imgs'])
    parser.add_argument('-o', '--path_out', type=str, required=False,
                        help='path to the output directory',
                        default=params['path_out'])
    parser.add_argument('-n', '--name', type=str, required=False,
                        help='name of the experiment', default=params['name'])
    parser.add_argument('-cfg', '--path_config', type=str, required=False,
                        help='path to the segmentation config', default='')
    parser.add_argument('--img_type', type=str, required=False,
                        default=params['img_type'], choices=TYPES_LOAD_IMAGE,
                        help='type of image to be loaded')
    parser.add_argument('--nb_classes', type=int, required=False,
                        help='number of classes for segmentation',
                        default=params.get('nb_classes', 2))
    parser.add_argument('--nb_workers', type=int, required=False,
                        help='number of processes in parallel',
                        default=NB_THREADS)
    parser.add_argument('--visual', required=False, action='store_true',
                        help='export debug visualisations', default=False)
    parser.add_argument('--unique', required=False, action='store_true',
                        help='each experiment has uniques stamp',
                        default=EACH_UNIQUE_EXPERIMENT)
    args = vars(parser.parse_args())
    logging.info('ARG PARAMETERS: \n %r', args)
    for k in (k for k in args if 'path' in k):
        # empty / textual "none" paths are treated as not provided
        if args[k] == '' or args[k] == 'none':
            continue
        args[k] = tl_data.update_path(args[k])
        # prediction images are a dir + glob pattern; check only the directory
        p = os.path.dirname(args[k]) if k == 'path_predict_imgs' else args[k]
        if not os.path.exists(p):
            raise FileNotFoundError('missing: (%s) "%s"' % (k, p))
    # args['visual'] = bool(args['visual'])
    # if the config path is set load the it otherwise use default
    if os.path.isfile(args.get('path_config', '')):
        with open(args['path_config'], 'r') as fd:
            config = json.load(fd)
        params.update(config)
    # command-line values win over config-file values
    params.update(args)
    return params
def get_image_path(name_img, path_base=PATH_IMAGES):
    """Join the default image folder with a sample image name and resolve it.

    :param str name_img: image file name
    :param str path_base: base folder with images
    :return str: resolved image path

    >>> p = get_image_path(IMAGE_LENNA)
    >>> os.path.isfile(p)
    True
    >>> os.path.basename(p)
    'lena.png'
    """
    return update_path(os.path.join(path_base, name_img))
def parse_arg_params(params):
    """Create a simple argument parser for experiment post-processing.

    :param dict params: default values for config/results names
    :return: argparse
    :raises FileNotFoundError: when the experiments folder is missing
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--path', type=str, required=True,
                        help='path to set of experiments')
    parser.add_argument('-c', '--name_config', type=str, required=False,
                        help='config file name', default=params['name_config'])
    parser.add_argument('-r', '--name_results', type=str, required=False,
                        nargs='*', default=params['name_results'],
                        help='result file name')
    parser.add_argument('--result_columns', type=str, required=False,
                        default=None, nargs='*',
                        help='important columns from results')
    parser.add_argument('-f', '--func_stat', type=str, required=False,
                        help='type of stat over results', default='none')
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_WORKERS,
                        help='number of jobs running in parallel')
    args = vars(parser.parse_args())
    args['path'] = update_path(args['path'])
    if not os.path.isdir(args['path']):
        raise FileNotFoundError('missing: %s' % args['path'])
    return args
def aparse_params(dict_paths):
    """Parse command-line options and resolve the given paths.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict dict_paths: default paths (annot, segm, image, output)
    :return ({str: str}, obj): resolved paths and all parsed arguments
    :raises FileNotFoundError: when a parent folder of a path is missing
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--path_annot', type=str, required=True,
                        help='path to directory with annotations & name pattern',
                        default=dict_paths['annot'])
    parser.add_argument('-s', '--path_segm', type=str, required=True,
                        help='path to directory & name pattern for segmentation',
                        default=dict_paths['segm'])
    parser.add_argument('-i', '--path_image', type=str, required=False,
                        help='path to directory & name pattern for images',
                        default=dict_paths['image'])
    parser.add_argument('-o', '--path_output', type=str, required=False,
                        help='path to the output directory',
                        default=dict_paths['output'])
    parser.add_argument('--drop_labels', type=int, required=False, nargs='*',
                        help='list of skipped labels from statistic')
    parser.add_argument('--nb_workers', type=int, required=False,
                        help='number of processes in parallel',
                        default=NB_WORKERS)
    parser.add_argument('--overlap', type=float, required=False,
                        help='alpha for segmentation', default=0.2)
    parser.add_argument('--relabel', required=False, action='store_true',
                        help='relabel to find label relations', default=False)
    parser.add_argument('--visual', required=False, action='store_true',
                        help='export visualisations', default=False)
    args = vars(parser.parse_args())
    logging.info('ARG PARAMETERS: \n %r', args)
    # the image path is optional; the string "none" means not provided
    if not isinstance(args['path_image'], str) \
            or args['path_image'].lower() == 'none':
        args['path_image'] = None

    def _resolve(k):
        # resolve the directory part and re-attach the file-name pattern
        return os.path.join(tl_data.update_path(os.path.dirname(args[k])),
                            os.path.basename(args[k]))

    dict_paths = {k.split('_')[-1]: _resolve(k)
                  for k in args if k.startswith('path_') and args[k] is not None}
    for k in dict_paths:
        p_dir = os.path.dirname(dict_paths[k])
        if not os.path.isdir(p_dir):
            raise FileNotFoundError('missing: (%s) "%s"' % (k, p_dir))
    if not args['drop_labels']:
        args['drop_labels'] = []
    return dict_paths, args
def args_parse_params(params):
    """Create a simple argument parser with default input/output values.

    :param dict params: default path values
    :return obj: parsed arguments as a dict
    :raises FileNotFoundError: when a parent folder of a path is missing
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--path_in', type=str, required=True,
                        default=params['path_in'],
                        help='path to the folder with input image dataset')
    parser.add_argument('-o', '--path_out', type=str, required=True,
                        default=params['path_out'],
                        help='path to the output with experiment results')
    parser.add_argument('-t', '--threshold', type=float, required=False,
                        default=0.001, help='threshold for image information')
    parser.add_argument('-m', '--thr_method', type=str, required=False,
                        default='', choices=METHODS, help='used methods')
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_WORKERS, help='number of parallel processes')
    args = vars(parser.parse_args())
    for k in (k for k in args if k.startswith('path_')):
        # paths hold a directory + name pattern; only the dir must exist
        p = update_path(os.path.dirname(args[k]))
        if not os.path.exists(p):
            raise FileNotFoundError('missing (%s): %s' % (k, p))
        args[k] = os.path.join(p, os.path.basename(args[k]))
    return args
def arg_parse_params(params):
    """Parse command-line options and make all path parameters absolute.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict params: default parameter values
    :return dict: updated parameters
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', '--path_images', type=str, required=False,
                        default=params.get('path_images', None),
                        help='path to directory & name pattern for images')
    parser.add_argument('-ells', '--path_ellipses', type=str, required=False,
                        default=params.get('path_ellipses', None),
                        help='path to directory & name pattern for ellipses')
    parser.add_argument('-info', '--path_infofile', type=str, required=False,
                        default=params.get('path_infofile', None),
                        help='path to the global information file')
    parser.add_argument('-out', '--path_output', type=str, required=False,
                        default=params.get('path_output', None),
                        help='path to the output directory')
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_WORKERS,
                        help='number of processes in parallel')
    params.update(vars(parser.parse_args()))
    # resolve every provided path-like parameter to an absolute project path
    for key in params:
        if 'path' in key and params[key] is not None:
            params[key] = tl_data.update_path(params[key], absolute=True)
    logging.info('ARG PARAMETERS: \n %r', params)
    return params
def parse_arg_params():
    """Create a simple argument parser (input, output, dataset) and resolve paths.

    :return obj: object argparse<in, out, ant, name>
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-imgs', '--path_images', type=str, required=True,
                        help='path to the input images + name pattern')
    parser.add_argument('-segs', '--path_segms', type=str, required=True,
                        help='path to the input segms')
    parser.add_argument('-out', '--path_output', type=str, required=True,
                        help='path to the output')
    parser.add_argument('--overlap', type=float, required=False,
                        help='alpha for segmentation', default=0.)
    parser.add_argument('--nb_workers', type=int, required=False,
                        help='number of jobs in parallel', default=NB_WORKERS)
    args = parser.parse_args()
    paths = {
        'images': args.path_images,
        'segms': args.path_segms,
        'output': args.path_output,
    }
    for key in paths:
        # each path holds a directory + name pattern; validate the directory
        folder = tl_data.update_path(os.path.dirname(paths[key]))
        paths[key] = os.path.join(folder, os.path.basename(paths[key]))
        if not os.path.isdir(folder):
            raise FileNotFoundError('missing: %s' % paths[key])
    return paths, args
def parse_arg_params():
    """Parse the input parameters for experiment re-evaluation.

    :return dict: parsed parameters
    :raises FileNotFoundError: when the experiment folder is missing
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--path_expt', type=str, required=True,
                        help='path to the input experiment')
    parser.add_argument('-i', '--path_images', type=str, required=False,
                        help='path to the input images', default=None)
    parser.add_argument('-n', '--name_expt', type=str, required=False,
                        default='*', help='name of experiment')
    parser.add_argument('--nb_workers', type=int, required=False,
                        help='number of processes running in parallel',
                        default=NB_WORKERS)
    parser.add_argument('--visual', required=False, action='store_true',
                        help='visualise results', default=False)
    args = vars(parser.parse_args())
    logging.debug('ARG PARAMETERS: \n %r', args)
    args['path_expt'] = update_path(args['path_expt'])
    if not os.path.exists(args['path_expt']):
        raise FileNotFoundError('missing: %s' % args['path_expt'])
    return args
import glob import logging import multiprocessing as mproc from functools import partial import numpy as np sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root import imsegm.utilities.data_io as tl_data import imsegm.utilities.experiments as tl_expt import run_ellipse_annot_match as r_match IMAGE_CHANNEL = 0 # image channel for mass extraction NB_THREADS = max(1, int(mproc.cpu_count() * 0.8)) PATH_IMAGES = os.path.join(tl_data.update_path('data_images'), 'drosophila_ovary_slice') PATH_RESULTS = tl_data.update_path('results', absolute=True) SWAP_CONDITION = 'cc' DEFAULT_PARAMS = { 'path_images': os.path.join(PATH_IMAGES, 'image_cut-stage-2', '*.png'), 'path_output': os.path.join(PATH_RESULTS, 'image_cut-stage-2'), } def perform_orientation_swap(path_img, path_out, img_template, swap_type=SWAP_CONDITION): """ compute the density in front adn back part of the egg rotate eventually we split the egg into thirds instead half because the middle part variate
# project utilities: experiment helpers, data I/O, drawing and labeling
import imsegm.utilities.experiments as tl_expt
import imsegm.utilities.data_io as tl_data
import imsegm.utilities.drawing as tl_visu
import imsegm.labeling as seg_lbs

# global switch for exporting visualisations (constant name keeps its
# historical typo; renaming would be a code change)
EXPORT_VUSIALISATION = False
# number of parallel workers, derived from ~90% of available CPU cores
NB_WORKERS = tl_expt.nb_workers(0.9)

# names of the visualisation output sub-folders
NAME_DIR_VISUAL_1 = 'ALL_visualisation-1'
NAME_DIR_VISUAL_2 = 'ALL_visualisation-2'
NAME_DIR_VISUAL_3 = 'ALL_visualisation-3'
# sub-folders to skip when scanning experiment results
SKIP_DIRS = [
    'input', 'simple', NAME_DIR_VISUAL_1, NAME_DIR_VISUAL_2, NAME_DIR_VISUAL_3
]
# name template for the exported statistics table (%s is filled at runtime)
NAME_CSV_STAT = 'segmented-eggs_%s.csv'

# default dataset and results locations (resolved relative to the project)
PATH_IMAGES = tl_data.update_path(
    os.path.join('data-images', 'drosophila_ovary_slice'))
PATH_RESULTS = tl_data.update_path('results', absolute=True)
# default input glob patterns and output folder
PATHS = {
    'images': os.path.join(PATH_IMAGES, 'image', '*.jpg'),
    'annots': os.path.join(PATH_IMAGES, 'annot_eggs', '*.png'),
    'segments': os.path.join(PATH_IMAGES, 'segm', '*.png'),
    'centers': os.path.join(PATH_IMAGES, 'center_levels', '*.csv'),
    'results': os.path.join(PATH_RESULTS, 'experiment_egg-segment_ovary'),
}
# grey-level table with one RGB triple per label
# NOTE(review): presumably a colour look-up table for label visualisation —
# confirm against the drawing utilities that consume it
LUT_COLOR = np.array([
    (1., 1., 1.),
    (0.75, 0.75, 0.75),
    (0.5, 0.5, 0.5),
    (0.3, 0.3, 0.3),
])
def image_segmentation(idx_row, params, debug_export=DEBUG_EXPORT):
    """ image segmentation which prepare inputs (imsegm, centres)
    and perform segmentation of various imsegm methods

    :param (int, str) idx_row: index and table row holding 'path_*' entries
        for the input image, segmentation and detected centres
    :param dict params: segmentation parameters
    :param bool debug_export: export debug figures for the 'rg2sp' methods
    :return str: image name
    """
    _, row_path = idx_row
    # resolve every path-like entry of the row to an absolute path;
    # iterate over a copy of the keys since the dict is mutated in place
    for k in dict(row_path):
        if isinstance(k, str) and k.startswith('path_'):
            row_path[k] = tl_data.update_path(row_path[k], absolute=True)
    logging.debug('segmenting image: "%s"', row_path['path_image'])
    name = os.path.splitext(os.path.basename(row_path['path_image']))[0]

    img = load_image(row_path['path_image'])
    # make the image like RGB by stacking the single channel three times
    img_rgb = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
    seg = load_image(row_path['path_segm'], 'segm')
    assert img_rgb.shape[:2] == seg.shape, \
        'image %r and segm %r do not match' % (img_rgb.shape[:2], seg.shape)
    # skip images whose centres file is missing or empty
    if not os.path.isfile(row_path['path_centers']):
        logging.warning('no center was detected for "%s"', name)
        return name
    centers = tl_data.load_landmarks_csv(row_path['path_centers'])
    centers = tl_data.swap_coord_x_y(centers)
    if not list(centers):
        logging.warning('no center was detected for "%s"', name)
        return name
    # img = seg / float(seg.max())
    slic = seg_spx.segment_slic_img2d(img_rgb, sp_size=params['slic_size'],
                                      relative_compact=params['slic_regul'])
    # export a visualisation of the raw inputs (image, segmentation, centres)
    path_segm = os.path.join(params['path_exp'], 'input', name + '.png')
    export_draw_image_segm(path_segm, img_rgb, segm_obj=seg, centers=centers)
    # export a simplified 3-class version of the segmentation
    seg_simple = simplify_segm_3cls(seg)
    path_segm = os.path.join(params['path_exp'], 'simple', name + '.png')
    export_draw_image_segm(path_segm, seg_simple - 1.)

    # mapping: method name -> (segmentation function, its arguments)
    dict_segment = create_dict_segmentation(params, slic, seg, img, centers)

    image_name = name + '.png'
    centre_name = name + '.csv'
    # iterate over segmentation methods and perform segmentation on this image
    for method in dict_segment:
        (fn, args) = dict_segment[method]
        logging.debug(' -> %s on "%s"', method, name)
        path_dir = os.path.join(params['path_exp'], method)  # n.split('_')[0]
        path_segm = os.path.join(path_dir, image_name)
        path_centre = os.path.join(path_dir + DIR_CENTRE_POSIX, centre_name)
        path_fig = os.path.join(path_dir + DIR_VISUAL_POSIX, image_name)
        path_debug = os.path.join(path_dir + DIR_DEBUG_POSIX, name)
        # assuming that segmentation may fail
        try:
            t = time.time()
            # the region-growing ('rg2sp') methods accept an extra folder
            # for dumping per-iteration debug figures
            if debug_export and 'rg2sp' in method:
                os.mkdir(path_debug)
                segm_obj, centers, dict_export = fn(*args,
                                                    debug_export=path_debug)
            else:
                segm_obj, centers, dict_export = fn(*args)
            # also export ellipse params here or inside the segm fn
            if dict_export is not None:
                for k in dict_export:
                    export_partial(k, dict_export[k], path_dir, name)
            logging.info('running time of %r on image "%s" is %d s',
                         fn.__name__, image_name, time.time() - t)
            tl_data.io_imsave(path_segm, segm_obj.astype(np.uint8))
            export_draw_image_segm(path_fig, img_rgb, seg, segm_obj, centers)
            # export also centers (swapped back to x/y order for the CSV)
            centers = tl_data.swap_coord_x_y(centers)
            tl_data.save_landmarks_csv(path_centre, centers)
        except Exception:
            # one failing method must not stop the remaining methods
            logging.exception('segment fail for "%s" via %s', name, method)
    return name
SEGM_OVERLAP = 0.5 # paramters for SLIC segmentation SLIC_SIZE = 40 SLIC_REGUL = 0.3 # Region Growing configuration DEBUG_EXPORT = False RG2SP_THRESHOLDS = { # thresholds for updating between iterations 'centre': 20, 'shift': 10, 'volume': 0.05, 'centre_init': 50 } COLUMNS_ELLIPSE = ('xc', 'yc', 'a', 'b', 'theta') PATH_DATA = tl_data.update_path('data-images', absolute=True) PATH_IMAGES = os.path.join(PATH_DATA, 'drosophila_ovary_slice') # sample segmentation methods LIST_SAMPLE_METHODS = ('ellipse_moments', 'ellipse_ransac_mmt', 'ellipse_ransac_crit', 'GC_pixels-large', 'GC_pixels-shape', 'GC_slic-large', 'GC_slic-shape', 'rg2sp_greedy-mixture', 'rg2sp_GC-mixture', 'watershed_morph') # default segmentation configuration SEGM_PARAMS = { # ovary labels: background, funicular cells, nurse cells, cytoplasm 'tab-proba_ellipse': [0.01, 0.95, 0.95, 0.85], 'tab-proba_graphcut': [0.01, 0.6, 0.99, 0.75], 'tab-proba_RG2SP': [0.01, 0.6, 0.95, 0.75], 'path_single-model': os.path.join(PATH_DATA, 'RG2SP_eggs_single-model.pkl'),
sample_color_image_rand_segment) from imsegm.utilities.data_io import update_path from imsegm.utilities.drawing import figure_ray_feature from imsegm.descriptors import (cython_img2d_color_mean, create_filter_bank_lm_2d, compute_ray_features_segm_2d, shift_ray_features, reconstruct_ray_features_2d, FEATURES_SET_ALL, compute_selected_features_color2d) from imsegm.superpixels import segment_slic_img2d # angular step for Ray features ANGULAR_STEP = 15 # size of subfigure for visualise the Filter bank SUBPLOT_SIZE_FILTER_BANK = 3 PATH_OUTPUT = update_path('output', absolute=True) PATH_FIGURES_RAY = os.path.join(PATH_OUTPUT, 'temp_ray-features') # create the folder for visualisations if not os.path.exists(PATH_FIGURES_RAY): os.mkdir(PATH_FIGURES_RAY) def export_ray_results(seg, center, points, ray_dist_raw, ray_dist, name): """ export result from Ray features extractions :param ndarray seg: segmentation :param tuple(int,int) center: center of the Ray features :param [[int, int]] points: list of reconstructed points :param list(list(int)) ray_dist_raw: list of raw Ray distances in regular step :param list(list(int)) ray_dist: list of normalised Ray distances in regular step :param str name: name of particular figure