def parse_params(default_params, methods):
    """Assemble experiment parameters from defaults, CLI arguments and an
    optional YAML configuration file.

    :param dict default_params: baseline parameter values
    :param list(str) methods: list of possible methods
    :return dict: merged parameters
    """
    arg_parser = create_args_parser(default_params, methods)
    cli_params = parse_arg_params(arg_parser)
    merged = copy.deepcopy(default_params)
    merged.update(cli_params)
    # an existing YAML config may refine defaults, but never overrides
    # values the user passed explicitly on the command line
    path_cfg = cli_params.get('path_config', None)
    if path_cfg is None or not os.path.isfile(path_cfg):
        return merged
    logging.info('loading config: %s', path_cfg)
    d_config = load_config_yaml(path_cfg)
    logging.debug(string_dict(d_config, desc='LOADED CONFIG:'))
    # skip all keys already supplied via CLI arguments
    cfg_update = {
        key: val for key, val in d_config.items()
        if key not in cli_params or cli_params[key] is None
    }
    logging.debug(string_dict(cfg_update, desc='TO BE UPDATED:'))
    merged.update(cfg_update)
    return merged
def parse_experiment_folder(path_expt, params):
    """Collect configuration and result tables from one experiment folder.

    :param str path_expt: path to experiment folder
    :param dict params: parsing options ('name_config', 'func_stat', ...)
    :return pd.DataFrame: results joined with experiment info,
        or an empty frame when no results could be loaded
    """
    assert os.path.isdir(path_expt), 'missing %s' % path_expt
    path_config = os.path.join(path_expt, params['name_config'])
    assert os.path.exists(path_config), 'missing %s' % path_config
    # the config may be stored either as YAML or as plain text
    if path_config.endswith(('.yaml', '.yml')):
        dict_info = load_config_yaml(path_config)
    else:
        dict_info = parse_config_txt(path_config)
    logging.debug(' -> loaded params: %r', dict_info.keys())
    dict_info.update(count_folders_subdirs(path_expt))
    df_info = pd.DataFrame().from_dict(dict_info, orient='index').T
    try:
        func_stat = DICT_STATISTIC_FUNC.get(params['func_stat'], None)
        df_results = load_multiple_results(path_expt, func_stat, params)
    except Exception:
        # best-effort: a broken experiment yields an empty result table
        logging.exception('load_multiple_results: %s', path_expt)
        df_results = pd.DataFrame()
    if len(df_results) == 0:
        return df_results
    logging.debug(' -> results params: %r', df_results.columns.tolist())
    # replicate the single info row so it aligns with every result row
    extra_cols = [col for col in df_info.columns
                  if col not in df_results.columns]
    df_infos = pd.concat([df_info[extra_cols]] * len(df_results),
                         ignore_index=True)
    return pd.concat([df_results, df_infos], axis=1)
def get_path_dataset(path, path_imgs=None):
    """Resolve the dataset image path, preferring an explicit directory.

    :param str path: folder expected to contain the dataset config file
    :param str|None path_imgs: optional explicit image directory,
        returned as-is when it exists
    :return str|None: resolved image path, or None when it cannot be derived
    """
    # an explicitly given, existing directory wins over the config lookup
    if path_imgs is not None and os.path.isdir(path_imgs):
        return path_imgs
    cfg_path = os.path.join(path, NAME_CONFIG)
    if not os.path.isfile(cfg_path):
        return None
    config = load_config_yaml(cfg_path)
    if not all(field in config for field in FIELDS_PATH_IMAGES):
        return None
    # join the base path and image sub-path fields from the config
    return os.path.join(config[FIELDS_PATH_IMAGES[0]],
                        config[FIELDS_PATH_IMAGES[1]])
def arg_parse_params(params):
    """Parse command-line arguments and merge them into the given parameters.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict params: default parameters, also used as argparse defaults
    :return dict: parameters updated from CLI and an optional YAML config
    :raises FileNotFoundError: when a required path does not exist
    :raises RuntimeError: when the given config file is not a YAML file
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-list', '--path_list', type=str, required=False,
                        help='path to the list of input files',
                        default=params['path_list'])
    parser.add_argument('-imgs', '--path_images', type=str, required=False,
                        help='path to directory & name pattern for images',
                        default=params['path_images'])
    parser.add_argument('-segs', '--path_segms', type=str, required=False,
                        help='path to directory & name pattern for segmentation',
                        default=params['path_segms'])
    parser.add_argument('-centers', '--path_centers', type=str, required=False,
                        help='path to directory & name pattern for centres',
                        default=params['path_centers'])
    parser.add_argument('-info', '--path_infofile', type=str, required=False,
                        help='path to the global information file',
                        default=params['path_infofile'])
    parser.add_argument('-out', '--path_output', type=str, required=False,
                        help='path to the output directory',
                        default=params['path_output'])
    parser.add_argument('-n', '--name', type=str, required=False,
                        help='name of the experiment', default='ovary')
    parser.add_argument('-cfg', '--path_config', type=str, required=False,
                        help='path to the configuration', default=None)
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_WORKERS,
                        help='number of processes in parallel')
    params.update(vars(parser.parse_args()))
    paths = {}
    for k in (k for k in params if 'path' in k):
        # missing values or the textual "none" translate to an empty path
        if not isinstance(params[k], str) or params[k].lower() == 'none':
            paths[k] = ''
            continue
        if k in ['path_images', 'path_segms', 'path_centers', 'path_expt']:
            # these hold a directory plus a name pattern, so only the
            # directory part can be checked for existence
            p_dir = tl_data.update_path(os.path.dirname(params[k]))
            paths[k] = os.path.join(p_dir, os.path.basename(params[k]))
        else:
            paths[k] = tl_data.update_path(params[k], absolute=True)
            p_dir = paths[k]
        # raise instead of assert so validation survives `python -O`
        # (consistent with the sibling arg_parse_params variants)
        if not os.path.exists(p_dir):
            raise FileNotFoundError('missing (%s) %s' % (k, p_dir))
    # load saved configuration
    if params['path_config'] is not None:
        ext = os.path.splitext(params['path_config'])[-1]
        if ext not in ('.yaml', '.yml'):
            raise RuntimeError('wrong extension for %s' % params['path_config'])
        data = tl_expt.load_config_yaml(params['path_config'])
        params.update(data)
    params.update(paths)
    logging.info('ARG PARAMETERS: \n %r', params)
    return params
def arg_parse_params(params):
    """Parse command-line arguments and merge them into the given parameters.

    SEE: https://docs.python.org/3/library/argparse.html

    :return {str: str}: updated parameters
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-list', '--path_list', type=str, required=False,
                        help='path to the list of image',
                        default=params['path_list'])
    parser.add_argument('-out', '--path_out', type=str, required=False,
                        help='path to the output directory',
                        default=params['path_out'])
    parser.add_argument('-n', '--name', type=str, required=False,
                        help='name of the experiment', default='ovary')
    parser.add_argument('-cfg', '--path_config', type=str, required=False,
                        help='path to the configuration', default=None)
    parser.add_argument('--nb_workers', type=int, required=False,
                        default=NB_WORKERS,
                        help='number of processes in parallel')
    parser.add_argument('-m', '--methods', type=str, required=False,
                        nargs='+', help='list of segment. methods',
                        default=None)
    cli_args = vars(parser.parse_args())
    params.update(cli_args)
    cfg_path = cli_args['path_config']
    if not isinstance(cfg_path, str) or cfg_path.lower() == 'none':
        # no usable config was given on the command line
        params['path_config'] = ''
    else:
        params['path_config'] = tl_data.update_path(params['path_config'])
        if not os.path.isfile(params['path_config']):
            raise FileNotFoundError('missing file: %s' % params['path_config'])
        _, ext = os.path.splitext(params['path_config'])
        if ext not in ('.yaml', '.yml'):
            raise RuntimeError('"%s" should be YAML file'
                               % os.path.basename(params['path_config']))
        # config values are in turn overridden by explicit CLI arguments
        data = tl_expt.load_config_yaml(params['path_config'])
        params.update(data)
        params.update(cli_args)
    # normalise all non-empty path arguments and verify they exist
    for k in (k for k in cli_args if 'path' in k):
        if not cli_args[k]:
            continue
        params[k] = tl_data.update_path(cli_args[k], absolute=True)
        if not os.path.exists(params[k]):
            raise FileNotFoundError('missing: %s' % params[k])
    logging.info('ARG PARAMETERS: \n %r', params)
    return params
def arg_parse_params(params):
    """Argument parser from cmd.

    SEE: https://docs.python.org/3/library/argparse.html

    :param dict params: default parameters, also used as argparse defaults
    :return dict: parameters updated from CLI and an optional YAML config
    :raises FileNotFoundError: when a required path does not exist
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--path_train_list', type=str, required=False,
                        help='path to the list of image',
                        default=params['path_train_list'])
    parser.add_argument('-i', '--path_predict_imgs', type=str, required=False,
                        help='path to folder & name pattern with new image',
                        default=params['path_predict_imgs'])
    parser.add_argument('-o', '--path_out', type=str, required=False,
                        help='path to the output directory',
                        default=params['path_out'])
    parser.add_argument('-n', '--name', type=str, required=False,
                        help='name of the experiment', default=params['name'])
    parser.add_argument('-cfg', '--path_config', type=str, required=False,
                        help='path to the segmentation config', default='')
    parser.add_argument('--img_type', type=str, required=False,
                        default=params['img_type'], choices=TYPES_LOAD_IMAGE,
                        help='type of image to be loaded')
    parser.add_argument('--nb_classes', type=int, required=False,
                        help='number of classes for segmentation',
                        default=params.get('nb_classes', 2))
    parser.add_argument('--nb_workers', type=int, required=False,
                        help='number of processes in parallel',
                        default=NB_WORKERS)
    parser.add_argument('--visual', required=False, action='store_true',
                        help='export debug visualisations', default=False)
    parser.add_argument('--unique', required=False, action='store_true',
                        help='each experiment has uniques stamp',
                        default=EACH_UNIQUE_EXPERIMENT)
    args = vars(parser.parse_args())
    logging.info('ARG PARAMETERS: \n %r', args)
    for k in (k for k in args if 'path' in k):
        # skip unset paths; accept any capitalisation of "none" and
        # non-string values (e.g. None defaults inherited from `params`,
        # which previously crashed in update_path)
        if not isinstance(args[k], str) or args[k].lower() in ('', 'none'):
            continue
        args[k] = tl_data.update_path(args[k])
        # a prediction-image name pattern is validated only by its directory
        p = os.path.dirname(args[k]) if k == 'path_predict_imgs' else args[k]
        if not os.path.exists(p):
            raise FileNotFoundError('missing: (%s) "%s"' % (k, p))
    # if the config path is set load the it otherwise use default
    if os.path.isfile(args.get('path_config', '')):
        config = tl_expt.load_config_yaml(args['path_config'])
        params.update(config)
    params.update(args)
    return params
def parse_experiment_folder(path_expt, params):
    """ parse experiment folder, get configuration and results

    For "synthetic" experiments it additionally exports the ground-truth
    atlas, scores every estimated atlas against it and writes the extended
    results table back into the experiment folder.

    :param str path_expt: path to experiment folder
    :param dict params: expects keys 'name_config' (YAML config file name)
        and 'name_results' (CSV results file name)
    """
    assert os.path.isdir(path_expt), 'missing EXPERIMENT: %s' % path_expt
    path_config = os.path.join(path_expt, params['name_config'])
    assert any(path_config.endswith(ext) for ext in ['.yaml', '.yml']), '%s' % path_config
    assert os.path.exists(path_config), 'missing config: %s' % path_config
    dict_info = load_config_yaml(path_config)
    logging.debug(' -> loaded params: %r', dict_info.keys())
    path_results = os.path.join(path_expt, params['name_results'])
    assert path_results.endswith('.csv'), '%s' % path_results
    assert os.path.exists(path_results), 'missing result: %s' % path_results
    df_res = pd.read_csv(path_results, index_col=0)
    # keep the index also as a regular column so it survives the later
    # DataFrame rebuild from plain dicts
    index_name = df_res.index.name
    df_res[index_name] = df_res.index
    if dict_info.get('type') != 'synth':
        # only "synthetic" datasets ship a GT atlas to compare against
        logging.debug('non "synthetic" datasets does not have GT atlas')
        return
    # TODO: add recompute reconstruction error
    # load the GT atlas and export it next to the input data for inspection
    path_atlas = os.path.join(dict_info['path_in'], DIR_NAME_DICTIONARY)
    atlas_gt = dataset_compose_atlas(path_atlas)
    path_atlas_gt = os.path.join(dict_info['path_in'], SUB_PATH_GT_ATLAS)
    atlas_name = str(os.path.splitext(os.path.basename(path_atlas_gt))[0])
    export_image(os.path.dirname(path_atlas_gt), atlas_gt, atlas_name)
    plt.imsave(os.path.splitext(path_atlas_gt)[0] + '_visual.png', atlas_gt)
    results_new = []
    for _, row in df_res.iterrows():
        dict_row = dict(row)
        atlas_name = NAME_PATTERN_ATLAS % dict_row['name_suffix']
        atlas = load_atlas(os.path.join(path_expt, atlas_name))
        # try to find the best match among patterns / labels
        atlas = relabel_max_overlap_unique(atlas_gt, atlas)
        # recompute the similarity measure against the ground truth
        dict_measure = compute_classif_metrics(atlas_gt.ravel(), atlas.ravel())
        dict_measure = {'atlas %s' % k: dict_measure[k] for k in dict_measure}
        dict_row.update(dict_measure)
        results_new.append(dict_row)
        # visualise atlas
        atlas_name_visu = os.path.splitext(atlas_name)[0] + '_visual.png'
        path_visu = os.path.join(path_expt, atlas_name_visu)
        export_atlas_overlap(atlas_gt, atlas, path_visu)
        export_atlas_both(atlas_gt, atlas, path_visu)
    df_results_new = pd.DataFrame(results_new).set_index([index_name])
    path_results = os.path.join(path_expt, NAME_OUTPUT_RESULT)
    df_results_new.to_csv(path_results)
    # just to let it releases memory
    gc.collect()
    time.sleep(1)