def update_paths(args, upper_dirs=None, pattern='path'):
    """ find parameters with non-existing paths

    :param dict args: dictionary with all parameters
    :param list(str) upper_dirs: list of keys in parameters
        for which only the parent folder must exist
    :param str pattern: pattern specifying a key with a path
    :return tuple(dict,list(str)): updated parameters and keys of missing paths

    >>> update_paths({'sample': 123})[1]
    []
    >>> update_paths({'path_': '.'})[1]
    []
    >>> params = {'path_out': './nothing'}
    >>> update_paths(params)[1]
    ['path_out']
    >>> update_paths(params, upper_dirs=['path_out'])[1]
    []
    """
    if upper_dirs is None:
        upper_dirs = []
    missing = []
    for k in (k for k in args if pattern in k):
        if '*' in os.path.basename(args[k]) or k in upper_dirs:
            # for a wildcard or an "upper dir" key, check only the parent folder
            p = update_path(os.path.dirname(args[k]))
            args[k] = os.path.join(p, os.path.basename(args[k]))
        else:
            args[k] = update_path(args[k])
            p = args[k]
        if not os.path.exists(p):
            logging.warning('missing "%s": %s', k, p)
            missing.append(k)
    return args, missing
def filter_paired_landmarks(item, path_dataset, path_reference, col_source, col_target):
    """ filter all relevant landmarks which were used and copy them to experiment

    The use case: in a certain challenge stage users provided only a subset
    of all image landmarks, which could also be shuffled. The idea is to identify
    all landmarks the user actually used (provided in the dataset) and filter
    them from the temporary reference dataset.

    :param dict|Series item: experiment DataFrame row
    :param str path_dataset: path to provided landmarks
    :param str path_reference: path to the complete landmark collection
    :param str col_source: column name of landmarks to be transformed
    :param str col_target: column name of landmarks to be compared
    :return tuple(float,ndarray,ndarray): match ratio, filtered reference and moving landmarks

    >>> p_data = update_path('data-images')
    >>> p_csv = os.path.join(p_data, 'pairs-imgs-lnds_histol.csv')
    >>> df = pd.read_csv(p_csv)
    >>> ratio, lnds_ref, lnds_move = filter_paired_landmarks(dict(df.iloc[0]), p_data, p_data,
    ...     ImRegBenchmark.COL_POINTS_MOVE, ImRegBenchmark.COL_POINTS_REF)
    >>> ratio
    1.0
    >>> lnds_ref.shape == lnds_move.shape
    True
    """
    path_ref = update_path(item[col_source], pre_path=path_reference)
    if not os.path.isfile(path_ref):
        raise FileNotFoundError('missing landmarks: %s' % path_ref)
    path_load = update_path(item[col_source], pre_path=path_dataset)
    if not os.path.isfile(path_load):
        raise FileNotFoundError('missing landmarks: %s' % path_load)
    pairs = common_landmarks(load_landmarks(path_ref), load_landmarks(path_load), threshold=1)
    if not pairs.size:
        logging.warning('there is no pairing between the dataset and the user reference landmarks')
        return 0., np.empty([0]), np.empty([0])

    pairs = sorted(pairs.tolist(), key=lambda p: p[1])
    ind_ref = np.asarray(pairs)[:, 0]
    # keep only indices which fit within the smaller landmark set
    nb_common = min([
        len(load_landmarks(update_path(item[col], pre_path=path_reference)))
        for col in (col_target, col_source)
    ])
    ind_ref = ind_ref[ind_ref < nb_common]

    path_lnd_ref = update_path(item[col_target], pre_path=path_reference)
    lnds_filter_ref = load_landmarks(path_lnd_ref)[ind_ref]
    path_lnd_move = update_path(item[col_source], pre_path=path_reference)
    lnds_filter_move = load_landmarks(path_lnd_move)[ind_ref]

    ratio_matches = len(ind_ref) / float(nb_common)
    if ratio_matches > 1:
        raise ValueError('suspicious ratio for %i paired and %i common landmarks'
                         % (len(pairs), nb_common))
    return ratio_matches, lnds_filter_ref, lnds_filter_move
def replicate_missing_warped_landmarks(df_experiments, path_dataset, path_experiment):
    """ if some warped landmarks are missing, replace the path by the initial landmarks

    :param DF df_experiments: experiment table
    :param str path_dataset: path to dataset folder
    :param str path_experiment: path to user experiment folder
    :return DF: experiment table
    """
    # find empty warped landmarks paths
    missing_mask = df_experiments[ImRegBenchmark.COL_POINTS_MOVE_WARP].isnull()
    # for the empty cells, place the initial landmarks
    df_experiments.loc[missing_mask, ImRegBenchmark.COL_POINTS_MOVE_WARP] = \
        df_experiments.loc[missing_mask, ImRegBenchmark.COL_POINTS_MOVE]
    # for the empty cells, place the maximal execution time
    df_experiments.loc[missing_mask, ImRegBenchmark.COL_TIME] = \
        df_experiments[ImRegBenchmark.COL_TIME].max()

    count = 0
    # iterate over the whole table
    for idx, row in df_experiments.iterrows():
        path_csv = update_path(row[ImRegBenchmark.COL_POINTS_MOVE_WARP],
                               pre_path=path_experiment)
        if not os.path.isfile(path_csv):
            path_csv = update_path(row[ImRegBenchmark.COL_POINTS_MOVE],
                                   pre_path=path_dataset)
            df_experiments.loc[idx, ImRegBenchmark.COL_POINTS_MOVE_WARP] = path_csv
            count += 1
    logging.info('Missing warped landmarks: %i', count)
    return df_experiments
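# A minimal usage sketch for the back-filling above; the CSV name and the
# folder paths are illustrative assumptions, not part of the original code.
import pandas as pd

df_expt = pd.read_csv('registration-results.csv')  # hypothetical results table
df_expt = replicate_missing_warped_landmarks(df_expt,
                                             path_dataset='data-images',
                                             path_experiment='output-test')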
def _visual_image_move_warp_lnds_move_warp(cls, item, path_dataset=None, path_experiment=None):
    """ visualise the case with moving image and landmarks warped
    to the reference frame so they are easy to overlay

    :param dict item: row with the experiment
    :param str|None path_dataset: path to the dataset folder
    :param str|None path_experiment: path to the experiment folder
    :return obj|None:
    """
    assert isinstance(item.get(cls.COL_POINTS_MOVE_WARP), str), \
        'Missing registered points in "%s"' % cls.COL_POINTS_MOVE_WARP
    path_points_warp = update_path(item[cls.COL_POINTS_MOVE_WARP], pre_path=path_experiment)
    if not os.path.isfile(path_points_warp):
        logging.warning('missing warped landmarks for: %r', dict(item))
        return

    points_ref, points_move, path_img_ref = cls._load_landmarks(item, path_dataset)
    image_warp = cls._load_warped_image(item, path_experiment)
    points_warp = load_landmarks(path_points_warp)
    if not list(points_warp):
        return
    # draw image with landmarks
    image = draw_image_points(image_warp, points_warp)
    _path = update_path(item[cls.COL_REG_DIR], pre_path=path_experiment)
    save_image(os.path.join(_path, cls.NAME_IMAGE_MOVE_WARP_POINTS), image)
    del image

    # visualise how the landmarks moved during registration
    image_ref = load_image(path_img_ref)
    fig = draw_images_warped_landmarks(image_ref, image_warp,
                                       points_move, points_ref, points_warp)
    del image_ref, image_warp
    return fig
def filter_export_landmarks(idx_row, path_output, path_dataset, path_reference):
    """ filter all relevant landmarks which were used and copy them to experiment

    The use case: in a certain challenge stage users provided only a subset
    of all image landmarks, which could also be shuffled. The idea is to identify
    all landmarks the user actually used (provided in the dataset) and filter
    them from the temporary reference dataset.

    :param tuple(idx,dict|Series) idx_row: experiment DataFrame row
    :param str path_output: path to output folder
    :param str path_dataset: path to provided landmarks
    :param str path_reference: path to the complete landmark collection
    :return tuple(idx,float): record index and match ratio
    """
    idx, row = idx_row
    ratio_matches, lnds_filter_ref, lnds_filter_move = filter_paired_landmarks(
        row, path_dataset, path_reference,
        ImRegBenchmark.COL_POINTS_MOVE, ImRegBenchmark.COL_POINTS_REF)

    # save filtered moving and reference landmarks
    for col, lnds_flt in [(ImRegBenchmark.COL_POINTS_REF, lnds_filter_ref),
                          (ImRegBenchmark.COL_POINTS_MOVE, lnds_filter_move)]:
        path_out = update_path(row[col], pre_path=path_output)
        create_folder(os.path.dirname(path_out), ok_existing=True)
        if os.path.isfile(path_out):
            assert np.array_equal(load_landmarks(path_out), lnds_flt), \
                'attempting to overwrite a different set of landmarks'
        save_landmarks(path_out, lnds_flt)
    return idx, ratio_matches
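# Hedged sketch for a single record; the cover-table name and folder paths
# below are illustrative assumptions.
import pandas as pd

df_cover = pd.read_csv('pairs-imgs-lnds_histol.csv')  # hypothetical cover table
idx, ratio = filter_export_landmarks(
    (0, df_cover.iloc[0]), path_output='output-test',
    path_dataset='data-images', path_reference='reference-lnds')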
def visualise_registration(cls, idx_row, path_dataset=None, path_experiment=None):
    """ visualise the registration results according to which landmarks
    were estimated - in the reference or moving frame

    :param tuple(int,dict) idx_row: row from the iterated table
    :param str path_dataset: path to the dataset folder
    :param str path_experiment: path to the experiment folder
    :return str|None: path to the exported figure, if any
    """
    _, row = idx_row
    row = dict(row)  # convert even Series to dictionary
    fig, path_fig = None, None
    # visualise the particular experiment
    if isinstance(row.get(cls.COL_POINTS_MOVE_WARP), str):
        fig = cls._visual_image_move_warp_lnds_move_warp(row, path_dataset, path_experiment)
    elif isinstance(row.get(cls.COL_POINTS_REF_WARP), str):
        fig = cls._visual_image_move_warp_lnds_ref_warp(row, path_dataset, path_experiment)
    else:
        logging.error('Visualisation: no output image or landmarks')

    if fig is not None:
        path_fig = os.path.join(update_path(row[cls.COL_REG_DIR], pre_path=path_experiment),
                                cls.NAME_IMAGE_WARPED_VISUAL)
        export_figure(path_fig, fig)
    return path_fig
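# Hedged sketch: export a visualisation figure for every experiment row;
# the results-table name and folder paths are illustrative assumptions.
import pandas as pd

df_expt = pd.read_csv('registration-results.csv')  # hypothetical results table
for idx_row in df_expt.iterrows():
    path_fig = ImRegBenchmark.visualise_registration(
        idx_row, path_dataset='data-images', path_experiment='output-test')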
def generate_reg_pairs(rp_imgs, rp_lnds, pairs, public, path_images=DATASET_IMAGES):
    """ format registration pairs as dictionaries/rows in the cover table for a set

    :param list(str) rp_imgs: relative paths to images
    :param list(str) rp_lnds: relative paths to related landmarks
    :param list(tuple(int,int)) pairs: pairing among images/landmarks
    :param list(bool) public: marks whether the particular pair is used
        for training or evaluation
    :param str path_images: path to the dataset folder
    :return list(dict): registration pairs
    """
    reg_pairs = []
    for k, (i, j) in enumerate(pairs):
        img_size, img_diag = image_sizes(update_path(rp_imgs[i], pre_path=path_images))
        reg_pairs.append({
            ImRegBenchmark.COL_IMAGE_REF: rp_imgs[i],
            ImRegBenchmark.COL_IMAGE_MOVE: rp_imgs[j],
            ImRegBenchmark.COL_POINTS_REF: rp_lnds[i],
            ImRegBenchmark.COL_POINTS_MOVE: rp_lnds[j],
            ImRegBenchmark.COL_STATUS: VAL_STATUS_TRAIN if public[k] else VAL_STATUS_TEST,
            ImRegBenchmark.COL_IMAGE_SIZE: img_size,
            ImRegBenchmark.COL_IMAGE_DIAGONAL: img_diag,
        })
    return reg_pairs
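# Minimal sketch of composing a cover table from the generated pairs; the image
# and landmark names below are illustrative assumptions.
import pandas as pd

rp_imgs = ['case1/scale-5pc/HE.jpg', 'case1/scale-5pc/PanCytokeratin.jpg']
rp_lnds = ['case1/scale-5pc/HE.csv', 'case1/scale-5pc/PanCytokeratin.csv']
reg_pairs = generate_reg_pairs(rp_imgs, rp_lnds, pairs=[(0, 1)], public=[True])
df_cover = pd.DataFrame(reg_pairs)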
def _visual_image_move_warp_lnds_ref_warp(cls, item, path_dataset=None, path_experiment=None):
    """ visualise the case with reference landmarks warped to the moving frame

    :param dict item: row with the experiment
    :param str|None path_dataset: path to the dataset folder
    :param str|None path_experiment: path to the experiment folder
    :return obj|None:
    """
    if not isinstance(item.get(cls.COL_POINTS_REF_WARP), str):
        raise ValueError('Missing registered points in "%s"' % cls.COL_POINTS_REF_WARP)
    path_points_warp = update_path(item[cls.COL_POINTS_REF_WARP], pre_path=path_experiment)
    if not os.path.isfile(path_points_warp):
        logging.warning('missing warped landmarks for: %r', dict(item))
        return

    points_ref, points_move, path_img_ref = cls._load_landmarks(item, path_dataset)
    points_warp = load_landmarks(path_points_warp)
    if not list(points_warp):
        return
    # draw the moving image with the warped landmarks
    image_move = load_image(update_path(item[cls.COL_IMAGE_MOVE], pre_path=path_dataset))
    image = draw_image_points(image_move, points_warp)
    _path = update_path(item[cls.COL_REG_DIR], pre_path=path_experiment)
    save_image(os.path.join(_path, cls.NAME_IMAGE_REF_POINTS_WARP), image)
    del image

    # overlay the reference and the warped moving image
    image_ref = load_image(path_img_ref)
    image_warp = cls._load_warped_image(item, path_experiment)
    image = overlap_two_images(image_ref, image_warp)
    _path = update_path(item[cls.COL_REG_DIR], pre_path=path_experiment)
    save_image(os.path.join(_path, cls.NAME_IMAGE_REF_WARP), image)
    del image, image_warp

    # visualise how the landmarks moved during registration
    fig = draw_images_warped_landmarks(image_ref, image_move,
                                       points_ref, points_move, points_warp)
    return fig
def update_path_(path, path_base=None):
    """ update the image path with a possible base path

    :param str path: the tail of the path
    :param str|None path_base: optional base path
    :return str: updated path
    """
    path = os.path.join(path_base, str(path)) if path_base else path
    return update_path(path, absolute=True)
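# Tiny illustration of the helper above; the file and folder names are
# arbitrary examples, resolved against the project root.
path_csv = update_path_('pairs-imgs-lnds_mix.csv', path_base='data-images')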
def replicate_missing_warped_landmarks(df_experiments, path_dataset, path_experiment):
    """ if some warped landmarks are missing, replace the path by the initial landmarks

    :param DF df_experiments: experiment table
    :param str path_dataset: path to dataset folder
    :param str path_experiment: path to user experiment folder
    :return DF: experiment table
    """
    # find empty warped landmarks paths
    missing_mask = df_experiments[ImRegBenchmark.COL_POINTS_MOVE_WARP].isnull()
    if ImRegBenchmark.COL_POINTS_REF_WARP in df_experiments.columns:
        # if there are also target warped landmarks, allow using them
        missing_mask &= df_experiments[ImRegBenchmark.COL_POINTS_REF_WARP].isnull()
    # for the empty cells, place the initial landmarks
    df_experiments.loc[missing_mask, ImRegBenchmark.COL_POINTS_MOVE_WARP] = \
        df_experiments.loc[missing_mask, ImRegBenchmark.COL_POINTS_MOVE]
    # for the empty cells, place the maximal execution time
    df_experiments.loc[missing_mask, ImRegBenchmark.COL_TIME] = \
        df_experiments[ImRegBenchmark.COL_TIME].max()

    count = 0
    # iterate over the whole table and check whether the path is valid
    for idx, row in df_experiments.iterrows():
        # select reference/moving warped landmarks
        use_move_warp = isinstance(row.get(ImRegBenchmark.COL_POINTS_MOVE_WARP), str)
        col_lnds_warp = ImRegBenchmark.COL_POINTS_MOVE_WARP \
            if use_move_warp else ImRegBenchmark.COL_POINTS_REF_WARP
        # extract the CSV path
        path_csv = update_path(row[col_lnds_warp], pre_path=path_experiment)
        if not os.path.isfile(path_csv):
            # if the path is invalid, put there the initial path from the dataset
            path_csv = update_path(row[ImRegBenchmark.COL_POINTS_MOVE],
                                   pre_path=path_dataset)
            df_experiments.loc[idx, ImRegBenchmark.COL_POINTS_MOVE_WARP] = path_csv
            count += 1
    logging.info('Missing warped landmarks: %i', count)
    return df_experiments
def _update_path(self, path, destination='data'):
    """ update the path to the dataset or output

    :param str path: original path
    :param str destination: type of update - `data` | `expt`
    :return str: updated path
    """
    if destination == 'data' and 'path_dataset' in self.params:
        path = os.path.join(self.params['path_dataset'], path)
    elif destination == 'expt' and 'path_exp' in self.params:
        path = os.path.join(self.params['path_exp'], path)
    path = update_path(path, absolute=True)
    return path
def _absolute_path(self, path, destination='data', base_path=None):
    """ update the path to the dataset or output

    :param str path: original path
    :param str destination: type of update - `data` for the data source
        and `expt` for the output experimental folder
    :param str|None base_path: optional base path
    :return str: updated path
    """
    if destination == 'data' and 'path_dataset' in self.params:
        path = os.path.join(self.params['path_dataset'], path)
    elif destination == 'expt' and 'path_exp' in self.params:
        path = os.path.join(self.params['path_exp'], path)
    path = update_path(path, absolute=True)
    return path
def assert_paths(args):
    """ check missing paths

    :param dict args: dictionary of arguments
    :return dict: dictionary of updated arguments

    >>> assert_paths({'path_': 'missing'})  # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
    ...
    AssertionError: missing: (path_) "..."
    >>> assert_paths({'abc': 123})
    {'abc': 123}
    """
    for k in (k for k in args if 'path' in k):
        args[k] = update_path(args[k])
        assert os.path.exists(args[k]), 'missing: (%s) "%s"' % (k, args[k])
    return args
def __check_exist_path(self):
    """Check the existence of all paths in parameters.

    Check every entry of the parameters dictionary whose key contains
    one of the words 'path', 'dir' or 'file'.
    """
    if 'path_out' not in self.params:
        raise ValueError('missing "path_out" among parameters')
    self.params['path_out'] = update_path(self.params.get('path_out'))
    list_names = [n for n in self.params
                  if any(m in n.lower() for m in ['path', 'dir', 'file'])]
    for n in list_names:
        p = os.path.abspath(os.path.expanduser(self.params[n]))
        if not os.path.exists(p):
            raise FileNotFoundError('given path/file/dir "%s" does not exist!' % p)
        self.params[n] = p
    for n in [n for n in self.params if 'exec' in n]:
        # in case the executable is defined relative to the user home folder
        if os.path.expanduser(self.params[n]) != self.params[n]:
            self.params[n] = os.path.expanduser(self.params[n])
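# Hedged sketch of a parameter dictionary the check above accepts: 'path_out'
# is mandatory, and every key containing 'path', 'dir' or 'file' must point to
# an existing location; all values below are illustrative assumptions.
params = {
    'path_out': 'output-test',
    'path_table': 'data-images/pairs-imgs-lnds_mix.csv',
    'exec_Fiji': '~/Applications/Fiji.app/ImageJ-linux64',  # '~' gets expanded
}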
def _load_warped_image(cls, item, path_experiment=None):
    """ load the warped image if it exists

    :param dict item: row with the experiment
    :param str|None path_experiment: path to the experiment folder
    :return ndarray:
    """
    name_img = item.get(cls.COL_IMAGE_MOVE_WARP)
    if not isinstance(name_img, str):
        logging.warning('Missing registered image in "%s"', cls.COL_IMAGE_MOVE_WARP)
        image_warp = None
    else:
        path_img_warp = update_path(name_img, pre_path=path_experiment)
        if os.path.isfile(path_img_warp):
            image_warp = load_image(path_img_warp)
        else:
            logging.warning('Defined image is missing: %s', path_img_warp)
            image_warp = None
    return image_warp
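# Hedged sketch: loading the warped image of one experiment record; the
# relative path and the experiment folder are illustrative assumptions, and
# calling the internal helper directly is shown only for demonstration.
item = {ImRegBenchmark.COL_IMAGE_MOVE_WARP: 'case1/warped-image.png'}
image_warp = ImRegBenchmark._load_warped_image(item, path_experiment='output-test')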
def filter_landmarks(idx_row, path_output, path_dataset, path_reference):
    """ filter all relevant landmarks which were used and copy them to experiment

    :param tuple(idx,dict|Series) idx_row: experiment DataFrame row
    :param str path_output: path to output folder
    :param str path_dataset: path to provided landmarks
    :param str path_reference: path to the complete landmark collection
    :return tuple(idx,float): record index and match ratio
    """
    idx, row = idx_row
    path_ref = update_path(row[ImRegBenchmark.COL_POINTS_MOVE], pre_path=path_reference)
    path_load = update_path(row[ImRegBenchmark.COL_POINTS_MOVE], pre_path=path_dataset)
    pairs = common_landmarks(load_landmarks(path_ref), load_landmarks(path_load), threshold=1)
    if not pairs.size:
        return idx, 0.
    pairs = sorted(pairs.tolist(), key=lambda p: p[1])
    ind_ref = np.asarray(pairs)[:, 0]
    # keep only indices which fit within the smaller landmark set
    nb_common = min([
        len(load_landmarks(update_path(row[col], pre_path=path_reference)))
        for col in [ImRegBenchmark.COL_POINTS_REF, ImRegBenchmark.COL_POINTS_MOVE]
    ])
    ind_ref = ind_ref[ind_ref < nb_common]

    # save filtered moving and reference landmarks
    for col in [ImRegBenchmark.COL_POINTS_REF, ImRegBenchmark.COL_POINTS_MOVE]:
        path_in = update_path(row[col], pre_path=path_reference)
        path_out = update_path(row[col], pre_path=path_output)
        create_folder(os.path.dirname(path_out), ok_existing=True)
        save_landmarks(path_out, load_landmarks(path_in)[ind_ref])

    # compute the ratio of found landmarks
    len_lnds_ref = len(load_landmarks(
        update_path(row[ImRegBenchmark.COL_POINTS_REF], pre_path=path_reference)))
    ratio_matches = len(pairs) / float(len_lnds_ref)
    return idx, ratio_matches
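# Hedged example of running the filtering over a whole cover table and keeping
# the per-record match ratios; table and folder names are illustrative assumptions.
import pandas as pd

df_cover = pd.read_csv('pairs-imgs-lnds_histol.csv')  # hypothetical cover table
ratios = dict(
    filter_landmarks(idx_row, path_output='output-test',
                     path_dataset='data-images', path_reference='reference-lnds')
    for idx_row in df_cover.iterrows()
)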
def compute_registration_statistic(cls, idx_row, df_experiments,
                                   path_dataset=None, path_experiment=None):
    """ after successful registration load the initial and estimated landmarks,
    afterwards compute various statistics for the initial and final alignment

    :param tuple(int,dict) idx_row: row from the iterated table
    :param DF df_experiments: DataFrame with experiments
    :param str|None path_dataset: path to the dataset folder
    :param str|None path_experiment: path to the experiment folder
    """
    idx, row = idx_row
    row = dict(row)  # convert even Series to dictionary
    points_ref, points_move, path_img_ref = cls._load_landmarks(row, path_dataset)
    img_diag = cls._image_diag(row, path_img_ref)
    df_experiments.loc[idx, cls.COL_IMAGE_DIAGONAL] = img_diag

    # compute landmarks statistic
    cls.compute_registration_accuracy(df_experiments, idx, points_ref, points_move,
                                      'init', img_diag, wo_affine=False)

    # load transformed landmarks
    if (cls.COL_POINTS_MOVE_WARP not in row) and (cls.COL_POINTS_REF_WARP not in row):
        logging.error('Statistic: no output landmarks')
        return

    # define the target and init state according to the experiment results
    is_move_warp = isinstance(row.get(cls.COL_POINTS_MOVE_WARP), str)
    points_init = points_move if is_move_warp else points_ref
    points_target = points_ref if is_move_warp else points_move
    col_lnds_warp = cls.COL_POINTS_MOVE_WARP if is_move_warp else cls.COL_POINTS_REF_WARP

    # check if there are reference landmarks
    if points_target is None:
        logging.warning('Missing landmarks in "%s"',
                        cls.COL_POINTS_REF if is_move_warp else cls.COL_POINTS_MOVE)
        return
    # load warped landmarks
    path_lnds_warp = update_path(row[col_lnds_warp], pre_path=path_experiment)
    if path_lnds_warp and os.path.isfile(path_lnds_warp):
        points_warp = load_landmarks(path_lnds_warp)
        points_warp = np.nan_to_num(points_warp)
    else:
        logging.warning('Invalid path to the landmarks: "%s" <- "%s"',
                        path_lnds_warp, row[col_lnds_warp])
        return

    # compute affine statistic
    affine_diff = compute_affine_transf_diff(points_init, points_target, points_warp)
    for name in affine_diff:
        df_experiments.loc[idx, name] = affine_diff[name]

    # compute landmarks statistic
    cls.compute_registration_accuracy(df_experiments, idx, points_target, points_warp,
                                      'elastic', img_diag, wo_affine=True)
    cls.compute_registration_accuracy(df_experiments, idx, points_target, points_warp,
                                      'target', img_diag, wo_affine=False)
    row_ = dict(df_experiments.loc[idx])
    # compute the robustness
    if 'TRE Mean' in row_:
        df_experiments.loc[idx, cls.COL_ROBUSTNESS] = \
            compute_tre_robustness(points_target, points_init, points_warp)
def compute_registration_statistic(
    cls,
    idx_row,
    df_experiments,
    path_dataset=None,
    path_experiment=None,
    path_reference=None,
):
    """ after successful registration load the initial and estimated landmarks,
    afterwards compute various statistics for the initial and final alignment

    :param tuple(int,dict) idx_row: row from the iterated table
    :param DF df_experiments: DataFrame with experiments
    :param str|None path_dataset: path to the provided dataset folder
    :param str|None path_reference: path to the complete landmark collection folder
    :param str|None path_experiment: path to the experiment folder
    """
    idx, row = idx_row
    row = dict(row)  # convert even Series to dictionary
    # load common landmarks and image size
    points_ref, points_move, path_img_ref = cls._load_landmarks(row, path_dataset)
    img_diag = cls._image_diag(row, path_img_ref)
    df_experiments.loc[idx, cls.COL_IMAGE_DIAGONAL] = img_diag

    # compute landmarks statistic
    cls.compute_registration_accuracy(df_experiments, idx, points_ref, points_move,
                                      'init', img_diag, wo_affine=False)

    # define the target and init state according to the experiment results
    use_move_warp = isinstance(row.get(cls.COL_POINTS_MOVE_WARP), str)
    if use_move_warp:
        points_init, points_target = points_move, points_ref
        col_source, col_target = cls.COL_POINTS_MOVE, cls.COL_POINTS_REF
        col_lnds_warp = cls.COL_POINTS_MOVE_WARP
    else:
        points_init, points_target = points_ref, points_move
        col_source, col_target = cls.COL_POINTS_REF, cls.COL_POINTS_MOVE
        col_lnds_warp = cls.COL_POINTS_REF_WARP

    # optional filtering against the complete reference landmark collection
    if path_reference:
        ratio, points_target, _ = filter_paired_landmarks(
            row, path_dataset, path_reference, col_source, col_target)
        df_experiments.loc[idx, COL_PAIRED_LANDMARKS] = np.round(ratio, 2)

    # load transformed landmarks
    if (cls.COL_POINTS_MOVE_WARP not in row) and (cls.COL_POINTS_REF_WARP not in row):
        logging.error('Statistic: no output landmarks')
        return
    # check if there are reference landmarks
    if points_target is None:
        logging.warning('Missing landmarks in "%s"',
                        cls.COL_POINTS_REF if use_move_warp else cls.COL_POINTS_MOVE)
        return
    # load warped landmarks
    path_lnds_warp = update_path(row[col_lnds_warp], pre_path=path_experiment)
    if path_lnds_warp and os.path.isfile(path_lnds_warp):
        points_warp = load_landmarks(path_lnds_warp)
        points_warp = np.nan_to_num(points_warp)
    else:
        logging.warning('Invalid path to the landmarks: "%s" <- "%s"',
                        path_lnds_warp, row[col_lnds_warp])
        return
    df_experiments.loc[idx, cls.COL_NB_LANDMARKS_INPUT] = \
        min(len(points_init), len(points_target))
    df_experiments.loc[idx, cls.COL_NB_LANDMARKS_WARP] = len(points_warp)

    # compute affine statistic
    affine_diff = compute_affine_transf_diff(points_init, points_target, points_warp)
    for name in affine_diff:
        df_experiments.loc[idx, name] = affine_diff[name]

    # compute landmarks statistic
    cls.compute_registration_accuracy(df_experiments, idx, points_target, points_warp,
                                      'elastic', img_diag, wo_affine=True)
    cls.compute_registration_accuracy(df_experiments, idx, points_target, points_warp,
                                      'target', img_diag, wo_affine=False)
    row_ = dict(df_experiments.loc[idx])
    # compute the robustness
    if 'TRE Mean' in row_:
        df_experiments.loc[idx, cls.COL_ROBUSTNESS] = \
            compute_tre_robustness(points_target, points_init, points_warp)
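# Hedged sketch of the statistics pass over all experiments, writing the
# metrics back into df_expt in place; the table and folder names below are
# illustrative assumptions.
import pandas as pd

df_expt = pd.read_csv('registration-results.csv')  # hypothetical results table
for idx_row in df_expt.iterrows():
    ImRegBenchmark.compute_registration_statistic(
        idx_row, df_expt, path_dataset='data-images',
        path_experiment='output-test', path_reference=None)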
def _load_landmarks(cls, item, path_dataset):
    """ load the reference and moving landmarks and resolve the reference image path

    :param dict item: row with the experiment
    :param str path_dataset: path to the dataset folder
    :return tuple(ndarray,ndarray,str): reference/moving landmarks, path to reference image
    """
    path_img_ref, _, path_lnds_ref, path_lnds_move = \
        [update_path(item[col], pre_path=path_dataset) for col in cls.COVER_COLUMNS]
    points_ref = load_landmarks(path_lnds_ref)
    points_move = load_landmarks(path_lnds_move)
    return points_ref, points_move, path_img_ref
import os
import sys
import shutil
import logging
import unittest

import pandas as pd
from numpy.testing import assert_raises, assert_array_almost_equal

sys.path += [os.path.abspath('.'), os.path.abspath('..')]  # Add path to root
from birl.utilities.data_io import update_path
from birl.utilities.dataset import args_expand_parse_images
from birl.utilities.experiments import parse_arg_params, try_decorator
from birl.cls_benchmark import ImRegBenchmark
from birl.cls_benchmark import (
    NAME_CSV_RESULTS, NAME_TXT_RESULTS, NAME_CSV_REGISTRATION_PAIRS, COVER_COLUMNS,
    COL_IMAGE_MOVE_WARP, COL_POINTS_REF_WARP, COL_POINTS_MOVE_WARP,
    _visual_image_move_warp_lnds_move_warp, _visual_image_ref_warp_lnds_move_warp,
    visualise_registration)
from birl.bm_template import BmTemplate

PATH_DATA = update_path('data_images')
PATH_CSV_COVER_MIX = os.path.join(PATH_DATA, 'pairs-imgs-lnds_mix.csv')
PATH_CSV_COVER_ANHIR = os.path.join(PATH_DATA, 'pairs-imgs-lnds_histol.csv')
# logging.basicConfig(level=logging.INFO)


class TestBmRegistration(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        logging.basicConfig(level=logging.INFO)
        path_base = os.path.dirname(update_path('requirements.txt'))
        cls.path_out = os.path.join(path_base, 'output-test')
        shutil.rmtree(cls.path_out, ignore_errors=True)
        os.mkdir(cls.path_out)

    def _remove_default_experiment(self, bm_name):
class BmUnwarpJ(ImRegBenchmark):
    """ Benchmark for the ImageJ plugin - bUnwarpJ

    This benchmark is not run in the tests since the method requires
    a manual installation of ImageJ.

    For the app installation details, see the module documentation.

    EXAMPLE
    -------
    >>> from birl.utilities.data_io import create_folder, update_path
    >>> path_out = create_folder('temp_results')
    >>> fn_path_conf = lambda n: os.path.join(update_path('configs'), n)
    >>> path_csv = os.path.join(update_path('data_images'), 'pairs-imgs-lnds_mix.csv')
    >>> params = {'path_table': path_csv,
    ...           'path_out': path_out,
    ...           'exec_Fiji': 'ImageJ-linux64',
    ...           'preprocessing': ['hist-matching'],
    ...           'nb_workers': 2,
    ...           'unique': False,
    ...           'path_config': fn_path_conf('ImageJ_bUnwarpJ_histol.yaml')}
    >>> benchmark = BmUnwarpJ(params)
    >>> benchmark.run()  # doctest: +SKIP
    >>> params['path_config'] = fn_path_conf('ImageJ_bUnwarpJ-SIFT_histol.yaml')
    >>> benchmark = BmUnwarpJ(params)
    >>> benchmark.run()  # doctest: +SKIP
    >>> del benchmark
    >>> shutil.rmtree(path_out, ignore_errors=True)
    """
    #: required experiment parameters
    REQUIRED_PARAMS = ImRegBenchmark.REQUIRED_PARAMS + ['exec_Fiji', 'path_config']
    #: path to IJ scripts
    PATH_IJ_SCRIPTS = os.path.join(update_path('scripts'), 'ImageJ')
    #: path/name of image registration script
    PATH_SCRIPT_REGISTRATION_BASE = os.path.join(PATH_IJ_SCRIPTS,
                                                 'apply-bUnwarpJ-registration.bsh')
    #: path/name of image registration script with features
    PATH_SCRIPT_REGISTRATION_SIFT = os.path.join(PATH_IJ_SCRIPTS,
                                                 'apply-SIFT-bUnwarpJ-registration.bsh')
    #: path/name of image/landmarks warping script
    PATH_SCRIPT_WARP_LANDMARKS = os.path.join(PATH_IJ_SCRIPTS,
                                              'apply-bUnwarpJ-transform.bsh')
    # PATH_SCRIPT_HIST_MATCH_IJM = os.path.join(PATH_IJ_SCRIPTS,
    #                                           'histogram-matching-for-macro.bsh')
    #: command for executing the image registration
    COMMAND_REGISTRATION = \
        '%(exec_Fiji)s --headless %(path_bsh)s' \
        ' %(source)s %(target)s %(params)s' \
        ' %(output)s/transform-direct.txt' \
        ' %(output)s/transform-inverse.txt'
    #: internal name of converted landmarks for the transf. script
    NAME_LANDMARKS = 'source_landmarks.pts'
    #: name of moving landmarks warped by the transf. script
    NAME_LANDMARKS_WARPED = 'warped_source_landmarks.pts'
    #: resulting inverse transformation
    NAME_TRANSF_INVERSE = 'transform-inverse.txt'
    #: resulting direct transformation
    NAME_TRANSF_DIRECT = 'transform-direct.txt'
    #: command for executing the warping of image and landmarks
    COMMAND_WARP_LANDMARKS = \
        '%(exec_Fiji)s --headless %(path_bsh)s' \
        ' %(source)s %(target)s' \
        ' %(output)s/' + NAME_LANDMARKS + \
        ' %(output)s/' + NAME_LANDMARKS_WARPED + \
        ' %(transf-inv)s' \
        ' %(transf-dir)s' \
        ' %(warp)s'
    #: required parameters in the configuration file for bUnwarpJ
    REQUIRED_PARAMS_BUNWARPJ = ('mode', 'subsampleFactor', 'minScale', 'maxScale',
                                'divWeight', 'curlWeight', 'landmarkWeight',
                                'imageWeight', 'consistencyWeight', 'stopThreshold')
    #: required parameters in the configuration file for SIFT features
    REQUIRED_PARAMS_SIFT = ('initialSigma', 'steps', 'minOctaveSize', 'maxOctaveSize',
                            'fdSize', 'fdBins', 'rod', 'maxEpsilon',
                            'minInlierRatio', 'modelIndex')

    #: default bUnwarpJ and SIFT parameters
    DEFAULT_PARAMS = {
        'bUnwarpJ': {
            #: registration mode (0-Accurate, 1-Fast, 2-Mono)
            'mode': 1,
            #: image subsampling factor (0 = 2^0, 7 = 2^7)
            'subsampleFactor': 0,
            #: (0-Very Coarse, 1-Coarse, 2-Fine, 3-Very Fine)
            'minScale': 0,
            #: (0-Very Coarse, 1-Coarse, 2-Fine, 3-Very Fine, 4-Super Fine)
            'maxScale': 3,
            #: weight to penalize divergence
            'divWeight': 0.1,
            #: weight to penalize curl
            'curlWeight': 0.1,
            #: weight to penalize landmark location error
            'landmarkWeight': 0.,
            #: weight to penalize intensity difference
            'imageWeight': 1.,
            #: weight to penalize consistency difference
            'consistencyWeight': 10.,
            #: error function stopping threshold value
            'stopThreshold': 0.01,
        },
        'SIFT': {
            #: initial Gaussian blur sigma
            'initialSigma': 1.6,
            #: steps per scale octave
            'steps': 3,
            #: minimum image size in pixels
            'minOctaveSize': 64,
            #: maximum image size in pixels
            'maxOctaveSize': 1024,
            #: feature descriptor size
            'fdSize': 8,
            #: feature descriptor orientation bins
            'fdBins': 8,
            #: closest/next-closest ratio
            'rod': 0.92,
            #: maximal alignment error in pixels
            'maxEpsilon': 25,
            #: inlier ratio
            'minInlierRatio': 0.05,
            #: expected transformation
            # (0:Translation, 1:Rigid, 2:Similarity, 3:Affine, 4:Perspective)
            'modelIndex': 1,
        }
    }
    # assert all(k in DEFAULT_PARAMS['bUnwarpJ'] for k in REQUIRED_PARAMS_BUNWARPJ), \
    #     'default params are missing some required parameters for bUnwarpJ'
    # assert all(k in DEFAULT_PARAMS['SIFT'] for k in REQUIRED_PARAMS_SIFT), \
    #     'default params are missing some required parameters for SIFT'

    def _prepare(self):
        """ prepare the benchmark - copy configurations """
        logging.info('-> copy configuration...')
        self._copy_config_to_expt('path_config')

    def _generate_regist_command(self, item):
        """ generate the registration command(s)

        :param dict item: dictionary with registration params
        :return str|list(str): the execution commands
        """
        path_im_ref, path_im_move, _, _ = self._get_paths(item, prefer_pproc=True)
        path_dir = self._get_path_reg_dir(item)
        config = self.DEFAULT_PARAMS
        config = dict_deep_update(config, load_config_yaml(self.params['path_config']))
        assert config['bUnwarpJ']['mode'] < 2, \
            'Mono mode does not support the inverse transform,' \
            ' which is needed for landmarks warping.'
        config_sift = [config['SIFT'][k] for k in self.REQUIRED_PARAMS_SIFT] \
            if config.get('SIFT', False) else []
        config_bunwarpj = [config['bUnwarpJ'][k] for k in self.REQUIRED_PARAMS_BUNWARPJ]
        path_reg_script = self.PATH_SCRIPT_REGISTRATION_SIFT if config_sift \
            else self.PATH_SCRIPT_REGISTRATION_BASE
        cmd = self.COMMAND_REGISTRATION % {
            'exec_Fiji': self.params['exec_Fiji'],
            'path_bsh': path_reg_script,
            'target': path_im_ref,
            'source': path_im_move,
            'output': path_dir,
            'params': ' '.join(map(str, config_sift + config_bunwarpj)),
        }
        return cmd

    def _extract_warped_image_landmarks(self, item):
        """ get the registration results - the warped registered images and landmarks

        :param dict item: dictionary with registration params
        :return dict: paths to the warped images/landmarks
        """
        logging.debug('.. warp the registered image and get landmarks')
        path_dir = self._get_path_reg_dir(item)
        path_im_ref, path_im_move, _, path_lnds_move = \
            self._get_paths(item, prefer_pproc=False)
        path_log = os.path.join(path_dir, self.NAME_LOG_REGISTRATION)

        # warp moving landmarks to the reference frame
        path_img_warp = os.path.join(path_dir, os.path.basename(path_im_move))
        dict_params = {
            'exec_Fiji': self.params['exec_Fiji'],
            'path_bsh': self.PATH_SCRIPT_WARP_LANDMARKS,
            'source': path_im_move,
            'target': path_im_ref,
            'output': path_dir,
            'transf-inv': os.path.join(path_dir, self.NAME_TRANSF_INVERSE),
            'transf-dir': os.path.join(path_dir, self.NAME_TRANSF_DIRECT),
            'warp': path_img_warp,
        }
        # export source points to TXT
        pts_source = load_landmarks(path_lnds_move)
        save_landmarks(os.path.join(path_dir, self.NAME_LANDMARKS), pts_source)
        # execute the transformation
        exec_commands(self.COMMAND_WARP_LANDMARKS % dict_params,
                      path_logger=path_log, timeout=self.EXECUTE_TIMEOUT)
        # load warped landmarks from TXT
        path_lnds_warp = os.path.join(path_dir, self.NAME_LANDMARKS_WARPED)
        if os.path.isfile(path_lnds_warp):
            points_warp = load_landmarks(path_lnds_warp)
            path_lnds_warp = os.path.join(path_dir, os.path.basename(path_lnds_move))
            save_landmarks(path_lnds_warp, points_warp)
        else:
            path_lnds_warp = None
        # return results
        return {
            self.COL_IMAGE_MOVE_WARP: path_img_warp,
            self.COL_POINTS_MOVE_WARP: path_lnds_warp,
        }

    @staticmethod
    def extend_parse(arg_parser):
        """ extend the basic arg parser by some extra required parameters

        :return object:
        """
        # SEE: https://docs.python.org/3/library/argparse.html
        arg_parser.add_argument('-Fiji', '--exec_Fiji', type=str, required=True,
                                help='path to the Fiji executable')
        arg_parser.add_argument('-cfg', '--path_config', type=str, required=True,
                                help='path to the bUnwarpJ configuration')
        return arg_parser
import os
import sys
import time
import logging
import json
import shutil

sys.path += [os.path.abspath('.'), os.path.abspath('..')]  # Add path to root
from birl.utilities.data_io import update_path, load_landmarks, save_landmarks
from birl.utilities.experiments import create_basic_parse, parse_arg_params, exec_commands
from birl.cls_benchmark import ImRegBenchmark, NAME_LOG_REGISTRATION, COL_TIME
from birl.bm_template import main
from bm_experiments import bm_comp_perform

PATH_IJ_SCRIPTS = os.path.join(update_path('scripts'), 'ImageJ')
PATH_SCRIPT_REGISTRATION = os.path.join(PATH_IJ_SCRIPTS, 'apply-bUnwarpJ-registration.bsh')
PATH_SCRIPT_WARP_LANDMARKS = os.path.join(PATH_IJ_SCRIPTS, 'apply-bUnwarpJ-transform.bsh')
PATH_SCRIPT_HIST_MATCHING = os.path.join(PATH_IJ_SCRIPTS, 'histogram-matching.bsh')
# PATH_SCRIPT_HIST_MATCH_IJM = os.path.join(PATH_IJ_SCRIPTS, 'histogram-matching-for-macro.bsh')
NAME_LANDMARKS = 'source_landmarks.txt'
NAME_LANDMARKS_WARPED = 'warped_source_landmarks.txt'
COMMAND_WARP_LANDMARKS = '%(path_fiji)s --headless %(path_bsh)s' \
                         ' %(source)s %(target)s' \
                         ' %(output)s/' + NAME_LANDMARKS + \
                         ' %(output)s/' + NAME_LANDMARKS_WARPED + \
                         ' %(output)s/transform-inverse.txt' \
                         ' %(output)s/transform-direct.txt'
"""Check whether it generates correct outputs and resulting values.

Copyright (C) 2017-2019 Jiri Borovec <*****@*****.**>
"""
import os
import sys
import unittest

from parameterized import parameterized

sys.path += [os.path.abspath('.'), os.path.abspath('..')]  # Add path to root
from birl.utilities.data_io import update_path, load_image
from birl.utilities.dataset import image_histogram_matching, CONVERT_RGB

PATH_ROOT = os.path.dirname(update_path('birl'))
PATH_DATA = update_path('data_images')
PATH_IMAGE_REF = os.path.join(PATH_DATA, 'rat-kidney_', 'scale-5pc',
                              'Rat-Kidney_HE.jpg')
PATH_IMAGE_SRC = os.path.join(PATH_DATA, 'rat-kidney_', 'scale-5pc',
                              'Rat-Kidney_PanCytokeratin.jpg')


class TestHistogramMatching(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.img_ref = load_image(PATH_IMAGE_REF)
        cls.img_src = load_image(PATH_IMAGE_SRC)

    @parameterized.expand(list(CONVERT_RGB.keys()))
    def test_hist_matching(self, clr_space):