def read(filename):
    """Load a pickled object from *filename*.

    Args:
        filename - path to the pickle file

    Returns:
        the unpickled object

    Raises:
        mincError if the file does not exist or cannot be unpickled
    """
    # BUG fix: original tested `if os.path.exists:` without calling it --
    # a bare function object is always truthy, so the missing-file branch
    # below was unreachable and open() raised IOError instead of mincError.
    if os.path.exists(filename):
        with open(filename, 'rb') as p:
            try:
                # NOTE(review): pickle.load executes arbitrary code on
                # untrusted input -- only use with trusted files.
                return pickle.load(p)
            except Exception:
                # narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit still propagate untouched
                raise mincError(' -- Problem reading the pickle %s !' % filename)
    else:
        raise mincError(' -- Pickle %s does not exists!' % filename)
def build_estimate(description_json, parameters, output_prefix, int_par_count=None):
    """Build a scan and transform estimate from a regression model description.

    Args:
        description_json - path to a JSON file holding "intensity_model" and
                           "velocity_model" regression-model descriptions
        parameters       - flat list of regression parameters; when
                           int_par_count is given, the first int_par_count
                           entries are intensity parameters and the rest are
                           velocity parameters, otherwise the same list is
                           used for both models
        output_prefix    - prefix (directory + basename) for the outputs
        int_par_count    - optional split point inside `parameters`

    Raises:
        mincError when the parameter counts do not match the model volumes
    """
    desc = None
    with open(description_json, 'r') as f:
        desc = json.load(f)

    intensity_parameters = parameters
    velocity_parameters = parameters
    if int_par_count is not None:
        # split one flat vector into the two model-specific vectors
        intensity_parameters = parameters[:int_par_count]
        velocity_parameters = parameters[int_par_count:]

    if len(velocity_parameters) != len(desc["velocity_model"]["volume"]) or \
       len(intensity_parameters) != len(desc["intensity_model"]["volume"]):
        print(desc["intensity_model"]["volume"])
        print("intensity_parameters={}".format(repr(intensity_parameters)))
        print(desc["velocity_model"]["volume"])
        print("velocity_parameters={}".format(repr(velocity_parameters)))
        # BUG fix: corrected typos in the error message
        # ("inconsisten number of paramters" -> "inconsistent number of parameters")
        raise mincError("{} inconsistent number of parameters, expected {}".
                        format(repr(intensity_parameters),
                               len(desc["velocity_model"]["volume"])))

    velocity = MriDatasetRegress(from_dict=desc["velocity_model"])
    intensity = MriDatasetRegress(from_dict=desc["intensity_model"])

    output_scan = MriDataset(prefix=os.path.dirname(output_prefix),
                             name=os.path.basename(output_prefix))
    output_transform = LDDMriTransform(prefix=os.path.dirname(output_prefix),
                                       name=os.path.basename(output_prefix))

    build_approximation(intensity, velocity,
                        intensity_parameters, velocity_parameters,
                        output_scan, output_transform)
def apply_linear_model(lin_model, parameters, output_volume):
    """build a volume, for a given regression model and parameters

    Args:
        lin_model     - regression model with .N (parameter count) and
                        .volume (list of coefficient volumes)
        parameters    - sequence of regression parameters, length must be lin_model.N
        output_volume - path of the output minc volume

    Returns:
        True on success

    Raises:
        mincError when the parameter count does not match, or the tools fail
    """
    try:
        with mincTools() as m:
            if lin_model.N != len(parameters):
                raise mincError("Expected: {} parameters, got {}".format(
                    lin_model.N, len(parameters)))

            # create minccalc expression: sum of coefficient-volume * parameter
            # (idiom fix: join over enumerate instead of an index loop + append)
            exp = '+'.join('A[{}]*{}'.format(i, p)
                           for (i, p) in enumerate(parameters))

            m.calc(lin_model.volume, exp, output_volume)
            return True
    except mincError as e:
        print("Exception in apply_linear_model:{}".format(str(e)))
        traceback.print_exc(file=sys.stdout)
        raise
    except Exception:
        # narrowed from a bare `except:`; the exception is still re-raised,
        # this handler only logs the traceback
        print("Exception in apply_linear_model:{}".format(sys.exc_info()[0]))
        traceback.print_exc(file=sys.stdout)
        raise
def normalize_intensity(in_scan, out_scan, parameters={}, model=None):
    """ Perform global intensity scale normalization

    Args:
        in_scan    - input scan object with .scan and .mask attributes
        out_scan   - output scan object with .scan attribute
        parameters - dict of options: 'disable', 'order', 'model',
                     'nuyl', 'nuyl2', 'nuyl2_fwhm', 'nuyl2_iter';
                     None or {'disable': True} bypasses processing.
                     NOTE: the {} default is never mutated here, so the
                     shared-mutable-default pitfall does not apply; kept
                     as-is to preserve caller-visible semantics.
        model      - optional model object with .scan and .mask; when None,
                     parameters['model'] is used instead

    Raises:
        mincError when no model is available
    """
    # TODO: make output exp file
    with mincTools() as minc:
        if not minc.checkfiles(inputs=[in_scan.scan],
                               outputs=[out_scan.scan]):
            return

        if parameters is not None and not parameters.get('disable', False):
            order = parameters.get('order', 1)
            _model = None
            if model is None:
                _model = parameters.get('model', None)
            else:
                _model = model.scan
            if _model is None:
                raise mincError('Need model ')

            scan_mask = None
            model_mask = None
            if in_scan.mask is not None and model is not None:
                scan_mask = in_scan.mask
                model_mask = model.mask

            # BUG fix: this was `elif`, chained onto the mask-setup `if`
            # above -- whenever both masks were available NO normalization
            # method ran at all, even though the masks are passed to the
            # normalizers below. Made it an independent `if`.
            if parameters.get('nuyl', False):
                minc.nuyl_normalize(in_scan.scan, _model, out_scan.scan,
                                    source_mask=scan_mask,
                                    target_mask=model_mask)
            elif parameters.get('nuyl2', False):
                hl.nuyl_normalize2(
                    in_scan.scan,
                    _model,
                    out_scan.scan,
                    #source_mask=input_mask,target_mask=model_mask,
                    fwhm=parameters.get('nuyl2_fwhm', 2.0),
                    iterations=parameters.get('nuyl2_iter', 4),
                    )
            else:
                minc.volume_pol(in_scan.scan, _model, out_scan.scan,
                                order=order,
                                source_mask=scan_mask,
                                target_mask=model_mask)
        else:
            # HACK just by-pass processing if parameters are empty or disabled
            shutil.copyfile(in_scan.scan, out_scan.scan)
def main():
    # Entry point: dispatch a non-linear registration run based on CLI options.
    # parse_options() is defined elsewhere in this file.
    options = parse_options()
    try:
        # refuse to overwrite an existing output unless --clobber was given
        if os.path.exists(options.output) and not options.clobber:
            raise mincError('File exists : {}'.format(options.output))

        # optional JSON file with method-specific registration parameters
        parameters = None
        if options.param is not None:
            with open(options.param, 'r') as f:
                parameters = json.load(f)

        if options.method == 'ants':
            ipl.ants_registration.non_linear_register_ants2(
                options.source,
                options.target,
                options.output,
                source_mask=options.source_mask,
                target_mask=options.target_mask,
                init_xfm=options.init_xfm,
                parameters=parameters,
                downsample=options.downsample,
                level=options.level,
                start=options.start,
                )
        elif options.method == 'elastix':
            if parameters is None:
                # build a default multi-resolution pyramid: powers of two
                # from `start` down to (but not below) `level`, each repeated
                # for the three spatial axes, e.g. "8 8 8 4 4 4 ..."
                levels = ''
                for i in range(int(math.log(options.start) / math.log(2)), -1, -1):
                    res = 2**i
                    if res >= options.level:
                        levels += '{} {} {} '.format(res, res, res)
                parameters = {
                    'pyramid': levels,
                    'grid_spacing': options.level * 3.0
                }
            ipl.elastix_registration.register_elastix(
                options.source,
                options.target,
                output_xfm=options.output,
                source_mask=options.source_mask,
                target_mask=options.target_mask,
                init_xfm=options.init_xfm,
                parameters=parameters,
                downsample=options.downsample,
                downsample_grid=options.level,
                nl=True)
        else:
            # default: minctracc-based registration
            # NOTE(review): `objective` is computed here but never passed to
            # non_linear_register_full below -- looks dead; confirm intent.
            objective = '-' + options.objective
            if parameters is not None:
                objective = parameters.get('objective')
            ipl.registration.non_linear_register_full(
                options.source,
                options.target,
                options.output,
                source_mask=options.source_mask,
                target_mask=options.target_mask,
                init_xfm=options.init_xfm,
                parameters=parameters,
                downsample=options.downsample,
                level=options.level,
                start=options.start)

        if options.qc is not None:
            # create QC image for deformation field
            create_qc(options.source, options.target, options.output,
                      options.qc)
    except mincError as e:
        print(str(e), file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
    except:
        # top-level catch-all boundary for a CLI script; logs and exits
        print("Exception :{}".format(sys.exc_info()[0]))
        traceback.print_exc(file=sys.stderr)
def regress(samples,
            initial_model=None,
            initial_int_model=None,
            initial_def_model=None,
            output_int_model=None,
            output_def_model=None,
            output_residuals_int=None,
            output_residuals_def=None,
            prefix='.',
            options={}):
    """ perform iterative model creation

    Iteratively regresses an intensity model and a deformation (velocity)
    model from `samples`, alternating per-sample non-linear registration,
    optional debiasing, and voxel-wise regression.

    Args:
        samples            - list of sample objects with .par_int, .par_def,
                             .mask, .name attributes
        initial_model      - initial template scan (takes precedence)
        initial_int_model  - initial intensity regression model
        initial_def_model  - initial deformation regression model
        output_*           - currently unused placeholders (kept for
                             interface compatibility)
        prefix             - working/output directory
        options            - dict: 'protocol', 'cleanup',
                             'cleanup_intermediate', 'parameters', 'refine',
                             'qc', 'downsample', 'start_level', 'debug',
                             'debias', 'nl_mode'.
                             NOTE: never mutated here, so the {} default is
                             safe; kept for backward compatibility.

    Returns:
        dict with 'int_model', 'def_model', 'int_residuals', 'def_residuals'

    Raises:
        mincError when samples have inconsistent parameter counts, or a
        processing step fails
    """
    try:
        # make sure all input scans have consistent numbers of parameters
        N_int = None
        N_def = None
        int_design_matrix = []
        def_design_matrix = []
        nomask = False
        for s in samples:
            if N_int is None:
                N_int = len(s.par_int)
            elif N_int != len(s.par_int):
                # BUG fix: message passed len(s) (not defined on samples) and
                # had typos; now reports len(s.par_int)
                raise mincError(
                    "Sample {} have inconsistent number of int parameters: {} expected {}"
                    .format(repr(s), len(s.par_int), N_int))
            if N_def is None:
                N_def = len(s.par_def)
            elif N_def != len(s.par_def):
                # BUG fix: same as above, and this message said "int" where
                # "def" was meant
                raise mincError(
                    "Sample {} have inconsistent number of def parameters: {} expected {}"
                    .format(repr(s), len(s.par_def), N_def))
            int_design_matrix.append(s.par_int)
            def_design_matrix.append(s.par_def)
            if s.mask is None:
                nomask = True

        #print("Intensity design matrix=\n{}".format(repr(int_design_matrix)))
        #print("Velocity design matrix=\n{}".format(repr(def_design_matrix)))

        ref_model = None
        # current estimate of template
        if initial_model is not None:
            current_int_model = initial_model
            current_def_model = None
            ref_model = initial_model.scan
        else:
            current_int_model = initial_int_model
            current_def_model = initial_def_model
            ref_model = initial_int_model.volume[0]

        transforms = []
        full_transforms = []

        protocol = options.get('protocol',
                               [{'iter': 4, 'level': 32,
                                 'blur_int': None, 'blur_def': None},
                                {'iter': 4, 'level': 16,
                                 'blur_int': None, 'blur_def': None}])

        cleanup = options.get('cleanup', False)
        cleanup_intermediate = options.get('cleanup_intermediate', False)
        parameters = options.get('parameters', None)
        refine = options.get('refine', False)
        qc = options.get('qc', False)
        downsample = options.get('downsample', None)
        start_level = options.get('start_level', None)
        debug = options.get('debug', False)
        debias = options.get('debias', True)
        nl_mode = options.get('nl_mode', 'animal')

        if parameters is None:
            pass
            # TODO: make sensible parameters?

        int_models = []
        def_models = []
        int_residuals = []
        def_residuals = []

        int_residual = None
        def_residual = None

        prev_def_estimate = None
        # go through all the iterations
        it = 0
        residuals = []

        for (i, p) in enumerate(protocol):
            blur_int_model = p.get('blur_int', None)
            blur_def_model = p.get('blur_def', None)
            for j in range(1, p['iter'] + 1):
                it += 1
                _start_level = None
                if it == 1:
                    _start_level = start_level

                # this will be a model for next iteration actually
                it_prefix = prefix + os.sep + str(it)
                if not os.path.exists(it_prefix):
                    os.makedirs(it_prefix)

                next_int_model = MriDatasetRegress(prefix=prefix,
                                                   name='model_int',
                                                   iter=it, N=N_int,
                                                   nomask=nomask)
                next_def_model = MriDatasetRegress(prefix=prefix,
                                                   name='model_def',
                                                   iter=it, N=N_def,
                                                   nomask=True)
                print("next_int_model={}".format(
                    next_int_model.volume[0].rsplit('_0.mnc', 1)[0] + '_RMS.mnc'))

                int_residual = MriDataset(
                    prefix=prefix,
                    scan=next_int_model.volume[0].rsplit('_0.mnc', 1)[0] + '_RMS.mnc')
                    #name=next_int_model.name, iter=it )
                def_residual = MriDataset(
                    prefix=prefix,
                    scan=next_def_model.volume[0].rsplit('_0.mnc', 1)[0] + '_RMS.mnc')
                    #name=next_def_model.name, iter=it )

                # skip over existing models here!
                if not next_int_model.exists() or \
                   not next_def_model.exists() or \
                   not int_residual.exists() or \
                   not def_residual.exists():
                    int_estimate = []
                    def_estimate = []
                    r = []

                    # 1. for each sample generate current approximation
                    # 2. perform non-linear registration between each sample
                    #    and sample-specific approximation
                    # 3. update transformation
                    # 1+2+3 - all together
                    for (i, s) in enumerate(samples):
                        sample_def = MriTransform(name=s.name,
                                                  prefix=it_prefix, iter=it)
                        sample_int = MriDataset(name=s.name,
                                                prefix=it_prefix, iter=it)
                        previous_def = None
                        if refine and it > 1:
                            previous_def = prev_def_estimate[i]
                        r.append(futures.submit(
                            non_linear_register_step_regress_std,
                            s,
                            current_int_model,
                            current_def_model,
                            None,
                            sample_def,
                            parameters=parameters,
                            level=p['level'],
                            start_level=_start_level,
                            work_dir=prefix,
                            downsample=downsample,
                            debug=debug,
                            previous_def=previous_def,
                            nl_mode=nl_mode))
                        def_estimate.append(sample_def)
                        #int_estimate.append(sample_int)

                    # wait for jobs to finish
                    futures.wait(r, return_when=futures.ALL_COMPLETED)
                    avg_inv_transform = None
                    if debias:
                        # here all the transforms should exist
                        avg_inv_transform = MriTransform(name='avg_inv',
                                                         prefix=it_prefix,
                                                         iter=it)
                        # 2. average all transformations
                        average_transforms(def_estimate, avg_inv_transform,
                                           symmetric=False, invert=True,
                                           nl=True)

                    corr = []
                    corr_transforms = []
                    corr_samples = []

                    # 3. concatenate correction and resample
                    for (i, s) in enumerate(samples):
                        c = MriDataset(prefix=it_prefix, iter=it, name=s.name)
                        x = MriTransform(name=s.name + '_corr',
                                         prefix=it_prefix, iter=it)
                        corr.append(futures.submit(
                            concat_resample_nl,
                            s, def_estimate[i], avg_inv_transform,
                            c, x,
                            current_int_model,
                            p['level'],
                            symmetric=False,
                            qc=qc,
                            invert_transform=True))
                        corr_transforms.append(x)
                        corr_samples.append(c)
                    futures.wait(corr, return_when=futures.ALL_COMPLETED)

                    # 4. perform regression and create new estimate
                    # 5. calculate residuals (?)
                    # 4+5
                    result = futures.submit(voxel_regression,
                                            int_design_matrix,
                                            def_design_matrix,
                                            corr_samples,
                                            corr_transforms,
                                            next_int_model,
                                            next_def_model,
                                            int_residual,
                                            def_residual,
                                            blur_int_model=blur_int_model,
                                            blur_def_model=blur_def_model,
                                            qc=qc)
                    futures.wait([result], return_when=futures.ALL_COMPLETED)

                    # 6. cleanup
                    if cleanup:
                        print("Cleaning up iteration: {}".format(it))
                        for i in def_estimate:
                            i.cleanup()
                        for i in corr_samples:
                            i.cleanup()
                        if prev_def_estimate is not None:
                            for i in prev_def_estimate:
                                i.cleanup()
                        avg_inv_transform.cleanup()
                else:
                    # files were there, reuse them
                    print("Iteration {} already performed, skipping".format(it))
                    corr_transforms = []
                    # this is a hack right now
                    for (i, s) in enumerate(samples):
                        x = MriTransform(name=s.name + '_corr',
                                         prefix=it_prefix, iter=it)
                        corr_transforms.append(x)

                int_models.append(current_int_model)
                def_models.append(current_def_model)
                int_residuals.append(int_residual)
                def_residuals.append(def_residual)

                current_int_model = next_int_model
                current_def_model = next_def_model

                result = futures.submit(average_stats_regression,
                                        current_int_model, current_def_model,
                                        int_residual, def_residual)
                residuals.append(result)

                regression_results = {
                    'int_model': current_int_model,
                    'def_model': current_def_model,
                    'int_residuals': int_residual.scan,
                    'def_residuals': def_residual.scan,
                    }
                with open(prefix + os.sep + 'results_{:03d}.json'.format(it), 'w') as f:
                    json.dump(regression_results, f, indent=1, cls=MRIEncoder)

                # save for next iteration
                # TODO: regularize?
                prev_def_estimate = corr_transforms  # have to use adjusted def estimate

        # copy output to the destination
        futures.wait(residuals, return_when=futures.ALL_COMPLETED)
        with open(prefix + os.sep + 'stats.txt', 'w') as f:
            for s in residuals:
                f.write("{}\n".format(s.result()))

        with open(prefix + os.sep + 'results_final.json', 'w') as f:
            json.dump(regression_results, f, indent=1, cls=MRIEncoder)

        if cleanup_intermediate:
            for i in range(len(int_models) - 1):
                int_models[i].cleanup()
                def_models[i].cleanup()
                int_residuals[i].cleanup()
                def_residuals[i].cleanup()
            # delete unneeded models
            #shutil.rmtree(prefix+os.sep+'reg')

        return regression_results
    except mincError as e:
        # BUG fix: the handlers used Python-2 `print "..."` statements,
        # a SyntaxError under Python 3 (the rest of the file uses print())
        print("Exception in regress:{}".format(str(e)))
        traceback.print_exc(file=sys.stdout)
        raise
    except Exception:
        print("Exception in regress:{}".format(sys.exc_info()[0]))
        traceback.print_exc(file=sys.stdout)
        raise
like=target, transform=xfm) qc(m.tmp('source.mnc'), qc_file, mask=target, image_cmap='red', mask_cmap='green', use_max=True) # ,ialpha=0.5,oalpha=0.5 if __name__ == '__main__': options = parse_options() try: if os.path.exists(options.output) and not options.clobber: raise mincError('File exists : {}'.format(options.output)) parameters = None if options.param is not None: with open(options.param, 'r') as f: parameters = json.load(f) if options.method == 'ants': ipl.ants_registration.non_linear_register_ants2( options.source, options.target, options.output, source_mask=options.source_mask, target_mask=options.target_mask, init_xfm=options.init_xfm,
def non_linear_register_full(source, target, output_xfm,
                             source_mask=None,
                             target_mask=None,
                             init_xfm=None,
                             level=4,
                             start=32,
                             parameters=None,
                             work_dir=None,
                             downsample=None):
    """perform non-linear registration, multiple levels
    Args:
    source - name of source minc file
    target - name of target minc file
    output_xfm - name of output transformation file
    source_mask - name of source mask file (optional)
    target_mask - name of target mask file (optional)
    init_xfm - name of initial transformation file (optional)
    parameters - configuration for iterative algorithm dict (optional)
    work_dir - working directory (optional) , default create one in temp
    start - initial step size, default 32mm
    level - final step size, default 4mm
    downsample - downsample initial files to this step size, default None
    Returns:
    resulting XFM file
    Raises:
    mincError when tool fails
    """
    with ipl.minc_tools.mincTools() as minc:
        # skip work entirely if the output already exists / inputs missing
        if not minc.checkfiles(inputs=[source, target],
                               outputs=[output_xfm]):
            return

        if parameters is None:
            # default minctracc pyramid: coarse-to-fine steps from 32mm to
            # 0.5mm, each with a matching gaussian blur
            parameters = {
                'cost': 'corrcoeff',
                'weight': 1,
                'stiffness': 1,
                'similarity': 0.3,
                'sub_lattice': 6,
                'conf': [
                    {
                        'step': 32.0,
                        'blur_fwhm': 16.0,
                        'iterations': 20,
                        'blur': 'blur',
                    },
                    {
                        'step': 16.0,
                        'blur_fwhm': 8.0,
                        'iterations': 20,
                        'blur': 'blur',
                    },
                    {
                        'step': 12.0,
                        'blur_fwhm': 6.0,
                        'iterations': 20,
                        'blur': 'blur',
                    },
                    {
                        'step': 8.0,
                        'blur_fwhm': 4.0,
                        'iterations': 20,
                        'blur': 'blur',
                    },
                    {
                        'step': 6.0,
                        'blur_fwhm': 3.0,
                        'iterations': 20,
                        'blur': 'blur',
                    },
                    {
                        'step': 4.0,
                        'blur_fwhm': 2.0,
                        'iterations': 10,
                        'blur': 'blur',
                    },
                    {
                        'step': 2.0,
                        'blur_fwhm': 1.0,
                        'iterations': 10,
                        'blur': 'blur',
                    },
                    {
                        'step': 1.0,
                        'blur_fwhm': 1.0,
                        'iterations': 10,
                        'blur': 'blur',
                    },
                    {
                        'step': 1.0,
                        'blur_fwhm': 0.5,
                        'iterations': 10,
                        'blur': 'blur',
                    },
                    {
                        'step': 0.5,
                        'blur_fwhm': 0.25,
                        'iterations': 10,
                        'blur': 'blur',
                    },
                ]
            }

        prev_xfm = None
        prev_grid = None

        # normalize source/target to lists to support multi-modal inputs
        sources = []
        targets = []
        if isinstance(source, list):
            sources.extend(source)
        else:
            sources.append(source)
        if isinstance(target, list):
            targets.extend(target)
        else:
            targets.append(target)
        if len(sources) != len(targets):
            raise minc_tools.mincError(
                ' ** Error: Different number of inputs ')

        # base names with .gz/.mnc suffixes stripped, used for temp files
        s_base = os.path.basename(sources[0]).rsplit('.gz', 1)[0].rsplit('.mnc', 1)[0]
        t_base = os.path.basename(targets[0]).rsplit('.gz', 1)[0].rsplit('.mnc', 1)[0]

        # figure out what to do here:
        with ipl.minc_tools.cache_files(work_dir=work_dir,
                                        context='reg') as tmp:
            # a fitting we shall go...
            (sources_lr, targets_lr, source_mask_lr, target_mask_lr) = \
                minc.downsample_registration_files(sources, targets,
                                                   source_mask, target_mask,
                                                   downsample)

            for (i, c) in enumerate(parameters['conf']):
                # honor the requested [level, start] range of step sizes
                if c['step'] > start:
                    continue
                elif c['step'] < level:
                    break

                # set up intermediate files
                tmp_ = tmp.tmp(s_base + '_' + t_base + '_' + str(i))
                tmp_xfm = tmp_ + '.xfm'
                tmp_grid = tmp_ + '_grid_0.mnc'

                tmp_sources = sources_lr
                tmp_targets = targets_lr
                if c['blur_fwhm'] > 0:
                    # blur every modality pair at this pyramid level;
                    # results are cached so repeated runs reuse them
                    tmp_sources = []
                    tmp_targets = []
                    for s_, _ in enumerate(sources_lr):
                        tmp_source = tmp.cache(s_base + '_' + c['blur'] + '_'
                                               + str(c['blur_fwhm']) + '_'
                                               + str(s_) + '.mnc')
                        if not os.path.exists(tmp_source):
                            minc.blur(sources_lr[s_], tmp_source,
                                      gmag=(c['blur'] == 'dxyz'),
                                      fwhm=c['blur_fwhm'])
                        tmp_target = tmp.cache(t_base + '_' + c['blur'] + '_'
                                               + str(c['blur_fwhm']) + '_'
                                               + str(s_) + '.mnc')
                        if not os.path.exists(tmp_target):
                            minc.blur(targets_lr[s_], tmp_target,
                                      gmag=(c['blur'] == 'dxyz'),
                                      fwhm=c['blur_fwhm'])
                        tmp_sources.append(tmp_source)
                        tmp_targets.append(tmp_target)

                # set up registration
                args = [
                    'minctracc',
                    tmp_sources[0],
                    tmp_targets[0],
                    '-clobber',
                    '-nonlinear', parameters['cost'],
                    '-weight', parameters['weight'],
                    '-stiffness', parameters['stiffness'],
                    '-similarity', parameters['similarity'],
                    '-sub_lattice', parameters['sub_lattice'],
                    ]
                args.extend(['-iterations', c['iterations']])
                args.extend(['-lattice_diam', c['step'] * 3.0,
                             c['step'] * 3.0, c['step'] * 3.0])
                args.extend(['-step', c['step'], c['step'], c['step']])
                if c['step'] < 4:
                    # TODO: check if it's 4*minc_step ?
                    args.append('-no_super')
                # additional modalities beyond the first pair
                for s_ in range(len(tmp_targets) - 1):
                    args.extend(['-feature_vol', tmp_sources[s_ + 1],
                                 tmp_targets[s_ + 1], parameters['cost'],
                                 1.0])

                # Current transformation at this step
                if prev_xfm is not None:
                    args.extend(['-transformation', prev_xfm])
                elif init_xfm is not None:
                    args.extend(['-transformation', init_xfm])
                else:
                    args.append('-identity')

                # masks (even if the blurred image is masked, it's still preferable
                # to use the mask in minctracc)
                if source_mask is not None:
                    args.extend(['-source_mask', source_mask_lr])
                if target_mask is not None:
                    args.extend(['-model_mask', target_mask_lr])

                # add files and run registration
                args.append(tmp_xfm)
                # NOTE(review): tmp_source/tmp_target are only bound inside
                # the blur loop above; if c['blur_fwhm'] <= 0 for the first
                # executed level this would raise NameError -- confirm the
                # default protocol always blurs (it does here).
                minc.command([str(ii) for ii in args],
                             inputs=[tmp_source, tmp_target],
                             outputs=[tmp_xfm])
                prev_xfm = tmp_xfm
                prev_grid = tmp_grid

            # done
            if prev_xfm is None:
                raise ipl.minc_tools.mincError("No iterations were performed!")

            # STOP-gap measure to save space for now
            # TODO: fix minctracc?
            # TODO: fix mincreshape too!
            # rewrite the final grid as float to reduce disk usage
            minc.calc([prev_grid], 'A[0]', tmp.tmp('final_grid_0.mnc'),
                      datatype='-float')
            shutil.move(tmp.tmp('final_grid_0.mnc'), prev_grid)
            # concatenate with identity so the output references the final grid
            minc.param2xfm(tmp.tmp('identity.xfm'))
            minc.xfmconcat([tmp.tmp('identity.xfm'), prev_xfm], output_xfm)
            return output_xfm
def linear_register(source, target, output_xfm,
                    parameters=None,
                    source_mask=None,
                    target_mask=None,
                    init_xfm=None,
                    objective=None,
                    conf=None,
                    debug=False,
                    close=False,
                    norot=False,
                    noshear=False,
                    noshift=False,
                    noscale=False,
                    work_dir=None,
                    start=None,
                    downsample=None,
                    verbose=0):
    """Perform linear registration, replacement for bestlinreg.pl script
    Args:
    source - name of source minc file
    target - name of target minc file
    output_xfm - name of output transformation file
    parameters - registration parameters (optional), can be '-lsq6', '-lsq9', '-lsq12'
    source_mask - name of source mask file (optional)
    target_mask - name of target mask file (optional)
    init_xfm - name of initial transformation file (optional)
    objective - name of objective function (optional), could be '-xcorr' (default), '-nmi','-mi'
    conf - configuration for iterative algorithm (optional) array of dict,
           or a string describing a flawor
           bestlinreg (default)
           bestlinreg_s
           bestlinreg_s2
           bestlinreg_new
           bestlinreg_20180117 - based on Claude's latest and greatest, circa 2018-01-17
    debug - debug flag (optional) , default False
    close - closeness flag (optional) , default False
    norot - disable rotation flag (optional) , default False
    noshear - disable shear flag (optional) , default False
    noshift - disable shift flag (optional) , default False
    noscale - disable scale flag (optional) , default False
    work_dir - working directory (optional) , default create one in temp
    start - initial blurring level, default 16mm from configuration
    downsample - downsample initial files to this step size, default None
    verbose - verbosity level
    Returns:
    resulting XFM file
    Raises:
    mincError when tool fails
    """
    logger.debug("linear_register s:{} s_m:{} t:{} t_m:{} i:{} ".format(
        source, source_mask, target, target_mask, init_xfm))

    with ipl.minc_tools.mincTools(verbose=verbose) as minc:
        if not minc.checkfiles(inputs=[source, target],
                               outputs=[output_xfm]):
            return

        # normalize source/target to lists to support multi-modal inputs
        sources = []
        targets = []
        if isinstance(source, list):
            sources.extend(source)
        else:
            sources.append(source)
        if isinstance(target, list):
            targets.extend(target)
        else:
            targets.append(target)
        if len(sources) != len(targets):
            raise minc_tools.mincError(
                ' ** Error: Different number of inputs ')

        # python version
        if conf is None:
            conf = linear_registration_config['bestlinreg_20180117']  # bestlinreg_new ?
        elif not isinstance(conf, list):
            # assume that it is a string naming a known configuration
            if conf in linear_registration_config:
                conf = linear_registration_config[conf]

        if parameters is None:
            parameters = '-lsq9'
        if objective is None:
            objective = '-xcorr'

        if not isinstance(conf, list):
            # assume it's external program's name, run it instead of the
            # internal minctracc pyramid
            # TODO: check if we are given multiple sources/targets?
            cmd = [conf, source, target, output_xfm]
            if source_mask is not None:
                cmd.extend(['-source_mask', source_mask])
            if target_mask is not None:
                cmd.extend(['-target_mask', target_mask])
            if parameters is not None:
                cmd.append(parameters)
            if objective is not None:
                cmd.append(objective)
            if init_xfm is not None:
                cmd.extend(['-init_xfm', init_xfm])
            # BUG fix: this called `m.command(...)` but `m` was never
            # defined (the `with ... as m:` above it was commented out),
            # so this branch always raised NameError; use `minc` instead.
            minc.command(cmd, inputs=[source, target],
                         outputs=[output_xfm], verbose=2)
            return output_xfm
        else:
            prev_xfm = None
            # base names with .gz/.mnc suffixes stripped, used for temp files
            s_base = os.path.basename(sources[0]).rsplit('.gz', 1)[0].rsplit(
                '.mnc', 1)[0]
            t_base = os.path.basename(targets[0]).rsplit('.gz', 1)[0].rsplit(
                '.mnc', 1)[0]

            # figure out what to do here:
            with ipl.minc_tools.cache_files(work_dir=work_dir,
                                            context='reg') as tmp:
                (sources_lr, targets_lr, source_mask_lr, target_mask_lr) = \
                    minc.downsample_registration_files(sources, targets,
                                                       source_mask,
                                                       target_mask,
                                                       downsample)

                # a fitting we shall go...
                for (i, c) in enumerate(conf):
                    _parameters = parameters
                    if 'parameters' in c and parameters != '-lsq6':
                        # emulate Claude's approach
                        _parameters = c.get('parameters')  # '-lsq7'
                    _reverse = c.get('reverse', False)  # swap target and source

                    # set up intermediate files; skip levels outside the
                    # requested range
                    if start is not None and start > c['blur_fwhm']:
                        continue
                    elif close and c['blur_fwhm'] > 8:
                        continue
                    tmp_xfm = tmp.tmp(s_base + '_' + t_base + '_' + str(i)
                                      + '.xfm')

                    tmp_sources = sources_lr
                    tmp_targets = targets_lr
                    if c['blur_fwhm'] > 0:
                        # blur every modality pair at this level; results
                        # are cached so repeated runs reuse them
                        tmp_sources = []
                        tmp_targets = []
                        for s_, _ in enumerate(sources_lr):
                            tmp_source = tmp.cache(
                                s_base + '_' + c['blur'] + '_'
                                + str(c['blur_fwhm']) + '_' + str(s_)
                                + '.mnc')
                            if not os.path.exists(tmp_source):
                                minc.blur(sources_lr[s_], tmp_source,
                                          gmag=(c['blur'] == 'dxyz'),
                                          fwhm=c['blur_fwhm'])
                            tmp_target = tmp.cache(
                                t_base + '_' + c['blur'] + '_'
                                + str(c['blur_fwhm']) + '_' + str(s_)
                                + '.mnc')
                            if not os.path.exists(tmp_target):
                                minc.blur(targets_lr[s_], tmp_target,
                                          gmag=(c['blur'] == 'dxyz'),
                                          fwhm=c['blur_fwhm'])
                            tmp_sources.append(tmp_source)
                            tmp_targets.append(tmp_target)

                    objective_ = objective
                    if isinstance(objective, list):
                        objective_ = objective[0]

                    if _reverse:
                        args = [
                            'minctracc', tmp_targets[0], tmp_sources[0],
                            '-clobber', _parameters, objective_,
                            '-simplex', c['simplex'],
                            '-tol', c['tolerance']
                            ]
                        # additional modalities
                        for s_ in range(len(tmp_targets) - 1):
                            if isinstance(objective, list):
                                objective_ = objective[s_ + 1]
                            args.extend([
                                '-feature_vol', tmp_targets[s_ + 1],
                                tmp_sources[s_ + 1],
                                objective_.lstrip('-'), 1.0
                                ])
                    else:
                        # set up registration
                        args = [
                            'minctracc', tmp_sources[0], tmp_targets[0],
                            '-clobber', _parameters, objective_,
                            '-simplex', c['simplex'],
                            '-tol', c['tolerance']
                            ]
                        for s_ in range(len(tmp_targets) - 1):
                            if isinstance(objective, list):
                                objective_ = objective[s_ + 1]
                            args.extend([
                                '-feature_vol', tmp_sources[s_ + 1],
                                tmp_targets[s_ + 1],
                                objective_.lstrip('-'), 1.0
                                ])
                    args.append('-step')
                    args.extend(c['steps'])

                    # Current transformation at this step
                    if prev_xfm is not None:
                        if _reverse:
                            inv_prev_xfm = tmp.tmp(s_base + '_' + t_base
                                                   + '_' + str(i)
                                                   + '_init.xfm')
                            minc.xfminvert(prev_xfm, inv_prev_xfm)
                            args.extend(['-transformation', inv_prev_xfm])
                        else:
                            args.extend(['-transformation', prev_xfm])
                    elif init_xfm is not None:
                        # _reverse should not be first?
                        args.extend(
                            ['-transformation', init_xfm, '-est_center'])
                    elif close:
                        args.append('-identity')
                    else:
                        # _reverse should not be first?
                        # Initial transformation will be computed from the from Principal axis
                        # transformation (PAT).
                        if c['trans'] is not None and c['trans'][
                                0] != '-est_translations':
                            args.extend(c['trans'])
                        else:
                            # will use manual transformation based on shift of CoM,
                            # should be identical to '-est_translations', but it's not
                            com_src = minc.stats(source,
                                                 ['-com', '-world_only'],
                                                 single_value=False)
                            com_trg = minc.stats(target,
                                                 ['-com', '-world_only'],
                                                 single_value=False)
                            diff = [com_trg[k] - com_src[k] for k in range(3)]
                            xfm = tmp.cache(s_base + '_init.xfm')
                            minc.param2xfm(xfm, translation=diff)
                            args.extend(['-transformation', xfm])

                    # masks (even if the blurred image is masked, it's still preferable
                    # to use the mask in minctracc)
                    if _reverse:
                        if source_mask is not None:
                            args.extend(['-model_mask', source_mask_lr])
                        #disable one mask in this mode
                        #if target_mask is not None:
                        #args.extend(['-source_mask', target_mask_lr])
                    else:
                        if source_mask is not None:
                            args.extend(['-source_mask', source_mask_lr])
                        if target_mask is not None:
                            args.extend(['-model_mask', target_mask_lr])

                    if noshear:
                        args.extend(['-w_shear', 0, 0, 0])
                    if noscale:
                        args.extend(['-w_scales', 0, 0, 0])
                    if noshift:
                        args.extend(['-w_translations', 0, 0, 0])
                    if norot:
                        args.extend(['-w_rotations', 0, 0, 0])

                    # add files and run registration
                    args.append(tmp_xfm)
                    # NOTE(review): tmp_source/tmp_target are only bound in
                    # the blur loop; if the first executed level has
                    # blur_fwhm <= 0 this raises NameError -- confirm all
                    # configurations blur at every level.
                    minc.command([str(ii) for ii in args],
                                 inputs=[tmp_source, tmp_target],
                                 outputs=[tmp_xfm])

                    if _reverse:
                        inv_tmp_xfm = tmp.tmp(s_base + '_' + t_base + '_'
                                              + str(i) + '_sol.xfm')
                        minc.xfminvert(tmp_xfm, inv_tmp_xfm)
                        prev_xfm = inv_tmp_xfm
                    else:
                        prev_xfm = tmp_xfm

                shutil.copyfile(prev_xfm, output_xfm)
                return output_xfm