def group_options(options, timepoint):
    options = copy.deepcopy(options)

    if options.mbm.lsq6.target_type == TargetType.pride_of_models:
        targets = get_closest_model_from_pride_of_models(
            pride_of_models_dict=get_pride_of_models_mapping(
                pride_csv=options.mbm.lsq6.target_file,
                output_dir=options.application.output_directory,
                pipeline_name=options.application.pipeline_name),
            time_point=timepoint)

        options.mbm.lsq6 = options.mbm.lsq6.replace(
            target_type=TargetType.initial_model,
            target_file=targets.registration_standard.path)

        # resolution = (options.registration.resolution
        #               or get_resolution_from_file(targets.registration_standard.path))
        # options.registration = options.registration.replace(resolution=resolution)

        # FIXME use of registration_standard here is quite wrong ...
        # part of the trouble is that mbm calls registration_targets itself,
        # so we can't send this RegistrationTargets to `mbm` directly ...
        # one option: add yet another optional arg to `mbm` ...
    else:
        targets = s.defer(registration_targets(
            lsq6_conf=options.mbm.lsq6,
            app_conf=options.application,
            reg_conf=options.registration,
            first_input_file=imgs.filename.iloc[0]))

    resolution = (options.registration.resolution
                  or get_resolution_from_file(targets.registration_standard.path))
    # This must happen after calling registration_targets, otherwise it will resample
    # to options.registration.resolution:
    options.registration = options.registration.replace(resolution=resolution)

    return options
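# A minimal usage sketch (illustrative only; it assumes `imgs` carries a sortable
# 'timepoint' column, which is an assumption, not taken from this file): because
# `group_options` deep-copies `options` before any `.replace(...)`, each timepoint
# gets an independent options object and the caller's options are never mutated:
#
#   per_timepoint_options = {tp: group_options(options, tp)
#                            for tp in sorted(imgs.timepoint.unique())}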
def two_level(grouped_files_df, options: TwoLevelConf):
    """
    grouped_files_df - must contain 'group' : <any comparable, sortable type>
                       and 'file' : MincAtom columns
    """
    # TODO weird naming, since grouped_files_df isn't a GroupBy object? just files_df?

    s = Stages()

    if grouped_files_df.isnull().values.any():
        raise ValueError("NaN values in input dataframe; can't proceed")

    if options.mbm.lsq6.target_type == TargetType.bootstrap:
        # won't work, since the second-level part tries to get the resolution of *its*
        # "first input file", which hasn't been created.  We could instead pass a
        # resolution to the `mbm` function, but simply disable this for now:
        raise ValueError("Bootstrap model building currently doesn't work with this pipeline; "
                         "just specify an initial target instead")
    elif options.mbm.lsq6.target_type == TargetType.pride_of_models:
        pride_of_models_mapping = get_pride_of_models_mapping(
            pride_csv=options.mbm.lsq6.target_file,
            output_dir=options.application.output_directory,
            pipeline_name=options.application.pipeline_name)

    # FIXME this is the same as in the 'tamarack' pipeline except for the names of
    # arguments/enclosing variables
    def group_options(options, group):
        options = copy.deepcopy(options)

        if options.mbm.lsq6.target_type == TargetType.pride_of_models:
            targets = get_closest_model_from_pride_of_models(
                pride_of_models_dict=pride_of_models_mapping,
                time_point=group)
            options.mbm.lsq6 = options.mbm.lsq6.replace(
                target_type=TargetType.initial_model,
                target_file=targets.registration_standard.path)
        else:
            # this will ensure that all groups have the same resolution -- is it necessary?
            targets = s.defer(registration_targets(
                lsq6_conf=options.mbm.lsq6,
                app_conf=options.application,
                reg_conf=options.registration,
                first_input_file=grouped_files_df.file.iloc[0]))

        resolution = (options.registration.resolution
                      or get_resolution_from_file(targets.registration_standard.path))
        # This must happen after calling registration_targets, otherwise it will
        # resample to options.registration.resolution:
        options.registration = options.registration.replace(resolution=resolution)

        # no need to check common-space settings here since they're turned off
        # at the parser level (a bit strange)
        return options

    first_level_results = (
        grouped_files_df
        .groupby('group', as_index=False)                # the usual annoying pattern to do an aggregate
        .aggregate({'file': lambda files: list(files)})  # with access to the groupby keys ... TODO: fix
        .rename(columns={'file': "files"})
        .assign(build_model=lambda df:
                    df.apply(axis=1,
                             func=lambda row: s.defer(
                                 mbm(imgs=row.files,
                                     options=group_options(options, row.group),
                                     prefix="%s" % row.group,
                                     output_dir=os.path.join(
                                         options.application.output_directory,
                                         options.application.pipeline_name + "_first_level",
                                         "%s_processed" % row.group))))))
    # TODO replace .assign(...apply(...)...) with just an apply, producing a series right away?

    # FIXME right now the same options set is used for both levels -- use options.first/second_level
    second_level_options = copy.deepcopy(options)
    second_level_options.mbm.lsq6 = second_level_options.mbm.lsq6.replace(run_lsq6=False)
    second_level_options.mbm.segmentation.run_maget = False
    second_level_options.mbm.maget.maget.mask_only = False
    second_level_options.mbm.maget.maget.mask = False

    # FIXME this is probably a hack -- instead, add a --second-level-init-model option to
    # specify which timepoint should be used as the initial model in the second level???
    # (at this point it doesn't matter due to the lack of lsq6 ...)
    if second_level_options.mbm.lsq6.target_type == TargetType.pride_of_models:
        second_level_options.mbm.lsq6 = second_level_options.mbm.lsq6.replace(
            # the target doesn't really matter since there's no lsq6 here; it's just used for resolution ...
            target_type=TargetType.target,
            target_file=list(pride_of_models_mapping.values())[0].registration_standard.path)

    # NOTE: running lsq6_nuc_inorm here doesn't work in general (but possibly with rotational
    # minctracc), since the native-space initial model is used, but our images are already in
    # standard space (as we resampled there after the first-level lsq6).
    # On the other hand, we might want to run it here (although of course NOT nuc/inorm!) in the
    # future, for instance given a 'pride' of models (one for each group).

    second_level_results = s.defer(
        mbm(imgs=first_level_results.build_model.map(lambda m: m.avg_img),
            options=second_level_options,
            prefix=os.path.join(options.application.output_directory,
                                options.application.pipeline_name + "_second_level")))

    # FIXME sadly, `mbm` doesn't return a pd.Series of xfms, so we don't have convenient indexing ...
    overall_xfms = [s.defer(concat_xfmhandlers([xfm_1, xfm_2]))
                    for xfms_1, xfm_2 in zip([r.xfms.lsq12_nlin_xfm
                                              for r in first_level_results.build_model],
                                             second_level_results.xfms.overall_xfm)
                    for xfm_1 in xfms_1]

    resample = np.vectorize(mincresample_new, excluded={"extra_flags"})
    defer = np.vectorize(s.defer)

    # TODO using the avg_img here is a bit clunky -- maybe better to propagate group indices ...
    # (only necessary since `mbm` doesn't return DataFrames but namespaces ...)
    first_level_determinants = pd.concat(
        list(first_level_results.build_model.apply(
            lambda x: x.determinants.assign(first_level_avg=x.avg_img))),
        ignore_index=True)

    # first_level_xfms is only needed because we otherwise have no access to the input file,
    # which is necessary for merging with the input csv: lsq12_nlin_xfm can be used for the
    # merge, and rigid_xfm contains the input file.  If for some reason we want to output
    # xfms in the future, just don't drop everything.
    first_level_xfms = pd.concat(
        list(first_level_results.build_model.apply(
            lambda x: x.xfms.assign(first_level_avg=x.avg_img))),
        ignore_index=True)[["lsq12_nlin_xfm", "rigid_xfm"]]

    if options.mbm.segmentation.run_maget:
        maget_df = pd.DataFrame(
            [{"label_file": x.labels.path, "native_file": x.orig_path}
             #, "_merge" : basename(x.orig_path)}
             for x in pd.concat([namespace.maget_result
                                 for namespace in first_level_results.build_model])])
        first_level_xfms = pd.merge(
            left=first_level_xfms.assign(
                native_file=lambda df: df.rigid_xfm.apply(lambda x: x.source.path)),
            right=maget_df,
            on="native_file")

    first_level_determinants = (
        pd.merge(left=first_level_determinants, right=first_level_xfms,
                 left_on="inv_xfm", right_on="lsq12_nlin_xfm")
          .drop(["rigid_xfm", "lsq12_nlin_xfm"], axis=1))

    resampled_determinants = (
        pd.merge(left=first_level_determinants,
                 right=pd.DataFrame({'group_xfm': second_level_results.xfms.overall_xfm})
                         .assign(source=lambda df: df.group_xfm.apply(lambda r: r.source)),
                 left_on="first_level_avg",
                 right_on="source")
          .assign(resampled_log_full_det=lambda df: defer(
                      resample(img=df.log_full_det,
                               xfm=df.group_xfm.apply(lambda x: x.xfm),
                               like=second_level_results.avg_img)),
                  resampled_log_nlin_det=lambda df: defer(
                      resample(img=df.log_nlin_det,
                               xfm=df.group_xfm.apply(lambda x: x.xfm),
                               like=second_level_results.avg_img))))
    # TODO this only resamples the log determinants, but it's still a bit ugly ... abstract somehow?
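    # For orientation (a sketch -- the column list is inferred from the assignments
    # above and is not exhaustive): resampled_determinants now has one row per
    # (input file, blur kernel), carrying the first-level determinants plus their
    # counterparts resampled into the second-level average space, roughly:
    #
    #   fwhm | log_full_det | log_nlin_det | first_level_avg | group_xfm
    #        | resampled_log_full_det | resampled_log_nlin_det | ...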
    # TODO shouldn't be called resampled_determinants since this is basically the whole (first-level) thing ...

    inverted_overall_xfms = [s.defer(invert_xfmhandler(xfm)) for xfm in overall_xfms]

    overall_determinants = (
        s.defer(determinants_at_fwhms(
            xfms=inverted_overall_xfms,
            inv_xfms=overall_xfms,
            blur_fwhms=options.mbm.stats.stats_kernels))
        .assign(overall_log_full_det=lambda df: df.log_full_det,
                overall_log_nlin_det=lambda df: df.log_nlin_det)
        .drop(['log_full_det', 'log_nlin_det'], axis=1))

    # TODO return some MAGeT stuff from the two_level function??
    # FIXME running MAGeT from within the `two_level` function has the same problem as running it
    # from within `mbm`: it will now run when this pipeline is called from within another one
    # (e.g., n-level), which will be redundant, create filename clashes, etc. -- this should be
    # moved to `two_level_pipeline`.
    # TODO do we need a `pride of atlases` for MAGeT in this pipeline??
    # TODO at the moment MAGeT is run within the MBM code, but it could be disabled there and run here
    #if options.mbm.segmentation.run_maget:
    #    maget_options = copy.deepcopy(options)
    #    maget_options.maget = options.mbm.maget
    #    fixup_maget_options(maget_options=maget_options.maget,
    #                        lsq12_options=maget_options.mbm.lsq12,
    #                        nlin_options=maget_options.mbm.nlin)
    #    maget_options.maget.maget.mask = maget_options.maget.maget.mask_only = False  # already done above
    #    del maget_options.mbm

    #    # again using a weird combination of vectorized and loop constructs ...
    #    s.defer(maget([xfm.resampled for _ix, m in first_level_results.iterrows()
    #                   for xfm in m.build_model.xfms.rigid_xfm],
    #                  options=maget_options,
    #                  prefix="%s_MAGeT" % options.application.pipeline_name,
    #                  output_dir=os.path.join(options.application.output_directory,
    #                                          options.application.pipeline_name + "_processed")))

    # TODO resampling to database model ...

    # TODO there should be one table containing all determinants (first level, overall, resampled
    # first level) for each file, and another containing some groupwise information (averages and
    # transforms to the common average)
    return Result(stages=s,
                  output=Namespace(first_level_results=first_level_results,
                                   resampled_determinants=resampled_determinants,
                                   overall_determinants=overall_determinants))
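# A minimal usage sketch (hypothetical filenames and a pre-parsed `two_level_conf`;
# not taken from this file): `two_level` expects one row per input image, with a
# sortable 'group' column and a 'file' column of MincAtoms:
#
#   import pandas as pd
#   grouped_files_df = pd.DataFrame(
#       {"group": [1, 1, 2, 2],
#        "file": [MincAtom(name=f) for f in ("a1.mnc", "a2.mnc", "b1.mnc", "b2.mnc")]})
#   result = two_level(grouped_files_df, options=two_level_conf)
#   # result.stages collects the deferred stages; result.output is the Namespace above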
def chain(options):
    """Create a registration chain pipeline from the given options."""

    # TODO:
    # one overall question for this entire piece of code is how we are going to make
    # sure that we can concatenate/add all the transformations together.  Many of the
    # sub-registrations that are performed (inter-subject registration, lsq6 using
    # multiple initial models) are applied to subsets of the entire data, making it
    # harder to keep the mapping simple/straightforward.

    chain_opts = options.chain  # type: ChainConf

    s = Stages()

    with open(options.chain.csv_file, 'r') as f:
        subject_info = parse_csv(rows=f, common_time_pt=options.chain.common_time_point)

    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    pipeline_processed_dir = os.path.join(output_dir, pipeline_name + "_processed")
    pipeline_lsq12_common_dir = os.path.join(output_dir, pipeline_name + "_lsq12_"
                                             + options.chain.common_time_point_name)
    pipeline_nlin_common_dir = os.path.join(output_dir, pipeline_name + "_nlin_"
                                            + options.chain.common_time_point_name)
    pipeline_montage_dir = os.path.join(output_dir, pipeline_name + "_montage")

    pipeline_subject_info = map_over_time_pt_dict_in_Subject(
        lambda subj_str: MincAtom(name=subj_str, pipeline_sub_dir=pipeline_processed_dir),
        subject_info)  # type: Dict[str, Subject[MincAtom]]

    # verify that the input files are proper MINC files and that there are
    # no duplicates among the filenames
    all_minc_atoms = []  # type: List[MincAtom]
    for s_id, subj in pipeline_subject_info.items():
        for subj_time_pt, subj_filename in subj.time_pt_dict.items():
            all_minc_atoms.append(subj_filename)
    # check_MINC_input_files takes strings, so pass those along instead of the actual MincAtoms:
    check_MINC_input_files([minc_atom.path for minc_atom in all_minc_atoms])

    if options.registration.input_space in (InputSpace.lsq6, InputSpace.lsq12):
        # the input files are not going through the lsq6 alignment, which is where they
        # would all be resampled using a single like file and get the same image
        # dimensions/lengths/resolution.  So in order for the subsequent stages to finish
        # (mincaverage stages, for instance), all files need to have the same image parameters:
        check_MINC_files_have_equal_dimensions_and_resolution(
            [minc_atom.path for minc_atom in all_minc_atoms],
            additional_msg="Given that the input images are already in "
                           + str(options.registration.input_space)
                           + " space, all input files need to have the same "
                             "dimensions/starts/step sizes.")

    if options.registration.input_space not in InputSpace.__members__.values():
        raise ValueError('unrecognized input space: %s; choices: %s'
                         % (options.registration.input_space, ','.join(InputSpace.__members__)))

    if options.registration.input_space == InputSpace.native:
        if options.lsq6.target_type == TargetType.bootstrap:
            raise ValueError("\nA bootstrap model is ill-defined for the registration chain. "
                             "(Which file is the 'first' input file?). Please use the "
                             "--lsq6-target flag to specify a target for the lsq6 stage, "
                             "or use an initial model.")
        if options.lsq6.target_type == TargetType.pride_of_models:
            pride_of_models_dict = get_pride_of_models_mapping(
                pride_csv=options.lsq6.target_file,
                output_dir=options.application.output_directory,
                pipeline_name=options.application.pipeline_name)
            subj_id_to_subj_with_lsq6_xfm_dict = map_with_index_over_time_pt_dict_in_Subject(
                lambda subj_atom, time_point:
                    s.defer(lsq6_nuc_inorm(
                        [subj_atom],
                        registration_targets=get_closest_model_from_pride_of_models(
                            pride_of_models_dict, time_point),
                        resolution=options.registration.resolution,
                        lsq6_options=options.lsq6,
                        lsq6_dir=None,  # never used since no average is created
                                        # (could call this "average_dir" with None -> no avg?)
                        subject_matter=options.registration.subject_matter,
                        create_qc_images=False,
                        create_average=False))[0],
                pipeline_subject_info)  # type: Dict[str, Subject[XfmHandler]]
        else:
            # if we are not dealing with a pride of models, we can retrieve a fixed
            # registration target for all input files:
            targets = registration_targets(lsq6_conf=options.lsq6,
                                           app_conf=options.application)

            # we want to store the xfm handlers in the same shape as pipeline_subject_info,
            # so we call lsq6_nuc_inorm for each file individually and simply extract the
            # first (and only) element from the resulting list via s.defer(...)[0]:
            subj_id_to_subj_with_lsq6_xfm_dict = map_over_time_pt_dict_in_Subject(
                lambda subj_atom:
                    s.defer(lsq6_nuc_inorm(
                        [subj_atom],
                        registration_targets=targets,
                        resolution=options.registration.resolution,
                        lsq6_options=options.lsq6,
                        lsq6_dir=None,  # no average will be created; it's just one file ...
                        create_qc_images=False,
                        create_average=False,
                        subject_matter=options.registration.subject_matter))[0],
                pipeline_subject_info)  # type: Dict[str, Subject[XfmHandler]]

        # create verification images to show the 6-parameter alignment
        montageLSQ6 = pipeline_montage_dir + "/quality_control_montage_lsq6.png"
        # TODO base the scaling factor on the resolution of the initial model or target
        filesToCreateImagesFrom = []
        for subj_id, subj in subj_id_to_subj_with_lsq6_xfm_dict.items():
            for time_pt, subj_time_pt_xfm in subj.time_pt_dict.items():
                filesToCreateImagesFrom.append(subj_time_pt_xfm.resampled)

        # TODO it's strange that create_quality_control_images gets the montage directory twice
        # TODO (in montage_output=montageLSQ6 and in montage_dir), suggesting a weirdness in create_q_c_images
        lsq6VerificationImages = s.defer(create_quality_control_images(
            filesToCreateImagesFrom,
            montage_output=montageLSQ6,
            montage_dir=pipeline_montage_dir,
            message=" the input images after the lsq6 alignment"))

    # NB currently LSQ6 expects an array of files, but we have a map.
    # possibilities:
    # - note that pairwise is enough (except for efficiency -- redundant blurring, etc.)
    #   and just use the map fn above with an LSQ6 fn taking only a single source
    # - rewrite LSQ6 to use such a (nested) map
    # - write a conversion which creates a tagged array from the map, performs LSQ6,
    #   and converts back
    # - write 'over' which takes a registration, a data structure, and 'get/set' fns ...?

    # Intersubject registration: LSQ12/NLIN registration of common-timepoint images.
    # The assumption here is that all these files are roughly aligned.  Here is a toy
    # schematic of what happens.  In this example, the common timepoint is timepoint 2:
    #
    #                             ------------
    # subject A    A_time_1  ->  | A_time_2 |  ->  A_time_3
    # subject B    B_time_1  ->  | B_time_2 |  ->  B_time_3
    # subject C    C_time_1  ->  | C_time_2 |  ->  C_time_3
    #                             ------------
    #                                  |
    #               group-wise registration on time point 2

    # dictionary that holds the transformations from the intersubject images
    # to the final common-space average
    intersubj_img_to_xfm_to_common_avg_dict = {}  # type: Dict[MincAtom, XfmHandler]
    if options.registration.input_space in (InputSpace.lsq6, InputSpace.lsq12):
        # no registrations have been performed yet, so we can point to the input files
        s_id_to_intersubj_img_dict = {s_id: subj.intersubject_registration_image
                                      for s_id, subj in pipeline_subject_info.items()}
    else:
        # lsq6-aligned images:
        # when we ran the lsq6 alignment, we stored the XfmHandlers in the Subject
        # dictionary, so subj_with_xfmhandler.intersubject_registration_image returns
        # an XfmHandler, from which we extract the resampled file (in order to
        # continue the registration with it):
        s_id_to_intersubj_img_dict = {
            s_id: subj_with_xfmhandler.intersubject_registration_image.resampled
            for s_id, subj_with_xfmhandler in subj_id_to_subj_with_lsq6_xfm_dict.items()}

    if options.application.verbose:
        print("\nImages that are used for the inter-subject registration:")
        print("ID\timage")
        for subject in s_id_to_intersubj_img_dict:
            print(subject + '\t' + s_id_to_intersubj_img_dict[subject].path)

    # determine what configuration to use for the nonlinear registration
    nonlinear_configuration = get_nonlinear_configuration_from_options(
        options.nlin.nlin_protocol,
        options.nlin.reg_method,
        options.registration.resolution)

    if options.registration.input_space in [InputSpace.lsq6, InputSpace.native]:
        intersubj_xfms = s.defer(lsq12_nlin_build_model(
            imgs=list(s_id_to_intersubj_img_dict.values()),
            lsq12_conf=options.lsq12,
            nlin_conf=nonlinear_configuration,
            resolution=options.registration.resolution,
            lsq12_dir=pipeline_lsq12_common_dir,
            nlin_dir=pipeline_nlin_common_dir,
            nlin_prefix="common"))
        #, like={atlas_from_init_model_at_this_tp}
    elif options.registration.input_space == InputSpace.lsq12:
        # TODO: write a reader that creates a mincANTS configuration out of an input protocol
        # if we're starting with files that are already aligned with an affine transformation
        # (overall scaling is also dealt with), then the target for the nonlinear registration
        # should be the average of the current input files:
        first_nlin_target = s.defer(mincaverage(
            imgs=list(s_id_to_intersubj_img_dict.values()),
            name_wo_ext="avg_of_input_files",
            output_dir=pipeline_nlin_common_dir))
        intersubj_xfms = s.defer(mincANTS_NLIN_build_model(
            imgs=list(s_id_to_intersubj_img_dict.values()),
            initial_target=first_nlin_target,
            nlin_dir=pipeline_nlin_common_dir,
            conf=nonlinear_configuration))

    intersubj_img_to_xfm_to_common_avg_dict = {xfm.source: xfm for xfm in intersubj_xfms.output}

    # create one more convenience data structure: a mapping from subject_ID to the
    # xfm_handler that contains the transformation from the subject at the common
    # time point to the common time point average.
    subj_ID_to_xfm_handler_to_common_avg = {}
    for s_id, subj_at_common_tp in s_id_to_intersubj_img_dict.items():
        subj_ID_to_xfm_handler_to_common_avg[s_id] = \
            intersubj_img_to_xfm_to_common_avg_dict[subj_at_common_tp]

    # create verification images to show the inter-subject alignment
    montage_inter_subject = pipeline_montage_dir + "/quality_control_montage_inter_subject_registration.png"
    avg_and_inter_subject_images = []
    avg_and_inter_subject_images.append(intersubj_xfms.avg_img)
    for xfmh in intersubj_xfms.output:
        avg_and_inter_subject_images.append(xfmh.resampled)

    inter_subject_verification_images = s.defer(create_quality_control_images(
        imgs=avg_and_inter_subject_images,
        montage_output=montage_inter_subject,
        montage_dir=pipeline_montage_dir,
        message=" the result of the inter-subject alignment"))

    if options.application.verbose:
        print("\nTransformations from the intersubject images to the final nlin common space:")
        print("MincAtom\ttransformation")
        for subj_atom, xfm_handler in intersubj_img_to_xfm_to_common_avg_dict.items():
            print(subj_atom.path + '\t' + xfm_handler.xfm.path)

    ## within-subject registration
    # In the toy scenario below:
    #   subject A    A_time_1  ->  A_time_2  ->  A_time_3
    #   subject B    B_time_1  ->  B_time_2  ->  B_time_3
    #   subject C    C_time_1  ->  C_time_2  ->  C_time_3
    #
    # the following registrations are run:
    # 1) A_time_1 -> A_time_2
    # 2) A_time_2 -> A_time_3
    #
    # 3) B_time_1 -> B_time_2
    # 4) B_time_2 -> B_time_3
    #
    # 5) C_time_1 -> C_time_2
    # 6) C_time_2 -> C_time_3

    subj_id_to_subject_for_within_dict = pipeline_subject_info
    if options.registration.input_space == InputSpace.native:
        # we started with input images that were not aligned whatsoever; in this case
        # we should use the rigidly aligned files to continue the within-subject
        # registration with:
        subj_id_to_subject_for_within_dict = map_over_time_pt_dict_in_Subject(
            lambda x: x.resampled,
            subj_id_to_subj_with_lsq6_xfm_dict)  # type: Dict[str, Subject[MincAtom]]

    if options.application.verbose:
        print("\n\nWithin-subject registrations:")
        for s_id, subj in subj_id_to_subject_for_within_dict.items():
            print("ID: ", s_id)
            for time_pt, subj_img in subj.time_pt_dict.items():
                print(time_pt, " ", subj_img.path)
            print("\n")

    # dictionary that maps subject IDs to a tuple containing:
    # ( [(time_pt_n, time_pt_n+1, XfmHandler_from_n_to_n+1), ..., (,,,)],
    #   index_of_common_time_pt )
    chain_xfms = {s_id: s.defer(intrasubject_registrations(
                      subj=subj,
                      linear_conf=default_lsq12_multilevel_minctracc,
                      nlin_conf=mincANTS_default_conf.replace(
                          file_resolution=options.registration.resolution,
                          iterations="100x100x100x50")))
                  for s_id, subj in subj_id_to_subject_for_within_dict.items()}

    # create a montage image for each pair of time points
    for s_id, output_from_intra in chain_xfms.items():
        for time_pt_n, time_pt_n_plus_1, transform in output_from_intra[0]:
            montage_chain = (pipeline_montage_dir + "/quality_control_chain_ID_" + s_id +
                             "_timepoint_" + str(time_pt_n) + "_to_" + str(time_pt_n_plus_1) + ".png")
            chain_images = [transform.resampled, transform.target]
            chain_verification_images = s.defer(create_quality_control_images(
                chain_images,
                montage_output=montage_chain,
                montage_dir=pipeline_montage_dir,
                message="the alignment between ID " + s_id + " time point "
                        + str(time_pt_n) + " and " + str(time_pt_n_plus_1)))

    if options.application.verbose:
        print("\n\nTransformations obtained from the intrasubject registrations:")
        for s_id, output_from_intra in chain_xfms.items():
            print("ID: ", s_id)
            for time_pt_n, time_pt_n_plus_1, transform in output_from_intra[0]:
                print("Time point: ", time_pt_n, " to ", time_pt_n_plus_1,
                      " trans: ", transform.xfm.path)
            print("\n")

    ## stats
    #
    # The statistics files we want to create are the following:
    # 1) subject <- subject_common_time_point              (resampled to the common average)
    # 2) subject <- subject_common_time_point <- common_time_point_average
    #    (incorporates inter-subject differences)
    # 3) subject_time_point_n <- subject_time_point_n+1    (resampled to the common average)

    # create transformations from each subject to the final common time point average,
    # and from each subject to the subject's common time point
    (non_rigid_xfms_to_common_avg, non_rigid_xfms_to_common_subj) = s.defer(
        get_chain_transforms_for_stats(subj_id_to_subject_for_within_dict,
                                       intersubj_img_to_xfm_to_common_avg_dict,
                                       chain_xfms))

    # Ad 1) provide transformations from the subject's common time point to each subject.
    # These are temporary, because they still need to be resampled into the
    # average common time point space.
    determinants_from_subject_common_to_subject = map_over_time_pt_dict_in_Subject(
        lambda xfm: s.defer(determinants_at_fwhms(
            xfms=[s.defer(invert_xfmhandler(xfm))],
            inv_xfms=[xfm],  # determinants_at_fwhms is now vectorized -- unhelpful here
            blur_fwhms=options.stats.stats_kernels)),
        non_rigid_xfms_to_common_subj)

    # the content of determinants_from_subject_common_to_subject is:
    #
    # {subject_ID : Subject(inter_subject_time_pt, time_pt_dict)}
    #
    # where time_pt_dict contains:
    #
    # {time_point : Tuple(List[Tuple(float, Tuple(MincAtom, MincAtom))],
    #                     List[Tuple(float, Tuple(MincAtom, MincAtom))])}
    #
    # or, a bit more verbosely:
    #
    # {time_point : Tuple(relative_stat_files,
    #                     absolute_stat_files)}
    #
    # where either the relative_stat_files or the absolute_stat_files look like:
    #
    # [blur_kernel_1, (determinant_file_1, log_of_determinant_file_1),
    #  ...,
    #  blur_kernel_n, (determinant_file_n, log_of_determinant_file_n)]
    #
    # Now the only thing we really want to do is to resample the actual log
    # determinants that were generated into the space of the common average.
    # To make that a little easier, create a mapping that will contain:
    #
    # {subject_ID : Subject(intersubject_timepoint,
    #                       {time_pt_1: [stat_file_1, ..., stat_file_n],
    #                        ...,
    #                        time_pt_n: [stat_file_1, ..., stat_file_n]})}
    for s_id, subject_with_determinants in determinants_from_subject_common_to_subject.items():
        transform_from_common_subj_to_common_avg = subj_ID_to_xfm_handler_to_common_avg[s_id].xfm
        for time_pt, determinant_info in subject_with_determinants.time_pt_dict.items():
            # here, each determinant_info is a DataFrame where each row contains
            # 'log_full_det', 'log_nlin_det', 'fwhm', ... fields for the
            # log-determinants, blurred at various fwhms (corresponding to different rows)
            for _ix, row in determinant_info.iterrows():
                for log_det_file_to_resample in (row.log_full_det, row.log_nlin_det):
                    # TODO the MincAtoms corresponding to the resampled files are never returned
                    new_name_wo_ext = log_det_file_to_resample.filename_wo_ext + "_resampled_to_common"
                    s.defer(mincresample(img=log_det_file_to_resample,
                                         xfm=transform_from_common_subj_to_common_avg,
                                         like=log_det_file_to_resample,
                                         new_name_wo_ext=new_name_wo_ext,
                                         subdir="stats-volumes"))

    # Ad 2) provide transformations from the common avg to each subject.  That's the
    # inverse of what was provided by get_chain_transforms_for_stats().
    determinants_from_common_avg_to_subject = map_over_time_pt_dict_in_Subject(
        lambda xfm: s.defer(determinants_at_fwhms(
            xfms=[s.defer(invert_xfmhandler(xfm))],
            inv_xfms=[xfm],  # determinants_at_fwhms is now vectorized -- unhelpful here
            blur_fwhms=options.stats.stats_kernels)),
        non_rigid_xfms_to_common_avg)

    # TODO don't just return an (unnamed-)tuple here
    return Result(stages=s,
                  output=Namespace(
                      non_rigid_xfms_to_common=non_rigid_xfms_to_common_avg,
                      determinants_from_common_avg_to_subject=determinants_from_common_avg_to_subject,
                      determinants_from_subject_common_to_subject=determinants_from_subject_common_to_subject))
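# A minimal sketch of consuming `chain`'s Result (attribute names as returned above;
# the executor hand-off is hypothetical and depends on the surrounding application code):
#
#   chain_result = chain(options)
#   stages = chain_result.stages  # all deferred stages, ready for an executor
#   dets = chain_result.output.determinants_from_common_avg_to_subject
#   # dets maps subject_ID -> Subject whose time_pt_dict holds the per-timepoint
#   # determinant DataFrames described in the comments above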
                   stats_parser,
                   chain_parser])  # TODO could abstract and then parametrize by prefix/ns??

options = parse(p, sys.argv[1:])

# TODO: the registration resolution should be set somewhat outside of any actual
# function?  Maybe the right time to set this is here, when options are gathered?
if not options.registration.resolution:
    # if the target for the registration chain comes from the pride_of_models,
    # we cannot use the registration_targets() function.  The pride_of_models
    # works in a fairly different way, so we separate out that option.
    if options.lsq6.target_type == TargetType.pride_of_models:
        pride_of_models_mapping = get_pride_of_models_mapping(
            options.lsq6.target_file,
            options.application.output_directory,
            options.application.pipeline_name)
        # all initial models that are part of the pride of models must have the
        # same resolution (it's currently a requirement), so we can get the
        # resolution from any of the RegistrationTargets:
        random_key = list(pride_of_models_mapping)[0]
        file_for_resolution = pride_of_models_mapping[random_key].registration_standard.path
    else:
        file_for_resolution = registration_targets(
            lsq6_conf=options.lsq6,
            app_conf=options.application).registration_standard.path
    options.registration = options.registration.replace(
        resolution=get_resolution_from_file(file_for_resolution))

# *** *** *** *** *** *** *** *** ***

chain_result = chain(options)
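# What typically follows (a sketch; the executor entry point named here is an
# assumption, not taken from this fragment): hand the accumulated stages to the
# pipeline backend, e.g.
#
#   execute(chain_result.stages, options)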