s = Stages() # FIXME: why do we have to call registration_targets *outside* of lsq6_nuc_inorm? is it just because of the extra # options required? targets = s.defer( registration_targets(lsq6_conf=options.lsq6, app_conf=options.application, reg_conf=options.registration, first_input_file=imgs[0].path)) # TODO this is quite tedious and duplicates stuff in the registration chain ... resolution = (options.registration.resolution or get_resolution_from_file( targets.registration_standard.path)) # This must happen after calling registration_targets otherwise it will resample to options.registration.resolution options.registration = options.registration.replace(resolution=resolution) lsq6_result = s.defer( lsq6_nuc_inorm(imgs=imgs, resolution=resolution, registration_targets=targets, lsq6_dir=lsq6_dir, lsq6_options=options.lsq6)) return Result(stages=s, output=lsq6_result) lsq6_application = mk_application(parsers=[lsq6_parser], pipeline=lsq6_pipeline) if __name__ == "__main__": lsq6_application()
"Specify whether to use minctracc or ANTS for masking. [Default = %(default)s]." ) group.add_argument( "--masking-nlin-protocol", dest="masking_nlin_protocol", # TODO basically copied from nlin parser type=str, default=None, help= "Can optionally specify a registration protocol that is different from nlin protocol. " "Parameters must be specified as in either or the following examples: \n" "applications_testing/test_data/minctracc_example_nlin_protocol.csv \n" "applications_testing/test_data/mincANTS_example_nlin_protocol.csv \n" "[Default = %(default)s]") return parser # maybe wire the masking-nlin-protocol to the nlin-protocol? maget_parser = AnnotatedParser(parser=BaseParser( _mk_maget_parser(ArgParser(add_help=False)), "maget"), namespace="maget") maget_parsers = CompoundParser([lsq12_parser, nlin_parser, maget_parser]) maget_application = mk_application( parsers=[AnnotatedParser(parser=maget_parsers, namespace="maget")], pipeline=maget_pipeline) if __name__ == "__main__": maget_application()
def _mk_common_space_parser(parser: ArgParser):
    """Add "common space" options to `parser`.

    These control optional registration/resampling of pipeline outputs to a
    common (db) space defined by a model image.
    """
    group = parser.add_argument_group("Common space options",
                                      "Options for registration/resampling to common (db) space.")
    group.add_argument("--common-space-model", dest="common_space_model",
                       type=str,
                       # FIX: original help text ("Run MAGeT segmentation on the images.")
                       # was copy-pasted from a MAGeT flag; this flag names the model image.
                       help="Model image defining the common (db) space to register to.")
    # FIX: the original exposed only `--no-common-space-registration` with
    # action="store_false" *and* default=False, so `do_common_space_registration`
    # was False no matter what was passed -- the feature was unreachable.
    # Keep the old flag and the old default (False) for backward compatibility,
    # but add a positive flag with the same dest so registration can be enabled.
    group.add_argument("--common-space-registration", dest="do_common_space_registration",
                       default=False, action="store_true",
                       help="Do registration to common (db) space.")
    group.add_argument("--no-common-space-registration", dest="do_common_space_registration",
                       action="store_false",
                       help="Skip registration to common (db) space. [Default]")
    return parser

common_space_parser = AnnotatedParser(parser=BaseParser(_mk_common_space_parser(ArgParser(add_help=False)),
                                                        "common_space"),
                                      namespace="common_space")

mbm_parser = CompoundParser(
    [lsq6_parser,
     lsq12_parser,
     nlin_parser,
     stats_parser,
     common_space_parser,
     #thickness_parser,
     # TODO note that the maget-specific flags (--mask, --masking-method, etc., also get the "maget-" prefix)
     # which could be changed by putting in the maget-specific parser separately from its lsq12, nlin parsers
     AnnotatedParser(parser=maget_parsers, namespace="maget", prefix="maget"),
     segmentation_parser])

# TODO cast to MBMConf?
mbm_application = mk_application(parsers=[AnnotatedParser(parser=mbm_parser, namespace='mbm')],
                                 pipeline=mbm_pipeline)

if __name__ == "__main__":
    mbm_application()
result = s.defer(cortical_thickness(xfms=xfms, atlas=NotImplemented, label_mapping=options.thickness.label_mapping, atlas_fwhm=options.thickness.atlas_fwhm, thickness_fwhm=options.thickness.thickness_fwhm)) return Result(stages=s, output=result) def _mk_thickness_parser(p : ArgumentParser): p.add_argument("--xfm-csv", dest="xfm_csv", type=str, #required=True, help="CSV file containing at least 'source', 'xfm', 'target', and 'resampled' columns") # FIXME p.add_argument("--label-mapping", dest="label_mapping", type=FileAtom, #required=True, help="CSV file containing structure information (see minclaplace/wiki/LaplaceGrid)") p.add_argument("--atlas-fwhm", dest="atlas_fwhm", type=float, required=True, # default ?! help="Blurring kernel (mm) for atlas") p.add_argument("--thickness-fwhm", dest="thickness_fwhm", type=float, required=True, # default?? help="Blurring kernel (mm) for cortical surfaces") return p thickness_parser = NotImplemented cortical_thickness_application = mk_application(parsers=[thickness_parser], pipeline=cortical_thickness_pipeline) if __name__ == "__main__": cortical_thickness_application()
# Parser exposing model-building options, wrapped for use in compound parsers.
model_building_parser = AnnotatedParser(
    parser=BaseParser(_mk_model_building_parser(ArgParser(add_help=False)),
                      "model_building"),
    namespace="model_building")


def mk_mbm_parser(with_common_space: bool = True,
                  with_maget: bool = True,
                  lsq6_parser=lsq6_parser):
    """Assemble the compound parser for the MBM pipeline.

    `with_common_space` and `with_maget` toggle inclusion of the corresponding
    option groups; callers may substitute a custom `lsq6_parser`.
    """
    core = [lsq6_parser, lsq12_parser, nlin_parser,
            model_building_parser, stats_parser]  # thickness_parser not yet included
    common = [common_space_parser] if with_common_space else []
    # TODO note that the maget-specific flags (--mask, --masking-method, etc., also get the "maget-" prefix)
    # which could be changed by putting in the maget-specific parser separately from its lsq12, nlin parsers
    maget = ([segmentation_parser,
              AnnotatedParser(parser=maget_parsers, namespace="maget", prefix="maget")]
             if with_maget else [])
    return CompoundParser(core + common + maget)


# TODO cast to MBMConf?
mbm_application = mk_application(
    parsers=[AnnotatedParser(parser=mk_mbm_parser(), namespace='mbm')],
    pipeline=mbm_pipeline)

if __name__ == "__main__":
    mbm_application()
help="Create a mask for all images only, do not run full algorithm. [Default = %(default)s]") group.add_argument("--max-templates", dest="max_templates", default=25, type=int, help="Maximum number of templates to generate. [Default = %(default)s]") group.add_argument("--masking-method", dest="mask_method", default="minctracc", type=str, help="Specify whether to use minctracc or mincANTS for masking. [Default = %(default)s].") group.add_argument("--masking-nlin-protocol", dest="masking_nlin_protocol", # TODO basically copied from nlin parser type=str, default=None, help="Can optionally specify a registration protocol that is different from nlin protocol. " "Parameters must be specified as in either or the following examples: \n" "applications_testing/test_data/minctracc_example_nlin_protocol.csv \n" "applications_testing/test_data/mincANTS_example_nlin_protocol.csv \n" "[Default = %(default)s]") return parser # maybe wire the masking-nlin-protocol to the nlin-protocol? maget_parser = AnnotatedParser(parser=BaseParser(_mk_maget_parser(ArgParser(add_help=False)), "maget"), namespace="maget") maget_parsers = CompoundParser([lsq12_parser, nlin_parser, maget_parser]) maget_application = mk_application(parsers=[AnnotatedParser(parser=maget_parsers, namespace="maget")], pipeline=maget_pipeline) if __name__ == "__main__": maget_application()
for img, xfm in zip(df["anatomical_lsq6_MincAtom"], df["lsq6_to_atlas_XfmAtom"])], count_targetspace_MincAtom=lambda df: [s.defer(mincresample_new(img=img, xfm=xfm, like=atlas_target)) for img, xfm in zip(df["count_lsq6_MincAtom"], df["lsq6_to_atlas_XfmAtom"])], atlas_lsq6space_MincAtom=lambda df: [s.defer(mincresample_new(img=atlas_target_label, xfm=xfm, like=like, invert=True, interpolation=Interpolation.nearest_neighbour, extra_flags=('-keep_real_range',))) for xfm, like in zip( df["lsq6_to_atlas_XfmAtom"], df["count_lsq6_MincAtom"])] ) csv.applymap(maybe_deref_path).to_csv("analysis.csv",index=False) s.defer(create_quality_control_images(imgs=csv.count_targetspace_MincAtom.tolist(), montage_dir=output_dir, montage_output=os.path.join(output_dir, pipeline_name + "_resampled", "count_montage"), auto_range=True, message="count_mincs")) return Result(stages=s, output=()) lsq6_parser = AnnotatedParser(parser=BaseParser(_mk_lsq6_parser(with_nuc=False, with_inormalize=False), "LSQ6"), namespace="lsq6", cast=to_lsq6_conf) tv_pipeline_application = mk_application(parsers=[AnnotatedParser(parser=mk_mbm_parser(with_common_space=False, lsq6_parser=lsq6_parser), namespace="mbm"), consensus_to_atlas_parser], pipeline=tissue_vision_pipeline) if __name__ == "__main__": tv_pipeline_application()
return Result(stages=s, output=Namespace(nlin_xfms=nlin_result, avg_img=nlin_result.avg_img, determinants=determinants)) else: # there's no consistency in what gets returned, yikes ... return Result(stages=s, output=Namespace(nlin_xfms=nlin_result, avg_img=nlin_result.avg_img)) #_nlin_parser = _mk_nlin_parser(ArgParser(add_help=False)) #_nlin_parser.add_argument("--target", dest="target", # type=str, # help="Starting target for non-linear alignment. (Often in 'lsq12 space')." # "[Default = %(default)s]") nlin_parser.parser.argparser.add_argument("--target", dest="target", type=str, help="Starting target for non-linear alignment. (Often in 'lsq12 space')." "[Default = %(default)s]") nlin_parser.parser.argparser.add_argument("--target-mask", dest="target_mask", type=str, default=None, help="Starting target for non-linear alignment. (Often in 'lsq12 space')." "[Default = %(default)s]") nlin_application = mk_application(parsers=[nlin_parser, stats_parser], #, namespace='nlin')], pipeline=NLIN_pipeline) if __name__ == "__main__": nlin_application()
return Result(stages=s, output=Namespace(nlin_xfms=nlin_result, avg_img=nlin_result.avg_img)) #_nlin_parser = _mk_nlin_parser(ArgParser(add_help=False)) #_nlin_parser.add_argument("--target", dest="target", # type=str, # help="Starting target for non-linear alignment. (Often in 'lsq12 space')." # "[Default = %(default)s]") nlin_parser.parser.argparser.add_argument( "--target", dest="target", type=str, help="Starting target for non-linear alignment. (Often in 'lsq12 space')." "[Default = %(default)s]") nlin_parser.parser.argparser.add_argument( "--target-mask", dest="target_mask", type=str, default=None, help="Starting target for non-linear alignment. (Often in 'lsq12 space')." "[Default = %(default)s]") nlin_application = mk_application( parsers=[nlin_parser, stats_parser], #, namespace='nlin')], pipeline=NLIN_pipeline) if __name__ == "__main__": nlin_application()
namespace="staging") # TODO: this is a giant hack, but I acutally don't know how # TODO: to properly change the defaults in these parsers... # the registration targets for this pipeline are known, they # are a number of the time points from the 4D atlas. As such, # no target needs to be specified by the user. We'll just initialize # it here with lsq6_parser_with_bootstrap = lsq6_parser lsq6_parser_with_bootstrap.parser.argparser.set_defaults(bootstrap=True) lsq6_parser_with_bootstrap.parser.argparser.set_defaults(nuc=False) lsq6_parser_with_bootstrap.parser.argparser.set_defaults(inormalize=False) lsq6_parser_with_bootstrap.parser.argparser.set_defaults(lsq6_method="lsq6_centre_estimation") staging_parser = CompoundParser([lsq6_parser_with_bootstrap, lsq12_parser, nlin_parser, staging_parser]) stage_embryos_application = mk_application(parsers=[AnnotatedParser(parser=staging_parser, namespace="staging")], pipeline=stage_embryos_pipeline) if __name__ == "__main__": stage_embryos_application()
output_dir, pipeline_name + "_stitched", row.brain_name), axis=1) ############################# # Step 1: Run TV_stitch.py ############################# #TODO surely theres a way around this? df = df.assign(TV_stitch_result="") for index, row in df.iterrows(): df.at[index, "TV_stitch_result"] = s.defer( TV_stitch_wrap(brain_directory=FileAtom(row.brain_directory), brain_name=row.brain_name, slice_directory=row.slice_directory, TV_stitch_options=options.TV_stitch, Zstart=row.Zstart, Zend=row.Zend, output_dir=output_dir)) df.drop(["mosaic_dictionary", "TV_stitch_result"], axis=1).to_csv("TV_brains.csv", index=False) df.explode("TV_stitch_result")\ .assign(slice=lambda df: df.apply(lambda row: row.TV_stitch_result.path, axis=1))\ .drop(["mosaic_dictionary", "TV_stitch_result"], axis=1)\ .to_csv("TV_slices.csv", index=False) return Result(stages=s, output=()) tv_slice_recon_application = mk_application(parsers=[TV_stitch_parser], pipeline=tv_slice_recon_pipeline) if __name__ == "__main__": tv_slice_recon_application()
s = Stages() # TODO this is quite tedious and duplicates stuff in the registration chain ... resolution = (options.registration.resolution or get_resolution_from_file( s.defer(registration_targets(lsq6_conf=options.lsq6, app_conf=options.application, reg_conf=options.registration)).registration_standard.path)) # FIXME: why do we have to call registration_targets *outside* of lsq6_nuc_inorm? is it just because of the extra # options required? targets = s.defer(registration_targets(lsq6_conf=options.lsq6, app_conf=options.application, reg_conf=options.registration, first_input_file=imgs[0])) # This must happen after calling registration_targets otherwise it will resample to options.registration.resolution options.registration = options.registration.replace(resolution=resolution) lsq6_result = s.defer(lsq6_nuc_inorm(imgs=imgs, resolution=resolution, registration_targets=targets, lsq6_dir=lsq6_dir, lsq6_options=options.lsq6)) return Result(stages=s, output=lsq6_result) lsq6_application = mk_application(parsers=[lsq6_parser], pipeline=lsq6_pipeline) if __name__ == "__main__": lsq6_application()
mincs_df = mincs_df.assign( anatomical=lambda df: df.apply(lambda row: row.anatomical_isotropic_result.path, axis=1), count=lambda df: df.apply(lambda row: row.count_isotropic_result.path, axis=1), ) mincs_df.drop(mincs_df.filter(regex='.*_result.*|.*_list.*|.*_MincAtom.*'), axis=1)\ .to_csv("TV_mincs.csv", index=False) #TODO overlay them # s.defer(create_quality_control_images(imgs=reconstructed_mincs, montage_dir = output_dir, # montage_output=os.path.join(output_dir, pipeline_name + "_stacked", "reconstructed_montage"), # message="reconstructed_mincs")) #TODO # s.defer(create_quality_control_images(imgs=all_anatomical_pad_results, montage_dir=output_dir, # montage_output=os.path.join(output_dir, pipeline_name + "_stacked", # "%s_montage" % anatomical), # message="%s_mincs" % anatomical)) # s.defer(create_quality_control_images(imgs=all_count_pad_results, montage_dir=output_dir, # montage_output=os.path.join(output_dir, pipeline_name + "_stacked", # "%s_montage" % count), # auto_range=True, # message="%s_mincs" % count)) return Result(stages=s, output=()) tv_recon_application = mk_application(parsers = [deep_segment_parser, stacks_to_volume_parser, autocrop_parser], pipeline = tv_recon_pipeline) if __name__ == "__main__": tv_recon_application()
########################### # Step 4: crop distortion corrected lsq6 output image ########################### cropped_dc_lsq6_file = MincAtom(os.path.join( lsq6_dir, "average.aug2015_dist_corr.cropped.mnc"), pipeline_sub_dir=lsq6_dir) crop_result = s.defer( crop_to_brain( img=dc_lsq6_file, cropped_img=cropped_dc_lsq6_file, crop_to_brain_options=options.saddle_recon.crop_to_brain)) all_crop_results.append(crop_result) return Result(stages=s, output=Namespace(varian_output=varian_recon_results, lsq6_output=all_lsq6_results, dc_output=all_dc_results, crop_output=all_crop_results)) saddle_recon_parser = CompoundParser( [varian_recon_parser, lsq6_parser, crop_to_brain_parser]) saddle_recon_application = mk_application(parsers=[ AnnotatedParser(parser=saddle_recon_parser, namespace='saddle_recon') ], pipeline=saddle_recon_pipeline) if __name__ == "__main__": saddle_recon_application()