def stage_embryos_pipeline(options):
    s = Stages()

    imgs = get_imgs(options.application)
    rough_volume_imgs = get_volume_estimate(imgs)
    imgs_and_rough_volume = pd.DataFrame({"mincatom" : imgs,
                                          "rough_volume" : pd.Series(rough_volume_imgs, dtype=float)})

    check_MINC_input_files([img.path for img in imgs])

    output_directory = options.application.output_directory
    output_sub_dir = os.path.join(output_directory,
                                  options.application.pipeline_name + "_4D_atlas")

    time_points_in_4D_atlas = instances_in_4D_atlas_from_csv(options.staging.staging.csv_4D,
                                                             output_sub_dir)

    # we can use the resolution of one of the time points in the 4D atlas
    # for all the registrations that will be run.
    resolution = get_resolution_from_file(time_points_in_4D_atlas.loc[0]["mincatom"].orig_path)

    lsq12_conf = get_linear_configuration_from_options(options.staging.lsq12,
                                                       transform_type=LinearTransType.lsq12,
                                                       file_resolution=resolution)

    nlin_component = get_nonlinear_component(options.staging.nlin.reg_method)

    # match each of the embryos individually
    for i in range(imgs_and_rough_volume.shape[0]):
        s.defer(match_embryo_to_4D_atlas(imgs_and_rough_volume.loc[i],
                                         time_points_in_4D_atlas,
                                         lsq6_conf=options.staging.lsq6,
                                         lsq12_conf=lsq12_conf,
                                         nlin_module=nlin_component,
                                         resolution=resolution,
                                         nlin_options=options.staging.nlin))


    return Result(stages=s, output=None)
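
# A note on the pattern used throughout these examples (a minimal sketch,
# inferred from usage here rather than stated in the snippets themselves):
# `Stages` accumulates the processing stages of deferred sub-computations,
# and `s.defer(...)` appears to register a sub-pipeline's Result while
# handing back its output:
#
#   s = Stages()
#   sub_output = s.defer(some_pipeline_part(...))  # collect stages, keep output
#   return Result(stages=s, output=sub_output)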
Example #2
def tamarack(imgs : pd.DataFrame, options):
    # columns of the input df: `img` : MincAtom, `timept` : number, ...
    # columns of the pride of models : 'timept' : number, 'model' : MincAtom
    s = Stages()

    # TODO some assertions that the pride_of_models, if provided, is correct, and that this is intended target type

    def group_options(options, timepoint):
        options = copy.deepcopy(options)

        if options.mbm.lsq6.target_type == TargetType.pride_of_models:
            targets = get_closest_model_from_pride_of_models(pride_of_models_dict=get_pride_of_models_mapping(
                                                                 pride_csv=options.mbm.lsq6.target_file,
                                                                 output_dir=options.application.output_directory,
                                                                 pipeline_name=options.application.pipeline_name),
                                                             time_point=timepoint)

            options.mbm.lsq6 = options.mbm.lsq6.replace(target_type=TargetType.initial_model,
                                                        target_file=targets.registration_standard.path)

        #    resolution = (options.registration.resolution
        #                  or get_resolution_from_file(targets.registration_standard.path))
        #    options.registration = options.registration.replace(resolution=resolution)

        # FIXME use of registration_standard here is quite wrong ...
        # part of the trouble is that mbm calls registration_targets itself,
        # so we can't send this RegistrationTargets to `mbm` directly ...
        # one option: add yet another optional arg to `mbm` ...
        else:
            targets = s.defer(registration_targets(lsq6_conf=options.mbm.lsq6,
                                                   app_conf=options.application,
                                                   reg_conf=options.registration,
                                                   first_input_file=imgs.filename.iloc[0]))

        resolution = (options.registration.resolution or
                      get_resolution_from_file(targets.registration_standard.path))

        # This must happen after calling registration_targets otherwise it will resample to options.registration.resolution
        options.registration = options.registration.replace(resolution=resolution)

        return options

    # build all first-level models:
    first_level_results = (
        imgs  # TODO 'group' => 'timept' ?
        .groupby('group', as_index=False)       # the usual annoying pattern to do an aggregate with access
        .aggregate({ 'file' : lambda files: list(files) })  # to the groupby object's keys ... TODO: fix
        .rename(columns={ 'file' : "files" })
        .assign(options=lambda df: df.apply(axis=1, func=lambda row: group_options(options, row.group)))
        .assign(build_model=lambda df:
                              df.apply(axis=1,
                                       func=lambda row: s.defer(
                                           mbm(imgs=row.files,
                                               options=row.options,
                                               prefix="%s" % row.group,
                                               output_dir=os.path.join(
                                               options.application.output_directory,
                                               options.application.pipeline_name + "_first_level",
                                               "%s_processed" % row.group)))))
        .sort_values(by='group'))
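
    # Shape of `first_level_results` at this point (illustrative, read off the
    # construction above): one row per timepoint group, with columns
    #   group | files (list of MincAtoms) | options (per-group deep copy)
    #         | build_model (deferred `mbm` output)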

    if all(first_level_results.options.map(lambda opts: opts.registration.resolution)
             == first_level_results.options.iloc[0].registration.resolution):
        options.registration = options.registration.replace(
            resolution=first_level_results.options.iloc[0].registration.resolution)
    else:
        raise ValueError("some first-level models are run at different resolutions, possibly not what you want ...")

    # construction of the overall inter-average transforms will be done iteratively (for efficiency/aesthetics),
    # which doesn't really fit the DataFrame mold ...


    full_hierarchy = get_nonlinear_configuration_from_options(
        nlin_protocol=options.mbm.nlin.nlin_protocol,
        reg_method=options.mbm.nlin.reg_method,
        file_resolution=options.registration.resolution)

    # FIXME no good can come of this
    nlin_protocol = full_hierarchy.confs[-1] if isinstance(full_hierarchy, MultilevelANTSConf) else full_hierarchy
    # first register consecutive averages together:
    average_registrations = (
        first_level_results[:-1]
            .assign(next_model=list(first_level_results[1:].build_model))
            # TODO: we should be able to do lsq6 registration here as well!
            .assign(xfm=lambda df: df.apply(axis=1, func=lambda row: s.defer(
                                                      lsq12_nlin(source=row.build_model.avg_img,
                                                                 target=row.next_model.avg_img,
                                                                 lsq12_conf=get_linear_configuration_from_options(
                                                                     options.mbm.lsq12,
                                                                     transform_type=LinearTransType.lsq12,
                                                                     file_resolution=options.registration.resolution),
                                                                 nlin_conf=nlin_protocol)))))

    # now compose the above transforms to produce transforms from each average to the common average:
    common_time_pt = options.tamarack.common_time_pt
    common_model   = first_level_results[first_level_results.group == common_time_pt].iloc[0].build_model.avg_img
    #common = average_registrations[average_registrations.group == common_time_pt].iloc[0]
    before = average_registrations[average_registrations.group <  common_time_pt]  # asymmetry in before/after since
    after  = average_registrations[average_registrations.group >= common_time_pt]  # we used `next_`, not `previous_`

    # compose 1st and 2nd level transforms and resample into the common average space:
    def suffixes(xs):
        if len(xs) == 0:
            return [[]]
        else:
            ys = suffixes(xs[1:])
            return [[xs[0]] + ys[0]] + ys


    def prefixes(xs):
        if len(xs) == 0:
            return [[]]
        else:
            ys = prefixes(xs[1:])
            return ys + [ys[-1] + [xs[0]]]
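
    # Illustrative values (from tracing the definitions above):
    #   suffixes([1, 2, 3]) == [[1, 2, 3], [2, 3], [3], []]
    #   prefixes([1, 2, 3]) == [[], [3], [3, 2], [3, 2, 1]]
    # Note that `prefixes`, despite its name, accumulates elements in reverse
    # order -- presumably the composition order needed for the transforms on
    # the far side of the common time point.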

    xfms_to_common = (
        first_level_results
        .assign(uncomposed_xfms=suffixes(list(before.xfm))[:-1] + [None] + prefixes(list(after.xfm))[1:])
        .assign(xfm_to_common=lambda df: df.apply(axis=1, func=lambda row:
                                ((lambda x: s.defer(invert_xfmhandler(x)) if row.group >= common_time_pt else x)
                                   (s.defer(concat_xfmhandlers(row.uncomposed_xfms,
                                                               name=("%s_to_common"
                                                                     if row.group < common_time_pt
                                                                     else "%s_from_common") % row.group))))
                                  if row.uncomposed_xfms is not None else None))
        .drop('uncomposed_xfms', axis=1))  # TODO None => identity??

    # TODO indexing here is not good ...
    first_level_determinants = pd.concat(list(first_level_results.build_model.apply(
                                                lambda x: x.determinants.assign(first_level_avg=x.avg_img))),
                                         ignore_index=True)

    resampled_determinants = (
        pd.merge(left=first_level_determinants,
                 right=xfms_to_common.assign(source=lambda df: df.xfm_to_common.apply(
                                                              lambda x:
                                                                x.source if x is not None else None)),
                 left_on="first_level_avg", right_on='source')
        .assign(resampled_log_full_det=lambda df: df.apply(axis=1, func=lambda row:
                                         s.defer(mincresample_new(img=row.log_full_det,
                                                                  xfm=row.xfm_to_common.xfm,
                                                                  like=common_model))
                                                 if row.xfm_to_common is not None else row.img),
                resampled_log_nlin_det=lambda df: df.apply(axis=1, func=lambda row:
                                         s.defer(mincresample_new(img=row.log_nlin_det,
                                                                  xfm=row.xfm_to_common.xfm,
                                                                  like=common_model))
                                                 if row.xfm_to_common is not None else row.img))
    )

    inverted_overall_xfms = pd.Series({ xfm : (s.defer(concat_xfmhandlers([xfm, row.xfm_to_common]))
                                                 if row.xfm_to_common is not None else xfm)
                                        for _ix, row in xfms_to_common.iterrows()
                                        for xfm in row.build_model.xfms.lsq12_nlin_xfm })
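
    # (Interpretation, hedged: each first-level lsq12_nlin transform maps a
    # subject to its group average, and xfm_to_common maps that average to the
    # common average, so the composed values above run subject -> common;
    # `overall_xfms` below are their inverses.)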

    overall_xfms = inverted_overall_xfms.apply(lambda x: s.defer(invert_xfmhandler(x)))

    overall_determinants = determinants_at_fwhms(xfms=overall_xfms,
                                                 blur_fwhms=options.mbm.stats.stats_kernels,
                                                 inv_xfms=inverted_overall_xfms)


    # TODO turn off bootstrap as with two-level code?

    # TODO combine into one data frame
    return Result(stages=s, output=Namespace(first_level_results=first_level_results,
                                             overall_determinants=overall_determinants,
                                             resampled_determinants=resampled_determinants.drop(
                                                 ['options'],
                                                 axis=1)))
Example #4
def mbm(imgs : List[MincAtom], options : MBMConf, prefix : str, output_dir : str = ""):

    # TODO could also allow pluggable pipeline parts e.g. LSQ6 could be substituted out for the modified LSQ6
    # for the kidney tips, etc...

    # TODO this is tedious and annoyingly similar to the registration chain ...
    lsq6_dir  = os.path.join(output_dir, prefix + "_lsq6")
    lsq12_dir = os.path.join(output_dir, prefix + "_lsq12")
    nlin_dir  = os.path.join(output_dir, prefix + "_nlin")

    s = Stages()

    if len(imgs) == 0:
        raise ValueError("Please, some files!")

    # FIXME: why do we have to call registration_targets *outside* of lsq6_nuc_inorm? is it just because of the extra
    # options required?  Also, shouldn't options.registration be a required input (as it contains `input_space`) ...?
    targets = registration_targets(lsq6_conf=options.mbm.lsq6,
                                   app_conf=options.application,
                                   first_input_file=imgs[0].path)

    # TODO this is quite tedious and duplicates stuff in the registration chain ...
    resolution = (options.registration.resolution or
                  get_resolution_from_file(targets.registration_standard.path))
    options.registration = options.registration.replace(resolution=resolution)

    # FIXME it probably makes most sense if the lsq6 module itself (even within lsq6_nuc_inorm) handles the run_lsq6
    # setting (via use of the identity transform) since then this doesn't have to be implemented for every pipeline
    if options.mbm.lsq6.run_lsq6:
        lsq6_result = s.defer(lsq6_nuc_inorm(imgs=imgs,
                                             resolution=resolution,
                                             registration_targets=targets,
                                             lsq6_dir=lsq6_dir,
                                             lsq6_options=options.mbm.lsq6))
    else:
        # TODO don't actually do this resampling if not required (i.e., if the imgs already have the same grids)
        identity_xfm = s.defer(param2xfm(out_xfm=FileAtom(name="identity.xfm")))
        lsq6_result  = [XfmHandler(source=img, target=img, xfm=identity_xfm,
                                   resampled=s.defer(mincresample_new(img=img,
                                                                      like=targets.registration_standard,
                                                                      xfm=identity_xfm)))
                        for img in imgs]
    # what about running nuc/inorm without a linear registration step??

    full_hierarchy = get_nonlinear_configuration_from_options(nlin_protocol=options.mbm.nlin.nlin_protocol,
                                                              reg_method=options.mbm.nlin.reg_method,
                                                              file_resolution=resolution)

    lsq12_nlin_result = s.defer(lsq12_nlin_build_model(imgs=[xfm.resampled for xfm in lsq6_result],
                                                       resolution=resolution,
                                                       lsq12_dir=lsq12_dir,
                                                       nlin_dir=nlin_dir,
                                                       nlin_prefix=prefix,
                                                       lsq12_conf=options.mbm.lsq12,
                                                       nlin_conf=full_hierarchy))

    inverted_xfms = [s.defer(invert_xfmhandler(xfm)) for xfm in lsq12_nlin_result.output]

    determinants = s.defer(determinants_at_fwhms(
                             xfms=inverted_xfms,
                             inv_xfms=lsq12_nlin_result.output,
                             blur_fwhms=options.mbm.stats.stats_kernels))

    overall_xfms = [s.defer(concat_xfmhandlers([rigid_xfm, lsq12_nlin_xfm]))
                    for rigid_xfm, lsq12_nlin_xfm in zip(lsq6_result, lsq12_nlin_result.output)]

    output_xfms = (pd.DataFrame({ "rigid_xfm"      : lsq6_result,  # maybe don't return this if LSQ6 not run??
                                  "lsq12_nlin_xfm" : lsq12_nlin_result.output,
                                  "overall_xfm"    : overall_xfms }))
    # we could `merge` the determinants with this table, but preserving information would cause lots of duplication
    # of the transforms (or storing determinants in more columns, but iterating over dynamically known columns
    # seems a bit odd ...)

    # TODO transpose these fields?
    # avg_img=lsq12_nlin_result.avg_img,  # inconsistent w/ WithAvgImgs[...]-style outputs
    # "determinants" : determinants

    #output.avg_img = lsq12_nlin_result.avg_img
    #output.determinants = determinants   # TODO temporary - remove once incorporated properly into `output` proper
    # TODO add more of lsq12_nlin_result?

    # FIXME: this needs to go outside of the `mbm` function to avoid being run from within other pipelines (or
    # those other pipelines need to turn off this option)
    # TODO return some MAGeT stuff from MBM function ??
    # if options.mbm.mbm.run_maget:
    #     import copy
    #     maget_options = copy.deepcopy(options)  #Namespace(maget=options)
    #     #maget_options
    #     #maget_options.maget = maget_options.mbm
    #     #maget_options.execution = options.execution
    #     #maget_options.application = options.application
    #     maget_options.maget = options.mbm.maget
    #     del maget_options.mbm
    #
    #     s.defer(maget([xfm.resampled for xfm in lsq6_result],
    #                   options=maget_options,
    #                   prefix="%s_MAGeT" % prefix,
    #                   output_dir=os.path.join(output_dir, prefix + "_processed")))

    # should also move outside `mbm` function ...
    #if options.mbm.thickness.run_thickness:
    #    if not options.mbm.segmentation.run_maget:
    #        warnings.warn("MAGeT files (atlases, protocols) are needed to run thickness calculation.")
    #    # run MAGeT to segment the nlin average:
    #    import copy
    #    maget_options = copy.deepcopy(options)  #Namespace(maget=options)
    #    maget_options.maget = options.mbm.maget
    #    del maget_options.mbm
    #    segmented_avg = s.defer(maget(imgs=[lsq12_nlin_result.avg_img],
    #                                  options=maget_options,
    #                                  output_dir=os.path.join(options.application.output_directory,
    #                                                          prefix + "_processed"),
    #                                  prefix="%s_thickness_MAGeT" % prefix)).ix[0].img
    #    thickness = s.defer(cortical_thickness(xfms=pd.Series(inverted_xfms), atlas=segmented_avg,
    #                                           label_mapping=FileAtom(options.mbm.thickness.label_mapping),
    #                                           atlas_fwhm=0.56, thickness_fwhm=0.56))  # TODO magic fwhms
    #    # TODO write CSV -- should `cortical_thickness` do this/return a table?


    # FIXME: this needs to go outside of the `mbm` function to avoid being run from within other pipelines (or
    # those other pipelines need to turn off this option)
    if options.mbm.common_space.do_common_space_registration:
        warnings.warn("This feature is experimental ...")
        if not options.mbm.common_space.common_space_model:
            raise ValueError("No common space template provided!")
        # TODO allow lsq6 registration as well ...
        common_space_model = MincAtom(options.mbm.common_space.common_space_model,
                                      pipeline_sub_dir=os.path.join(options.application.output_directory,
                                                         options.application.pipeline_name + "_processed"))
        # TODO allow different lsq12/nlin config params than the ones used in MBM ...
        # WEIRD ... see comment in lsq12_nlin code ...
        nlin_conf  = full_hierarchy.confs[-1] if isinstance(full_hierarchy, MultilevelMincANTSConf) else full_hierarchy
        # also weird that we need to call get_linear_configuration_from_options here ... ?
        lsq12_conf = get_linear_configuration_from_options(conf=options.mbm.lsq12,
                                                           transform_type=LinearTransType.lsq12,
                                                           file_resolution=resolution)
        xfm_to_common = s.defer(lsq12_nlin(source=lsq12_nlin_result.avg_img, target=common_space_model,
                                           lsq12_conf=lsq12_conf, nlin_conf=nlin_conf,
                                           resample_source=True))

        model_common = s.defer(mincresample_new(img=lsq12_nlin_result.avg_img,
                                                xfm=xfm_to_common.xfm, like=common_space_model,
                                                postfix="_common"))

        overall_xfms_common = [s.defer(concat_xfmhandlers([rigid_xfm, nlin_xfm, xfm_to_common]))
                               for rigid_xfm, nlin_xfm in zip(lsq6_result, lsq12_nlin_result.output)]

        xfms_common = [s.defer(concat_xfmhandlers([nlin_xfm, xfm_to_common]))
                       for nlin_xfm in lsq12_nlin_result.output]

        output_xfms = output_xfms.assign(xfm_common=xfms_common, overall_xfm_common=overall_xfms_common)

        log_nlin_det_common, log_full_det_common = [dets.map(lambda d:
                                                      s.defer(mincresample_new(
                                                        img=d,
                                                        xfm=xfm_to_common.xfm,
                                                        like=common_space_model,
                                                        postfix="_common",
                                                        extra_flags=("-keep_real_range",),
                                                        interpolation=Interpolation.nearest_neighbour)))
                                                    for dets in (determinants.log_nlin_det, determinants.log_full_det)]

        determinants = determinants.assign(log_nlin_det_common=log_nlin_det_common,
                                           log_full_det_common=log_full_det_common)

    output = Namespace(avg_img=lsq12_nlin_result.avg_img, xfms=output_xfms, determinants=determinants)

    if options.mbm.common_space.do_common_space_registration:
        output.model_common = model_common

    return Result(stages=s, output=output)
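
# A minimal usage sketch for `mbm` (hypothetical file names and option
# parsing; assumes `options` is an MBMConf-style namespace produced by the
# surrounding application's CLI machinery):
#
#   imgs = [MincAtom("scan1.mnc"), MincAtom("scan2.mnc")]
#   result = mbm(imgs=imgs, options=options, prefix="study",
#                output_dir="/data/study_output")
#   # result.output carries .avg_img, .xfms (a DataFrame of rigid/lsq12_nlin/
#   # overall transforms), and .determinants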
Example #5
def maget(imgs: List[MincAtom],
          options,
          prefix,
          output_dir,
          build_model_xfms=None):
    # FIXME prefix, output_dir aren't used !!

    s = Stages()

    maget_options = options.maget.maget

    resolution = options.registration.resolution  # TODO or get_resolution_from_file(...) -- only if file always exists!

    pipeline_sub_dir = os.path.join(
        options.application.output_directory,
        options.application.pipeline_name + "_atlases")

    atlases = get_atlases(maget_options, pipeline_sub_dir)

    lsq12_conf = get_linear_configuration_from_options(
        options.maget.lsq12,
        transform_type=LinearTransType.lsq12,
        file_resolution=resolution)

    nlin_component = get_nonlinear_component(options.maget.nlin.reg_method)

    # TODO should this be here or outside `maget` call?
    #imgs = [s.defer(nlin_component.ToMinc.from_mnc(img)) for img in imgs]

    #nlin_hierarchy = get_nonlinear_configuration_from_options(options.maget.nlin.nlin_protocol,
    #                                                          next(iter(options.maget.nlin.flags_.nlin_protocol)),
    #                                                          reg_method=options.maget.nlin.reg_method,
    #                                                          file_resolution=resolution)

    if maget_options.mask or maget_options.mask_only:

        # this used to return alignments but doesn't currently do so
        masked_img = s.defer(
            maget_mask(imgs=imgs,
                       maget_options=options.maget,
                       atlases=atlases,
                       pipeline_sub_dir=pipeline_sub_dir + "_masking",  # FIXME repeats all alignments!!!
                       resolution=resolution))

        # now propagate only the masked form of the images and atlases:
        imgs = masked_img
        #atlases = masked_atlases  # TODO is this needed?

    if maget_options.mask_only:
        # register each input to each atlas, creating a mask
        return Result(stages=s, output=masked_img)  # TODO rename `alignments` to `registrations`??
    else:
        if maget_options.mask:
            del masked_img
        # this `del` is just to verify that we don't accidentally use this later, since these potentially
        # coarser alignments shouldn't be re-used (but if the protocols for masking and alignment are the same,
        # hash-consing will take care of things), just the masked images they create; can be removed later
        # if a sensible use is found

        # images with labels from atlases
        # N.B.: Even though we've already registered each image to each initial atlas, this happens again here,
        #       but using `nlin_hierarchy` instead of `masking_nlin_hierarchy` as options.
        #       This is not 'work-efficient' in the sense that this computation happens twice (although
        #       hopefully at greater precision the second time!), but the idea is to run a coarse initial
        #       registration to get a mask and then do a better registration with that mask (though I'm not
        #       sure exactly when this is faster than doing a single registration).
        #       This _can_ allow the overall computation to finish more rapidly
        #       (depending on the relative speed of the two alignment methods/parameters,
        #       number of atlases and other templates used, number of cores available, etc.).
        atlas_labelled_imgs = (
            pd.DataFrame({'img': img,
                          'label_file': s.defer(  # can't use `label` in a pd.DataFrame index!
                              mincresample_new(
                                  img=atlas.labels,
                                  xfm=s.defer(lsq12_nlin(source=img,
                                                         target=atlas,
                                                         nlin_module=nlin_component,
                                                         lsq12_conf=lsq12_conf,
                                                         nlin_options=options.maget.nlin.nlin_protocol,
                                                         resolution=resolution,
                                                         #nlin_conf=nlin_hierarchy,
                                                         resample_source=False)).xfm,
                                  like=img,
                                  invert=True,
                                  interpolation=Interpolation.nearest_neighbour,
                                  extra_flags=('-keep_real_range', '-labels')))}
                         for img in imgs for atlas in atlases))

        if maget_options.pairwise:

            def choose_new_templates(ts, n):
                # currently silly, but we might implement a smarter method ...
                # FIXME what if there aren't enough other imgs around?!  This silently goes weird ...
                # n+1 instead of n: choose one more since we won't use an image as its own template ...
                return pd.Series(ts[:n + 1])
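
            # e.g. (illustrative): with five images and n=2, this returns the
            # first three as candidate templates -- the extra one compensates
            # for dropping each image as its own template below.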

            # FIXME: the --max-templates flag is ambiguously named ... should be --max-new-templates
            # (and just use all atlases)
            # TODO we could have a separate templates_csv (or --template-files f [f ...]) but you can just
            # run a separate MAGeT pipeline and
            #if maget_options.templates_csv:
            #    templates = pd.read_csv(maget_options.templates_csv).template
            #else:
            templates = pd.DataFrame({
                'template': choose_new_templates(ts=imgs,
                                                 n=maget_options.max_templates - len(atlases))})
            # note these images are the masked ones if masking was done ...

            # the templates together with their (multiple) labels from the atlases (this merge just acts as a filter)
            labelled_templates = pd.merge(left=atlas_labelled_imgs,
                                          right=templates,
                                          left_on="img",
                                          right_on="template").drop('img', axis=1)

            # images with new labels from the templates
            imgs_and_templates = pd.merge(  #left=atlas_labelled_imgs,
                left=pd.DataFrame({"img": imgs}).assign(fake=1),
                right=labelled_templates.assign(fake=1),
                on='fake')
                #left_on='img', right_on='template')  # TODO do select here instead of below?

            #if build_model_xfms is not None:
            #    # use path instead of full mincatom as key in case we're reading these in from a CSV:
            #    xfm_dict = { x.source.path : x.xfm for x in build_model_xfms }

            template_labelled_imgs = (
                imgs_and_templates
                .rename(columns={'label_file': 'template_label_file'})
                # don't register a template to itself, since otherwise atlases would vote on that template twice
                .loc[lambda df: df.index.map(lambda ix: df.img[ix].path != df.template[ix].path)]
                .assign(label_file=lambda df: df.apply(
                    axis=1,
                    func=lambda row: s.defer(
                        # TODO switch to uses of nlin_component.whatever(...) in several places below?
                        mincresample_new(  #nlin_component.Algorithms.resample(
                            img=row.template_label_file,
                            xfm=(s.defer(lsq12_nlin(source=row.img,
                                                    target=row.template,
                                                    lsq12_conf=lsq12_conf,
                                                    resolution=resolution,
                                                    nlin_module=nlin_component,
                                                    nlin_options=options.maget.nlin.nlin_protocol,
                                                    #nlin_conf=nlin_hierarchy,
                                                    resample_source=False)).xfm
                                 if build_model_xfms is None
                                 # use transforms from model building if we have them:
                                 else s.defer(xfmconcat(  #nlin_component.Algorithms.concat(
                                     [build_model_xfms[row.img.path],
                                      s.defer(xfminvert(  #nlin_component.Algorithms.invert(
                                          build_model_xfms[row.template.path],
                                          subdir="tmp"))]))),
                            like=row.img,
                            invert=True,
                            #use_nn_interpolation=True
                            interpolation=Interpolation.nearest_neighbour,
                            extra_flags=('-keep_real_range', '-labels')))))
                if len(imgs) > 1
                # ... as no distinct templates to align if only one image supplied (#320)
                else pd.DataFrame({'img': [], 'label_file': []}))

            imgs_with_all_labels = pd.concat([atlas_labelled_imgs[['img', 'label_file']],
                                              template_labelled_imgs[['img', 'label_file']]],
                                             ignore_index=True)
        else:
            imgs_with_all_labels = atlas_labelled_imgs

        #imgs_with_all_labels = imgs_with_all_labels.applymap(
        #    lambda x: s.defer(nlin_component.ToMinc.to_mnc(x)))
        segmented_imgs = (
            imgs_with_all_labels
            .groupby('img')
            .aggregate({'label_file': lambda resampled_label_files: list(resampled_label_files)})
            .rename(columns={'label_file': 'label_files'})
            .reset_index()
            .assign(voted_labels=lambda df: df.apply(
                axis=1,
                func=lambda row: s.defer(
                    voxel_vote(label_files=row.label_files,
                               output_dir=os.path.join(row.img.pipeline_sub_dir, row.img.output_sub_dir),
                               name=row.img.filename_wo_ext + "_voted"))))
            .apply(axis=1, func=lambda row: row.img._replace(labels=row.voted_labels)))

        return Result(stages=s, output=segmented_imgs)
Example #6
def maget_mask(imgs: List[MincAtom],
               maget_options,
               resolution: float,
               pipeline_sub_dir: str,
               atlases=None):

    s = Stages()

    original_imgs = imgs
    imgs = copy.deepcopy(imgs)
    original_imgs = pd.Series(original_imgs,
                              index=[img.path for img in original_imgs])
    for img in imgs:
        img.output_sub_dir = os.path.join(img.output_sub_dir, "masking")

    # TODO dereference maget_options -> maget_options.maget outside maget_mask call?
    if atlases is None:
        atlases = get_atlases(maget_options.maget,
                              pipeline_sub_dir=pipeline_sub_dir)

    lsq12_conf = get_linear_configuration_from_options(maget_options.lsq12,
                                                       LinearTransType.lsq12,
                                                       resolution)

    #nlin_module = get_nonlinear_component(reg_method=options.mbm.nlin.reg_method)

    #masking_nlin_hierarchy = get_nonlinear_configuration_from_options(maget_options.maget.masking_nlin_protocol,
    #                                                                  next(iter(maget_options.maget.flags_.masking_nlin_protocol)),
    #                                                                  maget_options.maget.mask_method,
    #                                                                  resolution)

    masking_nlin_component = get_nonlinear_component(
        reg_method=maget_options.maget.mask_method)
    algorithms = masking_nlin_component.Algorithms
    #masking_nlin_conf = (masking_nlin_component.parse_protocol_file(
    #                       maget_options.maget.masking_nlin_protocol, resolution=resolution)
    #                     if maget_options.maget.masking_nlin_protocol is not None
    #                     else masking_nlin_component.get_default_conf(resolution=resolution))

    # TODO lift outside then delete
    #masking_imgs = copy.deepcopy(imgs)
    #for img in masking_imgs:
    #    img.pipeline_sub_dir = os.path.join(img.pipeline_sub_dir, "masking")

    masking_alignments = pd.DataFrame({'img': img,
                                       'atlas': atlas,
                                       'xfm': s.defer(
                                           lsq12_nlin(source=img,
                                                      target=atlas,
                                                      lsq12_conf=lsq12_conf,
                                                      nlin_options=maget_options.maget.masking_nlin_protocol,  #masking_nlin_conf,
                                                      resolution=resolution,
                                                      nlin_module=masking_nlin_component,
                                                      resample_source=False))}
                                      for img in imgs for atlas in atlases)

    # propagate a mask to each image using the above `masking_alignments` as follows:
    # - for each image, resample each atlas mask into that image's space and combine
    #   the propagated masks into a single mask (currently a voxelwise max via `mincmath`;
    #   see the FIXME below)
    # - attach the combined mask to the image (via `_replace`)
    masked_img = (
        masking_alignments
        .assign(resampled_mask=lambda df: df.apply(
            axis=1,
            func=lambda row: s.defer(
                algorithms.resample(img=row.atlas.mask,  #apply(lambda x: x.mask),
                                    xfm=row.xfm.xfm,  #apply(lambda x: x.xfm),
                                    like=row.img,
                                    invert=True,
                                    #interpolation=Interpolation.nearest_neighbour,
                                    postfix="-input-mask",
                                    subdir="tmp",
                                    # TODO annoying hack; fix mincresample(_mask) ...:
                                    #new_name_wo_ext="%s_to_%s-input-mask" % (row.atlas.filename_wo_ext,
                                    #                                         row.img.filename_wo_ext),
                                    use_nn_interpolation=True))))
        .groupby('img', as_index=False)
        .aggregate({'resampled_mask': lambda masks: list(masks)})
        .rename(columns={"resampled_mask": "resampled_masks"})
        .assign(voted_mask=lambda df: df.apply(
            axis=1,
            # FIXME cannot use mincmath here !!!
            func=lambda row: s.defer(
                mincmath(op="max",
                         vols=sorted(row.resampled_masks),
                         new_name="%s_max_mask" % row.img.filename_wo_ext,
                         subdir="tmp"))))
        .apply(axis=1, func=lambda row: row.img._replace(mask=row.voted_mask)))
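
    # (The chain above yields a pandas Series of MincAtoms, one per input
    # image, each carrying its combined mask.)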

    # resample the atlas images back to the input images:
    # (note: this doesn't modify `masking_alignments`, but only stages additional outputs)
    masking_alignments.assign(resampled_img=lambda df: df.apply(
        axis=1,
        func=lambda row: s.defer(
            algorithms.resample(
                img=row.atlas,
                xfm=row.xfm.xfm,  #.apply(lambda x: x.xfm),
                subdir="tmp",
                # TODO delete this stupid hack:
                #new_name_wo_ext=df.apply(lambda row:
                #  "%s_to_%s-resampled" % (row.atlas.filename_wo_ext,
                #                          row.img.filename_wo_ext),
                #                          axis=1),
                like=row.img,
                invert=True))))

    for img in masked_img:
        img.output_sub_dir = original_imgs.loc[img.path].output_sub_dir

    return Result(stages=s, output=masked_img)
Example #7
def common_space(mbm_result, options):
    s = Stages()

    # TODO: the interface of this function (basically a destructive 'id' function) is horrific
    # TODO: instead, copy the mbm_result here ??

    if not options.mbm.common_space.common_space_model:
        raise ValueError("No common space template provided!")
    if not options.mbm.common_space.common_space_mask:
        warnings.warn(
            "No common space mask provided ... might be OK if your consensus average mask is OK"
        )
    # TODO allow lsq6 registration as well ...
    common_space_model = MincAtom(
        options.mbm.common_space.common_space_model,
        # TODO fix the subdirectories!
        mask=MincAtom(options.mbm.common_space.common_space_mask,
                      pipeline_sub_dir=os.path.join(
                          options.application.output_directory,
                          options.application.pipeline_name + "_processed"))
        if options.mbm.common_space.common_space_mask else None,
        pipeline_sub_dir=os.path.join(
            options.application.output_directory,
            options.application.pipeline_name + "_processed"))

    # TODO allow different lsq12/nlin config params than the ones used in MBM ...
    # full_hierarchy = get_nonlinear_configuration_from_options(nlin_protocol=options.mbm.nlin.nlin_protocol,
    #                                                          reg_method=options.mbm.nlin.reg_method,
    #                                                          file_resolution=options.registration.resolution)
    # WEIRD ... see comment in lsq12_nlin code ...
    # nlin_conf  = full_hierarchy.confs[-1] if isinstance(full_hierarchy, MultilevelANTSConf) else full_hierarchy
    # also weird that we need to call get_linear_configuration_from_options here ... ?
    #    nlin_build_model_component = model_building_with_initial_target_generation(
    #                                   final_model_building_component=nlin_build_model_component,
    #                                   prelim_model_building_component=prelim_nlin_build_model_component)

    # TODO don't use name 'x_module' for something that's technically not a module ... perhaps unit/component?
    nlin_component = get_nonlinear_component(
        reg_method=options.mbm.nlin.reg_method)

    lsq12_conf = get_linear_configuration_from_options(
        conf=options.mbm.lsq12,
        transform_type=LinearTransType.lsq12,
        file_resolution=options.registration.resolution)
    # N.B.: options.registration.resolution has been *updated* correctly by mbm( ). sigh ...
    model_to_common = s.defer(
        lsq12_nlin(
            source=mbm_result.avg_img,
            target=common_space_model,
            lsq12_conf=lsq12_conf,
            nlin_module=nlin_component,
            resolution=options.registration.resolution,
            nlin_options=options.mbm.nlin.nlin_protocol,  # =nlin_conf,
            resample_source=True))

    model_common = s.defer(
        mincresample_new(img=mbm_result.avg_img,
                         xfm=model_to_common.xfm,
                         like=common_space_model,
                         postfix="_common"))

    overall_xfms_to_common = [
        s.defer(concat_xfmhandlers([rigid_xfm, nlin_xfm, model_to_common]))
        for rigid_xfm, nlin_xfm in zip(mbm_result.xfms.rigid_xfm,
                                       mbm_result.xfms.lsq12_nlin_xfm)
    ]

    overall_xfms_to_common_inv = [s.defer(invert_xfmhandler(xfmhandler))
                                  for xfmhandler in overall_xfms_to_common]

    xfms_to_common = [
        s.defer(concat_xfmhandlers([nlin_xfm, model_to_common]))
        for nlin_xfm in mbm_result.xfms.lsq12_nlin_xfm
    ]

    mbm_result.xfms = mbm_result.xfms.assign(
        xfm_to_common=xfms_to_common,
        overall_xfm_to_common=overall_xfms_to_common)

    if options.mbm.stats.calc_stats:
        log_nlin_det_common, log_full_det_common = ([
            dets.map(lambda d: s.defer(
                mincresample_new(img=d,
                                 xfm=model_to_common.xfm,
                                 like=common_space_model,
                                 postfix="_common")))
            for dets in (mbm_result.determinants.log_nlin_det,
                         mbm_result.determinants.log_full_det)
        ])

        overall_determinants = s.defer(
            determinants_at_fwhms(xfms=overall_xfms_to_common_inv,
                                  blur_fwhms=options.mbm.stats.stats_kernels))

        mbm_result.determinants = \
            mbm_result.determinants.assign(log_nlin_det_common=log_nlin_det_common,
                                           log_full_det_common=log_full_det_common,
                                           log_nlin_overall_det_common=overall_determinants.log_nlin_det,
                                           log_full_overall_det_common=overall_determinants.log_full_det
                                           )

    mbm_result.model_common = model_common

    return Result(stages=s, output=mbm_result)
Example #8
def tissue_vision_pipeline(options):
    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    csv = original_csv = get_imgs(options.application)
    # check_MINC_input_files([img.path for img in imgs])

    s = Stages()

    s.defer(create_quality_control_images(imgs=csv['anatomical_MincAtom'].tolist(), montage_dir=output_dir,
                                          montage_output=os.path.join(output_dir, pipeline_name + "_resampled",
                                                                      "input_montage"),
                                          auto_range=True,
                                          message="input_mincs"))
#############################
# Step 1: Run MBM.py to create a consensus average
#############################
    mbm_result = s.defer(mbm(imgs=csv['anatomical_MincAtom'].tolist(), options=options,
                             prefix=options.application.pipeline_name,
                             output_dir=output_dir,
                             with_maget=False))

    #TODO remove
    transforms = mbm_result.xfms.assign(native_file=lambda df: df.rigid_xfm.apply(lambda x: x.source),
                            lsq6_file=lambda df: df.lsq12_nlin_xfm.apply(lambda x: x.source),
                            lsq6_mask_file=lambda df:
                              df.lsq12_nlin_xfm.apply(lambda x: x.source.mask if x.source.mask else ""),
                            nlin_file=lambda df: df.lsq12_nlin_xfm.apply(lambda x: x.resampled),
                            nlin_mask_file=lambda df:
                              df.lsq12_nlin_xfm.apply(lambda x: x.resampled.mask if x.resampled.mask else ""))\
        .applymap(maybe_deref_path)
    determinants = mbm_result.determinants.drop(["full_det", "nlin_det"], axis=1)\
        .applymap(maybe_deref_path)

    csv = csv.assign(anatomical_lsq6_MincAtom=mbm_result.xfms.lsq12_nlin_xfm.apply(lambda xfm: xfm.source),
                     mbm_lsq6_XfmAtom=mbm_result.xfms.rigid_xfm.apply(lambda x: x.xfm),
                     mbm_lsq12_nlin_XfmAtom=mbm_result.xfms.lsq12_nlin_xfm.apply(lambda x: x.xfm),
                     mbm_full_XfmAtom=mbm_result.xfms.overall_xfm.apply(lambda x: x.xfm))

    # x.assign(count_lsq6_MincAtom=lambda df: [x + y for x, y in zip(df["x"], df["y"])])
    csv = csv.assign(count_lsq6_MincAtom=lambda df:
        [s.defer(mincresample_new(img=img, xfm=xfm, like=like))
         for img, xfm, like in zip(df["count_MincAtom"],
                                   df["mbm_lsq6_XfmAtom"],
                                   df["anatomical_lsq6_MincAtom"])])


#############################
# Step 2: Register consensus average to ABI tissuevision Atlas
#############################
    lsq12_conf = get_linear_configuration_from_options(conf=options.mbm.lsq12,
                                                       transform_type=LinearTransType.lsq12,
                                                       file_resolution=options.registration.resolution)
    nlin_component = get_nonlinear_component(reg_method=options.mbm.nlin.reg_method)

    atlas_target = MincAtom(name=options.consensus_to_atlas.atlas_target,
                            orig_name=options.consensus_to_atlas.atlas_target,
                            mask=MincAtom(name=options.consensus_to_atlas.atlas_target_mask,
                                          orig_name=options.consensus_to_atlas.atlas_target_mask))
    atlas_target_label = MincAtom(name=options.consensus_to_atlas.atlas_target_label,
                                  orig_name=options.consensus_to_atlas.atlas_target_label,
                                  mask=MincAtom(name=options.consensus_to_atlas.atlas_target_mask,
                                                orig_name=options.consensus_to_atlas.atlas_target_mask))

    lsq12_nlin_result = s.defer(lsq12_nlin(source=mbm_result.avg_img,
                                           target=atlas_target,
                                           lsq12_conf=lsq12_conf,
                                           nlin_module=nlin_component,
                                           nlin_options=options.mbm.nlin.nlin_protocol,
                                           resolution=options.registration.resolution,
                                           resample_source=False
                                           ))

#############################
# Step 3: Resample count volumes to ABI tissuevision Atlas space and vice versa
#############################

    csv = csv.assign(lsq6_to_atlas_XfmAtom=lambda df: df['mbm_lsq12_nlin_XfmAtom'].apply(
        lambda xfm: s.defer(xfmconcat([xfm, lsq12_nlin_result.xfm]))))

    csv = csv.assign(
        anatomical_targetspace_MincAtom=lambda df:
        [s.defer(mincresample_new(img=img, xfm=xfm, like=atlas_target))
         for img, xfm in zip(df["anatomical_lsq6_MincAtom"], df["lsq6_to_atlas_XfmAtom"])],
        count_targetspace_MincAtom=lambda df:
        [s.defer(mincresample_new(img=img, xfm=xfm, like=atlas_target))
         for img, xfm in zip(df["count_lsq6_MincAtom"], df["lsq6_to_atlas_XfmAtom"])],
        atlas_lsq6space_MincAtom=lambda df:
        [s.defer(mincresample_new(img=atlas_target_label, xfm=xfm, like=like, invert=True,
                                  interpolation=Interpolation.nearest_neighbour,
                                  extra_flags=('-keep_real_range',)))
         for xfm, like in zip( df["lsq6_to_atlas_XfmAtom"], df["count_lsq6_MincAtom"])]
    )

    csv.applymap(maybe_deref_path).to_csv("analysis.csv", index=False)

    s.defer(create_quality_control_images(imgs=csv.count_targetspace_MincAtom.tolist(), montage_dir=output_dir,
                                          montage_output=os.path.join(output_dir, pipeline_name + "_resampled",
                                                                      "count_montage"),
                                          auto_range=True,
                                          message="count_mincs"))
    return Result(stages=s, output=())
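The lsq6_to_atlas_XfmAtom column above concatenates each subject's lsq12_nlin transform with the consensus-to-atlas transform via xfmconcat, so a single resample carries a volume all the way to atlas space. A toy sketch of the composition order using 2D homogeneous affine matrices (hypothetical values; the real xfms are MINC transform files):

import numpy as np

T_subj_to_consensus = np.array([[1.1, 0.0, 0.0],
                                [0.0, 1.1, 0.0],
                                [0.0, 0.0, 1.0]])   # toy lsq12_nlin transform
T_consensus_to_atlas = np.array([[1.0, 0.0, 5.0],
                                 [0.0, 1.0, 5.0],
                                 [0.0, 0.0, 1.0]])  # toy consensus->atlas transform
# apply subject->consensus first, then consensus->atlas:
T_subj_to_atlas = T_consensus_to_atlas @ T_subj_to_consensus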
Example #9
def maget_mask(imgs : List[MincAtom], atlases, options):

    s = Stages()

    resample  = np.vectorize(mincresample_new, excluded={"extra_flags"})
    defer     = np.vectorize(s.defer)

    lsq12_conf = get_linear_configuration_from_options(options.maget.lsq12,
                                                       LinearTransType.lsq12,
                                                       options.registration.resolution)

    masking_nlin_hierarchy = get_nonlinear_configuration_from_options(options.maget.maget.masking_nlin_protocol,
                                                                      options.maget.maget.mask_method,
                                                                      options.registration.resolution)

    masking_alignments = pd.DataFrame({ 'img'   : img,
                                        'atlas' : atlas,
                                        'xfm'   : s.defer(lsq12_nlin(source=img, target=atlas,
                                                                     lsq12_conf=lsq12_conf,
                                                                     nlin_conf=masking_nlin_hierarchy,
                                                                     resample_source=False))}
                                      for img in imgs for atlas in atlases)
    # propagate a mask to each image using the above `alignments` as follows:
    # - for each image, voxel_vote on the masks propagated to that image to get a suitable mask
    # - run mincmath -clobber -mult <img> <voted_mask> to apply the mask to the files
    masked_img = (
        masking_alignments
        .assign(resampled_mask=lambda df: defer(resample(img=df.atlas.apply(lambda x: x.mask),
                                                         xfm=df.xfm.apply(lambda x: x.xfm),
                                                         like=df.img,
                                                         invert=True,
                                                         interpolation=Interpolation.nearest_neighbour,
                                                         postfix="-input-mask",
                                                         subdir="tmp",
                                                         # TODO annoying hack; fix mincresample(_mask) ...:
                                                         #new_name_wo_ext=df.apply(lambda row:
                                                         #    "%s_to_%s-input-mask" % (row.atlas.filename_wo_ext,
                                                         #                             row.img.filename_wo_ext),
                                                         #    axis=1),
                                                         extra_flags=("-keep_real_range",))))
        .groupby('img', sort=False, as_index=False)
        # sort=False: just for speed (might also need to implement more comparison methods on `MincAtom`s)
        .aggregate({'resampled_mask' : lambda masks: list(masks)})
        .rename(columns={"resampled_mask" : "resampled_masks"})
        .assign(voted_mask=lambda df: df.apply(axis=1,
                                               func=lambda row:
                                                 s.defer(voxel_vote(label_files=row.resampled_masks,
                                                                    name="%s_voted_mask" % row.img.filename_wo_ext,
                                                                    output_dir=os.path.join(row.img.output_sub_dir,
                                                                                            "tmp")))))
        .assign(masked_img=lambda df:
          df.apply(axis=1,
                 func=lambda row:
                   s.defer(mincmath(op="mult",
                                    # img must precede mask here
                                    # for output image range to be correct:
                                    vols=[row.img, row.voted_mask],
                                    new_name="%s_masked" % row.img.filename_wo_ext,
                                    subdir="resampled")))))  #['img']

    # resample the atlas images back to the input images:
    # (note: this doesn't modify `masking_alignments`, but only stages additional outputs)
    masking_alignments.assign(resampled_img=lambda df:
    defer(resample(img=df.atlas,
                   xfm=df.xfm.apply(lambda x: x.xfm),
                   subdir="tmp",
                   # TODO delete this stupid hack:
                   #new_name_wo_ext=df.apply(lambda row:
                   #  "%s_to_%s-resampled" % (row.atlas.filename_wo_ext,
                   #                          row.img.filename_wo_ext),
                   #                          axis=1),
                   like=df.img, invert=True)))

    # replace the table of alignments with a new one with masked images
    masking_alignments = (pd.merge(left=masking_alignments.assign(unmasked_img=lambda df: df.img),
                                   right=masked_img,
                                   on=["img"], how="right", sort=False)
                          .assign(img=lambda df: df.masked_img))

    return Result(stages=s, output=masking_alignments)
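voxel_vote above fuses the masks propagated from the individual atlases by per-voxel majority voting. A minimal sketch of the idea, assuming the masks are already-loaded binary numpy arrays of identical shape (Pydpiper's voxel_vote operates on MINC files instead):

import numpy as np

def majority_vote(masks):
    # 1 wherever more than half of the propagated masks agree, 0 elsewhere
    stacked = np.stack(masks)
    return (stacked.sum(axis=0) > len(masks) / 2).astype(np.uint8)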
Example #10
def maget(imgs : List[MincAtom], options, prefix, output_dir):     # FIXME prefix, output_dir aren't used !!

    s = Stages()

    maget_options = options.maget.maget

    pipeline_sub_dir = os.path.join(options.application.output_directory,
                                    options.application.pipeline_name + "_atlases")

    if maget_options.atlas_lib is None:
        raise ValueError("Need some atlases ...")

    #atlas_dir = os.path.join(output_dir, "input_atlases") ???

    # TODO should alternately accept a CSV file ...
    atlas_library = read_atlas_dir(atlas_lib=maget_options.atlas_lib, pipeline_sub_dir=pipeline_sub_dir)

    if len(atlas_library) == 0:
        raise ValueError("No atlases found in specified directory '%s' ..." % options.maget.maget.atlas_lib)

    num_atlases_needed = min(maget_options.max_templates, len(atlas_library))
    # TODO arbitrary; could choose atlases better ...
    atlases = atlas_library[:num_atlases_needed]
    # TODO issue a warning if not all atlases used or if more atlases requested than available?
    # TODO also, doesn't slicing with a higher number (i.e., if max_templates > n) go to the end of the list anyway?

    lsq12_conf = get_linear_configuration_from_options(options.maget.lsq12,
                                                       LinearTransType.lsq12,
                                                       options.registration.resolution)

    masking_nlin_hierarchy = get_nonlinear_configuration_from_options(options.maget.maget.masking_nlin_protocol,
                                                                      options.maget.maget.mask_method,
                                                                      options.registration.resolution)

    nlin_hierarchy = get_nonlinear_configuration_from_options(options.maget.nlin.nlin_protocol,
                                                              options.maget.nlin.reg_method,
                                                              options.registration.resolution)

    resample  = np.vectorize(mincresample_new, excluded={"extra_flags"})
    defer     = np.vectorize(s.defer)

    # plan the basic registrations between all image-atlas pairs; store the result paths in a table
    masking_alignments = pd.DataFrame({ 'img'   : img,
                                        'atlas' : atlas,
                                        'xfm'   : s.defer(lsq12_nlin(source=img, target=atlas,
                                                                     lsq12_conf=lsq12_conf,
                                                                     nlin_conf=masking_nlin_hierarchy,
                                                                     resample_source=False))}
                                      for img in imgs for atlas in atlases)

    if maget_options.mask or maget_options.mask_only:

        masking_alignments = s.defer(maget_mask(imgs, atlases, options))

        masked_atlases = atlases.apply(lambda atlas:
                           s.defer(mincmath(op='mult', vols=[atlas, atlas.mask], subdir="resampled",
                                            new_name="%s_masked" % atlas.filename_wo_ext)))

        # now propagate only the masked form of the images and atlases:
        imgs    = masking_alignments.img
        atlases = masked_atlases  # TODO is this needed?

    if maget_options.mask_only:
        # register each input to each atlas, creating a mask
        return Result(stages=s, output=masking_alignments)   # TODO rename `alignments` to `registrations`??
    else:
        del masking_alignments
        # this `del` is just to verify that we don't accidentally use this later, since my intent is that these
        # coarser alignments shouldn't be re-used, just the masked images they create; can be removed later
        # if a sensible use is found

        if maget_options.pairwise:

            def choose_new_templates(ts, n):
                # currently silly, but we might implement a smarter method ...
                # FIXME what if there aren't enough other imgs around?!  This silently goes weird ...
                return ts[:n+1]  # n+1 instead of n: choose one more since we won't use image as its own template ...

            new_templates = choose_new_templates(ts=imgs, n=maget_options.max_templates)
            # note these images are the masked ones if masking was done ...

            # TODO write a function to do these alignments and the image->atlas one above
            # align the new templates chosen from the images to the initial atlases:
            new_template_to_atlas_alignments = (
                pd.DataFrame({ 'img'   : template,
                               'atlas' : atlas,
                               'xfm'   : s.defer(lsq12_nlin(source=template, target=atlas,
                                                            lsq12_conf=lsq12_conf,
                                                            nlin_conf=nlin_hierarchy,
                                                            resample_source=False))}
                             for template in new_templates for atlas in atlases))
                             # ... and these atlases are multiplied by their masks (but is this necessary?)

            # label the new templates from resampling the atlas labels onto them:
            # TODO now vote on the labels to be used for the new templates ...
            # TODO extract into procedure?
            new_templates_labelled = (
                new_template_to_atlas_alignments
                .assign(resampled_labels=lambda df: defer(
                    resample(img=df.atlas.apply(lambda x: x.labels),
                             xfm=df.xfm.apply(lambda x: x.xfm),
                             interpolation=Interpolation.nearest_neighbour,
                             extra_flags=("-keep_real_range",),
                             like=df.img, invert=True)))
                .groupby('img', sort=False, as_index=False)
                .aggregate({'resampled_labels' : lambda labels: list(labels)})
                .assign(voted_labels=lambda df: df.apply(axis=1,
                                                         func=lambda row:
                                                           s.defer(voxel_vote(label_files=row.resampled_labels,
                                                                              name="%s_template_labels" %
                                                                                   row.img.filename_wo_ext,
                                                                              output_dir=os.path.join(
                                                                                  row.img.pipeline_sub_dir,
                                                                                  row.img.output_sub_dir,
                                                                                  "labels"))))))

            # TODO write a procedure for this assign-groupby-aggregate-rename...
            # FIXME should be in above algebraic manipulation but MincAtoms don't support flexible immutable updating
            for row in pd.merge(left=new_template_to_atlas_alignments, right=new_templates_labelled,
                                on=["img"], how="right", sort=False).itertuples():
                row.img.labels = s.defer(mincresample_new(img=row.voted_labels, xfm=row.xfm.xfm, like=row.img,
                                                          invert=True, interpolation=Interpolation.nearest_neighbour,
                                                          #postfix="-input-labels",
                                                          # this makes names really long ...:
                                                          # TODO this doesn't work for running MAGeT on the nlin avg:
                                                          #new_name_wo_ext="%s_on_%s" %
                                                          #                (row.voted_labels.filename_wo_ext,
                                                          #                 row.img.filename_wo_ext),
                                                          #postfix="_labels_via_%s" % row.xfm.xfm.filename_wo_ext,
                                                          new_name_wo_ext="%s_via_%s" % (row.voted_labels.filename_wo_ext,
                                                                                         row.xfm.xfm.filename_wo_ext),
                                                          extra_flags=("-keep_real_range",)))

            # now that the new templates have been labelled, combine with the atlases:
            # FIXME use the masked atlases created earlier ??
            all_templates = pd.concat([new_templates_labelled.img, atlases], ignore_index=True)

            # now take union of the resampled labels from the new templates with labels from the original atlases:
            #all_alignments = pd.concat([image_to_template_alignments,
            #                            alignments.rename(columns={ "atlas" : "template" })],
            #                           ignore_index=True, join="inner")

        else:
            all_templates = atlases

        # now register each input to each selected template
        # N.B.: Even though we've already registered each image to each initial atlas, this happens again here,
        #       but using `nlin_hierarchy` instead of `masking_nlin_hierarchy` as options.
        #       This is not 'work-efficient' in the sense that this computation happens twice (although
        #       hopefully at greater precision the second time!), but the idea is to run a coarse initial
        #       registration to get a mask and then do a better registration with that mask (though I'm not
        #       sure exactly when this is faster than doing a single registration).
        #       This _can_ allow the overall computation to finish more rapidly
        #       (depending on the relative speed of the two alignment methods/parameters,
        #       number of atlases and other templates used, number of cores available, etc.).
        image_to_template_alignments = (
            pd.DataFrame({ "img"      : img,
                           "template" : template_img,
                           "xfm"      : xfm }
                         for img in imgs      # TODO use the masked imgs here?
                         for template_img in
                             all_templates
                             # FIXME delete this one alignment
                             #labelled_templates[labelled_templates.img != img]
                             # since equality is equality of filepaths (a bit dangerous)
                             # TODO is there a more direct/faster way just to delete the template?
                         for xfm in [s.defer(lsq12_nlin(source=img, target=template_img,
                                                        lsq12_conf=lsq12_conf,
                                                        nlin_conf=nlin_hierarchy))]
                         )
        )

        # now do a voxel_vote on all resampled template labels, just as earlier with the masks
        voted = (image_to_template_alignments
                 .assign(resampled_labels=lambda df:
                                            defer(resample(img=df.template.apply(lambda x: x.labels),
                                                           # FIXME bug: at this point templates from template_alignments
                                                           # don't have associated labels (i.e., `None`s) -- fatal
                                                           xfm=df.xfm.apply(lambda x: x.xfm),
                                                           interpolation=Interpolation.nearest_neighbour,
                                                           extra_flags=("-keep_real_range",),
                                                           like=df.img, invert=True)))
                 .groupby('img', sort=False)
                 # TODO the pattern groupby-aggregate(lambda x: list(x))-reset_index-assign is basically a hack
                 # to do a groupby-assign with access to the group name;
                 # see http://stackoverflow.com/a/30224447/849272 for a better solution
                 # (note this pattern occurs several times in MAGeT and two-level code)
                 .aggregate({'resampled_labels' : lambda labels: list(labels)})
                 .reset_index()
                 .assign(voted_labels=lambda df: defer(np.vectorize(voxel_vote)(label_files=df.resampled_labels,
                                                                                output_dir=df.img.apply(
                                                                                    lambda x: os.path.join(
                                                                                        x.pipeline_sub_dir,
                                                                                        x.output_sub_dir))))))

        # TODO doing mincresample -invert separately for the img->atlas xfm for mask, labels is silly
        # (when Pydpiper's `mincresample` does both automatically)?

        # blargh, another destructive update ...
        for row in voted.itertuples():
            row.img.labels = row.voted_labels

        # returning voted_labels as a column is slightly redundant, but possibly useful ...
        return Result(stages=s, output=voted)  # voted.drop("voted_labels", axis=1))
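The groupby-aggregate(list)-reset_index-assign pattern flagged as a hack in the comments above can be collapsed into a single groupby-apply, since the group key is available as .name inside the applied function (this is the gist of the StackOverflow answer linked above). A minimal sketch with toy string data, leaving out the actual voxel_vote staging:

import pandas as pd

def vote_for_group(group):
    # group.name is the grouping key (here, the image), so no reset_index/assign dance
    return pd.Series({"voted_labels": "vote(%s)" % ", ".join(group.resampled_labels)})

df = pd.DataFrame({"img": ["a", "a", "b"], "resampled_labels": ["l1", "l2", "l3"]})
voted = df.groupby("img", sort=False).apply(vote_for_group).reset_index()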
Example #11
def maget(imgs : List[MincAtom], options, prefix, output_dir, build_model_xfms=None):
    # FIXME prefix, output_dir aren't used !!

    s = Stages()

    maget_options = options.maget.maget

    resolution = options.registration.resolution  # TODO or get_resolution_from_file(...) -- only if file always exists!

    pipeline_sub_dir = os.path.join(options.application.output_directory,
                                    options.application.pipeline_name + "_atlases")

    atlases = get_atlases(maget_options, pipeline_sub_dir)

    lsq12_conf = get_linear_configuration_from_options(options.maget.lsq12,
                                                       transform_type=LinearTransType.lsq12,
                                                       file_resolution=resolution)

    nlin_component = get_nonlinear_component(options.maget.nlin.reg_method)

    # TODO should this be here or outside `maget` call?
    #imgs = [s.defer(nlin_component.ToMinc.from_mnc(img)) for img in imgs]

    #nlin_hierarchy = get_nonlinear_configuration_from_options(options.maget.nlin.nlin_protocol,
    #                                                          next(iter(options.maget.nlin.flags_.nlin_protocol)),
    #                                                          reg_method=options.maget.nlin.reg_method,
    #                                                          file_resolution=resolution)

    if maget_options.mask or maget_options.mask_only:

        # this used to return alignments but doesn't currently do so
        masked_img = s.defer(maget_mask(imgs=imgs,
                                        maget_options=options.maget, atlases=atlases,
                                        pipeline_sub_dir=pipeline_sub_dir + "_masking", # FIXME repeats all alignments!!!
                                        resolution=resolution))

        # now propagate only the masked form of the images and atlases:
        imgs    = masked_img
        #atlases = masked_atlases  # TODO is this needed?

    if maget_options.mask_only:
        # register each input to each atlas, creating a mask
        return Result(stages=s, output=masked_img)   # TODO rename `alignments` to `registrations`??
    else:
        if maget_options.mask:
            del masked_img
        # this `del` is just to verify that we don't accidentally use this later, since these potentially
        # coarser alignments shouldn't be re-used (but if the protocols for masking and alignment are the same,
        # hash-consing will take care of things), just the masked images they create; can be removed later
        # if a sensible use is found

        # images with labels from atlases
        # N.B.: Even though we've already registered each image to each initial atlas, this happens again here,
        #       but using `nlin_hierarchy` instead of `masking_nlin_hierarchy` as options.
        #       This is not 'work-efficient' in the sense that this computation happens twice (although
        #       hopefully at greater precision the second time!), but the idea is to run a coarse initial
        #       registration to get a mask and then do a better registration with that mask (though I'm not
        #       sure exactly when this is faster than doing a single registration).
        #       This _can_ allow the overall computation to finish more rapidly
        #       (depending on the relative speed of the two alignment methods/parameters,
        #       number of atlases and other templates used, number of cores available, etc.).
        atlas_labelled_imgs = (
            pd.DataFrame({ 'img'        : img,
                           'label_file' : s.defer(  # can't use `label` in a pd.DataFrame index!
                              mincresample_new(img=atlas.labels,
                                               xfm=s.defer(lsq12_nlin(source=img,
                                                                      target=atlas,
                                                                      nlin_module=nlin_component,
                                                                      lsq12_conf=lsq12_conf,
                                                                      nlin_options=options.maget.nlin.nlin_protocol,
                                                                      resolution=resolution,
                                                                      #nlin_conf=nlin_hierarchy,
                                                                      resample_source=False)).xfm,
                                               like=img,
                                               invert=True,
                                               interpolation=Interpolation.nearest_neighbour,
                                               extra_flags=('-keep_real_range', '-labels')))}
                         for img in imgs for atlas in atlases)
        )

        if maget_options.pairwise:

            def choose_new_templates(ts, n):
                # currently silly, but we might implement a smarter method ...
                # FIXME what if there aren't enough other imgs around?!  This silently goes weird ...
                return pd.Series(ts[:n+1])  # n+1 instead of n: choose one more since we won't use image as its own template ...

            # FIXME: the --max-templates flag is ambiguously named ... should be --max-new-templates
            # (and just use all atlases)
            # TODO we could have a separate templates_csv (or --template-files f [f ...]) but you can just
            # run a separate MAGeT pipeline and
            #if maget_options.templates_csv:
            #    templates = pd.read_csv(maget_options.templates_csv).template
            #else:
            templates = pd.DataFrame({ 'template' : choose_new_templates(ts=imgs,
                                                                         n=maget_options.max_templates - len(atlases))})
            # note these images are the masked ones if masking was done ...

            # the templates together with their (multiple) labels from the atlases (this merge just acts as a filter)
            labelled_templates = pd.merge(left=atlas_labelled_imgs, right=templates,
                                          left_on="img", right_on="template").drop('img', axis=1)

            # images with new labels from the templates
            imgs_and_templates = pd.merge(#left=atlas_labelled_imgs,
                                          left=pd.DataFrame({ "img" : imgs }).assign(fake=1),
                                          right=labelled_templates.assign(fake=1),
                                          on='fake')
                                          #left_on='img', right_on='template')  # TODO do select here instead of below?

            #if build_model_xfms is not None:
            #    # use path instead of full mincatom as key in case we're reading these in from a CSV:
            #    xfm_dict = { x.source.path : x.xfm for x in build_model_xfms }

            template_labelled_imgs = (
                imgs_and_templates
                .rename(columns={ 'label_file' : 'template_label_file' })
                # don't register template to itself, since otherwise atlases would vote on that template twice
                .loc[lambda df: df.index.map(lambda ix: df.img[ix].path
                                               != df.template[ix].path)]
                .assign(label_file=lambda df: df.apply(axis=1, func=lambda row:
                          s.defer(
                            # TODO switch to uses of nlin_component.whatever(...) in several places below?
                            mincresample_new(
                            #nlin_component.Algorithms.resample(
                              img=row.template_label_file,
                              xfm=s.defer(
                                    lsq12_nlin(source=row.img,
                                               target=row.template,
                                               lsq12_conf=lsq12_conf,
                                               resolution=resolution,
                                               nlin_module=nlin_component,
                                               nlin_options=options.maget.nlin.nlin_protocol,
                                               #nlin_conf=nlin_hierarchy,
                                               resample_source=False)).xfm
                                  if build_model_xfms is None
                                  # use transforms from model building if we have them:
                                  else s.defer(
                                         xfmconcat(
                                         #nlin_component.Algorithms.concat(
                                          [build_model_xfms[row.img.path],
                                           s.defer(
                                             xfminvert(
                                             #nlin_component.Algorithms.invert(
                                               build_model_xfms[row.template.path],
                                               subdir="tmp"))])),
                              like=row.img,
                              invert=True,
                              #use_nn_interpolation=True
                              interpolation=Interpolation.nearest_neighbour,
                              extra_flags=('-keep_real_range', '-labels')
                            ))))
            ) if len(imgs) > 1 else pd.DataFrame({ 'img' : [], 'label_file' : [] })
              # ... as no distinct templates to align if only one image supplied (#320)

            imgs_with_all_labels = pd.concat([atlas_labelled_imgs[['img', 'label_file']],
                                              template_labelled_imgs[['img', 'label_file']]],
                                             ignore_index=True)
        else:
            imgs_with_all_labels = atlas_labelled_imgs


        #imgs_with_all_labels = imgs_with_all_labels.applymap(
        #    lambda x: s.defer(nlin_component.ToMinc.to_mnc(x)))
        segmented_imgs = (
                imgs_with_all_labels
                .groupby('img')
                .aggregate({'label_file' : lambda resampled_label_files: list(resampled_label_files)})
                .rename(columns={ 'label_file' : 'label_files' })
                .reset_index()
                .assign(voted_labels=lambda df: df.apply(axis=1, func=lambda row:
                          s.defer(voxel_vote(label_files=row.label_files,
                                             output_dir=os.path.join(row.img.pipeline_sub_dir, row.img.output_sub_dir),
                                             name=row.img.filename_wo_ext+"_voted"))))
                .apply(axis=1, func=lambda row: row.img._replace(labels=row.voted_labels))
        )

        return Result(stages=s, output=segmented_imgs)
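The assign(fake=1) merges above build a Cartesian product: joining two frames on a constant key pairs every image with every labelled template (newer pandas versions offer merge(..., how="cross") for the same effect). A toy sketch of the trick:

import pandas as pd

imgs_df = pd.DataFrame({"img": ["img1", "img2"]}).assign(fake=1)
templates_df = pd.DataFrame({"template": ["t1", "t2"]}).assign(fake=1)
pairs = imgs_df.merge(templates_df, on="fake").drop("fake", axis=1)
# 4 rows: img1/t1, img1/t2, img2/t1, img2/t2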
Example #12
def maget_mask(imgs : List[MincAtom], maget_options, resolution : float,
               pipeline_sub_dir : str, atlases=None):

    s = Stages()

    original_imgs = imgs
    imgs = copy.deepcopy(imgs)
    original_imgs = pd.Series(original_imgs, index=[img.path for img in original_imgs])
    for img in imgs:
        img.output_sub_dir = os.path.join(img.output_sub_dir, "masking")

    # TODO dereference maget_options -> maget_options.maget outside maget_mask call?
    if atlases is None:
        atlases = get_atlases(maget_options.maget, pipeline_sub_dir=pipeline_sub_dir)

    lsq12_conf = get_linear_configuration_from_options(maget_options.lsq12,
                                                       LinearTransType.lsq12,
                                                       resolution)

    #nlin_module = get_nonlinear_component(reg_method=options.mbm.nlin.reg_method)

    #masking_nlin_hierarchy = get_nonlinear_configuration_from_options(maget_options.maget.masking_nlin_protocol,
    #                                                                  next(iter(maget_options.maget.flags_.masking_nlin_protocol)),
    #                                                                  maget_options.maget.mask_method,
    #                                                                  resolution)

    masking_nlin_component = get_nonlinear_component(reg_method=maget_options.maget.mask_method)
    algorithms = masking_nlin_component.Algorithms
    #masking_nlin_conf = (masking_nlin_component.parse_protocol_file(
    #                       maget_options.maget.masking_nlin_protocol, resolution=resolution)
    #                     if maget_options.maget.masking_nlin_protocol is not None
    #                     else masking_nlin_component.get_default_conf(resolution=resolution))

    # TODO lift outside then delete
    #masking_imgs = copy.deepcopy(imgs)
    #for img in masking_imgs:
    #    img.pipeline_sub_dir = os.path.join(img.pipeline_sub_dir, "masking")

    masking_alignments = pd.DataFrame({ 'img'   : img,
                                        'atlas' : atlas,
                                        'xfm'   : s.defer(
                                          lsq12_nlin(source=img, target=atlas,
                                                     lsq12_conf=lsq12_conf,
                                                     nlin_options=maget_options.maget.masking_nlin_protocol,
                                                     #masking_nlin_conf,
                                                     resolution=resolution,
                                                     nlin_module=masking_nlin_component,
                                                     resample_source=False))}
                                      for img in imgs for atlas in atlases)

    # propagate a mask to each image using the above `alignments` as follows:
    # - for each image, voxel_vote on the masks propagated to that image to get a suitable mask
    # - run mincmath -clobber -mult <img> <voted_mask> to apply the mask to the files
    masked_img = (
        masking_alignments
        .assign(resampled_mask=lambda df: df.apply(axis=1, func=lambda row:
           s.defer(algorithms.resample(img=row.atlas.mask, #apply(lambda x: x.mask),
                                       xfm=row.xfm.xfm,  #apply(lambda x: x.xfm),
                                       like=row.img,
                                       invert=True,
                                       #interpolation=Interpolation.nearest_neighbour,
                                       postfix="-input-mask",
                                       subdir="tmp",
                                       # TODO annoying hack; fix mincresample(_mask) ...:
                                       #new_name_wo_ext=df.apply(lambda row:
                                       #    "%s_to_%s-input-mask" % (row.atlas.filename_wo_ext,
                                       #                             row.img.filename_wo_ext),
                                       #    axis=1),
                                       use_nn_interpolation=True
                                       ))))
        .groupby('img', as_index=False)
        .aggregate({'resampled_mask' : lambda masks: list(masks)})
        .rename(columns={"resampled_mask" : "resampled_masks"})
        .assign(voted_mask=lambda df: df.apply(axis=1,
                                               func=lambda row:
                  # FIXME cannot use mincmath here !!!
                  s.defer(mincmath(op="max", vols=sorted(row.resampled_masks),
                                   new_name="%s_max_mask" % row.img.filename_wo_ext,
                                   subdir="tmp"))))
        .apply(axis=1, func=lambda row: row.img._replace(mask=row.voted_mask)))

    # resample the atlas images back to the input images:
    # (note: this doesn't modify `masking_alignments`, but only stages additional outputs)
    masking_alignments.assign(resampled_img=lambda df: df.apply(axis=1, func=lambda row:
      s.defer(algorithms.resample(
                img=row.atlas,
                xfm=row.xfm.xfm, #.apply(lambda x: x.xfm),
                subdir="tmp",
                # TODO delete this stupid hack:
                #new_name_wo_ext=df.apply(lambda row:
                #  "%s_to_%s-resampled" % (row.atlas.filename_wo_ext,
                #                          row.img.filename_wo_ext),
                #                          axis=1),
                like=row.img, invert=True))))

    for img in masked_img:
        img.output_sub_dir = original_imgs.loc[img.path].output_sub_dir

    return Result(stages=s, output=masked_img)
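The row.img._replace(mask=row.voted_mask) calls above follow the namedtuple convention: they return an updated copy rather than mutating the MincAtom in place, which is why the result of the final .apply becomes the function's output. A toy sketch of that idiom with a hypothetical Atom type:

from collections import namedtuple

Atom = namedtuple("Atom", ["path", "mask"])
a = Atom(path="img.mnc", mask=None)
b = a._replace(mask="img_mask.mnc")  # new Atom; `a` itself is unchanged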
Example #13
def maget(imgs : List[MincAtom], options, prefix, output_dir):     # FIXME prefix, output_dir aren't used !!

    s = Stages()

    maget_options = options.maget.maget

    resolution = options.registration.resolution  # TODO or get_resolution_from_file(...) -- only if file always exists!

    pipeline_sub_dir = os.path.join(options.application.output_directory,
                                    options.application.pipeline_name + "_atlases")

    if maget_options.atlas_lib is None:
        raise ValueError("Need some atlases ...")

    # TODO should alternately accept a CSV file ...
    atlases = atlases_from_dir(atlas_lib=maget_options.atlas_lib,
                               max_templates=maget_options.max_templates,
                               pipeline_sub_dir=pipeline_sub_dir)

    lsq12_conf = get_linear_configuration_from_options(options.maget.lsq12,
                                                       transform_type=LinearTransType.lsq12,
                                                       file_resolution=resolution)

    nlin_hierarchy = get_nonlinear_configuration_from_options(options.maget.nlin.nlin_protocol,
                                                              reg_method=options.maget.nlin.reg_method,
                                                              file_resolution=resolution)

    if maget_options.mask or maget_options.mask_only:

        # this used to return alignments but doesn't currently do so
        masked_img = s.defer(maget_mask(imgs=imgs,
                                        maget_options=options.maget, atlases=atlases,
                                        pipeline_sub_dir=pipeline_sub_dir + "_masking", # FIXME repeats all alignments!!!
                                        resolution=resolution))

        # now propagate only the masked form of the images and atlases:
        imgs    = masked_img
        #atlases = masked_atlases  # TODO is this needed?

    if maget_options.mask_only:
        # register each input to each atlas, creating a mask
        return Result(stages=s, output=masked_img)   # TODO rename `alignments` to `registrations`??
    else:
        if maget_options.mask:
            del masked_img
        # this `del` is just to verify that we don't accidentally use this later, since these potentially
        # coarser alignments shouldn't be re-used (but if the protocols for masking and alignment are the same,
        # hash-consing will take care of things), just the masked images they create; can be removed later
        # if a sensible use is found

        # images with labels from atlases
        # N.B.: Even though we've already registered each image to each initial atlas, this happens again here,
        #       but using `nlin_hierarchy` instead of `masking_nlin_hierarchy` as options.
        #       This is not 'work-efficient' in the sense that this computation happens twice (although
        #       hopefully at greater precision the second time!), but the idea is to run a coarse initial
        #       registration to get a mask and then do a better registration with that mask (though I'm not
        #       sure exactly when this is faster than doing a single registration).
        #       This _can_ allow the overall computation to finish more rapidly
        #       (depending on the relative speed of the two alignment methods/parameters,
        #       number of atlases and other templates used, number of cores available, etc.).
        atlas_labelled_imgs = (
            pd.DataFrame({ 'img'        : img,
                           'label_file' : s.defer(  # can't use `label` in a pd.DataFrame index!
                              mincresample_new(img=atlas.labels,
                                               xfm=s.defer(lsq12_nlin(source=img,
                                                                      target=atlas,
                                                                      lsq12_conf=lsq12_conf,
                                                                      nlin_conf=nlin_hierarchy,
                                                                      resample_source=False)).xfm,
                                               like=img,
                                               invert=True,
                                               interpolation=Interpolation.nearest_neighbour,
                                               extra_flags=('-keep_real_range',)))}
                         for img in imgs for atlas in atlases)
        )

        if maget_options.pairwise:

            def choose_new_templates(ts, n):
                # currently silly, but we might implement a smarter method ...
                # FIXME what if there aren't enough other imgs around?!  This silently goes weird ...
                return pd.Series(ts[:n+1])  # n+1 instead of n: choose one more since we won't use image as its own template ...

            # FIXME: the --max-templates flag is ambiguously named ... should be --max-new-templates
            # (and just use all atlases)
            templates = pd.DataFrame({ 'template' : choose_new_templates(ts=imgs,
                                                                         n=maget_options.max_templates - len(atlases))})
            # note these images are the masked ones if masking was done ...

            # the templates together with their (multiple) labels from the atlases (this merge just acts as a filter)
            labelled_templates = pd.merge(left=atlas_labelled_imgs, right=templates,
                                          left_on="img", right_on="template").drop('img', axis=1)

            # images with new labels from the templates
            imgs_and_templates = pd.merge(#left=atlas_labelled_imgs,
                                          left=pd.DataFrame({ "img" : imgs }).assign(fake=1),
                                          right=labelled_templates.assign(fake=1),
                                          on='fake')
                                          #left_on='img', right_on='template')  # TODO do select here instead of below?

            template_labelled_imgs = (
                imgs_and_templates
                .rename(columns={ 'label_file' : 'template_label_file' })
                # don't register template to itself, since otherwise atlases would vote on that template twice
                .loc[lambda df: df.index.map(lambda ix: df.img[ix].path
                                               != df.template[ix].path)]
                .assign(label_file=lambda df: df.apply(axis=1, func=lambda row:
                           s.defer(mincresample_new(img=row.template_label_file,
                                                    xfm=s.defer(lsq12_nlin(source=row.img,
                                                                           target=row.template,
                                                                           lsq12_conf=lsq12_conf,
                                                                           nlin_conf=nlin_hierarchy,
                                                                           resample_source=False)).xfm,
                                                    like=row.img,
                                                    invert=True,
                                                    interpolation=Interpolation.nearest_neighbour,
                                                    extra_flags=('-keep_real_range',)))))
            )

            imgs_with_all_labels = pd.concat([atlas_labelled_imgs[['img', 'label_file']],
                                              template_labelled_imgs[['img', 'label_file']]],
                                             ignore_index=True)
        else:
            imgs_with_all_labels = atlas_labelled_imgs

        segmented_imgs = (
                imgs_with_all_labels
                .groupby('img')
                .aggregate({'label_file' : lambda resampled_label_files: list(resampled_label_files)})
                .rename(columns={ 'label_file' : 'label_files' })
                .reset_index()
                .assign(voted_labels=lambda df: df.apply(axis=1, func=lambda row:
                          s.defer(voxel_vote(label_files=row.label_files,
                                             output_dir=os.path.join(row.img.pipeline_sub_dir, row.img.output_sub_dir)))))
                .apply(axis=1, func=lambda row: row.img._replace(labels=row.voted_labels))
        )

        return Result(stages=s, output=segmented_imgs)
Example #14
def maget_mask(imgs : List[MincAtom], maget_options, resolution : float, pipeline_sub_dir : str, atlases=None):

    s = Stages()

    resample  = np.vectorize(mincresample_new, excluded={"extra_flags"})
    defer     = np.vectorize(s.defer)

    original_imgs = imgs
    imgs = copy.deepcopy(imgs)
    original_imgs = pd.Series(original_imgs, index=[img.path for img in original_imgs])
    for img in imgs:
        img.output_sub_dir = os.path.join(img.output_sub_dir, "masking")

    # TODO dereference maget_options -> maget_options.maget outside maget_mask call?
    if atlases is None:
        if maget_options.maget.atlas_lib is None:
            raise ValueError("need some atlases for MAGeT-based masking ...")
        atlases = atlases_from_dir(atlas_lib=maget_options.maget.atlas_lib,
                                   max_templates=maget_options.maget.max_templates,
                                   pipeline_sub_dir=pipeline_sub_dir)

    lsq12_conf = get_linear_configuration_from_options(maget_options.lsq12,
                                                       LinearTransType.lsq12,
                                                       resolution)

    masking_nlin_hierarchy = get_nonlinear_configuration_from_options(maget_options.maget.masking_nlin_protocol,
                                                                      maget_options.maget.mask_method,
                                                                      resolution)

    # TODO lift outside then delete
    #masking_imgs = copy.deepcopy(imgs)
    #for img in masking_imgs:
    #    img.pipeline_sub_dir = os.path.join(img.pipeline_sub_dir, "masking")

    masking_alignments = pd.DataFrame({ 'img'   : img,
                                        'atlas' : atlas,
                                        'xfm'   : s.defer(lsq12_nlin(source=img, target=atlas,
                                                                     lsq12_conf=lsq12_conf,
                                                                     nlin_conf=masking_nlin_hierarchy,
                                                                     resample_source=False))}
                                      for img in imgs for atlas in atlases)

    # propagate a mask to each image using the above `alignments` as follows:
    # - for each image, voxel_vote on the masks propagated to that image to get a suitable mask
    # - run mincmath -clobber -mult <img> <voted_mask> to apply the mask to the files
    masked_img = (
        masking_alignments
        .assign(resampled_mask=lambda df: defer(resample(img=df.atlas.apply(lambda x: x.mask),
                                                         xfm=df.xfm.apply(lambda x: x.xfm),
                                                         like=df.img,
                                                         invert=True,
                                                         interpolation=Interpolation.nearest_neighbour,
                                                         postfix="-input-mask",
                                                         subdir="tmp",
                                                         # TODO annoying hack; fix mincresample(_mask) ...:
                                                         #new_name_wo_ext=df.apply(lambda row:
                                                         #    "%s_to_%s-input-mask" % (row.atlas.filename_wo_ext,
                                                         #                             row.img.filename_wo_ext),
                                                         #    axis=1),
                                                         extra_flags=("-keep_real_range",))))
        .groupby('img', as_index=False)
        .aggregate({'resampled_mask' : lambda masks: list(masks)})
        .rename(columns={"resampled_mask" : "resampled_masks"})
        .assign(voted_mask=lambda df: df.apply(axis=1,
                                               func=lambda row:
                                                 s.defer(mincmath(op="max", vols=sorted(row.resampled_masks),
                                                                  new_name="%s_max_mask" % row.img.filename_wo_ext,
                                                                  subdir="tmp"))))
        .apply(axis=1, func=lambda row: row.img._replace(mask=row.voted_mask)))

    # resample the atlas images back to the input images:
    # (note: this doesn't modify `masking_alignments`, but only stages additional outputs)
    masking_alignments.assign(resampled_img=lambda df:
      defer(resample(img=df.atlas,
                     xfm=df.xfm.apply(lambda x: x.xfm),
                     subdir="tmp",
                     # TODO delete this stupid hack:
                     #new_name_wo_ext=df.apply(lambda row:
                     #  "%s_to_%s-resampled" % (row.atlas.filename_wo_ext,
                     #                          row.img.filename_wo_ext),
                     #                          axis=1),
                     like=df.img, invert=True)))

    for img in masked_img:
        img.output_sub_dir = original_imgs.loc[img.path].output_sub_dir

    return Result(stages=s, output=masked_img)
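The resample = np.vectorize(mincresample_new, excluded={"extra_flags"}) pattern used in this and earlier examples broadcasts a scalar function over whole pandas columns while passing the excluded keyword argument through untouched (otherwise np.vectorize would try to iterate over the tuple of flags). A toy sketch of the mechanism with a hypothetical scale function:

import numpy as np

def scale(x, factor, extra_flags=()):
    return x * factor

vscale = np.vectorize(scale, excluded={"extra_flags"})
vscale(np.array([1, 2, 3]), factor=10, extra_flags=("-verbose",))  # -> array([10, 20, 30])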