Example #1
def det_and_log_det(
    displacement_grid: MincAtom,
    fwhm: Optional[float],
    annotation: str = ""
) -> Result[Namespace]:  # (det=MincAtom, log_det=MincAtom)]:
    """
    The caller should know what kind of deformation grid is being passed in,
    and can therefore supply an appropriate annotation for the produced
    log-determinant file: for instance "absolute" for transformations that
    still include the affine (linear) part, or "relative" for those that have
    the linear part removed.
    """
    s = Stages()
    # TODO: naming doesn't correspond with the (automagic) file naming: d-1 <=> det(f), det <=> det+1(f)
    det = s.defer(determinant(s.defer(smooth_vector(source=displacement_grid, fwhm=fwhm))
                              if fwhm else displacement_grid))

    output_filename_wo_ext = displacement_grid.filename_wo_ext + "_log_det" + annotation
    if fwhm:
        output_filename_wo_ext += "_fwhm" + str(fwhm)
    log_det = s.defer(
        mincmath(op='log',
                 vols=[det],
                 subdir="stats-volumes",
                 new_name=output_filename_wo_ext))
    return Result(stages=s, output=Namespace(det=det, log_det=log_det))
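
A minimal numerical sketch of the per-voxel quantity this function stages (an illustration, not part of the example): for a deformation f(x) = x + u(x), the Jacobian determinant is det(I + du/dx), and the file produced by the final mincmath(op='log', ...) stage holds its logarithm, with log_det > 0 indicating local expansion and log_det < 0 local contraction.

import numpy as np

# Toy Jacobian of a displacement field u at one voxel (values chosen for illustration only).
grad_u = np.array([[0.05, 0.01, 0.00],
                   [0.00, -0.02, 0.01],
                   [0.00, 0.00, 0.03]])

det = np.linalg.det(np.eye(3) + grad_u)   # what the deferred `determinant(...)` stage computes per voxel
log_det = np.log(det)                     # what the mincmath(op='log', ...) stage writes out
print(round(det, 3), round(log_det, 3))   # 1.06 0.058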
Example #2
def mincblob(op : str, grid : MincAtom, subdir : str = "tmp") -> Result[MincAtom]:
    """
    Low-level wrapper around mincblob; the only special case is the determinant
    option. mincblob itself subtracts 1 from every determinant value it
    computes, so 1 must be added back to its output. We do that here, since
    this wrapper is the most sensible place for the correction.
    >>> stages = mincblob('determinant', MincAtom("/images/img_1.mnc", pipeline_sub_dir="/tmp")).stages
    >>> [s.render() for s in stages]
    ['mincblob -clobber -determinant /images/img_1.mnc /tmp/img_1/img_1_determinant.mnc']
    """
    if op not in ["determinant", "trace", "translation", "magnitude"]:
        raise ValueError('mincblob: invalid operation %s' % op)

    # if we are calculating the determinant, the first file produced is a temp file:
    if op == "determinant":
        out_file = grid.newname_with_suffix("_temp_det", subdir=subdir)
    else:
        out_file = grid.newname_with_suffix('_' + op, subdir=subdir)

    stage = CmdStage(inputs=(grid,), outputs=(out_file,),
                     cmd=['mincblob', '-clobber', '-' + op, grid.path, out_file.path])

    s = Stages([stage])
    # now create the proper determinant if that's what was asked for
    if op == "determinant":
        result_file = s.defer(mincmath(op='add',
                                       const=1,
                                       vols=[out_file],
                                       subdir=subdir,
                                       new_name=grid.filename_wo_ext + "_det"))
    else:
        result_file = out_file

    return Result(stages=s, output=result_file)
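
A toy numeric check of the convention the docstring describes (an illustration, not taken from the source): mincblob itself writes det(f) - 1, so the deferred mincmath(op='add', const=1) stage recovers the actual Jacobian determinant.

raw_mincblob_value = 0.0                     # mincblob's raw output for a voxel with no local volume change
true_determinant = raw_mincblob_value + 1.0  # what the staged mincmath(op='add', const=1, ...) restores
assert true_determinant == 1.0               # det(f) of an identity deformation is exactly 1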
Example #3
def det_and_log_det(displacement_grid : MincAtom,
                    fwhm : Optional[float],
                    annotation: str = "") -> Result[Namespace]:  # (det=MincAtom, log_det=MincAtom)]:
    """
    The caller should know what kind of deformation grid is being passed in,
    and can therefore supply an appropriate annotation for the produced
    log-determinant file: for instance "absolute" for transformations that
    still include the affine (linear) part, or "relative" for those that have
    the linear part removed.
    """
    s = Stages()
    # TODO: naming doesn't correspond with the (automagic) file naming: d-1 <=> det(f), det <=> det+1(f)
    det = s.defer(determinant(s.defer(smooth_vector(source=displacement_grid, fwhm=fwhm))
                              if fwhm else displacement_grid))

    output_filename_wo_ext = displacement_grid.filename_wo_ext + "_log_det" + annotation
    if fwhm:
        output_filename_wo_ext += "_fwhm" + str(fwhm)
    log_det = s.defer(mincmath(op='log',
                               vols=[det],
                               subdir="stats-volumes",
                               new_name=output_filename_wo_ext))
    return Result(stages=s, output=Namespace(det=det, log_det=log_det))
Example #4
def mincblob(op: str, grid: MincAtom, subdir: str = "tmp") -> Result[MincAtom]:
    """
    Low-level wrapper around mincblob; the only special case is the determinant
    option. mincblob itself subtracts 1 from every determinant value it
    computes, so 1 must be added back to its output. We do that here, since
    this wrapper is the most sensible place for the correction.
    >>> stages = mincblob('determinant', MincAtom("/images/img_1.mnc", pipeline_sub_dir="/tmp")).stages
    >>> [s.render() for s in stages]
    ['mincblob -clobber -determinant /images/img_1.mnc /tmp/img_1/img_1_determinant.mnc']
    """
    if op not in ["determinant", "trace", "translation", "magnitude"]:
        raise ValueError('mincblob: invalid operation %s' % op)

    # if we are calculating the determinant, the first file produced is a temp file:
    if op == "determinant":
        out_file = grid.newname_with_suffix("_temp_det", subdir=subdir)
    else:
        out_file = grid.newname_with_suffix('_' + op, subdir=subdir)

    stage = CmdStage(
        inputs=(grid, ),
        outputs=(out_file, ),
        cmd=['mincblob', '-clobber', '-' + op, grid.path, out_file.path])

    s = Stages([stage])
    # now create the proper determinant if that's what was asked for
    if op == "determinant":
        result_file = s.defer(
            mincmath(op='add',
                     const=1,
                     vols=[out_file],
                     subdir=subdir,
                     new_name=grid.filename_wo_ext + "_det"))
    else:
        result_file = out_file

    return Result(stages=s, output=result_file)
Example #5
def maget_mask(imgs: List[MincAtom],
               maget_options,
               resolution: float,
               pipeline_sub_dir: str,
               atlases=None):

    s = Stages()

    original_imgs = imgs
    imgs = copy.deepcopy(imgs)
    original_imgs = pd.Series(original_imgs,
                              index=[img.path for img in original_imgs])
    for img in imgs:
        img.output_sub_dir = os.path.join(img.output_sub_dir, "masking")

    # TODO dereference maget_options -> maget_options.maget outside maget_mask call?
    if atlases is None:
        atlases = get_atlases(maget_options.maget,
                              pipeline_sub_dir=pipeline_sub_dir)

    lsq12_conf = get_linear_configuration_from_options(maget_options.lsq12,
                                                       LinearTransType.lsq12,
                                                       resolution)

    #nlin_module = get_nonlinear_component(reg_method=options.mbm.nlin.reg_method)

    #masking_nlin_hierarchy = get_nonlinear_configuration_from_options(maget_options.maget.masking_nlin_protocol,
    #                                                                  next(iter(maget_options.maget.flags_.masking_nlin_protocol)),
    #                                                                  maget_options.maget.mask_method,
    #                                                                  resolution)

    masking_nlin_component = get_nonlinear_component(
        reg_method=maget_options.maget.mask_method)
    algorithms = masking_nlin_component.Algorithms
    #masking_nlin_conf = (masking_nlin_component.parse_protocol_file(
    #                       maget_options.maget.masking_nlin_protocol, resolution=resolution)
    #                     if maget_options.maget.masking_nlin_protocol is not None
    #                     else masking_nlin_component.get_default_conf(resolution=resolution))

    # TODO lift outside then delete
    #masking_imgs = copy.deepcopy(imgs)
    #for img in masking_imgs:
    #    img.pipeline_sub_dir = os.path.join(img.pipeline_sub_dir, "masking")

    masking_alignments = pd.DataFrame(
        {'img': img,
         'atlas': atlas,
         'xfm': s.defer(
             lsq12_nlin(source=img,
                        target=atlas,
                        lsq12_conf=lsq12_conf,
                        nlin_options=maget_options.maget.masking_nlin_protocol,
                        #masking_nlin_conf,
                        resolution=resolution,
                        nlin_module=masking_nlin_component,
                        resample_source=False))}
        for img in imgs for atlas in atlases)

    # propagate a mask to each image using the above `alignments` as follows:
    # - for each image, voxel_vote on the masks propagated to that image to get a suitable mask
    # - run mincmath -clobber -mult <img> <voted_mask> to apply the mask to the files
    masked_img = (
        masking_alignments
        .assign(resampled_mask=lambda df: df.apply(
            axis=1,
            func=lambda row: s.defer(
                algorithms.resample(img=row.atlas.mask,  #apply(lambda x: x.mask),
                                    xfm=row.xfm.xfm,  #apply(lambda x: x.xfm),
                                    like=row.img,
                                    invert=True,
                                    #interpolation=Interpolation.nearest_neighbour,
                                    postfix="-input-mask",
                                    subdir="tmp",
                                    # TODO annoying hack; fix mincresample(_mask) ...:
                                    #new_name_wo_ext=df.apply(lambda row:
                                    #    "%s_to_%s-input-mask" % (row.atlas.filename_wo_ext,
                                    #                             row.img.filename_wo_ext),
                                    #    axis=1),
                                    use_nn_interpolation=True))))
        .groupby('img', as_index=False)
        .aggregate({'resampled_mask': lambda masks: list(masks)})
        .rename(columns={"resampled_mask": "resampled_masks"})
        .assign(voted_mask=lambda df: df.apply(
            axis=1,
            # FIXME cannot use mincmath here !!!
            func=lambda row: s.defer(
                mincmath(op="max",
                         vols=sorted(row.resampled_masks),
                         new_name="%s_max_mask" % row.img.filename_wo_ext,
                         subdir="tmp"))))
        .apply(axis=1, func=lambda row: row.img._replace(mask=row.voted_mask)))

    # resample the atlas images back to the input images:
    # (note: this doesn't modify `masking_alignments`, but only stages additional outputs)
    masking_alignments.assign(resampled_img=lambda df: df.apply(
        axis=1,
        func=lambda row: s.defer(
            algorithms.resample(
                img=row.atlas,
                xfm=row.xfm.xfm,  #.apply(lambda x: x.xfm),
                subdir="tmp",
                # TODO delete this stupid hack:
                #new_name_wo_ext=df.apply(lambda row:
                #  "%s_to_%s-resampled" % (row.atlas.filename_wo_ext,
                #                          row.img.filename_wo_ext),
                #                          axis=1),
                like=row.img,
                invert=True))))

    for img in masked_img:
        img.output_sub_dir = original_imgs.loc[img.path].output_sub_dir

    return Result(stages=s, output=masked_img)
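
The core of maget_mask is a table manipulation: for each image, collect every mask resampled onto it, then combine them into a single voted mask. Below is a self-contained pandas sketch of the assign -> groupby('img') -> aggregate(list) -> assign steps, with plain strings standing in for MincAtoms and deferred stages (all names here are illustrative only):

import pandas as pd

# Toy stand-in for `masking_alignments` after the resampled_mask column has been added:
# one row per (img, atlas) pair.
alignments = pd.DataFrame([
    {"img": "img_1", "resampled_mask": "atlas_A_mask_on_img_1"},
    {"img": "img_1", "resampled_mask": "atlas_B_mask_on_img_1"},
    {"img": "img_2", "resampled_mask": "atlas_A_mask_on_img_2"},
    {"img": "img_2", "resampled_mask": "atlas_B_mask_on_img_2"},
])

voted = (alignments
         .groupby("img", as_index=False)
         .aggregate({"resampled_mask": lambda masks: list(masks)})   # one row per image, all its masks
         .rename(columns={"resampled_mask": "resampled_masks"})
         .assign(voted_mask=lambda df: df.apply(
             axis=1,
             # in the real code this is s.defer(mincmath(op="max", vols=sorted(row.resampled_masks), ...))
             func=lambda row: "max(%s)" % ", ".join(sorted(row.resampled_masks)))))

print(voted.loc[0, "voted_mask"])   # max(atlas_A_mask_on_img_1, atlas_B_mask_on_img_1)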
Example #6
def maget_mask(imgs : List[MincAtom], atlases, options):

    s = Stages()

    resample  = np.vectorize(mincresample_new, excluded={"extra_flags"})
    defer     = np.vectorize(s.defer)

    lsq12_conf = get_linear_configuration_from_options(options.maget.lsq12,
                                                       LinearTransType.lsq12,
                                                       options.registration.resolution)

    masking_nlin_hierarchy = get_nonlinear_configuration_from_options(options.maget.maget.masking_nlin_protocol,
                                                                      options.maget.maget.mask_method,
                                                                      options.registration.resolution)

    masking_alignments = pd.DataFrame({ 'img'   : img,
                                        'atlas' : atlas,
                                        'xfm'   : s.defer(lsq12_nlin(source=img, target=atlas,
                                                                     lsq12_conf=lsq12_conf,
                                                                     nlin_conf=masking_nlin_hierarchy,
                                                                     resample_source=False))}
                                      for img in imgs for atlas in atlases)
    # propagate a mask to each image using the above `alignments` as follows:
    # - for each image, voxel_vote on the masks propagated to that image to get a suitable mask
    # - run mincmath -clobber -mult <img> <voted_mask> to apply the mask to the files
    masked_img = (
        masking_alignments
        .assign(resampled_mask=lambda df: defer(resample(img=df.atlas.apply(lambda x: x.mask),
                                                         xfm=df.xfm.apply(lambda x: x.xfm),
                                                         like=df.img,
                                                         invert=True,
                                                         interpolation=Interpolation.nearest_neighbour,
                                                         postfix="-input-mask",
                                                         subdir="tmp",
                                                         # TODO annoying hack; fix mincresample(_mask) ...:
                                                         #new_name_wo_ext=df.apply(lambda row:
                                                         #    "%s_to_%s-input-mask" % (row.atlas.filename_wo_ext,
                                                         #                             row.img.filename_wo_ext),
                                                         #    axis=1),
                                                         extra_flags=("-keep_real_range",))))
        .groupby('img', sort=False, as_index=False)
        # sort=False: just for speed (might also need to implement more comparison methods on `MincAtom`s)
        .aggregate({'resampled_mask' : lambda masks: list(masks)})
        .rename(columns={"resampled_mask" : "resampled_masks"})
        .assign(voted_mask=lambda df: df.apply(axis=1,
                                               func=lambda row:
                                                 s.defer(voxel_vote(label_files=row.resampled_masks,
                                                                    name="%s_voted_mask" % row.img.filename_wo_ext,
                                                                    output_dir=os.path.join(row.img.output_sub_dir,
                                                                                            "tmp")))))
        .assign(masked_img=lambda df:
          df.apply(axis=1,
                 func=lambda row:
                   s.defer(mincmath(op="mult",
                                    # img must precede mask here
                                    # for output image range to be correct:
                                    vols=[row.img, row.voted_mask],
                                    new_name="%s_masked" % row.img.filename_wo_ext,
                                    subdir="resampled")))))  #['img']

    # resample the atlas images back to the input images:
    # (note: this doesn't modify `masking_alignments`, but only stages additional outputs)
    masking_alignments.assign(resampled_img=lambda df:
    defer(resample(img=df.atlas,
                   xfm=df.xfm.apply(lambda x: x.xfm),
                   subdir="tmp",
                   # TODO delete this stupid hack:
                   #new_name_wo_ext=df.apply(lambda row:
                   #  "%s_to_%s-resampled" % (row.atlas.filename_wo_ext,
                   #                          row.img.filename_wo_ext),
                   #                          axis=1),
                   like=df.img, invert=True)))

    # replace the table of alignments with a new one with masked images
    masking_alignments = (pd.merge(left=masking_alignments.assign(unmasked_img=lambda df: df.img),
                                   right=masked_img,
                                   on=["img"], how="right", sort=False)
                          .assign(img=lambda df: df.masked_img))

    return Result(stages=s, output=masking_alignments)
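
The np.vectorize(mincresample_new, ...) and np.vectorize(s.defer) wrappers above turn per-element functions into ones that can be applied to whole DataFrame columns inside assign. A self-contained toy sketch of that idiom follows (the function and column names here are illustrative, not from the pipeline; the real code additionally passes excluded={"extra_flags"} so that tuple-valued argument is not broadcast element-wise):

import numpy as np
import pandas as pd

def resample_one(img, xfm, like):
    # stand-in for a per-element call such as mincresample_new(...)
    return "%s_via_%s_like_%s" % (img, xfm, like)

# otypes=[object] keeps the outputs as plain Python objects
resample = np.vectorize(resample_one, otypes=[object])

df = pd.DataFrame({"atlas": ["atlas_A", "atlas_B"],
                   "xfm":   ["xfm_1", "xfm_2"],
                   "img":   ["img_1", "img_2"]})

df = df.assign(resampled=lambda d: resample(img=d.atlas, xfm=d.xfm, like=d.img))
print(df.resampled.tolist())   # ['atlas_A_via_xfm_1_like_img_1', 'atlas_B_via_xfm_2_like_img_2']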
Example #7
def maget(imgs : List[MincAtom], options, prefix, output_dir):     # FIXME prefix, output_dir aren't used !!

    s = Stages()

    maget_options = options.maget.maget

    pipeline_sub_dir = os.path.join(options.application.output_directory,
                                    options.application.pipeline_name + "_atlases")

    if maget_options.atlas_lib is None:
        raise ValueError("Need some atlases ...")

    #atlas_dir = os.path.join(output_dir, "input_atlases") ???

    # TODO should alternately accept a CSV file ...
    atlas_library = read_atlas_dir(atlas_lib=maget_options.atlas_lib, pipeline_sub_dir=pipeline_sub_dir)

    if len(atlas_library) == 0:
        raise ValueError("No atlases found in specified directory '%s' ..." % options.maget.maget.atlas_lib)

    num_atlases_needed = min(maget_options.max_templates, len(atlas_library))
    # TODO arbitrary; could choose atlases better ...
    atlases = atlas_library[:num_atlases_needed]
    # TODO issue a warning if not all atlases used or if more atlases requested than available?
    # TODO also, doesn't slicing with a higher number (i.e., if max_templates > n) go to the end of the list anyway?

    lsq12_conf = get_linear_configuration_from_options(options.maget.lsq12,
                                                       LinearTransType.lsq12,
                                                       options.registration.resolution)

    masking_nlin_hierarchy = get_nonlinear_configuration_from_options(options.maget.maget.masking_nlin_protocol,
                                                                      options.maget.maget.mask_method,
                                                                      options.registration.resolution)

    nlin_hierarchy = get_nonlinear_configuration_from_options(options.maget.nlin.nlin_protocol,
                                                              options.maget.nlin.reg_method,
                                                              options.registration.resolution)

    resample  = np.vectorize(mincresample_new, excluded={"extra_flags"})
    defer     = np.vectorize(s.defer)

    # plan the basic registrations between all image-atlas pairs; store the result paths in a table
    masking_alignments = pd.DataFrame({ 'img'   : img,
                                        'atlas' : atlas,
                                        'xfm'   : s.defer(lsq12_nlin(source=img, target=atlas,
                                                                     lsq12_conf=lsq12_conf,
                                                                     nlin_conf=masking_nlin_hierarchy,
                                                                     resample_source=False))}
                                      for img in imgs for atlas in atlases)

    if maget_options.mask or maget_options.mask_only:

        masking_alignments = s.defer(maget_mask(imgs, atlases, options))

        masked_atlases = atlases.apply(lambda atlas:
                           s.defer(mincmath(op='mult', vols=[atlas, atlas.mask], subdir="resampled",
                                            new_name="%s_masked" % atlas.filename_wo_ext)))

        # now propagate only the masked form of the images and atlases:
        imgs    = masking_alignments.img
        atlases = masked_atlases  # TODO is this needed?

    if maget_options.mask_only:
        # register each input to each atlas, creating a mask
        return Result(stages=s, output=masking_alignments)   # TODO rename `alignments` to `registrations`??
    else:
        del masking_alignments
        # this `del` is just to verify that we don't accidentally use this later, since my intent is that these
        # coarser alignments shouldn't be re-used, just the masked images they create; can be removed later
        # if a sensible use is found

        if maget_options.pairwise:

            def choose_new_templates(ts, n):
                # currently silly, but we might implement a smarter method ...
                # FIXME what if there aren't enough other imgs around?!  This silently goes weird ...
                return ts[:n+1]  # n+1 instead of n: choose one more since we won't use image as its own template ...

            new_templates = choose_new_templates(ts=imgs, n=maget_options.max_templates)
            # note these images are the masked ones if masking was done ...

            # TODO write a function to do these alignments and the image->atlas one above
            # align the new templates chosen from the images to the initial atlases:
            new_template_to_atlas_alignments = (
                pd.DataFrame({ 'img'   : template,
                               'atlas' : atlas,
                               'xfm'   : s.defer(lsq12_nlin(source=template, target=atlas,
                                                            lsq12_conf=lsq12_conf,
                                                            nlin_conf=nlin_hierarchy,
                                                            resample_source=False))}
                             for template in new_templates for atlas in atlases))
                             # ... and these atlases are multiplied by their masks (but is this necessary?)

            # label the new templates from resampling the atlas labels onto them:
            # TODO now vote on the labels to be used for the new templates ...
            # TODO extract into procedure?
            new_templates_labelled = (
                new_template_to_atlas_alignments
                .assign(resampled_labels=lambda df: defer(
                                               resample(img=df.atlas.apply(lambda x: x.labels),
                                                                      xfm=df.xfm.apply(lambda x: x.xfm),
                                                                      interpolation=Interpolation.nearest_neighbour,
                                                                      extra_flags=("-keep_real_range",),
                                                                      like=df.img, invert=True)))
                .groupby('img', sort=False, as_index=False)
                .aggregate({'resampled_labels' : lambda labels: list(labels)})
                .assign(voted_labels=lambda df: df.apply(axis=1,
                                                         func=lambda row:
                                                           s.defer(voxel_vote(label_files=row.resampled_labels,
                                                                              name="%s_template_labels" %
                                                                                   row.img.filename_wo_ext,
                                                                              output_dir=os.path.join(
                                                                                  row.img.pipeline_sub_dir,
                                                                                  row.img.output_sub_dir,
                                                                                  "labels"))))))

            # TODO write a procedure for this assign-groupby-aggregate-rename...
            # FIXME should be in above algebraic manipulation but MincAtoms don't support flexible immutable updating
            for row in pd.merge(left=new_template_to_atlas_alignments, right=new_templates_labelled,
                                on=["img"], how="right", sort=False).itertuples():
                row.img.labels = s.defer(mincresample_new(img=row.voted_labels, xfm=row.xfm.xfm, like=row.img,
                                                          invert=True, interpolation=Interpolation.nearest_neighbour,
                                                          #postfix="-input-labels",
                                                          # this makes names really long ...:
                                                          # TODO this doesn't work for running MAGeT on the nlin avg:
                                                          #new_name_wo_ext="%s_on_%s" %
                                                          #                (row.voted_labels.filename_wo_ext,
                                                          #                 row.img.filename_wo_ext),
                                                          #postfix="_labels_via_%s" % row.xfm.xfm.filename_wo_ext,
                                                          new_name_wo_ext="%s_via_%s" % (row.voted_labels.filename_wo_ext,
                                                                                         row.xfm.xfm.filename_wo_ext),
                                                          extra_flags=("-keep_real_range",)))

            # now that the new templates have been labelled, combine with the atlases:
            # FIXME use the masked atlases created earlier ??
            all_templates = pd.concat([new_templates_labelled.img, atlases], ignore_index=True)

            # now take union of the resampled labels from the new templates with labels from the original atlases:
            #all_alignments = pd.concat([image_to_template_alignments,
            #                            alignments.rename(columns={ "atlas" : "template" })],
            #                           ignore_index=True, join="inner")

        else:
            all_templates = atlases

        # now register each input to each selected template
        # N.B.: Even though we've already registered each image to each initial atlas, this happens again here,
        #       but using `nlin_hierarchy` instead of `masking_nlin_hierarchy` as options.
        #       This is not 'work-efficient' in the sense that this computation happens twice (although
        #       hopefully at greater precision the second time!), but the idea is to run a coarse initial
        #       registration to get a mask and then do a better registration with that mask (though I'm not
        #       sure exactly when this is faster than doing a single registration).
        #       This _can_ allow the overall computation to finish more rapidly
        #       (depending on the relative speed of the two alignment methods/parameters,
        #       number of atlases and other templates used, number of cores available, etc.).
        image_to_template_alignments = (
            pd.DataFrame({ "img"      : img,
                           "template" : template_img,
                           "xfm"      : xfm }
                         for img in imgs      # TODO use the masked imgs here?
                         for template_img in
                             all_templates
                             # FIXME delete this one alignment
                             #labelled_templates[labelled_templates.img != img]
                             # since equality is equality of filepaths (a bit dangerous)
                             # TODO is there a more direct/faster way just to delete the template?
                         for xfm in [s.defer(lsq12_nlin(source=img, target=template_img,
                                                        lsq12_conf=lsq12_conf,
                                                        nlin_conf=nlin_hierarchy))]
                         )
        )

        # now do a voxel_vote on all resampled template labels, just as earlier with the masks
        voted = (image_to_template_alignments
                 .assign(resampled_labels=lambda df:
                                            defer(resample(img=df.template.apply(lambda x: x.labels),
                                                           # FIXME bug: at this point templates from template_alignments
                                                           # don't have associated labels (i.e., `None`s) -- fatal
                                                           xfm=df.xfm.apply(lambda x: x.xfm),
                                                           interpolation=Interpolation.nearest_neighbour,
                                                           extra_flags=("-keep_real_range",),
                                                           like=df.img, invert=True)))
                 .groupby('img', sort=False)
                 # TODO the pattern groupby-aggregate(lambda x: list(x))-reset_index-assign is basically a hack
                 # to do a groupby-assign with access to the group name;
                 # see http://stackoverflow.com/a/30224447/849272 for a better solution
                 # (note this pattern occurs several times in MAGeT and two-level code)
                 .aggregate({'resampled_labels' : lambda labels: list(labels)})
                 .reset_index()
                 .assign(voted_labels=lambda df: defer(np.vectorize(voxel_vote)(label_files=df.resampled_labels,
                                                                                output_dir=df.img.apply(
                                                                                    lambda x: os.path.join(
                                                                                        x.pipeline_sub_dir,
                                                                                        x.output_sub_dir))))))

        # TODO doing mincresample -invert separately for the img->atlas xfm for mask, labels is silly
        # (when Pydpiper's `mincresample` does both automatically)?

        # blargh, another destructive update ...
        for row in voted.itertuples():
            row.img.labels = row.voted_labels

        # returning voted_labels as a column is slightly redundant, but possibly useful ...
        return Result(stages=s, output=voted)  # voted.drop("voted_labels", axis=1))
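
Every alignment table above is built the same way: pd.DataFrame(<dict> for img in imgs for atlas in atlases) creates one row per image-template pair from a generator of dicts. A self-contained toy version of the idiom (all names illustrative; in the real code the 'xfm' value is the deferred result of an lsq12_nlin registration rather than a string):

import pandas as pd

imgs    = ["img_1", "img_2"]
atlases = ["atlas_A", "atlas_B", "atlas_C"]

# One row per (img, atlas) pair, i.e. the full cross product of images and templates.
pairs = pd.DataFrame({"img": img,
                      "atlas": atlas,
                      "xfm": "%s_to_%s.xfm" % (img, atlas)}
                     for img in imgs for atlas in atlases)

print(len(pairs))             # 6: every image paired with every atlas
print(pairs.iloc[0]["xfm"])   # img_1_to_atlas_A.xfm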
Example #8
def maget_mask(imgs : List[MincAtom], maget_options, resolution : float,
               pipeline_sub_dir : str, atlases=None):

    s = Stages()

    original_imgs = imgs
    imgs = copy.deepcopy(imgs)
    original_imgs = pd.Series(original_imgs, index=[img.path for img in original_imgs])
    for img in imgs:
        img.output_sub_dir = os.path.join(img.output_sub_dir, "masking")

    # TODO dereference maget_options -> maget_options.maget outside maget_mask call?
    if atlases is None:
        atlases = get_atlases(maget_options.maget, pipeline_sub_dir=pipeline_sub_dir)

    lsq12_conf = get_linear_configuration_from_options(maget_options.lsq12,
                                                       LinearTransType.lsq12,
                                                       resolution)

    #nlin_module = get_nonlinear_component(reg_method=options.mbm.nlin.reg_method)

    #masking_nlin_hierarchy = get_nonlinear_configuration_from_options(maget_options.maget.masking_nlin_protocol,
    #                                                                  next(iter(maget_options.maget.flags_.masking_nlin_protocol)),
    #                                                                  maget_options.maget.mask_method,
    #                                                                  resolution)

    masking_nlin_component = get_nonlinear_component(reg_method=maget_options.maget.mask_method)
    algorithms = masking_nlin_component.Algorithms
    #masking_nlin_conf = (masking_nlin_component.parse_protocol_file(
    #                       maget_options.maget.masking_nlin_protocol, resolution=resolution)
    #                     if maget_options.maget.masking_nlin_protocol is not None
    #                     else masking_nlin_component.get_default_conf(resolution=resolution))

    # TODO lift outside then delete
    #masking_imgs = copy.deepcopy(imgs)
    #for img in masking_imgs:
    #    img.pipeline_sub_dir = os.path.join(img.pipeline_sub_dir, "masking")

    masking_alignments = pd.DataFrame({ 'img'   : img,
                                        'atlas' : atlas,
                                        'xfm'   : s.defer(
                                          lsq12_nlin(source=img, target=atlas,
                                                     lsq12_conf=lsq12_conf,
                                                     nlin_options=maget_options.maget.masking_nlin_protocol,
                                                     #masking_nlin_conf,
                                                     resolution=resolution,
                                                     nlin_module=masking_nlin_component,
                                                     resample_source=False))}
                                      for img in imgs for atlas in atlases)

    # propagate a mask to each image using the above `alignments` as follows:
    # - for each image, voxel_vote on the masks propagated to that image to get a suitable mask
    # - run mincmath -clobber -mult <img> <voted_mask> to apply the mask to the files
    masked_img = (
        masking_alignments
        .assign(resampled_mask=lambda df: df.apply(axis=1, func=lambda row:
           s.defer(algorithms.resample(img=row.atlas.mask, #apply(lambda x: x.mask),
                                       xfm=row.xfm.xfm,  #apply(lambda x: x.xfm),
                                       like=row.img,
                                       invert=True,
                                       #interpolation=Interpolation.nearest_neighbour,
                                       postfix="-input-mask",
                                       subdir="tmp",
                                       # TODO annoying hack; fix mincresample(_mask) ...:
                                       #new_name_wo_ext=df.apply(lambda row:
                                       #    "%s_to_%s-input-mask" % (row.atlas.filename_wo_ext,
                                       #                             row.img.filename_wo_ext),
                                       #    axis=1),
                                       use_nn_interpolation=True
                                       ))))
        .groupby('img', as_index=False)
        .aggregate({'resampled_mask' : lambda masks: list(masks)})
        .rename(columns={"resampled_mask" : "resampled_masks"})
        .assign(voted_mask=lambda df: df.apply(axis=1,
                                               func=lambda row:
                  # FIXME cannot use mincmath here !!!
                  s.defer(mincmath(op="max", vols=sorted(row.resampled_masks),
                                   new_name="%s_max_mask" % row.img.filename_wo_ext,
                                   subdir="tmp"))))
        .apply(axis=1, func=lambda row: row.img._replace(mask=row.voted_mask)))

    # resample the atlas images back to the input images:
    # (note: this doesn't modify `masking_alignments`, but only stages additional outputs)
    masking_alignments.assign(resampled_img=lambda df: df.apply(axis=1, func=lambda row:
      s.defer(algorithms.resample(
                img=row.atlas,
                xfm=row.xfm.xfm, #.apply(lambda x: x.xfm),
                subdir="tmp",
                # TODO delete this stupid hack:
                #new_name_wo_ext=df.apply(lambda row:
                #  "%s_to_%s-resampled" % (row.atlas.filename_wo_ext,
                #                          row.img.filename_wo_ext),
                #                          axis=1),
                like=row.img, invert=True))))

    for img in masked_img:
        img.output_sub_dir = original_imgs.loc[img.path].output_sub_dir

    return Result(stages=s, output=masked_img)
Example #9
def maget_mask(imgs : List[MincAtom], maget_options, resolution : float, pipeline_sub_dir : str, atlases=None):

    s = Stages()

    resample  = np.vectorize(mincresample_new, excluded={"extra_flags"})
    defer     = np.vectorize(s.defer)

    original_imgs = imgs
    imgs = copy.deepcopy(imgs)
    original_imgs = pd.Series(original_imgs, index=[img.path for img in original_imgs])
    for img in imgs:
        img.output_sub_dir = os.path.join(img.output_sub_dir, "masking")

    # TODO dereference maget_options -> maget_options.maget outside maget_mask call?
    if atlases is None:
        if maget_options.maget.atlas_lib is None:
            raise ValueError("need some atlases for MAGeT-based masking ...")
        atlases = atlases_from_dir(atlas_lib=maget_options.maget.atlas_lib,
                                   max_templates=maget_options.maget.max_templates,
                                   pipeline_sub_dir=pipeline_sub_dir)

    lsq12_conf = get_linear_configuration_from_options(maget_options.lsq12,
                                                       LinearTransType.lsq12,
                                                       resolution)

    masking_nlin_hierarchy = get_nonlinear_configuration_from_options(maget_options.maget.masking_nlin_protocol,
                                                                      maget_options.maget.mask_method,
                                                                      resolution)

    # TODO lift outside then delete
    #masking_imgs = copy.deepcopy(imgs)
    #for img in masking_imgs:
    #    img.pipeline_sub_dir = os.path.join(img.pipeline_sub_dir, "masking")

    masking_alignments = pd.DataFrame({ 'img'   : img,
                                        'atlas' : atlas,
                                        'xfm'   : s.defer(lsq12_nlin(source=img, target=atlas,
                                                                     lsq12_conf=lsq12_conf,
                                                                     nlin_conf=masking_nlin_hierarchy,
                                                                     resample_source=False))}
                                      for img in imgs for atlas in atlases)

    # propagate a mask to each image using the above `alignments` as follows:
    # - for each image, voxel_vote on the masks propagated to that image to get a suitable mask
    # - run mincmath -clobber -mult <img> <voted_mask> to apply the mask to the files
    masked_img = (
        masking_alignments
        .assign(resampled_mask=lambda df: defer(resample(img=df.atlas.apply(lambda x: x.mask),
                                                         xfm=df.xfm.apply(lambda x: x.xfm),
                                                         like=df.img,
                                                         invert=True,
                                                         interpolation=Interpolation.nearest_neighbour,
                                                         postfix="-input-mask",
                                                         subdir="tmp",
                                                         # TODO annoying hack; fix mincresample(_mask) ...:
                                                         #new_name_wo_ext=df.apply(lambda row:
                                                         #    "%s_to_%s-input-mask" % (row.atlas.filename_wo_ext,
                                                         #                             row.img.filename_wo_ext),
                                                         #    axis=1),
                                                         extra_flags=("-keep_real_range",))))
        .groupby('img', as_index=False)
        .aggregate({'resampled_mask' : lambda masks: list(masks)})
        .rename(columns={"resampled_mask" : "resampled_masks"})
        .assign(voted_mask=lambda df: df.apply(axis=1,
                                               func=lambda row:
                                                 s.defer(mincmath(op="max", vols=sorted(row.resampled_masks),
                                                                  new_name="%s_max_mask" % row.img.filename_wo_ext,
                                                                  subdir="tmp"))))
        .apply(axis=1, func=lambda row: row.img._replace(mask=row.voted_mask)))

    # resample the atlas images back to the input images:
    # (note: this doesn't modify `masking_alignments`, but only stages additional outputs)
    masking_alignments.assign(resampled_img=lambda df:
      defer(resample(img=df.atlas,
                     xfm=df.xfm.apply(lambda x: x.xfm),
                     subdir="tmp",
                     # TODO delete this stupid hack:
                     #new_name_wo_ext=df.apply(lambda row:
                     #  "%s_to_%s-resampled" % (row.atlas.filename_wo_ext,
                     #                          row.img.filename_wo_ext),
                     #                          axis=1),
                     like=df.img, invert=True)))

    for img in masked_img:
        img.output_sub_dir = original_imgs.loc[img.path].output_sub_dir

    return Result(stages=s, output=masked_img)