Example #1
def nlin_part(xfm: XfmHandler,
              inv_xfm: Optional[XfmHandler] = None) -> Result[XfmHandler]:
    """
    *** = nonlinear deformations
    --- = linear (affine) deformations

    Input:
    xfm     :     ******------>
    inv_xfm :    <******------ (optional)

    Calculated:
    inv_lin_xfm :      <------

    Returned:
    concat :      ******------> +
                       <------
    equals :      ******>

    Compute the nonlinear part of a transform as follows:
    go forwards across xfm and then backwards across the linear part
    of the inverse xfm (either calculating the inverse first or using the one supplied).
    Finally, minc_displacement can be used to compute the resulting grid file of the
    purely nonlinear part (see nlin_displacement below).

    The optional inv_xfm (which must be the inverse!) is an optimization -
    we don't go looking for an inverse by filename munging and don't programmatically
    keep a log of operations applied, so any preexisting inverse must be supplied explicitly.
    """
    s = Stages()
    inv_xfm = inv_xfm or s.defer(invert_xfmhandler(xfm))
    inv_lin_part = s.defer(lin_from_nlin(inv_xfm))
    xfm = s.defer(concat_xfmhandlers([xfm, inv_lin_part]))
    return Result(stages=s, output=xfm)
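A minimal usage sketch, assuming `my_xfm` is an existing XfmHandler and `my_inv_xfm` a precomputed inverse (both names hypothetical):

s = Stages()
# reusing a known inverse avoids deferring an extra invert_xfmhandler stage:
pure_nlin = s.defer(nlin_part(my_xfm, inv_xfm=my_inv_xfm))
# without one, nlin_part defers the inversion itself:
pure_nlin_alt = s.defer(nlin_part(my_xfm))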
Example #2
    def blur(img, fwhm, gradient=True, subdir='tmp'):
        # note c3d can take voxel rather than fwhm specification, but the Algorithms interface
        # currently doesn't allow this to be used ... maybe an argument from switching from mincblur
        if fwhm in (-1, 0, None):
            if gradient:
                raise ValueError(
                    "can't compute gradient without a positive FWHM")
            return Result(stages=Stages(), output=Namespace(img=img))

        if gradient:
            out_gradient = img.newname_with("_blur%s_grad" % fwhm)
        else:
            out_gradient = None

        out_img = img.newname_with("_blurred%s" % fwhm)

        cmd = CmdStage(cmd=[
            'c3d', '-smooth',
            "%smm" % fwhm, '-o', out_img.path, img.path
        ] + (['-gradient', '-o', out_gradient.path] if gradient else []),
                       inputs=(img, ),
                       outputs=(out_img, out_gradient) if gradient else
                       (out_img, ))
        return Result(stages=Stages((cmd, )),
                      output=Namespace(img=out_img, gradient=out_gradient)
                      if gradient else Namespace(img=out_img))
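A hedged usage sketch (`img` is a hypothetical input atom; the caller collects the returned stages):

s = Stages()
res = s.defer(blur(img, fwhm=0.5))                          # gradient=True by default
blurred, grad = res.img, res.gradient
res_no_grad = s.defer(blur(img, fwhm=0.5, gradient=False))  # output has .img only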
Example #3
def det_and_log_det(
    displacement_grid: MincAtom,
    fwhm: Optional[float],
    annotation: str = ""
) -> Result[Namespace]:  # (det=MincAtom, log_det=MincAtom)
    """
    The caller should know what kind of deformation grid is being passed in,
    and so can supply a suitable annotation for the produced log-determinant
    file: for instance, "absolute" for transformations that still include an
    affine (linear) part, or "relative" for those with the linear part removed.
    """
    s = Stages()
    # TODO: naming doesn't correspond with the (automagic) file naming: d-1 <=> det(f), det <=> det+1(f)
    det = s.defer(
        determinant(
            s.defer(smooth_vector(source=displacement_grid, fwhm=fwhm)
                    ) if fwhm else displacement_grid))

    output_filename_wo_ext = displacement_grid.filename_wo_ext + "_log_det" + annotation
    if fwhm:
        output_filename_wo_ext += "_fwhm" + str(fwhm)
    log_det = s.defer(
        mincmath(op='log',
                 vols=[det],
                 subdir="stats-volumes",
                 new_name=output_filename_wo_ext))
    return Result(stages=s, output=Namespace(det=det, log_det=log_det))
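A usage sketch following the docstring's annotation convention (`grid` is a hypothetical MincAtom holding a deformation grid with the linear part removed):

s = Stages()
dets = s.defer(det_and_log_det(displacement_grid=grid, fwhm=0.2, annotation="_relative"))
det_vol, log_det_vol = dets.det, dets.log_det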
Example #4
    def f(
        imgs: List[MincAtom],
        nlin_dir: str,
        conf: nlin_module.MultilevelConf,
        initial_target: MincAtom,
        nlin_prefix: str,
        #output_dir_for_avg: str = None,
        #output_name_wo_ext: str = None
    ):
        s = Stages()

        pairwise_result = s.defer(
            pairwise(nlin_module, max_images=25, max_pairs=None).build_model(
                imgs=imgs,
                nlin_dir=nlin_dir,
                conf=nlin_module.hierarchical_to_single(conf)[-1]
                if conf else None,
                initial_target=initial_target,
                nlin_prefix=nlin_prefix
                #, output_name_wo_ext=output_name_wo_ext  #, algorithms=nlin_module.algorithms
            ))

        build_model_result = s.defer(
            build_model(nlin_module).build_model(
                imgs=imgs,
                nlin_dir=nlin_dir,
                conf=conf,
                initial_target=pairwise_result.avg_img,
                nlin_prefix=nlin_prefix
                #, output_name_wo_ext=output_name_wo_ext  #, algorithms=algorithms
            ))

        return Result(stages=s, output=build_model_result)
Example #5
def asymmetry_pipeline(options):

    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name
    processed_dir = os.path.join(output_dir, pipeline_name + "_processed")

    s = Stages()

    #imgs_ = [MincAtom(f, pipeline_sub_dir=processed_dir) for f in options.application.files]

    imgs_ = get_imgs(options.application)

    check_MINC_input_files([img.path for img in imgs_])

    imgs = pd.Series(imgs_, index=[img.filename_wo_ext for img in imgs_])
    flipped_imgs = imgs.apply(lambda img: s.defer(volflip(img)))  # TODO add flags to control flip axis ...

    # TODO ugly - MincAtom API should allow this somehow without mutation (also, how to pass into `volflip`, etc.?)
    for f_i in flipped_imgs:
        f_i.output_sub_dir += "_flipped"

    check_MINC_input_files(imgs.apply(lambda img: img.path))

    grouped_files_df = pd.DataFrame({
        'file': pd.concat([imgs, flipped_imgs])
    }).assign(group=lambda df: df.index)

    two_level_result = s.defer(two_level(grouped_files_df, options=options))

    return Result(stages=s, output=two_level_result)
Example #6
def mincblob(op : str, grid : MincAtom, subdir : str = "tmp") -> Result[MincAtom]:
    """
    A low-level wrapper around mincblob, with one exception: the determinant option.
    mincblob itself subtracts 1 from every determinant value it computes, so this
    wrapper adds 1 back to the result of the call, since this is the most sensible
    place to do so.
    >>> stages = mincblob('determinant', MincAtom("/images/img_1.mnc", pipeline_sub_dir="/tmp")).stages
    >>> [s.render() for s in stages]
    ['mincblob -clobber -determinant /images/img_1.mnc /tmp/img_1/img_1_determinant.mnc']
    """
    if op not in ["determinant", "trace", "translation", "magnitude"]:
        raise ValueError('mincblob: invalid operation %s' % op)

    # if we are calculating the determinant, the first file produced is a temp file:
    if op == "determinant":
        out_file = grid.newname_with_suffix("_temp_det", subdir=subdir)
    else:
        out_file = grid.newname_with_suffix('_' + op, subdir=subdir)

    stage = CmdStage(inputs=(grid,), outputs=(out_file,),
                 cmd=['mincblob', '-clobber', '-' + op, grid.path, out_file.path])

    s = Stages([stage])
    # now create the proper determinant if that's what was asked for
    if op == "determinant":
        result_file = s.defer(mincmath(op='add',
                                       const=1,
                                       vols=[out_file],
                                       subdir=subdir,
                                       new_name=grid.filename_wo_ext + "_det"))
    else:
        result_file = out_file

    return Result(stages=s, output=result_file)
Example #7
def cortical_thickness_pipeline(options):
    s = Stages()

    #imgs = [MincAtom(name, pipeline_sub_dir=os.path.join(options.application.output_directory,
    #                                                     options.application.pipeline_name + "_processed"))
    #        for name in options.application.files]

    pipeline_sub_dir = os.path.join(options.application.output_directory,
                                    options.application.pipeline_name + "_processed")

    #def atom(atom_type, file):
    #    return atom_type(file, pipeline_sub_dir=pipeline_sub_dir)  # TODO output_sub_dir, ....

    # TODO are all these fields actually used?  If not, omit from CSV?
    xfms = (pd.read_csv(options.thickness.xfm_csv)
            .apply(axis=1,  # TODO fill out <..>Atom(...) fields ...
                   func=lambda row: XfmHandler(
                          source=MincAtom(row.source, pipeline_sub_dir=pipeline_sub_dir),
                          target=MincAtom(row.target, pipeline_sub_dir=pipeline_sub_dir),
                          resampled=None,   #MincAtom(row.resampled, pipeline_sub_dir=pipeline_sub_dir),
                          xfm=XfmAtom(row.xfm, pipeline_sub_dir=pipeline_sub_dir))))
    # TODO better way to unpack?

    result = s.defer(cortical_thickness(xfms=xfms,
                                        atlas=NotImplemented,
                                        label_mapping=options.thickness.label_mapping,
                                        atlas_fwhm=options.thickness.atlas_fwhm,
                                        thickness_fwhm=options.thickness.thickness_fwhm))

    return Result(stages=s, output=result)
Example #9
def lsq6_pipeline(options):
    # TODO could also allow pluggable pipeline parts e.g. LSQ6 could be substituted out for the modified LSQ6
    # for the kidney tips, etc...
    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    # TODO this is tedious and annoyingly similar to the registration chain and MBM ...
    lsq6_dir = os.path.join(output_dir, pipeline_name + "_lsq6")
    imgs = get_imgs(options.application)

    s = Stages()

    # FIXME: why do we have to call registration_targets *outside* of lsq6_nuc_inorm? is it just because of the extra
    # options required?
    targets = s.defer(
        registration_targets(lsq6_conf=options.lsq6,
                             app_conf=options.application,
                             reg_conf=options.registration,
                             first_input_file=imgs[0].path))

    # TODO this is quite tedious and duplicates stuff in the registration chain ...
    resolution = (options.registration.resolution or get_resolution_from_file(
        targets.registration_standard.path))

    # This must happen after calling registration_targets otherwise it will resample to options.registration.resolution
    options.registration = options.registration.replace(resolution=resolution)
    lsq6_result = s.defer(
        lsq6_nuc_inorm(imgs=imgs,
                       resolution=resolution,
                       registration_targets=targets,
                       lsq6_dir=lsq6_dir,
                       lsq6_options=options.lsq6))

    return Result(stages=s, output=lsq6_result)
Example #10
def tamarack_pipeline(options):

    output_dir    = options.application.output_directory
    pipeline_name = options.application.pipeline_name
    #processed_dir = os.path.join(output_dir, pipeline_name + "_processed")
    first_level_dir = os.path.join(output_dir, pipeline_name + "_first_level")

    s = Stages()

    with open(options.application.csv_file, 'r') as f:
        files_df = (pd.read_csv(filepath_or_buffer=f,
                                usecols=['group', 'filename'])
                    .assign(file=lambda df:
                                   df.apply(axis="columns",
                                            func=lambda r:
                                                   MincAtom(r.filename.strip(),
                                                            pipeline_sub_dir=os.path.join(first_level_dir,
                                                                                          "%s_processed" % r.group.strip())))))

    check_MINC_input_files(files_df.file.apply(lambda img: img.path))

    #grouped_files_df = pd.DataFrame({'file' : pd.concat([imgs])}).assign(group=lambda df: df.index)

    tamarack_result = s.defer(tamarack(files_df, options=options))

    tamarack_result.first_level_results.applymap(maybe_deref_path).to_csv("first_level_results.csv", index=False)
    tamarack_result.resampled_determinants.applymap(maybe_deref_path).to_csv("resampled_determinants.csv", index=False)
    tamarack_result.overall_determinants.applymap(maybe_deref_path).to_csv("overall_determinants.csv", index=False)

    return Result(stages=s, output=tamarack_result)
Example #13
def lsq6_pipeline(options):
    # TODO could also allow pluggable pipeline parts e.g. LSQ6 could be substituted out for the modified LSQ6
    # for the kidney tips, etc...
    output_dir    = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    # TODO this is tedious and annoyingly similar to the registration chain and MBM ...
    lsq6_dir      = os.path.join(output_dir, pipeline_name + "_lsq6")
    processed_dir = os.path.join(output_dir, pipeline_name + "_processed")
    imgs = get_imgs(options.application)

    s = Stages()

    # TODO this is quite tedious and duplicates stuff in the registration chain ...
    resolution = (options.registration.resolution or
                  get_resolution_from_file(
                      s.defer(registration_targets(lsq6_conf=options.lsq6,
                                                   app_conf=options.application,
                                                   reg_conf=options.registration)).registration_standard.path))

    # FIXME: why do we have to call registration_targets *outside* of lsq6_nuc_inorm? is it just because of the extra
    # options required?
    targets = s.defer(registration_targets(lsq6_conf=options.lsq6,
                                           app_conf=options.application,
                                           reg_conf=options.registration,
                                           first_input_file=imgs[0].path))
    # This must happen after calling registration_targets otherwise it will resample to options.registration.resolution
    options.registration = options.registration.replace(resolution=resolution)
    lsq6_result = s.defer(lsq6_nuc_inorm(imgs=imgs,
                                         resolution=resolution,
                                         registration_targets=targets,
                                         lsq6_dir=lsq6_dir,
                                         lsq6_options=options.lsq6))

    return Result(stages=s, output=lsq6_result)
Example #14
 def scale_transform(xfm, scale, newname_wo_ext):
     s = Stages()
     defs = s.defer(as_deformation(transform=xfm.xfm, reference=xfm.source))
     scaled_defs = (defs.newname(newname_wo_ext) if newname_wo_ext else
                    defs.newname_with_suffix("_scaled_%s" % scale))
     s.defer(CmdStage(cmd=['c3d', '-scale', str(scale), defs.path, "-o", scaled_defs.path],
                      inputs=(defs,), outputs=(scaled_defs,)))
     return Result(stages=s, output=scaled_defs)
Example #15
def determinant(displacement_grid: MincAtom) -> Result[MincAtom]:
    """
    Takes a displacement field (deformation grid, vector field, those are
    all the same thing) and calculates the proper determinant (mincblob()
    takes care of adding 1 to the silly output of running mincblob directly)
    """
    s = Stages()
    det = s.defer(mincblob(op='determinant', grid=displacement_grid))
    return Result(stages=s, output=det)
Example #17
def dramms_warp(
        img: NiiAtom,  # TODO change to ITKAtom ?!
        xfm: XfmAtom,  # TODO: update to handler?
        like: NiiAtom,
        invert: bool = False,
        use_nn_interpolation=None,
        new_name_wo_ext: str = None,
        subdir: str = None,
        postfix: str = None) -> Result[NiiAtom]:

    s = Stages()

    if not subdir:
        subdir = 'resampled'

    # we need to get the filename without extension here in case we have
    # masks/labels associated with the input file. When that's the case,
    # we supply its name with "_mask" and "_labels" for which we need
    # to know what the main file will be resampled as
    if not new_name_wo_ext:
        # FIXME this is wrong when invert=True
        new_name_wo_ext = xfm.filename_wo_ext + '-resampled'

    new_img = s.defer(
        dramms_warp_simple(
            img=img,
            xfm=xfm,
            like=like,
            #extra_flags=extra_flags,
            invert=invert,
            #interpolation=interpolation,
            use_nn_interpolation=use_nn_interpolation,
            new_name_wo_ext=new_name_wo_ext,
            subdir=subdir))
    new_img.mask = s.defer(
        dramms_warp_simple(img=img.mask,
                           xfm=xfm,
                           like=like,
                           use_nn_interpolation=True,
                           invert=invert,
                           new_name_wo_ext=new_name_wo_ext + "_mask",
                           subdir=subdir)) if img.mask is not None else None
    new_img.labels = s.defer(
        dramms_warp_simple(img=img.labels,
                           xfm=xfm,
                           like=like,
                           use_nn_interpolation=True,
                           invert=invert,
                           new_name_wo_ext=new_name_wo_ext + "_labels",
                           subdir=subdir)) if img.labels is not None else None

    # Note that new_img can't be used for anything until the mask/label files are also resampled.
    # This shouldn't create a problem with stage dependencies as long as masks/labels appear in inputs/outputs of CmdStages.
    # (If this isn't automatic, a relevant helper function would be trivial.)
    # TODO: can/should this be done semi-automatically? probably ...
    return Result(stages=s, output=new_img)
Example #18
def convert(infile : ImgAtom, out_ext : str) -> Result[ImgAtom]:
    s = Stages()
    outfile = infile.newext(ext=out_ext)
    if infile.mask is not None:
        outfile.mask = s.defer(convert(infile.mask, out_ext=out_ext))
    if infile.labels is not None:
        outfile.labels = s.defer(convert(infile.labels, out_ext=out_ext))
    s.add(CmdStage(inputs=(infile,), outputs=(outfile,),
                   cmd = ['c3d', infile.path, '-o', outfile.path]))
    return Result(stages=s, output=outfile)
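A usage sketch (`mnc_img` is a hypothetical input atom; any mask/labels attached to the input are converted recursively along with it):

s = Stages()
nii_img = s.defer(convert(mnc_img, out_ext=".nii"))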
Example #19
def itk_convert_xfm(xfm: ITKXfmAtom, out_ext: str) -> Result[ITKXfmAtom]:
    if xfm.ext == out_ext:
        return Result(stages=Stages(), output=xfm)
    else:
        out_xfm = xfm.newext(out_ext)
        cmd = CmdStage(
            inputs=(xfm, ),
            outputs=(out_xfm, ),
            cmd=["itk_convert_xfm", "--clobber", xfm.path, out_xfm.path])
        return Result(stages=Stages((cmd, )), output=out_xfm)
Example #20
 def to_mni_xfm(xfm):
     s = Stages()
     defs = xfm.newname_with_suffix("_defs", subdir="tmp")
     s.add(CmdStage(cmd=["transformix", "-def", "all",
                         "-out", defs.dir,
                         "-tp", xfm.path,
                         "-xfm", os.path.join(defs.filename_wo_ext, defs.ext)],
                    inputs=(xfm,), outputs=(defs,)))
     out_xfm = s.defer(itk.itk_convert_xfm(defs, out_ext=".mnc"))
     return Result(stages=s, output=out_xfm)
Example #21
 def average_transforms(xfms, avg_xfm):
     s = Stages()
     defs = [s.defer(as_deformation(transform=xfm.xfm, reference_image=xfm.source)) for xfm in xfms]
     #avg_img = NotImplemented
     avg = imageToXfm(s.defer(average_images(defs,
                                             avg_file=xfmToImage(avg_xfm),
                                             #output_dir=os.path.join(defs[0].pipeline_sub_dir,
                                             #                        defs[0].output_sub_dir,
                                             #                        "transforms")
                                             )))
     return Result(stages=s, output=avg)
Example #22
def NLIN_pipeline(options):

    # if options.application.files is None:
    #     raise ValueError("Please, some files! (or try '--help')")  # TODO make a util procedure for this

    output_dir    = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    # TODO this is tedious and annoyingly similar to the registration chain and MBM and LSQ6 ...
    processed_dir = os.path.join(output_dir, pipeline_name + "_processed")
    nlin_dir      = os.path.join(output_dir, pipeline_name + "_nlin")

    resolution = (options.registration.resolution  # TODO does using the finest resolution here make sense?
                  or min([get_resolution_from_file(f) for f in options.application.files]))

    imgs = get_imgs(options.application)

    # imgs = [MincAtom(f, pipeline_sub_dir=processed_dir) for f in options.application.files]

    # determine NLIN settings by overriding defaults with
    # any settings present in protocol file, if it exists
    # could add a hook to print a message announcing completion, output files,
    # add more stages here to make a CSV

    initial_target_mask = MincAtom(options.nlin.target_mask) if options.nlin.target_mask else None
    initial_target = MincAtom(options.nlin.target, mask=initial_target_mask)

    full_hierarchy = get_nonlinear_configuration_from_options(nlin_protocol=options.nlin.nlin_protocol,
                                                              reg_method=options.nlin.reg_method,
                                                              file_resolution=resolution)

    s = Stages()

    nlin_result = s.defer(nlin_build_model(imgs, initial_target=initial_target, conf=full_hierarchy, nlin_dir=nlin_dir))

    # TODO return these?
    inverted_xfms = [s.defer(invert_xfmhandler(xfm)) for xfm in nlin_result.output]

    if options.stats.calc_stats:
        # TODO: put the stats part behind a flag ...

        determinants = [s.defer(determinants_at_fwhms(
                                  xfm=inv_xfm,
                                  inv_xfm=xfm,
                                  blur_fwhms=options.stats.stats_kernels))
                        for xfm, inv_xfm in zip(nlin_result.output, inverted_xfms)]

        return Result(stages=s,
                      output=Namespace(nlin_xfms=nlin_result,
                                       avg_img=nlin_result.avg_img,
                                       determinants=determinants))
    else:
        # there's no consistency in what gets returned, yikes ...
        return Result(stages=s, output=Namespace(nlin_xfms=nlin_result, avg_img=nlin_result.avg_img))
Example #23
 def average_transforms(xfms, avg_xfm):
     s = Stages()
     itk_xfms = copy.deepcopy(xfms)
     for xfm in itk_xfms:
         xfm.xfm = s.defer(dramms_to_itk(xfm.xfm))
     return Result(stages=s,
                   output=s.defer(
                       itk_to_dramms(
                           s.defer(
                               itk.Algorithms.average_transforms(
                                   itk_xfms, avg_xfm)))))
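# NB: the ITK round trip above lets the DRAMMS module reuse
# itk.Algorithms.average_transforms, at the cost of two format
# conversions per transform.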
Example #24
def NLIN_pipeline(options):

    if options.application.files is None:
        raise ValueError("Please, some files! (or try '--help')")  # TODO make a util procedure for this

    output_dir    = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    # TODO this is tedious and annoyingly similar to the registration chain and MBM and LSQ6 ...
    processed_dir = os.path.join(output_dir, pipeline_name + "_processed")
    nlin_dir      = os.path.join(output_dir, pipeline_name + "_nlin")

    resolution = (options.registration.resolution  # TODO does using the finest resolution here make sense?
                  or min([get_resolution_from_file(f) for f in options.application.files]))

    imgs = [MincAtom(f, pipeline_sub_dir=processed_dir) for f in options.application.files]

    # determine NLIN settings by overriding defaults with
    # any settings present in protocol file, if it exists
    # could add a hook to print a message announcing completion, output files,
    # add more stages here to make a CSV

    initial_target_mask = MincAtom(options.nlin.target_mask) if options.nlin.target_mask else None
    initial_target = MincAtom(options.nlin.target, mask=initial_target_mask)

    full_hierarchy = get_nonlinear_configuration_from_options(nlin_protocol=options.nlin.nlin_protocol,
                                                              flag_nlin_protocol=next(iter(options.nlin.flags_.nlin_protocol)),
                                                              reg_method=options.nlin.reg_method,
                                                              file_resolution=resolution)

    s = Stages()

    nlin_result = s.defer(nlin_build_model(imgs, initial_target=initial_target, conf=full_hierarchy, nlin_dir=nlin_dir))

    # TODO return these?
    inverted_xfms = [s.defer(invert_xfmhandler(xfm)) for xfm in nlin_result.output]

    if options.stats.calc_stats:
        # TODO: put the stats part behind a flag ...

        determinants = [s.defer(determinants_at_fwhms(
                                  xfm=inv_xfm,
                                  inv_xfm=xfm,
                                  blur_fwhms=options.stats.stats_kernels))
                        for xfm, inv_xfm in zip(nlin_result.output, inverted_xfms)]

        return Result(stages=s,
                      output=Namespace(nlin_xfms=nlin_result,
                                       avg_img=nlin_result.avg_img,
                                       determinants=determinants))
    else:
        # there's no consistency in what gets returned, yikes ...
        return Result(stages=s, output=Namespace(nlin_xfms=nlin_result, avg_img=nlin_result.avg_img))
Example #25
 def f(imgs: List[MincAtom],
       initial_target: MincAtom,
       conf: reg_module.Conf,
       nlin_dir: str,
       nlin_prefix: str,
       tournament_name_wo_ext: str = "tournament") -> Result[List[XfmHandler]]:
   s = Stages()
   Weight = int
   def h(xfms : List[XfmHandler], name_wo_ext : str) -> List[XfmHandler]:
       # TODO add weights to each return
       # TODO check len(...) == 0 case??
       if len(xfms) <= 1:
           return xfms
       else:
           first_half  = xfms[: len(xfms)//2]
           second_half = xfms[len(xfms)//2 :]
           first_half_result  = h(first_half,  name_wo_ext=name_wo_ext + "_L")
           second_half_result = h(second_half, name_wo_ext=name_wo_ext + "_R")
           A_halfway_to_B, B_halfway_to_A, avg_img = s.defer(
               nonlinear_midpoint_xfm(
                   img_A = first_half_result[0].resampled,
                   img_B = second_half_result[0].resampled,
                   out_name_wo_ext=name_wo_ext,
                   nlin_algorithm=reg_module,
                   conf=conf,
                   out_dir=nlin_dir))
           xfms_to_midpoint = ([XfmHandler(source=xfm.source,
                                           target=avg_img,
                                           resampled=A_halfway_to_B.resampled,
                                           xfm=s.defer(xfmconcat([xfm.xfm, A_halfway_to_B.xfm],
                                                                 name="%s_%s" % (xfm.source.filename_wo_ext,
                                                                                 name_wo_ext))))
                                for xfm in first_half_result]
                               + [XfmHandler(source=xfm.source,
                                             target=avg_img,
                                             resampled=B_halfway_to_A.resampled,
                                             xfm=s.defer(xfmconcat([xfm.xfm, B_halfway_to_A.xfm],
                                                                   name="%s_%s" % (xfm.source.filename_wo_ext,
                                                                                   name_wo_ext))))
                                  for xfm in second_half_result])
           return xfms_to_midpoint
   identity_xfm = s.defer(param2xfm(out_xfm=XfmAtom(pipeline_sub_dir=imgs[0].pipeline_sub_dir,
                                                    output_sub_dir=imgs[0].output_sub_dir,
                                                    name=os.path.join(imgs[0].pipeline_sub_dir,
                                                                      imgs[0].output_sub_dir,
                                                                      "id.xfm"))))
   initial_xfms = [XfmHandler(source=img, target=img,
                              resampled=img, xfm=identity_xfm) for img in imgs]
   xfms_to_avg = h(initial_xfms, tournament_name_wo_ext)
   avg_img = xfms_to_avg[0].target
   return Result(stages=s, output=WithAvgImgs(avg_img=avg_img, avg_imgs=[avg_img],
                                              output=xfms_to_avg))
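# E.g. with four images [a, b, c, d], h registers the midpoint of (a, b) and
# of (c, d), then the midpoint of those two midpoints: n images need log2(n)
# rounds of pairwise midpoint registrations, with each image's transform
# concatenated level by level up to the final midpoint average.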
Example #28
 def average_transforms(xfms, avg_xfm):
     intermediate_xfm = avg_xfm.newname_with_suffix("_inter", subdir="tmp")
     s = Stages()
     s.add(CmdStage(cmd=["echo", ('(Transform "WeightedCombinationTransform")\n'
                                  '(SubTransforms %s)\n'
                                  '(NormalizeCombinationsWeights "true")\n') %
                                    ' '.join(sorted(xfm.path for xfm in xfms))],
                    inputs=xfms, outputs=(intermediate_xfm,)))
     s.add(CmdStage(cmd=["transformix", "-def", "all",
                         "-out", os.path.dirname(avg_xfm.path),
                         "-tp", intermediate_xfm.path,
                         "-xfm", avg_xfm.path],
                    inputs=(intermediate_xfm,), outputs=(avg_xfm,)))
     return Result(stages=s, output=avg_xfm)
Example #29
def tv_slice_recon_pipeline(options):
    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    s = Stages()

    df = pd.read_csv(options.application.csv_file,
                     dtype={
                         "brain_name": str,
                         "brain_directory": str
                     })

    df["mosaic_file"] = df.apply(lambda row: find_mosaic_file(row), axis=1)
    df["mosaic_dictionary"] = df.apply(
        lambda row: read_mosaic_file(row.mosaic_file), axis=1)
    df["number_of_slices"] = df.apply(
        lambda row: int(row.mosaic_dictionary["sections"]), axis=1)
    df["interslice_distance"] = df.apply(
        lambda row: float(row.mosaic_dictionary["sectionres"]) / 1000, axis=1)
    df["Zstart"] = df.apply(lambda row: 1 if isnan(row.Zstart) else row.Zstart,
                            axis=1)
    df["Zend"] = df.apply(lambda row: row.number_of_slices - row.Zstart + 1
                          if isnan(row.Zend) else row.Zend,
                          axis=1)
    df["slice_directory"] = df.apply(lambda row: os.path.join(
        output_dir, pipeline_name + "_stitched", row.brain_name),
                                     axis=1)

    #############################
    # Step 1: Run TV_stitch.py
    #############################
    #TODO surely there's a way around this?
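    # (a possible alternative, untested: build the column directly with
    #  df.apply(lambda row: s.defer(TV_stitch_wrap(...)), axis=1),
    #  as done for the derived columns above)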
    df = df.assign(TV_stitch_result="")
    for index, row in df.iterrows():
        df.at[index, "TV_stitch_result"] = s.defer(
            TV_stitch_wrap(brain_directory=FileAtom(row.brain_directory),
                           brain_name=row.brain_name,
                           slice_directory=row.slice_directory,
                           TV_stitch_options=options.TV_stitch,
                           Zstart=row.Zstart,
                           Zend=row.Zend,
                           output_dir=output_dir))
    df.drop(["mosaic_dictionary", "TV_stitch_result"],
            axis=1).to_csv("TV_brains.csv", index=False)
    df.explode("TV_stitch_result")\
        .assign(slice=lambda df: df.apply(lambda row: row.TV_stitch_result.path, axis=1))\
        .drop(["mosaic_dictionary", "TV_stitch_result"], axis=1)\
        .to_csv("TV_slices.csv", index=False)
    return Result(stages=s, output=())
Example #30
def shift_modify_header(img: MincAtom,
                        shifted_img: MincAtom,
                        newx: float,
                        newy: float,
                        newz: float):
    s = Stages()
    #Copy file to new location
    stage = CmdStage(inputs=(img,), outputs=(shifted_img,), memory=1,
                     cmd=['cp', img.path, shifted_img.path])
    print(stage.render())
    s.add(stage)
    #Alter header of copied image to shift
    xspace_start = 'xspace:start=' + str(newx)
    yspace_start = 'yspace:start=' + str(newy)
    zspace_start = 'zspace:start=' + str(newz)
    stage = CmdStage(inputs=(shifted_img,), outputs=(shifted_img,), memory=1,
                     cmd=['minc_modify_header','-dinsert',xspace_start,
                          '-dinsert',yspace_start,
                          '-dinsert',zspace_start,
                          shifted_img.path])
    print(stage.render())
    s.add(stage)
    #Alter header of copied image with header modification
    append_history_string = ':history= >>> copy and shift: ' + img.path + ' to ' + shifted_img.path
    stage = CmdStage(inputs=(shifted_img,), outputs=(shifted_img,), memory=1,
                     cmd=['minc_modify_header','-sappend',
                          append_history_string,
                          shifted_img.path])
    print(stage.render())
    s.add(stage)
    return Result(stages=s, output=shifted_img)
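A hedged usage sketch (input atom and offsets are illustrative):

s = Stages()
shifted = s.defer(shift_modify_header(img, img.newname_with_suffix("_shifted"),
                                      newx=1.0, newy=0.0, newz=0.0))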
Example #31
def nlin_displacement(xfm : XfmHandler, inv_xfm : Optional[XfmHandler] = None) -> Result[MincAtom]:
    """
    See: nlin_part().

    This returns the nonlinear part of the input
    transformation (xfm) in the form of a grid file (vector field).
    All transformations are encapsulated in this field (linear parts
    that are normally specified in the .xfm file are placed in the
    vector field)
    """
    
    s = Stages()
    return Result(stages=s,
                  output=s.defer(minc_displacement(
                                   s.defer(nlin_part(xfm, inv_xfm=inv_xfm)))))
Example #32
    def f(
        imgs: List[MincAtom],
        initial_target: MincAtom,
        conf: reg_module.MultilevelConf,
        use_robust_averaging: bool,
        nlin_dir: str,
        nlin_prefix: str,
        #algorithms : Type[Algorithms]
    ) -> Result[WithAvgImgs[List[XfmHandler]]]:
        confs = reg_module.hierarchical_to_single(
            conf) if conf is not None else None
        if confs is None or len(confs) == 0:
            raise ValueError("No configurations supplied ...")
        s = Stages()

        avg = initial_target
        avg_imgs = []
        xfms = [None] * len(imgs)
        for i, conf in enumerate(confs, start=1):
            xfms = s.defer_map([
                reg_module.register(
                    source=img,
                    # in the case the registration algorithm doesn't accept
                    # an initial transform,
                    # we could use the resampled output of the previous
                    # step for a more efficient registration process,
                    # although this would require more careful bookkeeping
                    # of transforms and incur additional resampling error
                    target=avg,
                    conf=conf,
                    initial_source_transform=xfm.xfm
                    if reg_module.accepts_initial_transform() and
                    (xfm is not None) else None,
                    ##generation=i,
                    # TODO reduce unneeded resamplings if accepts_initial_transform?
                    resample_source=True) for img, xfm in zip(imgs, xfms)
            ])
            avg = s.defer(
                reg_module.Algorithms.average([xfm.resampled for xfm in xfms],
                                              robust=use_robust_averaging,
                                              name_wo_ext='%s-nlin-%d' %
                                              (nlin_prefix, i),
                                              output_dir=nlin_dir))
            avg_imgs.append(avg)
        return Result(stages=s,
                      output=WithAvgImgs(output=xfms,
                                         avg_img=avg,
                                         avg_imgs=avg_imgs))
Example #33
def deep_segment(
    image: FileAtom,
    deep_segment_pipeline: FileAtom,
    anatomical_suffix: str,
    count_suffix: str,
    outline_suffix: str = None,
    cell_min_area: int = None,
    cell_mean_area: float = None,
    cell_max_area: int = None,
    temp_dir: str = None,
):
    anatomical = image.newname_with_suffix("_" + anatomical_suffix)
    count = image.newname_with_suffix("_" + count_suffix)
    outline = image.newname_with_suffix(
        "_" + outline_suffix) if outline_suffix else None
    stage = CmdStage(inputs=(image, deep_segment_pipeline),
                     outputs=(anatomical, count) + ((outline,) if outline_suffix else ()),
                     cmd=['deep_segment.py',
                          '--segment-intensity 1',
                          '--temp-dir %s' % temp_dir if temp_dir else "",
                          '--learner %s' % deep_segment_pipeline.path,
                          '--image %s' % image.path,
                          '--image-output %s' % anatomical.path,
                          '--centroids-output %s' % count.path,
                          '--outlines-output %s' % outline.path if outline_suffix else "",
                          '--cell-min-area %s' % cell_min_area if cell_min_area else "",
                          '--process-clusters --cell-mean-area %s --cell-max-area %s' % (cell_mean_area, cell_max_area)
                              if (cell_mean_area and cell_max_area) else ""
                          ])
    return Result(stages=Stages([stage]), output=(anatomical, count, outline))
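A hedged usage sketch (all names and the learner path are illustrative):

s = Stages()
anatomical, count, outline = s.defer(
    deep_segment(image=img,
                 deep_segment_pipeline=FileAtom("/models/learner.zip"),
                 anatomical_suffix="anatomical",
                 count_suffix="count",
                 outline_suffix="outline"))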
Example #34
def antsRegistration(fixed: MincAtom,
                     moving: MincAtom,
                     transform: XfmAtom,
                     output_dir: str,
                     warped: str = "Warped.nii.gz",
                     inversewarped: str = "InverseWarped.nii.gz",
                     dimensionality: int = 3):
    #TODO warped and inversewarped output to the working directory
    stage = CmdStage(
        inputs=(fixed, moving),
        outputs=(transform, ),
        cmd=[
            'antsRegistration',
            '--verbose 1',
            '--float 0',
            '--minc',
            '--dimensionality %s' % dimensionality,
            '--output [%s,%s,%s]' % (transform.path.replace(
                '0_GenericAffine.xfm', ''), warped, inversewarped),
            '--interpolation Linear',
            '--use-histogram-matching 0',
            '--winsorize-image-intensities [0.01,0.99]',
            '--initial-moving-transform [%s,%s,1]' %
            (fixed.path, moving.path),  #1 indicates center of mass
            '--transform Translation[0.1]',
            '--metric MI[%s,%s,1,32,Regular,0.25]' % (fixed.path, moving.path),
            '--convergence [1000x500x250x0,1e-6,10]',
            '--shrink-factors 12x8x4x2',
            '--smoothing-sigmas 4x3x2x1vox'
        ],
        log_file=os.path.join(output_dir, "join_sections.log"))

    return Result(stages=Stages([stage]), output=transform)
Example #35
 def scale_transform(xfm, scale, newname_wo_ext):
     scaled_xfm = xfm.newname_with_suffix("_scaled%s" % scale)
     c = CmdStage(
         cmd=["dramms-defop", "-m",
              str(scale), xfm.path, scaled_xfm.path],
         inputs=(xfm, ),
         outputs=(scaled_xfm, ))
     return Result(stages=Stages([c]), output=scaled_xfm)
Example #36
def as_deformation(xfm):
    # a sketch: derive the deformation-field file from the transform name,
    # mirroring the transformix invocation in to_mni_xfm above
    defs = xfm.newname_with_suffix("_defs", subdir="tmp")
    c = CmdStage(cmd=[
        "transformix", "-def", "all", "-out", defs.dir, "-tp", xfm.path,
        "-xfm", defs.filename_wo_ext + defs.ext
    ],
                 inputs=(xfm, ),
                 outputs=(defs, ))
    return Result(stages=Stages([c]), output=defs)
Example #37
        def build_model(imgs,
                        conf,
                        nlin_dir,
                        nlin_prefix,
                        initial_target,
                        output_name_wo_ext = None):
            s = Stages()
            mincify = base_build_model.ToMinc
            imgs = tuple(s.defer(mincify.from_mnc(img)) for img in imgs)
            result = s.defer(base_build_model.build_model(imgs=imgs, conf=conf,
                                                          nlin_dir=nlin_dir, nlin_prefix=nlin_prefix,
                                                          initial_target=s.defer(mincify.from_mnc(initial_target))
                                                          #output_name_wo_ext=output_name_wo_ext
                                                          ))

            def wrap_output_xfmh(xfmh):
                return XfmHandler(source=s.defer(mincify.to_mnc(xfmh.source)) if xfmh.source else None,
                                  target=s.defer(mincify.to_mnc(xfmh.target)) if xfmh.target else None,
                                  resampled=s.defer(mincify.to_mnc(xfmh.resampled)) if xfmh.has_resampled() else None,
                                  xfm=s.defer(mincify.to_mni_xfm(xfmh.xfm)),
                                  inverse=wrap_output_xfmh(xfmh.inverse) if xfmh.has_inverse() else None)

            return Result(stages=s, output=WithAvgImgs(avg_imgs=[s.defer(mincify.to_mnc(img))
                                                                 for img in result.avg_imgs],
                                                       avg_img=s.defer(mincify.to_mnc(result.avg_img)),
                                                       output=[wrap_output_xfmh(x)
                                                               for x in result.output]))
Example #38
def surface_mask2(input: MincAtom,
                  surface: FileAtom,
                  args: List[str] = []) -> Result[MincAtom]:
    mask_vol = surface.newname_with_suffix("_mask", ext=".mnc")
    stage = CmdStage(inputs=(input, surface),
                     outputs=(mask_vol, ),
                     cmd=["surface_mask2", "-clobber"] + args +
                     [input.path, surface.path, mask_vol.path])
    return Result(stages=Stages([stage]), output=mask_vol)
Example #39
def smooth_vector(source: MincAtom, fwhm: float) -> Result[MincAtom]:
    outf = source.newname_with_suffix(
        "_smooth_fwhm%s" % fwhm, subdir="tmp")  # TODO smooth_displacement_?
    cmd = [
        'smooth_vector', '--clobber', '--filter',
        '--fwhm=%s' % fwhm, source.path, outf.path
    ]
    stage = CmdStage(inputs=(source, ), outputs=(outf, ), cmd=cmd)
    return Result(stages=Stages([stage]), output=outf)
Example #42
    def register(
            source: MincAtom,
            target: MincAtom,
            conf: Conf,
            initial_source_transform: Optional[XfmAtom] = None,
            transform_name_wo_ext: str = None,
            generation: int = None,  # not used; remove from API (fix ANTS)
            resample_source: bool = False,
            resample_subdir: str = "resampled") -> Result[XfmHandler]:
        if conf is None:
            raise ValueError("no configuration supplied")

        out_dir = os.path.join(
            source.pipeline_sub_dir, source.output_sub_dir,
            "%s_elastix_to_%s" %
            (source.filename_wo_ext, target.filename_wo_ext))

        # elastix chooses this for us:
        out_img = NiiAtom(
            name=os.path.join(out_dir, "result.%d.mnc" %
                              0),  # TODO number of param files ?!?!
            pipeline_sub_dir=source.pipeline_sub_dir,
            output_sub_dir=source.output_sub_dir)
        #out_xfm = XfmAtom(name = "%s_elastix_to_%s.xfm" % (source.filename_wo_ext, target.filename_wo_ext),
        #                  pipeline_sub_dir=source.pipeline_sub_dir, output_sub_dir=source.output_sub_dir)
        out_xfm = XfmAtom(
            name=os.path.join(out_dir, "TransformParameters.%d.txt" %
                              0),  # TODO number of param files ?!?!
            pipeline_sub_dir=source.pipeline_sub_dir,
            output_sub_dir=source.output_sub_dir)
        cmd = (['elastix', '-f', source.path, '-m', target.path] +
               (flatten(*[["-p", x] for x in conf])) +
               (["-fMask", source.mask.path] if source.mask else []) +
               (["-mMask", target.mask.path] if target.mask else []) +
               (["-t0", initial_source_transform.path]
                if initial_source_transform else []) + (["-out", out_dir]))
        s = CmdStage(cmd=cmd,
                     inputs=(source, target) +
                     ((source.mask, ) if source.mask else
                      ()) + ((target.mask, ) if target.mask else ()),
                     outputs=(out_xfm, out_img))

        #s2 = CmdStage(cmd=['transformix', '-out', os.path.join(resample_subdir, "%s" % c),
        #                   "-tp", out_xfm, "-in", out_name],
        #              inputs=(), outputs=())

        xfm = XfmHandler(source=source,
                         target=target,
                         xfm=out_xfm,
                         resampled=out_img)
        return Result(stages=Stages([s]), output=xfm)


# one question is whether we should have separate NLIN/LSQ12/LSQ6 interfaces or not, given that these differences seem
# like they should be rather irrelevant to general registration procedures ... at present minctracc
# is the main difficulty, since it uses different
Example #43
def resample(img,
             xfm,  # TODO: update to handler?
             like,
             invert = False,
             use_nn_interpolation = None,
             new_name_wo_ext: str = None,
             subdir: str = None,
             postfix: str = None):

    s = Stages()

    if not subdir:
        subdir = 'resampled'

    # we need to get the filename without extension here in case we have
    # masks/labels associated with the input file. When that's the case,
    # we supply its name with "_mask" and "_labels" for which we need
    # to know what the main file will be resampled as
    if not new_name_wo_ext:
        # FIXME this is wrong when invert=True
        new_name_wo_ext = xfm.filename_wo_ext + '-resampled'

    new_img = s.defer(resample_simple(img=img, xfm=xfm, like=like,
                                      invert=invert,
                                      use_nn_interpolation=use_nn_interpolation,
                                      new_name_wo_ext=new_name_wo_ext,
                                      subdir=subdir))
    new_img.mask = s.defer(resample_simple(img=img.mask, xfm=xfm, like=like,
                                           use_nn_interpolation=True,
                                           invert=invert,
                                           new_name_wo_ext=new_name_wo_ext + "_mask",
                                           subdir=subdir)) if img.mask is not None else None
    new_img.labels = s.defer(resample_simple(img=img.labels, xfm=xfm, like=like,
                                             use_nn_interpolation=True,
                                             invert=invert,
                                             new_name_wo_ext=new_name_wo_ext + "_labels",
                                             subdir=subdir)) if img.labels is not None else None

    # Note that new_img can't be used for anything until the mask/label files are also resampled.
    # This shouldn't create a problem with stage dependencies as long as masks/labels appear in inputs/outputs of CmdStages.
    # (If this isn't automatic, a relevant helper function would be trivial.)
    # TODO: can/should this be done semi-automatically? probably ...
    return Result(stages=s, output=new_img)
Example #44
def transform_objects(
        input_obj: FileAtom,
        xfm: XfmAtom) -> Result[FileAtom]:  # XfmAtom -> XfmHandler??
    output_obj = input_obj.newname_with_suffix("_resampled_via_%s" %
                                               xfm.filename_wo_ext)
    stage = CmdStage(
        inputs=(input_obj, xfm),
        outputs=(output_obj, ),
        cmd=["transform_objects", input_obj.path, xfm.path, output_obj.path])
    return Result(stages=Stages([stage]), output=output_obj)
Example #45
def average_images(imgs        : Sequence[ImgAtom],
                   dimensions  : int = 3,
                   normalize   : bool = False,
                   output_dir  : str = '.',
                   name_wo_ext : str = "average",
                   out_ext     : Optional[str] = None,
                   avg_file    : Optional[ITKImgAtom] = None) -> Result[ITKImgAtom]:

    s = Stages()

    if len(imgs) == 0:
        raise ValueError("`AverageImages` arg `imgs` is empty (can't average zero files)")

    ext = out_ext or imgs[0].ext

    # the output_dir basically gives us the equivalent of the pipeline_sub_dir for
    # regular input files to a pipeline, so use that here
    avg = avg_file or ImgAtom(name=os.path.join(output_dir, '%s.todo' % name_wo_ext),
                              orig_name=None,
                              pipeline_sub_dir=output_dir)
    avg.ext = ext

    # if all input files have masks associated with them, add the combined mask to
    # the average:
    # TODO what if avg_file has a mask ... should that be used instead? (then rename avg -> avg_file above)
    all_inputs_have_masks = all((img.mask for img in imgs))
    if all_inputs_have_masks:
        combined_mask = (ImgAtom(name=os.path.join(avg_file.dir, '%s_mask.todo' % avg_file.filename_wo_ext),
                                 orig_name=None,
                                 pipeline_sub_dir=avg_file.pipeline_sub_dir)
                         if avg_file is not None else
                         ImgAtom(name=os.path.join(output_dir, '%s_mask.todo' % name_wo_ext),
                                 orig_name=None,
                                 pipeline_sub_dir=output_dir))
        combined_mask.ext = ext
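        # `max` below is presumably the pipeline's voxel-wise maximum helper
        # (not the Python builtin), merging the individual input masks: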
        s.defer(max(imgs=sorted({img_inst.mask for img_inst in imgs}),
                    out_img=combined_mask))
        avg.mask = combined_mask
    s.add(CmdStage(inputs = imgs,
                   outputs = (avg,),
                   cmd = ["AverageImages", str(dimensions), avg.path, "%d" % normalize]
                         + [img.path for img in imgs]))
    return Result(stages=s, output=avg)
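# For illustration (hypothetical filenames): with imgs = [a.mnc, b.mnc, c.mnc],
# dimensions=3, and normalize=False, the staged command line is roughly
#   AverageImages 3 ./average.mnc 0 a.mnc b.mnc c.mnc
# ("%d" % False renders the normalize flag as 0).  When every input has a mask,
# a voxelwise max (union) of the masks is also staged and attached to the average.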
Example #46
0
def dramms_invert(defs: DrammsXfmAtom,
                  out_defs: Optional[DrammsXfmAtom] = None):
    out_defs = out_defs or defs.newname_with_suffix("_inverted")

    return Result(stages=Stages([
        CmdStage(cmd=['dramms-defop', '-i', defs.path, out_defs.path],
                 inputs=(defs, ),
                 outputs=(out_defs, ))
    ]),
                  output=out_defs)
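# Usage sketch: per the CmdStage above, inverting a deformation field named
# defs.nii.gz (hypothetical) stages the single command
#   dramms-defop -i defs.nii.gz defs_inverted.nii.gz
# with the output name derived via newname_with_suffix("_inverted") when no
# explicit out_defs is given.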
Example #47
0
def mbm_pipeline(options : MBMConf):
    s = Stages()
    imgs = [MincAtom(name, pipeline_sub_dir=os.path.join(options.application.output_directory,
                                                         options.application.pipeline_name + "_processed"))
            for name in options.application.files]

    check_MINC_input_files([img.path for img in imgs])

    prefix = options.application.pipeline_name

    mbm_result = s.defer(mbm(imgs=imgs, options=options,
                         prefix=prefix,
                         output_dir=options.application.output_directory))

    # create useful CSVs (note the files listed therein won't yet exist ...)
    for filename, dataframe in (("transforms.csv", mbm_result.xfms),
                                ("determinants.csv", mbm_result.determinants)):
        with open(filename, 'w') as f:
            f.write(dataframe.applymap(maybe_deref_path).to_csv(index=False))

    # TODO moved here from inside `mbm` for now ... does this make most sense?
    if options.mbm.segmentation.run_maget:
        import copy
        maget_options = copy.deepcopy(options)  #Namespace(maget=options)
        #maget_options
        #maget_options.maget = maget_options.mbm
        #maget_options.execution = options.execution
        #maget_options.application = options.application
        maget_options.application.output_directory = os.path.join(options.application.output_directory, "segmentation")
        maget_options.maget = options.mbm.maget

        fixup_maget_options(maget_options=maget_options.maget,
                            nlin_options=maget_options.mbm.nlin,
                            lsq12_options=maget_options.mbm.lsq12)
        del maget_options.mbm

        s.defer(maget([xfm.resampled for _ix, xfm in mbm_result.xfms.rigid_xfm.items()],
                       options=maget_options,
                       prefix="%s_MAGeT" % prefix,
                       output_dir=os.path.join(options.application.output_directory, prefix + "_processed")))

    return Result(stages=s, output=mbm_result)
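# Sketch of the CSV step above, assuming maybe_deref_path maps atoms/handlers
# to their file paths and leaves other values untouched: each cell of, e.g.,
# mbm_result.xfms then serializes to an eventual .xfm/.mnc path rather than to
# the repr() of an XfmHandler.  A slightly more direct equivalent would be
#   dataframe.applymap(maybe_deref_path).to_csv(filename, index=False)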
Example #48
0
def stage_embryos_pipeline(options):
    s = Stages()

    imgs = get_imgs(options.application)
    rough_volume_imgs = get_volume_estimate(imgs)
    imgs_and_rough_volume = pd.DataFrame({"mincatom" : imgs,
                                          "rough_volume" : pd.Series(rough_volume_imgs, dtype=float)})

    check_MINC_input_files([img.path for img in imgs])

    output_directory = options.application.output_directory
    output_sub_dir = os.path.join(output_directory,
                                  options.application.pipeline_name + "_4D_atlas")

    time_points_in_4D_atlas = instances_in_4D_atlas_from_csv(options.staging.staging.csv_4D,
                                                             output_sub_dir)

    # we can use the resolution of one of the time points in the 4D atlas
    # for all the registrations that will be run.
    resolution = get_resolution_from_file(time_points_in_4D_atlas.loc[0]["mincatom"].orig_path)

    print(options.staging.lsq12)

    lsq12_conf = get_linear_configuration_from_options(options.staging.lsq12,
                                                       transform_type=LinearTransType.lsq12,
                                                       file_resolution=resolution)

    nlin_component = get_nonlinear_component(options.staging.nlin.reg_method)

    # match each of the embryos individually
    for i in range(imgs_and_rough_volume.shape[0]):
        s.defer(match_embryo_to_4D_atlas(imgs_and_rough_volume.loc[i],
                                         time_points_in_4D_atlas,
                                         lsq6_conf=options.staging.lsq6,
                                         lsq12_conf=lsq12_conf,
                                         nlin_module=nlin_component,
                                         resolution=resolution,
                                         nlin_options=options.staging.nlin))


    return Result(stages=s, output=None)
Example #49
0
def match_embryo_to_4D_atlas(embryo_with_volume_est,
                             full_4D_atlas_info,
                             lsq6_conf: LSQ6Conf,
                             lsq12_conf: MinctraccConf,
                             nlin_module: NLIN,
                             resolution: float,
                             nlin_options):
    s = Stages()

    # 1 what's the closest match in the 4D atlas?
    mid_index = get_index_closest_volume_match(embryo_with_volume_est["rough_volume"].astype(float), full_4D_atlas_info)

    print("Best initial match for: \n", embryo_with_volume_est["mincatom"].orig_path, " ", full_4D_atlas_info.loc[mid_index]["timepoint"])

    # register embryo to closest match +/- 7 time points
    # make sure we don't index outside the possible range
    lowest_index  = max(0, mid_index - 7)
    highest_index = min(full_4D_atlas_info.shape[0] - 1, mid_index + 7)

    all_transforms = [s.defer(lsq6_lsq12_nlin(source=embryo_with_volume_est["mincatom"],
                                              target=full_4D_atlas_info.loc[i]["mincatom"],
                                              lsq6_conf=lsq6_conf,
                                              lsq12_conf=lsq12_conf,
                                              nlin_module=nlin_module,
                                              resolution=resolution,
                                              nlin_options=nlin_options.nlin_protocol,
                                              resampled_post_fix_string="E" + str(full_4D_atlas_info.loc[i]["timepoint"]))) for
                      i in range(lowest_index, highest_index + 1, 1)]

    # gather stats on those registrations
    # the match is determined by the sum of the magnitude
    # of the inverse transformation from 4D instance -> embryo
    # using the mask of the 4D instance to limit the total sum
    # 1) calculate inverse
    all_inv_transforms = [s.defer(invert_xfmhandler(xfm)) for xfm in all_transforms]
    minc_displacement_grids = [s.defer(minc_displacement(inv_xfm)) for inv_xfm in all_inv_transforms]
    magnitudes = [s.defer(mincblob(op='magnitude', grid=disp_grid)) for disp_grid in minc_displacement_grids]

    return Result(stages=s, output=all_transforms)
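    # The magnitudes above are staged, but the final selection of the best
    # match is not shown here.  A hedged sketch of one way to finish it
    # (sum_within_mask is a hypothetical helper returning a deferred scalar,
    # e.g., via mincstats -sum restricted to the 4D instance's mask):
    #   scores = [s.defer(sum_within_mask(mag, mask=full_4D_atlas_info.loc[i]["mincatom"].mask))
    #             for i, mag in zip(range(lowest_index, highest_index + 1), magnitudes)]
    #   best_xfm = all_transforms[int(np.argmin(scores))]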
Example #50
0
    def f(imgs: List[MincAtom],
          initial_target: MincAtom,
          conf: reg_module.MultilevelConf,
          nlin_dir: str,
          nlin_prefix: str,
          #algorithms : Type[Algorithms]
          ) -> Result[WithAvgImgs[List[XfmHandler]]]:
        confs = reg_module.hierarchical_to_single(conf) if conf is not None else None
        if confs is None or len(confs) == 0:
            raise ValueError("No configurations supplied ...")
        s = Stages()

        avg = initial_target
        avg_imgs = []
        xfms = [None] * len(imgs)
        for i, conf in enumerate(confs, start=1):
            xfms = [s.defer(reg_module.register(source=img,
                                                # in the case the registration algorithm doesn't accept
                                                # an initial transform,
                                                # we could use the resampled output of the previous
                                                # step for a more efficient registration process,
                                                # although this would require more careful bookkeeping
                                                # of transforms and incur additional resampling error
                                                target=avg,
                                                conf=conf,
                                                initial_source_transform=xfm.xfm
                                                              if reg_module.accepts_initial_transform()
                                                                   and (xfm is not None)
                                                              else None,
                                                ##generation=i,
                                                # TODO reduce unneeded resamplings if accepts_initial_transform?
                                                resample_source=True))
                    for img, xfm in zip(imgs, xfms)]
            avg = s.defer(reg_module.Algorithms.average([xfm.resampled for xfm in xfms],
                                                        name_wo_ext='%s-nlin-%d' % (nlin_prefix, i),
                                                        output_dir=nlin_dir))
            avg_imgs.append(avg)
        return Result(stages=s, output=WithAvgImgs(output=xfms, avg_img=avg, avg_imgs=avg_imgs))
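        # Loop invariant of the builder above: after iteration i, `avg` is the
        # model built from registrations using confs[:i] and `xfms` holds one
        # XfmHandler per input image targeting that model; feeding xfm.xfm back
        # in as initial_source_transform (when the registration module supports
        # it) lets each generation refine the previous one instead of starting
        # from the identity.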
Example #51
0
    def f(imgs: List[MincAtom],
          nlin_dir: str,
          conf: nlin_module.MultilevelConf,
          initial_target: MincAtom,
          nlin_prefix: str,
          #output_dir_for_avg: str = None,
          #output_name_wo_ext: str = None
          ):
        s = Stages()

        pairwise_result = s.defer(pairwise(nlin_module, max_images=25, max_pairs=None).build_model(
            imgs=imgs, nlin_dir=nlin_dir, conf=nlin_module.hierarchical_to_single(conf)[-1] if conf else None,
            initial_target=initial_target, nlin_prefix=nlin_prefix
            #, output_name_wo_ext=output_name_wo_ext  #, algorithms=nlin_module.algorithms
        ))

        build_model_result = s.defer(build_model(nlin_module).build_model(
            imgs=imgs, nlin_dir=nlin_dir, conf=conf, initial_target=pairwise_result.avg_img,
            nlin_prefix=nlin_prefix
            #, output_name_wo_ext=output_name_wo_ext  #, algorithms=algorithms
        ))

        return Result(stages=s, output=build_model_result)
Example #52
0
def det_and_log_det(displacement_grid : MincAtom,
                    fwhm : Optional[float],
                    annotation: str = "") -> Result[Namespace]:  # (det=MincAtom, log_det=MincAtom)]:
    """
    When this function is called, you might (or should) know what kind of
    deformation grid is passed along. This allows you to provide a proper
    annotation for the produced log determinant file. For instance "absolute"
    or "relative" for transformations that include an affine linear part, or
    that have the linear part taken out respectively.
    """
    s = Stages()
    # TODO: naming doesn't correspond with the (automagic) file naming: d-1 <=> det(f), det <=> det+1(f)
    det = s.defer(determinant(s.defer(smooth_vector(source=displacement_grid, fwhm=fwhm))
                              if fwhm else displacement_grid))

    output_filename_wo_ext = displacement_grid.filename_wo_ext + "_log_det" + annotation
    if fwhm:
        output_filename_wo_ext += "_fwhm" + str(fwhm)
    log_det = s.defer(mincmath(op='log',
                               vols=[det],
                               subdir="stats-volumes",
                               new_name=output_filename_wo_ext))
    return Result(stages=s, output=Namespace(det=det, log_det=log_det))
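# Naming example: a grid "subj_displ.mnc" with annotation="_abs" and fwhm=2.0
# yields subj_displ_log_det_abs_fwhm2.0 (under stats-volumes), while a falsy
# fwhm (None or 0) skips both the smoothing stage and the _fwhm suffix.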
Example #53
0
def determinants_at_fwhms(xfms       : List[XfmHandler],  # TODO change to pd.Series to get indexing (hence safer inv_xfm)?
                          blur_fwhms : str, # TODO: change back to List[float]; should unblurred dets be found automatically?
                          inv_xfms   : Optional[List[XfmHandler]] = None)   \
                       -> Result[pd.DataFrame]:  # TODO how to write down a Pandas type here ?!
    """
    The most common way to use this function is by providing
    it with transformations that go from the final average
    to an individual. I.e.:

    *** = non linear deformations
    --- = linear (affine) deformations

    xfm     = final-nlin  ******------> individual_input
    inv_xfm = final-nlin <******------  individual_input

    Takes a transformation (xfm) from a subject to a common/shared average,
    containing both lsq12 (scaling and shearing; the 6-parameter
    rotations/translations should not be included) and non-linear parts,
    and returns the determinants of both the (forward) nonlinear part of
    the xfm at the given fwhms and the determinants of the full (forward)
    transformation.  The inverse transform may optionally be specified to
    avoid its recomputation (e.g., when passing an inverted xfm to
    determinants_at_fwhms, specify the original here).
    """
    s = Stages()

    inv_xfms = [s.defer(invert_xfmhandler(xfm)) for xfm in xfms] if inv_xfms is None else inv_xfms

    fwhms = [float(x) for x in blur_fwhms.split(',')]

    df = pd.DataFrame([{"xfm" : xfm, "inv_xfm" : inv_xfm, "fwhm" : fwhm,
                        "nlin_det" : nlin_det, "log_nlin_det" : nlin_log_det,
                        "full_det" : full_det, "log_full_det" : full_log_det }
                       for fwhm in fwhms + [0]  # was: None, but this turns to NaN in Pandas ...
                       for xfm, inv_xfm in zip(xfms, inv_xfms)
                       for full_det_and_log_det in
                         [s.defer(det_and_log_det(displacement_grid=s.defer(minc_displacement(xfm)),
                                                  fwhm=fwhm,
                                                  annotation="_abs"))]
                       for full_det, full_log_det in [(full_det_and_log_det.det, full_det_and_log_det.log_det)]
                       for nlin_det_and_log_det in
                         [s.defer(det_and_log_det(displacement_grid=s.defer(nlin_displacement(xfm,
                                                                                              inv_xfm=inv_xfm)),
                                                  fwhm=fwhm,
                                                  annotation="_rel"))]
                       for nlin_det, nlin_log_det in [(nlin_det_and_log_det.det, nlin_det_and_log_det.log_det)]])
    # TODO this is terrible, and should probably be done with joins, but one gets the idea ...
    # TODO remove 'inv_xfm' column?
    # TODO the return of this function is 'everything', not really just 'determinants_at_fwhms' ...
    return Result(stages=s, output=df)
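# Shape of the returned table: one row per (fwhm, xfm) pair, including the
# unblurred fwhm=0 case, i.e. (len(fwhms) + 1) * len(xfms) rows with columns
# xfm, inv_xfm, fwhm, nlin_det, log_nlin_det, full_det, log_full_det; the
# full_* determinants carry the "_abs" annotation and the nlin_* ones "_rel".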
Example #54
0
def maget_mask(imgs : List[MincAtom], atlases, options):

    s = Stages()

    resample  = np.vectorize(mincresample_new, excluded={"extra_flags"})
    defer     = np.vectorize(s.defer)

    lsq12_conf = get_linear_configuration_from_options(options.maget.lsq12,
                                                       LinearTransType.lsq12,
                                                       options.registration.resolution)

    masking_nlin_hierarchy = get_nonlinear_configuration_from_options(options.maget.maget.masking_nlin_protocol,
                                                                      options.maget.maget.mask_method,
                                                                      options.registration.resolution)

    masking_alignments = pd.DataFrame({ 'img'   : img,
                                        'atlas' : atlas,
                                        'xfm'   : s.defer(lsq12_nlin(source=img, target=atlas,
                                                                     lsq12_conf=lsq12_conf,
                                                                     nlin_conf=masking_nlin_hierarchy,
                                                                     resample_source=False))}
                                      for img in imgs for atlas in atlases)
    # propagate a mask to each image using the above `alignments` as follows:
    # - for each image, voxel_vote on the masks propagated to that image to get a suitable mask
    # - run mincmath -clobber -mult <img> <voted_mask> to apply the mask to the files
    masked_img = (
        masking_alignments
        .assign(resampled_mask=lambda df: defer(resample(img=df.atlas.apply(lambda x: x.mask),
                                                         xfm=df.xfm.apply(lambda x: x.xfm),
                                                         like=df.img,
                                                         invert=True,
                                                         interpolation=Interpolation.nearest_neighbour,
                                                         postfix="-input-mask",
                                                         subdir="tmp",
                                                         # TODO annoying hack; fix mincresample(_mask) ...:
                                                         #new_name_wo_ext=df.apply(lambda row:
                                                         #    "%s_to_%s-input-mask" % (row.atlas.filename_wo_ext,
                                                         #                             row.img.filename_wo_ext),
                                                         #    axis=1),
                                                         extra_flags=("-keep_real_range",))))
        .groupby('img', sort=False, as_index=False)
        # sort=False: just for speed (might also need to implement more comparison methods on `MincAtom`s)
        .aggregate({'resampled_mask' : lambda masks: list(masks)})
        .rename(columns={"resampled_mask" : "resampled_masks"})
        .assign(voted_mask=lambda df: df.apply(axis=1,
                                               func=lambda row:
                                                 s.defer(voxel_vote(label_files=row.resampled_masks,
                                                                    name="%s_voted_mask" % row.img.filename_wo_ext,
                                                                    output_dir=os.path.join(row.img.output_sub_dir,
                                                                                            "tmp")))))
        .assign(masked_img=lambda df:
          df.apply(axis=1,
                 func=lambda row:
                   s.defer(mincmath(op="mult",
                                    # img must precede mask here
                                    # for output image range to be correct:
                                    vols=[row.img, row.voted_mask],
                                    new_name="%s_masked" % row.img.filename_wo_ext,
                                    subdir="resampled")))))  #['img']

    # resample the atlas images back to the input images:
    # (note: this doesn't modify `masking_alignments`, but only stages additional outputs)
    masking_alignments.assign(resampled_img=lambda df:
    defer(resample(img=df.atlas,
                   xfm=df.xfm.apply(lambda x: x.xfm),
                   subdir="tmp",
                   # TODO delete this stupid hack:
                   #new_name_wo_ext=df.apply(lambda row:
                   #  "%s_to_%s-resampled" % (row.atlas.filename_wo_ext,
                   #                          row.img.filename_wo_ext),
                   #                          axis=1),
                   like=df.img, invert=True)))

    # replace the table of alignments with a new one with masked images
    masking_alignments = (pd.merge(left=masking_alignments.assign(unmasked_img=lambda df: df.img),
                                   right=masked_img,
                                   on=["img"], how="right", sort=False)
                          .assign(img=lambda df: df.masked_img))

    return Result(stages=s, output=masking_alignments)
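# The np.vectorize(s.defer) trick above defers a whole pandas column of Results
# at once; a hedged minimal analogue of the pattern:
#   resample = np.vectorize(mincresample_new, excluded={"extra_flags"})
#   defer    = np.vectorize(s.defer)
#   df = df.assign(out=lambda df: defer(resample(img=df.img, xfm=df.xfm, like=df.like)))
# Note that np.vectorize is a convenience loop, not a performance optimization.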
Example #55
0
  def register(source: MincAtom,
               target: MincAtom,
               conf: ANTSConf,
               initial_source_transform: Optional[XfmAtom] = None,
               transform_name_wo_ext: str = None,
               generation: int = None,
               resample_source: bool = False,
               #resample_name_wo_ext: Optional[str] = None,
               resample_subdir: str = "resampled") -> Result[XfmHandler]:
    """
    ...
    transform_name_wo_ext -- to use for the output transformation (without the extension)
    generation            -- if provided, the transformation name will be:
                             source.filename_wo_ext + "_ANTS_nlin-" + generation
    resample_source       -- whether or not to resample the source file   
    
    Construct a single call to ANTS.
    Also does blurring according to the specified options
    since the cost function might use these.
    """
    s = Stages()

    if initial_source_transform is not None:
        raise ValueError("ANTs doesn't accept an initial transform")

    # if we resample the source, and place it in the "tmp" directory, we should do
    # the same with the transformation that is created:
    trans_output_dir = "transforms"
    if resample_source and resample_subdir == "tmp":
        trans_output_dir = "tmp"

    if transform_name_wo_ext:
        name = os.path.join(source.pipeline_sub_dir, source.output_sub_dir, trans_output_dir,
                            "%s.xfm" % (transform_name_wo_ext))
    elif generation is not None:
        name = os.path.join(source.pipeline_sub_dir, source.output_sub_dir, trans_output_dir,
                            "%s_ANTS_nlin-%s.xfm" % (source.filename_wo_ext, generation))
    else:
        name = os.path.join(source.pipeline_sub_dir, source.output_sub_dir, trans_output_dir,
                            "%s_ANTS_to_%s.xfm" % (source.filename_wo_ext, target.filename_wo_ext))
    out_xfm = XfmAtom(name=name, pipeline_sub_dir=source.pipeline_sub_dir, output_sub_dir=source.output_sub_dir)

    similarity_cmds = []       # type: List[str]
    similarity_inputs = set()  # type: Set[MincAtom]
    for sim_metric_conf in conf.sim_metric_confs:
        if sim_metric_conf.use_gradient_image:
            if sim_metric_conf.blur is not None:
                gradient_blur_resolution = sim_metric_conf.blur
            elif conf.file_resolution is not None:
                gradient_blur_resolution = conf.file_resolution
            else:
                raise ValueError("A similarity metric in the ANTS configuration "
                                 "wants to use the gradients, but I know neither the file resolution nor "
                                 "an intended nonnegative blur fwhm.")
            if gradient_blur_resolution <= 0:
                warnings.warn("Not blurring the gradients as this was explicitly disabled")
            src = s.defer(mincblur(source, fwhm=gradient_blur_resolution)).gradient
            dest = s.defer(mincblur(target, fwhm=gradient_blur_resolution)).gradient
        else:
            # these are not gradient image terms; only blur if explicitly specified:
            if sim_metric_conf.blur is not None and sim_metric_conf.blur > 0:
                src  = s.defer(mincblur(source, fwhm=sim_metric_conf.blur)).img
                dest = s.defer(mincblur(target, fwhm=sim_metric_conf.blur)).img
            else:
                src  = source
                dest = target

        similarity_inputs.add(src)
        similarity_inputs.add(dest)
        inner = ','.join([src.path, dest.path,
                          str(sim_metric_conf.weight), str(sim_metric_conf.radius_or_bins)])
        subcmd = "'" + "".join([sim_metric_conf.metric, '[', inner, ']']) + "'"
        similarity_cmds.extend(["-m", subcmd])
    stage = CmdStage(
        inputs=(source, target) + tuple(similarity_inputs) + cast(tuple, ((source.mask,) if source.mask else ())),
        # need to cast to tuple due to mypy bug; see mypy/issues/622
        outputs=(out_xfm,),
        cmd=['ANTS', '3',
             '--number-of-affine-iterations', '0']
            + similarity_cmds
            + ['-t', conf.transformation_model,
               '-r', conf.regularization,
               '-i', conf.iterations,
               '-o', out_xfm.path]
            + (['-x', source.mask.path] if conf.use_mask and source.mask else []))

    # see comments re: mincblur memory configuration
    stage.when_runnable_hooks.append(lambda st: set_memory(st, source=source, conf=conf,
                                                           mem_cfg=default_ANTS_mem_cfg))

    s.add(stage)
    resampled = (s.defer(mincresample(img=source, xfm=out_xfm, like=target,
                                      interpolation=Interpolation.sinc,
                                      #new_name_wo_ext=resample_name_wo_ext,
                                      subdir=resample_subdir))
                 if resample_source else None)  # type: Optional[MincAtom]
    return Result(stages=s,
                  output=XfmHandler(source=source,
                                    target=target,
                                    xfm=out_xfm,
                                    resampled=resampled))
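# For illustration, with a single cross-correlation similarity term the staged
# command might render roughly as (all values hypothetical, drawn from conf):
#   ANTS 3 --number-of-affine-iterations 0 \
#     -m 'CC[source_blurred.mnc,target_blurred.mnc,1,3]' \
#     -t SyN[0.4] -r Gauss[3,0] -i 100x100x100x20 -o out.xfm -x source_mask.mnc
# where -x is appended only when conf.use_mask is set and the source has a mask.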
Example #56
0
def two_level(grouped_files_df, options : TwoLevelConf):
    """
    grouped_files_df - must contain 'group':<any comparable, sortable type> and 'file':MincAtom columns
    """  # TODO weird naming since the grouped_files_df isn't a GroupBy object?  just files_df?
    s = Stages()

    if grouped_files_df.isnull().values.any():
        raise ValueError("NaN values in input dataframe; can't proceed")

    if options.mbm.lsq6.target_type == TargetType.bootstrap:
        # won't work since the second level part tries to get the resolution of *its* "first input file", which
        # hasn't been created.  We could instead pass in a resolution to the `mbm` function,
        # but instead disable for now:
        raise ValueError("Bootstrap model building currently doesn't work with this pipeline; "
                         "just specify an initial target instead")
    elif options.mbm.lsq6.target_type == TargetType.pride_of_models:
        pride_of_models_mapping = get_pride_of_models_mapping(pride_csv=options.mbm.lsq6.target_file,
                                                              output_dir=options.application.output_directory,
                                                              pipeline_name=options.application.pipeline_name)

    # FIXME this is the same as in the 'tamarack' except for names of arguments/enclosing variables
    def group_options(options, group):
        options = copy.deepcopy(options)

        if options.mbm.lsq6.target_type == TargetType.pride_of_models:

            targets = get_closest_model_from_pride_of_models(pride_of_models_dict=pride_of_models_mapping,
                                                             time_point=group)

            options.mbm.lsq6 = options.mbm.lsq6.replace(target_type=TargetType.initial_model,
                                                        target_file=targets.registration_standard.path)
        else:
            # this will ensure that all groups have the same resolution -- is it necessary?
            targets = registration_targets(lsq6_conf=options.mbm.lsq6,
                                           app_conf=options.application,
                                           first_input_file=grouped_files_df.file.iloc[0])

        resolution = (options.registration.resolution
                        or get_resolution_from_file(targets.registration_standard.path))
        options.registration = options.registration.replace(resolution=resolution)
        # no need to check common space settings here since they're turned off at the parser level
        # (a bit strange)
        return options

    first_level_results = (
        grouped_files_df
        .groupby('group', as_index=False, sort=False)       # the usual annoying pattern to do an aggregate with access
        .aggregate({ 'file' : lambda files: list(files) })  # to the groupby object's keys ... TODO: fix
        .rename(columns={ 'file' : "files" })
        .assign(build_model=lambda df:
                              df.apply(axis=1,
                                       func=lambda row:
                                              s.defer(mbm(imgs=row.files,
                                                          options=group_options(options, row.group),
                                                          prefix="%s" % row.group,
                                                          output_dir=os.path.join(
                                                              options.application.output_directory,
                                                              options.application.pipeline_name + "_first_level",
                                                              "%s_processed" % row.group)))))
        )
    # TODO replace .assign(...apply(...)...) with just an apply, producing a series right away?

    # FIXME right now the same options set is being used for both levels -- use options.first/second_level
    second_level_options = copy.deepcopy(options)
    second_level_options.mbm.lsq6 = second_level_options.mbm.lsq6.replace(run_lsq6=False)
    second_level_options.mbm.segmentation.run_maget = False
    second_level_options.mbm.maget.maget.mask_only = False
    second_level_options.mbm.maget.maget.mask = False

    # FIXME this is probably a hack -- instead add a --second-level-init-model option to specify which timepoint should be used
    # as the initial model in the second level ???  (at this point it doesn't matter due to lack of lsq6 ...)
    if second_level_options.mbm.lsq6.target_type == TargetType.pride_of_models:
        second_level_options.mbm.lsq6 = second_level_options.mbm.lsq6.replace(
            target_type=TargetType.target,  # target doesn't really matter as no lsq6 here, just used for resolution...
            target_file=list(pride_of_models_mapping.values())[0].registration_standard.path)

    # NOTE: running lsq6_nuc_inorm here doesn't work in general (but possibly with rotational minctracc)
    # since the native-space initial model is used, but our images are
    # already in standard space (as we resampled there after the 1st-level lsq6).
    # On the other hand, we might want to run it here (although of course NOT nuc/inorm!) in the future,
    # for instance given a 'pride' of models (one for each group).

    second_level_results = s.defer(mbm(imgs=first_level_results.build_model.map(lambda m: m.avg_img),
                                       options=second_level_options,
                                       prefix=os.path.join(options.application.output_directory,
                                                           options.application.pipeline_name + "_second_level")))

    # FIXME sadly, `mbm` doesn't return a pd.Series of xfms, so we don't have convenient indexing ...
    overall_xfms = [s.defer(concat_xfmhandlers([xfm_1, xfm_2]))
                    for xfms_1, xfm_2 in zip([r.xfms.lsq12_nlin_xfm for r in first_level_results.build_model],
                                             second_level_results.xfms.overall_xfm)
                    for xfm_1 in xfms_1]
    resample  = np.vectorize(mincresample_new, excluded={"extra_flags"})
    defer     = np.vectorize(s.defer)

    # TODO using the avg_img here is a bit clunky -- maybe better to propagate group indices ...
    # only necessary since `mbm` doesn't return DataFrames but namespaces ...
    first_level_determinants = pd.concat(list(first_level_results.build_model.apply(
                                                lambda x: x.determinants.assign(first_level_avg=x.avg_img))),
                                         ignore_index=True)

    resampled_determinants = (pd.merge(
        left=first_level_determinants,
        right=pd.DataFrame({'group_xfm' : second_level_results.xfms.overall_xfm})
              .assign(source=lambda df: df.group_xfm.apply(lambda r: r.source)),
        left_on="first_level_avg",
        right_on="source")
        .assign(resampled_log_full_det=lambda df: defer(resample(img=df.log_full_det,
                                                                 xfm=df.group_xfm.apply(lambda x: x.xfm),
                                                                 like=second_level_results.avg_img)),
                resampled_log_nlin_det=lambda df: defer(resample(img=df.log_nlin_det,
                                                                 xfm=df.group_xfm.apply(lambda x: x.xfm),
                                                                 like=second_level_results.avg_img))))
    # TODO only resamples the log determinants, but still a bit ugly ... abstract somehow?
    # TODO shouldn't be called resampled_determinants since this is basically the whole (first_level) thing ...

    inverted_overall_xfms = [s.defer(invert_xfmhandler(xfm)) for xfm in overall_xfms]

    overall_determinants = (s.defer(determinants_at_fwhms(
                                     xfms=inverted_overall_xfms,
                                     inv_xfms=overall_xfms,
                                     blur_fwhms=options.mbm.stats.stats_kernels))
                            .assign(overall_log_full_det=lambda df: df.log_full_det,
                                    overall_log_nlin_det=lambda df: df.log_nlin_det)
                            .drop(['log_full_det', 'log_nlin_det'], axis=1))

    # TODO return some MAGeT stuff from two_level function ??
    # FIXME running MAGeT from within the `two_level` function has the same problem as running it from within `mbm`:
    # it will now run when this pipeline is called from within another one (e.g., n-level), which will be
    # redundant, create filename clashes, etc. -- this should be moved to `two_level_pipeline`.
    # TODO do we need a `pride of atlases` for MAGeT in this pipeline ??
    # TODO at the moment MAGeT is run within the MBM code, but it could be disabled there and run here
    #if options.mbm.segmentation.run_maget:
    #    maget_options = copy.deepcopy(options)
    #    maget_options.maget = options.mbm.maget
    #    fixup_maget_options(maget_options=maget_options.maget,
    #                        lsq12_options=maget_options.mbm.lsq12,
    #                        nlin_options=maget_options.mbm.nlin)
    #    maget_options.maget.maget.mask = maget_options.maget.maget.mask_only = False   # already done above
    #    del maget_options.mbm

        # again using a weird combination of vectorized and loop constructs ...
    #    s.defer(maget([xfm.resampled for _ix, m in first_level_results.iterrows()
    #                   for xfm in m.build_model.xfms.rigid_xfm],
    #                  options=maget_options,
    #                  prefix="%s_MAGeT" % options.application.pipeline_name,
    #                  output_dir=os.path.join(options.application.output_directory,
    #                                          options.application.pipeline_name + "_processed")))

    # TODO resampling to database model ...

    # TODO there should be one table containing all determinants (first level, overall, resampled first level) for each file
    # and another containing some groupwise information (averages and transforms to the common average)
    return Result(stages=s, output=Namespace(first_level_results=first_level_results,
                                             resampled_determinants=resampled_determinants,
                                             overall_determinants=overall_determinants))
Example #57
0
def tamarack(imgs : pd.DataFrame, options):
    # columns of the input df: `img` : MincAtom, `timept` : number, ...
    # columns of the pride of models : 'timept' : number, 'model' : MincAtom
    s = Stages()

    # TODO some assertions that the pride_of_models, if provided, is correct, and that this is intended target type

    def group_options(options, timepoint):
        options = copy.deepcopy(options)

        if options.mbm.lsq6.target_type == TargetType.pride_of_models:
            targets = get_closest_model_from_pride_of_models(pride_of_models_dict=get_pride_of_models_mapping(
                                                                 pride_csv=options.mbm.lsq6.target_file,
                                                                 output_dir=options.application.output_directory,
                                                                 pipeline_name=options.application.pipeline_name),
                                                             time_point=timepoint)

            options.mbm.lsq6 = options.mbm.lsq6.replace(target_type=TargetType.initial_model,
                                                        target_file=targets.registration_standard.path)

            # FIXME use of registration_standard here is quite wrong ...
            # part of the trouble is that mbm calls registration_targets itself,
            # so we can't send this RegistrationTargets to `mbm` directly ...
            # one option: add yet another optional arg to `mbm` ...
        else:
            targets = s.defer(registration_targets(lsq6_conf=options.mbm.lsq6,
                                           app_conf=options.application, reg_conf=options.registration,
                                           first_input_file=imgs.filename.iloc[0]))

        resolution = (options.registration.resolution or
                        get_resolution_from_file(targets.registration_standard.path))

        # This must happen after calling registration_targets otherwise it will resample to options.registration.resolution
        options.registration = options.registration.replace(resolution=resolution)

        return options

    # build all first-level models:
    first_level_results = (
        imgs  # TODO 'group' => 'timept' ?
        .groupby('group', as_index=False)       # the usual annoying pattern to do an aggregate with access
        .aggregate({ 'file' : lambda files: list(files) })  # to the groupby object's keys ... TODO: fix
        .rename(columns={ 'file' : "files" })
        .assign(options=lambda df: df.apply(axis=1, func=lambda row: group_options(options, row.group)))
        .assign(build_model=lambda df:
                              df.apply(axis=1,
                                       func=lambda row: s.defer(
                                           mbm(imgs=row.files,
                                               options=row.options,
                                               prefix="%s" % row.group,
                                               output_dir=os.path.join(
                                               options.application.output_directory,
                                               options.application.pipeline_name + "_first_level",
                                               "%s_processed" % row.group)))))
        .sort_values(by='group')

        )

    if all(first_level_results.options.map(lambda opts: opts.registration.resolution)
             == first_level_results.options.iloc[0].registration.resolution):
        options.registration = options.registration.replace(
            resolution=first_level_results.options.iloc[0].registration.resolution)
    else:
        raise ValueError("some first-level models are run at different resolutions, possibly not what you want ...")

    # construction of the overall inter-average transforms will be done iteratively (for efficiency/aesthetics),
    # which doesn't really fit the DataFrame mold ...


    full_hierarchy = get_nonlinear_configuration_from_options(
      nlin_protocol=options.mbm.nlin.nlin_protocol,
      reg_method=options.mbm.nlin.reg_method,
      file_resolution=options.registration.resolution)

    # FIXME no good can come of this
    nlin_protocol = full_hierarchy.confs[-1] if isinstance(full_hierarchy, MultilevelANTSConf) else full_hierarchy
    # first register consecutive averages together:
    average_registrations = (
        first_level_results[:-1]
            .assign(next_model=list(first_level_results[1:].build_model))
            # TODO: we should be able to do lsq6 registration here as well!
            .assign(xfm=lambda df: df.apply(axis=1, func=lambda row: s.defer(
                                                      lsq12_nlin(source=row.build_model.avg_img,
                                                                 target=row.next_model.avg_img,
                                                                 lsq12_conf=get_linear_configuration_from_options(
                                                                     options.mbm.lsq12,
                                                                     transform_type=LinearTransType.lsq12,
                                                                     file_resolution=options.registration.resolution),
                                                                 nlin_conf=nlin_protocol)))))

    # now compose the above transforms to produce transforms from each average to the common average:
    common_time_pt = options.tamarack.common_time_pt
    common_model   = first_level_results[first_level_results.group == common_time_pt].iloc[0].build_model.avg_img
    #common = average_registrations[average_registrations.group == common_time_pt].iloc[0]
    before = average_registrations[average_registrations.group <  common_time_pt]  # asymmetry in before/after since
    after  = average_registrations[average_registrations.group >= common_time_pt]  # we used `next_`, not `previous_`

    # compose 1st and 2nd level transforms and resample into the common average space:
    def suffixes(xs):
        if len(xs) == 0:
            return [[]]
        else:
            ys = suffixes(xs[1:])
            return [[xs[0]] + ys[0]] + ys


    def prefixes(xs):
        if len(xs) == 0:
            return [[]]
        else:
            return [[]] + [[xs[0]] + ys for ys in prefixes(xs[1:])]
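    # Worked example of the two helpers above:
    #   suffixes([a, b, c]) == [[a, b, c], [b, c], [c], []]
    #   prefixes([a, b, c]) == [[], [a], [a, b], [a, b, c]]
    # Rows before the common time point thus get chains of consecutive
    # registrations running forward to the common model, the common row gets
    # None, and later rows get chains running from the common model onward,
    # which are then inverted ("%s_from_common") in xfms_to_common below.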

    xfms_to_common = (
        first_level_results
        .assign(uncomposed_xfms=suffixes(list(before.xfm))[:-1] + [None] + prefixes(list(after.xfm))[1:])
        .assign(xfm_to_common=lambda df: df.apply(axis=1, func=lambda row:
                                ((lambda x: s.defer(invert_xfmhandler(x)) if row.group >= common_time_pt else x)
                                   (s.defer(concat_xfmhandlers(row.uncomposed_xfms,
                                                               name=("%s_to_common"
                                                                     if row.group < common_time_pt
                                                                     else "%s_from_common") % row.group))))
                                  if row.uncomposed_xfms is not None else None))
        .drop('uncomposed_xfms', axis=1))  # TODO None => identity??

    # TODO indexing here is not good ...
    first_level_determinants = pd.concat(list(first_level_results.build_model.apply(
                                                lambda x: x.determinants.assign(first_level_avg=x.avg_img))),
                                         ignore_index=True)

    resampled_determinants = (
        pd.merge(left=first_level_determinants,
                 right=xfms_to_common.assign(source=lambda df: df.xfm_to_common.apply(
                                                              lambda x:
                                                                x.source if x is not None else None)),
                 left_on="first_level_avg", right_on='source')
        .assign(resampled_log_full_det=lambda df: df.apply(axis=1, func=lambda row:
                                         s.defer(mincresample_new(img=row.log_full_det,
                                                                  xfm=row.xfm_to_common.xfm,
                                                                  like=common_model))
                                                 if row.xfm_to_common is not None else row.log_full_det),
                resampled_log_nlin_det=lambda df: df.apply(axis=1, func=lambda row:
                                         s.defer(mincresample_new(img=row.log_nlin_det,
                                                                  xfm=row.xfm_to_common.xfm,
                                                                  like=common_model))
                                                 if row.xfm_to_common is not None else row.log_nlin_det))
    )

    inverted_overall_xfms = pd.Series({ xfm : (s.defer(concat_xfmhandlers([xfm, row.xfm_to_common]))
                                                 if row.xfm_to_common is not None else xfm)
                                        for _ix, row in xfms_to_common.iterrows()
                                        for xfm in row.build_model.xfms.lsq12_nlin_xfm })

    overall_xfms = inverted_overall_xfms.apply(lambda x: s.defer(invert_xfmhandler(x)))

    overall_determinants = s.defer(determinants_at_fwhms(xfms=overall_xfms,
                                                         blur_fwhms=options.mbm.stats.stats_kernels,
                                                         inv_xfms=inverted_overall_xfms))


    # TODO turn off bootstrap as with two-level code?

    # TODO combine into one data frame
    return Result(stages=s, output=Namespace(first_level_results=first_level_results,
                                             overall_determinants=overall_determinants,
                                             resampled_determinants=resampled_determinants.drop(
                                                 ['options'],
                                                 axis=1)))
Example #58
0
def mbm(imgs : List[MincAtom], options : MBMConf, prefix : str, output_dir : str = ""):

    # TODO could also allow pluggable pipeline parts e.g. LSQ6 could be substituted out for the modified LSQ6
    # for the kidney tips, etc...

    # TODO this is tedious and annoyingly similar to the registration chain ...
    lsq6_dir  = os.path.join(output_dir, prefix + "_lsq6")
    lsq12_dir = os.path.join(output_dir, prefix + "_lsq12")
    nlin_dir  = os.path.join(output_dir, prefix + "_nlin")

    s = Stages()

    if len(imgs) == 0:
        raise ValueError("Please, some files!")

    # FIXME: why do we have to call registration_targets *outside* of lsq6_nuc_inorm? is it just because of the extra
    # options required?  Also, shouldn't options.registration be a required input (as it contains `input_space`) ...?
    targets = registration_targets(lsq6_conf=options.mbm.lsq6,
                                   app_conf=options.application,
                                   first_input_file=imgs[0].path)

    # TODO this is quite tedious and duplicates stuff in the registration chain ...
    resolution = (options.registration.resolution or
                  get_resolution_from_file(targets.registration_standard.path))
    options.registration = options.registration.replace(resolution=resolution)

    # FIXME it probably makes most sense if the lsq6 module itself (even within lsq6_nuc_inorm) handles the run_lsq6
    # setting (via use of the identity transform) since then this doesn't have to be implemented for every pipeline
    if options.mbm.lsq6.run_lsq6:
        lsq6_result = s.defer(lsq6_nuc_inorm(imgs=imgs,
                                             resolution=resolution,
                                             registration_targets=targets,
                                             lsq6_dir=lsq6_dir,
                                             lsq6_options=options.mbm.lsq6))
    else:
        # TODO don't actually do this resampling if not required (i.e., if the imgs already have the same grids)
        identity_xfm = s.defer(param2xfm(out_xfm=FileAtom(name="identity.xfm")))
        lsq6_result  = [XfmHandler(source=img, target=img, xfm=identity_xfm,
                                   resampled=s.defer(mincresample_new(img=img,
                                                                      like=targets.registration_standard,
                                                                      xfm=identity_xfm)))
                        for img in imgs]
    # what about running nuc/inorm without a linear registration step??

    full_hierarchy = get_nonlinear_configuration_from_options(nlin_protocol=options.mbm.nlin.nlin_protocol,
                                                              reg_method=options.mbm.nlin.reg_method,
                                                              file_resolution=resolution)

    lsq12_nlin_result = s.defer(lsq12_nlin_build_model(imgs=[xfm.resampled for xfm in lsq6_result],
                                                       resolution=resolution,
                                                       lsq12_dir=lsq12_dir,
                                                       nlin_dir=nlin_dir,
                                                       nlin_prefix=prefix,
                                                       lsq12_conf=options.mbm.lsq12,
                                                       nlin_conf=full_hierarchy))

    inverted_xfms = [s.defer(invert_xfmhandler(xfm)) for xfm in lsq12_nlin_result.output]

    determinants = s.defer(determinants_at_fwhms(
                             xfms=inverted_xfms,
                             inv_xfms=lsq12_nlin_result.output,
                             blur_fwhms=options.mbm.stats.stats_kernels))

    overall_xfms = [s.defer(concat_xfmhandlers([rigid_xfm, lsq12_nlin_xfm]))
                    for rigid_xfm, lsq12_nlin_xfm in zip(lsq6_result, lsq12_nlin_result.output)]

    output_xfms = (pd.DataFrame({ "rigid_xfm"      : lsq6_result,  # maybe don't return this if LSQ6 not run??
                                  "lsq12_nlin_xfm" : lsq12_nlin_result.output,
                                  "overall_xfm"    : overall_xfms }))
    # we could `merge` the determinants with this table, but preserving information would cause lots of duplication
    # of the transforms (or storing determinants in more columns, but iterating over dynamically known columns
    # seems a bit odd ...)
    # TODO transpose these fields?
    # TODO returning avg_img inside this table would be inconsistent w/ WithAvgImgs[...]-style outputs
    # TODO add more of lsq12_nlin_result?

    # FIXME: this needs to go outside of the `mbm` function to avoid being run from within other pipelines (or
    # those other pipelines need to turn off this option)
    # TODO return some MAGeT stuff from MBM function ??
    # if options.mbm.mbm.run_maget:
    #     import copy
    #     maget_options = copy.deepcopy(options)  #Namespace(maget=options)
    #     #maget_options
    #     #maget_options.maget = maget_options.mbm
    #     #maget_options.execution = options.execution
    #     #maget_options.application = options.application
    #     maget_options.maget = options.mbm.maget
    #     del maget_options.mbm
    #
    #     s.defer(maget([xfm.resampled for xfm in lsq6_result],
    #                   options=maget_options,
    #                   prefix="%s_MAGeT" % prefix,
    #                   output_dir=os.path.join(output_dir, prefix + "_processed")))

    # should also move outside `mbm` function ...
    #if options.mbm.thickness.run_thickness:
    #    if not options.mbm.segmentation.run_maget:
    #        warnings.warn("MAGeT files (atlases, protocols) are needed to run thickness calculation.")
    #    # run MAGeT to segment the nlin average:
    #    import copy
    #    maget_options = copy.deepcopy(options)  #Namespace(maget=options)
    #    maget_options.maget = options.mbm.maget
    #    del maget_options.mbm
    #    segmented_avg = s.defer(maget(imgs=[lsq12_nlin_result.avg_img],
    #                                  options=maget_options,
    #                                  output_dir=os.path.join(options.application.output_directory,
    #                                                          prefix + "_processed"),
    #                                  prefix="%s_thickness_MAGeT" % prefix)).ix[0].img
    #    thickness = s.defer(cortical_thickness(xfms=pd.Series(inverted_xfms), atlas=segmented_avg,
    #                                           label_mapping=FileAtom(options.mbm.thickness.label_mapping),
    #                                           atlas_fwhm=0.56, thickness_fwhm=0.56))  # TODO magic fwhms
    #    # TODO write CSV -- should `cortical_thickness` do this/return a table?


    # FIXME: this needs to go outside of the `mbm` function to avoid being run from within other pipelines (or
    # those other pipelines need to turn off this option)
    if options.mbm.common_space.do_common_space_registration:
        warnings.warn("This feature is experimental ...")
        if not options.mbm.common_space.common_space_model:
            raise ValueError("No common space template provided!")
        # TODO allow lsq6 registration as well ...
        common_space_model = MincAtom(options.mbm.common_space.common_space_model,
                                      pipeline_sub_dir=os.path.join(options.application.output_directory,
                                                         options.application.pipeline_name + "_processed"))
        # TODO allow different lsq12/nlin config params than the ones used in MBM ...
        # WEIRD ... see comment in lsq12_nlin code ...
        nlin_conf  = full_hierarchy.confs[-1] if isinstance(full_hierarchy, MultilevelMincANTSConf) else full_hierarchy
        # also weird that we need to call get_linear_configuration_from_options here ... ?
        lsq12_conf = get_linear_configuration_from_options(conf=options.mbm.lsq12,
                                                           transform_type=LinearTransType.lsq12,
                                                           file_resolution=resolution)
        xfm_to_common = s.defer(lsq12_nlin(source=lsq12_nlin_result.avg_img, target=common_space_model,
                                           lsq12_conf=lsq12_conf, nlin_conf=nlin_conf,
                                           resample_source=True))

        model_common = s.defer(mincresample_new(img=lsq12_nlin_result.avg_img,
                                                xfm=xfm_to_common.xfm, like=common_space_model,
                                                postfix="_common"))

        overall_xfms_common = [s.defer(concat_xfmhandlers([rigid_xfm, nlin_xfm, xfm_to_common]))
                               for rigid_xfm, nlin_xfm in zip(lsq6_result, lsq12_nlin_result.output)]

        xfms_common = [s.defer(concat_xfmhandlers([nlin_xfm, xfm_to_common]))
                       for nlin_xfm in lsq12_nlin_result.output]

        output_xfms = output_xfms.assign(xfm_common=xfms_common, overall_xfm_common=overall_xfms_common)

        log_nlin_det_common, log_full_det_common = [dets.map(lambda d:
                                                      s.defer(mincresample_new(
                                                        img=d,
                                                        xfm=xfm_to_common.xfm,
                                                        like=common_space_model,
                                                        postfix="_common",
                                                        extra_flags=("-keep_real_range",),
                                                        interpolation=Interpolation.nearest_neighbour)))
                                                    for dets in (determinants.log_nlin_det, determinants.log_full_det)]

        determinants = determinants.assign(log_nlin_det_common=log_nlin_det_common,
                                           log_full_det_common=log_full_det_common)

    output = Namespace(avg_img=lsq12_nlin_result.avg_img, xfms=output_xfms, determinants=determinants)

    if options.mbm.common_space.do_common_space_registration:
        output.model_common = model_common

    return Result(stages=s, output=output)
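# A hedged usage sketch (paths hypothetical): `mbm` is normally driven from
# mbm_pipeline above, but can also be deferred directly inside another pipeline:
#   s = Stages()
#   imgs = [MincAtom(f, pipeline_sub_dir="out/mbm_processed") for f in files]
#   res = s.defer(mbm(imgs=imgs, options=options, prefix="mbm", output_dir="out"))
#   # res.avg_img : population average; res.xfms / res.determinants : per-subject tables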