Example no. 1
def process_atlas_files(filenames: List[str],
                        pipeline_sub_dir: str) -> pd.Series:

    suffixes = ["_average.mnc", "_mask.mnc", "_labels.mnc"] + [".mnc"]  # N.B.: order matters ...

    d = defaultdict(dict)  # TODO: rename `d`
    for filename in filenames:
        suffix = find_by(filename.endswith, suffixes)
        base = filename[:-len(suffix)]  # :-l
        d[base][suffix] = filename

    grouped_atlas_files = {}
    for group, files in d.items():
        img_file = files.get("_average.mnc") or files.get(".mnc")  # FIXME: is this OK when just _mask, _labels exist?
        mask_file = files.get("_mask.mnc")
        label_file = files.get("_labels.mnc")
        if len(files) > 3 or not (img_file and mask_file and label_file):
            raise ValueError(
                "atlas filename conventions are wrong for atlas '%s'" % group)
        else:
            # TODO: screw around with filenames/directories as usual
            grouped_atlas_files[group] = MincAtom(
                name=img_file,
                mask=MincAtom(name=mask_file,
                              pipeline_sub_dir=pipeline_sub_dir),
                labels=MincAtom(name=label_file,
                                pipeline_sub_dir=pipeline_sub_dir),
                pipeline_sub_dir=pipeline_sub_dir)

    return pd.Series(list(grouped_atlas_files.values()))
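
A minimal, self-contained sketch of the suffix-grouping logic above (find_by is reimplemented here purely for illustration; the MincAtom construction is omitted since it needs the rest of the pipeline machinery):

from collections import defaultdict

def find_by(pred, xs):
    # hypothetical stand-in for the library's find_by: first item satisfying pred
    for x in xs:
        if pred(x):
            return x
    raise ValueError("no matching item")

suffixes = ["_average.mnc", "_mask.mnc", "_labels.mnc"] + [".mnc"]  # order matters
filenames = ["atlas1_average.mnc", "atlas1_mask.mnc", "atlas1_labels.mnc"]

d = defaultdict(dict)
for filename in filenames:
    suffix = find_by(filename.endswith, suffixes)
    d[filename[:-len(suffix)]][suffix] = filename

# d == {'atlas1': {'_average.mnc': 'atlas1_average.mnc',
#                  '_mask.mnc':    'atlas1_mask.mnc',
#                  '_labels.mnc':  'atlas1_labels.mnc'}}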
Example no. 2
def mincblob(op : str, grid : MincAtom, subdir : str = "tmp") -> Result[MincAtom]:
    """
    Low-level mincblob wrapper with the one exception being the determinant option. By
    default the inner clockwork of mincblob subtracts 1 from all determinant values that
    are being calculated. As such, 1 needs to be added to the result of the mincblob call.
    We will do that here, because it makes most sense here.
    >>> stages = mincblob('determinant', MincAtom("/images/img_1.mnc", pipeline_sub_dir="/tmp")).stages
    >>> [s.render() for s in stages]
    ['mincblob -clobber -determinant /images/img_1.mnc /tmp/img_1/img_1_determinant.mnc']
    """
    if op not in ["determinant", "trace", "translation", "magnitude"]:
        raise ValueError('mincblob: invalid operation %s' % op)

    # if we are calculating the determinant, the first file produced is a temp file:
    if op == "determinant":
        out_file = grid.newname_with_suffix("_temp_det", subdir=subdir)
    else:
        out_file = grid.newname_with_suffix('_' + op, subdir=subdir)

    stage = CmdStage(inputs=(grid,), outputs=(out_file,),
                     cmd=['mincblob', '-clobber', '-' + op, grid.path, out_file.path])

    s = Stages([stage])
    # now create the proper determinant if that's what was asked for
    if op == "determinant":
        result_file = s.defer(mincmath(op='add',
                                       const=1,
                                       vols=[out_file],
                                       subdir=subdir,
                                       new_name=grid.filename_wo_ext + "_det"))
    else:
        result_file = out_file

    return Result(stages=s, output=result_file)
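
A hedged usage sketch, composing mincblob into a larger stage set via s.defer as elsewhere in these examples (the grid atom here is hypothetical):

s = Stages()
grid = MincAtom("/images/grid.mnc", pipeline_sub_dir="/tmp")  # hypothetical input grid
det = s.defer(mincblob('determinant', grid))
# per the docstring above, `det` is the corrected determinant: mincblob's raw
# output plus 1, added back by the deferred mincmath stage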
Example no. 3
def NLIN_pipeline(options):

    if options.application.files is None:
        raise ValueError("Please, some files! (or try '--help')")  # TODO make a util procedure for this

    output_dir    = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    # TODO this is tedious and annoyingly similar to the registration chain and MBM and LSQ6 ...
    processed_dir = os.path.join(output_dir, pipeline_name + "_processed")
    nlin_dir      = os.path.join(output_dir, pipeline_name + "_nlin")

    resolution = (options.registration.resolution  # TODO does using the finest resolution here make sense?
                  or min([get_resolution_from_file(f) for f in options.application.files]))

    imgs = [MincAtom(f, pipeline_sub_dir=processed_dir) for f in options.application.files]

    # determine NLIN settings by overriding defaults with any settings
    # present in the protocol file, if one exists.
    # TODO: could add a hook to print a message announcing completion and output files,
    # and add more stages here to make a CSV

    initial_target_mask = MincAtom(options.nlin.target_mask) if options.nlin.target_mask else None
    initial_target = MincAtom(options.nlin.target, mask=initial_target_mask)

    full_hierarchy = get_nonlinear_configuration_from_options(nlin_protocol=options.nlin.nlin_protocol,
                                                              flag_nlin_protocol=next(iter(options.nlin.flags_.nlin_protocol)),
                                                              reg_method=options.nlin.reg_method,
                                                              file_resolution=resolution)

    s = Stages()

    nlin_result = s.defer(nlin_build_model(imgs, initial_target=initial_target, conf=full_hierarchy, nlin_dir=nlin_dir))

    # TODO return these?
    inverted_xfms = [s.defer(invert_xfmhandler(xfm)) for xfm in nlin_result.output]

    if options.stats.calc_stats:
        # TODO: put the stats part behind a flag ...

        determinants = [s.defer(determinants_at_fwhms(
                                  xfm=inv_xfm,
                                  inv_xfm=xfm,
                                  blur_fwhms=options.stats.stats_kernels))
                        for xfm, inv_xfm in zip(nlin_result.output, inverted_xfms)]

        return Result(stages=s,
                      output=Namespace(nlin_xfms=nlin_result,
                                       avg_img=nlin_result.avg_img,
                                       determinants=determinants))
    else:
        # there's no consistency in what gets returned, yikes ...
        return Result(stages=s, output=Namespace(nlin_xfms=nlin_result, avg_img=nlin_result.avg_img))
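
Since the shape of `output` depends on calc_stats (as the final comment laments), a caller sketch that tolerates both shapes, using only attribute names from the code above:

res = NLIN_pipeline(options)
avg = res.output.avg_img                           # always present
dets = getattr(res.output, "determinants", None)   # present only when options.stats.calc_stats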
Example no. 4
def get_imgs(options):
    if options.csv_file and options.files:
        raise ValueError("both --csv-file and --files specified ...")

    if options.csv_file:
        try:
            csv = pd.read_csv(
                options.csv_file, index_col=False
            )  # index_col=False, since index inference is just weird and may cause the 'wrong' cols to be read!
            # TODO turn on index_col=False in other read_csv calls!
        except Exception:  # avoid a bare except; we re-raise below anyway
            warnings.warn(
                "couldn't read csv ... did you supply a `file` column?")
            raise
        # FIXME check `file` column is present ...
        # TODO check for zero length file ...

        csv_base = os.path.dirname(
            options.csv_file) if not options.csv_paths_relative_to_wd else ""

        if hasattr(csv, 'mask_file'):
            masks = [
                MincAtom(os.path.join(csv_base, mask.strip()),
                         pipeline_sub_dir=os.path.join(
                             options.output_directory, options.pipeline_name +
                             "_processed")) if isinstance(mask, str) else
                None  # better way to handle missing (nan) values?
                for mask in csv.mask_file
            ]
        else:
            masks = [None] * len(csv.file)

        imgs = [
            MincAtom(os.path.join(csv_base, name.strip()),
                     mask=mask,
                     pipeline_sub_dir=os.path.join(
                         options.output_directory,
                         options.pipeline_name + "_processed"))
            # TODO does anything break if we make imgs a pd.Series?
            for name, mask in zip(csv.file, masks)
        ]
    elif options.files:
        imgs = [
            MincAtom(name,
                     pipeline_sub_dir=os.path.join(
                         options.output_directory,
                         options.pipeline_name + "_processed"))
            for name in options.files
        ]
    else:
        raise ValueError("No images supplied")
    return imgs
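
For reference, the CSV consumed by the --csv-file branch needs at least a `file` column, with an optional `mask_file` column; a minimal, hypothetical input table:

import pandas as pd

# missing masks may appear as NaN, which the code above maps to None
pd.DataFrame({"file": ["img_1.mnc", "img_2.mnc"],
              "mask_file": ["img_1_mask.mnc", None]}).to_csv("inputs.csv", index=False)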
Example no. 5
def reconstitute_laplacian_grid(cortex  : MincAtom,
                                grid    : MincAtom,
                                midline : MincAtom) -> Result[MincAtom]:
    output_grid = grid.newname_with_suffix("_reconstituted")
    stage = CmdStage(inputs=(cortex, grid, midline), outputs=(output_grid,),
                     cmd=["reconstitute_laplacian_grid", midline.path, cortex.path, grid.path, output_grid.path])
    return Result(stages=Stages([stage]), output=output_grid)
def tamarack_pipeline(options):

    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name
    #processed_dir = os.path.join(output_dir, pipeline_name + "_processed")
    first_level_dir = os.path.join(output_dir, pipeline_name + "_first_level")

    s = Stages()

    with open(options.application.csv_file, 'r') as f:
        files_df = (pd.read_csv(filepath_or_buffer=f,
                                usecols=['group', 'filename'])
                    .assign(file=lambda df: df.apply(
                        axis="columns",
                        func=lambda r: MincAtom(
                            r.filename.strip(),
                            pipeline_sub_dir=os.path.join(
                                first_level_dir,
                                "%s_processed" % r.group.strip())))))

    check_MINC_input_files(files_df.file.apply(lambda img: img.path))

    #grouped_files_df = pd.DataFrame({'file' : pd.concat([imgs])}).assign(group=lambda df: df.index)

    tamarack_result = s.defer(tamarack(files_df, options=options))

    tamarack_result.first_level_results.applymap(maybe_deref_path).to_csv(
        "first_level_results.csv", index=False)
    tamarack_result.resampled_determinants.applymap(maybe_deref_path).to_csv(
        "resampled_determinants.csv", index=False)
    tamarack_result.overall_determinants.applymap(maybe_deref_path).to_csv(
        "overall_determinants.csv", index=False)

    return Result(stages=s, output=tamarack_result)
Example no. 7
def LSQ12_pipeline(options):

    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    # TODO this is tedious and annoyingly similar to the registration chain and MBM and LSQ6 ...
    processed_dir = os.path.join(output_dir, pipeline_name + "_processed")
    lsq12_dir = os.path.join(output_dir, pipeline_name + "_lsq12")

    resolution = (options.registration.resolution  # TODO does using the finest resolution here make sense?
                  or min([get_resolution_from_file(f) for f in options.application.files]))

    imgs = [
        MincAtom(f, pipeline_sub_dir=processed_dir)
        for f in options.application.files
    ]

    # determine LSQ12 settings by overriding defaults with any settings
    # present in the protocol file, if one exists.
    # TODO: could add a hook to print a message announcing completion and output files,
    # and add more stages here to make a CSV

    return lsq12_pairwise(imgs,
                          lsq12_conf=options.lsq12,
                          lsq12_dir=lsq12_dir,
                          resolution=resolution)
Example no. 8
def get_imgs(options):
    if options.files:
        raise ValueError("you used --files; please use --csv-file")

    csv = pd.read_csv(options.csv_file, dtype='str', index_col=False)
    csv_base = os.path.dirname(options.csv_file)


    csv = csv.assign(anatomical_MincAtom = lambda df: df["anatomical"].apply(lambda file:
                        MincAtom(os.path.join(csv_base, file.strip()),
                                 pipeline_sub_dir=os.path.join(options.output_directory,
                                                               options.pipeline_name + "_processed"))),
                     count_MincAtom = lambda df: df["count"].apply(lambda file:
                        MincAtom(os.path.join(csv_base, file.strip()),
                                 pipeline_sub_dir=os.path.join(options.output_directory,
                                                               options.pipeline_name + "_processed"))))
    return csv
Example no. 9
def atlases_from_csv(atlas_csv: str, pipeline_sub_dir: str) -> pd.Series:
    d = os.path.dirname(atlas_csv)
    df = (pd.read_csv(atlas_csv, usecols=[
        "file", "mask_file", "label_file"
    ]).apply(axis=1,
             func=lambda row: MincAtom(
                 name=os.path.join(d, row.file),
                 pipeline_sub_dir=pipeline_sub_dir,
                 mask=MincAtom(os.path.join(d, row.mask_file),
                               pipeline_sub_dir=pipeline_sub_dir),
                 labels=MincAtom(os.path.join(d, row.label_file),
                                 pipeline_sub_dir=pipeline_sub_dir))))

    #if np.isnan(df).any().any():
    #    raise ValueError("missing values in atlas CSV, currently not supported")

    return df
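
Per the usecols above, the atlas CSV must provide `file`, `mask_file`, and `label_file` columns; a hypothetical example:

import pandas as pd

pd.DataFrame({"file":       ["atlas1.mnc"],
              "mask_file":  ["atlas1_mask.mnc"],
              "label_file": ["atlas1_labels.mnc"]}).to_csv("atlases.csv", index=False)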
Example no. 10
def smooth_vector(source: MincAtom, fwhm: float) -> Result[MincAtom]:
    outf = source.newname_with_suffix(
        "_smooth_fwhm%s" % fwhm, subdir="tmp")  # TODO smooth_displacement_?
    cmd = [
        'smooth_vector', '--clobber', '--filter',
        '--fwhm=%s' % fwhm, source.path, outf.path
    ]
    stage = CmdStage(inputs=(source, ), outputs=(outf, ), cmd=cmd)
    return Result(stages=Stages([stage]), output=outf)
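
Given the cmd list above, a call along these lines should render roughly as follows (the output path is left vague, since it depends on how newname_with_suffix lays out subdirectories):

res = smooth_vector(MincAtom("/images/grid.mnc", pipeline_sub_dir="/scratch"), fwhm=0.5)
# expected command, modulo the exact output layout:
#   smooth_vector --clobber --filter --fwhm=0.5 /images/grid.mnc <...>/grid_smooth_fwhm0.5.mnc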
Example no. 11
def reconstitute_laplacian_grid(cortex: MincAtom, grid: MincAtom,
                                midline: MincAtom) -> Result[MincAtom]:
    output_grid = grid.newname_with_suffix("_reconstituted")
    stage = CmdStage(inputs=(cortex, grid, midline),
                     outputs=(output_grid, ),
                     cmd=[
                         "reconstitute_laplacian_grid", midline.path,
                         cortex.path, grid.path, output_grid.path
                     ])
    return Result(stages=Stages([stage]), output=output_grid)
Example no. 12
def minclaplace(input_grid        : MincAtom,
                extra_args        : List[str] = [],
                solution_vertices : Optional[FileAtom] = None,
                create_surface    : bool = True,) -> Result[FileAtom]:
    # TODO the ambiguity of the return type is slightly annoying ...
    # best to create separate minclaplace_at_vertices for the case when `--solve-at-vertices` is used?
    solved = input_grid.newname_with_suffix("_solved", ext=".txt" if solution_vertices else ".mnc")
    if create_surface:
        out_surface = input_grid.newname_with_suffix("_surface", ext=".obj")
    stage = CmdStage(inputs=(input_grid,), outputs=(solved,) + ((out_surface,) if create_surface else ()),
                     cmd=["minclaplace"]
                         + (["--solve-at-vertices=%s" % solution_vertices.path]
                            if solution_vertices is not None else [])
                         + (["--create-surface=%s" % out_surface.path] if create_surface else [])
                         + extra_args
                         + [input_grid.path, solved.path])

    return Result(stages=Stages([stage]),
                  output=Namespace(solved=solved, surface=out_surface)
                         if create_surface else Namespace(solved=solved))
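
Because the returned Namespace has a `surface` field only when create_surface=True (the ambiguity the TODO complains about), a defensive caller sketch:

res = minclaplace(input_grid)  # `input_grid` assumed to be a MincAtom
solved = res.output.solved
surface = getattr(res.output, "surface", None)  # None when create_surface=False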
Example no. 13
def mincblob(op: str, grid: MincAtom, subdir: str = "tmp") -> Result[MincAtom]:
    """
    Low-level mincblob wrapper with the one exception being the determinant option. By
    default the inner clockwork of mincblob subtracts 1 from all determinant values that
    are being calculated. As such, 1 needs to be added to the result of the mincblob call.
    We will do that here, because it makes most sense here.
    >>> stages = mincblob('determinant', MincAtom("/images/img_1.mnc", pipeline_sub_dir="/tmp")).stages
    >>> [s.render() for s in stages]
    ['mincblob -clobber -determinant /images/img_1.mnc /tmp/img_1/img_1_determinant.mnc']
    """
    if op not in ["determinant", "trace", "translation", "magnitude"]:
        raise ValueError('mincblob: invalid operation %s' % op)

    # if we are calculating the determinant, the first file produced is a temp file:
    if op == "determinant":
        out_file = grid.newname_with_suffix("_temp_det", subdir=subdir)
    else:
        out_file = grid.newname_with_suffix('_' + op, subdir=subdir)

    stage = CmdStage(
        inputs=(grid, ),
        outputs=(out_file, ),
        cmd=['mincblob', '-clobber', '-' + op, grid.path, out_file.path])

    s = Stages([stage])
    # now create the proper determinant if that's what was asked for
    if op == "determinant":
        result_file = s.defer(
            mincmath(op='add',
                     const=1,
                     vols=[out_file],
                     subdir=subdir,
                     new_name=grid.filename_wo_ext + "_det"))
    else:
        result_file = out_file

    return Result(stages=s, output=result_file)
Example no. 14
def minclaplace(
    input_grid: MincAtom,
    extra_args: List[str] = [],
    solution_vertices: Optional[FileAtom] = None,
    create_surface: bool = True,
) -> Result[FileAtom]:
    # TODO the ambiguity of the return type is slightly annoying ...
    # best to create separate minclaplace_at_vertices for the case when `--solve-at-vertices` is used?
    solved = input_grid.newname_with_suffix(
        "_solved", ext=".txt" if solution_vertices else ".mnc")
    if create_surface:
        out_surface = input_grid.newname_with_suffix("_surface", ext=".obj")
    stage = CmdStage(
        inputs=(input_grid, ),
        outputs=(solved, ) + ((out_surface, ) if create_surface else ()),
        cmd=["minclaplace"] +
        (["--solve-at-vertices=%s" %
          solution_vertices.path] if solution_vertices is not None else []) +
        (["--create-surface=%s" % out_surface.path] if create_surface else [])
        + extra_args + [input_grid.path, solved.path])

    return Result(stages=Stages([stage]),
                  output=Namespace(solved=solved, surface=out_surface)
                  if create_surface else Namespace(solved=solved))
Example no. 15
def marching_cubes(in_volume : MincAtom,
                   min_threshold : float = None,
                   max_threshold : float = None,
                   threshold     : float = None):

    if not xor(threshold is None, min_threshold is None and max_threshold is None):
        raise ValueError("specify either threshold or min and max thresholds")

    out_volume = FileAtom(in_volume.newname_with(NotImplemented))  # forget MINCy fields  # FIXME this coercion doesn't work

    stage = CmdStage(inputs=(in_volume,), outputs=(out_volume,),
                     cmd=["marching_cubes", in_volume.path, out_volume.path]
                         + ([str(threshold)] if threshold is not None else [str(min_threshold), str(max_threshold)]))

    return Result(stages=Stages([stage]), output=out_volume)
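
The xor guard admits exactly two call styles; hypothetical calls against the function above:

marching_cubes(vol, threshold=0.5)                         # OK: single threshold
marching_cubes(vol, min_threshold=0.2, max_threshold=0.8)  # OK: min/max pair
marching_cubes(vol)                                        # raises ValueError
# N.B.: supplying only one of min_threshold/max_threshold slips past the guard
# (the `and` makes the second xor argument False), so "None" would reach the command line.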
Example no. 16
def voxel_vote(label_files: List[MincAtom],
               output_dir: str,
               name: str = "voted"):  # TODO too stringy ...

    if len(label_files) == 0:
        raise ValueError("can't vote with 0 files")

    out = MincAtom(name=os.path.join(output_dir, "%s.mnc" % name),
                   output_sub_dir=output_dir)  # FIXME better naming

    s = CmdStage(cmd=["voxel_vote", "--clobber"] +
                 [l.path for l in sorted(label_files)] + [out.path],
                 inputs=tuple(label_files),
                 outputs=(out, ))

    return Result(stages=Stages([s]), output=out)
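
Given the cmd above, voting over some hypothetical label files renders roughly as:

labels = [MincAtom('/labels/seg_%d.mnc' % i) for i in range(1, 3)]
res = voxel_vote(labels, output_dir="/out")
# expected command, modulo atom sorting and exact paths:
#   voxel_vote --clobber /labels/seg_1.mnc /labels/seg_2.mnc /out/voted.mnc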
Example no. 17
def marching_cubes(in_volume: MincAtom,
                   min_threshold: float = None,
                   max_threshold: float = None,
                   threshold: float = None):

    if not xor(threshold is None,
               min_threshold is None and max_threshold is None):
        raise ValueError("specify either threshold or min and max thresholds")

    out_volume = FileAtom(
        in_volume.newname_with(NotImplemented)
    )  # forget MINCy fields  # FIXME this coercion doesn't work

    stage = CmdStage(
        inputs=(in_volume, ),
        outputs=(out_volume, ),
        cmd=["marching_cubes", in_volume.path, out_volume.path] +
        ([str(threshold)] if threshold is not None else
         [str(min_threshold), str(max_threshold)]))

    return Result(stages=Stages([stage]), output=out_volume)
Example no. 18
def NLIN_pipeline(options):

    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    # TODO this is tedious and annoyingly similar to the registration chain and MBM and LSQ6 ...
    processed_dir = os.path.join(output_dir, pipeline_name + "_processed")
    nlin_dir = os.path.join(output_dir, pipeline_name + "_nlin")

    resolution = (options.registration.resolution  # TODO does using the finest resolution here make sense?
                  or min([get_resolution_from_file(f) for f in options.application.files]))

    imgs = get_imgs(options.application)

    initial_target_mask = MincAtom(
        options.nlin.target_mask) if options.nlin.target_mask else None
    initial_target = MincAtom(options.nlin.target, mask=initial_target_mask)

    nlin_module = get_nonlinear_component(reg_method=options.nlin.reg_method)

    nlin_build_model_component = get_model_building_procedure(
        options.nlin.reg_strategy, reg_module=nlin_module)

    nlin_conf = (nlin_build_model_component.parse_build_model_protocol(
        options.nlin.nlin_protocol, resolution=resolution)
                 if options.nlin.nlin_protocol is not None else
                 nlin_build_model_component.get_default_build_model_conf(
                     resolution=resolution))

    s = Stages()

    nlin_result = s.defer(
        nlin_build_model_component.build_model(
            imgs=imgs,
            initial_target=initial_target,
            conf=nlin_conf,
            nlin_dir=nlin_dir,
            use_robust_averaging=options.nlin.use_robust_averaging,
            nlin_prefix=""))

    inverted_xfms = [
        s.defer(invert_xfmhandler(xfm)) for xfm in nlin_result.output
    ]

    if options.stats.calc_stats:

        determinants = s.defer(
            determinants_at_fwhms(xfms=inverted_xfms,
                                  inv_xfms=nlin_result.output,
                                  blur_fwhms=options.stats.stats_kernels))

        return Result(stages=s,
                      output=Namespace(nlin_xfms=nlin_result,
                                       avg_img=nlin_result.avg_img,
                                       determinants=determinants))
    else:
        # there's no consistency in what gets returned, yikes ...
        return Result(stages=s,
                      output=Namespace(nlin_xfms=nlin_result,
                                       avg_img=nlin_result.avg_img))
Example no. 19
def common_space(mbm_result, options):
    s = Stages()

    # TODO: the interface of this function (basically a destructive 'id' function) is horrific
    # TODO: instead, copy the mbm_result here ??

    if not options.mbm.common_space.common_space_model:
        raise ValueError("No common space template provided!")
    if not options.mbm.common_space.common_space_mask:
        warnings.warn(
            "No common space mask provided ... might be OK if your consensus average mask is OK"
        )
    # TODO allow lsq6 registration as well ...
    common_space_model = MincAtom(
        options.mbm.common_space.common_space_model,
        # TODO fix the subdirectories!
        mask=MincAtom(options.mbm.common_space.common_space_mask,
                      pipeline_sub_dir=os.path.join(
                          options.application.output_directory,
                          options.application.pipeline_name + "_processed"))
        if options.mbm.common_space.common_space_mask else None,
        pipeline_sub_dir=os.path.join(
            options.application.output_directory,
            options.application.pipeline_name + "_processed"))

    # TODO allow different lsq12/nlin config params than the ones used in MBM ...
    # full_hierarchy = get_nonlinear_configuration_from_options(nlin_protocol=options.mbm.nlin.nlin_protocol,
    #                                                          reg_method=options.mbm.nlin.reg_method,
    #                                                          file_resolution=options.registration.resolution)
    # WEIRD ... see comment in lsq12_nlin code ...
    # nlin_conf  = full_hierarchy.confs[-1] if isinstance(full_hierarchy, MultilevelANTSConf) else full_hierarchy
    # also weird that we need to call get_linear_configuration_from_options here ... ?
    #    nlin_build_model_component = model_building_with_initial_target_generation(
    #                                   final_model_building_component=nlin_build_model_component,
    #                                   prelim_model_building_component=prelim_nlin_build_model_component)

    # TODO don't use name 'x_module' for something that's technically not a module ... perhaps unit/component?
    nlin_component = get_nonlinear_component(
        reg_method=options.mbm.nlin.reg_method)

    lsq12_conf = get_linear_configuration_from_options(
        conf=options.mbm.lsq12,
        transform_type=LinearTransType.lsq12,
        file_resolution=options.registration.resolution)
    # N.B.: options.registration.resolution has been *updated* correctly by mbm( ). sigh ...
    model_to_common = s.defer(
        lsq12_nlin(
            source=mbm_result.avg_img,
            target=common_space_model,
            lsq12_conf=lsq12_conf,
            nlin_module=nlin_component,
            resolution=options.registration.resolution,
            nlin_options=options.mbm.nlin.nlin_protocol,  # =nlin_conf,
            resample_source=True))

    model_common = s.defer(
        mincresample_new(img=mbm_result.avg_img,
                         xfm=model_to_common.xfm,
                         like=common_space_model,
                         postfix="_common"))

    overall_xfms_to_common = [
        s.defer(concat_xfmhandlers([rigid_xfm, nlin_xfm, model_to_common]))
        for rigid_xfm, nlin_xfm in zip(mbm_result.xfms.rigid_xfm,
                                       mbm_result.xfms.lsq12_nlin_xfm)
    ]

    # reuse the handlers deferred just above rather than re-deferring identical concatenations
    overall_xfms_to_common_inv = [
        s.defer(invert_xfmhandler(xfmhandler))
        for xfmhandler in overall_xfms_to_common
    ]

    xfms_to_common = [
        s.defer(concat_xfmhandlers([nlin_xfm, model_to_common]))
        for nlin_xfm in mbm_result.xfms.lsq12_nlin_xfm
    ]

    mbm_result.xfms = mbm_result.xfms.assign(
        xfm_to_common=xfms_to_common,
        overall_xfm_to_common=overall_xfms_to_common)

    if options.mbm.stats.calc_stats:
        log_nlin_det_common, log_full_det_common = ([
            dets.map(lambda d: s.defer(
                mincresample_new(img=d,
                                 xfm=model_to_common.xfm,
                                 like=common_space_model,
                                 postfix="_common")))
            for dets in (mbm_result.determinants.log_nlin_det,
                         mbm_result.determinants.log_full_det)
        ])

        overall_determinants = s.defer(
            determinants_at_fwhms(xfms=overall_xfms_to_common_inv,
                                  blur_fwhms=options.mbm.stats.stats_kernels))

        mbm_result.determinants = \
            mbm_result.determinants.assign(log_nlin_det_common=log_nlin_det_common,
                                           log_full_det_common=log_full_det_common,
                                           log_nlin_overall_det_common=overall_determinants.log_nlin_det,
                                           log_full_overall_det_common=overall_determinants.log_full_det
                                           )

    mbm_result.model_common = model_common

    return Result(stages=s, output=mbm_result)
Example no. 20
def map_to_MincAtom(row):
    return MincAtom(name=row.file,
                    pipeline_sub_dir=pipeline_sub_dir,
                    mask=MincAtom(name=row.mask_file,
                                  pipeline_sub_dir=pipeline_sub_dir))
Example no. 21
def smooth_vector(source : MincAtom, fwhm : float) -> Result[MincAtom]:
    outf = source.newname_with_suffix("_smooth_fwhm%s" % fwhm, subdir="tmp") # TODO smooth_displacement_?
    cmd  = ['smooth_vector', '--clobber', '--filter', '--fwhm=%s' % fwhm,
            source.path, outf.path]
    stage = CmdStage(inputs=(source,), outputs=(outf,), cmd=cmd)
    return Result(stages=Stages([stage]), output=outf)
Example no. 22
def img():
    return MincAtom('/images/img_1.mnc')
Example no. 23
def imgs():
    return [MincAtom('/images/img_%d.mnc' % i) for i in range(1, 4)]
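
These two zero-argument helpers read like pytest-style fixtures feeding the doctests and tests in these examples; presumably (an assumption, not shown in the snippets) their original modules decorate them, e.g.:

import pytest

@pytest.fixture
def img():
    return MincAtom('/images/img_1.mnc')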
Example no. 24
def tissue_vision_pipeline(options):
    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    csv = original_csv = get_imgs(options.application)
    # check_MINC_input_files([img.path for img in imgs])

    s = Stages()

    s.defer(create_quality_control_images(imgs=csv['anatomical_MincAtom'].tolist(), montage_dir=output_dir,
                                          montage_output=os.path.join(output_dir, pipeline_name + "_resampled",
                                                                      "input_montage"),
                                          auto_range=True,
                                          message="input_mincs"))
#############################
# Step 1: Run MBM.py to create a consensus average
#############################
    mbm_result = s.defer(mbm(imgs=csv['anatomical_MincAtom'].tolist(), options=options,
                             prefix=options.application.pipeline_name,
                             output_dir=output_dir,
                             with_maget=False))

    #TODO remove
    transforms = mbm_result.xfms.assign(native_file=lambda df: df.rigid_xfm.apply(lambda x: x.source),
                            lsq6_file=lambda df: df.lsq12_nlin_xfm.apply(lambda x: x.source),
                            lsq6_mask_file=lambda df:
                              df.lsq12_nlin_xfm.apply(lambda x: x.source.mask if x.source.mask else ""),
                            nlin_file=lambda df: df.lsq12_nlin_xfm.apply(lambda x: x.resampled),
                            nlin_mask_file=lambda df:
                              df.lsq12_nlin_xfm.apply(lambda x: x.resampled.mask if x.resampled.mask else ""))\
        .applymap(maybe_deref_path)
    determinants = mbm_result.determinants.drop(["full_det", "nlin_det"], axis=1)\
        .applymap(maybe_deref_path)

    csv = csv.assign(anatomical_lsq6_MincAtom=mbm_result.xfms.lsq12_nlin_xfm.apply(lambda xfm: xfm.source),
                     mbm_lsq6_XfmAtom=mbm_result.xfms.rigid_xfm.apply(lambda x: x.xfm),
                     mbm_lsq12_nlin_XfmAtom=mbm_result.xfms.lsq12_nlin_xfm.apply(lambda x: x.xfm),
                     mbm_full_XfmAtom=mbm_result.xfms.overall_xfm.apply(lambda x: x.xfm))

    # x.assign(count_lsq6_MincAtom=lambda df: [x + y for x, y in zip(df["x"], df["y"])])
    csv = csv.assign(count_lsq6_MincAtom=lambda df:
                     [s.defer(mincresample_new(img=img, xfm=xfm, like=like))
                      for img, xfm, like in zip(df["count_MincAtom"],
                                                df["mbm_lsq6_XfmAtom"],
                                                df["anatomical_lsq6_MincAtom"])])


#############################
# Step 2: Register consensus average to ABI tissuevision Atlas
#############################
    lsq12_conf = get_linear_configuration_from_options(conf=options.mbm.lsq12,
                                                       transform_type=LinearTransType.lsq12,
                                                       file_resolution=options.registration.resolution)
    nlin_component = get_nonlinear_component(reg_method=options.mbm.nlin.reg_method)

    atlas_target = MincAtom(name=options.consensus_to_atlas.atlas_target,
                            orig_name=options.consensus_to_atlas.atlas_target,
                            mask=MincAtom(name=options.consensus_to_atlas.atlas_target_mask,
                                          orig_name=options.consensus_to_atlas.atlas_target_mask))
    atlas_target_label = MincAtom(name=options.consensus_to_atlas.atlas_target_label,
                                  orig_name=options.consensus_to_atlas.atlas_target_label,
                                  mask=MincAtom(name=options.consensus_to_atlas.atlas_target_mask,
                                                orig_name=options.consensus_to_atlas.atlas_target_mask))

    lsq12_nlin_result = s.defer(lsq12_nlin(source=mbm_result.avg_img,
                                           target=atlas_target,
                                           lsq12_conf=lsq12_conf,
                                           nlin_module=nlin_component,
                                           nlin_options=options.mbm.nlin.nlin_protocol,
                                           resolution=options.registration.resolution,
                                           resample_source=False
                                           ))

#############################
# Step 3: Resample count volumes to ABI tissuevision Atlas space and vice versa
#############################

    csv = csv.assign(lsq6_to_atlas_XfmAtom = lambda df: df['mbm_lsq12_nlin_XfmAtom'].apply(lambda xfm:
                            s.defer(xfmconcat([xfm, lsq12_nlin_result.xfm]))))

    csv = csv.assign(
        anatomical_targetspace_MincAtom=lambda df:
        [s.defer(mincresample_new(img=img, xfm=xfm, like=atlas_target))
         for img, xfm in zip(df["anatomical_lsq6_MincAtom"], df["lsq6_to_atlas_XfmAtom"])],
        count_targetspace_MincAtom=lambda df:
        [s.defer(mincresample_new(img=img, xfm=xfm, like=atlas_target))
         for img, xfm in zip(df["count_lsq6_MincAtom"], df["lsq6_to_atlas_XfmAtom"])],
        atlas_lsq6space_MincAtom=lambda df:
        [s.defer(mincresample_new(img=atlas_target_label, xfm=xfm, like=like, invert=True,
                                  interpolation=Interpolation.nearest_neighbour,
                                  extra_flags=('-keep_real_range',)))
         for xfm, like in zip( df["lsq6_to_atlas_XfmAtom"], df["count_lsq6_MincAtom"])]
    )

    csv.applymap(maybe_deref_path).to_csv("analysis.csv", index=False)

    s.defer(create_quality_control_images(imgs=csv.count_targetspace_MincAtom.tolist(), montage_dir=output_dir,
                                          montage_output=os.path.join(output_dir, pipeline_name + "_resampled",
                                                                      "count_montage"),
                                          auto_range=True,
                                          message="count_mincs"))
    return Result(stages=s, output=())
def tv_recon_pipeline(options):
    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name

    s = Stages()

    slices_df = pd.read_csv(options.application.csv_file,
                     dtype={"brain_name": str, "brain_directory": str, "slice_directory": str})

    if "qc" not in slices_df.columns:
        slices_df["qc"] = False
        # flag every `step`-th slice for QC, e.g. qc_fraction=0.1 -> every 10th slice
        step = int(1 / options.deep_segment.qc_fraction)
        slices_df.loc[0::step, "qc"] = True  # .loc avoids pandas' chained-assignment pitfall

#############################
# Step 1: Run deep_segment.py
#############################
    # TODO surely there's a way around deep_segment_result=""?
    slices_df = slices_df.assign(
        deep_segment_result="",
        segmentation_directory = lambda df: df.apply(
            lambda row: os.path.join(output_dir, pipeline_name + "_deep_segmentation", row.brain_name), axis=1)
    )
    for index, row in slices_df.iterrows():
        slices_df.at[index,"deep_segment_result"] = s.defer(deep_segment(image = FileAtom(row.slice,
                                                                                          output_sub_dir = row.segmentation_directory),
                                                                         deep_segment_pipeline = FileAtom(options.deep_segment.deep_segment_pipeline),
                                                                         anatomical_suffix = options.deep_segment.anatomical_name,
                                                                         count_suffix = options.deep_segment.count_name,
                                                                         outline_suffix = options.deep_segment.outline_name if row.qc else None,
                                                                         cell_min_area = options.deep_segment.cell_min_area,
                                                                         cell_mean_area = options.deep_segment.cell_mean_area,
                                                                         cell_max_area = options.deep_segment.cell_max_area,
                                                                         temp_dir = options.deep_segment.temp_dir
                                                                         ))
        # hacky solution: requires deep_segment() to return results in that order
        #https://stackoverflow.com/questions/35491274/pandas-split-column-of-lists-into-multiple-columns
        slices_df[["anatomical_result", "count_result", "outline_result"]] = \
            pd.DataFrame(slices_df.deep_segment_result.values.tolist())

#############################
# Step 2: Run stacks_to_volume.py
#############################
    #This is annoying... If I add anything to slices_df, I will have to delete it here as well
    mincs_df = slices_df.drop(['slice', 'deep_segment_result', "anatomical_result", "count_result", "outline_result", "qc"], axis=1) \
        .drop_duplicates().reset_index(drop=True)\
        .assign(
        anatomical_list = slices_df.groupby("brain_name")['anatomical_result'].apply(list).reset_index(drop=True),
        count_list=slices_df.groupby("brain_name")['count_result'].apply(list).reset_index(drop=True),
        #the above is so hacky...
        stacked_directory=lambda df: df.apply(
            lambda row: os.path.join(output_dir, pipeline_name + "_stacked", row.brain_name), axis=1),
    )
    mincs_df = mincs_df.assign(
        anatomical_stacked_MincAtom=lambda df: df.apply(
            lambda row: MincAtom(
                os.path.join(row.stacked_directory,
                             row.brain_name + "_" + options.deep_segment.anatomical_name + "_stacked.mnc")
            ), axis=1
        ),
        count_stacked_MincAtom=lambda df: df.apply(
            lambda row: MincAtom(
                os.path.join(row.stacked_directory,
                             row.brain_name + "_" + options.deep_segment.count_name + "_stacked.mnc")
            ), axis=1
        )
    )
    if not options.stacks_to_volume.manual_scale_output:
        mincs_df["scale_output"] = mincs_df.interslice_distance/mincs_df.interslice_distance.min()

    for index, row in mincs_df.iterrows():
        s.defer(stacks_to_volume(
            slices = row.anatomical_list,
            output_volume = row.anatomical_stacked_MincAtom,
            z_resolution = row.interslice_distance,
            stacks_to_volume_options=options.stacks_to_volume,
            uniform_sum=False
        ))
        s.defer(stacks_to_volume(
            slices=row.count_list,
            output_volume=row.count_stacked_MincAtom,
            z_resolution=row.interslice_distance,
            stacks_to_volume_options=options.stacks_to_volume,
            scale_output = row.scale_output,
            uniform_sum=True
        ))
#############################
# Step 3: Run autocrop to resample to isotropic
#############################
    for index, row in mincs_df.iterrows():
        mincs_df.at[index,"anatomical_isotropic_result"] = s.defer(autocrop(
            img = row.anatomical_stacked_MincAtom,
            isostep = options.stacks_to_volume.plane_resolution,
            suffix = "isotropic"
        ))
        mincs_df.at[index, "count_isotropic_result"] = s.defer(autocrop(
            img=row.count_stacked_MincAtom,
            isostep=options.stacks_to_volume.plane_resolution,
            suffix="isotropic",
            nearest_neighbour = True
        ))

#############################
    slices_df = slices_df.assign(
        anatomical_slice = lambda df: df.apply(lambda row: row.anatomical_result.path, axis=1),
        count_slice=lambda df: df.apply(lambda row: row.count_result.path, axis=1),
        outline_slice=lambda df: df.apply(lambda row: row.outline_result.path if row.outline_result else None, axis=1),
    )
    slices_df.drop(slices_df.filter(regex='.*_directory.*|.*_result.*'), axis=1)\
        .to_csv("TV_processed_slices.csv", index=False)

    mincs_df = mincs_df.assign(
        anatomical=lambda df: df.apply(lambda row: row.anatomical_isotropic_result.path, axis=1),
        count=lambda df: df.apply(lambda row: row.count_isotropic_result.path, axis=1),
    )
    mincs_df.drop(mincs_df.filter(regex='.*_result.*|.*_list.*|.*_MincAtom.*'), axis=1)\
        .to_csv("TV_mincs.csv", index=False)
    #TODO overlay them
    # s.defer(create_quality_control_images(imgs=reconstructed_mincs, montage_dir = output_dir,
    #     montage_output=os.path.join(output_dir, pipeline_name + "_stacked", "reconstructed_montage"),
    #                                       message="reconstructed_mincs"))

    #TODO
    # s.defer(create_quality_control_images(imgs=all_anatomical_pad_results, montage_dir=output_dir,
    #                                       montage_output=os.path.join(output_dir, pipeline_name + "_stacked",
    #                                                                   "%s_montage" % anatomical),
    #                                       message="%s_mincs" % anatomical))
    # s.defer(create_quality_control_images(imgs=all_count_pad_results, montage_dir=output_dir,
    #                                       montage_output=os.path.join(output_dir, pipeline_name + "_stacked",
    #                                                                   "%s_montage" % count),
    #                                       auto_range=True,
    #                                       message="%s_mincs" % count))
    return Result(stages=s, output=())
def two_level_pipeline(options: TwoLevelConf):
    def relativize_path(fp):
        #this annoying function takes care of the csv_paths_relative_to_wd flag.
        return os.path.join(os.path.dirname(options.application.csv_file),fp) \
            if not options.application.csv_paths_relative_to_wd \
            else fp

    first_level_dir = options.application.pipeline_name + "_first_level"

    if options.application.files:
        warnings.warn("Got extra arguments: '%s'" % options.application.files)
    with open(options.application.csv_file, 'r') as f:
        try:
            files_df = (pd.read_csv(
                filepath_or_buffer=f,
                usecols=['group', 'file'],
                index_col=False).assign(file=lambda df: df.apply(
                    axis="columns",
                    func=lambda r: MincAtom(relativize_path(r.file).strip(),
                                            pipeline_sub_dir=os.path.join(
                                                first_level_dir,
                                                "%s_processed" % r.group,
                                            )))))
        except AttributeError:
            warnings.warn(
                "Something went wrong ... does your .csv file have `group` and `file` columns?"
            )
            raise

    # TODO is it actually sufficient that *within-study* filenames are distinct, as follows??
    for name, g in files_df.groupby("group"):  # TODO: collect the outputs
        check_MINC_input_files(g.file.map(lambda x: x.path))
    #check_MINC_input_files(files_df.file.map(lambda x: x.path))

    pipeline = two_level(grouped_files_df=files_df, options=options)

    # TODO write these into the appropriate subdirectory ...
    overall = (pipeline.output.overall_determinants.drop(
        'inv_xfm', axis=1).applymap(maybe_deref_path))
    overall.to_csv("overall_determinants.csv", index=False)
    resampled = (pipeline.output.resampled_determinants.drop(
        ['inv_xfm', 'full_det', 'nlin_det'],
        axis=1).applymap(maybe_deref_path))
    resampled.to_csv("resampled_determinants.csv", index=False)

    # rename/drop some columns, bind the dfs and write to "analysis.csv" as it should be.
    # deprecate the two csvs next release.
    analysis = pd.read_csv(options.application.csv_file).assign(
        native_file=lambda df: df.file.apply(relativize_path))

    overall = (overall.drop(["full_det", "nlin_det"],
                            axis=1).rename(columns={"overall_xfm": "xfm"}))
    resampled = resampled.rename(
        columns={
            "first_level_log_full_det": "log_full_det",
            "first_level_log_nlin_det": "log_nlin_det",
            "first_level_xfm": "xfm",
            "first_level_log_full_det_resampled": "resampled_log_full_det",
            "first_level_log_nlin_det_resampled": "resampled_log_nlin_det"
        })
    (analysis.merge(pd.concat([resampled, overall],
                              axis=1)).to_csv("analysis.csv", index=False))

    return pipeline
def saddle_recon_pipeline(options):

    output_dir = options.application.output_directory
    pipeline_name = options.application.pipeline_name
    fid_input_dir = options.saddle_recon.varian_recon.fid_input_directory

    # TODO check that the varian recon "output_file_name" doesn't name a directory, or if it does, that it matches the output directory

    # The input directory should contain a host of fid files that will be used for the reconstruction of the mnc files
    # TODO check that there are fid files in this directory
    # TODO check that all mandatory inputs are provided
    # Make a list of input fid files, with location, and create a "FileAtom" type
    varian_fid_files = [
        fid_input_dir + "/fid" + str(num_fid)
        for num_fid in range(0, options.saddle_recon.varian_recon.num_fids)
    ]
    fids = [FileAtom(name) for name in varian_fid_files]

    # Varian recon will spit out images of the format <output_file_name>.<coil#>.<rep#>.mnc
    # TODO If .mnc isn't provided at the end of the output_file_name, then there is no "." before the coil number. Need to check and correct for this.
    # All the files created will be spit out to the output_dir
    # Create list of files we expect to be produced by varian recon
    coil_list_0based = [
        int(x) for x in options.saddle_recon.varian_recon.mouse_list.split(',')
    ]
    coil_list = [x + 1 for x in coil_list_0based]
    file_name_base = os.path.splitext(
        options.saddle_recon.varian_recon.output_file_name)[0]
    varian_mnc_output = [
        os.path.join(
            output_dir,
            file_name_base + "." + str(coil) + "_" + str(rep) + ".mnc")
        for coil in coil_list
        for rep in range(0, options.saddle_recon.varian_recon.num_reps)
    ]
    varian_coil_output = [
        str(coil) for coil in coil_list
        for rep in range(0, options.saddle_recon.varian_recon.num_reps)
    ]
    recon_sub_dir = [
        file_name[:-6] + "_processed" for file_name in varian_mnc_output
    ]
    imgs = [
        MincAtom(varian_mnc_output[k], pipeline_sub_dir=recon_sub_dir[k])
        for k in range(0, len(varian_mnc_output))
    ]

    s = Stages()

    #############################
    # Step 1: Run varian_recon.py
    #############################
    varian_recon_results = s.defer(
        varian_recon_ge3dmice_saddle(
            fids=fids,
            imgs=imgs,
            varian_recon_options=options.saddle_recon.varian_recon,
            output_dir=output_dir))

    # Hold results obtained in the loop
    all_lsq6_results = []
    all_dc_results = []
    all_crop_results = []

    # Loop through all the coils
    for icoil in coil_list:
        icoil_imgs = numpy.array(imgs)[numpy.where(
            numpy.array(varian_coil_output) == str(icoil))[0]]
        icoil_varian_mnc_output = numpy.array(varian_mnc_output)[numpy.where(
            numpy.array(varian_coil_output) == str(icoil))[0]]

        ###########################
        # Step 2: lsq6 registration
        ###########################
        # TODO change functionality of get_resolution_from_file so that it can be deferred
        lsq6_dir = os.path.join(output_dir,
                                file_name_base + "." + str(icoil) + "_lsq6")
        target_dir = os.path.join(
            output_dir, file_name_base + "." + str(icoil) + "_target_file")

        resolution = options.registration.resolution
        #resolution = (options.registration.resolution or
        #              get_resolution_from_file(icoil_varian_mnc_output[0]))
        options.registration = options.registration.replace(
            resolution=resolution)

        target_file = MincAtom(name=icoil_varian_mnc_output[0],
                               pipeline_sub_dir=target_dir)
        targets = RegistrationTargets(registration_standard=target_file,
                                      xfm_to_standard=None,
                                      registration_native=None)

        lsq6_result = s.defer(
            lsq6_nuc_inorm(imgs=icoil_imgs,
                           resolution=resolution,
                           registration_targets=targets,
                           lsq6_dir=lsq6_dir,
                           lsq6_options=options.saddle_recon.lsq6))
        all_lsq6_results.append(lsq6_result)

        ###########################
        # Step 3: distortion correct lsq6 output image
        ###########################
        lsq6_file = MincAtom(os.path.join(lsq6_dir, "average.mnc"),
                             pipeline_sub_dir=lsq6_dir)
        dc_lsq6_file = MincAtom(os.path.join(lsq6_dir,
                                             "average.aug2015_dist_corr.mnc"),
                                pipeline_sub_dir=lsq6_dir)
        dc_result = s.defer(
            dist_corr_saddle(img=lsq6_file, dc_img=dc_lsq6_file))
        all_dc_results.append(dc_result)

        ###########################
        # Step 4: crop distortion corrected lsq6 output image
        ###########################
        cropped_dc_lsq6_file = MincAtom(os.path.join(
            lsq6_dir, "average.aug2015_dist_corr.cropped.mnc"),
                                        pipeline_sub_dir=lsq6_dir)
        crop_result = s.defer(
            crop_to_brain(
                img=dc_lsq6_file,
                cropped_img=cropped_dc_lsq6_file,
                crop_to_brain_options=options.saddle_recon.crop_to_brain))
        all_crop_results.append(crop_result)

    return Result(stages=s,
                  output=Namespace(varian_output=varian_recon_results,
                                   lsq6_output=all_lsq6_results,
                                   dc_output=all_dc_results,
                                   crop_output=all_crop_results))