Example #1
 def test_combine_paths(self):
     self.assertEqual(
         libmag.combine_paths("foo/bar/item", "file", "_", "py"),
         "foo/bar/item_file.py")
     self.assertEqual(libmag.combine_paths("foo/bar/item", "file.py"),
                      "foo/bar/item_file.py")
     self.assertEqual(
         libmag.combine_paths("foo/bar/item", "file.py", "_", "ext"),
         "foo/bar/item_file.ext")
     self.assertEqual(
         libmag.combine_paths("foo/bar/item", "file.py", ext="ext"),
         "foo/bar/item_file.ext")
     self.assertEqual(libmag.combine_paths("foo/bar/item", "file.py", "_"),
                      "foo/bar/item_file.py")
Example #2
def cluster_blobs(img_path, suffix=None):
    """Cluster blobs and save to Numpy archive.
    
    Args:
        img_path (str): Base path from which registered labels and blobs files
            will be found and output blobs file save location will be
            constructed.
        suffix (str): Suffix for ``img_path``; defaults to None.

    """
    mod_path = img_path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(img_path, suffix)
    labels_img_np = sitk_io.load_registered_img(
        mod_path, config.RegNames.IMG_LABELS.value)
    blobs = detector.Blobs().load_blobs(np_io.img_to_blobs_path(img_path))
    scaling, res = np_io.find_scaling(img_path, labels_img_np.shape)
    if blobs is None:
        libmag.warn("unable to load nuclei coordinates")
        return

    # append label IDs to blobs and scale to make isotropic
    blobs_clus = ClusterByLabel.cluster_by_label(blobs.blobs[:, :3],
                                                 labels_img_np, scaling, res)
    print(blobs_clus)
    out_path = libmag.combine_paths(mod_path, config.SUFFIX_BLOB_CLUSTERS)
    np.save(out_path, blobs_clus)
Example #3
def make_subimage_name(
        base: str, offset: Optional[Tuple[int, int, int]] = None,
        shape: Optional[Tuple[int, int, int]] = None,
        suffix: Optional[str] = None) -> str:
    """Make name of subimage for a given offset and shape.

    The order of ``offset`` and ``shape`` is assumed to be ``z, y, x`` but
    will be reversed for the output name since the user-oriented ordering
    is ``x, y, z``.
    
    Args:
        base: Start of name, which can include full parent path.
        offset: Offset as a tuple; defaults to None to ignore sub-image.
        shape: Shape as a tuple; defaults to None to ignore sub-image.
        suffix: Suffix to append, replacing any existing extension
            in ``base``; defaults to None.
    
    Returns:
        Name (or path) to subimage.
    """
    name = base
    if offset is not None and shape is not None:
        # sub-image offset/shape stored as z,y,x, but file named as x,y,z
        roi_site = "{}x{}".format(offset[::-1], shape[::-1]).replace(" ", "")
        name = libmag.insert_before_ext(base, roi_site, "_")
    if suffix:
        name = libmag.combine_paths(name, suffix)
    print("subimage name: {}".format(name))
    return name
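The z,y,x to x,y,z reversal in the name can be checked in isolation; the full output shown below is only an assumed form, since it also depends on libmag.insert_before_ext.

offset, shape = (10, 20, 30), (5, 6, 7)
roi_site = "{}x{}".format(offset[::-1], shape[::-1]).replace(" ", "")
print(roi_site)  # (30,20,10)x(7,6,5)
# so make_subimage_name("exp/sample.npy", offset, shape) would presumably
# yield something like "exp/sample_(30,20,10)x(7,6,5).npy"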
Example #4
    def save(self):
        """Save the metadata.
        
        Also copies the reference file to the metadata's directory.
        
        """
        labels_ref_name = None
        if self.path_ref:
            # if provided, copy labels reference file to output dir
            labels_ref_name = pathlib.Path(self.path_ref).name
            labels_ref_out = labels_ref_name
            if self.prefix:
                labels_ref_out = libmag.combine_paths(self.prefix,
                                                      labels_ref_name,
                                                      check_dir=True)
                labels_ref_name = pathlib.Path(labels_ref_out).name
            libmag.copy_backup(self.path_ref, labels_ref_out)

        # save metadata as YAML file
        meta = {
            # reference filename is relative to output directory
            "path_ref": labels_ref_name,
            "region_ids_orig": self.region_ids_orig,
        }
        yaml_io.save_yaml(self.save_path, meta, True, True)
Example #5
def animate_imgs(base_path, plotted_imgs, delay, ext=None, suffix=None):
    """Export to an animated image.
    
    Defaults to an animated GIF unless ``ext`` specifies otherwise.
    Requires ``FFMpeg`` for MP4 file format exports and ``ImageMagick`` for
    all other types of exports.
    
    Args:
        base_path (str): String from which an output path will be constructed.
        plotted_imgs (List[:obj:`matplotlib.image.AxesImage`]): Sequence of
            images to include in the animation.
        delay (int): Delay between image displays in ms. If None, the delay
            will default to 100 ms.
        ext (str): Extension to use when saving, without the period. Defaults
            to None, in which case "gif" will be used.
        suffix (str): String to append to output path before extension;
            defaults to None to ignore.

    """
    # set up animation output path and time interval
    if ext is None: ext = "gif"
    out_path = libmag.combine_paths(base_path, "animated", ext=ext)
    if suffix: out_path = libmag.insert_before_ext(out_path, suffix, "_")
    libmag.backup_file(out_path)
    if delay is None:
        delay = 100
    if plotted_imgs and len(plotted_imgs[0]) > 0:
        fig = plotted_imgs[0][0].figure
    else:
        libmag.warn("No images available to animate")
        return

    # WORKAROUND: FFMpeg may give a "height not divisible by 2" error, fixed
    # by padding with a pixel
    # TODO: check if needed for width
    # TODO: account for difference in FFMpeg height and fig height
    for fn, size in {
            # fig.set_figwidth: fig.get_figwidth(),
            fig.set_figheight:
            fig.get_figheight()
    }.items():
        if size * fig.dpi % 2 != 0:
            fn(size + 1. / fig.dpi)
            print("Padded size with", fn, fig.get_figwidth(), "to new size of",
                  fig.get_figheight())

    # generate and save animation
    anim = animation.ArtistAnimation(fig,
                                     plotted_imgs,
                                     interval=delay,
                                     repeat_delay=0,
                                     blit=False)
    try:
        writer = "ffmpeg" if ext == "mp4" else "imagemagick"
        anim.save(out_path, writer=writer)
        print("saved animation file to {}".format(out_path))
    except ValueError as e:
        print(e)
        libmag.warn("No animation writer available for Matplotlib")
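A hypothetical call for orientation; the output name is assumed from the combine_paths/insert_before_ext pattern above and is not taken from the source.

# export an MP4 (requires FFMpeg) with an extra "roi1" suffix
animate_imgs("exp/sample", plotted_imgs, delay=50, ext="mp4", suffix="roi1")
# assumed output path: "exp/sample_animated_roi1.mp4"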
Example #6
def merge_images(img_paths,
                 reg_name,
                 prefix=None,
                 suffix=None,
                 fn_combine=np.sum):
    """Merge images from multiple paths.
    
    Assumes that the images are relatively similar in size, but will resize 
    them to the size of the first image to combine the images.
    
    Args:
        img_paths: Paths from which registered paths will be found.
        reg_name: Registration suffix to load for the given paths 
            in ``img_paths``.
        prefix: Start of output path; defaults to None to use the first 
           path in ``img_paths`` instead.
        suffix: Portion of path to be combined with each path 
            in ``img_paths`` and output path; defaults to None.
        fn_combine: Function to apply to combine images with ``axis=0``. 
            Defaults to :func:``np.sum``. If None, each image will be 
            inserted as a separate channel.
    
    Returns:
        The combined image in SimpleITK format.
    """
    if len(img_paths) < 1: return None

    img_sitk = None
    img_nps = []
    for img_path in img_paths:
        mod_path = img_path
        if suffix is not None:
            # adjust image path with suffix
            mod_path = libmag.insert_before_ext(mod_path, suffix)
        print("loading", mod_path)
        # load and resize images to shape of first loaded image
        img, _ = _load_reg_img_to_combine(mod_path, reg_name, img_nps)
        if img_sitk is None: img_sitk = img

    # combine images and write single combo image
    if fn_combine is None:
        # combine raw images into separate channels
        img_combo = np.stack(img_nps, axis=img_nps[0].ndim)
    else:
        # merge by custom function
        img_combo = fn_combine(img_nps, axis=0)
    combined_sitk = replace_sitk_with_numpy(img_sitk, img_combo)
    # fallback to using first image's name as base
    output_base = img_paths[0] if prefix is None else prefix
    if suffix is not None:
        output_base = libmag.insert_before_ext(output_base, suffix)
    output_reg = libmag.combine_paths(reg_name, config.RegNames.COMBINED.value)
    write_reg_images({output_reg: combined_sitk}, output_base)
    return combined_sitk
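A hypothetical call; the sample paths are placeholders, and the registration suffix reuses config.RegNames.IMG_ATLAS seen elsewhere in these examples.

# sum the registered atlas images from two samples into one combined image
combined = merge_images(
    ["exp/sample1", "exp/sample2"], config.RegNames.IMG_ATLAS.value,
    fn_combine=np.sum)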
Example #7
def verify_stack(filename_base, subimg_path_base, settings, segments_all,
                 channels, overlap_base):
    db_path_base = os.path.basename(subimg_path_base)
    stats_detection = None
    fdbk = None
    try:
        # Truth databases are any database stored with manually
        # verified blobs and loaded at command-line with the
        # `--truth_db` flag or loaded here. While all experiments
        # can be stored in a single database, this verification also
        # supports experiments saved to separate databases in the
        # software root directory and named as a sub-image but with
        # the `sqlite.DB_SUFFIX_TRUTH` suffix. Experiments in the
        # database are also assumed to be named based on the full
        # image or the sub-image filename, without any directories.
        
        # load ROIs from previously loaded truth database or one loaded
        # based on sub-image filename
        exp_name, rois = _get_truth_db_rois(
            subimg_path_base, filename_base,
            db_path_base if config.truth_db is None else None)
        if rois is None:
            # load alternate truth database based on sub-image filename
            print("Loading truth ROIs from experiment:", exp_name)
            exp_name, rois = _get_truth_db_rois(
                subimg_path_base, filename_base, db_path_base)
        if config.truth_db is None:
            raise LookupError(
                "No truth database found for experiment {}, will "
                "skip detection verification".format(exp_name))
        if rois is None:
            raise LookupError(
                "No truth set ROIs found for experiment {}, will "
                "skip detection verification".format(exp_name))
        
        # verify each ROI and store results in a separate database
        exp_id = sqlite.insert_experiment(
            config.verified_db.conn, config.verified_db.cur,
            exp_name, None)
        verify_tol = np.multiply(
            overlap_base, settings["verify_tol_factor"])
        stats_detection, fdbk, df_verify = verify_rois(
            rois, segments_all, config.truth_db.blobs_truth,
            verify_tol, config.verified_db, exp_id, exp_name,
            channels)
        df_io.data_frames_to_csv(df_verify, libmag.combine_paths(
            exp_name, "verify.csv"))
    except FileNotFoundError:
        libmag.warn("Could not load truth DB from {}; "
                    "will not verify ROIs".format(db_path_base))
    except LookupError as e:
        libmag.warn(str(e))
    return stats_detection, fdbk
Example #8
    def save_path(self):
        """Get the save path.
        
        Returns:
            The save path if set, otherwise a path constructed from
            :attr:`prefix` if set and :const:`PATH_LABELS_META`, or the
            constant alone.

        """
        if not self._save_path:
            if self.prefix:
                return libmag.combine_paths(self.prefix,
                                            self.PATH_LABELS_META,
                                            check_dir=True)
            return self.PATH_LABELS_META
        return self._save_path
Example #9
def animate_imgs(base_path, plotted_imgs, delay, ext=None, suffix=None):
    """Export to an animated image.
    
    Defaults to an animated GIF unless ``ext`` specifies otherwise.
    Requires ``FFMpeg`` for MP4 file format exports and ``ImageMagick`` for
    all other types of exports.
    
    Args:
        base_path (str): String from which an output path will be constructed.
        plotted_imgs (List[:obj:`matplotlib.image.AxesImage`]): Sequence of
            images to include in the animation.
        delay (int): Delay between image displays in ms. If None, the delay
            will default to 100 ms.
        ext (str): Extension to use when saving, without the period. Defaults
            to None, in which case "gif" will be used.
        suffix (str): String to append to output path before extension;
            defaults to None to ignore.

    """
    if ext is None: ext = "gif"
    out_path = libmag.combine_paths(base_path, "animated", ext=ext)
    if suffix: out_path = libmag.insert_before_ext(out_path, suffix, "_")
    libmag.backup_file(out_path)
    if delay is None:
        delay = 100
    if plotted_imgs and len(plotted_imgs[0]) > 0:
        fig = plotted_imgs[0][0].figure
    else:
        libmag.warn("No images available to animate")
        return
    anim = animation.ArtistAnimation(fig,
                                     plotted_imgs,
                                     interval=delay,
                                     repeat_delay=0,
                                     blit=False)
    try:
        writer = "ffmpeg" if ext == "mp4" else "imagemagick"
        anim.save(out_path, writer=writer)
        print("saved animation file to {}".format(out_path))
    except ValueError as e:
        print(e)
        libmag.warn("No animation writer available for Matplotlib")
Example #10
def img_to_blobs_path(path):
    """Get the blobs path associated with an image or user-supplied.
    
    The user-supplied blobs path stored in :attr:`magmap.io.config.load_data`
    takes precedence over ``path``.
    
    Args:
        path (str): Image base path, without extension or MagellanMapper
            suffixes.

    Returns:
        str: Default MagellanMapper blobs path based on image path, or
        the config path if it is a string.

    """
    path_blobs = config.load_data[config.LoadData.BLOBS]
    if isinstance(path_blobs, str):
        # user-supplied path takes precedence
        return path_blobs
    return libmag.combine_paths(path, config.SUFFIX_BLOBS)
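A hedged illustration of the precedence described above; the paths are placeholders.

config.load_data[config.LoadData.BLOBS] = "custom/blobs.npz"
print(img_to_blobs_path("exp/sample"))  # -> "custom/blobs.npz"

config.load_data[config.LoadData.BLOBS] = None
print(img_to_blobs_path("exp/sample"))  # -> "exp/sample" + config.SUFFIX_BLOBS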
Example #11
def make_subimage_name(base, offset, shape, suffix=None):
    """Make name of subimage for a given offset and shape.

    The order of ``offset`` and ``shape`` is assumed to be z,y,x but
    will be reversed for the output name since the user-oriented ordering
    is x,y,z.
    
    Args:
        base (str): Start of name, which can include full parent path.
        offset (Tuple[int]): Offset, generally given as a tuple.
        shape (Tuple[int]): Shape, generally given as a tuple.
        suffix (str): Suffix to append, replacing any existing extension
            in ``base``; defaults to None.
    
    Returns:
        str: Name (or path) to subimage.
    """
    # sub-image offset/shape stored as z,y,x, but file named as x,y,z
    roi_site = "{}x{}".format(offset[::-1], shape[::-1]).replace(" ", "")
    name = libmag.insert_before_ext(base, roi_site, "_")
    if suffix:
        name = libmag.combine_paths(name, suffix)
    print("subimage name: {}".format(name))
    return name
Example #12
def setup_images(path=None,
                 series=None,
                 offset=None,
                 size=None,
                 proc_mode=None,
                 allow_import=True):
    """Sets up an image and all associated images and metadata.

    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.
    
    Args:
        path (str): Path to image from which MagellanMapper-style paths will 
            be generated.
        series (int): Image series number; defaults to None.
        offset (List[int]): Sub-image offset given in z,y,x; defaults to None.
        size (List[int]): Sub-image shape given in z,y,x; defaults to None.
        proc_mode (str): Processing mode, which should be a key in 
            :class:`config.ProcessTypes`, case-insensitive; defaults to None.
        allow_import (bool): True to allow importing the image if it
            cannot be loaded; defaults to True.
    
    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS:
            config.meta_dict[config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION:
            config.meta_dict[config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM:
            config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE:
            config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE:
            config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val

    # LOAD MAIN IMAGE

    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    load_subimage = offset is not None and size is not None
    config.resolutions = None

    # reset label images
    config.labels_img = None
    config.borders_img = None

    filename_base = importer.filename_to_base(path, series)
    subimg_base = None

    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = stack_detect.make_subimage_name(filename_base, offset,
                                                      size)
        filename_subimg = libmag.combine_paths(subimg_base,
                                               config.SUFFIX_SUBIMG)

        try:
            # load sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.image5d_io = config.LoadIO.NP
            print("Loaded sub-image from {} with shape {}".format(
                filename_subimg, config.image5d.shape))

            # after loading sub-image, load original image's metadata
            # for essential data such as vmin/vmax; will only warn if
            # fails to load since metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load".format(
                filename_subimg))

    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
    if proc_type in (config.ProcessTypes.LOAD, config.ProcessTypes.EXPORT_ROIS,
                     config.ProcessTypes.EXPORT_BLOBS,
                     config.ProcessTypes.DETECT):
        # load a blobs archive
        try:
            if subimg_base:
                try:
                    # load blobs generated from sub-image
                    config.blobs = load_blobs(subimg_base)
                except (FileNotFoundError, KeyError):
                    # fallback to loading from full image blobs and getting
                    # a subset, shifting them relative to sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = load_blobs(filename_base)
                    config.blobs, _ = detector.get_blobs_in_roi(config.blobs,
                                                                offset,
                                                                size,
                                                                reverse=False)
                    detector.shift_blob_rel_coords(config.blobs,
                                                   np.multiply(offset, -1))
            else:
                # load full image blobs
                config.blobs = load_blobs(filename_base)
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (config.ProcessTypes.LOAD,
                             config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2

    if path and config.image5d is None:
        # load or import the main image stack
        print("Loading main image")
        try:
            if path.endswith(sitk_io.EXTS_3D):
                # attempt to load a format supported by SimpleITK and prepend
                # time axis
                config.image5d = sitk_io.read_sitk_files(path)[None]
                config.image5d_io = config.LoadIO.SITK
            else:
                # load or import from MagellanMapper Numpy format
                import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
                if not import_only:
                    # load previously imported image
                    config.image5d = importer.read_file(path, series)
                if allow_import:
                    # re-import over existing image or import new image
                    if os.path.isdir(path) and all(
                        [r is None for r in config.reg_suffixes.values()]):
                        # import directory of single plane images to single
                        # stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        config.image5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only or config.image5d is None:
                        # import multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = config.prefix if config.prefix else import_path
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        config.image5d = importer.import_multiplane_images(
                            chls,
                            prefix,
                            import_md,
                            series,
                            channel=config.channel)
                config.image5d_io = config.LoadIO.NP
        except FileNotFoundError as e:
            print(e)
            print("Could not load {}, will fall back to any associated "
                  "registered image".format(path))

    if config.metadatas and config.metadatas[0]:
        # assign metadata from alternate file if given to supersede settings
        # for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])

    # main image is currently required since many parameters depend on it
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    if atlas_suffix is None and config.image5d is None:
        # fallback to atlas if main image not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        print(
            "main image is not set, falling back to registered "
            "image with suffix", atlas_suffix)
    # use prefix to get images registered to a different image, eg a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.image5d = sitk_io.read_sitk_files(
                path, reg_names=atlas_suffix)[None]
            config.image5d_io = config.LoadIO.SITK
        except FileNotFoundError as e:
            print(e)

    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    if annotation_suffix is not None:
        # load labels image, set up scaling, and load labels file
        try:
            # TODO: need to support multichannel labels images
            config.labels_img = sitk_io.read_sitk_files(
                path, reg_names=annotation_suffix)
            if config.image5d is not None:
                config.labels_scaling = importer.calc_scaling(
                    config.image5d, config.labels_img)
            if config.load_labels is not None:
                labels_ref = ontology.load_labels_ref(config.load_labels)
                if isinstance(labels_ref, pd.DataFrame):
                    # parse CSV files loaded into data frame
                    config.labels_ref_lookup = ontology.create_lookup_pd(
                        labels_ref)
                else:
                    # parse dict from ABA JSON file
                    config.labels_ref_lookup = (
                        ontology.create_aba_reverse_lookup(labels_ref))
        except FileNotFoundError as e:
            print(e)

    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]
    if borders_suffix is not None:
        # load borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(
                path, reg_names=borders_suffix)
        except FileNotFoundError as e:
            print(e)

    if (config.atlas_labels[config.AtlasLabels.ORIG_COLORS]
            and config.load_labels is not None):
        # load original labels image from same directory as ontology
        # file for consistent ID-color mapping, even if labels are missing
        try:
            config.labels_img_orig = sitk_io.load_registered_img(
                config.load_labels, config.RegNames.IMG_LABELS.value)
        except FileNotFoundError as e:
            print(e)
            libmag.warn(
                "could not load original labels image; colors may differ"
                "differ from it")

    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate main image specified num of times x90deg after loading since
        # need to rotate images output by deep learning toolkit
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))

    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop full image to bounds of sub-image
        config.image5d = plot_3d.prepare_subimg(config.image5d, size,
                                                offset)[None]
        config.image5d_is_roi = True

    # add any additional image5d thresholds for multichannel images, such
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)
Example #13
def load_blobs(img_path, check_scaling=False, scaled_shape=None, scale=None):
    """Load blobs from an archive.
    
    Scaling can be computed to translate blob coordinates into another
    space, such as a heat map for a downsampled image.
    
    Args:
        img_path (str): Base path to blobs.
        check_scaling (bool): True to check scaling, in which case
            the scaling factor and scaled resolutions will be returned.
            Defaults to False.
        scaled_shape (List): Shape of image to calculate scaling factor if
            this factor cannot be found from a transposed file's metadata;
            defaults to None.
        scale (int, float): Scalar scaling factor, used to find a
            rescaled file; defaults to None. To find a resized file instead,
            set an atlas profile with the resizing factor.

    Returns:
        :obj:`np.ndarray`, List, List: Array of blobs. If ``check_scaling``
        is True, also returns sequence of scaling factors to a scaled or
        resized image, or None if not loaded or given, and the resolutions
        of the full-sized image in which the blobs were detected.

    """
    # load blobs and display counts
    path = libmag.combine_paths(img_path, config.SUFFIX_BLOBS)
    print("Loading blobs from", path)
    with np.load(path) as archive:
        info = read_np_archive(archive)
        blobs = info["segments"]
        print("loaded {} blobs".format(len(blobs)))
        if config.verbose:
            detector.show_blobs_per_channel(blobs)
            print(info)
    if not check_scaling:
        return blobs

    # get scaling and resolutions from blob space to that of a down/upsampled
    # image space
    load_size = config.atlas_profile["target_size"]
    img_path_transposed = transformer.get_transposed_image_path(
        img_path, scale, load_size)
    scaling = None
    res = None
    if scale is not None or load_size is not None:
        # retrieve scaling from a rescaled/resized image
        _, img_info = importer.read_file(img_path_transposed,
                                         config.series,
                                         return_info=True)
        scaling = img_info["scaling"]
        res = np.multiply(config.resolutions[0], scaling)
        print("retrieved scaling from resized image:", scaling)
        print("rescaled resolution for full-scale image:", res)
    elif scaled_shape is not None:
        # fall back to scaling based on comparison to original image
        image5d = importer.read_file(img_path_transposed, config.series)
        scaling = importer.calc_scaling(image5d,
                                        None,
                                        scaled_shape=scaled_shape)
        res = config.resolutions[0]
        print("using scaling compared to full image:", scaling)
        print("resolution from full-scale image:", res)
    return blobs, scaling, res
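The two return shapes documented above, shown as a hypothetical usage sketch with a placeholder path:

# array of blobs only
blobs = load_blobs("exp/sample")
# blobs plus scaling factors and resolutions of the full-sized image
blobs, scaling, res = load_blobs("exp/sample", check_scaling=True)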
Example #14
def detect_blobs_large_image(filename_base, image5d, offset, size,
                             verify=False, save_dfs=True, full_roi=False):
    """Detect blobs within a large image through parallel processing of 
    smaller chunks.
    
    Args:
        filename_base: Base path to use file output.
        image5d: Large image to process as a Numpy array of t,z,y,x,[c]
        offset: Sub-image offset given as coordinates in z,y,x.
        size: Sub-image shape given in z,y,x.
        verify: True to verify detections against truth database; defaults 
            to False.
        save_dfs: True to save data frames to file; defaults to True.
        full_roi (bool): True to treat ``image5d`` as the full ROI; defaults
            to False.
    """
    time_start = time()
    if size is None or offset is None:
        # uses the entire stack if no size or offset specified
        size = image5d.shape[1:4]
        offset = (0, 0, 0)
    else:
        # change base filename for ROI-based partial stack
        filename_base = make_subimage_name(filename_base, offset, size)
    filename_subimg = libmag.combine_paths(filename_base, config.SUFFIX_SUBIMG)
    filename_blobs = libmag.combine_paths(filename_base, config.SUFFIX_BLOBS)
    
    # get ROI for given region, including all channels
    if full_roi:
        # treat the full image as the ROI
        roi = image5d[0]
    else:
        roi = plot_3d.prepare_subimg(image5d, size, offset)
    _, channels = plot_3d.setup_channels(roi, config.channel, 3)
    
    # prep chunking ROI into sub-ROIs with size based on segment_size, scaling
    # by physical units to make more independent of resolution
    time_detection_start = time()
    settings = config.roi_profile  # use default settings
    scaling_factor = detector.calc_scaling_factor()
    print("microsope scaling factor based on resolutions: {}"
          .format(scaling_factor))
    denoise_size = config.roi_profile["denoise_size"]
    denoise_max_shape = None
    if denoise_size:
        # further subdivide each sub-ROI for local preprocessing
        denoise_max_shape = np.ceil(
            np.multiply(scaling_factor, denoise_size)).astype(int)

    # overlap sub-ROIs to minimize edge effects
    overlap_base = chunking.calc_overlap()
    tol = np.multiply(overlap_base, settings["prune_tol_factor"]).astype(int)
    overlap_padding = np.copy(tol)
    overlap = np.copy(overlap_base)
    exclude_border = config.roi_profile["exclude_border"]
    if exclude_border is not None:
        # exclude border to avoid blob detector edge effects, where blobs
        # often collect at the faces of the sub-ROI;
        # ensure that overlap is greater than twice the border exclusion per
        # axis so that no plane will be excluded from both overlapping sub-ROIs
        exclude_border_thresh = np.multiply(2, exclude_border)
        overlap_less = np.less(overlap, exclude_border_thresh)
        overlap[overlap_less] = exclude_border_thresh[overlap_less]
        excluded = np.greater(exclude_border, 0)
        overlap[excluded] += 1  # additional padding
        overlap_padding[excluded] = 0  # no need to prune past excluded border
    print("sub-ROI overlap: {}, pruning tolerance: {}, padding beyond "
          "overlap for pruning: {}, exclude borders: {}"
          .format(overlap, tol, overlap_padding, exclude_border))
    max_pixels = np.ceil(np.multiply(
        scaling_factor, 
        config.roi_profile["segment_size"])).astype(int)
    print("preprocessing max shape: {}, detection max pixels: {}"
          .format(denoise_max_shape, max_pixels))
    sub_roi_slices, sub_rois_offsets = chunking.stack_splitter(
        roi.shape, max_pixels, overlap)
    # TODO: option to distribute groups of sub-ROIs to different servers 
    # for blob detection
    seg_rois = detect_blobs_sub_rois(
        roi, sub_roi_slices, sub_rois_offsets, denoise_max_shape, exclude_border)
    detection_time = time() - time_detection_start
    print("blob detection time (s):", detection_time)
    
    # prune blobs in overlapping portions of sub-ROIs
    time_pruning_start = time()
    segments_all, df_pruning = _prune_blobs_mp(
        roi, seg_rois, overlap, tol, sub_roi_slices, sub_rois_offsets, channels,
        overlap_padding)
    pruning_time = time() - time_pruning_start
    print("blob pruning time (s):", pruning_time)
    #print("maxes:", np.amax(segments_all, axis=0))
    
    # get weighted mean of ratios
    if df_pruning is not None:
        print("\nBlob pruning ratios:")
        path_pruning = "blob_ratios.csv" if save_dfs else None
        df_pruning_all = df_io.data_frames_to_csv(
            df_pruning, path_pruning, show=" ")
        cols = df_pruning_all.columns.tolist()
        blob_pruning_means = {}
        if "blobs" in cols:
            blobs_unpruned = df_pruning_all["blobs"]
            num_blobs_unpruned = np.sum(blobs_unpruned)
            for col in cols[1:]:
                blob_pruning_means["mean_{}".format(col)] = [
                    np.sum(np.multiply(df_pruning_all[col], blobs_unpruned)) 
                    / num_blobs_unpruned]
            path_pruning_means = "blob_ratios_means.csv" if save_dfs else None
            df_pruning_means = df_io.dict_to_data_frame(
                blob_pruning_means, path_pruning_means, show=" ")
        else:
            print("no blob ratios found")
    
    '''# report any remaining duplicates
    np.set_printoptions(linewidth=500, threshold=10000000)
    print("all blobs (len {}):".format(len(segments_all)))
    sort = np.lexsort(
        (segments_all[:, 2], segments_all[:, 1], segments_all[:, 0]))
    blobs = segments_all[sort]
    print(blobs)
    print("checking for duplicates in all:")
    print(detector.remove_duplicate_blobs(blobs, slice(0, 3)))
    '''
    
    stats_detection = None
    fdbk = None
    if segments_all is not None:
        # remove the duplicated elements that were used for pruning
        detector.replace_rel_with_abs_blob_coords(segments_all)
        segments_all = detector.remove_abs_blob_coords(segments_all)
        
        # compare detected blobs with truth blobs
        # TODO: assumes ground truth is relative to any ROI offset,
        # but should make customizable
        if verify:
            db_path_base = None
            exp_name = os.path.splitext(os.path.basename(config.filename))[0]
            try:
                if config.truth_db is None:
                    # find and load truth DB based on filename and subimage
                    db_path_base = os.path.basename(filename_base)
                    print("about to verify with truth db from {}"
                          .format(db_path_base))
                    sqlite.load_truth_db(db_path_base)
                if config.truth_db is not None:
                    # truth DB may contain multiple experiments for different
                    # subimages; series not included in exp name since in ROI
                    rois = config.truth_db.get_rois(exp_name)
                    if rois is None:
                        # exp may have been named by ROI
                        print("{} experiment name not found, will try with"
                              "ROI offset/size".format(exp_name))
                        exp_name = make_subimage_name(exp_name, offset, size)
                        rois = config.truth_db.get_rois(exp_name)
                    if rois is None:
                        raise LookupError(
                            "No truth set ROIs found for experiment {}, will "
                            "skip detection verification".format(exp_name))
                    print("load ROIs from exp: {}".format(exp_name))
                    exp_id = sqlite.insert_experiment(
                        config.verified_db.conn, config.verified_db.cur, 
                        exp_name, None)
                    verify_tol = np.multiply(
                        overlap_base, settings["verify_tol_factor"])
                    stats_detection, fdbk = detector.verify_rois(
                        rois, segments_all, config.truth_db.blobs_truth, 
                        verify_tol, config.verified_db, exp_id, config.channel)
            except FileNotFoundError:
                libmag.warn("Could not load truth DB from {}; "
                            "will not verify ROIs".format(db_path_base))
            except LookupError as e:
                libmag.warn(str(e))
    
    file_time_start = time()
    if config.save_subimg:
        if (isinstance(config.image5d, np.memmap) and 
                config.image5d.filename == os.path.abspath(filename_subimg)):
            # file at sub-image save path may have been opened as a memmap
            # file, in which case saving would fail
            libmag.warn("{} is currently open, cannot save sub-image"
                        .format(filename_subimg))
        else:
            # write sub-image, which is in ROI (3D) format
            with open(filename_subimg, "wb") as f:
                np.save(f, roi)

    # save blobs
    # TODO: only segments used; consider removing the rest except ver
    with open(filename_blobs, "wb") as outfile_blobs:
        np.savez(outfile_blobs, ver=BLOBS_NP_VER, segments=segments_all,
                 resolutions=config.resolutions,
                 basename=os.path.basename(config.filename),  # only save name
                 offset=offset, roi_size=size)  # None unless explicitly set
    file_save_time = time() - file_time_start
    
    # whole image benchmarking time
    times = (
        [detection_time], 
        [pruning_time], 
        time() - time_start)
    times_dict = {}
    for key, val in zip(StackTimes, times):
        times_dict[key] = val
    if segments_all is None:
        print("\nNo blobs detected")
    else:
        print("\nTotal blobs found:", len(segments_all))
        detector.show_blobs_per_channel(segments_all)
    print("file save time:", file_save_time)
    print("\nTotal detection processing times (s):")
    path_times = "stack_detection_times.csv" if save_dfs else None
    df_io.dict_to_data_frame(times_dict, path_times, show=" ")
    
    return stats_detection, fdbk, segments_all
Example #15
def process_file(path,
                 proc_mode,
                 series=None,
                 subimg_offset=None,
                 subimg_size=None,
                 roi_offset=None,
                 roi_size=None):
    """Processes a single image file non-interactively.

    Assumes that the image has already been set up.
    
    Args:
        path (str): Path to image from which MagellanMapper-style paths will 
            be generated.
        proc_mode (str): Processing mode, which should be a key in
            :class:`config.ProcessTypes`, case-insensitive.
        series (int): Image series number; defaults to None.
        subimg_offset (List[int]): Sub-image offset as (z,y,x) to load;
            defaults to None.
        subimg_size (List[int]): Sub-image size as (z,y,x) to load;
            defaults to None.
        roi_offset (List[int]): Region of interest offset as (x, y, z) to
            process; defaults to None.
        roi_size (List[int]): Region of interest size of region to process,
            given as (x, y, z); defaults to None.
    
    Returns:
        Tuple of stats from processing, or None if no stats, and 
        text feedback from the processing, or None if no feedback.
    """
    # PROCESS BY TYPE
    stats = None
    fdbk = None
    filename_base = importer.filename_to_base(path, series)
    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
    if proc_type is config.ProcessTypes.LOAD:
        # loading completed
        return None, None

    elif proc_type is config.ProcessTypes.IMPORT_ONLY:
        # already imported, so nothing further to do
        print("imported {}, will exit".format(path))

    elif proc_type is config.ProcessTypes.EXPORT_ROIS:
        # export ROIs; assumes that info_proc was already loaded to
        # give smaller region from which smaller ROIs from the truth DB
        # will be extracted
        from magmap.io import export_rois
        db = config.db if config.truth_db is None else config.truth_db
        export_rois.export_rois(db, config.image5d, config.channel,
                                filename_base,
                                config.plot_labels[config.PlotLabels.PADDING],
                                config.unit_factor, config.truth_db_mode,
                                os.path.basename(config.filename))

    elif proc_type is config.ProcessTypes.TRANSFORM:
        # transpose, rescale, and/or resize whole large image
        transformer.transpose_img(
            path,
            series,
            plane=config.plane,
            rescale=config.transform[config.Transforms.RESCALE],
            target_size=config.roi_size)

    elif proc_type in (config.ProcessTypes.EXTRACT,
                       config.ProcessTypes.ANIMATED):
        # generate animated GIF or extract single plane
        from magmap.io import export_stack
        export_stack.stack_to_img(config.filenames, roi_offset, roi_size,
                                  series, subimg_offset, subimg_size,
                                  proc_type is config.ProcessTypes.ANIMATED,
                                  config.suffix)

    elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
        # export blobs to CSV file
        from magmap.io import export_rois
        export_rois.blobs_to_csv(config.blobs, filename_base)

    elif proc_type is config.ProcessTypes.DETECT:
        # detect blobs in the full image
        stats, fdbk, segments_all = stack_detect.detect_blobs_large_image(
            filename_base, config.image5d, subimg_offset, subimg_size,
            config.truth_db_mode is config.TruthDBModes.VERIFY,
            not config.grid_search_profile, config.image5d_is_roi)

    elif proc_type is config.ProcessTypes.EXPORT_PLANES:
        # export each plane as a separate image file
        from magmap.io import export_stack
        export_stack.export_planes(config.image5d, config.prefix,
                                   config.savefig, config.channel)

    elif proc_type is config.ProcessTypes.EXPORT_RAW:
        # export the main image as a raw data file
        out_path = libmag.combine_paths(config.filename, ".raw", sep="")
        libmag.backup_file(out_path)
        np_io.write_raw_file(config.image5d, out_path)

    elif proc_type is config.ProcessTypes.PREPROCESS:
        # pre-process a whole image and save to file
        # TODO: consider chunking option for larger images
        profile = config.get_roi_profile(0)
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(config.filename, "_preproc")
        transformer.preprocess_img(config.image5d, profile["preprocess"],
                                   config.channel, out_path)

    return stats, fdbk
Example #16
def plot_clusters_by_label(path, z, suffix=None, show=True, scaling=None):
    """Plot separate sets of clusters for each label.
    
    Args:
        path (str): Base path to blobs file with clusters.
        z (int): z-plane to plot.
        suffix (str): Suffix for ``path``; defaults to None.
        show (bool): True to show; defaults to True.
        scaling (List): Sequence of scaling from blobs' coordinate space
             to that of :attr:`config.labels_img`.

    """
    mod_path = path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(path, suffix)
    blobs = np.load(libmag.combine_paths(mod_path,
                                         config.SUFFIX_BLOB_CLUSTERS))
    label_ids = np.unique(blobs[:, 3])
    fig, gs = plot_support.setup_fig(
        1, 1, config.plot_labels[config.PlotLabels.SIZE])
    ax = fig.add_subplot(gs[0, 0])
    plot_support.hide_axes(ax)

    # plot underlying atlas
    np_io.setup_images(mod_path)
    if config.reg_suffixes[config.RegSuffixes.ATLAS]:
        # use atlas if explicitly set
        img = config.image5d
    else:
        # default to black background
        img = np.zeros_like(config.labels_img)[None]
    stacker = export_stack.setup_stack(img,
                                       mod_path,
                                       slice_vals=(z, z + 1),
                                       labels_imgs=(config.labels_img,
                                                    config.borders_img))
    stacker.build_stack(ax, config.plot_labels[config.PlotLabels.SCALE_BAR])
    # export_stack.reg_planes_to_img(
    #     (np.zeros(config.labels_img.shape[1:], dtype=int),
    #      config.labels_img[z]), ax=ax)

    if scaling is not None:
        print("scaling blobs cluster coordinates by", scaling)
        blobs = blobs.astype(float)
        blobs[:, :3] = np.multiply(blobs[:, :3], scaling)
        blobs[:, 0] = np.floor(blobs[:, 0])

    # plot nuclei by label, colored based on cluster size within each label
    colors = colormaps.discrete_colormap(len(np.unique(blobs[:, 4])),
                                         prioritize_default="cn") / 255.
    col_noise = (1, 1, 1, 1)
    for label_id in label_ids:
        if label_id == 0:
            # skip blobs in background
            continue
        # sort blobs within label by cluster size (descending order),
        # including clusters within all z-planes to keep same order across zs
        blobs_lbl = blobs[blobs[:, 3] == label_id]
        clus_lbls, clus_lbls_counts = np.unique(blobs_lbl[:, 4],
                                                return_counts=True)
        clus_lbls = clus_lbls[np.argsort(clus_lbls_counts)][::-1]
        blobs_lbl = blobs_lbl[blobs_lbl[:, 0] == z]
        for i, (clus_lbl, color) in enumerate(zip(clus_lbls, colors)):
            blobs_clus = blobs_lbl[blobs_lbl[:, 4] == clus_lbl]
            if len(blobs_clus) < 1: continue
            # default to small, translucent dominant cluster points
            size = 0.1
            alpha = 0.5
            if clus_lbl == -1:
                # color all noise points the same and emphasize points
                color = col_noise
                size = 0.5
                alpha = 1
            print(label_id, clus_lbl, color, len(blobs_clus))
            ax.scatter(blobs_clus[:, 2],
                       blobs_clus[:, 1],
                       color=color,
                       s=size,
                       alpha=alpha)
    plot_support.save_fig(mod_path, config.savefig, "_clusplot")
    if show: plot_support.show()
Example #17
def edge_aware_segmentation(
        path_atlas: str, atlas_profile: atlas_prof.AtlasProfile,
        show: bool = True, atlas: bool = True, suffix: Optional[str] = None,
        exclude_labels: Optional[pd.DataFrame] = None, mirror_mult: int = -1):
    """Segment an atlas using its previously generated edge map.
    
    Labels may not match their own underlying atlas image well, 
    particularly in the orthogonal directions in which the labels 
    were not constructed. To improve alignment between the labels 
    and the atlas itself, register the labels to an automated, roughly 
    segmented version of the atlas. The goal is to improve the 
    labels' alignment so that the atlas/labels combination can be 
    used for another form of automated segmentation by registering 
    them to experimental brains via :func:``register``.
    
    Edge files are assumed to have been generated by 
    :func:``make_edge_images``.
    
    Args:
        path_atlas: Path to the fixed file, typically the atlas file 
            with stained sections. The corresponding edge and labels 
            files will be loaded based on this path.
        atlas_profile: Atlas profile.
        show: True if the output images should be displayed; defaults 
            to True.
        atlas: True if the primary image is an atlas, which is assumed 
            to be symmetrical. False if the image is an experimental/sample 
            image, in which case segmentation will be performed on the full 
            images, and stats will not be performed.
        suffix: Modifier to append to end of ``path_atlas`` basename for 
            registered image files that were output to a modified name; 
            defaults to None. If ``atlas`` is True, ``suffix`` will only 
            be applied to saved files, with files still loaded based on the 
            original path.
        exclude_labels: Sequence of labels to exclude from the
            segmentation; defaults to None.
        mirror_mult: Multiplier for mirrored labels; defaults to -1
            to make mirrored labels the inverse of their source labels.
    """
    # adjust image path with suffix
    load_path = path_atlas
    mod_path = path_atlas
    if suffix is not None:
        mod_path = libmag.insert_before_ext(mod_path, suffix)
        if atlas: load_path = mod_path
    
    # load corresponding files via SimpleITK
    atlas_sitk = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_ATLAS.value, get_sitk=True)
    atlas_sitk_edge = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_ATLAS_EDGE.value, get_sitk=True)
    labels_sitk = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
    labels_sitk_markers = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_LABELS_MARKERS.value, get_sitk=True)
    
    # get Numpy arrays of images
    atlas_img_np = sitk.GetArrayFromImage(atlas_sitk)
    atlas_edge = sitk.GetArrayFromImage(atlas_sitk_edge)
    labels_img_np = sitk.GetArrayFromImage(labels_sitk)
    markers = sitk.GetArrayFromImage(labels_sitk_markers)
    
    # segment image from markers
    sym_axis = atlas_refiner.find_symmetric_axis(atlas_img_np)
    mirrorred = atlas and sym_axis >= 0
    len_half = None
    seg_args = {"exclude_labels": exclude_labels}
    edge_prof = atlas_profile[profiles.RegKeys.EDGE_AWARE_REANNOTATION]
    if edge_prof:
        edge_filt = edge_prof[profiles.RegKeys.WATERSHED_MASK_FILTER]
        if edge_filt and len(edge_filt) > 1:
            # watershed mask filter settings from atlas profile
            seg_args["mask_filt"] = edge_filt[0]
            seg_args["mask_filt_size"] = edge_filt[1]
    if mirrorred:
        # segment only half of image, assuming symmetry
        len_half = atlas_img_np.shape[sym_axis] // 2
        slices = [slice(None)] * labels_img_np.ndim
        slices[sym_axis] = slice(len_half)
        sl = tuple(slices)
        labels_seg = segmenter.segment_from_labels(
            atlas_edge[sl], markers[sl], labels_img_np[sl], **seg_args)
    else:
        # segment the full image, including excluded labels on the opposite
        # side; build a new list since list.extend would return None
        exclude_labels = (exclude_labels.tolist()
                          + (mirror_mult * exclude_labels).tolist())
        seg_args["exclude_labels"] = exclude_labels
        labels_seg = segmenter.segment_from_labels(
            atlas_edge, markers, labels_img_np, **seg_args)
    
    smoothing = atlas_profile["smooth"]
    smoothing_mode = atlas_profile["smoothing_mode"]
    cond = ["edge-aware_seg"]
    if smoothing is not None:
        # smoothing by opening operation based on profile setting
        meas_smoothing = atlas_profile["meas_smoothing"]
        cond.append("smoothing")
        df_aggr, df_raw = atlas_refiner.smooth_labels(
            labels_seg, smoothing, smoothing_mode,
            meas_smoothing, labels_sitk.GetSpacing()[::-1])
        df_base_path = os.path.splitext(mod_path)[0]
        if df_raw is not None:
            # write raw smoothing metrics
            df_io.data_frames_to_csv(
                df_raw, f"{df_base_path}_{config.PATH_SMOOTHING_RAW_METRICS}")
        if df_aggr is not None:
            # write aggregated smoothing metrics
            df_io.data_frames_to_csv(
                df_aggr, f"{df_base_path}_{config.PATH_SMOOTHING_METRICS}")
    
    if mirrorred:
        # mirror back to other half
        labels_seg = _mirror_imported_labels(
            labels_seg, len_half, mirror_mult, sym_axis)
    
    # expand background to smoothed background of original labels to 
    # roughly match background while still allowing holes to be filled
    crop = atlas_profile["crop_to_orig"]
    atlas_refiner.crop_to_orig(
        labels_img_np, labels_seg, crop)
    
    if labels_seg.dtype != labels_img_np.dtype:
        # watershed may give different output type, so cast back if so
        labels_seg = labels_seg.astype(labels_img_np.dtype)
    labels_sitk_seg = sitk_io.replace_sitk_with_numpy(labels_sitk, labels_seg)
    
    # show DSCs for labels
    _logger.info(
        "\nMeasuring overlap of individual original and watershed labels:")
    dsc_lbls_comb = atlas_refiner.measure_overlap_labels(
        labels_sitk, labels_sitk_seg)
    _logger.info(
        "\nMeasuring overlap of combined original and watershed labels:")
    dsc_lbls_indiv = atlas_refiner.measure_overlap_labels(
        atlas_refiner.make_labels_fg(labels_sitk), 
        atlas_refiner.make_labels_fg(labels_sitk_seg))
    _logger.info("")
    
    # measure and save whole atlas metrics
    metrics = {
        config.AtlasMetrics.SAMPLE: [os.path.basename(mod_path)],
        config.AtlasMetrics.REGION: config.REGION_ALL,
        config.AtlasMetrics.CONDITION: "|".join(cond),
        config.AtlasMetrics.DSC_LABELS_ORIG_NEW_COMBINED: dsc_lbls_comb,
        config.AtlasMetrics.DSC_LABELS_ORIG_NEW_INDIV: dsc_lbls_indiv,
    }
    df_metrics_path = libmag.combine_paths(
        mod_path, config.PATH_ATLAS_IMPORT_METRICS)
    atlas_refiner.measure_atlas_refinement(
        metrics, atlas_sitk, labels_sitk_seg, atlas_profile, df_metrics_path)

    # show and write image to same directory as atlas with appropriate suffix
    sitk_io.write_reg_images(
        {config.RegNames.IMG_LABELS.value: labels_sitk_seg}, mod_path)
    if show: sitk.Show(labels_sitk_seg)
    return path_atlas
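For reference, the "combined" DSC above presumably compares the original and watershed labels after collapsing all labels into a single foreground mask. A minimal sketch of such a measurement in plain SimpleITK is shown below; the helper name dice_combined is hypothetical, and the actual atlas_refiner.measure_overlap_labels implementation may differ.

import SimpleITK as sitk

def dice_combined(labels_a, labels_b):
    # hypothetical helper: binarize both label images so every nonzero label
    # counts as foreground, then compute the Dice similarity coefficient
    fg_a = sitk.NotEqual(labels_a, 0)
    fg_b = sitk.NotEqual(labels_b, 0)
    measures = sitk.LabelOverlapMeasuresImageFilter()
    measures.Execute(fg_a, fg_b)
    return measures.GetDiceCoefficient()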
Ejemplo n.º 18
0
def setup_images(path: str,
                 series: Optional[int] = None,
                 offset: Optional[Sequence[int]] = None,
                 size: Optional[Sequence[int]] = None,
                 proc_type: Optional["config.ProcessTypes"] = None,
                 allow_import: bool = True,
                 fallback_main_img: bool = True):
    """Sets up an image and all associated images and metadata.

    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.
    
    Args:
        path: Path to image from which MagellanMapper-style paths will 
            be generated.
        series: Image series number; defaults to None.
        offset: Sub-image offset given in z,y,x; defaults to None.
        size: Sub-image shape given in z,y,x; defaults to None.
        proc_type: Processing type.
        allow_import: True to allow importing the image if it
            cannot be loaded; defaults to True.
        fallback_main_img: True to fall back to loading a registered image,
            if available, when the main image cannot be loaded; defaults to
            True.
    
    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS:
            config.meta_dict[config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION:
            config.meta_dict[config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM:
            config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE:
            config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE:
            config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val

        res = import_md[config.MetaKeys.RESOLUTIONS]
        if res is None:
            # default to 1 for x,y,z since image resolutions are required
            res = [1] * 3
            import_md[config.MetaKeys.RESOLUTIONS] = res
            _logger.warn("No image resolutions found. Defaulting to: %s", res)

    # LOAD MAIN IMAGE

    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    config.img5d = Image5d()
    load_subimage = offset is not None and size is not None
    config.resolutions = None

    # reset label images
    config.labels_img = None
    config.labels_img_sitk = None
    config.labels_img_orig = None
    config.borders_img = None
    config.labels_meta = None
    config.labels_ref = None

    # reset blobs
    config.blobs = None

    filename_base = importer.filename_to_base(path, series)
    subimg_base = None
    blobs = None

    # registered images set to load
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]

    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = naming.make_subimage_name(filename_base, offset, size)
        filename_subimg = libmag.combine_paths(subimg_base,
                                               config.SUFFIX_SUBIMG)

        try:
            # load sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.img5d.img = config.image5d
            config.img5d.path_img = filename_subimg
            config.img5d.img_io = config.LoadIO.NP
            config.img5d.subimg_offset = offset
            config.img5d.subimg_size = size
            print("Loaded sub-image from {} with shape {}".format(
                filename_subimg, config.image5d.shape))

            # after loading sub-image, load original image's metadata
            # for essential data such as vmin/vmax; will only warn if
            # fails to load since metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load".format(
                filename_subimg))

    if config.load_data[config.LoadData.BLOBS] or proc_type in (
            config.ProcessTypes.LOAD, config.ProcessTypes.COLOC_MATCH,
            config.ProcessTypes.EXPORT_ROIS, config.ProcessTypes.EXPORT_BLOBS):
        # load a blobs archive
        blobs = detector.Blobs()
        try:
            if subimg_base:
                try:
                    # load blobs generated from sub-image
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(subimg_base))
                except (FileNotFoundError, KeyError):
                    # fallback to loading from full image blobs and getting
                    # a subset, shifting them relative to sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(filename_base))
                    blobs.blobs, _ = detector.get_blobs_in_roi(blobs.blobs,
                                                               offset,
                                                               size,
                                                               reverse=False)
                    detector.Blobs.shift_blob_rel_coords(
                        blobs.blobs, np.multiply(offset, -1))
            else:
                # load full image blobs
                config.blobs = blobs.load_blobs(
                    img_to_blobs_path(filename_base))
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (config.ProcessTypes.LOAD,
                             config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2

    if path and config.image5d is None and not atlas_suffix:
        # load or import the main image stack
        print("Loading main image")
        try:
            path_lower = path.lower()
            import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
            if path_lower.endswith(sitk_io.EXTS_3D):
                # load format supported by SimpleITK and prepend time axis;
                # if 2D, convert to 3D
                img5d = sitk_io.read_sitk_files(path, make_3d=True)
            elif not import_only and path_lower.endswith((".tif", ".tiff")):
                # load TIF file directly
                img5d, meta = read_tif(path)
                config.resolutions = meta[config.MetaKeys.RESOLUTIONS]
            else:
                # load or import from MagellanMapper Numpy format
                img5d = None
                if not import_only:
                    # load previously imported image
                    img5d = importer.read_file(path, series)
                if allow_import and (img5d is None or img5d.img is None):
                    # import image; will re-import over any existing image file
                    if os.path.isdir(path) and all(
                        [r is None for r in config.reg_suffixes.values()]):
                        # import directory of single plane images to single
                        # stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        img5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only:
                        # import multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = config.prefix if config.prefix else import_path
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        img5d = importer.import_multiplane_images(
                            chls,
                            prefix,
                            import_md,
                            series,
                            channel=config.channel)
            if img5d is not None:
                # set loaded main image in config
                config.img5d = img5d
                config.image5d = config.img5d.img
        except FileNotFoundError as e:
            _logger.exception(e)
            _logger.info("Could not load %s", path)

    if config.metadatas and config.metadatas[0]:
        # assign metadata from alternate file if given to supersede settings
        # for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])

    # main image is currently required since many parameters depend on it
    if fallback_main_img and atlas_suffix is None and config.image5d is None:
        # fallback to atlas if main image not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        _logger.info(
            "Main image is not set, falling back to registered image with "
            "suffix %s", atlas_suffix)
    # use prefix to get images registered to a different image, eg a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.img5d = sitk_io.read_sitk_files(path,
                                                   atlas_suffix,
                                                   make_3d=True)
            config.image5d = config.img5d.img
        except FileNotFoundError as e:
            print(e)

    # load metadata related to the labels image
    config.labels_metadata = labels_meta.LabelsMeta(
        f"{path}." if config.prefix else path).load()

    # load labels reference file, prioritizing path given by user
    # and falling back to any extension matching PATH_LABELS_REF
    path_labels_refs = [config.load_labels]
    labels_path_ref = config.labels_metadata.path_ref
    if labels_path_ref:
        path_labels_refs.append(labels_path_ref)
    labels_ref = None
    for ref in path_labels_refs:
        if not ref: continue
        try:
            # load labels reference file
            labels_ref = ontology.LabelsRef(ref).load()
            if labels_ref.ref_lookup is not None:
                config.labels_ref = labels_ref
                _logger.debug("Loaded labels reference file from %s", ref)
                break
        except (FileNotFoundError, KeyError):
            pass
    if any(path_labels_refs) and (labels_ref is None
                                  or labels_ref.ref_lookup is None):
        # warn if a labels path was given but no reference could be loaded
        _logger.warn(
            "Unable to load labels reference file from '%s', skipping",
            path_labels_refs)

    if annotation_suffix is not None:
        try:
            # load labels image
            # TODO: need to support multichannel labels images
            img5d, config.labels_img_sitk = sitk_io.read_sitk_files(
                path, annotation_suffix, True, True)
            config.labels_img = img5d.img[0]
        except FileNotFoundError as e:
            print(e)
            if config.image5d is not None:
                # create a blank labels image for custom annotation; colormap
                # can be generated for the original labels loaded below
                config.labels_img = np.zeros(config.image5d.shape[1:4],
                                             dtype=int)
                print("Created blank labels image from main image")
        if config.image5d is not None and config.labels_img is not None:
            # set up scaling factors by dimension between intensity and
            # labels images
            config.labels_scaling = importer.calc_scaling(
                config.image5d, config.labels_img)

    if borders_suffix is not None:
        # load borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(path,
                                                         borders_suffix,
                                                         make_3d=True).img[0]
        except FileNotFoundError as e:
            print(e)

    if config.atlas_labels[config.AtlasLabels.ORIG_COLORS]:
        labels_orig_ids = config.labels_metadata.region_ids_orig
        if labels_orig_ids is None:
            if config.load_labels is not None:
                # load original labels image from same directory as ontology
                # file for consistent ID-color mapping, even if labels are missing
                try:
                    config.labels_img_orig = sitk_io.load_registered_img(
                        config.load_labels, config.RegNames.IMG_LABELS.value)
                except FileNotFoundError as e:
                    print(e)
            if config.labels_img is not None and config.labels_img_orig is None:
                _logger.warn(
                    "Could not load original labels image IDs; colors may "
                    "differ from the original image")

    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate the main image by the given number of 90-degree turns after
        # loading, eg to reorient images output by a deep learning toolkit
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))

    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop full image to bounds of sub-image
        config.image5d = plot_3d.prepare_subimg(config.image5d, offset,
                                                size)[None]
        config.image5d_is_roi = True

    # add any additional image5d thresholds for multichannel images, such
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)

    if config.labels_img is not None:
        # make discrete colormap for labels image
        config.cmap_labels = colormaps.setup_labels_cmap(config.labels_img)

    if (blobs is not None and blobs.blobs is not None
            and config.img5d.img is not None and blobs.roi_size is not None):
        # scale blob coordinates to main image if shapes differ
        scaling = np.divide(config.img5d.img.shape[1:4], blobs.roi_size)
        # scale radius by mean of other dimensions' scaling
        scaling = np.append(scaling, np.mean(scaling))
        if not np.all(scaling == 1):
            _logger.debug("Scaling blobs to main image by factor: %s", scaling)
            blobs.blobs[:, :4] = ontology.scale_coords(blobs.blobs[:, :4],
                                                       scaling)
        blobs.scaling = scaling
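A minimal usage sketch of setup_images follows, assuming the function and its helpers live in magmap.io.np_io (as the unqualified img_to_blobs_path call suggests) and that config is importable from magmap.settings; the image path is hypothetical and should point to a previously imported image, and requesting the registered annotation is optional.

from magmap.io import np_io
from magmap.settings import config

# hypothetical path to a previously imported image
path = "/data/brain/sample.czi"
config.filename = path
# optionally request the registered labels image alongside the main image
config.reg_suffixes[config.RegSuffixes.ANNOTATION] = (
    config.RegNames.IMG_LABELS.value)

np_io.setup_images(path, proc_type=config.ProcessTypes.LOAD)
print("main image:", None if config.image5d is None else config.image5d.shape)
print("labels image loaded:", config.labels_img is not None)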
Ejemplo n.º 19
0
def detect_blobs_blocks(filename_base, image5d, offset, size, channels,
                        verify=False, save_dfs=True, full_roi=False,
                        coloc=False):
    """Detect blobs by block processing of a large image.
    
    All channels are processed in the same blocks.
    
    Args:
        filename_base: Base path to use for file output.
        image5d: Large image to process as a Numpy array of ``t,z,y,x,[c]``.
        offset: Sub-image offset given as coordinates in z,y,x.
        size: Sub-image shape given in z,y,x.
        channels (Sequence[int]): Sequence of channels, where None detects
            in all channels.
        verify: True to verify detections against truth database; defaults 
            to False.
        save_dfs: True to save data frames to file; defaults to True.
        full_roi (bool): True to treat ``image5d`` as the full ROI; defaults
            to False.
        coloc (bool): True to perform blob co-localizations; defaults to False.
    
    Returns:
        tuple[int, int, int], str, :class:`magmap.cv.detector.Blobs`:
        Accuracy metrics from :class:`magmap.cv.detector.verify_rois`,
        feedback message from this same function, and detected blobs.
    
    """
    time_start = time()
    subimg_path_base = filename_base
    if size is None or offset is None:
        # uses the entire stack if no size or offset specified
        size = image5d.shape[1:4]
        offset = (0, 0, 0)
    else:
        # get base path for sub-image
        subimg_path_base = naming.make_subimage_name(
            filename_base, offset, size)
    filename_blobs = libmag.combine_paths(subimg_path_base, config.SUFFIX_BLOBS)
    
    # get ROI for given region, including all channels
    if full_roi:
        # treat the full image as the ROI
        roi = image5d[0]
    else:
        roi = plot_3d.prepare_subimg(image5d, offset, size)
    num_chls_roi = 1 if len(roi.shape) < 4 else roi.shape[3]
    if num_chls_roi < 2:
        coloc = False
        print("Unable to co-localize as image has only 1 channel")
    
    # prep chunking ROI into sub-ROIs with size based on segment_size, scaling
    # by physical units to make more independent of resolution; use profile
    # from first channel to be processed for block settings
    time_detection_start = time()
    settings = config.get_roi_profile(channels[0])
    print("Profile for block settings:", settings[settings.NAME_KEY])
    sub_roi_slices, sub_rois_offsets, denoise_max_shape, exclude_border, \
        tol, overlap_base, overlap, overlap_padding = setup_blocks(
            settings, roi.shape)
    
    # TODO: option to distribute groups of sub-ROIs to different servers 
    # for blob detection
    seg_rois = StackDetector.detect_blobs_sub_rois(
        roi, sub_roi_slices, sub_rois_offsets, denoise_max_shape,
        exclude_border, coloc, channels)
    detection_time = time() - time_detection_start
    print("blob detection time (s):", detection_time)
    
    # prune blobs in overlapping portions of sub-ROIs
    time_pruning_start = time()
    segments_all, df_pruning = StackPruner.prune_blobs_mp(
        roi, seg_rois, overlap, tol, sub_roi_slices, sub_rois_offsets, channels,
        overlap_padding)
    pruning_time = time() - time_pruning_start
    print("blob pruning time (s):", pruning_time)
    #print("maxes:", np.amax(segments_all, axis=0))
    
    # get weighted mean of ratios
    if df_pruning is not None:
        print("\nBlob pruning ratios:")
        path_pruning = "blob_ratios.csv" if save_dfs else None
        df_pruning_all = df_io.data_frames_to_csv(
            df_pruning, path_pruning, show=" ")
        cols = df_pruning_all.columns.tolist()
        blob_pruning_means = {}
        if "blobs" in cols:
            blobs_unpruned = df_pruning_all["blobs"]
            num_blobs_unpruned = np.sum(blobs_unpruned)
            for col in cols[1:]:
                blob_pruning_means["mean_{}".format(col)] = [
                    np.sum(np.multiply(df_pruning_all[col], blobs_unpruned)) 
                    / num_blobs_unpruned]
            path_pruning_means = "blob_ratios_means.csv" if save_dfs else None
            df_pruning_means = df_io.dict_to_data_frame(
                blob_pruning_means, path_pruning_means, show=" ")
        else:
            print("no blob ratios found")
    
    '''# report any remaining duplicates
    np.set_printoptions(linewidth=500, threshold=10000000)
    print("all blobs (len {}):".format(len(segments_all)))
    sort = np.lexsort(
        (segments_all[:, 2], segments_all[:, 1], segments_all[:, 0]))
    blobs = segments_all[sort]
    print(blobs)
    print("checking for duplicates in all:")
    print(detector.remove_duplicate_blobs(blobs, slice(0, 3)))
    '''
    
    stats_detection = None
    fdbk = None
    colocs = None
    if segments_all is not None:
        # remove the duplicated elements that were used for pruning
        detector.replace_rel_with_abs_blob_coords(segments_all)
        if coloc:
            colocs = segments_all[:, 10:10+num_chls_roi].astype(np.uint8)
        # remove absolute coordinate and any co-localization columns
        segments_all = detector.remove_abs_blob_coords(segments_all)
        
        # compare detected blobs with truth blobs
        # TODO: assumes ground truth is relative to any ROI offset,
        # but should make customizable
        if verify:
            stats_detection, fdbk = verifier.verify_stack(
                filename_base, subimg_path_base, settings, segments_all,
                channels, overlap_base)
    
    if config.save_subimg:
        subimg_base_path = libmag.combine_paths(
            subimg_path_base, config.SUFFIX_SUBIMG)
        if (isinstance(config.image5d, np.memmap) and 
                config.image5d.filename == os.path.abspath(subimg_base_path)):
            # file at sub-image save path may have been opened as a memmap
            # file, in which case saving would fail
            libmag.warn("{} is currently open, cannot save sub-image"
                        .format(subimg_base_path))
        else:
            # write sub-image, which is in ROI (3D) format
            with open(subimg_base_path, "wb") as f:
                np.save(f, roi)

    # store blobs in Blobs instance
    # TODO: consider separating into blobs and blobs metadata archives
    blobs = detector.Blobs(
        segments_all, colocalizations=colocs, path=filename_blobs)
    blobs.resolutions = config.resolutions
    blobs.basename = os.path.basename(config.filename)
    blobs.roi_offset = offset
    blobs.roi_size = size
    
    # whole image benchmarking time
    times = (
        [detection_time], 
        [pruning_time], 
        time() - time_start)
    times_dict = {}
    for key, val in zip(StackTimes, times):
        times_dict[key] = val
    if segments_all is None:
        print("\nNo blobs detected")
    else:
        print("\nTotal blobs found:", len(segments_all))
        detector.show_blobs_per_channel(segments_all)
    print("\nTotal detection processing times (s):")
    path_times = "stack_detection_times.csv" if save_dfs else None
    df_io.dict_to_data_frame(times_dict, path_times, show=" ")
    
    return stats_detection, fdbk, blobs
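A hedged sketch of calling detect_blobs_blocks directly is below; the module path (magmap.cv.stack_detect, matching the stack_detect reference elsewhere in these snippets), the sample path, and the explicit channel list are assumptions, and config is expected to have been populated beforehand (e.g., via setup_images) since block detection reads resolutions and profiles from it. In practice the detect_blobs_stack wrapper handles this setup.

from magmap.cv import stack_detect
from magmap.io import importer

# hypothetical inputs; series 0 and channel 0 are placeholders
path = "/data/brain/sample.czi"
filename_base = importer.filename_to_base(path, 0)
img5d = importer.read_file(path, 0)

# offset and size of None process the whole stack
stats, fdbk, blobs = stack_detect.detect_blobs_blocks(
    filename_base, img5d.img, None, None, channels=[0], verify=False,
    save_dfs=True)
print("blobs detected:", 0 if blobs.blobs is None else len(blobs.blobs))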
Ejemplo n.º 20
0
def process_file(
    path: str,
    proc_type: Enum,
    proc_val: Optional[Any] = None,
    series: Optional[int] = None,
    subimg_offset: Optional[List[int]] = None,
    subimg_size: Optional[List[int]] = None,
    roi_offset: Optional[List[int]] = None,
    roi_size: Optional[List[int]] = None
) -> Tuple[Optional[Any], Optional[str]]:
    """Processes a single image file non-interactively.

    Assumes that the image has already been set up.
    
    Args:
        path: Path to image from which MagellanMapper-style paths will 
            be generated.
        proc_type: Processing type, which should be one of
            :class:`config.ProcessTypes`.
        proc_val: Processing value associated with ``proc_type``; defaults to
            None.
        series: Image series number; defaults to None.
        subimg_offset: Sub-image offset as (z,y,x) to load; defaults to None.
        subimg_size: Sub-image size as (z,y,x) to load; defaults to None.
        roi_offset: Region of interest offset as (x, y, z) to process;
            defaults to None.
        roi_size: Region of interest size of region to process, given as
            ``(x, y, z)``; defaults to None.
    
    Returns:
        Tuple of stats from processing, or None if no stats, and 
        text feedback from the processing, or None if no feedback.
    """
    # PROCESS BY TYPE
    stats = None
    fdbk = None
    filename_base = importer.filename_to_base(path, series)

    print("{}\n".format("-" * 80))
    if proc_type is config.ProcessTypes.LOAD:
        # loading completed
        return None, None

    elif proc_type is config.ProcessTypes.IMPORT_ONLY:
        # already imported so does nothing
        print("imported {}, will exit".format(path))

    elif proc_type is config.ProcessTypes.EXPORT_ROIS:
        # export ROIs; assumes that info_proc was already loaded to
        # give smaller region from which smaller ROIs from the truth DB
        # will be extracted
        from magmap.io import export_rois
        db = config.db if config.truth_db is None else config.truth_db
        export_path = naming.make_subimage_name(filename_base, subimg_offset,
                                                subimg_size)
        export_rois.export_rois(db, config.image5d, config.channel,
                                export_path,
                                config.plot_labels[config.PlotLabels.PADDING],
                                config.unit_factor, config.truth_db_mode,
                                os.path.basename(export_path))

    elif proc_type is config.ProcessTypes.TRANSFORM:
        # transpose, rescale, and/or resize whole large image
        transformer.transpose_img(
            path,
            series,
            plane=config.plane,
            rescale=config.transform[config.Transforms.RESCALE],
            target_size=config.roi_size)

    elif proc_type in (config.ProcessTypes.EXTRACT,
                       config.ProcessTypes.ANIMATED):
        # generate animated GIF or extract single plane
        export_stack.stack_to_img(config.filenames, roi_offset, roi_size,
                                  series, subimg_offset, subimg_size,
                                  proc_type is config.ProcessTypes.ANIMATED,
                                  config.suffix)

    elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
        # export blobs to CSV file
        from magmap.io import export_rois
        export_rois.blobs_to_csv(config.blobs.blobs, filename_base)

    elif proc_type in (config.ProcessTypes.DETECT,
                       config.ProcessTypes.DETECT_COLOC):
        # detect blobs in the full image, +/- co-localization
        coloc = proc_type is config.ProcessTypes.DETECT_COLOC
        stats, fdbk, _ = stack_detect.detect_blobs_stack(
            filename_base, subimg_offset, subimg_size, coloc)

    elif proc_type is config.ProcessTypes.COLOC_MATCH:
        if config.blobs is not None and config.blobs.blobs is not None:
            # colocalize blobs in separate channels by matching blobs
            shape = subimg_size
            if shape is None:
                # get shape from loaded image, falling back to its metadata
                if config.image5d is not None:
                    shape = config.image5d.shape[1:]
                else:
                    shape = config.img5d.meta[config.MetaKeys.SHAPE][1:]
            matches = colocalizer.StackColocalizer.colocalize_stack(
                shape, config.blobs.blobs)
            # insert matches into database
            colocalizer.insert_matches(config.db, matches)
        else:
            print("No blobs loaded to colocalize, skipping")

    elif proc_type in (config.ProcessTypes.EXPORT_PLANES,
                       config.ProcessTypes.EXPORT_PLANES_CHANNELS):
        # export each plane as a separate image file
        export_stack.export_planes(
            config.image5d, config.savefig, config.channel,
            proc_type is config.ProcessTypes.EXPORT_PLANES_CHANNELS)

    elif proc_type is config.ProcessTypes.EXPORT_RAW:
        # export the main image as a raw data file
        out_path = libmag.combine_paths(config.filename, ".raw", sep="")
        libmag.backup_file(out_path)
        np_io.write_raw_file(config.image5d, out_path)

    elif proc_type is config.ProcessTypes.EXPORT_TIF:
        # export the main image as a TIF file for each channel
        np_io.write_tif(config.image5d, config.filename)

    elif proc_type is config.ProcessTypes.PREPROCESS:
        # pre-process a whole image and save to file
        # TODO: consider chunking option for larger images
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(config.filename, "_preproc")
        transformer.preprocess_img(config.image5d, proc_val, config.channel,
                                   out_path)

    return stats, fdbk
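Finally, a hedged end-to-end sketch tying setup_images and process_file together for whole-image blob detection; the path is hypothetical, and the module housing process_file is an assumption (it is shown here as if importable alongside setup_images).

from magmap.io import np_io
from magmap.settings import config

# hypothetical path; assumes process_file is importable from the same module
# as setup_images (the module location is an assumption)
path = "/data/brain/sample.czi"
config.filename = path
np_io.setup_images(path, proc_type=config.ProcessTypes.DETECT)
stats, fdbk = np_io.process_file(path, config.ProcessTypes.DETECT)
if fdbk:
    print(fdbk)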