Example #1
    def build_stack(self,
                    axs: List,
                    scale_bar: bool = True,
                    fit: bool = False) -> Optional[List]:
        """Builds a stack of Matploblit 2D images.
        
        Uses multiprocessing to load or resize each image.
        
        Args:
            axs: Sub-plot axes.
            scale_bar: True to include scale bar; defaults to True.
            fit: True to fit the figure frame to the resulting image.
        
        Returns:
            List[List[:obj:`matplotlib.image.AxesImage`]]: Nested list of
            axes image objects. The first list level contains planes, and
            the second level contains channels within each plane.
        
        """
        def handle_extracted_plane():
            # get sub-plot and hide x/y axes
            ax = axs
            if libmag.is_seq(ax):
                ax = axs[imgi]
            plot_support.hide_axes(ax)

            # multiple artists can be shown at each frame by collecting
            # each group of artists in a list; overlay_images returns
            # a nested list containing a list for each image, which in turn
            # contains a list of artists for each channel
            ax_imgs = plot_support.overlay_images(ax,
                                                  self.aspect,
                                                  self.origin,
                                                  imgs,
                                                  None,
                                                  cmaps_all,
                                                  ignore_invis=True,
                                                  check_single=True)
            if (colorbar is not None and len(ax_imgs) > 0
                    and len(ax_imgs[0]) > 0 and imgi == 0):
                # add colorbar with scientific notation if outside limits
                cbar = ax.figure.colorbar(ax_imgs[0][0], ax=ax, **colorbar)
                plot_support.set_scinot(cbar.ax, lbls=None, units=None)
            plotted_imgs[imgi] = np.array(ax_imgs).flatten()

            if libmag.is_seq(text_pos) and len(text_pos) > 1:
                # write plane index in axes rather than data coordinates
                text = ax.text(*text_pos[:2],
                               "{}-plane: {}".format(
                                   plot_support.get_plane_axis(config.plane),
                                   self.start_planei + imgi),
                               transform=ax.transAxes,
                               color="w")
                plotted_imgs[imgi] = [*plotted_imgs[imgi], text]

            if scale_bar:
                plot_support.add_scale_bar(ax, 1 / self.rescale, config.plane)

        # number of image types (e.g., atlas, labels) and corresponding planes
        num_image_types = len(self.images)
        if num_image_types < 1: return None
        num_images = len(self.images[0])
        if num_images < 1: return None

        # import the images as Matplotlib artists via multiprocessing
        plotted_imgs: List = [None] * num_images
        img_shape = self.images[0][0].shape
        target_size = np.multiply(img_shape, self.rescale).astype(int)
        multichannel = self.images[0][0].ndim >= 3
        if multichannel:
            print("building stack for channel: {}".format(config.channel))
            target_size = target_size[:-1]

        # setup imshow parameters
        colorbar = config.roi_profile["colorbar"]
        cmaps_all = [config.cmaps, *self.cmaps_labels]
        text_pos = config.plot_labels[config.PlotLabels.TEXT_POS]

        StackPlaneIO.set_data(self.images)
        pool_results = None
        pool = None
        multiprocess = self.rescale != 1
        if multiprocess:
            # set up multiprocessing
            initializer = None
            initargs = None
            if not chunking.is_fork():
                # set up images as shared arrays for spawned mode
                initializer, initargs = StackPlaneIO.build_pool_init(
                    OrderedDict([(i, img)
                                 for i, img in enumerate(self.images)]))

            pool = chunking.get_mp_pool(initializer, initargs)
            pool_results = []

        for i in range(num_images):
            # extract this plane at the target size
            args = (i, target_size)
            if pool is None:
                # extract and handle without multiprocessing
                imgi, imgs = self.fn_process(*args)
                handle_extracted_plane()
            else:
                # extract plane in multiprocessing
                pool_results.append(
                    pool.apply_async(self.fn_process, args=args))

        if multiprocess:
            # handle multiprocessing output
            for result in pool_results:
                imgi, imgs = result.get()
                handle_extracted_plane()
            pool.close()
            pool.join()

        if fit and plotted_imgs:
            # fit each figure to its first available image
            for ax_img in plotted_imgs:
                # images may be flattened AxesImage, array of AxesImage and
                # Text, or None if alpha set to 0
                if libmag.is_seq(ax_img):
                    ax_img = ax_img[0]
                if isinstance(ax_img, AxesImage):
                    plot_support.fit_frame_to_image(ax_img.figure,
                                                    ax_img.get_array().shape,
                                                    self.aspect)

        return plotted_imgs
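
A minimal usage sketch for ``build_stack``. The ``exporter`` object, its prior setup, and the figure layout are illustrative assumptions rather than the library's documented API:

    # hypothetical setup: ``exporter`` provides build_stack() and has already
    # been configured with images, rescale, aspect, origin, and cmaps_labels
    import matplotlib.pyplot as plt

    num_planes = len(exporter.images[0])
    fig, axs = plt.subplots(1, num_planes, squeeze=False)
    ax_imgs = exporter.build_stack(list(axs[0]), scale_bar=True, fit=False)
    if ax_imgs is not None:
        # first level holds planes; second level holds channels per plane
        print("plotted {} planes".format(len(ax_imgs)))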
Example #2
    def colocalize_stack(cls, shape, blobs):
        """Entry point to colocalizing blobs within a stack.

        Args:
            shape (List[int]): Image shape in z,y,x.
            blobs (:obj:`np.ndarray`): 2D Numpy array of blobs.

        Returns:
            dict[tuple[int, int], :class:`BlobMatch`]: The
            dictionary of matches, where keys are tuples of the channel pairs,
            and values are blob match objects. 

        """
        print("Colocalizing blobs based on matching blobs in each pair of "
              "channels")
        # set up ROI blocks from which to select blobs in each block
        sub_roi_slices, sub_rois_offsets, _, _, _, overlap_base, _, _ \
            = stack_detect.setup_blocks(config.roi_profile, shape)
        match_tol = np.multiply(
            overlap_base, config.roi_profile["verify_tol_factor"])
        
        is_fork = chunking.is_fork()
        if is_fork:
            # set shared data in forked multiprocessing
            cls.blobs = blobs
            cls.match_tol = match_tol
        pool = mp.Pool(processes=config.cpus)
        pool_results = []
        for z in range(sub_roi_slices.shape[0]):
            for y in range(sub_roi_slices.shape[1]):
                for x in range(sub_roi_slices.shape[2]):
                    coord = (z, y, x)
                    offset = sub_rois_offsets[coord]
                    slices = sub_roi_slices[coord]
                    # get the block's shape without shadowing the ``shape`` arg
                    sub_shape = [s.stop - s.start for s in slices]
                    if is_fork:
                        # use variables stored as class attributes
                        pool_results.append(pool.apply_async(
                            StackColocalizer.colocalize_block,
                            args=(coord, offset, sub_shape)))
                    else:
                        # pickle full set of variables
                        pool_results.append(pool.apply_async(
                            StackColocalizer.colocalize_block,
                            args=(coord, offset, sub_shape,
                                  detector.get_blobs_in_roi(
                                      blobs, offset, sub_shape)[0], match_tol,
                                  True)))
        
        # dict of channel combos to blob matches data frame
        matches_all = {}
        for result in pool_results:
            coord, matches = result.get()
            count = 0
            for key, val in matches.items():
                matches_all.setdefault(key, []).append(val.df)
                count += len(val.df)
            print("adding {} matches from block at {} of {}"
                  .format(count, coord, np.add(sub_roi_slices.shape, -1)))
        
        pool.close()
        pool.join()
        
        # prune duplicates by taking matches with shortest distance
        for key in matches_all.keys():
            matches_all[key] = pd.concat(matches_all[key])
            for blobi in (BlobMatch.Cols.BLOB1, BlobMatch.Cols.BLOB2):
                # convert blob column to ndarray to extract coords by column
                matches = matches_all[key]
                matches_uniq, matches_i, matches_inv, matches_cts = np.unique(
                    np.vstack(matches[blobi.value])[:, :3], axis=0,
                    return_index=True, return_inverse=True, return_counts=True)
                if np.sum(matches_cts > 1) > 0:
                    # prune if at least one blob has been matched to multiple
                    # other blobs
                    singles = matches.iloc[matches_i[matches_cts == 1]]
                    dups = []
                    for i, ct in enumerate(matches_cts):
                        # include non-duplicates to retain index
                        if ct <= 1: continue
                        # get indices in orig matches at given unique array
                        # index and take match with lowest dist
                        matches_mult = matches.loc[matches_inv == i]
                        dists = matches_mult[BlobMatch.Cols.DIST.value]
                        min_dist = np.amin(dists)
                        num_matches = len(matches_mult)
                        if config.verbose and num_matches > 1:
                            print("pruning from", num_matches,
                                  "matches of dist:", dists)
                        matches_mult = matches_mult.loc[dists == min_dist]
                        # take first in case of any ties
                        dups.append(matches_mult.iloc[[0]])
                    matches_all[key] = pd.concat((singles, pd.concat(dups)))
            print("Colocalization matches for channels {}: {}"
                  .format(key, len(matches_all[key])))
            libmag.printv(matches_all[key])
            # store data frame in BlobMatch object
            matches_all[key] = BlobMatch(df=matches_all[key])
        
        return matches_all
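
A usage sketch for ``colocalize_stack``; that the classmethod belongs to ``StackColocalizer`` is inferred from the calls above, and the image and blobs variables are placeholders:

    # assumed inputs: a z,y,x image volume and a blobs array from detection
    matches = StackColocalizer.colocalize_stack(list(img.shape[:3]), blobs)
    for (chl_a, chl_b), match in matches.items():
        # each value is a BlobMatch wrapping a data frame of matched pairs
        print("channels {} vs {}: {} matches".format(
            chl_a, chl_b, len(match.df)))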
Example #3
 def prune_blobs_mp(cls, img, seg_rois, overlap, tol, sub_roi_slices,
                    sub_rois_offsets, channels, overlap_padding=None):
     """Prune close blobs within overlapping regions by checking within
     entire planes across the ROI in parallel with multiprocessing.
     
     Args:
         img (:obj:`np.ndarray`): Array in which to detect blobs.
         seg_rois (:obj:`np.ndarray`): Blobs from each sub-region.
         overlap: 1D array of size 3 with the number of overlapping pixels 
             for each image axis.
         tol: Tolerance as (z, y, x), within which a segment will be 
             considered a duplicate of a segment in the master array and
             removed.
          sub_roi_slices (:obj:`np.ndarray`): Object array of sub-regions, used
             to check size.
         sub_rois_offsets: Offsets of each sub-region.
         channels (Sequence[int]): Sequence of channels; defaults to None
             to detect in all channels.
         overlap_padding: Sequence of z,y,x for additional padding beyond
             ``overlap``. Defaults to None to use ``tol`` as padding.
     
     Returns:
         :obj:`np.ndarray`, :obj:`pd.DataFrame`: All blobs as a Numpy array
         and a data frame of pruning stats, or None for both if no blobs are
         in the ``seg_rois``.
     
     """
     # collect all blobs in master array to group all overlapping regions,
     # with sub-ROI coordinates as last 3 columns
     blobs_merged = chunking.merge_blobs(seg_rois)
     if blobs_merged is None:
         return None, None
     print("total blobs before pruning:", len(blobs_merged))
     
     print("pruning with overlap: {}, overlap tol: {}, pruning tol: {}"
           .format(overlap, overlap_padding, tol))
     blobs_all = []
     blob_ratios = {}
     cols = ("blobs", "ratio_pruning", "ratio_adjacent")
     if overlap_padding is None: overlap_padding = tol
     for i in channels:
         # prune blobs from each channel separately to avoid pruning based on 
         # co-localized channel signals
         blobs = detector.blobs_in_channel(blobs_merged, i)
         for axis in range(3):
             # prune planes with all the overlapping regions within a given axis,
             # skipping if this axis has no overlapping sub-regions
             num_sections = sub_rois_offsets.shape[axis]
             if num_sections <= 1:
                 continue
             
             # multiprocess pruning by overlapping planes
             blobs_all_non_ol = None # all blobs from non-overlapping regions
             blobs_to_prune = []
             coord_last = tuple(np.subtract(sub_roi_slices.shape, 1))
             for j in range(num_sections):
                 # build overlapping region dimensions based on size of 
                 # sub-region in the given axis
                  coord = np.zeros(3, dtype=int)
                 coord[axis] = j
                 print("** setting up blob pruning in axis {}, section {} "
                       "of {}".format(axis, j, num_sections - 1))
                 offset = sub_rois_offsets[tuple(coord)]
                 sub_roi = img[sub_roi_slices[tuple(coord)]]
                 size = sub_roi.shape
                 _logger.debug(f"offset: {offset}, size: {size}")
                 
                 # overlapping region: each region but the last extends 
                 # into the next region, with the overlapping volume from 
                 # the end of the region, minus the overlap and a tolerance 
                 # space, to the region's end plus this tolerance space; 
                 # non-overlapping region: the region before the overlap, 
                 # after any overlap with the prior region (n > 1) 
                 # to the start of the overlap (n < last region)
                 blobs_ol = None
                 blobs_ol_next = None
                 blobs_in_non_ol = []
                 shift = overlap[axis] + overlap_padding[axis]
                 offset_axis = offset[axis]
                 if j < num_sections - 1:
                     bounds = [offset_axis + size[axis] - shift,
                               offset_axis + size[axis]
                               + overlap_padding[axis]]
                     libmag.printv(
                         "axis {}, boundaries: {}".format(axis, bounds))
                     blobs_ol = blobs[np.all([
                         blobs[:, axis] >= bounds[0], 
                         blobs[:, axis] < bounds[1]], axis=0)]
                     
                      # get blobs from immediately adjacent region of the same
                     # size as that of the overlapping region; keep same 
                     # starting point with or without overlap_tol
                     start = offset_axis + size[axis] + tol[axis]
                     bounds_next = [
                         start,
                         start + overlap[axis] + 2 * overlap_padding[axis]]
                     shape = np.add(
                         sub_rois_offsets[coord_last], sub_roi.shape[:3])
                     libmag.printv(
                         "axis {}, boundaries (next): {}, max bounds: {}"
                         .format(axis, bounds_next, shape[axis]))
                     if np.all(np.less(bounds_next, shape[axis])):
                         # ensure that next overlapping region is within ROI
                         blobs_ol_next = blobs[np.all([
                             blobs[:, axis] >= bounds_next[0], 
                             blobs[:, axis] < bounds_next[1]], axis=0)]
                      # non-overlapping region extends up to this overlap
                     blobs_in_non_ol.append(blobs[:, axis] < bounds[0])
                 else:
                     # last non-overlapping region extends to end of region
                     blobs_in_non_ol.append(
                         blobs[:, axis] < offset_axis + size[axis])
                 
                 # get non-overlapping area
                 start = offset_axis
                 if j > 0:
                     # shift past overlapping part at start of region
                     start += shift
                 blobs_in_non_ol.append(blobs[:, axis] >= start)
                 blobs_non_ol = blobs[np.all(blobs_in_non_ol, axis=0)]
                 # collect all non-overlapping region blobs
                 if blobs_all_non_ol is None:
                     blobs_all_non_ol = blobs_non_ol
                 elif blobs_non_ol is not None:
                     blobs_all_non_ol = np.concatenate(
                         (blobs_all_non_ol, blobs_non_ol))
 
                 blobs_to_prune.append((blobs_ol, axis, tol, blobs_ol_next))
 
             is_fork = chunking.is_fork()
             if is_fork:
                 # set data as class variables to share across forks
                 cls.blobs_to_prune = blobs_to_prune
             pool = chunking.get_mp_pool()
             pool_results = []
             for j in range(len(blobs_to_prune)):
                 if is_fork:
                     # prune blobs in overlapping regions by multiprocessing,
                     # using a separate class to avoid pickling input blobs
                     pool_results.append(pool.apply_async(
                         StackPruner.prune_overlap_by_index, args=(j, )))
                 else:
                     # for spawned methods, need to pickle the blobs
                     pool_results.append(pool.apply_async(
                         StackPruner.prune_overlap, args=(
                             j, blobs_to_prune[j])))
             
             # collect all the pruned blob lists
             blobs_all_ol = None
             for result in pool_results:
                 blobs_ol_pruned, ratios = result.get()
                 if blobs_all_ol is None:
                     blobs_all_ol = blobs_ol_pruned
                 elif blobs_ol_pruned is not None:
                     blobs_all_ol = np.concatenate(
                         (blobs_all_ol, blobs_ol_pruned))
                 if ratios:
                     for col, val in zip(cols, ratios):
                         blob_ratios.setdefault(col, []).append(val)
             
             # recombine blobs from the non-overlapping with the pruned  
             # overlapping regions from the entire stack to re-prune along 
             # any remaining axes
             pool.close()
             pool.join()
             if blobs_all_ol is None:
                 blobs = blobs_all_non_ol
             elif blobs_all_non_ol is None:
                 blobs = blobs_all_ol
             else:
                 blobs = np.concatenate((blobs_all_non_ol, blobs_all_ol))
         # build up list from each channel
         blobs_all.append(blobs)
     
     # merge blobs into Numpy array and remove sub-ROI coordinate columns
     blobs_all = np.vstack(blobs_all)[:, :-3]
     print("total blobs after pruning:", len(blobs_all))
     
     # export blob ratios as data frame
     df = pd.DataFrame(blob_ratios)
     
     return blobs_all, df
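
A sketch of how this pruning step might follow block-wise detection; attributing the classmethod to ``StackPruner`` is inferred from the ``cls`` usage above, and the input variables are placeholders (e.g., from ``stack_detect.setup_blocks`` as in Example #2):

    # hedged sketch: prune duplicate blobs along overlapping block borders
    blobs_all, prune_df = StackPruner.prune_blobs_mp(
        img, seg_rois, overlap, tol, sub_roi_slices, sub_rois_offsets,
        channels=[0])
    if blobs_all is not None:
        print("{} blobs kept after pruning".format(len(blobs_all)))
        print(prune_df)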
Example #4
    def detect_blobs_sub_rois(cls, img, sub_roi_slices, sub_rois_offsets,
                              denoise_max_shape, exclude_border, coloc,
                              channel):
        """Process blobs in chunked sub-ROIs via multiprocessing.

        Args:
            img (:obj:`np.ndarray`): Array in which to detect blobs.
            sub_roi_slices (:obj:`np.ndarray`): Numpy object array containing
                chunked sub-ROIs within a stack.
            sub_rois_offsets (:obj:`np.ndarray`): Numpy object array of the same
                shape as ``sub_rois`` with offsets in z,y,x corresponding to
                each sub-ROI. Offsets are used to transpose blobs into
                absolute coordinates.
            denoise_max_shape (Tuple[int]): Maximum shape of each unit within
                each sub-ROI for denoising.
            exclude_border (Tuple[int]): Sequence of border pixels in x,y,z to
                exclude; defaults to None.
            coloc (bool): True to perform blob co-localizations; defaults to
                False.
            channel (Sequence[int]): Sequence of channels, where None detects
                in all channels.

        Returns:
            :obj:`np.ndarray`: Numpy object array of blobs corresponding to
            ``sub_rois``, with each set of blobs given as a Numpy array in the
            format, ``[n, [z, row, column, radius, ...]]``, including additional
            elements as given in :meth:`StackDetector.detect_sub_roi`.
        
        """
        # detect nuclei in each sub-ROI, passing an index to access each 
        # sub-ROI to minimize pickling
        is_fork = chunking.is_fork()
        last_coord = np.subtract(sub_roi_slices.shape, 1)
        if is_fork:
            # set data as class attributes for direct access during forked
            # multiprocessing
            cls.img = img
            cls.last_coord = last_coord
            cls.denoise_max_shape = denoise_max_shape
            cls.exclude_border = exclude_border
            cls.coloc = coloc
            cls.channel = channel
        
        pool = chunking.get_mp_pool()
        pool_results = []
        for z in range(sub_roi_slices.shape[0]):
            for y in range(sub_roi_slices.shape[1]):
                for x in range(sub_roi_slices.shape[2]):
                    coord = (z, y, x)
                    if is_fork:
                        # use variables stored in class
                        pool_results.append(pool.apply_async(
                            StackDetector.detect_sub_roi_from_data,
                            args=(coord, sub_roi_slices[coord],
                                  sub_rois_offsets[coord])))
                    else:
                        # pickle full set of variables including sub-ROI and
                        # filename from which to load image parameters
                        pool_results.append(pool.apply_async(
                            StackDetector.detect_sub_roi,
                            args=(coord, sub_rois_offsets[coord], last_coord,
                                  denoise_max_shape, exclude_border,
                                  img[sub_roi_slices[coord]], channel,
                                  config.filename, coloc)))
    
        # retrieve blobs and assign to object array corresponding to sub_rois
        seg_rois = np.zeros(sub_roi_slices.shape, dtype=object)
        for result in pool_results:
            coord, segments = result.get()
            num_blobs = 0 if segments is None else len(segments)
            print("adding {} blobs from sub_roi at {} of {}"
                  .format(num_blobs, coord, np.add(sub_roi_slices.shape, -1)))
            seg_rois[coord] = segments
    
        pool.close()
        pool.join()
        return seg_rois
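
A sketch chaining detection with the merge step from Example #3; the block variables are placeholders assumed to come from a block-setup call such as ``stack_detect.setup_blocks`` (see Example #2):

    # hedged sketch: detect blobs per sub-ROI, then merge into one array
    seg_rois = StackDetector.detect_blobs_sub_rois(
        img, sub_roi_slices, sub_rois_offsets, denoise_max_shape,
        exclude_border=None, coloc=False, channel=None)
    blobs_merged = chunking.merge_blobs(seg_rois)  # as in Example #3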
Example #5
def labels_to_markers_erosion(
        labels_img: np.ndarray,
        filter_size: int = 8,
        target_frac: Optional[float] = None,
        min_filter_size: Optional[int] = None,
        use_min_filter: bool = False,
        skel_eros_filt_size: Optional[int] = None,
        wt_dists: Optional[np.ndarray] = None,
        multiprocess: bool = True) -> Tuple[np.ndarray, pd.DataFrame]:
    """Convert a labels image to markers as eroded labels via multiprocessing.
    
    These markers can be used in segmentation algorithms such as 
    watershed.
    
    Args:
        labels_img: Labels image as an integer Numpy array,
            where each unique int is a separate label.
        filter_size: Size of structuring element for erosion, which should
            be > 0; defaults to 8.
        target_frac: Target fraction of original label to erode,
            passed to :func:`LabelToMarkerErosion.erode_label`. Defaults
            to None.
        min_filter_size: Minimum erosion filter size; defaults to None
            to use half of ``filter_size``, rounded down.
        use_min_filter: True to erode even if ``min_filter_size``
            is reached; defaults to False to avoid any erosion if this size
            is reached.
        skel_eros_filt_size: Erosion filter size before skeletonization
            in :func:`LabelToMarkerErosion.erode_labels`. Defaults to None to
            use the minimum filter size, which is half of ``filter_size``.
        wt_dists: Array of distances by which to weight
            the filter size, such as a distance transform to the outer
            perimeter of ``labels_img`` to weight central labels more
            heavily. Defaults to None.
        multiprocess: True to use multiprocessing; defaults to True.
    
    Returns:
        Tuple of an image array of the same shape as ``labels_img``, with
        the same labels eroded into markers, and a data frame of erosion
        metrics.
    
    """
    def handle_eroded_label():
        # mutate markers outside of mp for changes to persist and collect stats
        markers[tuple(slices)][filtered] = stats_eros[0]
        for col, stat in zip(cols, stats_eros):
            sizes_dict.setdefault(col, []).append(stat)

    # set up labels erosion
    start_time = time()
    _logger.info(
        "Eroding labels to markers with filter size %s, min filter size %s, "
        "and target fraction %s", filter_size, min_filter_size, target_frac)
    markers = np.zeros_like(labels_img)
    labels_unique = np.unique(labels_img)
    if min_filter_size is None:
        min_filter_size = filter_size // 2
    if skel_eros_filt_size is None:
        skel_eros_filt_size = filter_size // 2
    sizes_dict = {}
    cols = (config.AtlasMetrics.REGION.value, "SizeOrig", "SizeMarker",
            config.SmoothingMetrics.FILTER_SIZE.value)

    # share large images as class attributes for forked or non-multiprocessing
    LabelToMarkerErosion.set_labels_img(labels_img, wt_dists)

    is_fork = False
    pool_results = None
    pool = None
    if multiprocess:
        # set up multiprocessing
        is_fork = chunking.is_fork()
        initializer = None
        initargs = None
        if not is_fork:
            # set up labels image as a shared array for spawned mode
            initializer, initargs = LabelToMarkerErosion.build_pool_init(
                {config.RegNames.IMG_LABELS: labels_img})

        pool = chunking.get_mp_pool(initializer, initargs)
        pool_results = []

    for label_id in labels_unique:
        if label_id == 0: continue
        # erode labels to generate markers, excluding labels small enough
        # that they would require a filter smaller than half of original size
        args = [
            label_id, filter_size, target_frac, min_filter_size,
            use_min_filter, skel_eros_filt_size
        ]
        if not is_fork:
            # pickle distance weight directly in spawned mode (not necessary
            # for non-multiprocessed but equivalent)
            if wt_dists is not None:
                args.append(
                    LabelToMarkerErosion.meas_wt(labels_img, label_id,
                                                 wt_dists))
        if pool is None:
            # process labels without multiprocessing
            stats_eros, slices, filtered = LabelToMarkerErosion.erode_label(
                *args)
            handle_eroded_label()
        else:
            # process in multiprocessing
            pool_results.append(
                pool.apply_async(LabelToMarkerErosion.erode_label, args=args))

    if multiprocess:
        # handle multiprocessing output
        for result in pool_results:
            stats_eros, slices, filtered = result.get()
            handle_eroded_label()
        pool.close()
        pool.join()

    # show erosion stats
    df = df_io.dict_to_data_frame(sizes_dict, show=True)

    _logger.info("Time elapsed to erode labels into markers: %s",
                 time() - start_time)
    return markers, df
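
A brief usage sketch; the ``target_frac`` value and the commented watershed follow-up are illustrative assumptions:

    # hedged sketch: erode an integer labels volume into seed markers
    markers, metrics_df = labels_to_markers_erosion(
        labels_img, filter_size=8, target_frac=0.5, multiprocess=True)
    # the markers could then seed a watershed segmentation, e.g.:
    # from skimage.segmentation import watershed
    # seg = watershed(-edt, markers, mask=labels_img != 0)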
Example #6
def transpose_img(filename,
                  series,
                  plane=None,
                  rescale=None,
                  target_size=None):
    """Transpose Numpy NPY saved arrays into new planar orientations and 
    rescaling or resizing.
    
    Rescaling/resizing take place in multiprocessing. Files are saved
    through memmap-based arrays to minimize RAM usage. Output filenames
    are based on the ``make_modifer_[task]`` functions. Currently transposes
    all channels, ignoring :attr:``config.channel`` parameter.
    
    Args:
        filename: Full file path in :attr:`cli.filename` format.
        series: Series within multi-series file.
        plane: Planar orientation (see :attr:`plot_2d.PLANES`). Defaults
            to None, in which case no planar transformation will occur.
        rescale: Rescaling factor; defaults to None. Takes precedence over
            ``target_size``.
        target_size (List[int]): Target shape in x,y,z; defaults to None,
            in which case the target size will be extracted from the atlas
            profile if available.

    """
    if target_size is None:
        target_size = config.atlas_profile["target_size"]
    if plane is None and rescale is None and target_size is None:
        print("No transposition to perform, skipping")
        return

    time_start = time()
    # even if loaded already, reread to get image metadata
    # TODO: consider saving metadata in config and retrieving from there
    img5d = importer.read_file(filename, series)
    info = img5d.meta
    image5d = img5d.img
    sizes = info["sizes"]

    # make filenames based on transpositions
    modifier = ""
    if plane is not None:
        modifier = make_modifier_plane(plane)
    # either rescaling or resizing
    if rescale is not None:
        modifier += make_modifier_scale(rescale)
    elif target_size:
        # target size may differ from final output size but allows a known
        # size to be used for finding the file later
        modifier += make_modifier_resized(target_size)
    filename_image5d_npz, filename_info_npz = importer.make_filenames(
        filename, series, modifier=modifier)

    # TODO: image5d should assume 4/5 dimensions
    offset = 0 if image5d.ndim <= 3 else 1
    multichannel = image5d.ndim >= 5
    image5d_swapped = image5d

    if plane is not None and plane != config.PLANE[0]:
        # swap z-y to get (y, z, x) order for xz orientation
        image5d_swapped = np.swapaxes(image5d_swapped, offset, offset + 1)
        config.resolutions[0] = libmag.swap_elements(config.resolutions[0], 0,
                                                     1)
        if plane == config.PLANE[2]:
            # swap new y-x to get (x, z, y) order for yz orientation
            image5d_swapped = np.swapaxes(image5d_swapped, offset, offset + 2)
            config.resolutions[0] = libmag.swap_elements(
                config.resolutions[0], 0, 2)

    scaling = None
    if rescale is not None or target_size is not None:
        # rescale based on scaling factor or target specific size
        rescaled = image5d_swapped
        # TODO: generalize for more than 1 preceding dimension?
        if offset > 0:
            rescaled = rescaled[0]
        max_pixels = [100, 500, 500]
        sub_roi_size = None
        if target_size:
            # to avoid artifacts from thin chunks, fit image into even
            # number of pixels per chunk by rounding up number of chunks
            # and resizing each chunk by ratio of total size to chunk num
            target_size = target_size[::-1]  # change to z,y,x
            shape = rescaled.shape[:3]
            num_chunks = np.ceil(np.divide(shape, max_pixels))
            max_pixels = np.ceil(np.divide(shape, num_chunks)).astype(int)
            sub_roi_size = np.floor(np.divide(target_size,
                                              num_chunks)).astype(int)
            print("Resizing image of shape {} to target_size: {}, using "
                  "num_chunks: {}, max_pixels: {}, sub_roi_size: {}".format(
                      rescaled.shape, target_size, num_chunks, max_pixels,
                      sub_roi_size))
        else:
            print("Rescaling image of shape {} by factor of {}".format(
                rescaled.shape, rescale))

        # rescale in chunks with multiprocessing
        sub_roi_slices, _ = chunking.stack_splitter(rescaled.shape, max_pixels)
        is_fork = chunking.is_fork()
        if is_fork:
            Downsampler.set_data(rescaled)
        sub_rois = np.zeros_like(sub_roi_slices)
        pool = chunking.get_mp_pool()
        pool_results = []
        for z in range(sub_roi_slices.shape[0]):
            for y in range(sub_roi_slices.shape[1]):
                for x in range(sub_roi_slices.shape[2]):
                    coord = (z, y, x)
                    slices = sub_roi_slices[coord]
                    args = [coord, slices, rescale, sub_roi_size, multichannel]
                    if not is_fork:
                        # pickle chunk if img not directly available
                        args.append(rescaled[slices])
                    pool_results.append(
                        pool.apply_async(Downsampler.rescale_sub_roi,
                                         args=args))
        for result in pool_results:
            coord, sub_roi = result.get()
            print("replacing sub_roi at {} of {}".format(
                coord, np.add(sub_roi_slices.shape, -1)))
            sub_rois[coord] = sub_roi

        pool.close()
        pool.join()
        rescaled_shape = chunking.get_split_stack_total_shape(sub_rois)
        if offset > 0:
            rescaled_shape = np.concatenate(([1], rescaled_shape))
        print("rescaled_shape: {}".format(rescaled_shape))
        # rescale chunks directly into memmap-backed array to minimize RAM usage
        image5d_transposed = np.lib.format.open_memmap(
            filename_image5d_npz,
            mode="w+",
            dtype=sub_rois[0, 0, 0].dtype,
            shape=tuple(rescaled_shape))
        chunking.merge_split_stack2(sub_rois, None, offset, image5d_transposed)

        if rescale is not None:
            # scale resolutions based on single rescaling factor
            config.resolutions = np.multiply(config.resolutions, 1 / rescale)
        else:
            # scale resolutions based on size ratio for each dimension
            config.resolutions = np.multiply(config.resolutions,
                                             (image5d_swapped.shape /
                                              rescaled_shape)[1:4])
        sizes[0] = rescaled_shape
        scaling = importer.calc_scaling(image5d_swapped, image5d_transposed)
    else:
        # transfer directly to memmap-backed array
        image5d_transposed = np.lib.format.open_memmap(
            filename_image5d_npz,
            mode="w+",
            dtype=image5d_swapped.dtype,
            shape=image5d_swapped.shape)
        if plane == config.PLANE[1] or plane == config.PLANE[2]:
            # flip upside-down if re-orienting planes
            if offset:
                image5d_transposed[0, :] = np.fliplr(image5d_swapped[0, :])
            else:
                image5d_transposed[:] = np.fliplr(image5d_swapped[:])
        else:
            image5d_transposed[:] = image5d_swapped[:]
        sizes[0] = image5d_swapped.shape

    # save image metadata
    print("detector.resolutions: {}".format(config.resolutions))
    print("sizes: {}".format(sizes))
    image5d.flush()
    importer.save_image_info(
        filename_info_npz, info["names"], sizes, config.resolutions,
        info["magnification"], info["zoom"],
        *importer.calc_intensity_bounds(image5d_transposed), scaling, plane)
    print("saved transposed file to {} with shape {}".format(
        filename_image5d_npz, image5d_transposed.shape))
    print("time elapsed (s): {}".format(time() - time_start))