Example #1
def load_numpy_to_sitk(numpy_file, rotate=False, channel=None):
    """Load Numpy image array to SimpleITK Image object.

    Use ``channel`` to extract a single channel before generating a
    :obj:`sitk.Image` object, as many SimpleITK filters require
    single-channel ("scalar" rather than "vector") images.
    
    Args:
        numpy_file (str): Path to Numpy archive file.
        rotate (bool): True if the image should be rotated 180 deg; defaults to
            False.
        channel (int, Tuple[int]): Integer or sequence of integers specifying
            channels to keep.
    
    Returns:
        :obj:`sitk.Image`: The image in SimpleITK format.
    
    """
    img5d = importer.read_file(numpy_file, config.series)
    image5d = img5d.img
    roi = image5d[0, ...]  # not using time dimension
    if channel is not None and len(roi.shape) >= 4:
        roi = roi[..., channel]
        print("extracted channel(s) for SimpleITK image:", channel)
    if rotate:
        roi = np.rot90(roi, 2, (1, 2))
    sitk_img = sitk.GetImageFromArray(roi)
    spacing = config.resolutions[0]
    sitk_img.SetSpacing(spacing[::-1])
    # TODO: consider setting z-origin to 0 since the image is generally
    # bound as tightly to the subject as possible
    #sitk_img.SetOrigin([0, 0, 0])
    sitk_img.SetOrigin([0, 0, -roi.shape[0] // 2])
    #sitk_img.SetOrigin([0, 0, -roi.shape[0]])
    return sitk_img
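A minimal usage sketch (not from the source; the path and channel are assumptions):

# hypothetical archive path; extract channel 0 so that scalar-only
# SimpleITK filters can be applied to the result
sitk_img = load_numpy_to_sitk("sample_image.npy", rotate=False, channel=0)
print("size:", sitk_img.GetSize(), "spacing:", sitk_img.GetSpacing())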
Example #2
def find_scaling(img_path, scaled_shape=None, scale=None):
    """Find scaling between two images.
    
    Scaling can be computed to translate blob coordinates into another
    space, such as a heat map for a downsampled image.
    
    Args:
        img_path (str): Base path to image.
        scaled_shape (List): Shape of image to calculate scaling factor if
            this factor cannot be found from a transposed file's metadata;
            defaults to None.
        scale (int, float): Scalar scaling factor, used to find a
            rescaled file; defaults to None. To find a resized file instead,
            set an atlas profile with the resizing factor.

    Returns:
        list[float], list[float]: Sequence of scaling factors to a scaled
        or resized image, or None if not loaded or given, and the resolutions
        of the full-sized image found based on ``img_path``.

    """
    # get scaling and resolutions from blob space to that of a down/upsampled
    # image space
    load_size = config.atlas_profile["target_size"]
    img_path_transposed = transformer.get_transposed_image_path(
        img_path, scale, load_size)
    scaling = None
    res = None
    if scale is not None or load_size is not None:
        # retrieve scaling from a rescaled/resized image
        _, img_info = importer.read_file(
            img_path_transposed, config.series, return_info=True)
        scaling = img_info["scaling"]
        res = np.multiply(config.resolutions[0], scaling)
        print("retrieved scaling from resized image:", scaling)
        print("rescaled resolution for full-scale image:", res)
    elif scaled_shape is not None:
        # fall back to scaling based on comparison to original image
        img5d = importer.read_file(img_path_transposed, config.series)
        scaling = importer.calc_scaling(
            img5d.img, None, scaled_shape=scaled_shape)
        res = config.resolutions[0]
        print("using scaling compared to full image:", scaling)
        print("resolution from full-scale image:", res)
    return scaling, res
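A minimal usage sketch, with a hypothetical base path and scaling factor:

# retrieve scaling from a file previously rescaled by a factor of 0.5,
# or fall back to comparing against a known downsampled shape (z,y,x)
scaling, res = find_scaling("sample_image", scale=0.5)
scaling, res = find_scaling("sample_image", scaled_shape=(51, 100, 100))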
Example #3
def test_load_image(self):
    config.image5d = importer.read_file(config.filename, config.series)
    if config.image5d is None:
        chls, import_path = importer.setup_import_multipage(
            config.filename)
        import_md = importer.setup_import_metadata(chls, config.channel)
        config.image5d = importer.import_multiplane_images(
            chls, import_path, import_md, channel=config.channel)
    self.assertEqual(config.image5d.shape, (1, 51, 200, 200, 2))
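A context sketch, assuming the method above belongs to a unittest.TestCase subclass (the class name here is hypothetical):

import unittest

class TestImageImport(unittest.TestCase):
    # test_load_image as defined above
    ...

if __name__ == "__main__":
    unittest.main()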
Example #4
def load_blobs(img_path, check_scaling=False, scaled_shape=None, scale=None):
    """Load blobs from an archive.
    
    Scaling can be computed to translate blob coordinates into another
    space, such as a heat map for a downsampled image.
    
    Args:
        img_path (str): Base path to blobs.
        check_scaling (bool): True to check scaling, in which case
            the scaling factor and scaled resolutions will be returned.
            Defaults to False.
        scaled_shape (List): Shape of image to calculate scaling factor if
            this factor cannot be found from a transposed file's metadata;
            defaults to None.
        scale (int, float): Scalar scaling factor, used to find a
            rescaled file; defaults to None. To find a resized file instead,
            set an atlas profile with the resizing factor.

    Returns:
        :obj:`np.ndarray`, List, List: Array of blobs. If ``check_scaling``
        is True, also returns sequence of scaling factors to a scaled or
        resized image, or None if not loaded or given, and the resolutions
        of the full-sized image in which the blobs were detected.

    """
    # load blobs and display counts
    path = libmag.combine_paths(img_path, config.SUFFIX_BLOBS)
    print("Loading blobs from", path)
    with np.load(path) as archive:
        info = read_np_archive(archive)
        blobs = info["segments"]
        print("loaded {} blobs".format(len(blobs)))
        if config.verbose:
            detector.show_blobs_per_channel(blobs)
            print(info)
    if not check_scaling:
        return blobs

    # get scaling and resolutions from blob space to that of a down/upsampled
    # image space
    load_size = config.atlas_profile["target_size"]
    img_path_transposed = transformer.get_transposed_image_path(
        img_path, scale, load_size)
    scaling = None
    res = None
    if scale is not None or load_size is not None:
        # retrieve scaling from a rescaled/resized image
        _, img_info = importer.read_file(img_path_transposed,
                                         config.series,
                                         return_info=True)
        scaling = img_info["scaling"]
        res = np.multiply(config.resolutions[0], scaling)
        print("retrieved scaling from resized image:", scaling)
        print("rescaled resolution for full-scale image:", res)
    elif scaled_shape is not None:
        # fall back to scaling based on comparison to original image
        image5d = importer.read_file(img_path_transposed, config.series)
        scaling = importer.calc_scaling(image5d,
                                        None,
                                        scaled_shape=scaled_shape)
        res = config.resolutions[0]
        print("using scaling compared to full image:", scaling)
        print("resolution from full-scale image:", res)
    return blobs, scaling, res
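A minimal usage sketch with a hypothetical base path:

# load blobs alone, or also request scaling factors to translate blob
# coordinates into the space of a file rescaled by a factor of 0.5
blobs = load_blobs("sample_image")
blobs, scaling, res = load_blobs("sample_image", check_scaling=True, scale=0.5)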
Example #5
def setup_images(path=None,
                 series=None,
                 offset=None,
                 size=None,
                 proc_mode=None,
                 allow_import=True):
    """Sets up an image and all associated images and metadata.

    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.
    
    Args:
        path (str): Path to image from which MagellanMapper-style paths will 
            be generated.
        series (int): Image series number; defaults to None.
        offset (List[int]): Sub-image offset given in z,y,x; defaults to None.
        size (List[int]): Sub-image shape given in z,y,x; defaults to None.
        proc_mode (str): Processing mode, which should be a key in 
            :class:`config.ProcessTypes`, case-insensitive; defaults to None.
        allow_import (bool): True to allow importing the image if it
            cannot be loaded; defaults to True.
    
    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS:
            config.meta_dict[config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION:
            config.meta_dict[config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM:
            config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE:
            config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE:
            config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val

    # LOAD MAIN IMAGE

    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    load_subimage = offset is not None and size is not None
    config.resolutions = None

    # reset label images
    config.labels_img = None
    config.borders_img = None

    filename_base = importer.filename_to_base(path, series)
    subimg_base = None

    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = stack_detect.make_subimage_name(filename_base, offset,
                                                      size)
        filename_subimg = libmag.combine_paths(subimg_base,
                                               config.SUFFIX_SUBIMG)

        try:
            # load sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.image5d_io = config.LoadIO.NP
            print("Loaded sub-image from {} with shape {}".format(
                filename_subimg, config.image5d.shape))

            # after loading sub-image, load original image's metadata
            # for essential data such as vmin/vmax; will only warn if it
            # fails to load since metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load".format(
                filename_subimg))

    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
    if proc_type in (config.ProcessTypes.LOAD, config.ProcessTypes.EXPORT_ROIS,
                     config.ProcessTypes.EXPORT_BLOBS,
                     config.ProcessTypes.DETECT):
        # load a blobs archive
        try:
            if subimg_base:
                try:
                    # load blobs generated from sub-image
                    config.blobs = load_blobs(subimg_base)
                except (FileNotFoundError, KeyError):
                    # fallback to loading from full image blobs and getting
                    # a subset, shifting them relative to sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = load_blobs(filename_base)
                    config.blobs, _ = detector.get_blobs_in_roi(config.blobs,
                                                                offset,
                                                                size,
                                                                reverse=False)
                    detector.shift_blob_rel_coords(config.blobs,
                                                   np.multiply(offset, -1))
            else:
                # load full image blobs
                config.blobs = load_blobs(filename_base)
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (config.ProcessTypes.LOAD,
                             config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2

    if path and config.image5d is None:
        # load or import the main image stack
        print("Loading main image")
        try:
            if path.endswith(sitk_io.EXTS_3D):
                # attempt to load a format supported by SimpleITK and
                # prepend time axis
                config.image5d = sitk_io.read_sitk_files(path)[None]
                config.image5d_io = config.LoadIO.SITK
            else:
                # load or import from MagellanMapper Numpy format
                import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
                if not import_only:
                    # load previously imported image
                    config.image5d = importer.read_file(path, series)
                if allow_import:
                    # re-import over existing image or import new image
                    if os.path.isdir(path) and all(
                        [r is None for r in config.reg_suffixes.values()]):
                        # import directory of single plane images to single
                        # stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        config.image5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only or config.image5d is None:
                        # import multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = config.prefix if config.prefix else import_path
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        config.image5d = importer.import_multiplane_images(
                            chls,
                            prefix,
                            import_md,
                            series,
                            channel=config.channel)
                config.image5d_io = config.LoadIO.NP
        except FileNotFoundError as e:
            print(e)
            print("Could not load {}, will fall back to any associated "
                  "registered image".format(path))

    if config.metadatas and config.metadatas[0]:
        # assign metadata from alternate file if given to supersede settings
        # for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])

    # main image is currently required since many parameters depend on it
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    if atlas_suffix is None and config.image5d is None:
        # fallback to atlas if main image not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        print(
            "main image is not set, falling back to registered "
            "image with suffix", atlas_suffix)
    # use prefix to get images registered to a different image, eg a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.image5d = sitk_io.read_sitk_files(
                path, reg_names=atlas_suffix)[None]
            config.image5d_io = config.LoadIO.SITK
        except FileNotFoundError as e:
            print(e)

    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    if annotation_suffix is not None:
        # load labels image, set up scaling, and load labels file
        try:
            # TODO: need to support multichannel labels images
            config.labels_img = sitk_io.read_sitk_files(
                path, reg_names=annotation_suffix)
            if config.image5d is not None:
                config.labels_scaling = importer.calc_scaling(
                    config.image5d, config.labels_img)
            if config.load_labels is not None:
                labels_ref = ontology.load_labels_ref(config.load_labels)
                if isinstance(labels_ref, pd.DataFrame):
                    # parse CSV files loaded into data frame
                    config.labels_ref_lookup = ontology.create_lookup_pd(
                        labels_ref)
                else:
                    # parse dict from ABA JSON file
                    config.labels_ref_lookup = (
                        ontology.create_aba_reverse_lookup(labels_ref))
        except FileNotFoundError as e:
            print(e)

    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]
    if borders_suffix is not None:
        # load borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(
                path, reg_names=borders_suffix)
        except FileNotFoundError as e:
            print(e)

    if (config.atlas_labels[config.AtlasLabels.ORIG_COLORS]
            and config.load_labels is not None):
        # load original labels image from same directory as ontology
        # file for consistent ID-color mapping, even if labels are missing
        try:
            config.labels_img_orig = sitk_io.load_registered_img(
                config.load_labels, config.RegNames.IMG_LABELS.value)
        except FileNotFoundError as e:
            print(e)
            libmag.warn(
                "could not load original labels image; colors may differ "
                "from it")

    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate main image the specified number of times by 90 deg after
        # loading, as needed for images output by deep learning toolkits
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))

    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop full image to bounds of sub-image
        config.image5d = plot_3d.prepare_subimg(config.image5d, size,
                                                offset)[None]
        config.image5d_is_roi = True

    # add any additional image5d thresholds for multichannel images, such
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)
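A usage sketch with hypothetical arguments; "load" is assumed to be the key for config.ProcessTypes.LOAD:

# load the full image and an expected blobs archive
setup_images("sample_image.czi", series=0, proc_mode="load")
# load a saved sub-image at the given z,y,x offset and shape
setup_images("sample_image.czi", offset=(10, 50, 50), size=(30, 100, 100))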
Example #6
def transpose_img(filename,
                  series,
                  plane=None,
                  rescale=None,
                  target_size=None):
    """Transpose Numpy NPY saved arrays into new planar orientations and 
    rescaling or resizing.
    
    Rescaling/resizing take place in multiprocessing. Files are saved
    through memmap-based arrays to minimize RAM usage. Output filenames
    are based on the ``make_modifer_[task]`` functions. Currently transposes
    all channels, ignoring :attr:``config.channel`` parameter.
    
    Args:
        filename: Full file path in :attr:`cli.filename` format.
        series: Series within multi-series file.
        plane: Planar orientation (see :attr:`plot_2d.PLANES`). Defaults
            to None, in which case no planar transformation will occur.
        rescale: Rescaling factor; defaults to None. Takes precedence over
            ``target_size``.
        target_size (List[int]): Target shape in x,y,z; defaults to None,
            in which case the target size will be extracted from the atlas
            profile if available.

    """
    if target_size is None:
        target_size = config.atlas_profile["target_size"]
    if plane is None and rescale is None and target_size is None:
        print("No transposition to perform, skipping")
        return

    time_start = time()
    # even if loaded already, reread to get image metadata
    # TODO: consider saving metadata in config and retrieving from there
    img5d = importer.read_file(filename, series)
    info = img5d.meta
    image5d = img5d.img
    sizes = info["sizes"]

    # make filenames based on transpositions
    modifier = ""
    if plane is not None:
        modifier = make_modifier_plane(plane)
    # either rescaling or resizing
    if rescale is not None:
        modifier += make_modifier_scale(rescale)
    elif target_size:
        # target size may differ from final output size but allows a known
        # size to be used for finding the file later
        modifier += make_modifier_resized(target_size)
    filename_image5d_npz, filename_info_npz = importer.make_filenames(
        filename, series, modifier=modifier)

    # TODO: image5d should assume 4/5 dimensions
    offset = 0 if image5d.ndim <= 3 else 1
    multichannel = image5d.ndim >= 5
    image5d_swapped = image5d

    if plane is not None and plane != config.PLANE[0]:
        # swap z-y to get (y, z, x) order for xz orientation
        image5d_swapped = np.swapaxes(image5d_swapped, offset, offset + 1)
        config.resolutions[0] = libmag.swap_elements(config.resolutions[0], 0,
                                                     1)
        if plane == config.PLANE[2]:
            # swap new y-x to get (x, z, y) order for yz orientation
            image5d_swapped = np.swapaxes(image5d_swapped, offset, offset + 2)
            config.resolutions[0] = libmag.swap_elements(
                config.resolutions[0], 0, 2)

    scaling = None
    if rescale is not None or target_size is not None:
        # rescale based on scaling factor or target specific size
        rescaled = image5d_swapped
        # TODO: generalize for more than 1 preceding dimension?
        if offset > 0:
            rescaled = rescaled[0]
        max_pixels = [100, 500, 500]
        sub_roi_size = None
        if target_size:
            # to avoid artifacts from thin chunks, fit image into even
            # number of pixels per chunk by rounding up number of chunks
            # and resizing each chunk by ratio of total size to chunk num
            target_size = target_size[::-1]  # change to z,y,x
            shape = rescaled.shape[:3]
            num_chunks = np.ceil(np.divide(shape, max_pixels))
            max_pixels = np.ceil(np.divide(shape, num_chunks)).astype(int)
            sub_roi_size = np.floor(np.divide(target_size,
                                              num_chunks)).astype(int)
            print("Resizing image of shape {} to target_size: {}, using "
                  "num_chunks: {}, max_pixels: {}, sub_roi_size: {}".format(
                      rescaled.shape, target_size, num_chunks, max_pixels,
                      sub_roi_size))
        else:
            print("Rescaling image of shape {} by factor of {}".format(
                rescaled.shape, rescale))

        # rescale in chunks with multiprocessing
        sub_roi_slices, _ = chunking.stack_splitter(rescaled.shape, max_pixels)
        is_fork = chunking.is_fork()
        if is_fork:
            Downsampler.set_data(rescaled)
        sub_rois = np.zeros_like(sub_roi_slices)
        pool = chunking.get_mp_pool()
        pool_results = []
        for z in range(sub_roi_slices.shape[0]):
            for y in range(sub_roi_slices.shape[1]):
                for x in range(sub_roi_slices.shape[2]):
                    coord = (z, y, x)
                    slices = sub_roi_slices[coord]
                    args = [coord, slices, rescale, sub_roi_size, multichannel]
                    if not is_fork:
                        # pickle chunk if img not directly available
                        args.append(rescaled[slices])
                    pool_results.append(
                        pool.apply_async(Downsampler.rescale_sub_roi,
                                         args=args))
        for result in pool_results:
            coord, sub_roi = result.get()
            print("replacing sub_roi at {} of {}".format(
                coord, np.add(sub_roi_slices.shape, -1)))
            sub_rois[coord] = sub_roi

        pool.close()
        pool.join()
        rescaled_shape = chunking.get_split_stack_total_shape(sub_rois)
        if offset > 0:
            rescaled_shape = np.concatenate(([1], rescaled_shape))
        print("rescaled_shape: {}".format(rescaled_shape))
        # rescale chunks directly into memmap-backed array to minimize RAM usage
        image5d_transposed = np.lib.format.open_memmap(
            filename_image5d_npz,
            mode="w+",
            dtype=sub_rois[0, 0, 0].dtype,
            shape=tuple(rescaled_shape))
        chunking.merge_split_stack2(sub_rois, None, offset, image5d_transposed)

        if rescale is not None:
            # scale resolutions based on single rescaling factor
            config.resolutions = np.multiply(config.resolutions, 1 / rescale)
        else:
            # scale resolutions based on size ratio for each dimension
            config.resolutions = np.multiply(config.resolutions,
                                             (image5d_swapped.shape /
                                              rescaled_shape)[1:4])
        sizes[0] = rescaled_shape
        scaling = importer.calc_scaling(image5d_swapped, image5d_transposed)
    else:
        # transfer directly to memmap-backed array
        image5d_transposed = np.lib.format.open_memmap(
            filename_image5d_npz,
            mode="w+",
            dtype=image5d_swapped.dtype,
            shape=image5d_swapped.shape)
        if plane == config.PLANE[1] or plane == config.PLANE[2]:
            # flip upside-down if re-orienting planes
            if offset:
                image5d_transposed[0, :] = np.fliplr(image5d_swapped[0, :])
            else:
                image5d_transposed[:] = np.fliplr(image5d_swapped[:])
        else:
            image5d_transposed[:] = image5d_swapped[:]
        sizes[0] = image5d_swapped.shape

    # save image metadata
    print("detector.resolutions: {}".format(config.resolutions))
    print("sizes: {}".format(sizes))
    image5d_transposed.flush()  # flush written chunks to disk
    importer.save_image_info(
        filename_info_npz, info["names"], sizes, config.resolutions,
        info["magnification"], info["zoom"],
        *importer.calc_intensity_bounds(image5d_transposed), scaling, plane)
    print("saved transposed file to {} with shape {}".format(
        filename_image5d_npz, image5d_transposed.shape))
    print("time elapsed (s): {}".format(time() - time_start))
Example #7
def find_scaling(
    img_path: str,
    scaled_shape: Optional[Sequence[int]] = None,
    scale: Optional[float] = None,
    load_size: Optional[Sequence[int]] = None
) -> Tuple[Sequence[float], Sequence[float]]:
    """Find scaling between two images.
    
    Scaling can be computed to translate blob coordinates into another
    space, such as a downsampled image. These compressed coordinates can be
    used to generate a heat map of blobs.
    
    Args:
        img_path: Base path to image.
        scaled_shape: Shape of image to calculate scaling factor if
            this factor cannot be found from a transposed file's metadata;
            defaults to None.
        scale: Scalar scaling factor, used to find a
            rescaled file; defaults to None. To find a resized file instead,
            set an atlas profile with the resizing factor.
        load_size: Size of image to load in ``x, y, z``, typically given by an
            atlas profile and used to identify the path of the scaled
            image to load; defaults to None.

    Returns:
        Tuple of sequence of scaling factors to a scaled
        or resized image, or None if not loaded or given, and the resolutions
        of the full-sized image found based on ``img_path``.

    """
    # path to image, which may have been resized
    img_path_transposed = transformer.get_transposed_image_path(
        img_path, scale, load_size)
    scaling = None
    res = None
    if scale is not None or load_size is not None:
        # retrieve scaling from a rescaled/resized image
        img_info = importer.read_file(img_path_transposed, config.series).meta
        scaling = img_info["scaling"]
        res = np.multiply(config.resolutions[0], scaling)
        _logger.info("Retrieved scaling from resized image: %s", scaling)
        _logger.info("Rescaled resolution for full-scale image: %s", res)

    elif scaled_shape is not None:
        # scale by comparing to original image
        img5d = importer.read_file(img_path_transposed, config.series)
        img5d_shape = None
        if img5d.img is not None:
            # get the shape from the original image
            img5d_shape = img5d.img.shape
        elif img5d.meta is not None:
            # get the shape from the original image's metadata
            img5d_shape = img5d.meta[config.MetaKeys.SHAPE][1:4]

        if img5d_shape is not None:
            # find the scaling factor using the original and resized image's
            # shapes
            scaling = importer.calc_scaling(None, None, img5d_shape,
                                            scaled_shape)
            res = config.resolutions[0]
            _logger.info("Using scaling compared to full image: %s", scaling)
            _logger.info("Resolution from full-scale image: %s", res)

    return scaling, res
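A usage sketch mirroring the two branches, with hypothetical sizes (``load_size`` in x,y,z; ``scaled_shape`` in z,y,x):

# prefer scaling stored in the metadata of an image resized to load_size
scaling, res = find_scaling("sample_image", load_size=(100, 100, 51))
# otherwise compare the original shape against a known scaled shape
scaling, res = find_scaling("sample_image", scaled_shape=(51, 100, 100))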
Example #8
def setup_images(path: str,
                 series: Optional[int] = None,
                 offset: Optional[Sequence[int]] = None,
                 size: Optional[Sequence[int]] = None,
                 proc_type: Optional["config.ProcessTypes"] = None,
                 allow_import: bool = True,
                 fallback_main_img: bool = True):
    """Sets up an image and all associated images and metadata.

    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.
    
    Args:
        path: Path to image from which MagellanMapper-style paths will 
            be generated.
        series: Image series number; defaults to None.
        offset: Sub-image offset given in z,y,x; defaults to None.
        size: Sub-image shape given in z,y,x; defaults to None.
        proc_type: Processing type.
        allow_import: True to allow importing the image if it
            cannot be loaded; defaults to True.
        fallback_main_img: True to fall back to loading a registered image
            if possible if the main image could not be loaded; defaults to True.
    
    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS:
            config.meta_dict[config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION:
            config.meta_dict[config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM:
            config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE:
            config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE:
            config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val

        res = import_md[config.MetaKeys.RESOLUTIONS]
        if res is None:
            # default to 1 for x,y,z since image resolutions are required
            res = [1] * 3
            import_md[config.MetaKeys.RESOLUTIONS] = res
            _logger.warn("No image resolutions found. Defaulting to: %s", res)

    # LOAD MAIN IMAGE

    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    config.img5d = Image5d()
    load_subimage = offset is not None and size is not None
    config.resolutions = None

    # reset label images
    config.labels_img = None
    config.labels_img_sitk = None
    config.labels_img_orig = None
    config.borders_img = None
    config.labels_meta = None
    config.labels_ref = None

    # reset blobs
    config.blobs = None

    filename_base = importer.filename_to_base(path, series)
    subimg_base = None
    blobs = None

    # registered images set to load
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]

    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = naming.make_subimage_name(filename_base, offset, size)
        filename_subimg = libmag.combine_paths(subimg_base,
                                               config.SUFFIX_SUBIMG)

        try:
            # load sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.img5d.img = config.image5d
            config.img5d.path_img = filename_subimg
            config.img5d.img_io = config.LoadIO.NP
            config.img5d.subimg_offset = offset
            config.img5d.subimg_size = size
            print("Loaded sub-image from {} with shape {}".format(
                filename_subimg, config.image5d.shape))

            # after loading sub-image, load original image's metadata
            # for essential data such as vmin/vmax; will only warn if it
            # fails to load since metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load".format(
                filename_subimg))

    if config.load_data[config.LoadData.BLOBS] or proc_type in (
            config.ProcessTypes.LOAD, config.ProcessTypes.COLOC_MATCH,
            config.ProcessTypes.EXPORT_ROIS, config.ProcessTypes.EXPORT_BLOBS):
        # load a blobs archive
        blobs = detector.Blobs()
        try:
            if subimg_base:
                try:
                    # load blobs generated from sub-image
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(subimg_base))
                except (FileNotFoundError, KeyError):
                    # fallback to loading from full image blobs and getting
                    # a subset, shifting them relative to sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(filename_base))
                    blobs.blobs, _ = detector.get_blobs_in_roi(blobs.blobs,
                                                               offset,
                                                               size,
                                                               reverse=False)
                    detector.Blobs.shift_blob_rel_coords(
                        blobs.blobs, np.multiply(offset, -1))
            else:
                # load full image blobs
                config.blobs = blobs.load_blobs(
                    img_to_blobs_path(filename_base))
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (config.ProcessTypes.LOAD,
                             config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2

    if path and config.image5d is None and not atlas_suffix:
        # load or import the main image stack
        print("Loading main image")
        try:
            path_lower = path.lower()
            import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
            if path_lower.endswith(sitk_io.EXTS_3D):
                # load format supported by SimpleITK and prepend time axis;
                # if 2D, convert to 3D
                img5d = sitk_io.read_sitk_files(path, make_3d=True)
            elif not import_only and path_lower.endswith((".tif", ".tiff")):
                # load TIF file directly
                img5d, meta = read_tif(path)
                config.resolutions = meta[config.MetaKeys.RESOLUTIONS]
            else:
                # load or import from MagellanMapper Numpy format
                img5d = None
                if not import_only:
                    # load previously imported image
                    img5d = importer.read_file(path, series)
                if allow_import and (img5d is None or img5d.img is None):
                    # import image; will re-import over any existing image file
                    if os.path.isdir(path) and all(
                        [r is None for r in config.reg_suffixes.values()]):
                        # import directory of single plane images to single
                        # stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        img5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only:
                        # import multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = config.prefix if config.prefix else import_path
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        img5d = importer.import_multiplane_images(
                            chls,
                            prefix,
                            import_md,
                            series,
                            channel=config.channel)
            if img5d is not None:
                # set loaded main image in config
                config.img5d = img5d
                config.image5d = config.img5d.img
        except FileNotFoundError as e:
            _logger.exception(e)
            _logger.info("Could not load %s", path)

    if config.metadatas and config.metadatas[0]:
        # assign metadata from alternate file if given to supersede settings
        # for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])

    # main image is currently required since many parameters depend on it
    if fallback_main_img and atlas_suffix is None and config.image5d is None:
        # fallback to atlas if main image not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        _logger.info(
            "Main image is not set, falling back to registered image with "
            "suffix %s", atlas_suffix)
    # use prefix to get images registered to a different image, eg a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.img5d = sitk_io.read_sitk_files(path,
                                                   atlas_suffix,
                                                   make_3d=True)
            config.image5d = config.img5d.img
        except FileNotFoundError as e:
            print(e)

    # load metadata related to the labels image
    config.labels_metadata = labels_meta.LabelsMeta(
        f"{path}." if config.prefix else path).load()

    # load labels reference file, prioritizing path given by user
    # and falling back to any extension matching PATH_LABELS_REF
    path_labels_refs = [config.load_labels]
    labels_path_ref = config.labels_metadata.path_ref
    if labels_path_ref:
        path_labels_refs.append(labels_path_ref)
    labels_ref = None
    for ref in path_labels_refs:
        if not ref: continue
        try:
            # load labels reference file
            labels_ref = ontology.LabelsRef(ref).load()
            if labels_ref.ref_lookup is not None:
                config.labels_ref = labels_ref
                _logger.debug("Loaded labels reference file from %s", ref)
                break
        except (FileNotFoundError, KeyError):
            pass
    if path_labels_refs and (labels_ref is None
                             or labels_ref.ref_lookup is None):
        # warn if labels path given but none found
        _logger.warn(
            "Unable to load labels reference file from '%s', skipping",
            path_labels_refs)

    if annotation_suffix is not None:
        try:
            # load labels image
            # TODO: need to support multichannel labels images
            img5d, config.labels_img_sitk = sitk_io.read_sitk_files(
                path, annotation_suffix, True, True)
            config.labels_img = img5d.img[0]
        except FileNotFoundError as e:
            print(e)
            if config.image5d is not None:
                # create a blank labels image for custom annotation; colormap
                # can be generated for the original labels loaded below
                config.labels_img = np.zeros(config.image5d.shape[1:4],
                                             dtype=int)
                print("Created blank labels image from main image")
        if config.image5d is not None and config.labels_img is not None:
            # set up scaling factors by dimension between intensity and
            # labels images
            config.labels_scaling = importer.calc_scaling(
                config.image5d, config.labels_img)

    if borders_suffix is not None:
        # load borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(path,
                                                         borders_suffix,
                                                         make_3d=True).img[0]
        except FileNotFoundError as e:
            print(e)

    if config.atlas_labels[config.AtlasLabels.ORIG_COLORS]:
        labels_orig_ids = config.labels_metadata.region_ids_orig
        if labels_orig_ids is None:
            if config.load_labels is not None:
                # load original labels image from same directory as ontology
                # file for consistent ID-color mapping, even if labels are missing
                try:
                    config.labels_img_orig = sitk_io.load_registered_img(
                        config.load_labels, config.RegNames.IMG_LABELS.value)
                except FileNotFoundError as e:
                    print(e)
            if config.labels_img is not None and config.labels_img_orig is None:
                _logger.warn(
                    "Could not load original labels image IDs; colors may "
                    "differ from the original image")

    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate main image the specified number of times by 90 deg after
        # loading, as needed for images output by deep learning toolkits
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))

    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop full image to bounds of sub-image
        config.image5d = plot_3d.prepare_subimg(config.image5d, offset,
                                                size)[None]
        config.image5d_is_roi = True

    # add any additional image5d thresholds for multichannel images, such
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)

    if config.labels_img is not None:
        # make discrete colormap for labels image
        config.cmap_labels = colormaps.setup_labels_cmap(config.labels_img)

    if (blobs is not None and blobs.blobs is not None
            and config.img5d.img is not None and blobs.roi_size is not None):
        # scale blob coordinates to main image if shapes differ
        scaling = np.divide(config.img5d.img.shape[1:4], blobs.roi_size)
        # scale radius by mean of other dimensions' scaling
        scaling = np.append(scaling, np.mean(scaling))
        if not np.all(scaling == 1):
            _logger.debug("Scaling blobs to main image by factor: %s", scaling)
            blobs.blobs[:, :4] = ontology.scale_coords(blobs.blobs[:, :4],
                                                       scaling)
        blobs.scaling = scaling
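A usage sketch with hypothetical settings, loading the main image along with a registered annotation (labels) image:

# request a registered labels image via its suffix before setup
config.reg_suffixes[config.RegSuffixes.ANNOTATION] = (
    config.RegNames.IMG_LABELS.value)
setup_images("sample_image.czi", proc_type=config.ProcessTypes.LOAD)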