Code example #1
def cluster_blobs(img_path, suffix=None):
    """Cluster blobs and save to Numpy archive.
    
    Args:
        img_path (str): Base path used to locate the registered labels and
            blobs files and to construct the output blobs file path.
        suffix (str): Suffix for ``img_path``; defaults to None.

    """
    mod_path = img_path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(img_path, suffix)
    labels_img_np = sitk_io.load_registered_img(
        mod_path, config.RegNames.IMG_LABELS.value)
    blobs = detector.Blobs().load_blobs(np_io.img_to_blobs_path(img_path))
    scaling, res = np_io.find_scaling(img_path, labels_img_np.shape)
    if blobs is None:
        libmag.warn("unable to load nuclei coordinates")
        return

    # append label IDs to blobs and scale to make isotropic
    blobs_clus = ClusterByLabel.cluster_by_label(blobs.blobs[:, :3],
                                                 labels_img_np, scaling, res)
    print(blobs_clus)
    out_path = libmag.combine_paths(mod_path, config.SUFFIX_BLOB_CLUSTERS)
    np.save(out_path, blobs_clus)
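The internals of ``ClusterByLabel.cluster_by_label`` are not shown here. As a rough, hypothetical stand-in for the per-label clustering it performs, the sketch below groups blob coordinates by their label ID and runs scikit-learn's DBSCAN within each label; the function name, parameters, and output layout are illustrative assumptions, not the project's actual implementation.

import numpy as np
from sklearn.cluster import DBSCAN


def cluster_by_label_sketch(coords, labels_img, scaling, res, eps=5.0,
                            minpts=4):
    # look up the label ID under each blob coordinate (assumes scaled
    # coordinates fall within the bounds of the labels image)
    coords_lbl = np.multiply(coords, scaling).astype(int)
    label_ids = labels_img[tuple(coords_lbl.T)]
    # convert to physical units so clustering distances are isotropic
    coords_phys = np.multiply(coords, res)
    clusters = np.full(len(coords), -1, dtype=int)
    for label_id in np.unique(label_ids):
        mask = label_ids == label_id
        clusters[mask] = DBSCAN(eps=eps, min_samples=minpts).fit_predict(
            coords_phys[mask])
    # return coordinates annotated with label ID and cluster number
    return np.hstack((coords, label_ids[:, None], clusters[:, None]))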
Code example #2
def plot_knns(img_paths, suffix=None, show=False, names=None):
    """Plot k-nearest-neighbor distances for multiple sets of blobs,
    overlaying on a single plot.

    Args:
        img_paths (List[str]): Base paths used to locate the registered
            labels and blobs files and to construct the output blobs
            file paths.
        suffix (str): Suffix for each path in ``img_paths``; defaults to None.
        show (bool): True to plot the distances; defaults to False.
        names (List[str]): Sequence of names corresponding to ``img_paths``
            for the plot legend.

    """
    cluster_settings = config.atlas_profile[profiles.RegKeys.METRICS_CLUSTER]
    knn_n = cluster_settings[profiles.RegKeys.KNN_N]
    if not knn_n:
        knn_n = cluster_settings[profiles.RegKeys.DBSCAN_MINPTS] - 1
    print("Calculating k-nearest-neighbor distances and plotting distances "
          "for neighbor {}".format(knn_n))

    # set up combined data frames for all samples at each zoom level
    df_keys = ("ov", "zoom")
    dfs_comb = {key: [] for key in df_keys}
    names_disp = names if names else []
    for i, img_path in enumerate(img_paths):
        # load blobs associated with image
        mod_path = img_path
        if suffix is not None:
            mod_path = libmag.insert_before_ext(img_path, suffix)
        labels_img_np = sitk_io.load_registered_img(
            mod_path, config.RegNames.IMG_LABELS.value)
        blobs = detector.Blobs().load_blobs(np_io.img_to_blobs_path(img_path))
        scaling, res = np_io.find_scaling(img_path, labels_img_np.shape)
        if blobs is None:
            libmag.warn("unable to load nuclei coordinates for", img_path)
            continue
        # convert to physical units and display k-nearest-neighbors for nuclei
        blobs_phys = np.multiply(blobs.blobs[:, :3], res)
        # TESTING: given the same blobs, simply shift
        #blobs = np.multiply(blobs[i*10000000:, :3], res)
        _, _, dfs = knn_dist(blobs_phys, knn_n, 2, 1000000, False)
        if names is None:
            # default to naming from filename
            names_disp.append(os.path.basename(mod_path))
        for j, df in enumerate(dfs):
            dfs_comb[df_keys[j]].append(df)

    for key in dfs_comb:
        # combine data frames at each zoom level, save, and plot with
        # different colors for each image
        df = df_io.join_dfs(dfs_comb[key], "point")
        dist_cols = [col for col in df.columns if col.startswith("dist")]
        rename_cols = {col: name for col, name in zip(dist_cols, names_disp)}
        df = df.rename(rename_cols, axis=1)
        out_path = "knn_dist_combine_{}".format(key)
        df_io.data_frames_to_csv(df, out_path)
        plot_2d.plot_lines(out_path,
                           "point",
                           rename_cols.values(),
                           df=df,
                           show=show,
                           title=config.plot_labels[config.PlotLabels.TITLE])
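The ``knn_dist`` helper is defined elsewhere. A minimal sketch of the underlying calculation, assuming scikit-learn, is shown below: the distance from each blob to its ``knn_n``-th neighbor, sorted ascending to produce the "knee" plot commonly used to pick a DBSCAN ``eps`` value. The name and return value are illustrative only.

import numpy as np
from sklearn.neighbors import NearestNeighbors


def knn_dist_sketch(blobs_phys, knn_n):
    # query knn_n + 1 neighbors since each point's nearest neighbor in the
    # training set is itself, at distance 0
    nbrs = NearestNeighbors(n_neighbors=knn_n + 1).fit(blobs_phys)
    dists, _ = nbrs.kneighbors(blobs_phys)
    # sorted distances to the knn_n-th true neighbor
    return np.sort(dists[:, knn_n])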
Code example #3
def make_density_image(
    img_path: str,
    scale: Optional[float] = None,
    shape: Optional[Sequence[int]] = None,
    suffix: Optional[str] = None,
    labels_img_sitk: Optional[sitk.Image] = None,
    channel: Optional[Sequence[int]] = None,
    matches: Optional[Dict[Tuple[int, int], "colocalizer.BlobMatch"]] = None,
    atlas_profile: Optional["atlas_prof.AtlasProfile"] = None
) -> Tuple[np.ndarray, str]:
    """Make a density image based on associated blobs.
    
    Uses the size and resolutions of the original image stored in the blobs
    if available to determine scaling between the blobs and the output image.
    Otherwise, uses the shape of the registered labels image to set 
    the voxel sizes for the blobs.
    
    If ``matches`` is given, a heat map will be generated for each set
    of channels given in the dictionary. Otherwise, if the loaded blobs
    file has intensity-based colocalizations, a heat map will be generated
    for each combination of channels.
    
    Args:
        img_path: Path to image, which will be used to identify the blobs file.
        scale: Scaling factor between the blobs' space and the output space;
            defaults to None, in which case the scaling is found by
            :meth:`magmap.np_io.find_scaling`.
        shape: Output shape, used for scaling; defaults to None.
        suffix: Modifier to append to end of ``img_path`` basename for 
            registered image files that were output to a modified name; 
            defaults to None.
        labels_img_sitk: Labels image; defaults to None to load from a
            registered labels image.
        channel: Sequence of channels to include in density image. For
            multiple channels, blobs from all these channels are combined
            into one heatmap.  Defaults to None to use all channels.
        matches: Dictionary of channel combinations to blob matches; defaults
            to None.
        atlas_profile: Atlas profile, used for scaling; defaults to None.
    
    Returns:
        Tuple of the density image as a Numpy array in the same shape as
        the opened image, and the original ``img_path``, which can be used
        for tracking, such as in multiprocessing.
    
    """
    def make_heat_map():
        # build heat map to store densities per label px and save to file
        coord_scaled = ontology.scale_coords(blobs_chl[:, :3], scaling,
                                             labels_img.shape)
        _logger.debug("Scaled coords:\n%s", coord_scaled)
        return cv_nd.build_heat_map(labels_img.shape, coord_scaled)

    # set up paths and get labels image
    _logger.info("\n\nGenerating heat map from blobs")
    mod_path = img_path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(img_path, suffix)

    # load blobs
    blobs = detector.Blobs().load_blobs(np_io.img_to_blobs_path(img_path))

    is_2d = False
    if (shape is not None and blobs.roi_size is not None
            and blobs.resolutions is not None):
        # prepare output image and scaling factor from it to the blobs
        scaling = np.divide(shape, blobs.roi_size)
        labels_spacing = np.divide(blobs.resolutions[0], scaling)
        labels_img = np.zeros(shape, dtype=np.uint8)
        labels_img_sitk = sitk.GetImageFromArray(labels_img)
        labels_img_sitk.SetSpacing(labels_spacing[::-1])

    else:
        # default to use labels image as the size of the output image
        if labels_img_sitk is None:
            labels_img_sitk = sitk_io.load_registered_img(
                mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
        labels_img = sitk.GetArrayFromImage(labels_img_sitk)

        is_2d = labels_img.ndim == 2
        if is_2d:
            # temporarily convert 2D images to 3D
            labels_img = labels_img[None]

        # find the scaling between the blobs and the labels image
        target_size = (None if atlas_profile is None else
                       atlas_profile["target_size"])
        scaling = np_io.find_scaling(img_path, labels_img.shape, scale,
                                     target_size)[0]

        if shape is not None:
            # scale blob coordinates and heat map to an alternative final shape
            scaling = np.divide(shape, np.divide(labels_img.shape, scaling))
            labels_spacing = np.multiply(labels_img_sitk.GetSpacing()[::-1],
                                         np.divide(labels_img.shape, shape))
            labels_img = np.zeros(shape, dtype=labels_img.dtype)
            labels_img_sitk.SetSpacing(labels_spacing[::-1])
    _logger.debug("Using image scaling: {}".format(scaling))

    # annotate blobs based on position
    blobs_chl = blobs.blobs
    if channel is not None:
        _logger.info(
            "Using blobs from channel(s), combining if multiple channels: %s",
            channel)
        blobs_chl = blobs_chl[np.isin(
            detector.Blobs.get_blobs_channel(blobs_chl), channel)]
    heat_map = make_heat_map()
    if is_2d:
        # convert back to 2D
        heat_map = heat_map[0]
    imgs_write = {
        config.RegNames.IMG_HEAT_MAP.value:
        sitk_io.replace_sitk_with_numpy(labels_img_sitk, heat_map)
    }

    heat_colocs = None
    if matches:
        # create heat maps for match-based colocalization combos
        heat_colocs = []
        for chl_combo, chl_matches in matches.items():
            _logger.info(
                "Generating match-based colocalization heat map "
                "for channel combo: %s", chl_combo)
            # use blobs in first channel of each channel pair for simplicity
            blobs_chl = chl_matches.get_blobs(1)
            heat_colocs.append(make_heat_map())

    elif blobs.colocalizations is not None:
        # create heat map for each intensity-based colocalization combo
        # as a separate channel in output image
        blob_chls = range(blobs.colocalizations.shape[1])
        blob_chls_len = len(blob_chls)
        if blob_chls_len > 1:
            # get all channel combos that include given channels
            combos = []
            chls = blob_chls if channel is None else channel
            for r in range(2, blob_chls_len + 1):
                combos.extend([
                    tuple(c) for c in itertools.combinations(blob_chls, r)
                    if all([h in c for h in chls])
                ])

            heat_colocs = []
            for combo in combos:
                _logger.info(
                    "Generating intensity-based colocalization heat map "
                    "for channel combo: %s", combo)
                blobs_chl = blobs.blobs[np.all(np.equal(
                    blobs.colocalizations[:, combo], 1),
                                               axis=1)]
                heat_colocs.append(make_heat_map())

    if heat_colocs is not None:
        # combine heat maps into single image
        heat_colocs = np.stack(heat_colocs, axis=3)
        imgs_write[config.RegNames.IMG_HEAT_COLOC.value] = \
            sitk_io.replace_sitk_with_numpy(
                labels_img_sitk, heat_colocs)

    # write images to file
    sitk_io.write_reg_images(imgs_write, mod_path)
    return heat_map, img_path
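``cv_nd.build_heat_map`` is not shown here. Assuming it counts blobs per voxel, a minimal numpy-only sketch looks like the following; the coordinates are assumed to be integers already scaled and clipped to ``shape``, as ``ontology.scale_coords`` appears to do above.

import numpy as np


def build_heat_map_sketch(shape, coords):
    # accumulate the number of blobs falling in each voxel; np.add.at
    # handles repeated coordinates correctly, unlike plain fancy indexing
    heat = np.zeros(shape, dtype=np.uint32)
    np.add.at(heat, tuple(coords.T), 1)
    return heat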
Code example #4
def detect_blobs_blocks(filename_base, image5d, offset, size, channels,
                        verify=False, save_dfs=True, full_roi=False,
                        coloc=False):
    """Detect blobs by block processing of a large image.
    
    All channels are processed in the same blocks.
    
    Args:
        filename_base: Base path to use for file output.
        image5d: Large image to process as a Numpy array of t,z,y,x,[c].
        offset: Sub-image offset given as coordinates in z,y,x.
        size: Sub-image shape given in z,y,x.
        channels (Sequence[int]): Sequence of channels, where None detects
            in all channels.
        verify: True to verify detections against truth database; defaults 
            to False.
        save_dfs: True to save data frames to file; defaults to True.
        full_roi (bool): True to treat ``image5d`` as the full ROI; defaults
            to False.
        coloc (bool): True to perform blob co-localizations; defaults to False.
    
    Returns:
        tuple[int, int, int], str, :class:`magmap.cv.detector.Blobs`:
        Accuracy metrics from :class:`magmap.cv.detector.verify_rois`,
        feedback message from this same function, and detected blobs.
    
    """
    time_start = time()
    subimg_path_base = filename_base
    if size is None or offset is None:
        # uses the entire stack if no size or offset specified
        size = image5d.shape[1:4]
        offset = (0, 0, 0)
    else:
        # get base path for sub-image
        subimg_path_base = naming.make_subimage_name(
            filename_base, offset, size)
    filename_blobs = libmag.combine_paths(subimg_path_base, config.SUFFIX_BLOBS)
    
    # get ROI for given region, including all channels
    if full_roi:
        # treat the full image as the ROI
        roi = image5d[0]
    else:
        roi = plot_3d.prepare_subimg(image5d, offset, size)
    num_chls_roi = 1 if len(roi.shape) < 4 else roi.shape[3]
    if coloc and num_chls_roi < 2:
        coloc = False
        print("Unable to co-localize as image has only 1 channel")
    
    # prep chunking ROI into sub-ROIs with size based on segment_size, scaling
    # by physical units to make more independent of resolution; use profile
    # from first channel to be processed for block settings
    time_detection_start = time()
    settings = config.get_roi_profile(channels[0])
    print("Profile for block settings:", settings[settings.NAME_KEY])
    sub_roi_slices, sub_rois_offsets, denoise_max_shape, exclude_border, \
        tol, overlap_base, overlap, overlap_padding = setup_blocks(
            settings, roi.shape)
    
    # TODO: option to distribute groups of sub-ROIs to different servers 
    # for blob detection
    seg_rois = StackDetector.detect_blobs_sub_rois(
        roi, sub_roi_slices, sub_rois_offsets, denoise_max_shape,
        exclude_border, coloc, channels)
    detection_time = time() - time_detection_start
    print("blob detection time (s):", detection_time)
    
    # prune blobs in overlapping portions of sub-ROIs
    time_pruning_start = time()
    segments_all, df_pruning = StackPruner.prune_blobs_mp(
        roi, seg_rois, overlap, tol, sub_roi_slices, sub_rois_offsets, channels,
        overlap_padding)
    pruning_time = time() - time_pruning_start
    print("blob pruning time (s):", pruning_time)
    #print("maxes:", np.amax(segments_all, axis=0))
    
    # get weighted mean of ratios
    if df_pruning is not None:
        print("\nBlob pruning ratios:")
        path_pruning = "blob_ratios.csv" if save_dfs else None
        df_pruning_all = df_io.data_frames_to_csv(
            df_pruning, path_pruning, show=" ")
        cols = df_pruning_all.columns.tolist()
        blob_pruning_means = {}
        if "blobs" in cols:
            blobs_unpruned = df_pruning_all["blobs"]
            num_blobs_unpruned = np.sum(blobs_unpruned)
            for col in cols[1:]:
                blob_pruning_means["mean_{}".format(col)] = [
                    np.sum(np.multiply(df_pruning_all[col], blobs_unpruned)) 
                    / num_blobs_unpruned]
            path_pruning_means = "blob_ratios_means.csv" if save_dfs else None
            df_pruning_means = df_io.dict_to_data_frame(
                blob_pruning_means, path_pruning_means, show=" ")
        else:
            print("no blob ratios found")
    
    '''# report any remaining duplicates
    np.set_printoptions(linewidth=500, threshold=10000000)
    print("all blobs (len {}):".format(len(segments_all)))
    sort = np.lexsort(
        (segments_all[:, 2], segments_all[:, 1], segments_all[:, 0]))
    blobs = segments_all[sort]
    print(blobs)
    print("checking for duplicates in all:")
    print(detector.remove_duplicate_blobs(blobs, slice(0, 3)))
    '''
    
    stats_detection = None
    fdbk = None
    colocs = None
    if segments_all is not None:
        # remove the duplicated elements that were used for pruning
        detector.replace_rel_with_abs_blob_coords(segments_all)
        if coloc:
            colocs = segments_all[:, 10:10+num_chls_roi].astype(np.uint8)
        # remove absolute coordinate and any co-localization columns
        segments_all = detector.remove_abs_blob_coords(segments_all)
        
        # compare detected blobs with truth blobs
        # TODO: assumes ground truth is relative to any ROI offset,
        # but should make customizable
        if verify:
            stats_detection, fdbk = verifier.verify_stack(
                filename_base, subimg_path_base, settings, segments_all,
                channels, overlap_base)
    
    if config.save_subimg:
        subimg_base_path = libmag.combine_paths(
            subimg_path_base, config.SUFFIX_SUBIMG)
        if (isinstance(config.image5d, np.memmap) and 
                config.image5d.filename == os.path.abspath(subimg_base_path)):
            # file at sub-image save path may have been opened as a memmap
            # file, in which case saving would fail
            libmag.warn("{} is currently open, cannot save sub-image"
                        .format(subimg_base_path))
        else:
            # write sub-image, which is in ROI (3D) format
            with open(subimg_base_path, "wb") as f:
                np.save(f, roi)

    # store blobs in Blobs instance
    # TODO: consider separating into blobs and blobs metadata archives
    blobs = detector.Blobs(
        segments_all, colocalizations=colocs, path=filename_blobs)
    blobs.resolutions = config.resolutions
    blobs.basename = os.path.basename(config.filename)
    blobs.roi_offset = offset
    blobs.roi_size = size
    
    # whole image benchmarking time
    times = (
        [detection_time], 
        [pruning_time], 
        time() - time_start)
    times_dict = dict(zip(StackTimes, times))
    if segments_all is None:
        print("\nNo blobs detected")
    else:
        print("\nTotal blobs found:", len(segments_all))
        detector.show_blobs_per_channel(segments_all)
    print("\nTotal detection processing times (s):")
    path_times = "stack_detection_times.csv" if save_dfs else None
    df_io.dict_to_data_frame(times_dict, path_times, show=" ")
    
    return stats_detection, fdbk, blobs
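``setup_blocks`` and the ``StackDetector``/``StackPruner`` helpers are defined elsewhere. As a simplified sketch of the chunking step they rely on, the following splits a 3D ROI into blocks that extend past their nominal bounds by an overlap margin, so that blobs detected near block borders can later be pruned of duplicates; the name and exact slicing scheme are assumptions for illustration.

def make_block_slices_sketch(roi_shape, block_size, overlap):
    # yield slice tuples covering the ROI, each extended by ``overlap``
    # voxels (clamped to the ROI bounds) beyond the block size
    slices = []
    for z in range(0, roi_shape[0], block_size[0]):
        for y in range(0, roi_shape[1], block_size[1]):
            for x in range(0, roi_shape[2], block_size[2]):
                starts = (z, y, x)
                slices.append(tuple(
                    slice(s, min(s + b + o, d)) for s, b, o, d
                    in zip(starts, block_size, overlap, roi_shape)))
    return slices


print(make_block_slices_sketch((4, 4, 4), (2, 2, 4), (1, 1, 0))[0])
# (slice(0, 3, None), slice(0, 3, None), slice(0, 4, None))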
Code example #5
def setup_images(path: str,
                 series: Optional[int] = None,
                 offset: Optional[Sequence[int]] = None,
                 size: Optional[Sequence[int]] = None,
                 proc_type: Optional["config.ProcessTypes"] = None,
                 allow_import: bool = True,
                 fallback_main_img: bool = True):
    """Sets up an image and all associated images and metadata.

    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.
    
    Args:
        path: Path to image from which MagellanMapper-style paths will 
            be generated.
        series: Image series number; defaults to None.
        offset: Sub-image offset given in z,y,x; defaults to None.
        size: Sub-image shape given in z,y,x; defaults to None.
        proc_type: Processing type.
        allow_import: True to allow importing the image if it
            cannot be loaded; defaults to True.
        fallback_main_img: True to fall back to loading a registered image,
            if available, when the main image cannot be loaded; defaults
            to True.
    
    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS:
            config.meta_dict[config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION:
            config.meta_dict[config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM:
            config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE:
            config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE:
            config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val

        res = import_md[config.MetaKeys.RESOLUTIONS]
        if res is None:
            # default to 1 for x,y,z since image resolutions are required
            res = [1] * 3
            import_md[config.MetaKeys.RESOLUTIONS] = res
            _logger.warn("No image resolutions found. Defaulting to: %s", res)

    # LOAD MAIN IMAGE

    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    config.img5d = Image5d()
    load_subimage = offset is not None and size is not None
    config.resolutions = None

    # reset label images
    config.labels_img = None
    config.labels_img_sitk = None
    config.labels_img_orig = None
    config.borders_img = None
    config.labels_meta = None
    config.labels_ref = None

    # reset blobs
    config.blobs = None

    filename_base = importer.filename_to_base(path, series)
    subimg_base = None
    blobs = None

    # registered images set to load
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]

    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = naming.make_subimage_name(filename_base, offset, size)
        filename_subimg = libmag.combine_paths(subimg_base,
                                               config.SUFFIX_SUBIMG)

        try:
            # load sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.img5d.img = config.image5d
            config.img5d.path_img = filename_subimg
            config.img5d.img_io = config.LoadIO.NP
            config.img5d.subimg_offset = offset
            config.img5d.subimg_size = size
            print("Loaded sub-image from {} with shape {}".format(
                filename_subimg, config.image5d.shape))

            # after loading sub-image, load original image's metadata
            # for essential data such as vmin/vmax; will only warn if
            # fails to load since metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load".format(
                filename_subimg))

    if config.load_data[config.LoadData.BLOBS] or proc_type in (
            config.ProcessTypes.LOAD, config.ProcessTypes.COLOC_MATCH,
            config.ProcessTypes.EXPORT_ROIS, config.ProcessTypes.EXPORT_BLOBS):
        # load a blobs archive
        blobs = detector.Blobs()
        try:
            if subimg_base:
                try:
                    # load blobs generated from sub-image
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(subimg_base))
                except (FileNotFoundError, KeyError):
                    # fallback to loading from full image blobs and getting
                    # a subset, shifting them relative to sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(filename_base))
                    blobs.blobs, _ = detector.get_blobs_in_roi(blobs.blobs,
                                                               offset,
                                                               size,
                                                               reverse=False)
                    detector.Blobs.shift_blob_rel_coords(
                        blobs.blobs, np.multiply(offset, -1))
            else:
                # load full image blobs
                config.blobs = blobs.load_blobs(
                    img_to_blobs_path(filename_base))
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (config.ProcessTypes.LOAD,
                             config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2

    if path and config.image5d is None and not atlas_suffix:
        # load or import the main image stack
        print("Loading main image")
        try:
            path_lower = path.lower()
            import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
            if path_lower.endswith(sitk_io.EXTS_3D):
                # load format supported by SimpleITK and prepend time axis;
                # if 2D, convert to 3D
                img5d = sitk_io.read_sitk_files(path, make_3d=True)
            elif not import_only and path_lower.endswith((".tif", ".tiff")):
                # load TIF file directly
                img5d, meta = read_tif(path)
                config.resolutions = meta[config.MetaKeys.RESOLUTIONS]
            else:
                # load or import from MagellanMapper Numpy format
                img5d = None
                if not import_only:
                    # load previously imported image
                    img5d = importer.read_file(path, series)
                if allow_import and (img5d is None or img5d.img is None):
                    # import image; will re-import over any existing image file
                    if os.path.isdir(path) and all(
                        [r is None for r in config.reg_suffixes.values()]):
                        # import directory of single plane images to single
                        # stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        img5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only:
                        # import multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = config.prefix if config.prefix else import_path
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        img5d = importer.import_multiplane_images(
                            chls,
                            prefix,
                            import_md,
                            series,
                            channel=config.channel)
            if img5d is not None:
                # set loaded main image in config
                config.img5d = img5d
                config.image5d = config.img5d.img
        except FileNotFoundError as e:
            _logger.exception(e)
            _logger.info("Could not load %s", path)

    if config.metadatas and config.metadatas[0]:
        # assign metadata from alternate file if given to supersede settings
        # for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])

    # main image is currently required since many parameters depend on it
    if fallback_main_img and atlas_suffix is None and config.image5d is None:
        # fallback to atlas if main image not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        _logger.info(
            "Main image is not set, falling back to registered image with "
            "suffix %s", atlas_suffix)
    # use prefix to get images registered to a different image, e.g. a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.img5d = sitk_io.read_sitk_files(path,
                                                   atlas_suffix,
                                                   make_3d=True)
            config.image5d = config.img5d.img
        except FileNotFoundError as e:
            print(e)

    # load metadata related to the labels image
    config.labels_metadata = labels_meta.LabelsMeta(
        f"{path}." if config.prefix else path).load()

    # load labels reference file, prioritizing path given by user
    # and falling back to any extension matching PATH_LABELS_REF
    path_labels_refs = [config.load_labels]
    labels_path_ref = config.labels_metadata.path_ref
    if labels_path_ref:
        path_labels_refs.append(labels_path_ref)
    labels_ref = None
    for ref in path_labels_refs:
        if not ref: continue
        try:
            # load labels reference file
            labels_ref = ontology.LabelsRef(ref).load()
            if labels_ref.ref_lookup is not None:
                config.labels_ref = labels_ref
                _logger.debug("Loaded labels reference file from %s", ref)
                break
        except (FileNotFoundError, KeyError):
            pass
    if path_labels_refs and (labels_ref is None
                             or labels_ref.ref_lookup is None):
        # warn if labels path given but none found
        _logger.warning(
            "Unable to load labels reference file from '%s', skipping",
            path_labels_refs)

    if annotation_suffix is not None:
        try:
            # load labels image
            # TODO: need to support multichannel labels images
            img5d, config.labels_img_sitk = sitk_io.read_sitk_files(
                path, annotation_suffix, True, True)
            config.labels_img = img5d.img[0]
        except FileNotFoundError as e:
            print(e)
            if config.image5d is not None:
                # create a blank labels image for custom annotation; colormap
                # can be generated for the original labels loaded below
                config.labels_img = np.zeros(config.image5d.shape[1:4],
                                             dtype=int)
                print("Created blank labels image from main image")
        if config.image5d is not None and config.labels_img is not None:
            # set up scaling factors by dimension between intensity and
            # labels images
            config.labels_scaling = importer.calc_scaling(
                config.image5d, config.labels_img)

    if borders_suffix is not None:
        # load borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(path,
                                                         borders_suffix,
                                                         make_3d=True).img[0]
        except FileNotFoundError as e:
            print(e)

    if config.atlas_labels[config.AtlasLabels.ORIG_COLORS]:
        labels_orig_ids = config.labels_metadata.region_ids_orig
        if labels_orig_ids is None:
            if config.load_labels is not None:
                # load original labels image from same directory as ontology
                # file for consistent ID-color mapping, even if labels are missing
                try:
                    config.labels_img_orig = sitk_io.load_registered_img(
                        config.load_labels, config.RegNames.IMG_LABELS.value)
                except FileNotFoundError as e:
                    print(e)
            if config.labels_img is not None and config.labels_img_orig is None:
                _logger.warning(
                    "Could not load original labels image IDs; colors may "
                    "differ from the original image")

    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate main image specified num of times x90deg after loading since
        # need to rotate images output by deep learning toolkit
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))

    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop full image to bounds of sub-image
        config.image5d = plot_3d.prepare_subimg(config.image5d, offset,
                                                size)[None]
        config.image5d_is_roi = True

    # add any additional image5d thresholds for multichannel images, such
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)

    if config.labels_img is not None:
        # make discrete colormap for labels image
        config.cmap_labels = colormaps.setup_labels_cmap(config.labels_img)

    if (blobs is not None and blobs.blobs is not None
            and config.img5d.img is not None and blobs.roi_size is not None):
        # scale blob coordinates to main image if shapes differ
        scaling = np.divide(config.img5d.img.shape[1:4], blobs.roi_size)
        # scale radius by mean of other dimensions' scaling
        scaling = np.append(scaling, np.mean(scaling))
        if not np.all(scaling == 1):
            _logger.debug("Scaling blobs to main image by factor: %s", scaling)
            blobs.blobs[:, :4] = ontology.scale_coords(blobs.blobs[:, :4],
                                                       scaling)
        blobs.scaling = scaling
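The final rescaling step above can be checked in isolation. In the sketch below, plain multiplication stands in for ``ontology.scale_coords`` (which may also round or clip); blob rows are assumed to start with z, y, x, radius columns.

import numpy as np

blob_rows = np.array([[10., 20., 30., 3.]])  # z, y, x, radius
roi_size = (100, 200, 300)    # shape the blobs were detected in
main_shape = (200, 400, 600)  # shape of the loaded main image in z,y,x

scaling = np.divide(main_shape, roi_size)
# scale the radius by the mean of the per-axis scaling factors
scaling = np.append(scaling, np.mean(scaling))
if not np.all(scaling == 1):
    blob_rows[:, :4] *= scaling
print(blob_rows)  # [[20. 40. 60.  6.]]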
Code example #6
File: np_io.py  Project: kaparna126/magellanmapper
def setup_images(path=None, series=None, offset=None, size=None,
                 proc_mode=None, allow_import=True):
    """Sets up an image and all associated images and metadata.

    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.
    
    Args:
        path (str): Path to image from which MagellanMapper-style paths will 
            be generated.
        series (int): Image series number; defaults to None.
        offset (List[int]): Sub-image offset given in z,y,x; defaults to None.
        size (List[int]): Sub-image shape given in z,y,x; defaults to None.
        proc_mode (str): Processing mode, which should be a key in 
            :class:`config.ProcessTypes`, case-insensitive; defaults to None.
        allow_import (bool): True to allow importing the image if it
            cannot be loaded; defaults to True.
    
    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS: config.meta_dict[
                config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION: config.meta_dict[
                config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM: config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE: config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE: config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val
    
    # LOAD MAIN IMAGE
    
    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    config.img5d = Image5d()
    load_subimage = offset is not None and size is not None
    config.resolutions = None
    
    # reset label images
    config.labels_img = None
    config.labels_img_sitk = None
    config.borders_img = None
    
    # reset blobs
    config.blobs = None

    filename_base = importer.filename_to_base(path, series)
    subimg_base = None
    blobs = None

    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = naming.make_subimage_name(
            filename_base, offset, size)
        filename_subimg = libmag.combine_paths(
            subimg_base, config.SUFFIX_SUBIMG)

        try:
            # load sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.img5d.img = config.image5d
            config.img5d.path_img = filename_subimg
            config.img5d.img_io = config.LoadIO.NP
            config.img5d.subimg_offset = offset
            config.img5d.subimg_size = size
            print("Loaded sub-image from {} with shape {}"
                  .format(filename_subimg, config.image5d.shape))

            # after loading sub-image, load original image's metadata
            # for essential data such as vmin/vmax; will only warn if
            # fails to load since metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load"
                  .format(filename_subimg))

    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
    if config.load_data[config.LoadData.BLOBS] or proc_type in (
            config.ProcessTypes.LOAD,
            config.ProcessTypes.COLOC_MATCH,
            config.ProcessTypes.EXPORT_ROIS,
            config.ProcessTypes.EXPORT_BLOBS,
            config.ProcessTypes.DETECT):
        # load a blobs archive
        blobs = detector.Blobs()
        try:
            if subimg_base:
                try:
                    # load blobs generated from sub-image
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(subimg_base))
                except (FileNotFoundError, KeyError):
                    # fallback to loading from full image blobs and getting
                    # a subset, shifting them relative to sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(filename_base))
                    blobs.blobs, _ = detector.get_blobs_in_roi(
                        blobs.blobs, offset, size, reverse=False)
                    detector.shift_blob_rel_coords(
                        blobs.blobs, np.multiply(offset, -1))
            else:
                # load full image blobs
                config.blobs = blobs.load_blobs(
                    img_to_blobs_path(filename_base))
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (
                    config.ProcessTypes.LOAD, config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2
    
    if path and config.image5d is None:
        # load or import the main image stack
        print("Loading main image")
        try:
            if path.endswith(sitk_io.EXTS_3D):
                # load a format supported by SimpleITK and prepend time axis
                config.image5d = sitk_io.read_sitk_files(path)[None]
                config.img5d.img = config.image5d
                config.img5d.path_img = path
                config.img5d.img_io = config.LoadIO.SITK
            else:
                # load or import from MagellanMapper Numpy format
                import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
                img5d = None
                if not import_only:
                    # load previously imported image
                    img5d = importer.read_file(path, series)
                if allow_import:
                    # re-import over existing image or import new image
                    if os.path.isdir(path) and all(
                            [r is None for r in config.reg_suffixes.values()]):
                        # import directory of single plane images to single
                        # stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        img5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only or img5d is None:
                        # import multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = config.prefix if config.prefix else import_path
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        img5d = importer.import_multiplane_images(
                            chls, prefix, import_md, series,
                            channel=config.channel)
                if img5d is not None:
                    # set loaded main image in config
                    config.img5d = img5d
                    config.image5d = config.img5d.img
        except FileNotFoundError as e:
            print(e)
            print("Could not load {}, will fall back to any associated "
                  "registered image".format(path))
    
    if config.metadatas and config.metadatas[0]:
        # assign metadata from alternate file if given to supersede settings
        # for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])
    
    # main image is currently required since many parameters depend on it
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    if atlas_suffix is None and config.image5d is None:
        # fallback to atlas if main image not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        print("main image is not set, falling back to registered "
              "image with suffix", atlas_suffix)
    # use prefix to get images registered to a different image, e.g. a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.image5d = sitk_io.read_sitk_files(
                path, reg_names=atlas_suffix)[None]
            config.img5d.img = config.image5d
            config.img5d.img_io = config.LoadIO.SITK
        except FileNotFoundError as e:
            print(e)
    
    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    if annotation_suffix is not None:
        try:
            # load labels image
            # TODO: need to support multichannel labels images
            config.labels_img, config.labels_img_sitk = sitk_io.read_sitk_files(
                path, reg_names=annotation_suffix, return_sitk=True)
        except FileNotFoundError as e:
            print(e)
            if config.image5d is not None:
                # create a blank labels image for custom annotation; colormap
                # can be generated for the original labels loaded below
                config.labels_img = np.zeros(
                    config.image5d.shape[1:4], dtype=int)
                print("Created blank labels image from main image")
        if config.image5d is not None and config.labels_img is not None:
            # set up scaling factors by dimension between intensity and
            # labels images
            config.labels_scaling = importer.calc_scaling(
                config.image5d, config.labels_img)
        try:
            if config.load_labels is not None:
                # load labels reference file
                labels_ref = ontology.load_labels_ref(config.load_labels)
                if isinstance(labels_ref, pd.DataFrame):
                    # parse CSV files loaded into data frame
                    config.labels_ref_lookup = ontology.create_lookup_pd(
                        labels_ref)
                else:
                    # parse dict from ABA JSON file
                    config.labels_ref_lookup = (
                        ontology.create_aba_reverse_lookup(labels_ref))
        except FileNotFoundError as e:
            print(e)
    
    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]
    if borders_suffix is not None:
        # load borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(
                path, reg_names=borders_suffix)
        except FileNotFoundError as e:
            print(e)
    
    if (config.atlas_labels[config.AtlasLabels.ORIG_COLORS]
            and config.load_labels is not None):
        # load original labels image from same directory as ontology
        # file for consistent ID-color mapping, even if labels are missing
        try:
            config.labels_img_orig = sitk_io.load_registered_img(
                config.load_labels, config.RegNames.IMG_LABELS.value)
        except FileNotFoundError as e:
            print(e)
            libmag.warn(
                "could not load original labels image; colors may differ "
                "from it")
    
    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate main image specified num of times x90deg after loading since 
        # need to rotate images output by deep learning toolkit
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))

    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop full image to bounds of sub-image
        config.image5d = plot_3d.prepare_subimg(
            config.image5d, offset, size)[None]
        config.image5d_is_roi = True

    # add any additional image5d thresholds for multichannel images, such 
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(
        config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)
    
    if config.labels_img is not None:
        # make discrete colormap for labels image
        config.cmap_labels = colormaps.setup_labels_cmap(config.labels_img)
    
    if (blobs is not None and blobs.blobs is not None
            and config.img5d.img is not None and blobs.roi_size is not None):
        # scale blob coordinates to main image if shapes differ
        scaling = np.divide(config.img5d.img.shape[1:4], blobs.roi_size)
        if not np.all(scaling == 1):
            print("Scaling blobs to main image by factor:", scaling)
            blobs.blobs[:, :3] = ontology.scale_coords(
                blobs.blobs[:, :3], scaling)
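``libmag.get_enum`` resolves the case-insensitive ``proc_mode`` string to a :class:`config.ProcessTypes` member. Its implementation is not shown; a minimal stand-in, with a toy enum for illustration, might look like this.

from enum import Enum, auto
from typing import Optional


class ProcessTypes(Enum):
    LOAD = auto()
    DETECT = auto()
    IMPORT_ONLY = auto()


def get_enum_sketch(name: Optional[str], enum_class):
    # map a case-insensitive name to the enum member, or None if absent
    if name is None:
        return None
    return enum_class.__members__.get(name.upper())


print(get_enum_sketch("detect", ProcessTypes))  # ProcessTypes.DETECT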
Code example #7
def make_density_image(img_path, scale=None, shape=None, suffix=None, 
                       labels_img_sitk=None, channel=None, matches=None):
    """Make a density image based on associated blobs.
    
    Uses the shape of the registered labels image by default to set 
    the voxel sizes for the blobs.
    
    If ``matches`` is given, a heat map will be generated for each set
    of channels given in the dictionary. Otherwise, if the loaded blobs
    file has intensity-based colocalizations, a heat map will be generated
    for each combination of channels.
    
    Args:
        img_path: Path to image, which will be used to identify the blobs file.
        scale: Rescaling factor as a scalar value to find the corresponding
            full-sized image. Defaults to None to use the register
            setting ``target_size`` instead if available, falling back
            to loading the full-sized image to find its shape if necessary.
        shape: Final output shape; defaults to None to use the shape of
            the labels image.
        suffix: Modifier to append to end of ``img_path`` basename for 
            registered image files that were output to a modified name; 
            defaults to None.
        labels_img_sitk: Labels image as a SimpleITK ``Image`` object; 
            defaults to None, in which case the registered labels image file 
            corresponding to ``img_path`` with any ``suffix`` modifier 
            will be opened.
        channel (List[int]): Sequence of channels to include in density image;
            defaults to None to combine blobs from all channels.
        matches (dict[tuple[int, int], :class:`magmap.cv.colocalizer.BlobMatch`]):
            Dictionary of channel combinations to blob matches; defaults to
            None.
    
    Returns:
        :obj:`np.ndarray`, str: The density image as a Numpy array in the
        same shape as the opened image, and the original ``img_path``,
        which can be used for tracking, such as in multiprocessing.
    """
    def make_heat_map():
        # build heat map to store densities per label px and save to file
        coord_scaled = ontology.scale_coords(
            blobs_chl[:, :3], scaling, labels_img.shape)
        print("coords", coord_scaled)
        return cv_nd.build_heat_map(labels_img.shape, coord_scaled)
    
    # set up paths and get labels image
    mod_path = img_path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(img_path, suffix)
    if labels_img_sitk is None:
        labels_img_sitk = sitk_io.load_registered_img(
            mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
    labels_img = sitk.GetArrayFromImage(labels_img_sitk)
    
    # load blobs
    blobs = detector.Blobs().load_blobs(np_io.img_to_blobs_path(img_path))
    scaling = np_io.find_scaling(img_path, labels_img.shape, scale)[0]
    if shape is not None:
        # scale blob coordinates and heat map to an alternative final shape
        scaling = np.divide(shape, np.divide(labels_img.shape, scaling))
        labels_spacing = np.multiply(
            labels_img_sitk.GetSpacing()[::-1], 
            np.divide(labels_img.shape, shape))
        labels_img = np.zeros(shape, dtype=labels_img.dtype)
        labels_img_sitk.SetSpacing(labels_spacing[::-1])
    print("using scaling: {}".format(scaling))
    
    # annotate blobs based on position
    blobs_chl = blobs.blobs
    if channel is not None:
        blobs_chl = blobs_chl[np.isin(detector.get_blobs_channel(
            blobs_chl), channel)]
    heat_map = make_heat_map()
    print("heat map", heat_map.shape, heat_map.dtype, labels_img.shape)
    imgs_write = {
        config.RegNames.IMG_HEAT_MAP.value:
            sitk_io.replace_sitk_with_numpy(labels_img_sitk, heat_map)}
    
    heat_colocs = None
    if matches:
        # create heat maps for match-based colocalization combos
        heat_colocs = []
        for chl_combo, chl_matches in matches.items():
            print("Generating match-based colocalization heat map "
                  "for channel combo:", chl_combo)
            # use blobs in first channel of each channel pair for simplicity
            blobs_chl = chl_matches.get_blobs(1)
            heat_colocs.append(make_heat_map())
    
    elif blobs.colocalizations is not None:
        # create heat map for each intensity-based colocalization combo
        # as a separate channel in output image
        blob_chls = range(blobs.colocalizations.shape[1])
        blob_chls_len = len(blob_chls)
        if blob_chls_len > 1:
            # get all channel combos that include given channels
            combos = []
            chls = blob_chls if channel is None else channel
            for r in range(2, blob_chls_len + 1):
                combos.extend(
                    [tuple(c) for c in itertools.combinations(blob_chls, r)
                     if all([h in c for h in chls])])
            
            heat_colocs = []
            for combo in combos:
                print("Generating intensity-based colocalization heat map "
                      "for channel combo:", combo)
                blobs_chl = blobs.blobs[np.all(np.equal(
                    blobs.colocalizations[:, combo], 1), axis=1)]
                heat_colocs.append(make_heat_map())
    
    if heat_colocs is not None:
        # combine heat maps into single image
        heat_colocs = np.stack(heat_colocs, axis=3)
        imgs_write[config.RegNames.IMG_HEAT_COLOC.value] = \
            sitk_io.replace_sitk_with_numpy(
                labels_img_sitk, heat_colocs)
    
    # write images to file
    sitk_io.write_reg_images(imgs_write, mod_path)
    return heat_map, img_path
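The channel-combination logic above can be exercised on its own. This standalone snippet mirrors it: take every combination of two or more channels and keep those that include all of the requested channels.

import itertools

blob_chls = range(3)  # channels present in the colocalizations array
channel = [0]         # channels requested by the caller
chls = blob_chls if channel is None else channel
combos = []
for r in range(2, len(blob_chls) + 1):
    combos.extend(
        tuple(c) for c in itertools.combinations(blob_chls, r)
        if all(h in c for h in chls))
print(combos)  # [(0, 1), (0, 2), (0, 1, 2)]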