Code example #1
def test_insert_before_ext(self):
     self.assertEqual(
         libmag.insert_before_ext("foo/bar/item.py", "totest", "_"),
         "foo/bar/item_totest.py")
     self.assertEqual(libmag.insert_before_ext("foo/bar/item.py", "totest"),
                      "foo/bar/itemtotest.py")
     self.assertEqual(
         libmag.insert_before_ext("foo/bar/item", "totest", "_"),
         "foo/bar/item_totest")
Code example #2
File: sitk_io.py Project: clifduhn/magellanmapper
def merge_images(img_paths,
                 reg_name,
                 prefix=None,
                 suffix=None,
                 fn_combine=np.sum):
    """Merge images from multiple paths.
    
    Assumes that the images are relatively similar in size, but will resize 
    them to the size of the first image to combine the images.
    
    Args:
        img_paths: Paths from which registered paths will be found.
        reg_name: Registration suffix to load for the given paths 
            in ``img_paths``.
        prefix: Start of output path; defaults to None to use the first 
           path in ``img_paths`` instead.
        suffix: Portion of path to be combined with each path 
            in ``img_paths`` and output path; defaults to None.
        fn_combine: Function to apply to combine images with ``axis=0``. 
            Defaults to :func:``np.sum``. If None, each image will be 
            inserted as a separate channel.
    
    Returns:
        The combined image in SimpleITK format.
    """
    if len(img_paths) < 1: return None

    img_sitk = None
    img_nps = []
    for img_path in img_paths:
        mod_path = img_path
        if suffix is not None:
            # adjust image path with suffix
            mod_path = libmag.insert_before_ext(mod_path, suffix)
        print("loading", mod_path)
        # load and resize images to shape of first loaded image
        img, _ = _load_reg_img_to_combine(mod_path, reg_name, img_nps)
        if img_sitk is None: img_sitk = img

    # combine images and write single combo image
    if fn_combine is None:
        # combine raw images into separate channels
        img_combo = np.stack(img_nps, axis=img_nps[0].ndim)
    else:
        # merge by custom function
        img_combo = fn_combine(img_nps, axis=0)
    combined_sitk = replace_sitk_with_numpy(img_sitk, img_combo)
    # fallback to using first image's name as base
    output_base = img_paths[0] if prefix is None else prefix
    if suffix is not None:
        output_base = libmag.insert_before_ext(output_base, suffix)
    output_reg = libmag.combine_paths(reg_name, config.RegNames.COMBINED.value)
    write_reg_images({output_reg: combined_sitk}, output_base)
    return combined_sitk
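A hypothetical call, assuming two registered samples whose atlas volumes should be averaged rather than summed (the image paths are placeholders; np.mean replaces the np.sum default):

combined = merge_images(
    ["sample1.mhd", "sample2.mhd"],   # hypothetical image paths
    config.RegNames.IMG_ATLAS.value,  # registration suffix to load
    fn_combine=np.mean)               # average instead of summing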
Code example #3
def make_labels_diff_img(img_path, df_path, meas, fn_avg, prefix=None, 
                         show=False, level=None, meas_path_name=None, 
                         col_wt=None):
    """Replace labels in an image with the differences in metrics for 
    each given region between two conditions.
    
    Args:
        img_path: Path to the base image from which the corresponding 
            registered image will be found.
        df_path: Path to data frame with metrics for the labels.
        meas: Name of column in the data frame with the chosen measurement.
        fn_avg: Function to apply to the set of measurements, such as a mean. 
            Can be None if ``df_path`` points to a stats file from which 
            to extract metrics directly in :meth:``vols.map_meas_to_labels``.
        prefix: Start of path for output image; defaults to None to 
            use ``img_path`` instead.
        show: True to show the images after generating them; defaults to False.
        level: Ontological level at which to look up and show labels. 
            Assumes that the labels level image corresponding to this value 
            has already been generated by :meth:``make_labels_level_img``. 
            Defaults to None to use only drawn labels.
        meas_path_name: Name to use in place of `meas` in output path; 
            defaults to None.
        col_wt (str): Name of column to use for weighting; defaults to None.
    """
    # load labels image and data frame before generating map for the 
    # given metric of the chosen measurement
    print("Generating labels difference image for", meas, "from", df_path)
    reg_name = (config.RegNames.IMG_LABELS.value if level is None 
                else config.RegNames.IMG_LABELS_LEVEL.value.format(level))
    labels_sitk = sitk_io.load_registered_img(img_path, reg_name, get_sitk=True)
    labels_np = sitk.GetArrayFromImage(labels_sitk)
    df = pd.read_csv(df_path)
    labels_diff = vols.map_meas_to_labels(
        labels_np, df, meas, fn_avg, reverse=True, col_wt=col_wt)
    if labels_diff is None: return
    labels_diff_sitk = sitk_io.replace_sitk_with_numpy(labels_sitk, labels_diff)
    
    # save and show labels difference image using measurement name in 
    # output path or overriding with custom name
    meas_path = meas if meas_path_name is None else meas_path_name
    reg_diff = libmag.insert_before_ext(
        config.RegNames.IMG_LABELS_DIFF.value, meas_path, "_")
    if fn_avg is not None:
        # add function name to output path if given
        reg_diff = libmag.insert_before_ext(
            reg_diff, fn_avg.__name__, "_")
    imgs_write = {reg_diff: labels_diff_sitk}
    out_path = prefix if prefix else img_path
    sitk_io.write_reg_images(imgs_write, out_path)
    if show:
        for img in imgs_write.values():
            if img: sitk.Show(img)
Code example #4
def meas_plot_zscores(path,
                      metric_cols,
                      extra_cols,
                      composites,
                      size=None,
                      show=True):
    """Measure and plot z-scores for given columns in a data frame.
    
    Args:
        path (str): Path to data frame.
        metric_cols (List[str]): Sequence of column names for which to 
            compute z-scores.
        extra_cols (List[str]): Additional columns to include in the 
            output data frame.
        composites (List[Enum]): Sequence of enums specifying the 
            combination, typically from :class:`vols.MetricCombos`.
        size (List[int]): Sequence of ``width, height`` to size the figure; 
            defaults to None.
        show (bool): True to display the image; defaults to True.

    """
    # generate z-scores
    df = pd.read_csv(path)
    df = df_io.zscore_df(df, "Region", metric_cols, extra_cols, True)

    # generate composite score column
    df_comb = df_io.combine_cols(df, composites)
    df_io.data_frames_to_csv(
        df_comb, libmag.insert_before_ext(config.filename, "_zhomogeneity"))

    # shift metrics from each condition to separate columns
    conds = np.unique(df["Condition"])
    df = df_io.cond_to_cols_df(df, ["Sample", "Region"], "Condition",
                               "original", metric_cols)
    path = libmag.insert_before_ext(config.filename, "_zscore")
    df_io.data_frames_to_csv(df, path)

    # display as probability plot
    lims = (-3, 3)
    plot_2d.plot_probability(path,
                             conds,
                             metric_cols,
                             "Volume",
                             xlim=lims,
                             ylim=lims,
                             title="Region Match Z-Scores",
                             fig_size=size,
                             show=show,
                             suffix=None,
                             df=df)
Code example #5
def make_subimage_name(
        base: str, offset: Optional[Tuple[int, int, int]] = None,
        shape: Optional[Tuple[int, int, int]] = None,
        suffix: Optional[str] = None) -> str:
    """Make name of subimage for a given offset and shape.

    The order of ``offset`` and ``shape`` is assumed to be ``z, y, x`` but
    will be reversed for the output name since the user-oriented ordering
    is ``x, y, z``.
    
    Args:
        base: Start of name, which can include full parent path.
        offset: Offset as a tuple; defaults to None to ignore sub-image.
        shape: Shape as a tuple; defaults to None to ignore sub-image.
        suffix: Suffix to append, replacing any existing extension
            in ``base``; defaults to None.
    
    Returns:
        Name (or path) to subimage.
    """
    name = base
    if offset is not None and shape is not None:
        # sub-image offset/shape stored as z,y,x, but file named as x,y,z
        roi_site = "{}x{}".format(offset[::-1], shape[::-1]).replace(" ", "")
        name = libmag.insert_before_ext(base, roi_site, "_")
    if suffix:
        name = libmag.combine_paths(name, suffix)
    print("subimage name: {}".format(name))
    return name
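To make the reversal concrete, a hypothetical call and the name it would produce:

# offset/shape given as z,y,x; the file name lists them as x,y,z
name = make_subimage_name(
    "vol.npy", offset=(0, 50, 100), shape=(64, 128, 128))
# -> "vol_(100,50,0)x(128,128,64).npy"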
Code example #6
def get_transposed_image_path(img_path: str,
                              scale: float = None,
                              target_size: Sequence[int] = None) -> str:
    """Get path modified for any transposition.
    
    Args:
        img_path: Unmodified image path.
        scale: Scaling factor, which takes precedence over ``target_size``;
            defaults to None.
        target_size: Target size in ``x, y, z``, typically given by an atlas
            profile; defaults to None.
    
    Returns:
        Modified path for the given transposition, or ``img_path`` unmodified 
        if all transposition factors are None.
    """
    img_path_modified = img_path
    if scale is not None or target_size is not None:
        # use scaled image for pixel comparison, retrieving
        # saved scaling as of v.0.6.0
        if scale is not None:
            # scale takes priority as command-line argument
            modifier = make_modifier_scale(scale)
            print("loading scaled file with {} modifier".format(modifier))
        else:
            # otherwise assume set target size
            modifier = make_modifier_resized(target_size)
            print("loading resized file with {} modifier".format(modifier))
        img_path_modified = libmag.insert_before_ext(img_path, "_" + modifier)
    return img_path_modified
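For illustration, assuming make_modifier_scale(0.5) returns a string like "scale0.5" (an assumed format; the helper is not shown here), the path rewrite reduces to:

# get_transposed_image_path("img.npy", scale=0.5)
#   -> libmag.insert_before_ext("img.npy", "_scale0.5")
#   -> "img_scale0.5.npy"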
Code example #7
def get_transposed_image_path(img_path, scale=None, target_size=None):
    """Get path, modified for any transposition by :func:``transpose_npy`` 
    naming conventions.
    
    Args:
        img_path: Unmodified image path.
        scale: Scaling factor; defaults to None, which ignores scaling.
        target_size: Target size, typically given by a register profile; 
            defaults to None, which ignores target size.
    
    Returns:
        Modified path for the given transposition, or ``img_path`` unmodified 
        if all transposition factors are None.
    """
    img_path_modified = img_path
    if scale is not None or target_size is not None:
        # use scaled image for pixel comparison, retrieving
        # saved scaling as of v.0.6.0
        modifier = None
        if scale is not None:
            # scale takes priority as command-line argument
            modifier = make_modifier_scale(scale)
            print("loading scaled file with {} modifier".format(modifier))
        else:
            # otherwise assume set target size
            modifier = make_modifier_resized(target_size)
            print("loading resized file with {} modifier".format(modifier))
        img_path_modified = libmag.insert_before_ext(img_path, "_" + modifier)
    return img_path_modified
Code example #8
def cluster_blobs(img_path, suffix=None):
    """Cluster blobs and save to Numpy archive.
    
    Args:
        img_path (str): Base path from which registered labels and blobs files
            will be found and output blobs file save location will be
            constructed.
        suffix (str): Suffix for ``img_path``; defaults to None.

    """
    mod_path = img_path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(img_path, suffix)
    labels_img_np = sitk_io.load_registered_img(
        mod_path, config.RegNames.IMG_LABELS.value)
    blobs = detector.Blobs().load_blobs(np_io.img_to_blobs_path(img_path))
    scaling, res = np_io.find_scaling(img_path, labels_img_np.shape)
    if blobs is None:
        libmag.warn("unable to load nuclei coordinates")
        return

    # append label IDs to blobs and scale to make isotropic
    blobs_clus = ClusterByLabel.cluster_by_label(blobs.blobs[:, :3],
                                                 labels_img_np, scaling, res)
    print(blobs_clus)
    out_path = libmag.combine_paths(mod_path, config.SUFFIX_BLOB_CLUSTERS)
    np.save(out_path, blobs_clus)
Code example #9
def meas_plot_coefvar(path, id_cols, cond_col, cond_base, metric_cols, 
                      composites, size_col=None, size=None, show=True):
    """Measure and plot coefficient of variation (CV) as a scatter plot.
    
    CV is computed two ways:
    
    - Based on columns and equation specified in ``composites``, applied 
      across all samples regardless of group
    - For each metric in ``metric_cols``, separated by groups
    
    Args:
        path (str): Path to data frame.
        id_cols (List[str]): Sequence of columns to serve as index/indices.
        cond_col (str): Name of the condition column.
        cond_base (str): Name of the condition to which all other conditions 
            will be normalized.
        metric_cols (List[str]): Sequence of column names for which to 
            compute coefficients of variation.
        composites (List[Enum]): Sequence of enums specifying the 
            combination, typically from :class:`vols.MetricCombos`.
        size_col (str): Name of weighting column for coefficient of 
            variation measurement; defaults to None.
        size (List[int]): Sequence of ``width, height`` to size the figure; 
            defaults to None.
        show (bool): True to display the image; defaults to True.

    """
    # measure coefficient of variation per sample-region regardless of group
    df = pd.read_csv(path)
    df = df_io.combine_cols(df, composites)
    df_io.data_frames_to_csv(
        df, libmag.insert_before_ext(config.filename, "_coefvar"))
    
    # measure CV within each condition and shift metrics from each 
    # condition to separate columns
    df = df_io.coefvar_df(df, [*id_cols, cond_col], metric_cols, size_col)
    conds = np.unique(df[cond_col])
    df = df_io.cond_to_cols_df(df, id_cols, cond_col, cond_base, metric_cols)
    path = libmag.insert_before_ext(config.filename, "_coefvartransp")
    df_io.data_frames_to_csv(df, path)
    
    # display CV measured by condition as probability plot
    lims = (0, 0.7)
    plot_2d.plot_probability(
        path, conds, metric_cols, "Volume",
        xlim=lims, ylim=lims, title="Coefficient of Variation", 
        fig_size=size, show=show, suffix=None, df=df)
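df_io.coefvar_df is project code, but the underlying statistic is just the standard deviation normalized by the mean; a minimal pandas equivalent for a single metric column, assuming hypothetical column names:

import pandas as pd

def coefvar(df, group_cols, metric_col):
    # coefficient of variation: standard deviation normalized by the mean
    grouped = df.groupby(group_cols)[metric_col]
    return grouped.std() / grouped.mean()

# e.g., coefvar(df, ["Sample", "Region"], "Volume")  # hypothetical columns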
Code example #10
def animate_imgs(base_path, plotted_imgs, delay, ext=None, suffix=None):
    """Export to an animated image.
    
    Defaults to an animated GIF unless ``ext`` specifies otherwise.
    Requires ``FFMpeg`` for MP4 file format exports and ``ImageMagick`` for
    all other types of exports.
    
    Args:
        base_path (str): String from which an output path will be constructed.
        plotted_imgs (List[:obj:`matplotlib.image.AxesImage`]): Sequence of
            images to include in the animation.
        delay (int): Delay between image display in ms. If None, the delay
            will default to 100 ms.
        ext (str): Extension to use when saving, without the period. Defaults
            to None, in which case "gif" will be used.
        suffix (str): String to append to output path before extension;
            defaults to None to ignore.

    """
    # set up animation output path and time interval
    if ext is None: ext = "gif"
    out_path = libmag.combine_paths(base_path, "animated", ext=ext)
    if suffix: out_path = libmag.insert_before_ext(out_path, suffix, "_")
    libmag.backup_file(out_path)
    if delay is None:
        delay = 100
    if plotted_imgs and len(plotted_imgs[0]) > 0:
        fig = plotted_imgs[0][0].figure
    else:
        libmag.warn("No images available to animate")
        return

    # WORKAROUND: FFMpeg may give a "height not divisible by 2" error, fixed
    # by padding with a pixel
    # TODO: check if needed for width
    # TODO: account for difference in FFMpeg height and fig height
    for fn, size in {
            # fig.set_figwidth: fig.get_figwidth(),
            fig.set_figheight:
            fig.get_figheight()
    }.items():
        if size * fig.dpi % 2 != 0:
            fn(size + 1. / fig.dpi)
            print("Padded size with", fn, fig.get_figwidth(), "to new size of",
                  fig.get_figheight())

    # generate and save animation
    anim = animation.ArtistAnimation(fig,
                                     plotted_imgs,
                                     interval=delay,
                                     repeat_delay=0,
                                     blit=False)
    try:
        writer = "ffmpeg" if ext == "mp4" else "imagemagick"
        anim.save(out_path, writer=writer)
        print("saved animation file to {}".format(out_path))
    except ValueError as e:
        print(e)
        libmag.warn("No animation writer available for Matplotlib")
Code example #11
def animate_imgs(base_path, plotted_imgs, delay, ext=None, suffix=None):
    """Export to an animated image.
    
    Defaults to an animated GIF unless ``ext`` specifies otherwise.
    Requires ``FFMpeg`` for MP4 file format exports and ``ImageMagick`` for
    all other types of exports.
    
    Args:
        base_path (str): String from which an output path will be constructed.
        plotted_imgs (List[:obj:`matplotlib.image.AxesImage`]): Sequence of
            images to include in the animation.
        delay (int): Delay between image display in ms. If None, the delay
            will default to 100 ms.
        ext (str): Extension to use when saving, without the period. Defaults
            to None, in which case "gif" will be used.
        suffix (str): String to append to output path before extension;
            defaults to None to ignore.

    """
    if ext is None: ext = "gif"
    out_path = libmag.combine_paths(base_path, "animated", ext=ext)
    if suffix: out_path = libmag.insert_before_ext(out_path, suffix, "_")
    libmag.backup_file(out_path)
    if delay is None:
        delay = 100
    if plotted_imgs and len(plotted_imgs[0]) > 0:
        fig = plotted_imgs[0][0].figure
    else:
        libmag.warn("No images available to animate")
        return
    anim = animation.ArtistAnimation(fig,
                                     plotted_imgs,
                                     interval=delay,
                                     repeat_delay=0,
                                     blit=False)
    try:
        writer = "ffmpeg" if ext == "mp4" else "imagemagick"
        anim.save(out_path, writer=writer)
        print("saved animation file to {}".format(out_path))
    except ValueError as e:
        print(e)
        libmag.warn("No animation writer available for Matplotlib")
Code example #12
File: edge_seg.py Project: kaparna126/magellanmapper
def make_sub_segmented_labels(img_path, suffix=None):
    """Divide each label based on anatomical borders to create a 
    sub-segmented image.
    
    The segmented labels image will be loaded, or if not available, the 
    non-segmented labels will be loaded instead.
    
    Args:
        img_path: Path to main image from which registered images will 
            be loaded.
        suffix: Modifier to append to end of ``img_path`` basename for 
            registered image files that were output to a modified name; 
            defaults to None.
    
    Returns:
        Sub-segmented image as a Numpy array of the same shape as 
        the image at ``img_path``.
    """
    # adjust image path with suffix
    mod_path = img_path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(mod_path, suffix)
    
    # load labels
    labels_sitk = sitk_io.load_registered_img(
        mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
    
    # atlas edge image is associated with original, not modified image
    atlas_edge = sitk_io.load_registered_img(
        img_path, config.RegNames.IMG_ATLAS_EDGE.value)
    
    # sub-divide the labels and save to file
    labels_img_np = sitk.GetArrayFromImage(labels_sitk)
    labels_subseg = segmenter.sub_segment_labels(labels_img_np, atlas_edge)
    labels_subseg_sitk = sitk_io.replace_sitk_with_numpy(
        labels_sitk, labels_subseg)
    sitk_io.write_reg_images(
        {config.RegNames.IMG_LABELS_SUBSEG.value: labels_subseg_sitk}, mod_path)
    return labels_subseg
Code example #13
def make_subimage_name(base, offset, shape, suffix=None):
    """Make name of subimage for a given offset and shape.

    The order of ``offset`` and ``shape`` is assumed to be in z,y,x but
    will be reversed for the output name since the user-oriented ordering
    is x,y,z.
    
    Args:
        base (str): Start of name, which can include full parent path.
        offset (Tuple[int]): Offset, generally given as a tuple.
        shape (Tuple[int]): Shape, generally given as a tuple.
        suffix (str): Suffix to append, replacing any existing extension
            in ``base``; defaults to None.
    
    Returns:
        str: Name (or path) to subimage.
    """
    # sub-image offset/shape stored as z,y,x, but file named as x,y,z
    roi_site = "{}x{}".format(offset[::-1], shape[::-1]).replace(" ", "")
    name = libmag.insert_before_ext(base, roi_site, "_")
    if suffix:
        name = libmag.combine_paths(name, suffix)
    print("subimage name: {}".format(name))
    return name
Code example #14
File: cli.py Project: sanderslab/magellanmapper
def process_file(
    path: str,
    proc_type: Enum,
    proc_val: Optional[Any] = None,
    series: Optional[int] = None,
    subimg_offset: Optional[List[int]] = None,
    subimg_size: Optional[List[int]] = None,
    roi_offset: Optional[List[int]] = None,
    roi_size: Optional[List[int]] = None
) -> Tuple[Optional[Any], Optional[str]]:
    """Processes a single image file non-interactively.

    Assumes that the image has already been set up.
    
    Args:
        path: Path to image from which MagellanMapper-style paths will 
            be generated.
        proc_type: Processing type, which should be one of
            :class:`config.ProcessTypes`.
        proc_val: Processing value associated with ``proc_type``; defaults to
            None.
        series: Image series number; defaults to None.
        subimg_offset: Sub-image offset as (z,y,x) to load; defaults to None.
        subimg_size: Sub-image size as (z,y,x) to load; defaults to None.
        roi_offset: Region of interest offset as (x, y, z) to process;
            defaults to None.
        roi_size: Region of interest size of region to process, given as
            ``(x, y, z)``; defaults to None.
    
    Returns:
        Tuple of stats from processing, or None if no stats, and 
        text feedback from the processing, or None if no feedback.
    """
    # PROCESS BY TYPE
    stats = None
    fdbk = None
    filename_base = importer.filename_to_base(path, series)

    print("{}\n".format("-" * 80))
    if proc_type is config.ProcessTypes.LOAD:
        # loading completed
        return None, None

    elif proc_type is config.ProcessTypes.IMPORT_ONLY:
        # already imported so does nothing
        print("imported {}, will exit".format(path))

    elif proc_type is config.ProcessTypes.EXPORT_ROIS:
        # export ROIs; assumes that info_proc was already loaded to
        # give smaller region from which smaller ROIs from the truth DB
        # will be extracted
        from magmap.io import export_rois
        db = config.db if config.truth_db is None else config.truth_db
        export_path = naming.make_subimage_name(filename_base, subimg_offset,
                                                subimg_size)
        export_rois.export_rois(db, config.image5d, config.channel,
                                export_path,
                                config.plot_labels[config.PlotLabels.PADDING],
                                config.unit_factor, config.truth_db_mode,
                                os.path.basename(export_path))

    elif proc_type is config.ProcessTypes.TRANSFORM:
        # transpose, rescale, and/or resize whole large image
        transformer.transpose_img(
            path,
            series,
            plane=config.plane,
            rescale=config.transform[config.Transforms.RESCALE],
            target_size=config.roi_size)

    elif proc_type in (config.ProcessTypes.EXTRACT,
                       config.ProcessTypes.ANIMATED):
        # generate animated GIF or extract single plane
        export_stack.stack_to_img(config.filenames, roi_offset, roi_size,
                                  series, subimg_offset, subimg_size,
                                  proc_type is config.ProcessTypes.ANIMATED,
                                  config.suffix)

    elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
        # export blobs to CSV file
        from magmap.io import export_rois
        export_rois.blobs_to_csv(config.blobs.blobs, filename_base)

    elif proc_type in (config.ProcessTypes.DETECT,
                       config.ProcessTypes.DETECT_COLOC):
        # detect blobs in the full image, +/- co-localization
        coloc = proc_type is config.ProcessTypes.DETECT_COLOC
        stats, fdbk, _ = stack_detect.detect_blobs_stack(
            filename_base, subimg_offset, subimg_size, coloc)

    elif proc_type is config.ProcessTypes.COLOC_MATCH:
        if config.blobs is not None and config.blobs.blobs is not None:
            # colocalize blobs in separate channels by matching blobs
            shape = subimg_size
            if shape is None:
                # get shape from loaded image, falling back to its metadata
                if config.image5d is not None:
                    shape = config.image5d.shape[1:]
                else:
                    shape = config.img5d.meta[config.MetaKeys.SHAPE][1:]
            matches = colocalizer.StackColocalizer.colocalize_stack(
                shape, config.blobs.blobs)
            # insert matches into database
            colocalizer.insert_matches(config.db, matches)
        else:
            print("No blobs loaded to colocalize, skipping")

    elif proc_type in (config.ProcessTypes.EXPORT_PLANES,
                       config.ProcessTypes.EXPORT_PLANES_CHANNELS):
        # export each plane as a separate image file
        export_stack.export_planes(
            config.image5d, config.savefig, config.channel,
            proc_type is config.ProcessTypes.EXPORT_PLANES_CHANNELS)

    elif proc_type is config.ProcessTypes.EXPORT_RAW:
        # export the main image as a raw data file
        out_path = libmag.combine_paths(config.filename, ".raw", sep="")
        libmag.backup_file(out_path)
        np_io.write_raw_file(config.image5d, out_path)

    elif proc_type is config.ProcessTypes.EXPORT_TIF:
        # export the main image as a TIF file for each channel
        np_io.write_tif(config.image5d, config.filename)

    elif proc_type is config.ProcessTypes.PREPROCESS:
        # pre-process a whole image and save to file
        # TODO: consider chunking option for larger images
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(config.filename, "_preproc")
        transformer.preprocess_img(config.image5d, proc_val, config.channel,
                                   out_path)

    return stats, fdbk
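As a design note, the if/elif chain above is effectively a table mapping ProcessTypes members to handlers; a dict-based dispatch is one alternative, sketched here with hypothetical handler names:

def _export_raw():
    # same logic as the EXPORT_RAW branch above
    out_path = libmag.combine_paths(config.filename, ".raw", sep="")
    libmag.backup_file(out_path)
    np_io.write_raw_file(config.image5d, out_path)

PROC_HANDLERS = {
    config.ProcessTypes.EXPORT_RAW: _export_raw,
    # ...one entry per remaining process type
}

handler = PROC_HANDLERS.get(proc_type)
if handler is not None:
    handler()

Branches that share many locals or return values may still read better as a chain, so this trade-off depends on how uniform the handlers are.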
Code example #15
def meas_improvement(path,
                     col_effect,
                     col_p,
                     thresh_impr=0,
                     thresh_p=0.05,
                     col_wt=None,
                     suffix=None,
                     df=None):
    """Measure overall improvement and worsening for a column in a data frame.
    
    Args:
        path (str): Path of file to load into data frame.
        col_effect (str): Name of column with metric to measure.
        col_p (str): Name of column with p-values.
        thresh_impr (float): Threshold above which effects are considered
            improvements; defaults to 0.
        thresh_p (float): Threshold below which p-values are considered
            statistically significant; defaults to 0.05.
        col_wt (str): Name of column for weighting.
        suffix (str): Output path suffix; defaults to None.
        df (:obj:`pd.DataFrame`): Data frame to use instead of loading from
            ``path``; defaults to None.

    Returns:
        :obj:`pd.DataFrame`: Data frame with improvement measurements.
        The data frame will be saved to a filename based on ``path``.

    """
    def add_wt(mask_cond, mask_cond_ss, name):
        # add weighted metrics for the given condition, such as improved
        # vs. worsened
        metrics[col_wt] = [np.sum(df[col_wt])]
        wt_cond = df.loc[mask_cond, col_wt]
        wt_cond_ss = df.loc[mask_cond_ss, col_wt]
        # sum of weighting column fitting the condition (all and statistically
        # significant)
        metrics["{}_{}".format(col_wt, name)] = [np.sum(wt_cond)]
        metrics["{}_{}_ss".format(col_wt, name)] = [np.sum(wt_cond_ss)]
        # sum of filtered effect multiplied by weighting
        metrics["{}_{}_by_{}".format(col_effect, name, col_wt)] = [
            np.sum(wt_cond.multiply(df.loc[mask_cond, col_effect]))
        ]
        metrics["{}_{}_by_{}_ss".format(col_effect, name, col_wt)] = [
            np.sum(wt_cond_ss.multiply(df.loc[mask_cond_ss, col_effect]))
        ]

    if df is None:
        df = pd.read_csv(path)

    # masks of improved and worsened, all and statistically significant
    # for each, where improvement is above the given threshold
    effects = df[col_effect]
    mask_impr = effects > thresh_impr
    mask_ss = df[col_p] < thresh_p
    mask_impr_ss = mask_impr & mask_ss
    mask_wors = effects < thresh_impr
    mask_wors_ss = mask_wors & mask_ss
    metrics = {
        "n": [len(effects)],
        "n_impr": [np.sum(mask_impr)],
        "n_impr_ss": [np.sum(mask_impr_ss)],
        "n_wors": [np.sum(mask_wors)],
        "n_wors_ss": [np.sum(mask_wors_ss)],
        col_effect: [np.sum(effects)],
        "{}_impr".format(col_effect): [np.sum(effects[mask_impr])],
        "{}_impr_ss".format(col_effect): [np.sum(effects[mask_impr_ss])],
        "{}_wors".format(col_effect): [np.sum(effects[mask_wors])],
        "{}_wors_ss".format(col_effect): [np.sum(effects[mask_wors_ss])],
    }
    if col_wt:
        # add columns based on weighting column
        add_wt(mask_impr, mask_impr_ss, "impr")
        add_wt(mask_wors, mask_wors_ss, "wors")

    out_path = libmag.insert_before_ext(path, "_impr")
    if suffix:
        out_path = libmag.insert_before_ext(out_path, suffix)
    df_impr = df_io.dict_to_data_frame(metrics, out_path)
    # display transposed version for more compact view given large number
    # of columns, but save un-transposed to preserve data types
    df_io.print_data_frame(df_impr.T, index=True, header=False)
    return df_impr
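A toy run of the improvement/worsening masks above, with a hypothetical three-row frame (effects of +2, -1, +3 and p-values of 0.01, 0.20, 0.03):

import numpy as np
import pandas as pd

df = pd.DataFrame({"effect": [2.0, -1.0, 3.0], "p": [0.01, 0.20, 0.03]})
mask_impr = df["effect"] > 0                  # improved: rows 0 and 2
mask_impr_ss = mask_impr & (df["p"] < 0.05)   # both are also significant
print(int(np.sum(mask_impr)), int(np.sum(mask_impr_ss)))  # 2 2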
Code example #16
def plot_clusters_by_label(path, z, suffix=None, show=True, scaling=None):
    """Plot separate sets of clusters for each label.
    
    Args:
        path (str): Base path to blobs file with clusters.
        z (int): z-plane to plot.
        suffix (str): Suffix for ``path``; defaults to None.
        show (bool): True to show; defaults to True.
        scaling (List): Sequence of scaling from blobs' coordinate space
             to that of :attr:`config.labels_img`.

    """
    mod_path = path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(path, suffix)
    blobs = np.load(libmag.combine_paths(mod_path,
                                         config.SUFFIX_BLOB_CLUSTERS))
    label_ids = np.unique(blobs[:, 3])
    fig, gs = plot_support.setup_fig(
        1, 1, config.plot_labels[config.PlotLabels.SIZE])
    ax = fig.add_subplot(gs[0, 0])
    plot_support.hide_axes(ax)

    # plot underlying atlas
    np_io.setup_images(mod_path)
    if config.reg_suffixes[config.RegSuffixes.ATLAS]:
        # use atlas if explicitly set
        img = config.image5d
    else:
        # default to black background
        img = np.zeros_like(config.labels_img)[None]
    stacker = export_stack.setup_stack(img,
                                       mod_path,
                                       slice_vals=(z, z + 1),
                                       labels_imgs=(config.labels_img,
                                                    config.borders_img))
    stacker.build_stack(ax, config.plot_labels[config.PlotLabels.SCALE_BAR])
    # export_stack.reg_planes_to_img(
    #     (np.zeros(config.labels_img.shape[1:], dtype=int),
    #      config.labels_img[z]), ax=ax)

    if scaling is not None:
        print("scaling blobs cluster coordinates by", scaling)
        blobs = blobs.astype(float)
        blobs[:, :3] = np.multiply(blobs[:, :3], scaling)
        blobs[:, 0] = np.floor(blobs[:, 0])

    # plot nuclei by label, colored based on cluster size within each label
    colors = colormaps.discrete_colormap(len(np.unique(blobs[:, 4])),
                                         prioritize_default="cn") / 255.
    col_noise = (1, 1, 1, 1)
    for label_id in label_ids:
        if label_id == 0:
            # skip blobs in background
            continue
        # sort blobs within label by cluster size (descending order),
        # including clusters within all z-planes to keep same order across zs
        blobs_lbl = blobs[blobs[:, 3] == label_id]
        clus_lbls, clus_lbls_counts = np.unique(blobs_lbl[:, 4],
                                                return_counts=True)
        clus_lbls = clus_lbls[np.argsort(clus_lbls_counts)][::-1]
        blobs_lbl = blobs_lbl[blobs_lbl[:, 0] == z]
        for i, (clus_lbl, color) in enumerate(zip(clus_lbls, colors)):
            blobs_clus = blobs_lbl[blobs_lbl[:, 4] == clus_lbl]
            if len(blobs_clus) < 1: continue
            # default to small, translucent dominant cluster points
            size = 0.1
            alpha = 0.5
            if clus_lbl == -1:
                # color all noise points the same and emphasize points
                color = col_noise
                size = 0.5
                alpha = 1
            print(label_id, clus_lbl, color, len(blobs_clus))
            ax.scatter(blobs_clus[:, 2],
                       blobs_clus[:, 1],
                       color=color,
                       s=size,
                       alpha=alpha)
    plot_support.save_fig(mod_path, config.savefig, "_clusplot")
    if show: plot_support.show()
Code example #17
File: aws.py Project: sanderslab/magellanmapper
def list_s3_bucket(name, keys=None, prefix=None, suffix=None, versions=False):
    """List all objects or object versions in an AWS S3 bucket.

    Args:
        name (str): Name of bucket.
        keys (List[str]): Sequence of keys within the bucket to include
            sizes of only these files; defaults to None.
        prefix (str): Filter only keys starting with this string; defaults
            to None.
        suffix (str): String to append to output CSV file; defaults to None.
        versions (bool): True to get all object versions, including
            deleted objects; False to get only the current versions; defaults
            to False.

    Returns:
        float, :obj:`pd.DataFrame`, :obj:`pd.DataFrame`: Size of bucket in
        bytes; a dataframe of keys and associated sizes; and a dataframe
        of missing keys from ``keys``, or None if ``keys`` is not given.

    """
    s3 = boto3.resource("s3")
    bucket = s3.Bucket(name)
    tot_size = 0
    obj_sizes = {}
    # get the latest version of objects or all object versions, filtering
    # for paths starting with prefix if set
    objs = bucket.object_versions if versions else bucket.objects
    objs = objs.filter(Prefix=prefix) if prefix else objs.all()
    for obj in objs:
        if not keys or obj.key in keys:
            # only check keys in list if given
            obj_sizes.setdefault("Bucket", []).append(bucket.name)
            obj_sizes.setdefault("Key", []).append(obj.key)
            size = obj.size
            obj_sizes.setdefault("Size", []).append(size)
            if size:
                # skip delete markers, which have a size of None
                tot_size += obj.size
            if versions:
                # add columns for version info
                obj_sizes.setdefault("Version_id", []).append(obj.version_id)
                obj_sizes.setdefault("Last_modified",
                                     []).append(obj.last_modified)

    out_path = "bucket_{}".format(bucket.name)
    if suffix:
        out_path = libmag.insert_before_ext(out_path, suffix, "_")
    df_missing = None
    if keys:
        # if list of keys given, show all keys that were not found
        keys_missing = []
        # compare against object keys found in the bucket, not the dict's
        # column names
        obj_keys = obj_sizes.get("Key", [])
        for key in keys:
            if key not in obj_keys:
                keys_missing.append(key)
        # print("Missing keys:\n", "\n".join(keys_missing))
        df_missing = df_io.dict_to_data_frame({"Keys_missing": keys_missing},
                                              libmag.insert_before_ext(
                                                  out_path, "_missing"))

    df = df_io.dict_to_data_frame(obj_sizes, out_path)
    print("{} bucket total tot_size (GiB): {}".format(
        bucket.name, libmag.convert_bin_magnitude(tot_size, 3)))
    return tot_size, df, df_missing
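A hypothetical call, listing only current object versions under a key prefix (the bucket name and prefix are placeholders):

tot_size, df_sizes, df_missing = list_s3_bucket(
    "my-bucket", prefix="samples/", suffix="_samples")
# df_missing is None here since no explicit key list was given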
Code example #18
File: cli.py Project: clifduhn/magellanmapper
def process_file(path,
                 proc_mode,
                 series=None,
                 subimg_offset=None,
                 subimg_size=None,
                 roi_offset=None,
                 roi_size=None):
    """Processes a single image file non-interactively.

    Assumes that the image has already been set up.
    
    Args:
        path (str): Path to image from which MagellanMapper-style paths will 
            be generated.
        proc_mode (str): Processing mode, which should be a key in
            :class:`config.ProcessTypes`, case-insensitive.
        series (int): Image series number; defaults to None.
        subimg_offset (List[int]): Sub-image offset as (z,y,x) to load;
            defaults to None.
        subimg_size (List[int]): Sub-image size as (z,y,x) to load;
            defaults to None.
        roi_offset (List[int]): Region of interest offset as (x, y, z) to
            process; defaults to None.
        roi_size (List[int]): Region of interest size of region to process,
            given as (x, y, z); defaults to None.
    
    Returns:
        Tuple of stats from processing, or None if no stats, and 
        text feedback from the processing, or None if no feedback.
    """
    # PROCESS BY TYPE
    stats = None
    fdbk = None
    filename_base = importer.filename_to_base(path, series)
    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
    if proc_type is config.ProcessTypes.LOAD:
        # loading completed
        return None, None

    elif proc_type is config.ProcessTypes.IMPORT_ONLY:
        # already imported so does nothing
        print("imported {}, will exit".format(path))

    elif proc_type is config.ProcessTypes.EXPORT_ROIS:
        # export ROIs; assumes that info_proc was already loaded to
        # give smaller region from which smaller ROIs from the truth DB
        # will be extracted
        from magmap.io import export_rois
        db = config.db if config.truth_db is None else config.truth_db
        export_rois.export_rois(db, config.image5d, config.channel,
                                filename_base,
                                config.plot_labels[config.PlotLabels.PADDING],
                                config.unit_factor, config.truth_db_mode,
                                os.path.basename(config.filename))

    elif proc_type is config.ProcessTypes.TRANSFORM:
        # transpose, rescale, and/or resize whole large image
        transformer.transpose_img(
            path,
            series,
            plane=config.plane,
            rescale=config.transform[config.Transforms.RESCALE],
            target_size=config.roi_size)

    elif proc_type in (config.ProcessTypes.EXTRACT,
                       config.ProcessTypes.ANIMATED):
        # generate animated GIF or extract single plane
        from magmap.io import export_stack
        export_stack.stack_to_img(config.filenames, roi_offset, roi_size,
                                  series, subimg_offset, subimg_size,
                                  proc_type is config.ProcessTypes.ANIMATED,
                                  config.suffix)

    elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
        # export blobs to CSV file
        from magmap.io import export_rois
        export_rois.blobs_to_csv(config.blobs, filename_base)

    elif proc_type is config.ProcessTypes.DETECT:
        # detect blobs in the full image
        stats, fdbk, segments_all = stack_detect.detect_blobs_large_image(
            filename_base, config.image5d, subimg_offset, subimg_size,
            config.truth_db_mode is config.TruthDBModes.VERIFY,
            not config.grid_search_profile, config.image5d_is_roi)

    elif proc_type is config.ProcessTypes.EXPORT_PLANES:
        # export each plane as a separate image file
        from magmap.io import export_stack
        export_stack.export_planes(config.image5d, config.prefix,
                                   config.savefig, config.channel)

    elif proc_type is config.ProcessTypes.EXPORT_RAW:
        # export the main image as a raw data file
        out_path = libmag.combine_paths(config.filename, ".raw", sep="")
        libmag.backup_file(out_path)
        np_io.write_raw_file(config.image5d, out_path)

    elif proc_type is config.ProcessTypes.PREPROCESS:
        # pre-process a whole image and save to file
        # TODO: consider chunking option for larger images
        profile = config.get_roi_profile(0)
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(config.filename, "_preproc")
        transformer.preprocess_img(config.image5d, profile["preprocess"],
                                   config.channel, out_path)

    return stats, fdbk
Code example #19
File: edge_seg.py Project: kaparna126/magellanmapper
def merge_atlas_segmentations(img_paths, show=True, atlas=True, suffix=None):
    """Merge atlas segmentations for a list of files as a multiprocessing 
    wrapper for :func:``edge_aware_segmentation``, after which 
    edge image post-processing is performed separately since it 
    contains tasks also performed in multiprocessing.
    
    Args:
        img_paths (List[str]): Sequence of image paths to load.
        show (bool): True if the output images should be displayed; defaults 
            to True.
        atlas (bool): True if the image is an atlas; defaults to True.
        suffix (str): Modifier to append to end of ``img_path`` basename for 
            registered image files that were output to a modified name; 
            defaults to None.
    """
    start_time = time()
    
    # erode all labels images into markers for watershed; not multiprocessed
    # since erosion is itself multiprocessed
    erode = config.atlas_profile["erode_labels"]
    erosion = config.atlas_profile[profiles.RegKeys.EDGE_AWARE_REANNOTATION]
    erosion_frac = config.atlas_profile["erosion_frac"]
    mirrored = atlas and _is_profile_mirrored()
    mirror_mult = _get_mirror_mult()
    dfs_eros = []
    for img_path in img_paths:
        mod_path = img_path
        if suffix is not None:
            mod_path = libmag.insert_before_ext(mod_path, suffix)
        labels_sitk = sitk_io.load_registered_img(
            mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
        print("Eroding labels to generate markers for atlas segmentation")
        df = None
        if erode["markers"]:
            # use default minimal post-erosion size (not setting erosion frac)
            markers, df = erode_labels(
                sitk.GetArrayFromImage(labels_sitk), erosion,
                mirrored=mirrored, mirror_mult=mirror_mult)
            labels_sitk_markers = sitk_io.replace_sitk_with_numpy(
                labels_sitk, markers)
            sitk_io.write_reg_images(
                {config.RegNames.IMG_LABELS_MARKERS.value: labels_sitk_markers},
                mod_path)
            df_io.data_frames_to_csv(
                df, "{}_markers.csv".format(os.path.splitext(mod_path)[0]))
        dfs_eros.append(df)
    
    pool = chunking.get_mp_pool()
    pool_results = []
    for img_path, df in zip(img_paths, dfs_eros):
        print("setting up atlas segmentation merge for", img_path)
        # convert labels image into markers
        exclude = df.loc[
            np.isnan(df[config.SmoothingMetrics.FILTER_SIZE.value]),
            config.AtlasMetrics.REGION.value]
        print("excluding these labels from re-segmentation:\n", exclude)
        pool_results.append(pool.apply_async(
            edge_aware_segmentation,
            args=(img_path, show, atlas, suffix, exclude, mirror_mult)))
    for result in pool_results:
        # edge distance calculation and labels interior image generation 
        # are multiprocessed, so run them as post-processing tasks to 
        # avoid nested multiprocessing
        path = result.get()
        mod_path = path
        if suffix is not None:
            mod_path = libmag.insert_before_ext(path, suffix)
        
        # make edge distance images and stats
        labels_sitk = sitk_io.load_registered_img(
            mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
        labels_np = sitk.GetArrayFromImage(labels_sitk)
        dist_to_orig, labels_edge = edge_distances(
            labels_np, path=path, spacing=labels_sitk.GetSpacing()[::-1])
        dist_sitk = sitk_io.replace_sitk_with_numpy(labels_sitk, dist_to_orig)
        labels_sitk_edge = sitk_io.replace_sitk_with_numpy(
            labels_sitk, labels_edge)

        labels_sitk_interior = None
        if erode["interior"]:
            # make interior images from labels using given targeted 
            # post-erosion frac
            interior, _ = erode_labels(
                labels_np, erosion, erosion_frac=erosion_frac, 
                mirrored=mirrored, mirror_mult=mirror_mult)
            labels_sitk_interior = sitk_io.replace_sitk_with_numpy(
                labels_sitk, interior)
        
        # write images to same directory as atlas
        imgs_write = {
            config.RegNames.IMG_LABELS_DIST.value: dist_sitk, 
            config.RegNames.IMG_LABELS_EDGE.value: labels_sitk_edge, 
            config.RegNames.IMG_LABELS_INTERIOR.value: labels_sitk_interior, 
        }
        sitk_io.write_reg_images(imgs_write, mod_path)
        if show:
            for img in imgs_write.values():
                if img: sitk.Show(img)
        print("finished {}".format(path))
    pool.close()
    pool.join()
    print("time elapsed for merging atlas segmentations:", time() - start_time)
Code example #20
File: edge_seg.py Project: kaparna126/magellanmapper
def make_edge_images(path_img, show=True, atlas=True, suffix=None, 
                     path_atlas_dir=None):
    """Make edge-detected atlas and associated labels images.
    
    The atlas is assumed to be a sample (e.g., microscopy) image on which 
    an edge-detection filter will be applied. The labels image is 
    assumed to be an annotated image whose edges will be found by 
    obtaining the borders of all separate labels.
    
    Args:
        path_img: Path to the image atlas. The labels image will be 
            found as a corresponding, registered image, unless 
            ``path_atlas_dir`` is given.
        show (bool): True if the output images should be displayed; defaults
            to True.
        atlas: True if the primary image is an atlas, which is assumed 
            to be symmetrical. False if the image is an experimental/sample 
            image, in which case erosion will be performed on the full 
            images, and stats will not be performed.
        suffix: Modifier to append to end of ``path_img`` basename for 
            registered image files that were output to a modified name; 
            defaults to None.
        path_atlas_dir: Path to atlas directory to use labels from that 
            directory rather than from labels image registered to 
            ``path_img``, such as when the sample image is registered 
            to an atlas rather than the other way around. Typically 
            coupled with ``suffix`` to compare same sample against 
            different labels. Defaults to None.
    """
    
    # load intensity image from which to detect edges
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    if not atlas_suffix:
        if atlas:
            # atlases default to using the atlas volume image
            print("generating edge images for atlas")
            atlas_suffix = config.RegNames.IMG_ATLAS.value
        else:
            # otherwise, use the experimental image
            print("generating edge images for experiment/sample image")
            atlas_suffix = config.RegNames.IMG_EXP.value
    
    # adjust image path with suffix
    mod_path = path_img
    if suffix is not None:
        mod_path = libmag.insert_before_ext(mod_path, suffix)
    
    labels_from_atlas_dir = path_atlas_dir and os.path.isdir(path_atlas_dir)
    if labels_from_atlas_dir:
        # load labels from atlas directory
        # TODO: consider applying suffix to labels dir
        path_atlas = path_img
        path_labels = os.path.join(
            path_atlas_dir, config.RegNames.IMG_LABELS.value)
        print("loading labels from", path_labels)
        labels_sitk = sitk.ReadImage(path_labels)
    else:
        # load labels registered to sample image
        path_atlas = mod_path
        labels_sitk = sitk_io.load_registered_img(
            mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
    labels_img_np = sitk.GetArrayFromImage(labels_sitk)
    
    # load atlas image, set resolution from it
    atlas_sitk = sitk_io.load_registered_img(
        path_atlas, atlas_suffix, get_sitk=True)
    config.resolutions = np.array([atlas_sitk.GetSpacing()[::-1]])
    atlas_np = sitk.GetArrayFromImage(atlas_sitk)
    
    # output images
    atlas_sitk_log = None
    atlas_sitk_edge = None
    labels_sitk_interior = None
    
    log_sigma = config.atlas_profile["log_sigma"]
    if log_sigma is not None and suffix is None:
        # generate LoG and edge-detected images for original image
        print("generating LoG edge-detected images with sigma", log_sigma)
        thresh = (config.atlas_profile["atlas_threshold"]
                  if config.atlas_profile["log_atlas_thresh"] else None)
        atlas_log = cv_nd.laplacian_of_gaussian_img(
            atlas_np, sigma=log_sigma, labels_img=labels_img_np, thresh=thresh)
        atlas_sitk_log = sitk_io.replace_sitk_with_numpy(atlas_sitk, atlas_log)
        atlas_edge = cv_nd.zero_crossing(atlas_log, 1).astype(np.uint8)
        atlas_sitk_edge = sitk_io.replace_sitk_with_numpy(
            atlas_sitk, atlas_edge)
    else:
        # if sigma not set or if using suffix to compare two images, 
        # load from original image to compare against common image
        atlas_edge = sitk_io.load_registered_img(
            path_img, config.RegNames.IMG_ATLAS_EDGE.value)

    erode = config.atlas_profile["erode_labels"]
    if erode["interior"]:
        # make map of label interiors for interior/border comparisons
        print("Eroding labels to generate interior labels image")
        erosion = config.atlas_profile[
            profiles.RegKeys.EDGE_AWARE_REANNOTATION]
        erosion_frac = config.atlas_profile["erosion_frac"]
        interior, _ = erode_labels(
            labels_img_np, erosion, erosion_frac, 
            atlas and _is_profile_mirrored(), _get_mirror_mult())
        labels_sitk_interior = sitk_io.replace_sitk_with_numpy(
            labels_sitk, interior)
    
    # make labels edge and edge distance images
    dist_to_orig, labels_edge = edge_distances(
        labels_img_np, atlas_edge, spacing=atlas_sitk.GetSpacing()[::-1])
    dist_sitk = sitk_io.replace_sitk_with_numpy(atlas_sitk, dist_to_orig)
    labels_sitk_edge = sitk_io.replace_sitk_with_numpy(labels_sitk, labels_edge)
    
    # show all images
    imgs_write = {
        config.RegNames.IMG_ATLAS_LOG.value: atlas_sitk_log, 
        config.RegNames.IMG_ATLAS_EDGE.value: atlas_sitk_edge, 
        config.RegNames.IMG_LABELS_EDGE.value: labels_sitk_edge, 
        config.RegNames.IMG_LABELS_INTERIOR.value: labels_sitk_interior, 
        config.RegNames.IMG_LABELS_DIST.value: dist_sitk, 
    }
    if show:
        for img in imgs_write.values():
            if img: sitk.Show(img)
    
    # write images to same directory as atlas with appropriate suffix
    sitk_io.write_reg_images(imgs_write, mod_path)
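cv_nd.laplacian_of_gaussian_img and cv_nd.zero_crossing are project helpers; the underlying operation is a standard Laplacian of Gaussian filter followed by zero-crossing detection, roughly sketched here with scipy rather than the project's implementation:

import numpy as np
from scipy import ndimage

def log_edges(img, sigma):
    # Laplacian of Gaussian: smooth, then take the second derivative
    log_img = ndimage.gaussian_laplace(img.astype(float), sigma)
    # mark pixels where the LoG response changes sign among neighbors,
    # approximating the zero crossings that trace intensity edges
    signs = np.sign(log_img)
    edges = (ndimage.maximum_filter(signs, size=3)
             != ndimage.minimum_filter(signs, size=3))
    return edges.astype(np.uint8)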
Code example #21
def stack_to_img(paths,
                 roi_offset,
                 roi_size,
                 series=None,
                 subimg_offset=None,
                 subimg_size=None,
                 animated=False,
                 suffix=None):
    """Build an image file from a stack of images in a directory or an 
    array, exporting as an animated GIF or movie for multiple planes or 
    extracting a single plane to a standard image file format.
    
    Writes the file to the parent directory of path.
    
    Args:
        paths (List[str]): Image paths, which can each be either an image 
            directory or a base path to a single image, including 
            volumetric images.
        roi_offset (Sequence[int]): Tuple of offset given in user order
            ``x,y,z``; defaults to None. Requires ``roi_size`` to not be None.
        roi_size (Sequence[int]): Size of the region of interest in user order 
            ``x,y,z``; defaults to None. Requires ``roi_offset`` to not be None.
        series (int): Image series number; defaults to None.
        subimg_offset (List[int]): Sub-image offset as (z,y,x) to load;
            defaults to None.
        subimg_size (List[int]): Sub-image size as (z,y,x) to load;
            defaults to None.
        animated (bool): True to export as an animated image; defaults to False.
        suffix (str): String to append to output path before extension; 
            defaults to None to ignore.

    """
    # set up figure layout for collages
    size = config.plot_labels[config.PlotLabels.LAYOUT]
    ncols, nrows = size if size else (1, 1)
    num_paths = len(paths)
    collage = num_paths > 1
    figs = {}

    for i in range(nrows):
        for j in range(ncols):
            n = i * ncols + j
            if n >= num_paths: break

            # load an image and set up its image stacker
            path_sub = paths[n]
            axs = []
            # TODO: test directory of images
            # TODO: consider not reloading first image
            np_io.setup_images(path_sub, series, subimg_offset, subimg_size)
            stacker = setup_stack(
                config.image5d,
                path_sub,
                offset=roi_offset,
                roi_size=roi_size,
                slice_vals=config.slice_vals,
                rescale=config.transform[config.Transforms.RESCALE],
                labels_imgs=(config.labels_img, config.borders_img))

            # add sub-plot title unless groups given as empty string
            title = None
            if config.groups:
                title = libmag.get_if_within(config.groups, n)
            elif num_paths > 1:
                title = os.path.basename(path_sub)

            if not stacker.images: continue
            ax = None
            for k in range(len(stacker.images[0])):
                # create or retrieve fig; animation has only 1 fig
                planei = 0 if animated else (stacker.img_slice.start +
                                             k * stacker.img_slice.step)
                fig_dict = figs.get(planei)
                if not fig_dict:
                    # set up new fig
                    fig, gs = plot_support.setup_fig(
                        nrows, ncols,
                        config.plot_labels[config.PlotLabels.SIZE])
                    fig_dict = {"fig": fig, "gs": gs, "imgs": []}
                    figs[planei] = fig_dict
                if ax is None:
                    # generate new axes for the gridspec position
                    ax = fig_dict["fig"].add_subplot(fig_dict["gs"][i, j])
                if title:
                    ax.title.set_text(title)
                axs.append(ax)

            # export planes
            plotted_imgs = stacker.build_stack(
                axs, config.plot_labels[config.PlotLabels.SCALE_BAR],
                size is None or ncols * nrows == 1)

            if animated:
                # store all plotted images in single fig
                fig_dict = figs.get(0)
                if fig_dict:
                    fig_dict["imgs"] = plotted_imgs
            else:
                # store one plotted image per fig; not used currently
                for fig_dict, img in zip(figs.values(), plotted_imgs):
                    fig_dict["imgs"].append(img)

    path_base = paths[0]
    for planei, fig_dict in figs.items():
        if animated:
            # generate animated image (eg animated GIF or movie file)
            animate_imgs(path_base, fig_dict["imgs"], config.delay,
                         config.savefig, suffix)
        else:
            # generate single figure with axis and plane index in filename
            if collage:
                # output filename as a collage of images
                if not os.path.isdir(path_base):
                    path_base = os.path.dirname(path_base)
                path_base = os.path.join(path_base, "collage")

            # insert mod as suffix, then add any additional suffix;
            # can use config.prefix_out for make_out_path prefix
            mod = "_plane_{}{}".format(
                plot_support.get_plane_axis(config.plane), planei)
            out_path = libmag.make_out_path(path_base, suffix=mod)
            if suffix:
                out_path = libmag.insert_before_ext(out_path, suffix)
            plot_support.save_fig(out_path,
                                  config.savefig,
                                  fig=fig_dict["fig"])
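A minimal usage sketch for this collage-capable version of ``stack_to_img``, assuming MagellanMapper's ``config`` globals are initialized elsewhere (normally by its CLI); the import path, file names, and setting values here are assumptions for illustration:

from magmap.settings import config  # assumed module path

# hypothetical settings that the CLI would normally populate
config.plot_labels[config.PlotLabels.LAYOUT] = (2, 2)  # ncols, nrows
config.slice_vals = (10, 20, 2)  # plane start, stop, step
config.savefig = "png"  # output format, assumed to be an extension string

# export planes 10-18 of four samples as one 2x2 collage figure per plane
stack_to_img(
    ["sampleA.npy", "sampleB.npy", "sampleC.npy", "sampleD.npy"],
    roi_offset=None, roi_size=None)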
Code example #22
File: edge_seg.py Project: kaparna126/magellanmapper
def edge_aware_segmentation(path_atlas, show=True, atlas=True, suffix=None,
                            exclude_labels=None, mirror_mult=-1):
    """Segment an atlas using its previously generated edge map.
    
    Labels may not match their own underlying atlas image well, 
    particularly in the orthogonal directions in which the labels 
    were not constructed. To improve alignment between the labels 
    and the atlas itself, register the labels to an automated, roughly 
    segmented version of the atlas. The goal is to improve the 
    labels' alignment so that the atlas/labels combination can be 
    used for another form of automated segmentation by registering 
    them to experimental brains via :func:`register`.
    
    Edge files are assumed to have been generated by 
    :func:`make_edge_images`.
    
    Args:
        path_atlas (str): Path to the fixed file, typically the atlas file 
            with stained sections. The corresponding edge and labels 
            files will be loaded based on this path.
        show (bool): True if the output images should be displayed; defaults 
            to True.
        atlas (bool): True if the primary image is an atlas, which is assumed 
            to be symmetrical. False if the image is an experimental/sample 
            image, in which case segmentation will be performed on the full 
            images, and stats will not be performed.
        suffix (str): Modifier to append to end of ``path_atlas`` basename for 
            registered image files that were output to a modified name; 
            defaults to None. If ``atlas`` is True, ``suffix`` will only 
            be applied to saved files, with files still loaded based on the 
            original path.
        exclude_labels (List[int]): Sequence of labels to exclude from the
            segmentation; defaults to None.
        mirror_mult (int): Multiplier for mirrored labels; defaults to -1
            to make mirrored labels the inverse of their source labels.
    """
    # adjust image path with suffix
    load_path = path_atlas
    mod_path = path_atlas
    if suffix is not None:
        mod_path = libmag.insert_before_ext(mod_path, suffix)
        if atlas: load_path = mod_path
    
    # load corresponding files via SimpleITK
    atlas_sitk = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_ATLAS.value, get_sitk=True)
    atlas_sitk_edge = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_ATLAS_EDGE.value, get_sitk=True)
    labels_sitk = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
    labels_sitk_markers = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_LABELS_MARKERS.value, get_sitk=True)
    
    # get Numpy arrays of images
    atlas_img_np = sitk.GetArrayFromImage(atlas_sitk)
    atlas_edge = sitk.GetArrayFromImage(atlas_sitk_edge)
    labels_img_np = sitk.GetArrayFromImage(labels_sitk)
    markers = sitk.GetArrayFromImage(labels_sitk_markers)
    
    # segment image from markers
    sym_axis = atlas_refiner.find_symmetric_axis(atlas_img_np)
    mirrored = atlas and sym_axis >= 0
    len_half = None
    seg_args = {"exclude_labels": exclude_labels}
    edge_prof = config.atlas_profile[profiles.RegKeys.EDGE_AWARE_REANNOTATION]
    if edge_prof:
        edge_filt = edge_prof[profiles.RegKeys.WATERSHED_MASK_FILTER]
        if edge_filt and len(edge_filt) > 1:
            # watershed mask filter settings from atlas profile
            seg_args["mask_filt"] = edge_filt[0]
            seg_args["mask_filt_size"] = edge_filt[1]
    if mirrored:
        # segment only half of image, assuming symmetry
        len_half = atlas_img_np.shape[sym_axis] // 2
        slices = [slice(None)] * labels_img_np.ndim
        slices[sym_axis] = slice(len_half)
        sl = tuple(slices)
        labels_seg = segmenter.segment_from_labels(
            atlas_edge[sl], markers[sl], labels_img_np[sl], **seg_args)
    else:
        # segment the full image, including mirrored versions of the excluded
        # labels for the opposite side; build a new sequence rather than
        # relying on list.extend, which returns None
        if exclude_labels is not None:
            excl = list(exclude_labels)
            seg_args["exclude_labels"] = excl + [mirror_mult * e for e in excl]
        labels_seg = segmenter.segment_from_labels(
            atlas_edge, markers, labels_img_np, **seg_args)
    
    smoothing = config.atlas_profile["smooth"]
    if smoothing is not None:
        # smoothing by opening operation based on profile setting
        atlas_refiner.smooth_labels(
            labels_seg, smoothing, config.SmoothingModes.opening)
    
    if mirrored:
        # mirror back to other half
        labels_seg = _mirror_imported_labels(
            labels_seg, len_half, mirror_mult, sym_axis)
    
    # expand background to smoothed background of original labels to 
    # roughly match background while still allowing holes to be filled
    crop = config.atlas_profile["crop_to_orig"]
    atlas_refiner.crop_to_orig(
        labels_img_np, labels_seg, crop)
    
    if labels_seg.dtype != labels_img_np.dtype:
        # watershed may give different output type, so cast back if so
        labels_seg = labels_seg.astype(labels_img_np.dtype)
    labels_sitk_seg = sitk_io.replace_sitk_with_numpy(labels_sitk, labels_seg)
    
    # show DSCs for labels
    print("\nMeasuring overlap of atlas and combined watershed labels:")
    atlas_refiner.measure_overlap_combined_labels(atlas_sitk, labels_sitk_seg)
    print("Measuring overlap of individual original and watershed labels:")
    atlas_refiner.measure_overlap_labels(labels_sitk, labels_sitk_seg)
    print("\nMeasuring overlap of combined original and watershed labels:")
    atlas_refiner.measure_overlap_labels(
        atlas_refiner.make_labels_fg(labels_sitk), 
        atlas_refiner.make_labels_fg(labels_sitk_seg))
    print()
    
    # show and write image to same directory as atlas with appropriate suffix
    sitk_io.write_reg_images(
        {config.RegNames.IMG_LABELS.value: labels_sitk_seg}, mod_path)
    if show: sitk.Show(labels_sitk_seg)
    return path_atlas
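A hedged usage sketch, assuming the edge and marker images for this atlas were already generated by :func:`make_edge_images` under the registered names in ``config.RegNames``; the atlas path and label IDs are hypothetical:

# with atlas=True and a detectable symmetric axis, only one hemisphere is
# segmented and then mirrored back; labels 507 and 512 are left unsegmented
edge_aware_segmentation(
    "atlas/atlasVolume.mhd", show=False, atlas=True,
    exclude_labels=[507, 512])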
Code example #23
def make_density_image(img_path, scale=None, shape=None, suffix=None, 
                       labels_img_sitk=None, channel=None, matches=None):
    """Make a density image based on associated blobs.
    
    Uses the shape of the registered labels image by default to set 
    the voxel sizes for the blobs.
    
    If ``matches`` is given, a heat map will be generated for each set
    of channels given in the dictionary. Otherwise, if the loaded blobs
    file has intensity-based colocalizations, a heat map will be generated
    for each combination of channels.
    
    Args:
        img_path: Path to image, which will be used to identify the blobs file.
        scale: Rescaling factor as a scalar value to find the corresponding 
            full-sized image. Defaults to None to use the register 
            setting ``target_size`` instead if available, falling back 
            to load the full size image to find its shape if necessary.
        shape: Final shape size; defaults to None to use the shape of 
            the labels image.
        suffix: Modifier to append to end of ``img_path`` basename for 
            registered image files that were output to a modified name; 
            defaults to None.
        labels_img_sitk: Labels image as a SimpleITK ``Image`` object; 
            defaults to None, in which case the registered labels image file 
            corresponding to ``img_path`` with any ``suffix`` modifier 
            will be opened.
        channel (List[int]): Sequence of channels to include in density image;
            defaults to None to combine blobs from all channels.
        matches (dict[tuple[int, int], :class:`magmap.cv.colocalizer.BlobMatch`]):
            Dictionary of channel combinations to blob matches; defaults to
            None.
    
    Returns:
        :obj:`np.ndarray`, str: The density image as a Numpy array in the
        same shape as the opened image, and the original ``img_path``
        to track the image, such as for multiprocessing.
    """
    def make_heat_map():
        # build heat map to store densities per label px and save to file
        coord_scaled = ontology.scale_coords(
            blobs_chl[:, :3], scaling, labels_img.shape)
        print("coords", coord_scaled)
        return cv_nd.build_heat_map(labels_img.shape, coord_scaled)
    
    # set up paths and get labels image
    mod_path = img_path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(img_path, suffix)
    if labels_img_sitk is None:
        labels_img_sitk = sitk_io.load_registered_img(
            mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
    labels_img = sitk.GetArrayFromImage(labels_img_sitk)
    
    # load blobs
    blobs = detector.Blobs().load_blobs(np_io.img_to_blobs_path(img_path))
    scaling = np_io.find_scaling(img_path, labels_img.shape, scale)[0]
    if shape is not None:
        # scale blob coordinates and heat map to an alternative final shape
        scaling = np.divide(shape, np.divide(labels_img.shape, scaling))
        labels_spacing = np.multiply(
            labels_img_sitk.GetSpacing()[::-1], 
            np.divide(labels_img.shape, shape))
        labels_img = np.zeros(shape, dtype=labels_img.dtype)
        labels_img_sitk.SetSpacing(labels_spacing[::-1])
    print("using scaling: {}".format(scaling))
    
    # annotate blobs based on position
    blobs_chl = blobs.blobs
    if channel is not None:
        blobs_chl = blobs_chl[np.isin(detector.get_blobs_channel(
            blobs_chl), channel)]
    heat_map = make_heat_map()
    print("heat map", heat_map.shape, heat_map.dtype, labels_img.shape)
    imgs_write = {
        config.RegNames.IMG_HEAT_MAP.value:
            sitk_io.replace_sitk_with_numpy(labels_img_sitk, heat_map)}
    
    heat_colocs = None
    if matches:
        # create heat maps for match-based colocalization combos
        heat_colocs = []
        for chl_combo, chl_matches in matches.items():
            print("Generating match-based colocalization heat map "
                  "for channel combo:", chl_combo)
            # use blobs in first channel of each channel pair for simplicity
            blobs_chl = chl_matches.get_blobs(1)
            heat_colocs.append(make_heat_map())
    
    elif blobs.colocalizations is not None:
        # create heat map for each intensity-based colocalization combo
        # as a separate channel in output image
        blob_chls = range(blobs.colocalizations.shape[1])
        blob_chls_len = len(blob_chls)
        if blob_chls_len > 1:
            # get all channel combos that include given channels
            combos = []
            chls = blob_chls if channel is None else channel
            for r in range(2, blob_chls_len + 1):
                combos.extend(
                    [tuple(c) for c in itertools.combinations(blob_chls, r)
                     if all([h in c for h in chls])])
            
            heat_colocs = []
            for combo in combos:
                print("Generating intensity-based colocalization heat map "
                      "for channel combo:", combo)
                blobs_chl = blobs.blobs[np.all(np.equal(
                    blobs.colocalizations[:, combo], 1), axis=1)]
                heat_colocs.append(make_heat_map())
    
    if heat_colocs is not None:
        # combine heat maps into single image
        heat_colocs = np.stack(heat_colocs, axis=3)
        imgs_write[config.RegNames.IMG_HEAT_COLOC.value] = \
            sitk_io.replace_sitk_with_numpy(
                labels_img_sitk, heat_colocs)
    
    # write images to file
    sitk_io.write_reg_images(imgs_write, mod_path)
    return heat_map, img_path
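When ``shape`` is given, the rescaling composes two factors: the blobs-to-labels scaling and the labels-to-output resize. A small numeric sketch of that arithmetic with hypothetical values:

import numpy as np

labels_shape = (200, 400, 300)       # registered labels image shape
scaling = np.array([0.5, 0.5, 0.5])  # blob space -> labels image
shape = (100, 200, 150)              # requested output shape

# shape of the full-size blob space implied by the labels image
full_shape = np.divide(labels_shape, scaling)  # (400., 800., 600.)
# combined scaling from blob coordinates to the output shape, matching
# scaling = np.divide(shape, np.divide(labels_img.shape, scaling)) above
scaling = np.divide(shape, full_shape)  # (0.25, 0.25, 0.25)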
Code example #24
def make_density_image(
    img_path: str,
    scale: Optional[float] = None,
    shape: Optional[Sequence[int]] = None,
    suffix: Optional[str] = None,
    labels_img_sitk: Optional[sitk.Image] = None,
    channel: Optional[Sequence[int]] = None,
    matches: Optional[Dict[Tuple[int, int], "colocalizer.BlobMatch"]] = None,
    atlas_profile: Optional["atlas_prof.AtlasProfile"] = None
) -> Tuple[np.ndarray, str]:
    """Make a density image based on associated blobs.
    
    Uses the size and resolutions of the original image stored in the blobs
    if available to determine scaling between the blobs and the output image.
    Otherwise, uses the shape of the registered labels image to set 
    the voxel sizes for the blobs.
    
    If ``matches`` is given, a heat map will be generated for each set
    of channels given in the dictionary. Otherwise, if the loaded blobs
    file has intensity-based colocalizations, a heat map will be generated
    for each combination of channels.
    
    Args:
        img_path: Path to image, which will be used to identify the blobs file.
        scale: Scaling factor between the blobs' space and the output space;
            defaults to None. Scaling is found by
            :meth:`magmap.np_io.find_scaling`.
        shape: Output shape, used for scaling; defaults to None.
        suffix: Modifier to append to end of ``img_path`` basename for 
            registered image files that were output to a modified name; 
            defaults to None.
        labels_img_sitk: Labels image; defaults to None to load from a
            registered labels image.
        channel: Sequence of channels to include in density image. For
            multiple channels, blobs from all these channels are combined
            into one heatmap.  Defaults to None to use all channels.
        matches: Dictionary of channel combinations to blob matches; defaults
            to None.
        atlas_profile: Atlas profile, used for scaling; defaults to None.
    
    Returns:
        Tuple of the density image as a Numpy array in the same shape
        as the opened image, and the original ``img_path`` to track
        the image, such as for multiprocessing.
    
    """
    def make_heat_map():
        # build heat map to store densities per label px and save to file
        coord_scaled = ontology.scale_coords(blobs_chl[:, :3], scaling,
                                             labels_img.shape)
        _logger.debug("Scaled coords:\n%s", coord_scaled)
        return cv_nd.build_heat_map(labels_img.shape, coord_scaled)

    # set up paths and get labels image
    _logger.info("\n\nGenerating heat map from blobs")
    mod_path = img_path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(img_path, suffix)

    # load blobs
    blobs = detector.Blobs().load_blobs(np_io.img_to_blobs_path(img_path))

    is_2d = False
    if (shape is not None and blobs.roi_size is not None
            and blobs.resolutions is not None):
        # prepare output image and scaling factor from it to the blobs
        scaling = np.divide(shape, blobs.roi_size)
        labels_spacing = np.divide(blobs.resolutions[0], scaling)
        labels_img = np.zeros(shape, dtype=np.uint8)
        labels_img_sitk = sitk.GetImageFromArray(labels_img)
        labels_img_sitk.SetSpacing(labels_spacing[::-1])

    else:
        # default to use labels image as the size of the output image
        if labels_img_sitk is None:
            labels_img_sitk = sitk_io.load_registered_img(
                mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
        labels_img = sitk.GetArrayFromImage(labels_img_sitk)

        is_2d = labels_img.ndim == 2
        if is_2d:
            # temporarily convert 2D images to 3D
            labels_img = labels_img[None]

        # find the scaling between the blobs and the labels image
        target_size = (None if atlas_profile is None else
                       atlas_profile["target_size"])
        scaling = np_io.find_scaling(img_path, labels_img.shape, scale,
                                     target_size)[0]

        if shape is not None:
            # scale blob coordinates and heat map to an alternative final shape
            scaling = np.divide(shape, np.divide(labels_img.shape, scaling))
            labels_spacing = np.multiply(labels_img_sitk.GetSpacing()[::-1],
                                         np.divide(labels_img.shape, shape))
            labels_img = np.zeros(shape, dtype=labels_img.dtype)
            labels_img_sitk.SetSpacing(labels_spacing[::-1])
    _logger.debug("Using image scaling: {}".format(scaling))

    # annotate blobs based on position
    blobs_chl = blobs.blobs
    if channel is not None:
        _logger.info(
            "Using blobs from channel(s), combining if multiple channels: %s",
            channel)
        blobs_chl = blobs_chl[np.isin(
            detector.Blobs.get_blobs_channel(blobs_chl), channel)]
    heat_map = make_heat_map()
    if is_2d:
        # convert the heat map back to 2D to match the original labels image
        heat_map = heat_map[0]
    imgs_write = {
        config.RegNames.IMG_HEAT_MAP.value:
        sitk_io.replace_sitk_with_numpy(labels_img_sitk, heat_map)
    }

    heat_colocs = None
    if matches:
        # create heat maps for match-based colocalization combos
        heat_colocs = []
        for chl_combo, chl_matches in matches.items():
            _logger.info(
                "Generating match-based colocalization heat map "
                "for channel combo: %s", chl_combo)
            # use blobs in first channel of each channel pair for simplicity
            blobs_chl = chl_matches.get_blobs(1)
            heat_colocs.append(make_heat_map())

    elif blobs.colocalizations is not None:
        # create heat map for each intensity-based colocalization combo
        # as a separate channel in output image
        blob_chls = range(blobs.colocalizations.shape[1])
        blob_chls_len = len(blob_chls)
        if blob_chls_len > 1:
            # get all channel combos that include given channels
            combos = []
            chls = blob_chls if channel is None else channel
            for r in range(2, blob_chls_len + 1):
                combos.extend([
                    tuple(c) for c in itertools.combinations(blob_chls, r)
                    if all([h in c for h in chls])
                ])

            heat_colocs = []
            for combo in combos:
                _logger.info(
                    "Generating intensity-based colocalization heat map "
                    "for channel combo: %s", combo)
                blobs_chl = blobs.blobs[np.all(np.equal(
                    blobs.colocalizations[:, combo], 1), axis=1)]
                heat_colocs.append(make_heat_map())

    if heat_colocs is not None:
        # combine heat maps into single image
        heat_colocs = np.stack(heat_colocs, axis=3)
        imgs_write[config.RegNames.IMG_HEAT_COLOC.value] = \
            sitk_io.replace_sitk_with_numpy(
                labels_img_sitk, heat_colocs)

    # write images to file
    sitk_io.write_reg_images(imgs_write, mod_path)
    return heat_map, img_path
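The intensity-based branch enumerates every channel combination that contains all the requested channels. The same ``itertools`` logic in isolation, for three hypothetical channels filtered on channel 0:

import itertools

blob_chls = range(3)  # channels 0-2
chls = [0]  # keep only combos that include channel 0
combos = []
for r in range(2, len(blob_chls) + 1):
    combos.extend(
        tuple(c) for c in itertools.combinations(blob_chls, r)
        if all(h in c for h in chls))
print(combos)  # [(0, 1), (0, 2), (0, 1, 2)]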
Code example #25
File: edge_seg.py Project: sanderslab/magellanmapper
def edge_aware_segmentation(
        path_atlas: str, atlas_profile: atlas_prof.AtlasProfile,
        show: bool = True, atlas: bool = True, suffix: Optional[str] = None,
        exclude_labels: Optional[Sequence[int]] = None,
        mirror_mult: int = -1) -> str:
    """Segment an atlas using its previously generated edge map.
    
    Labels may not match their own underlying atlas image well, 
    particularly in the orthogonal directions in which the labels 
    were not constructed. To improve alignment between the labels 
    and the atlas itself, register the labels to an automated, roughly 
    segmented version of the atlas. The goal is to improve the 
    labels' alignment so that the atlas/labels combination can be 
    used for another form of automated segmentation by registering 
    them to experimental brains via :func:`register`.
    
    Edge files are assumed to have been generated by 
    :func:`make_edge_images`.
    
    Args:
        path_atlas: Path to the fixed file, typically the atlas file 
            with stained sections. The corresponding edge and labels 
            files will be loaded based on this path.
        atlas_profile: Atlas profile.
        show: True if the output images should be displayed; defaults 
            to True.
        atlas: True if the primary image is an atlas, which is assumed 
            to be symmetrical. False if the image is an experimental/sample 
            image, in which case segmentation will be performed on the full 
            images, and stats will not be performed.
        suffix: Modifier to append to end of ``path_atlas`` basename for 
            registered image files that were output to a modified name; 
            defaults to None. If ``atlas`` is True, ``suffix`` will only 
            be applied to saved files, with files still loaded based on the 
            original path.
        exclude_labels: Sequence of labels to exclude from the
            segmentation; defaults to None.
        mirror_mult: Multiplier for mirrored labels; defaults to -1
            to make mirrored labels the inverse of their source labels.
    """
    # adjust image path with suffix
    load_path = path_atlas
    mod_path = path_atlas
    if suffix is not None:
        mod_path = libmag.insert_before_ext(mod_path, suffix)
        if atlas: load_path = mod_path
    
    # load corresponding files via SimpleITK
    atlas_sitk = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_ATLAS.value, get_sitk=True)
    atlas_sitk_edge = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_ATLAS_EDGE.value, get_sitk=True)
    labels_sitk = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
    labels_sitk_markers = sitk_io.load_registered_img(
        load_path, config.RegNames.IMG_LABELS_MARKERS.value, get_sitk=True)
    
    # get Numpy arrays of images
    atlas_img_np = sitk.GetArrayFromImage(atlas_sitk)
    atlas_edge = sitk.GetArrayFromImage(atlas_sitk_edge)
    labels_img_np = sitk.GetArrayFromImage(labels_sitk)
    markers = sitk.GetArrayFromImage(labels_sitk_markers)
    
    # segment image from markers
    sym_axis = atlas_refiner.find_symmetric_axis(atlas_img_np)
    mirrored = atlas and sym_axis >= 0
    len_half = None
    seg_args = {"exclude_labels": exclude_labels}
    edge_prof = atlas_profile[profiles.RegKeys.EDGE_AWARE_REANNOTATION]
    if edge_prof:
        edge_filt = edge_prof[profiles.RegKeys.WATERSHED_MASK_FILTER]
        if edge_filt and len(edge_filt) > 1:
            # watershed mask filter settings from atlas profile
            seg_args["mask_filt"] = edge_filt[0]
            seg_args["mask_filt_size"] = edge_filt[1]
    if mirrored:
        # segment only half of image, assuming symmetry
        len_half = atlas_img_np.shape[sym_axis] // 2
        slices = [slice(None)] * labels_img_np.ndim
        slices[sym_axis] = slice(len_half)
        sl = tuple(slices)
        labels_seg = segmenter.segment_from_labels(
            atlas_edge[sl], markers[sl], labels_img_np[sl], **seg_args)
    else:
        # segment the full image, including mirrored versions of the excluded
        # labels for the opposite side; build a new sequence rather than
        # relying on list.extend, which returns None
        if exclude_labels is not None:
            excl = list(exclude_labels)
            seg_args["exclude_labels"] = excl + [mirror_mult * e for e in excl]
        labels_seg = segmenter.segment_from_labels(
            atlas_edge, markers, labels_img_np, **seg_args)
    
    smoothing = atlas_profile["smooth"]
    smoothing_mode = atlas_profile["smoothing_mode"]
    cond = ["edge-aware_seg"]
    if smoothing is not None:
        # smoothing by opening operation based on profile setting
        meas_smoothing = atlas_profile["meas_smoothing"]
        cond.append("smoothing")
        df_aggr, df_raw = atlas_refiner.smooth_labels(
            labels_seg, smoothing, smoothing_mode,
            meas_smoothing, labels_sitk.GetSpacing()[::-1])
        df_base_path = os.path.splitext(mod_path)[0]
        if df_raw is not None:
            # write raw smoothing metrics
            df_io.data_frames_to_csv(
                df_raw, f"{df_base_path}_{config.PATH_SMOOTHING_RAW_METRICS}")
        if df_aggr is not None:
            # write aggregated smoothing metrics
            df_io.data_frames_to_csv(
                df_aggr, f"{df_base_path}_{config.PATH_SMOOTHING_METRICS}")
    
    if mirrored:
        # mirror back to other half
        labels_seg = _mirror_imported_labels(
            labels_seg, len_half, mirror_mult, sym_axis)
    
    # expand background to smoothed background of original labels to 
    # roughly match background while still allowing holes to be filled
    crop = atlas_profile["crop_to_orig"]
    atlas_refiner.crop_to_orig(
        labels_img_np, labels_seg, crop)
    
    if labels_seg.dtype != labels_img_np.dtype:
        # watershed may give different output type, so cast back if so
        labels_seg = labels_seg.astype(labels_img_np.dtype)
    labels_sitk_seg = sitk_io.replace_sitk_with_numpy(labels_sitk, labels_seg)
    
    # show DSCs for labels
    _logger.info(
        "\nMeasuring overlap of individual original and watershed labels:")
    dsc_lbls_indiv = atlas_refiner.measure_overlap_labels(
        labels_sitk, labels_sitk_seg)
    _logger.info(
        "\nMeasuring overlap of combined original and watershed labels:")
    dsc_lbls_comb = atlas_refiner.measure_overlap_labels(
        atlas_refiner.make_labels_fg(labels_sitk),
        atlas_refiner.make_labels_fg(labels_sitk_seg))
    _logger.info("")
    
    # measure and save whole atlas metrics
    metrics = {
        config.AtlasMetrics.SAMPLE: [os.path.basename(mod_path)],
        config.AtlasMetrics.REGION: config.REGION_ALL,
        config.AtlasMetrics.CONDITION: "|".join(cond),
        config.AtlasMetrics.DSC_LABELS_ORIG_NEW_COMBINED: dsc_lbls_comb,
        config.AtlasMetrics.DSC_LABELS_ORIG_NEW_INDIV: dsc_lbls_indiv,
    }
    df_metrics_path = libmag.combine_paths(
        mod_path, config.PATH_ATLAS_IMPORT_METRICS)
    atlas_refiner.measure_atlas_refinement(
        metrics, atlas_sitk, labels_sitk_seg, atlas_profile, df_metrics_path)

    # show and write image to same directory as atlas with appropriate suffix
    sitk_io.write_reg_images(
        {config.RegNames.IMG_LABELS.value: labels_sitk_seg}, mod_path)
    if show: sitk.Show(labels_sitk_seg)
    return path_atlas
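A hedged usage sketch for this profile-driven variant; the import path is assumed from the MagellanMapper layout, and the sketch assumes ``AtlasProfile()`` constructs usable default settings:

from magmap.settings import atlas_prof  # assumed module path

profile = atlas_prof.AtlasProfile()  # assumed default-constructible
edge_aware_segmentation(
    "atlas/atlasVolume.mhd", profile, show=False, atlas=True)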
Code example #26
def make_density_image(img_path,
                       scale=None,
                       shape=None,
                       suffix=None,
                       labels_img_sitk=None):
    """Make a density image based on associated blobs.
    
    Uses the shape of the registered labels image by default to set 
    the voxel sizes for the blobs.
    
    Args:
        img_path: Path to image, which will be used to identify the blobs file.
        scale: Rescaling factor as a scalar value to find the corresponding 
            full-sized image. Defaults to None to use the register 
            setting ``target_size`` instead if available, falling back 
            to load the full size image to find its shape if necessary.
        shape: Final shape size; defaults to None to use the shape of 
            the labels image.
        suffix: Modifier to append to end of ``img_path`` basename for 
            registered image files that were output to a modified name; 
            defaults to None.
        labels_img_sitk: Labels image as a SimpleITK ``Image`` object; 
            defaults to None, in which case the registered labels image file 
            corresponding to ``img_path`` with any ``suffix`` modifier 
            will be opened.
    
    Returns:
        Tuple of the density image as a Numpy array in the same shape as 
        the opened image; Numpy array of blob IDs; and the original 
        ``img_path`` to track such as for multiprocessing.
    """
    mod_path = img_path
    if suffix is not None:
        mod_path = libmag.insert_before_ext(img_path, suffix)
    if labels_img_sitk is None:
        labels_img_sitk = sitk_io.load_registered_img(
            mod_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
    labels_img = sitk.GetArrayFromImage(labels_img_sitk)
    # load blobs
    blobs, scaling, _ = np_io.load_blobs(img_path, True, labels_img.shape,
                                         scale)
    if shape is not None:
        # scale blob coordinates and heat map to an alternative final shape
        scaling = np.divide(shape, np.divide(labels_img.shape, scaling))
        labels_spacing = np.multiply(labels_img_sitk.GetSpacing()[::-1],
                                     np.divide(labels_img.shape, shape))
        labels_img = np.zeros(shape, dtype=labels_img.dtype)
        labels_img_sitk.SetSpacing(labels_spacing[::-1])
    print("using scaling: {}".format(scaling))
    # annotate blobs based on position
    blobs_ids, coord_scaled = ontology.get_label_ids_from_position(
        blobs[:, :3], labels_img, scaling, return_coord_scaled=True)
    print("blobs_ids: {}".format(blobs_ids))

    # build heat map to store densities per label px and save to file
    heat_map = cv_nd.build_heat_map(labels_img.shape, coord_scaled)
    out_path = sitk_io.reg_out_path(mod_path,
                                    config.RegNames.IMG_HEAT_MAP.value)
    print("writing {}".format(out_path))
    heat_map_sitk = sitk_io.replace_sitk_with_numpy(labels_img_sitk, heat_map)
    sitk.WriteImage(heat_map_sitk, out_path, False)
    return heat_map, blobs_ids, img_path
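Conceptually, the heat map bins scaled blob coordinates into per-voxel counts. A toy illustration of that counting (the real work happens in ``cv_nd.build_heat_map``; this stand-in only shows the assumed idea):

import numpy as np

shape = (2, 2, 2)  # tiny hypothetical labels image shape
coord_scaled = np.array([[0, 0, 0], [0, 0, 0], [1, 1, 0]])
heat = np.zeros(shape, dtype=int)
# increment each voxel once per blob that lands in it
np.add.at(heat, tuple(coord_scaled.T), 1)
print(heat[0, 0, 0], heat[1, 1, 0])  # 2 1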
Code example #27
def stack_to_img(paths,
                 roi_offset,
                 roi_size,
                 series=None,
                 subimg_offset=None,
                 subimg_size=None,
                 animated=False,
                 suffix=None):
    """Build an image file from a stack of images in a directory or an 
    array, exporting as an animated GIF or movie for multiple planes or 
    extracting a single plane to a standard image file format.
    
    Writes the output file(s) to the parent directory of the first path
    in ``paths``.
    
    Args:
        paths (List[str]): Image paths, which can each be either an image 
            directory or a base path to a single image, including 
            volumetric images.
        roi_offset (Sequence[int]): Tuple of offset given in user order
            ``x,y,z``; defaults to None. Requires ``roi_size`` to not be None.
        roi_size (Sequence[int]): Size of the region of interest in user order 
            ``x,y,z``; defaults to None. Requires ``roi_offset`` to not be None.
        series (int): Image series number; defaults to None.
        subimg_offset (List[int]): Sub-image offset as (z,y,x) to load;
            defaults to None.
        subimg_size (List[int]): Sub-image size as (z,y,x) to load;
            defaults to None.
        animated (bool): True to export as an animated image; defaults to False.
        suffix (str): String to append to output path before extension; 
            defaults to None to ignore.

    """
    size = config.plot_labels[config.PlotLabels.LAYOUT]
    ncols, nrows = size if size else (1, 1)
    fig, gs = plot_support.setup_fig(
        nrows, ncols, config.plot_labels[config.PlotLabels.SIZE])
    plotted_imgs = None
    num_paths = len(paths)
    for i in range(nrows):
        for j in range(ncols):
            n = i * ncols + j
            if n >= num_paths: break
            ax = fig.add_subplot(gs[i, j])
            path_sub = paths[n]
            # TODO: test directory of images
            # TODO: avoid reloading first image
            np_io.setup_images(path_sub, series, subimg_offset, subimg_size)
            plotted_imgs = stack_to_ax_imgs(
                ax,
                config.image5d,
                path_sub,
                offset=roi_offset,
                roi_size=roi_size,
                slice_vals=config.slice_vals,
                rescale=config.transform[config.Transforms.RESCALE],
                labels_imgs=(config.labels_img, config.borders_img),
                multiplane=animated,
                fit=(size is None or ncols * nrows == 1))
    path_base = paths[0]
    if animated:
        # generate animated image (eg animated GIF or movie file)
        animate_imgs(path_base, plotted_imgs, config.delay, config.savefig,
                     suffix)
    else:
        # save image as single file
        if roi_offset:
            # get plane index from coordinate at the given axis in ROI offset
            planei = roi_offset[::-1][plot_support.get_plane_axis(
                config.plane, get_index=True)]
        else:
            # get plane index from slice start
            planei = config.slice_vals[0]
        if num_paths > 1:
            # output filename as a collage of images
            if not os.path.isdir(path_base):
                path_base = os.path.dirname(path_base)
            path_base = os.path.join(path_base, "collage")
        mod = "_plane_{}{}".format(plot_support.get_plane_axis(config.plane),
                                   planei)
        if suffix: path_base = libmag.insert_before_ext(path_base, suffix)
        plot_support.save_fig(path_base, config.savefig, mod)
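The plane index lookup reverses the user-order ``x,y,z`` offset to ``z,y,x`` before indexing by the plane axis. A short sketch, assuming ``get_plane_axis(..., get_index=True)`` returns 0 for the z axis of ``xy`` planes:

roi_offset = (50, 60, 25)  # user order x,y,z
plane_axis_idx = 0  # assumed index for "xy" planes (the z axis)
planei = roi_offset[::-1][plane_axis_idx]
print(planei)  # 25, the plane to extract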
Code example #28
def plot_knns(img_paths, suffix=None, show=False, names=None):
    """Plot k-nearest-neighbor distances for multiple sets of blobs,
    overlaying on a single plot.

    Args:
        img_paths (List[str]): Base paths from which registered labels and
            blobs files will be found and output blobs file save location
            will be constructed.
        suffix (str): Suffix modifier for each path in ``img_paths``;
            defaults to None.
        show (bool): True to plot the distances; defaults to False.
        names (List[str]): Sequence of names corresponding to ``img_paths``
            for the plot legend.

    """
    cluster_settings = config.atlas_profile[profiles.RegKeys.METRICS_CLUSTER]
    knn_n = cluster_settings[profiles.RegKeys.KNN_N]
    if not knn_n:
        knn_n = cluster_settings[profiles.RegKeys.DBSCAN_MINPTS] - 1
    print("Calculating k-nearest-neighbor distances and plotting distances "
          "for neighbor {}".format(knn_n))

    # set up combined data frames for all samples at each zoom level
    df_keys = ("ov", "zoom")
    dfs_comb = {key: [] for key in df_keys}
    names_disp = names if names else []
    for i, img_path in enumerate(img_paths):
        # load blobs associated with image
        mod_path = img_path
        if suffix is not None:
            mod_path = libmag.insert_before_ext(img_path, suffix)
        labels_img_np = sitk_io.load_registered_img(
            mod_path, config.RegNames.IMG_LABELS.value)
        blobs = detector.Blobs().load_blobs(np_io.img_to_blobs_path(img_path))
        scaling, res = np_io.find_scaling(img_path, labels_img_np.shape)
        if blobs is None:
            libmag.warn("unable to load nuclei coordinates for", img_path)
            continue
        # convert to physical units and display k-nearest-neighbors for nuclei
        blobs_phys = np.multiply(blobs.blobs[:, :3], res)
        # TESTING: given the same blobs, simply shift
        #blobs = np.multiply(blobs[i*10000000:, :3], res)
        _, _, dfs = knn_dist(blobs_phys, knn_n, 2, 1000000, False)
        if names is None:
            # default to naming from filename
            names_disp.append(os.path.basename(mod_path))
        for j, df in enumerate(dfs):
            dfs_comb[df_keys[j]].append(df)

    for key in dfs_comb:
        # combine data frames at each zoom level, save, and plot with
        # different colors for each image
        df = df_io.join_dfs(dfs_comb[key], "point")
        dist_cols = [col for col in df.columns if col.startswith("dist")]
        rename_cols = {col: name for col, name in zip(dist_cols, names_disp)}
        df = df.rename(rename_cols, axis=1)
        out_path = "knn_dist_combine_{}".format(key)
        df_io.data_frames_to_csv(df, out_path)
        plot_2d.plot_lines(out_path,
                           "point",
                           rename_cols.values(),
                           df=df,
                           show=show,
                           title=config.plot_labels[config.PlotLabels.TITLE])
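A hedged usage sketch; the sample paths are hypothetical, and the current atlas profile must provide the ``METRICS_CLUSTER`` settings (``KNN_N`` or ``DBSCAN_MINPTS``) read at the top of the function:

# overlay k-nearest-neighbor distance curves for two samples on one plot
plot_knns(
    ["sampleA.czi", "sampleB.czi"], suffix=None, show=True,
    names=["Sample A", "Sample B"])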
Code example #29
def main():
    """Process stats based on command-line mode."""
    
    # process stats based on command-line argument
    
    df_task = libmag.get_enum(config.df_task, config.DFTasks)

    if df_task is config.DFTasks.MERGE_CSVS:
        # merge multiple CSV files into single CSV file
        merge_csvs(config.filenames, config.prefix)

    elif df_task is config.DFTasks.MERGE_CSVS_COLS:
        # join multiple CSV files based on a given index column into single
        # CSV file
        dfs = [pd.read_csv(f) for f in config.filenames]
        df = join_dfs(
            dfs, config.plot_labels[config.PlotLabels.ID_COL],
            config.plot_labels[config.PlotLabels.DROP_DUPS])
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(
                config.filename, "_joined")
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.APPEND_CSVS_COLS:
        # append columns from multiple CSV files into a single CSV file
        dfs = [pd.read_csv(f) for f in config.filenames]
        labels = libmag.to_seq(
            config.plot_labels[config.PlotLabels.X_LABEL])
        extra_cols = libmag.to_seq(
            config.plot_labels[config.PlotLabels.X_COL])
        data_cols = libmag.to_seq(
            config.plot_labels[config.PlotLabels.Y_COL])
        df = append_cols(
            dfs, labels, extra_cols=extra_cols, data_cols=data_cols)
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(
                config.filename, "_appended")
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.EXPS_BY_REGION:
        # convert volume stats data frame to experiments by region
        exps_by_regions(config.filename)

    elif df_task is config.DFTasks.EXTRACT_FROM_CSV:
        # extract rows from CSV file based on matching rows in given col, where 
        # "X_COL" = name of column on which to filter, and 
        # "Y_COL" = values in this column for which rows should be kept
        df = pd.read_csv(config.filename)
        df_filt, _ = filter_dfs_on_vals(
            [df], None, 
            [(config.plot_labels[config.PlotLabels.X_COL],
              config.plot_labels[config.PlotLabels.Y_COL])])
        out_path = config.prefix
        if not out_path:
            out_path = "filtered.csv"
        data_frames_to_csv(df_filt, out_path)

    elif df_task is config.DFTasks.ADD_CSV_COLS:
        # add columns with corresponding values for all rows, where 
        # "X_COL" = name of column(s) to add, and 
        # "Y_COL" = value(s) for corresponding cols
        df = pd.read_csv(config.filename)
        cols = {k: v for k, v in zip(
            libmag.to_seq(config.plot_labels[config.PlotLabels.X_COL]),
            libmag.to_seq(config.plot_labels[config.PlotLabels.Y_COL]))}
        df = add_cols_df(df, cols)
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(
                config.filename, "_appended")
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.NORMALIZE:
        # normalize values in each group to that of a base group, where
        # "ID_COL" = ID column(s),
        # "X_COL" = condition column
        # "Y_COL" = base condition to which values will be normalized,
        # "GROUP_COL" = metric columns to normalize,
        # "WT_COL" = extra columns to keep
        df = pd.read_csv(config.filename)
        df = normalize_df(
            df, config.plot_labels[config.PlotLabels.ID_COL],
            config.plot_labels[config.PlotLabels.X_COL],
            config.plot_labels[config.PlotLabels.Y_COL], 
            config.plot_labels[config.PlotLabels.GROUP_COL],
            config.plot_labels[config.PlotLabels.WT_COL])
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(config.filename, "_norm")
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.MERGE_EXCELS:
        # merge multiple Excel files into single Excel file, with each
        # original Excel file as a separate sheet in the combined file
        merge_excels(
            config.filenames, config.prefix,
            config.plot_labels[config.PlotLabels.LEGEND_NAMES])
    
    elif df_task in _ARITHMETIC_TASKS:
        # perform arithmetic operations on pairs of columns in a data frame
        df = pd.read_csv(config.filename)
        fn = _ARITHMETIC_TASKS[df_task]
        for col_x, col_y, col_id in zip(
                libmag.to_seq(config.plot_labels[config.PlotLabels.X_COL]),
                libmag.to_seq(config.plot_labels[config.PlotLabels.Y_COL]),
                libmag.to_seq(config.plot_labels[config.PlotLabels.ID_COL])):
            # perform the arithmetic operation specified by the specific
            # task on the pair of columns, inserting the results in a new
            # column specified by ID
            func_to_paired_cols(df, col_x, col_y, fn, col_id)
        
        # output modified data frame to CSV file
        out_path = config.prefix
        if not out_path:
            suffix = config.suffix if config.suffix else ""
            out_path = libmag.insert_before_ext(config.filename, suffix)
        data_frames_to_csv(df, out_path)
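A hedged sketch of driving one branch of ``main`` programmatically rather than through the CLI; the module path, task string, and column names are assumptions for illustration:

from magmap.settings import config  # assumed module path

# configure the NORMALIZE task: scale each group's metric to a base group
config.df_task = "normalize"  # assumed to resolve to DFTasks.NORMALIZE
config.filename = "vols_stats.csv"  # hypothetical input CSV
config.plot_labels[config.PlotLabels.ID_COL] = "Region"
config.plot_labels[config.PlotLabels.X_COL] = "Condition"
config.plot_labels[config.PlotLabels.Y_COL] = "control"
config.plot_labels[config.PlotLabels.GROUP_COL] = "Volume"
main()  # writes vols_stats_norm.csv since config.prefix is unset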