Example #1
def segment_ws(roi, channel, thresholded=None, blobs=None):
    """Segment an image using a compact watershed, including the option 
    to use a 3D-seeded watershed approach.
    
    Args:
        roi: ROI as a Numpy array in (z, y, x) order.
        channel: Channel to pass to :func:``plot_3d.setup_channels``.
        thresholded: Thresholded image such as a segmentation into foreground/
            background given by Random-walker (:func:``segment_rw``). 
            Defaults to None, in which case Otsu thresholding will be performed.
        blobs: Blobs as a Numpy array in [[z, y, x, ...], ...] order, which 
            are used as seeds for the watershed. Defaults to None, in which 
            case peaks on a distance transform will be used.
    
    Returns:
        List of watershed labels for each given channel, with each set 
        of labels given as an image of the same shape as ``roi``.
    """
    labels = []
    labels_ws = None
    multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
    for i in channels:
        roi_segment = roi[..., i] if multichannel else roi
        if thresholded is None:
            # Otsu thresholding and object separation based on local maxima
            # rather than a seeded watershed approach
            roi_thresh = filters.threshold_otsu(roi, 64)
            thresholded = roi_segment > roi_thresh
        else:
            # random-walker output labels are > 0, so shift down by 1 to zero-base them
            thresholded = thresholded[0] - 1

        if blobs is None:
            # default to finding peaks of distance transform if no blobs
            # given, using an anisotropic footprint
            distance = ndimage.distance_transform_edt(thresholded)
            try:
                local_max = feature.peak_local_max(distance,
                                                   indices=False,
                                                   footprint=np.ones(
                                                       (1, 3, 3)),
                                                   labels=thresholded)
            except IndexError as e:
                print(e)
                raise e
            markers = measure.label(local_max)
        else:
            markers = _markers_from_blobs(thresholded, blobs)

        # watershed with slight increase in compactness to give basins with
        # more regular, larger shape
        labels_ws = watershed_distance(thresholded, markers, compactness=0.1)

        # clean up segmentation
        labels_ws = _carve_segs(labels_ws, blobs)
        labels_ws = morphology.remove_small_objects(labels_ws, min_size=100)
        #print("num ws blobs: {}".format(len(np.unique(labels_ws)) - 1))
        labels_ws = labels_ws[None]
        labels.append(labels_ws)
    return labels
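
The distance-transform seeding used in the ``blobs is None`` branch can be illustrated with a small, self-contained scikit-image sketch; the synthetic volume, threshold, and compactness value below are illustrative assumptions rather than this project's profile settings:

import numpy as np
from scipy import ndimage
from skimage import feature, segmentation

# synthetic 3D volume with two bright blobs on a dark background
vol = np.zeros((5, 64, 64))
vol[2, 20, 20] = vol[2, 20, 40] = 1
vol = ndimage.gaussian_filter(vol, 4)

# threshold to foreground, then seed a compact watershed from peaks of the
# distance transform, mirroring the ``thresholded is None`` branch above
fg = vol > vol.mean()
dist = ndimage.distance_transform_edt(fg)
peaks = feature.peak_local_max(dist, footprint=np.ones((1, 3, 3)), labels=fg)
markers = np.zeros(vol.shape, dtype=int)
markers[tuple(peaks.T)] = np.arange(1, len(peaks) + 1)
labels = segmentation.watershed(-dist, markers, mask=fg, compactness=0.1)
print(np.unique(labels))
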
Example #2
def export_planes(image5d, ext, channel=None, separate_chls=False):
    """Export each plane and channel combination into separate 2D image files

    Args:
        image5d (:obj:`np.ndarray`): Image in ``t,z,y,x[,c]`` format.
        ext (str): Save format given as an extension without period.
        channel (int): Channel to save; defaults to None for all channels.
        separate_chls (bool): True to export each channel of each plane to
            a separate image; defaults to False.

    """
    suffix = "_export" if config.suffix is None else config.suffix
    out_path = libmag.make_out_path(suffix=suffix)
    output_dir = os.path.dirname(out_path)
    basename = os.path.splitext(os.path.basename(out_path))[0]
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    roi = image5d[0]
    multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
    num_digits = len(str(len(roi)))
    for i, plane in enumerate(roi):
        path = os.path.join(output_dir,
                            "{}_{:0{}d}".format(basename, i, num_digits))
        if separate_chls and multichannel:
            for chl in channels:
                # save each channel as separate file
                plane_chl = plane[..., chl]
                path_chl = "{}{}{}.{}".format(path, importer.CHANNEL_SEPARATOR,
                                              chl, ext)
                print("Saving image plane {} to {}".format(i, path_chl))
                io.imsave(path_chl, plane_chl)
        else:
            # save single channel plane
            path = "{}.{}".format(path, ext)
            print("Saving image plane {} to {}".format(i, path))
            io.imsave(path, plane)
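
The heart of the export is just zero-padded file names plus ``io.imsave`` per plane; a minimal stand-alone sketch under assumed synthetic data and output names:

import os
import numpy as np
from skimage import io

stack = (np.random.rand(12, 32, 32) * 255).astype(np.uint8)  # z,y,x stack
out_dir = "exported_planes"
os.makedirs(out_dir, exist_ok=True)

# pad plane indices so that exported files sort in z order
num_digits = len(str(len(stack)))
for i, plane in enumerate(stack):
    path = os.path.join(out_dir, "plane_{:0{}d}.png".format(i, num_digits))
    io.imsave(path, plane)
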
Example #3
def export_planes(image5d, prefix, ext, channel=None):
    """Export each plane and channel combination into separate 2D image files

    Args:
        image5d (:obj:`np.ndarray`): Image in ``t,z,y,x[,c]`` format.
        prefix (str): Output path template.
        ext (str): Save format given as an extension without period.
        channel (int): Channel to save; defaults to None for all channels.

    """
    output_dir = os.path.dirname(prefix)
    basename = os.path.splitext(os.path.basename(prefix))[0]
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    roi = image5d[0]
    # TODO: option for RGB(A) images, which skimage otherwise assumes
    multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
    num_digits = len(str(len(roi)))
    for i, plane in enumerate(roi):
        path = os.path.join(output_dir,
                            "{}_{:0{}d}".format(basename, i, num_digits))
        if multichannel:
            for chl in channels:
                # save each channel as separate file
                plane_chl = plane[..., chl]
                path_chl = "{}{}{}.{}".format(path, importer.CHANNEL_SEPARATOR,
                                              chl, ext)
                print("Saving image plane {} to {}".format(i, path_chl))
                io.imsave(path_chl, plane_chl)
        else:
            # save single channel plane
            path = "{}.{}".format(path, ext)
            print("Saving image plane {} to {}".format(i, path))
            io.imsave(path, plane)
Example #4
def detect_blobs(roi, channel, exclude_border=None):
    """Detects objects using 3D blob detection technique.
    
    Args:
        roi: Region of interest to segment.
        channel (Sequence[int]): Sequence of channels to select, which can
            be None to indicate all channels.
        exclude_border: Sequence of border pixels in x,y,z to exclude;
            defaults to None.
    
    Returns:
        Array of detected blobs, each given as 
            (z, row, column, radius, confirmation).
    """
    time_start = time()
    shape = roi.shape
    multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
    isotropic = config.get_roi_profile(channels[0])["isotropic"]
    if isotropic is not None:
        # interpolate for (near) isotropy during detection, using only the
        # first channel's profile settings since they apply to the entire ROI
        roi = cv_nd.make_isotropic(roi, isotropic)
    
    blobs_all = []
    for chl in channels:
        roi_detect = roi[..., chl] if multichannel else roi
        settings = config.get_roi_profile(chl)
        # scaling as a factor in pixel/um, where scaling of 1um/pixel  
        # corresponds to factor of 1, and 0.25um/pixel corresponds to
        # 1 / 0.25 = 4 pixels/um; currently simplified to be based on 
        # x scaling alone
        scale = calc_scaling_factor()
        scaling_factor = scale[2]
        
        # find blobs; sigma factors can be sequences by axes for anisotropic 
        # detection in skimage >= 0.15, or images can be interpolated to 
        # isotropy using the "isotropic" MagellanMapper setting
        min_sigma = settings["min_sigma_factor"] * scaling_factor
        max_sigma = settings["max_sigma_factor"] * scaling_factor
        num_sigma = settings["num_sigma"]
        threshold = settings["detection_threshold"]
        overlap = settings["overlap"]
        blobs_log = blob_log(
            roi_detect, min_sigma=min_sigma, max_sigma=max_sigma,
            num_sigma=num_sigma, threshold=threshold, overlap=overlap)
        if config.verbose:
            print("detecting blobs with min size {}, max {}, num std {}, "
                  "threshold {}, overlap {}"
                  .format(min_sigma, max_sigma, num_sigma, threshold, overlap))
            print("time for 3D blob detection: {}".format(time() - time_start))
        if blobs_log.size < 1:
            libmag.printv("no blobs detected")
            continue
        blobs_log[:, 3] = blobs_log[:, 3] * math.sqrt(3)
        blobs = format_blobs(blobs_log, chl)
        #print(blobs)
        blobs_all.append(blobs)
    if not blobs_all:
        return None
    blobs_all = np.vstack(blobs_all)
    if isotropic is not None:
        # if detected on isotropic ROI, need to reposition blob coordinates 
        # for original, non-isotropic ROI
        isotropic_factor = cv_nd.calc_isotropic_factor(isotropic)
        blobs_all = multiply_blob_rel_coords(blobs_all, 1 / isotropic_factor)
        blobs_all = multiply_blob_abs_coords(blobs_all, 1 / isotropic_factor)
    
    if exclude_border is not None:
        # exclude blobs from the border in x,y,z
        blobs_all = get_blobs_interior(blobs_all, shape, *exclude_border)
    
    return blobs_all
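
The sigma-to-radius conversion (multiplying the detected sigma by sqrt(3) for 3D) can be checked with a bare ``blob_log`` call; the synthetic volume, sigma range, and threshold below are arbitrary example values, not profile settings:

import math
import numpy as np
from scipy import ndimage
from skimage.feature import blob_log

# synthetic 3D volume with a single Gaussian blob, normalized to [0, 1]
vol = np.zeros((16, 64, 64))
vol[8, 32, 32] = 1
vol = ndimage.gaussian_filter(vol, 3)
vol /= vol.max()

blobs = blob_log(vol, min_sigma=2, max_sigma=6, num_sigma=5, threshold=0.1)
# columns are z, y, x, sigma; scale sigma by sqrt(3) for an approximate radius
blobs[:, 3] *= math.sqrt(3)
print(blobs)
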
Example #5
def segment_rw(roi,
               channel,
               beta=50.0,
               vmin=0.6,
               vmax=0.65,
               remove_small=None,
               erosion=None,
               blobs=None,
               get_labels=False):
    """Segments an image using the Random-Walker algorithm.
    
    Args:
        roi: Region of interest to segment.
        channel: Channel to pass to :func:``plot_3d.setup_channels``.
        beta: Random-Walker beta term.
        vmin: Intensity below which pixels are marked as known background;
            defaults to 0.6. Ignored if ``blobs`` is given.
        vmax: Intensity at or above which pixels are marked as known
            foreground; defaults to 0.65. Ignored if ``blobs`` is given.
        remove_small: Threshold size of small objects to remove; defaults 
            to None to ignore.
        erosion: Structuring element size for erosion; defaults 
            to None to ignore.
        blobs: Blobs to use for markers; defaults to None, in which 
            case markers will be determined based on ``vmin``/``vmax`` 
            thresholds.
        get_labels: True to measure and return labels from the 
            resulting segmentation instead of returning the segmentations 
            themselves; defaults to False.
    
    Returns:
        List of the Random-Walker segmentations for the given channels.
        If ``get_labels`` is True, the measured labels for the segmented
        regions are returned instead of the segmentations themselves.
    """
    print("Random-Walker based segmentation...")
    labels = []
    walkers = []
    multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
    for i in channels:
        roi_segment = roi[..., i] if multichannel else roi
        if blobs is None:
            # mark unknown pixels as 0 by distinguishing known background
            # and foreground
            markers = np.zeros(roi_segment.shape, dtype=np.uint8)
            markers[roi_segment < vmin] = 2
            markers[roi_segment >= vmax] = 1
        else:
            # derive markers from blobs
            markers = _markers_from_blobs(roi_segment, blobs)

        # perform the segmentation; conjugate gradient with multigrid
        # preconditioner option (cg_mg), which is faster but requires pyamg
        walker = segmentation.random_walker(roi_segment,
                                            markers,
                                            beta=beta,
                                            mode="cg_mg")

        # clean up segmentation

        #lib_clrbrain.show_full_arrays()
        walker = _carve_segs(walker, blobs)
        if remove_small:
            # remove artifacts
            walker = morphology.remove_small_objects(walker, remove_small)
        if erosion:
            # attempt to reduce label connections by eroding
            walker = morphology.erosion(walker, morphology.octahedron(erosion))

        if get_labels:
            # label neighboring pixels to segmented regions
            # TODO: check if necessary; useful only if blobs not given?
            label = measure.label(walker, background=0)
            labels.append(label)
            #print("label:\n", label)

        walkers.append(walker)
        #print("walker:\n", walker)

    if get_labels:
        return labels
    return walkers
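
A reduced sketch of the marker construction and random-walker call, using the pure-Python ``bf`` solver so it runs without pyamg; the synthetic image and threshold values are illustrative only:

import numpy as np
from scipy import ndimage
from skimage import measure, segmentation

# synthetic image: a blurred bright square on a dark background in [0, 1]
img = np.zeros((64, 64))
img[28:36, 28:36] = 1
img = ndimage.gaussian_filter(img, 3)
img /= img.max()

# markers: 2 = known background (below vmin), 1 = known foreground (at/above
# vmax), 0 = unknown pixels left for the walker to assign
vmin, vmax = 0.3, 0.7
markers = np.zeros(img.shape, dtype=np.uint8)
markers[img < vmin] = 2
markers[img >= vmax] = 1

walker = segmentation.random_walker(img, markers, beta=50.0, mode="bf")
labels = measure.label(walker == 1)  # connected foreground regions
print(np.unique(walker), labels.max())
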
Example #6
def export_planes(image5d: np.ndarray,
                  ext: str,
                  channel: Optional[int] = None,
                  separate_chls: bool = False):
    """Export all planes of a 3D+ image into separate 2D image files.
    
    Unlike :meth:`stack_to_img`, this method exports raw planes and
    each channel into separate files, without processing through Matplotlib.
    Supports image rotation set in :attr:`magmap.settings.config.transform`.
    
    By default, all z-planes are exported; a subset of plane indices can be
    specified through :attr:`config.slice_vals`. Alternatively, a region of
    interest can be specified by :attr:`config.roi_offset` and
    :attr:`config.roi_size`. The planar orientation can be configured through
    :attr:`config.plane`.

    Args:
        image5d: Image in ``t,z,y,x[,c]`` format.
        ext: Save format given as an extension without period.
        channel: Channel to save; defaults to None for all channels.
        separate_chls: True to export each channel of each plane to a
            separate image; defaults to False.

    """
    # set up output path
    suffix = "_export" if config.suffix is None else config.suffix
    out_path = libmag.make_out_path(suffix=suffix)
    output_dir = os.path.dirname(out_path)
    basename = os.path.splitext(os.path.basename(out_path))[0]
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # set up image and apply any rotation
    roi = image5d[0]
    multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
    rotate = config.transform[config.Transforms.ROTATE]
    roi = cv_nd.rotate90(roi, rotate, multichannel=multichannel)
    stacker = setup_stack(roi[np.newaxis, :],
                          offset=config.roi_offset,
                          roi_size=config.roi_size,
                          slice_vals=config.slice_vals,
                          rescale=config.transform[config.Transforms.RESCALE])
    roi = stacker.images[0]

    num_planes = len(roi)
    img_sl = stacker.img_slice
    for i, plane in enumerate(roi):
        # add plane to output path
        out_name = f"{basename}_plane_" \
                   f"{plot_support.get_plane_axis(config.plane)}" \
                   f"{img_sl.start + img_sl.step * i}"
        path = os.path.join(output_dir, out_name)
        if separate_chls and multichannel:
            for chl in channels:
                # save each channel as separate file
                plane_chl = plane[..., chl]
                path_chl = "{}{}{}.{}".format(path, importer.CHANNEL_SEPARATOR,
                                              chl, ext)
                print("Saving image plane {} to {}".format(i, path_chl))
                io.imsave(path_chl, plane_chl)
        else:
            # save single channel plane
            path = "{}.{}".format(path, ext)
            print("Saving image plane {} to {}".format(i, path))
            io.imsave(path, plane)
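
The rotation step amounts to rotating every plane about the ``y,x`` axes; a numpy-only sketch of such a 90-degree rotation of a ``z,y,x[,c]`` stack (``cv_nd.rotate90`` itself may handle more cases than this):

import numpy as np

stack = np.zeros((4, 8, 6, 2))  # z, y, x, c

# rotate each plane 90 degrees in the y,x plane, leaving z and channels alone
rotated = np.rot90(stack, k=1, axes=(1, 2))
print(stack.shape, "->", rotated.shape)  # (4, 8, 6, 2) -> (4, 6, 8, 2)
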
Example #7
    def plot_3d_points(self, roi, channel, flipz=False, offset=None):
        """Plots all pixels as points in 3D space.

        Points falling below a given threshold will be removed, allowing
        the viewer to see through the presumed background to masses within
        the region of interest.

        Args:
            roi (:class:`numpy.ndarray`): Region of interest either as a 3D
                ``z,y,x`` or 4D ``z,y,x,c`` array.
            channel (int): Channel to select, which can be None to indicate all
                channels.
            flipz (bool): True to invert the ROI along the z-axis to match
                the handedness of Matplotlib with z progressing upward;
                defaults to False.
            offset (Sequence[int]): Origin coordinates in ``z,y,x``; defaults
                to None.

        Returns:
            bool: True if points were rendered, False if no points to render.
        
        """
        print("Plotting ROI as 3D points")

        # condition the image: saturate, clip, and denoise
        if roi is None or roi.size < 1: return False
        roi = plot_3d.saturate_roi(roi, clip_vmax=98.5, channel=channel)
        roi = np.clip(roi, 0.2, 0.8)
        roi = restoration.denoise_tv_chambolle(roi, weight=0.1)

        # separate parallel arrays for each dimension of all coordinates for
        # Mayavi input format, with the ROI itself given as a 1D scalar array;
        # TODO: consider using np.mgrid to construct the x,y,z arrays
        time_start = time()
        shape = roi.shape
        isotropic = plot_3d.get_isotropic_vis(config.roi_profile)
        z = np.ones((shape[0], shape[1] * shape[2]))
        for i in range(shape[0]):
            z[i] = z[i] * i
        if flipz:
            # invert along z-axis to match handedness of Matplotlib with z up
            z *= -1
            if offset is not None:
                offset = np.copy(offset)
                offset[0] *= -1
        y = np.ones((shape[0] * shape[1], shape[2]))
        for i in range(shape[0]):
            for j in range(shape[1]):
                y[i * shape[1] + j] = y[i * shape[1] + j] * j
        x = np.ones((shape[0] * shape[1], shape[2]))
        for i in range(shape[0] * shape[1]):
            x[i] = np.arange(shape[2])

        if offset is not None:
            offset = np.multiply(offset, isotropic)
        coords = [z, y, x]
        for i, _ in enumerate(coords):
            # scale coordinates for isotropy
            coords[i] *= isotropic[i]
            if offset is not None:
                # translate by offset
                coords[i] += offset[i]

        multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
        for chl in channels:
            roi_show = roi[..., chl] if multichannel else roi
            roi_show_1d = roi_show.reshape(roi_show.size)
            if chl == 0:
                x = np.reshape(x, roi_show.size)
                y = np.reshape(y, roi_show.size)
                z = np.reshape(z, roi_show.size)
            settings = config.get_roi_profile(chl)

            # clear background points to see remaining structures
            thresh = 0
            if len(np.unique(roi_show)) > 1:
                # need > 1 val to threshold
                try:
                    thresh = filters.threshold_otsu(roi_show, 64)
                except ValueError as e:
                    thresh = np.median(roi_show)
                    print("could not determine Otsu threshold, taking median "
                          "({}) instead".format(thresh))
                thresh *= settings["points_3d_thresh"]
            print("removing 3D points below threshold of {}".format(thresh))
            remove = np.where(roi_show_1d < thresh)
            roi_show_1d = np.delete(roi_show_1d, remove)

            # adjust range from 0-1 to region of colormap to use
            roi_show_1d = libmag.normalize(roi_show_1d, 0.6, 1.0)
            points_len = roi_show_1d.size
            if points_len == 0:
                print("no 3D points to display")
                return False
            mask = math.ceil(points_len / self._MASK_DIVIDEND)
            print("points: {}, mask: {}".format(points_len, mask))
            if any(np.isnan(roi_show_1d)):
                # TODO: see if some NaNs are permissible
                print(
                    "NaN values for 3D points, will not show 3D visualization")
                return False
            pts = self.scene.mlab.points3d(np.delete(x, remove),
                                           np.delete(y, remove),
                                           np.delete(z, remove),
                                           roi_show_1d,
                                           mode="sphere",
                                           scale_mode="scalar",
                                           mask_points=mask,
                                           line_width=1.0,
                                           vmax=1.0,
                                           vmin=0.0,
                                           transparent=True)
            cmap = colormaps.get_cmap(config.cmaps, chl)
            if cmap is not None:
                pts.module_manager.scalar_lut_manager.lut.table = cmap(
                    range(0, 256)) * 255

            # scale glyphs to partially fill in gaps from isotropic scaling;
            # do not use actor scaling as it also translates the points when
            # not positioned at the origin
            pts.glyph.glyph.scale_factor = 2 * max(isotropic)

        # keep visual ordering of surfaces when opacity is reduced
        self.scene.renderer.use_depth_peeling = True
        print("time for 3D points display: {}".format(time() - time_start))
        return True
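
As the TODO above notes, the coordinate loops could be built with ``np.mgrid`` instead; a small sketch of the equivalent grids plus isotropic scaling, with arbitrary example values:

import numpy as np

shape = (3, 4, 5)                      # z, y, x
isotropic = np.array([2.0, 1.0, 1.0])  # example scaling per axis

# one coordinate array per axis, each of shape ``shape``
z, y, x = np.mgrid[:shape[0], :shape[1], :shape[2]]
coords = [c.ravel() * s for c, s in zip((z, y, x), isotropic)]
print([c.shape for c in coords])  # three flat arrays of length 3 * 4 * 5
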
Example #8
    def plot_3d_surface(self,
                        roi,
                        channel,
                        segment=False,
                        flipz=False,
                        offset=None):
        """Plots areas with greater intensity as 3D surfaces.

        The scene will be cleared before display.
        
        Args:
            roi (:class:`numpy.ndarray`): Region of interest either as a 3D
                ``z,y,x`` or 4D ``z,y,x,c`` array.
            channel (int): Channel to select, which can be None to indicate all
                channels.
            segment (bool): True to denoise and segment ``roi`` before
                displaying, which may remove artifacts that might otherwise
                lead to spurious surfaces. Defaults to False.
            flipz: True to invert ``roi`` along z-axis to match handedness
                of Matplotlib with z progressing upward; defaults to False.
            offset (Sequence[int]): Origin coordinates in ``z,y,x``; defaults
                to None.

        Returns:
            list: List of Mayavi surfaces for each displayed channel, which
            are also stored in :attr:`surfaces`.

        """
        # Plot in Mayavi
        print("viewing 3D surface")
        pipeline = self.scene.mlab.pipeline
        settings = config.roi_profile
        if flipz:
            # invert along z-axis to match handedness of Matplotlib with z up
            roi = roi[::-1]
            if offset is not None:
                # invert z-offset and translate by ROI z-size so ROI is
                # mirrored across the xy-plane
                offset = np.copy(offset)
                offset[0] = -offset[0] - roi.shape[0]
        isotropic = plot_3d.get_isotropic_vis(settings)

        # saturate to remove noise and normalize values
        roi = plot_3d.saturate_roi(roi, channel=channel)

        # turn off segmentation if the ROI is too big (arbitrarily set here as
        # > 10 million pixels) to avoid a performance hit; such a large region
        # is likely a downsampled image that does not need high resolution
        num_pixels = np.prod(roi.shape)
        to_segment = num_pixels < 10000000

        time_start = time()
        multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
        surfaces = []
        for chl in channels:
            roi_show = roi[..., chl] if multichannel else roi

            # clip to minimize sub-nuclear variation
            roi_show = np.clip(roi_show, 0.2, 0.8)

            if segment:
                # denoising makes for much cleaner images but also seems to
                # allow structures to blend together
                # TODO: consider segmenting individual structures and rendering
                # as separate surfaces to avoid blending
                roi_show = restoration.denoise_tv_chambolle(roi_show,
                                                            weight=0.1)

                # build surface from segmented ROI
                if to_segment:
                    vmin, vmax = np.percentile(roi_show, (40, 70))
                    walker = segmenter.segment_rw(roi_show,
                                                  chl,
                                                  vmin=vmin,
                                                  vmax=vmax)
                    roi_show *= np.subtract(walker[0], 1)
                else:
                    print("deferring segmentation as {} px is above threshold".
                          format(num_pixels))

            # ROI is in (z, y, x) order, so need to transpose or swap x,z axes
            roi_show = np.transpose(roi_show)
            surface = pipeline.scalar_field(roi_show)

            # Contour -> Surface pipeline

            # create the surface
            surface = pipeline.contour(surface)
            # remove many more extraneous points
            surface = pipeline.user_defined(surface,
                                            filter="SmoothPolyDataFilter")
            surface.filter.number_of_iterations = 400
            surface.filter.relaxation_factor = 0.015
            # distinguishing pos vs neg curvatures?
            surface = pipeline.user_defined(surface, filter="Curvatures")
            surface = self.scene.mlab.pipeline.surface(surface)
            module_manager = surface.module_manager
            module_manager.scalar_lut_manager.data_range = np.array([-2, 0])
            module_manager.scalar_lut_manager.lut_mode = "gray"
            '''
            # Surface pipeline with contours enabled (similar to above?)
            surface = pipeline.contour_surface(
                surface, color=(0.7, 1, 0.7), line_width=6.0)
            surface.actor.property.representation = 'wireframe'
            #surface.actor.property.line_width = 6.0
            surface.actor.mapper.scalar_visibility = False
            '''
            '''
            # IsoSurface pipeline

            # uses unique IsoSurface module but appears to have 
            # similar output to contour_surface
            surface = pipeline.iso_surface(surface)

            # limit contours for simpler surfaces including smaller file sizes; 
            # TODO: consider making settable as arg or through profile
            surface.contour.number_of_contours = 1
            try:
                # increase min to further reduce complexity
                surface.contour.minimum_contour = 0.5
                surface.contour.maximum_contour = 0.8
            except Exception as e:
                print(e)
                print("ignoring min/max contour for now")
            '''

            if offset is not None:
                # translate to offset scaled by isotropic factor
                surface.actor.actor.position = np.multiply(offset,
                                                           isotropic)[::-1]
            # scale surfaces, which expands/contracts but does not appear
            # to translate the surface position
            surface.actor.actor.scale = isotropic[::-1]
            surfaces.append(surface)

        # keep visual ordering of surfaces when opacity is reduced
        self.scene.renderer.use_depth_peeling = True
        print("time to render 3D surface: {}".format(time() - time_start))
        self.surfaces = surfaces
        return surfaces
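
For a quick stand-alone check of volume surface rendering without the contour/smoothing pipeline above, Mayavi's higher-level ``contour3d`` can be used; this is only a sketch, it assumes a working Mayavi install with a display, and its parameters are illustrative:

import numpy as np
from scipy import ndimage
from mayavi import mlab

# synthetic blurred point source as the volume to render
vol = np.zeros((32, 32, 32))
vol[16, 16, 16] = 1
vol = ndimage.gaussian_filter(vol, 4)

# render a few iso-contours of the volume as translucent surfaces
mlab.contour3d(vol, contours=3, transparent=True)
mlab.show()
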
Example #9
def overlay_images(ax,
                   aspect,
                   origin,
                   imgs2d,
                   channels,
                   cmaps,
                   alphas=None,
                   vmins=None,
                   vmaxs=None,
                   ignore_invis=False,
                   check_single=False):
    """Show multiple, overlaid images.
    
    Wrapper function calling :meth:`imshow_multichannel` for multiple
    images. The first image is treated as a sample image that may have
    multiple channels. Subsequent images are typically label images,
    which may or may not have multiple channels.
    
    Args:
        ax: Axes.
        aspect: Aspect ratio.
        origin: Image origin.
        imgs2d (List[:obj:`np.ndarray`]): Sequence of 2D images to display,
            where the first image may be 2D+channel.
        channels (List[List[int]]): A nested list of channels to display for
            each image, or None to use :attr:``config.channel`` for the
            first image and 0 for all subsequent images.
        cmaps: Either a single colormap for all images or a list of 
            colormaps corresponding to each image. Colormaps of type 
            :class:`colormaps.DiscreteColormap` will have their 
            normalization object applied as well. If a color is given for
            :obj:`config.AtlasLabels.BINARY` in :attr:`config.atlas_labels`,
            images with :class:`colormaps.DiscreteColormap` will be
            converted to NaN for foreground to use this color.
        alphas: Either a single alpha for all images or a list of 
            alphas corresponding to each image. Defaults to None to use
            :attr:`config.alphas`, filling with 0.9 for any additional
            values required and :attr:`config.plot_labels` for the first value.
        vmins: A list of vmins for each image; defaults to None to use 
            :attr:``config.vmins`` for the first image and None for all others.
        vmaxs: A list of vmaxs for each image; defaults to None to use 
            :attr:``config.vmax_overview`` for the first image and None 
            for all others.
        ignore_invis (bool): True to avoid creating ``AxesImage`` objects
            for images that would be invisible; defaults to False.
        check_single (bool): True to check for images with a single unique
            value displayed with a :class:`colormaps.DiscreteColormap`, which
            will not update for unclear reasons. If found, the final value
            will be incremented by one as a workaround to allow updates.
            Defaults to False.
    
    Returns:
        Nested list containing a list of ``AxesImage`` objects 
        corresponding to display of each ``imgs2d`` image.
    """
    ax_imgs = []
    num_imgs2d = len(imgs2d)
    if num_imgs2d < 1: return None

    # fill default values for each set of 2D images
    img_norm_setting = config.roi_profile["norm"]
    if channels is None:
        # list of first channel for each set of 2D images except config
        # channels for main (first) image
        channels = [[0]] * num_imgs2d
        channels[0] = config.channel
    _, channels_main = plot_3d.setup_channels(imgs2d[0], None, 2)
    if vmins is None:
        vmins = [None] * num_imgs2d
    if vmaxs is None:
        vmaxs = [None] * num_imgs2d
    if alphas is None:
        # start with config alphas and pad the remaining values
        alphas = libmag.pad_seq(config.alphas, num_imgs2d, 0.9)

    for i in range(num_imgs2d):
        # generate a multichannel display image for each 2D image
        img = imgs2d[i]
        if img is None: continue
        cmap = cmaps[i]
        norm = None
        nan_color = config.plot_labels[config.PlotLabels.NAN_COLOR]
        discrete = isinstance(cmap, colormaps.DiscreteColormap)
        if discrete:
            if config.atlas_labels[config.AtlasLabels.BINARY]:
                # binarize copy of labels image plane
                img = np.copy(img)
                img[img != 0] = 1
            # get normalization factor for discrete colormaps and convert
            # the image for this indexing
            img = cmap.convert_img_labels(img)
            norm = [cmap.norm]
            cmap = [cmap]
        alpha = alphas[i]
        vmin = vmins[i]
        vmax = vmaxs[i]
        if i == 0:
            # first image is the main intensity image, potentially multichannel
            len_chls_main = len(channels_main)
            alphas_chl = config.plot_labels[config.PlotLabels.ALPHAS_CHL]
            if alphas_chl is not None:
                alpha = libmag.pad_seq(list(alphas_chl), len_chls_main, 0.5)
            if vmin is None and config.vmins is not None:
                vmin = libmag.pad_seq(list(config.vmins), len_chls_main)
            if vmax is None:
                vmax_fill = config.vmax_overview
                if config.vmaxs is None and img_norm_setting:
                    vmax_fill = [max(img_norm_setting)]
                vmax = libmag.pad_seq(list(vmax_fill), len_chls_main)
            if img_norm_setting:
                # normalize main intensity image
                img = libmag.normalize(img, *img_norm_setting)
        elif not all(np.equal(img.shape[:2], imgs2d[0].shape[:2])):
            # resize the image to the main image's shape if shapes differ in
            # xy; assume that the given image is a labels image whose integer
            # identity values should be preserved
            shape = list(img.shape)
            shape[:2] = imgs2d[0].shape[:2]
            img = transform.resize(
                img, shape, order=0, anti_aliasing=False,
                preserve_range=True, mode="reflect").astype(int)
        if check_single and discrete and len(np.unique(img)) < 2:
            # WORKAROUND: increment the last val of single unique val images
            # shown with a DiscreteColormap (or any ListedColormap) since
            # they otherwise fail to update on subsequent imshow calls
            # for unknown reasons
            img[-1, -1] += 1
        ax_img = imshow_multichannel(ax,
                                     img,
                                     channels[i],
                                     cmap,
                                     aspect,
                                     alpha,
                                     vmin,
                                     vmax,
                                     origin,
                                     interpolation="none",
                                     norms=norm,
                                     nan_color=nan_color,
                                     ignore_invis=ignore_invis)
        ax_imgs.append(ax_img)
    return ax_imgs
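
The label-image resizing branch relies on nearest-neighbor interpolation with a preserved range so integer label identities survive the resize; a small sketch with a hypothetical labels plane:

import numpy as np
from skimage import transform

labels = np.zeros((50, 60), dtype=int)  # hypothetical small labels plane
labels[10:20, 10:20] = 3
labels[30:40, 30:50] = 7

# nearest-neighbor resize so label values stay exact integers
resized = transform.resize(
    labels, (100, 120), order=0, anti_aliasing=False,
    preserve_range=True, mode="reflect").astype(int)
print(np.unique(resized))  # [0 3 7]
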
Example #10
def imshow_multichannel(ax,
                        img2d,
                        channel,
                        cmaps,
                        aspect,
                        alpha=None,
                        vmin=None,
                        vmax=None,
                        origin=None,
                        interpolation=None,
                        norms=None,
                        nan_color=None,
                        ignore_invis=False):
    """Show multichannel 2D image with channels overlaid over one another.

    Applies :attr:`config.transform` with :obj:`config.Transforms.ROTATE`
    to rotate images. If not available, also checks the first element in
    :attr:``config.flip`` to rotate the image by 180 degrees.
    
    Applies :attr:`config.transform` with :obj:`config.Transforms.FLIP_HORIZ`
    and :obj:`config.Transforms.FLIP_VERT` to invert images.

    Args:
        ax: Axes plot.
        img2d: 2D image either as 2D (y, x) or 3D (y, x, channel) array.
        channel: Channel to display; if None, all channels will be shown.
        cmaps: List of colormaps corresponding to each channel. Colormaps 
            can be the names of specific maps in :mod:``config``.
        aspect: Aspect ratio.
        alpha (float, List[float]): Transparency level for all channels or 
            sequence of levels for each channel. If any value is 0, the
            corresponding image will not be output. Defaults to None to use 1.
        vmin (float, List[float]): Scalar or sequence of vmin levels for
            all channels; defaults to None.
        vmax (float, List[float]): Scalar or sequence of vmax levels for
            all channels; defaults to None.
        origin: Image origin; defaults to None.
        interpolation: Type of interpolation; defaults to None.
        norms: List of normalizations, which should correspond to ``cmaps``.
        nan_color (str): String of color to use for NaN values; defaults to
            None to leave these pixels empty.
        ignore_invis (bool): True to give None instead of an ``AxesImage``
            object that would be invisible; defaults to False.
    
    Returns:
        List of ``AxesImage`` objects.
    """
    # assume that 3D array has a channel dimension
    multichannel, channels = plot_3d.setup_channels(img2d, channel, 2)
    img = []
    num_chls = len(channels)
    if alpha is None:
        alpha = 1
    if num_chls > 1 and not libmag.is_seq(alpha):
        # if alphas are not explicitly set per channel, lower the alpha for
        # all channels so that translucency increases with more channels
        alpha /= np.sqrt(num_chls + 1)

    # transform image based on config parameters
    rotate = config.transform[config.Transforms.ROTATE]
    if rotate is not None:
        last_axis = img2d.ndim - 1
        if multichannel:
            last_axis -= 1
        # use first rotation value
        img2d = np.rot90(img2d, libmag.get_if_within(rotate, 0),
                         (last_axis - 1, last_axis))

    for chl in channels:
        img2d_show = img2d[..., chl] if multichannel else img2d
        cmap = None if cmaps is None else cmaps[chl]
        norm = None if norms is None else norms[chl]
        cmap = colormaps.get_cmap(cmap)
        if cmap is not None and nan_color:
            # use the given color for masked values such as NaNs to
            # distinguish them from 0
            cmap.set_bad(color=nan_color)
        # get setting corresponding to the channel index, or use the value
        # directly if it is a scalar
        vmin_plane = libmag.get_if_within(vmin, chl)
        vmax_plane = libmag.get_if_within(vmax, chl)
        alpha_plane = libmag.get_if_within(alpha, chl)
        img_chl = None
        if not ignore_invis or alpha_plane > 0:
            # skip display if alpha is 0 to avoid outputting a hidden image
            # that may show up in other renderers (eg PDF viewers)
            img_chl = ax.imshow(img2d_show,
                                cmap=cmap,
                                norm=norm,
                                aspect=aspect,
                                alpha=alpha_plane,
                                vmin=vmin_plane,
                                vmax=vmax_plane,
                                origin=origin,
                                interpolation=interpolation)
        img.append(img_chl)

    # flip horizontally or vertically by inverting axes
    if config.transform[config.Transforms.FLIP_HORIZ]:
        if not ax.xaxis_inverted():
            ax.invert_xaxis()
    if config.transform[config.Transforms.FLIP_VERT]:
        inverted = ax.yaxis_inverted()
        if (origin in (None, "lower") and inverted) or (origin == "upper"
                                                        and not inverted):
            # invert only if inversion state is same as expected from origin
            # to avoid repeated inversions with repeated calls
            ax.invert_yaxis()

    return img
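
Overlaying channels ultimately reduces to repeated ``ax.imshow`` calls with per-channel colormaps and reduced alpha; a minimal matplotlib sketch of that idea, where the colormap names and alpha heuristic are assumptions for illustration:

import numpy as np
import matplotlib.pyplot as plt

img = np.random.rand(32, 32, 2)  # y, x, channel
cmaps = ["Reds", "Greens"]
num_chls = img.shape[-1]
alpha = 1 / np.sqrt(num_chls + 1)  # more channels -> more translucency

fig, ax = plt.subplots()
for chl in range(num_chls):
    ax.imshow(img[..., chl], cmap=cmaps[chl], alpha=alpha, vmin=0, vmax=1)
plt.show()
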
Example #11
def detect_blobs_stack(filename_base, subimg_offset, subimg_size, coloc=False):
    """Detect blobs in a full stack, such as a whole large image.
    
    Process channels in separate sets of blocks if their profiles specify
    different block sizes.
    
    Args:
        filename_base (str): Base path for file output.
        subimg_offset (Sequence[int]): Sub-image offset as ``z,y,x`` to load
            from :attr:`config.image5d`; defaults to None.
        subimg_size (Sequence[int]): Sub-image size as ``z,y,x`` to load
            from :attr:`config.image5d`; defaults to None.
        coloc (bool): True to also detect blob-colocalizations based on image
            intensity; defaults to False. For match-based colocalizations,
            use the ``coloc_match`` task
            (:meth:`magmap.colocalizer.StackColocalizer.colocalize_stack`)
            instead.

    Returns:
        tuple[int, int, int], str, :class:`magmap.cv.detector.Blobs`:
        Combined accuracy metrics from :func:`magmap.cv.detector.verify_rois`,
        feedback message from this same function, and detected blobs across
        all channels in :attr:`magmap.settings.config.channel`.

    """
    channels = plot_3d.setup_channels(config.image5d, config.channel, 4)[1]
    if roi_prof.ROIProfile.is_identical_settings(
            [config.get_roi_profile(c) for c in channels],
            roi_prof.ROIProfile.BLOCK_SIZES):
        print("Will process channels together in the same blocks")
        channels = [channels]
    else:
        print("Will process channels in separate blocks defined by their "
              "profiles")
    
    cols = ("stats", "fdbk", "blobs")
    detection_out = {}
    for chl in channels:
        # detect blobs in each channel separately unless all channels
        # are combined in a single list
        if not libmag.is_seq(chl):
            chl = [chl]
        blobs_out = detect_blobs_blocks(
            filename_base, config.image5d, subimg_offset, subimg_size,
            chl, config.truth_db_mode is config.TruthDBModes.VERIFY, 
            not config.grid_search_profile, config.image5d_is_roi, coloc)
        for col, val in zip(cols, blobs_out):
            detection_out.setdefault(col, []).append(val)
        print("{}\n".format("-" * 80))
    
    stats = None
    fdbk = None
    blobs_all = None
    if "blobs" in detection_out and detection_out["blobs"]:
        # join blobs and colocalizations from all channels and save archive
        blobs_all = detection_out["blobs"][0]
        blobs_all.blobs = libmag.combine_arrs(
            [b.blobs for b in detection_out["blobs"]
             if b.blobs is not None])
        print("\nTotal blobs found across channels:", len(blobs_all.blobs))
        detector.show_blobs_per_channel(blobs_all.blobs)
        blobs_all.colocalizations = libmag.combine_arrs(
            [b.colocalizations for b in detection_out["blobs"]
             if b.colocalizations is not None])
        blobs_all.save_archive()
        print()
        
        # combine verification stats and feedback messages
        stats = libmag.combine_arrs(
            detection_out["stats"], fn=np.sum)
        fdbk = "\n".join(
            [f for f in detection_out["fdbk"] if f is not None])
    return stats, fdbk, blobs_all
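
Joining per-channel blob arrays and reporting counts per channel comes down to stacking and a unique count; a sketch that assumes, purely for illustration, that the channel index sits in column 4:

import numpy as np

# hypothetical per-channel blob arrays with columns z, y, x, radius, channel
blobs_ch0 = np.array([[1, 2, 3, 2.0, 0], [4, 5, 6, 3.0, 0]])
blobs_ch1 = np.array([[7, 8, 9, 2.5, 1]])

blobs_all = np.vstack([b for b in (blobs_ch0, blobs_ch1) if b is not None])
chls, counts = np.unique(blobs_all[:, 4], return_counts=True)
print("total blobs:", len(blobs_all))
for c, n in zip(chls, counts):
    print("channel {}: {} blobs".format(int(c), n))
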
Example #12
def detect_blobs_large_image(filename_base, image5d, offset, size,
                             verify=False, save_dfs=True, full_roi=False):
    """Detect blobs within a large image through parallel processing of 
    smaller chunks.
    
    Args:
        filename_base: Base path to use for file output.
        image5d: Large image to process as a Numpy array of t,z,y,x[,c].
        offset: Sub-image offset given as coordinates in z,y,x.
        size: Sub-image shape given in z,y,x.
        verify: True to verify detections against truth database; defaults 
            to False.
        save_dfs: True to save data frames to file; defaults to True.
        full_roi (bool): True to treat ``image5d`` as the full ROI; defaults
            to False.
    """
    time_start = time()
    if size is None or offset is None:
        # use the entire stack if no size or offset is specified
        size = image5d.shape[1:4]
        offset = (0, 0, 0)
    else:
        # change base filename for ROI-based partial stack
        filename_base = make_subimage_name(filename_base, offset, size)
    filename_subimg = libmag.combine_paths(filename_base, config.SUFFIX_SUBIMG)
    filename_blobs = libmag.combine_paths(filename_base, config.SUFFIX_BLOBS)
    
    # get ROI for given region, including all channels
    if full_roi:
        # treat the full image as the ROI
        roi = image5d[0]
    else:
        roi = plot_3d.prepare_subimg(image5d, size, offset)
    _, channels = plot_3d.setup_channels(roi, config.channel, 3)
    
    # prepare to chunk the ROI into sub-ROIs sized by segment_size, scaled
    # by physical units to be more independent of resolution
    time_detection_start = time()
    settings = config.roi_profile  # use default settings
    scaling_factor = detector.calc_scaling_factor()
    print("microsope scaling factor based on resolutions: {}"
          .format(scaling_factor))
    denoise_size = config.roi_profile["denoise_size"]
    denoise_max_shape = None
    if denoise_size:
        # further subdivide each sub-ROI for local preprocessing
        denoise_max_shape = np.ceil(
            np.multiply(scaling_factor, denoise_size)).astype(int)

    # overlap sub-ROIs to minimize edge effects
    overlap_base = chunking.calc_overlap()
    tol = np.multiply(overlap_base, settings["prune_tol_factor"]).astype(int)
    overlap_padding = np.copy(tol)
    overlap = np.copy(overlap_base)
    exclude_border = config.roi_profile["exclude_border"]
    if exclude_border is not None:
        # exclude border to avoid blob detector edge effects, where blobs
        # often collect at the faces of the sub-ROI;
        # ensure that overlap is greater than twice the border exclusion per
        # axis so that no plane will be excluded from both overlapping sub-ROIs
        exclude_border_thresh = np.multiply(2, exclude_border)
        overlap_less = np.less(overlap, exclude_border_thresh)
        overlap[overlap_less] = exclude_border_thresh[overlap_less]
        excluded = np.greater(exclude_border, 0)
        overlap[excluded] += 1  # additional padding
        overlap_padding[excluded] = 0  # no need to prune past excluded border
    print("sub-ROI overlap: {}, pruning tolerance: {}, padding beyond "
          "overlap for pruning: {}, exclude borders: {}"
          .format(overlap, tol, overlap_padding, exclude_border))
    max_pixels = np.ceil(np.multiply(
        scaling_factor, 
        config.roi_profile["segment_size"])).astype(int)
    print("preprocessing max shape: {}, detection max pixels: {}"
          .format(denoise_max_shape, max_pixels))
    sub_roi_slices, sub_rois_offsets = chunking.stack_splitter(
        roi.shape, max_pixels, overlap)
    # TODO: option to distribute groups of sub-ROIs to different servers 
    # for blob detection
    seg_rois = detect_blobs_sub_rois(
        roi, sub_roi_slices, sub_rois_offsets, denoise_max_shape, exclude_border)
    detection_time = time() - time_detection_start
    print("blob detection time (s):", detection_time)
    
    # prune blobs in overlapping portions of sub-ROIs
    time_pruning_start = time()
    segments_all, df_pruning = _prune_blobs_mp(
        roi, seg_rois, overlap, tol, sub_roi_slices, sub_rois_offsets, channels,
        overlap_padding)
    pruning_time = time() - time_pruning_start
    print("blob pruning time (s):", pruning_time)
    #print("maxes:", np.amax(segments_all, axis=0))
    
    # get weighted mean of ratios
    if df_pruning is not None:
        print("\nBlob pruning ratios:")
        path_pruning = "blob_ratios.csv" if save_dfs else None
        df_pruning_all = df_io.data_frames_to_csv(
            df_pruning, path_pruning, show=" ")
        cols = df_pruning_all.columns.tolist()
        blob_pruning_means = {}
        if "blobs" in cols:
            blobs_unpruned = df_pruning_all["blobs"]
            num_blobs_unpruned = np.sum(blobs_unpruned)
            for col in cols[1:]:
                blob_pruning_means["mean_{}".format(col)] = [
                    np.sum(np.multiply(df_pruning_all[col], blobs_unpruned)) 
                    / num_blobs_unpruned]
            path_pruning_means = "blob_ratios_means.csv" if save_dfs else None
            df_pruning_means = df_io.dict_to_data_frame(
                blob_pruning_means, path_pruning_means, show=" ")
        else:
            print("no blob ratios found")
    
    '''# report any remaining duplicates
    np.set_printoptions(linewidth=500, threshold=10000000)
    print("all blobs (len {}):".format(len(segments_all)))
    sort = np.lexsort(
        (segments_all[:, 2], segments_all[:, 1], segments_all[:, 0]))
    blobs = segments_all[sort]
    print(blobs)
    print("checking for duplicates in all:")
    print(detector.remove_duplicate_blobs(blobs, slice(0, 3)))
    '''
    
    stats_detection = None
    fdbk = None
    if segments_all is not None:
        # remove the duplicated elements that were used for pruning
        detector.replace_rel_with_abs_blob_coords(segments_all)
        segments_all = detector.remove_abs_blob_coords(segments_all)
        
        # compare detected blobs with truth blobs
        # TODO: assumes ground truth is relative to any ROI offset,
        # but should make customizable
        if verify:
            db_path_base = None
            exp_name = os.path.splitext(os.path.basename(config.filename))[0]
            try:
                if config.truth_db is None:
                    # find and load truth DB based on filename and subimage
                    db_path_base = os.path.basename(filename_base)
                    print("about to verify with truth db from {}"
                          .format(db_path_base))
                    sqlite.load_truth_db(db_path_base)
                if config.truth_db is not None:
                    # truth DB may contain multiple experiments for different
                    # subimages; series not included in exp name since in ROI
                    rois = config.truth_db.get_rois(exp_name)
                    if rois is None:
                        # exp may have been named by ROI
                        print("{} experiment name not found, will try with"
                              "ROI offset/size".format(exp_name))
                        exp_name = make_subimage_name(exp_name, offset, size)
                        rois = config.truth_db.get_rois(exp_name)
                    if rois is None:
                        raise LookupError(
                            "No truth set ROIs found for experiment {}, will "
                            "skip detection verification".format(exp_name))
                    print("load ROIs from exp: {}".format(exp_name))
                    exp_id = sqlite.insert_experiment(
                        config.verified_db.conn, config.verified_db.cur, 
                        exp_name, None)
                    verify_tol = np.multiply(
                        overlap_base, settings["verify_tol_factor"])
                    stats_detection, fdbk = detector.verify_rois(
                        rois, segments_all, config.truth_db.blobs_truth, 
                        verify_tol, config.verified_db, exp_id, config.channel)
            except FileNotFoundError:
                libmag.warn("Could not load truth DB from {}; "
                            "will not verify ROIs".format(db_path_base))
            except LookupError as e:
                libmag.warn(str(e))
    
    file_time_start = time()
    if config.save_subimg:
        if (isinstance(config.image5d, np.memmap) and 
                config.image5d.filename == os.path.abspath(filename_subimg)):
            # file at sub-image save path may have been opened as a memmap
            # file, in which case saving would fail
            libmag.warn("{} is currently open, cannot save sub-image"
                        .format(filename_subimg))
        else:
            # write sub-image, which is in ROI (3D) format
            with open(filename_subimg, "wb") as f:
                np.save(f, roi)

    # save blobs
    # TODO: only segments used; consider removing the rest except ver
    outfile_blobs = open(filename_blobs, "wb")
    np.savez(outfile_blobs, ver=BLOBS_NP_VER, segments=segments_all,
             resolutions=config.resolutions,
             basename=os.path.basename(config.filename),  # only save name
             offset=offset, roi_size=size)  # None unless explicitly set
    outfile_blobs.close()
    file_save_time = time() - file_time_start
    
    # whole image benchmarking time
    times = (
        [detection_time], 
        [pruning_time], 
        time() - time_start)
    times_dict = {}
    for key, val in zip(StackTimes, times):
        times_dict[key] = val
    if segments_all is None:
        print("\nNo blobs detected")
    else:
        print("\nTotal blobs found:", len(segments_all))
        detector.show_blobs_per_channel(segments_all)
    print("file save time:", file_save_time)
    print("\nTotal detection processing times (s):")
    path_times = "stack_detection_times.csv" if save_dfs else None
    df_io.dict_to_data_frame(times_dict, path_times, show=" ")
    
    return stats_detection, fdbk, segments_all
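
The block-wise processing hinges on splitting the ROI into overlapping sub-ROIs; a simplified numpy sketch of that chunking, which trims the logic to the essentials and does not reproduce ``chunking.stack_splitter`` exactly:

import numpy as np

roi = np.random.rand(40, 100, 100)   # z, y, x
max_pixels = np.array([20, 50, 50])  # target sub-ROI shape
overlap = np.array([2, 5, 5])        # extend each sub-ROI into its neighbor

slices = []
for zs in range(0, roi.shape[0], max_pixels[0]):
    for ys in range(0, roi.shape[1], max_pixels[1]):
        for xs in range(0, roi.shape[2], max_pixels[2]):
            stops = np.minimum(
                np.add((zs, ys, xs), max_pixels + overlap), roi.shape)
            slices.append(tuple(
                slice(start, stop)
                for start, stop in zip((zs, ys, xs), stops)))

print(len(slices), "sub-ROIs; first shape:", roi[slices[0]].shape)
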