Example 1
def preprocess_img(image5d, preprocs, channel, out_path):
    """Pre-process an image in 3D.

    Args:
        image5d (:obj:`np.ndarray`): 5D array in t,z,y,x[,c].
        preprocs (List[:obj:`profiles.PreProcessKeys`]): Sequence of
            pre-processing tasks to perform in the order given.
        channel (int): Channel to preprocess, or None for all channels.
        out_path (str): Output base path.

    Returns:
        :obj:`np.ndarray`: The pre-processed image array, or None if no
        pre-processing tasks are given.

    """
    if preprocs is None:
        print("No preprocessing tasks to perform, skipping")
        return

    roi = image5d[0]
    for preproc in preprocs:
        # perform global pre-processing task
        print("Pre-processing task:", preproc)
        if preproc is profiles.PreProcessKeys.SATURATE:
            roi = plot_3d.saturate_roi(roi, channel=channel)
        elif preproc is profiles.PreProcessKeys.DENOISE:
            roi = plot_3d.denoise_roi(roi, channel)
        elif preproc is profiles.PreProcessKeys.REMAP:
            roi = plot_3d.remap_intensity(roi, channel)
        elif preproc is profiles.PreProcessKeys.ROTATE:
            roi = rotate_img(roi)

    # save to new file
    image5d = importer.roi_to_image5d(roi)
    importer.save_np_image(image5d, out_path)
    return image5d
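
The dispatch pattern above compares each task against enum members in order. A minimal, self-contained sketch of the same idea, using a hypothetical enum and NumPy stand-ins in place of the project's plot_3d helpers:

from enum import Enum, auto

import numpy as np


class PreProcessKeys(Enum):
    # hypothetical stand-in for profiles.PreProcessKeys
    SATURATE = auto()
    DENOISE = auto()


def apply_tasks(roi, tasks):
    # apply each task in the order given, mirroring the loop above
    for task in tasks:
        if task is PreProcessKeys.SATURATE:
            # stand-in for plot_3d.saturate_roi: clip to the 99th percentile
            roi = np.clip(roi, None, np.percentile(roi, 99))
        elif task is PreProcessKeys.DENOISE:
            # stand-in for plot_3d.denoise_roi: average adjacent z-planes
            roi = (roi + np.roll(roi, 1, axis=0)) / 2
    return roi


roi = apply_tasks(np.random.rand(4, 8, 8),
                  [PreProcessKeys.SATURATE, PreProcessKeys.DENOISE])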
Example 2
def preprocess_img(image5d, preprocs, channel, out_path):
    """Pre-process an image in 3D.

    Args:
        image5d (:obj:`np.ndarray`): 5D array in t,z,y,x[,c].
        preprocs (Union[str, list[str]]): Pre-processing task or sequence
            of tasks, converted to :class:`config.PreProcessKeys` enums and
            performed in the order given.
        channel (int): Channel to preprocess, or None for all channels.
        out_path (str): Output base path.

    Returns:
        :obj:`np.ndarray`: The pre-processed image array, or None if no
        pre-processing tasks are given.

    """
    if preprocs is None:
        print("No preprocessing tasks to perform, skipping")
        return
    if not libmag.is_seq(preprocs):
        preprocs = [preprocs]

    roi = image5d[0]
    for preproc in preprocs:
        # perform global pre-processing task
        task = libmag.get_enum(preproc, config.PreProcessKeys)
        _logger.info("Pre-processing task: %s", task)
        if task is config.PreProcessKeys.SATURATE:
            roi = plot_3d.saturate_roi(roi, channel=channel)
        elif task is config.PreProcessKeys.DENOISE:
            roi = plot_3d.denoise_roi(roi, channel)
        elif task is config.PreProcessKeys.REMAP:
            roi = plot_3d.remap_intensity(roi, channel)
        elif task is config.PreProcessKeys.ROTATE:
            roi = rotate_img(roi)
        else:
            _logger.warning("No preprocessing task found for: %s", preproc)

    # save to new file
    image5d = importer.roi_to_image5d(roi)
    importer.save_np_image(image5d, out_path)
    return image5d
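
This version additionally accepts plain strings and single (non-sequence) values; libmag.get_enum and libmag.is_seq are project helpers, but a rough standard-library equivalent of the string-to-enum lookup might look like the following (names here are illustrative, not the actual library API):

from enum import Enum, auto


class PreProcessKeys(Enum):
    SATURATE = auto()
    DENOISE = auto()


def get_enum(name, enum_class):
    # case-insensitive lookup that returns None for unknown names,
    # approximating how libmag.get_enum appears to be used above
    try:
        return enum_class[str(name).upper()]
    except KeyError:
        return None


assert get_enum("saturate", PreProcessKeys) is PreProcessKeys.SATURATE
assert get_enum("unknown", PreProcessKeys) is None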
Example 3
    def plot_3d_points(self, roi, channel, flipz=False, offset=None):
        """Plots all pixels as points in 3D space.

        Points falling below a given threshold will be removed, allowing
        the viewer to see through the presumed background to masses within
        the region of interest.

        Args:
            roi (:class:`numpy.ndarray`): Region of interest either as a 3D
                ``z,y,x`` or 4D ``z,y,x,c`` array.
            channel (int): Channel to select, which can be None to indicate all
                channels.
            flipz (bool): True to invert the ROI along the z-axis to match
                the handedness of Matplotlib with z progressing upward;
                defaults to False.
            offset (Sequence[int]): Origin coordinates in ``z,y,x``; defaults
                to None.

        Returns:
            bool: True if points were rendered, False if no points to render.
        
        """
        print("Plotting ROI as 3D points")

        # streamline the image
        if roi is None or roi.size < 1: return False
        roi = plot_3d.saturate_roi(roi, clip_vmax=98.5, channel=channel)
        roi = np.clip(roi, 0.2, 0.8)
        roi = restoration.denoise_tv_chambolle(roi, weight=0.1)

        # separate parallel arrays for each dimension of all coordinates for
        # Mayavi input format, with the ROI itself given as a 1D scalar array;
        # TODO: consider using np.mgrid to construct the x,y,z arrays
        time_start = time()
        shape = roi.shape
        isotropic = plot_3d.get_isotropic_vis(config.roi_profile)
        z = np.ones((shape[0], shape[1] * shape[2]))
        for i in range(shape[0]):
            z[i] = z[i] * i
        if flipz:
            # invert along z-axis to match handedness of Matplotlib with z up
            z *= -1
            if offset is not None:
                offset = np.copy(offset)
                offset[0] *= -1
        y = np.ones((shape[0] * shape[1], shape[2]))
        for i in range(shape[0]):
            for j in range(shape[1]):
                y[i * shape[1] + j] = y[i * shape[1] + j] * j
        x = np.ones((shape[0] * shape[1], shape[2]))
        for i in range(shape[0] * shape[1]):
            x[i] = np.arange(shape[2])

        if offset is not None:
            offset = np.multiply(offset, isotropic)
        coords = [z, y, x]
        for i, _ in enumerate(coords):
            # scale coordinates for isotropy
            coords[i] *= isotropic[i]
            if offset is not None:
                # translate by offset
                coords[i] += offset[i]

        multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
        for chl in channels:
            roi_show = roi[..., chl] if multichannel else roi
            roi_show_1d = roi_show.reshape(roi_show.size)
            if chl == 0:
                x = np.reshape(x, roi_show.size)
                y = np.reshape(y, roi_show.size)
                z = np.reshape(z, roi_show.size)
            settings = config.get_roi_profile(chl)

            # clear background points to see remaining structures
            thresh = 0
            if len(np.unique(roi_show)) > 1:
                # need > 1 val to threshold
                try:
                    thresh = filters.threshold_otsu(roi_show, 64)
                except ValueError:
                    thresh = np.median(roi_show)
                    print("could not determine Otsu threshold, taking median "
                          "({}) instead".format(thresh))
                thresh *= settings["points_3d_thresh"]
            print("removing 3D points below threshold of {}".format(thresh))
            remove = np.where(roi_show_1d < thresh)
            roi_show_1d = np.delete(roi_show_1d, remove)

            # adjust range from 0-1 to region of colormap to use
            roi_show_1d = libmag.normalize(roi_show_1d, 0.6, 1.0)
            points_len = roi_show_1d.size
            if points_len == 0:
                print("no 3D points to display")
                return False
            mask = math.ceil(points_len / self._MASK_DIVIDEND)
            print("points: {}, mask: {}".format(points_len, mask))
            if any(np.isnan(roi_show_1d)):
                # TODO: see if some NaNs are permissible
                print(
                    "NaN values for 3D points, will not show 3D visualization")
                return False
            pts = self.scene.mlab.points3d(np.delete(x, remove),
                                           np.delete(y, remove),
                                           np.delete(z, remove),
                                           roi_show_1d,
                                           mode="sphere",
                                           scale_mode="scalar",
                                           mask_points=mask,
                                           line_width=1.0,
                                           vmax=1.0,
                                           vmin=0.0,
                                           transparent=True)
            cmap = colormaps.get_cmap(config.cmaps, chl)
            if cmap is not None:
                pts.module_manager.scalar_lut_manager.lut.table = cmap(
                    range(0, 256)) * 255

            # scale glyphs to partially fill in gaps from isotropic scaling;
            # do not use actor scaling as it also translates the points when
            # not positioned at the origin
            pts.glyph.glyph.scale_factor = 2 * max(isotropic)

        # keep visual ordering of surfaces when opacity is reduced
        self.scene.renderer.use_depth_peeling = True
        print("time for 3D points display: {}".format(time() - time_start))
        return True
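
The nested loops that fill the z, y, and x coordinate planes (see the TODO about np.mgrid) can be replaced by a single vectorized call; a NumPy-only sketch with a toy shape:

import numpy as np

shape = (3, 4, 5)  # toy z,y,x ROI shape

# one coordinate array per axis, equivalent to the three loops above;
# cast to float so the arrays can be scaled in place by isotropic factors
z, y, x = [c.astype(float) for c in np.indices(shape)]

# flatten to the parallel 1D arrays that Mayavi's points3d expects
z1d, y1d, x1d = (c.reshape(-1) for c in (z, y, x))

# the loop version repeats the plane index for z, the row index for y,
# and arange(shape[2]) for x; np.indices yields the same values
assert np.array_equal(x1d[:shape[2]], np.arange(shape[2], dtype=float))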
Example 4
    def plot_3d_surface(self,
                        roi,
                        channel,
                        segment=False,
                        flipz=False,
                        offset=None):
        """Plots areas with greater intensity as 3D surfaces.

        The scene will be cleared before display.
        
        Args:
            roi (:class:`numpy.ndarray`): Region of interest either as a 3D
                ``z,y,x`` or 4D ``z,y,x,c`` array.
            channel (int): Channel to select, which can be None to indicate all
                channels.
            segment (bool): True to denoise and segment ``roi`` before
                displaying, which may remove artifacts that might otherwise
                lead to spurious surfaces. Defaults to False.
            flipz: True to invert ``roi`` along z-axis to match handedness
                of Matplotlib with z progressing upward; defaults to False.
            offset (Sequence[int]): Origin coordinates in ``z,y,x``; defaults
                to None.

        Returns:
            list: List of Mayavi surfaces for each displayed channel, which
            are also stored in :attr:`surfaces`.

        """
        # Plot in Mayavi
        print("viewing 3D surface")
        pipeline = self.scene.mlab.pipeline
        settings = config.roi_profile
        if flipz:
            # invert along z-axis to match handedness of Matplotlib with z up
            roi = roi[::-1]
            if offset is not None:
                # invert z-offset and translate by ROI z-size so ROI is
                # mirrored across the xy-plane
                offset = np.copy(offset)
                offset[0] = -offset[0] - roi.shape[0]
        isotropic = plot_3d.get_isotropic_vis(settings)

        # saturate to remove noise and normalize values
        roi = plot_3d.saturate_roi(roi, channel=channel)

        # turn off segmentation if the ROI is too big (arbitrarily set here as
        # > 10 million pixels) to avoid a performance hit; such large ROIs are
        # likely downsampled overviews that do not require high resolution
        num_pixels = np.prod(roi.shape)
        to_segment = num_pixels < 10000000

        time_start = time()
        multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
        surfaces = []
        for chl in channels:
            roi_show = roi[..., chl] if multichannel else roi

            # clip to minimize sub-nuclear variation
            roi_show = np.clip(roi_show, 0.2, 0.8)

            if segment:
                # denoising makes for much cleaner images but also seems to
                # allow structures to blend together
                # TODO: consider segmenting individual structures and rendering
                # as separate surfaces to avoid blending
                roi_show = restoration.denoise_tv_chambolle(roi_show,
                                                            weight=0.1)

                # build surface from segmented ROI
                if to_segment:
                    vmin, vmax = np.percentile(roi_show, (40, 70))
                    walker = segmenter.segment_rw(roi_show,
                                                  chl,
                                                  vmin=vmin,
                                                  vmax=vmax)
                    roi_show *= np.subtract(walker[0], 1)
                else:
                    print("deferring segmentation as {} px is above threshold".
                          format(num_pixels))

            # ROI is in (z, y, x) order, so need to transpose or swap x,z axes
            roi_show = np.transpose(roi_show)
            surface = pipeline.scalar_field(roi_show)

            # Contour -> Surface pipeline

            # create the surface
            surface = pipeline.contour(surface)
            # remove many more extraneous points
            surface = pipeline.user_defined(surface,
                                            filter="SmoothPolyDataFilter")
            surface.filter.number_of_iterations = 400
            surface.filter.relaxation_factor = 0.015
            # distinguishing pos vs neg curvatures?
            surface = pipeline.user_defined(surface, filter="Curvatures")
            surface = self.scene.mlab.pipeline.surface(surface)
            module_manager = surface.module_manager
            module_manager.scalar_lut_manager.data_range = np.array([-2, 0])
            module_manager.scalar_lut_manager.lut_mode = "gray"
            '''
            # Surface pipeline with contours enabled (similar to above?)
            surface = pipeline.contour_surface(
                surface, color=(0.7, 1, 0.7), line_width=6.0)
            surface.actor.property.representation = 'wireframe'
            #surface.actor.property.line_width = 6.0
            surface.actor.mapper.scalar_visibility = False
            '''
            '''
            # IsoSurface pipeline

            # uses unique IsoSurface module but appears to have 
            # similar output to contour_surface
            surface = pipeline.iso_surface(surface)

            # limit contours for simpler surfaces including smaller file sizes; 
            # TODO: consider making settable as arg or through profile
            surface.contour.number_of_contours = 1
            try:
                # increase min to further reduce complexity
                surface.contour.minimum_contour = 0.5
                surface.contour.maximum_contour = 0.8
            except Exception as e:
                print(e)
                print("ignoring min/max contour for now")
            '''

            if offset is not None:
                # translate to offset scaled by isotropic factor
                surface.actor.actor.position = np.multiply(offset,
                                                           isotropic)[::-1]
            # scale surfaces, which expands/contracts but does not appear
            # to translate the surface position
            surface.actor.actor.scale = isotropic[::-1]
            surfaces.append(surface)

        # keep visual ordering of surfaces when opacity is reduced
        self.scene.renderer.use_depth_peeling = True
        print("time to render 3D surface: {}".format(time() - time_start))
        self.surfaces = surfaces
        return surfaces
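
Outside the class, the same contour → smooth → surface pipeline can be driven directly through mayavi.mlab; a minimal sketch, assuming Mayavi is installed and substituting a random toy volume for the saturated, clipped ROI:

import numpy as np
from mayavi import mlab

vol = np.random.rand(32, 32, 32)  # toy volume in place of the ROI

src = mlab.pipeline.scalar_field(vol)
contour = mlab.pipeline.contour(src)
# same VTK smoothing filter the example uses to remove extraneous points
smooth = mlab.pipeline.user_defined(contour, filter="SmoothPolyDataFilter")
smooth.filter.number_of_iterations = 400
smooth.filter.relaxation_factor = 0.015
surf = mlab.pipeline.surface(smooth)
mlab.show()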
Example 5
    def detect_sub_roi(cls, coord, offset, last_coord, denoise_max_shape,
                       exclude_border, sub_roi, channel, img_path=None,
                       coloc=False):
        """Perform 3D blob detection within a sub-ROI without accessing
        class attributes, such as for spawned multiprocessing.
        
        Args:
            coord (Tuple[int]): Coordinate of the sub-ROI in the order z,y,x.
            offset (Tuple[int]): Offset of the sub-ROI within the full ROI,
                in z,y,x.
            last_coord (:obj:`np.ndarray`): See attributes.
            denoise_max_shape (Tuple[int]): See attributes.
            exclude_border (bool): See attributes.
            sub_roi (:obj:`np.ndarray`): Array in which to perform detections.
            channel (Sequence[int]): Sequence of channels, where None detects
                in all channels.
            img_path (str): Path from which to load metadata; defaults to None.
                If given, the command line arguments will be reloaded to
                set up the image and processing parameters.
            coloc (bool): True to perform blob co-localizations; defaults
                to False.
        
        Returns:
            Tuple[int], :obj:`np.ndarray`: The coordinate given back again to
            identify the sub-ROI position and an array of detected blobs.

        """
        if img_path:
            # reload command-line parameters and image metadata, which is
            # required if run from a spawned (not forked) process
            cli.process_cli_args()
            _, orig_info = importer.make_filenames(img_path)
            importer.load_metadata(orig_info)
        print("detecting blobs in sub-ROI at {} of {}, offset {}, shape {}..."
              .format(coord, last_coord, tuple(offset.astype(int)),
                      sub_roi.shape))
        
        if denoise_max_shape is not None:
            # further split sub-ROI for preprocessing locally
            denoise_roi_slices, _ = chunking.stack_splitter(
                sub_roi.shape, denoise_max_shape)
            for z in range(denoise_roi_slices.shape[0]):
                for y in range(denoise_roi_slices.shape[1]):
                    for x in range(denoise_roi_slices.shape[2]):
                        denoise_coord = (z, y, x)
                        denoise_roi = sub_roi[denoise_roi_slices[denoise_coord]]
                        _logger.debug(
                            f"preprocessing sub-sub-ROI {denoise_coord} of "
                            f"{np.subtract(denoise_roi_slices.shape, 1)} "
                            f"(shape {denoise_roi.shape} within sub-ROI shape "
                            f"{sub_roi.shape})")
                        denoise_roi = plot_3d.saturate_roi(
                            denoise_roi, channel=channel)
                        denoise_roi = plot_3d.denoise_roi(
                            denoise_roi, channel=channel)
                        # replace slices with denoised ROI
                        denoise_roi_slices[denoise_coord] = denoise_roi
            
            # re-merge back into the sub-ROI
            merged_shape = chunking.get_split_stack_total_shape(
                denoise_roi_slices)
            merged = np.zeros(
                tuple(merged_shape), dtype=denoise_roi_slices[0, 0, 0].dtype)
            chunking.merge_split_stack2(denoise_roi_slices, None, 0, merged)
            sub_roi = merged
        
        if exclude_border is None:
            exclude = None
        else:
            exclude = np.array([exclude_border, exclude_border])
            exclude[0, np.equal(coord, 0)] = 0
            exclude[1, np.equal(coord, last_coord)] = 0
        segments = detector.detect_blobs(sub_roi, channel, exclude)
        if coloc and segments is not None:
            # co-localize blobs and append to blobs array
            colocs = colocalizer.colocalize_blobs(sub_roi, segments)
            segments = np.hstack((segments, colocs))
        #print("segs before (offset: {}):\n{}".format(offset, segments))
        if segments is not None:
            # shift both coordinate sets (at beginning and end of array) to 
            # absolute positioning, using the latter set to store shifted 
            # coordinates based on duplicates and the former for initial 
            # positions to check for multiple duplicates
            detector.shift_blob_rel_coords(segments, offset)
            detector.shift_blob_abs_coords(segments, offset)
            #print("segs after:\n{}".format(segments))
        return coord, segments
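
The exclude_border handling builds a 2×3 array of low- and high-face margins and zeroes them wherever the sub-ROI touches the edge of the full volume; a self-contained NumPy illustration with made-up coordinates:

import numpy as np

exclude_border = (1, 2, 2)        # z,y,x margins to exclude at each face
coord = np.array([0, 1, 3])       # this sub-ROI's grid position
last_coord = np.array([2, 3, 3])  # grid position of the final sub-ROI

# row 0 holds margins for the low faces, row 1 for the high faces
exclude = np.array([exclude_border, exclude_border])
# keep blobs near faces that lie on the volume boundary, where no
# neighboring sub-ROI would otherwise re-detect them
exclude[0, np.equal(coord, 0)] = 0
exclude[1, np.equal(coord, last_coord)] = 0
print(exclude)  # [[0 2 2]
                #  [1 2 0]]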