Example #1
def it_makes_a_gaussian_kernel_that_has_fractional_position():
    g = imops.generate_gauss_kernel(1.5, 0.5, 0.0, mea=9)
    com = ndi.measurements.center_of_mass(g)
    x_shift = com[1] - g.shape[1] // 2
    y_shift = com[0] - g.shape[0] // 2
    assert x_shift > 0.45 and x_shift < 0.55
    assert y_shift >= -0.01 and y_shift < 0.01
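
The call above implies a signature of generate_gauss_kernel(std, offset_x, offset_y, mea=...). The imops implementation is not shown on this page; the following is only a minimal numpy sketch of what such a fractional-offset, unit-sum kernel could look like (gauss_kernel_sketch is a made-up name):

import numpy as np

def gauss_kernel_sketch(std, offset_x=0.0, offset_y=0.0, mea=9):
    # Sample a 2D Gaussian on a mea x mea grid whose center sits at the
    # middle pixel plus the fractional (offset_x, offset_y), then
    # normalize so the kernel sums to 1.
    half = mea // 2
    ys, xs = np.mgrid[0:mea, 0:mea]
    dy = ys - (half + offset_y)
    dx = xs - (half + offset_x)
    g = np.exp(-(dx ** 2 + dy ** 2) / (2.0 * std ** 2))
    return g / g.sum()

# With offset_x=0.5 the center of mass lands ~0.5 pixel right of the
# middle column, which is what the test above checks.
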
Example #2
def _step_2_align(raw_chcy_ims, sigproc_params):
    """
    Each cycle the stage moves, but it is not perfectly accurate when it returns
    to the same field.

    The stage does _not_ move between channels, so the channels
    for each field at each cycle can be merged to improve alignment.

    Returns:
        A DataFrame of all the results, most importantly the shift_y, shift_x
            needed for each cycle to align the images.
        ch_merged_cy_ims
    """

    n_outchannels, n_inchannels, n_cycles, dim = sigproc_params.channels_cycles_dim

    raw_mask_rects, anomaly_removed_ims = _step_2a_mask_anomalies(
        raw_chcy_ims, sigproc_params)

    medians_by_ch_cy = _step_2b_find_bg_median(raw_chcy_ims, sigproc_params)

    ch_merged_cy_ims = _step_2c_composite_channels(anomaly_removed_ims,
                                                   medians_by_ch_cy,
                                                   sigproc_params)

    # GENERATE fiducial_ims
    kernel = imops.generate_gauss_kernel(1.0)
    kernel = kernel - kernel.mean()  # Eliminate DC bias
    fiducial_ims = np.array(
        [imops.convolve(im.clip(min=0), kernel) for im in ch_merged_cy_ims])

    alignment_offsets = imops.align(fiducial_ims)

    ch_out_to_in = sigproc_params.output_channel_to_input_channel

    field_df = pd.DataFrame([
        dict(
            cycle_i=cy,
            shift_y=off[0],
            shift_x=off[1],
            channel_i=outch,
            bg_median=medians_by_ch_cy[ch_out_to_in(outch), cy],
            n_mask_rects=len(raw_mask_rects[ch_out_to_in(outch)][cy]),
            mask_area=sum([
                rect[2] * rect[3]
                for rect in raw_mask_rects[ch_out_to_in(outch)][cy]
            ]),
        ) for outch in range(n_outchannels)
        for cy, off in zip(range(n_cycles), alignment_offsets)
    ])

    return field_df, ch_merged_cy_ims, raw_mask_rects
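
imops.align is not shown on this page. One standard way to obtain the same kind of per-cycle (shift_y, shift_x) offsets from the fiducial images is phase cross-correlation against cycle 0; a minimal sketch using skimage (align_to_cycle0 is a made-up helper, not the imops implementation):

import numpy as np
from skimage.registration import phase_cross_correlation

def align_to_cycle0(fiducial_cy_ims):
    # Estimate the (dy, dx) shift of every cycle image relative to cycle 0.
    ref = fiducial_cy_ims[0]
    offsets = []
    for im in fiducial_cy_ims:
        shift, _error, _phasediff = phase_cross_correlation(ref, im)
        offsets.append(shift)  # shift needed to register im onto ref
    return np.array(offsets)
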
Example #3
def spotty_images():
    # CREATE a test spot
    spot = imops.generate_gauss_kernel(2.0)
    spot = spot / np.max(spot)

    dim = WH(50, 50)
    spot_locs = [XY(15, 15), XY(10, 20), XY(20, 21)]

    # CREATE test images with spots
    test_images = []
    for loc in spot_locs:
        im = np.zeros(dim)
        # im = np.random.normal(0, 0.1, dim)
        # im = np.ones(dim) * 0.1
        imops.accum_inplace(im, spot, loc=loc, center=True)
        test_images += [im]

    return spot_locs, np.array(test_images)
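
A typical way to consume this fixture is to recover the spot positions from the images and compare them against spot_locs. A small numpy-only sketch (brightest_pixel is made up; note it returns (row, col), i.e. (y, x) order):

import numpy as np

def brightest_pixel(im):
    # The peak of each synthetic image should sit at the accumulated spot center.
    return np.unravel_index(np.argmax(im), im.shape)

# spot_locs, ims = spotty_images()
# [brightest_pixel(im) for im in ims]  # compare with spot_locs (mind x/y order)
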
Example #4
    def __init__(
        self,
        n_peaks=1,  # Number of peaks to add
        n_cycles=1,  # Number of frames to make in the stack
        n_channels=1,  # Number of channels.
        dim=(512, 512),  # dims of frames
        bg_bias=0,  # bias of the background
        bg_std=0.5,  # std of the background noise per channel
        peak_focus=1.0,  # The std of the peaks (all will be the same)
        peak_mean=(40.0,),  # Mean of the peak area distribution for each channel
        peak_std=2.0,  # Std of the peak areas distribution
        peak_xs=None,  # Used to put peaks at known locations
        peak_ys=None,  # Used to put peaks at known locations
        peak_area_all_cycles=None,  # Areas for all peaks on each cycle (len == n_cycles)
        peak_area_by_channel_cycle=None,  # Areas, all peaks, channels, cycles (len ==  n_channels * n_cycles * n_peaks)
        grid_distribution=False,  # When True the peaks are laid out in a grid
        all_aligned=False,  # When True all cycles will be aligned
        digitize=False,
        frame_offsets=None,
        anomalies=None,  # int, number of anomalies per image
        random_seed=None,
    ):

        if random_seed is not None:
            np.random.seed(random_seed)

        if not isinstance(bg_bias, tuple):
            bg_bias = tuple([bg_bias] * n_channels)

        if not isinstance(bg_std, tuple):
            bg_std = tuple([bg_std] * n_channels)

        self.bg_bias = bg_bias
        assert len(bg_bias) == n_channels

        self.bg_std = bg_std
        assert len(bg_std) == n_channels

        self.dim = HW(dim)

        self.anomalies = []

        # Peaks are floating point positions (not perfectly aligned with pixels),
        # and the Gaussians are sampled around those points.

        self.n_peaks = n_peaks
        self.n_cycles = n_cycles
        self.n_channels = n_channels
        self.peak_mean = peak_mean
        self.peak_std = peak_std
        self.peak_focus = peak_focus

        # unit_peak is only a reference; the real peaks are sub-pixel sampled but will have the same dimensions
        self.unit_peak = imops.generate_gauss_kernel(self.peak_focus)

        self.peak_dim = self.unit_peak.shape[0]

        if peak_xs is not None and peak_ys is not None:
            # Place peaks at specific locations
            self.peak_xs = np.array(peak_xs)
            self.peak_ys = np.array(peak_ys)
        else:
            # Place peaks in patterns or random
            if grid_distribution:
                # Put the peaks on a grid
                n_cols = int(math.sqrt(n_peaks) + 0.5)
                ixs = np.remainder(np.arange(n_peaks), n_cols)
                iys = np.arange(n_peaks) // n_cols
                border = 15
                self.peak_xs = (self.dim.w -
                                border * 2) * ixs / n_cols + border
                self.peak_ys = (self.dim.h -
                                border * 2) * iys / n_cols + border
            else:
                # Distribute the peaks randomly
                self.peak_xs = np.random.uniform(
                    low=self.peak_dim + 1.0,
                    high=self.dim.w - self.peak_dim - 1.0,
                    size=n_peaks,
                )
                self.peak_ys = np.random.uniform(
                    low=self.peak_dim + 1.0,
                    high=self.dim.h - self.peak_dim - 1.0,
                    size=n_peaks,
                )

        self.peak_locs = [XY(x, y) for x, y in zip(self.peak_xs, self.peak_ys)]
        if self.n_peaks > 0:
            peak_dists = distance.cdist(self.peak_locs, self.peak_locs,
                                        "euclidean")
            peak_dists[peak_dists == 0] = 10000.0
            self.closest = peak_dists.min(axis=0)

        self.peak_areas = np.empty((n_channels, n_cycles, n_peaks))
        if peak_area_all_cycles is not None:
            # Use one area for all peaks, all channels for each cycle
            assert len(peak_area_all_cycles) == n_cycles
            for cycle in range(n_cycles):
                self.peak_areas[:, cycle, :] = peak_area_all_cycles[cycle]
        elif peak_area_by_channel_cycle is not None:
            # Specified areas for each peak, each channel, each cycle
            assert peak_area_by_channel_cycle.shape == (n_channels, n_cycles,
                                                        n_peaks)
            self.peak_areas[:] = peak_area_by_channel_cycle[:]
        elif n_peaks > 0:
            # Make random peak areas by channel means
            for channel in range(n_channels):
                self.peak_areas[channel, :, :] = np.random.normal(
                    loc=self.peak_mean[channel],
                    scale=self.peak_std,
                    size=(n_cycles, n_peaks),
                )
                self.peak_areas[channel] = np.clip(self.peak_areas[channel],
                                                   0.0, 1000.0)

        # Frames are integer aligned because this is the best that the aligner would be able to do
        if frame_offsets is None:
            self.frame_xs = np.random.randint(low=-5, high=5, size=n_cycles)
            self.frame_ys = np.random.randint(low=-5, high=5, size=n_cycles)
        else:
            self.frame_xs = [i[1] for i in frame_offsets]
            self.frame_ys = [i[0] for i in frame_offsets]

        # Cycle 0 always has no offset
        self.frame_xs[0] = 0
        self.frame_ys[0] = 0

        if all_aligned:
            self.frame_xs = np.zeros((n_cycles, ))
            self.frame_ys = np.zeros((n_cycles, ))

        self.ims = np.zeros((n_channels, n_cycles, dim[0], dim[1]))
        for cycle in range(n_cycles):
            for channel in range(n_channels):
                # Background has bg_std plus bias
                im = bg_bias[channel] + np.random.normal(
                    size=dim, scale=self.bg_std[channel])

                # Peaks are rendered onto the pixel lattice from their floating point positions.
                # No signal-proportional noise is added.
                for x, y, a in zip(self.peak_xs, self.peak_ys,
                                   self.peak_areas[channel, cycle, :]):
                    # The peak center is rounded to the nearest pixel and the
                    # remaining fractional part becomes the kernel offset
                    _x = self.frame_xs[cycle] + x
                    _y = self.frame_ys[cycle] + y
                    ix = int(_x + 0.5)
                    iy = int(_y + 0.5)
                    frac_x = _x - ix
                    frac_y = _y - iy
                    g = imops.generate_gauss_kernel(self.peak_focus,
                                                    offset_x=frac_x,
                                                    offset_y=frac_y)
                    imops.accum_inplace(im, g * a, loc=XY(ix, iy), center=True)

                # overwrite with random anomalies if specified
                if anomalies is not None:
                    if cycle == 0:
                        # in cycle 0 pick location and size of anomalies for this image
                        anomalies_for_channel = []
                        self.anomalies += [anomalies_for_channel]
                        for i in range(anomalies):
                            sz = np.random.randint(10, 100, size=2)
                            l = np.random.randint(0, self.dim[0], size=2)
                            anomalies_for_channel += [(l, sz)]
                    for l, sz in self.anomalies[channel]:
                        # in all cycles, vary the location, size, and intensity
                        # of the anomaly, and write it into the image.
                        l = l * (1.0 + np.random.random() / 10.0)
                        sz = sz * (1.0 + np.random.random() / 10.0)
                        im[ROI(l, sz)] = im[ROI(l, sz)] * np.random.randint(2, 20)

                if digitize:
                    # +0.5 so the uint8 cast rounds to nearest instead of truncating
                    im = (im.clip(min=0) + 0.5).astype(np.uint8)
                else:
                    im = im.clip(min=0)
                self.ims[channel, cycle, :, :] = im
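
The class that owns this __init__ is not named in the snippet; calling it Synth purely for illustration, constructing a small synthetic stack might look like this:

# Hypothetical usage; "Synth" stands in for the class defined above.
s = Synth(
    n_peaks=100,
    n_cycles=3,
    n_channels=2,
    dim=(512, 512),
    bg_std=0.5,
    peak_mean=(40.0, 30.0),  # one mean per channel
    random_seed=42,
)
# s.ims has shape (n_channels, n_cycles, 512, 512);
# s.peak_locs holds the sub-pixel XY position of every simulated peak.
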
Example #5
def _step_4_find_peaks(
    aligned_composite_bg_removed_im,
    aligned_roi_rect,
    raw_mask_rects,
    border_size,
    field_df,
    sigproc_params,
):
    """
    Find peaks on the composite image

    TASK: Remove the mask rect checks and replace with the same masking
    logic that is now implemented in the alignment phase. That is, just remove
    the peaks from the source instead of in post-processing.
    """
    from skimage.feature import peak_local_max  # Defer slow import
    from scipy.stats import iqr

    n_outchannels, n_inchannels, n_cycles, dim = sigproc_params.channels_cycles_dim
    assert (
        aligned_composite_bg_removed_im.shape[0]
        == aligned_composite_bg_removed_im.shape[1]
    )
    aligned_dim, _ = aligned_composite_bg_removed_im.shape
    check.array_t(aligned_composite_bg_removed_im, is_square=True)

    hat_rad = sigproc_params.hat_rad
    brim_rad = sigproc_params.hat_rad + 1
    hat_mask, brim_mask = _hat_masks(hat_rad, brim_rad)

    kernel = imops.generate_gauss_kernel(1.0)
    kernel = kernel - kernel.mean()
    _fiducial_im = imops.convolve(aligned_composite_bg_removed_im, kernel)

    # Black out the convolution artifact around the perimeter of the _fiducial_im
    search_roi_rect = Rect(
        aligned_roi_rect.b + brim_rad,
        aligned_roi_rect.t - brim_rad,
        aligned_roi_rect.l + brim_rad,
        aligned_roi_rect.r - brim_rad,
    )
    search_roi = search_roi_rect.roi()
    composite_fiducial_im = np.zeros_like(aligned_composite_bg_removed_im)

    # Use Inter-Quartile Range for some easy filtering
    _iqr = 0
    if sigproc_params.iqr_rng is not None:
        _iqr = iqr(
            _fiducial_im[search_roi],
            rng=(100 - sigproc_params.iqr_rng, sigproc_params.iqr_rng),
        )

    composite_fiducial_im[search_roi] = (_fiducial_im[search_roi] - _iqr).clip(min=0)

    locs = peak_local_max(
        composite_fiducial_im,
        min_distance=hat_rad,
        threshold_abs=sigproc_params.threshold_abs,
    )

    # Emergency exit to prevent memory overflows
    # check.affirm(len(locs) < 7000, f"Too many peaks {len(locs)}")

    shift = field_df.set_index("cycle_i").sort_index()[["shift_y", "shift_x"]].values
    shift_y = shift[:, 0]
    shift_x = shift[:, 1]

    # Discard any peak in any mask_rect
    # ALIGN the mask rects to the composite coordinate system
    aligned_mask_rects = []
    for channel in range(sigproc_params.n_output_channels):
        channel_rects = safe_list_get(raw_mask_rects, channel, [])
        for cycle in range(n_cycles):
            for rect in safe_list_get(channel_rects, cycle, []):
                yx = XY(rect[0], rect[1])
                hw = WH(rect[2], rect[3])
                yx += XY(border_size, border_size) - XY(shift_x[cycle], shift_y[cycle])
                aligned_mask_rects += [(yx[0], yx[1], yx[0] + hw[0], yx[1] + hw[1])]

    aligned_mask_rects = np.array(aligned_mask_rects)
    if aligned_mask_rects.shape[0] > 0:

        # To compare every loc with every mask rect we use np.ufunc.outer() broadcasting
        y_hits = np.greater_equal.outer(locs[:, 0], aligned_mask_rects[:, 0])
        y_hits &= np.less.outer(locs[:, 0], aligned_mask_rects[:, 2])

        x_hits = np.greater_equal.outer(locs[:, 1], aligned_mask_rects[:, 1])
        x_hits &= np.less.outer(locs[:, 1], aligned_mask_rects[:, 3])

        inside_rect = x_hits & y_hits  # inside a rect if x and y are inside the rect
        locs_to_keep = ~np.any(
            inside_rect, axis=1
        )  # Reject if inside of any masked rect
        locs = locs[locs_to_keep]

    circle_im = np.zeros((aligned_dim, aligned_dim))

    center = aligned_dim / 2

    peak_rows = []
    for field_peak_i, loc in enumerate(locs):
        if sigproc_params.radial_filter is not None:
            radius = math.sqrt((loc[0] - center) ** 2 + (loc[1] - center) ** 2)
            radius /= center
            if radius >= sigproc_params.radial_filter:
                continue

        imops.set_with_mask_in_place(circle_im, brim_mask, 1, loc=loc, center=True)

        peak_rows += [
            Munch(
                peak_i=0,
                field_peak_i=field_peak_i,
                aln_y=int(loc[0]),
                aln_x=int(loc[1]),
            )
        ]

    peak_df = pd.DataFrame(peak_rows)

    return peak_df, circle_im, aligned_mask_rects
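
The peak-finding core above is a matched filter (zero-mean Gaussian kernel convolution) followed by peak_local_max. A stripped-down sketch of the same idea on a single image, using only scipy/skimage (find_peaks_sketch is a made-up name, and imops.convolve is assumed to behave like an ordinary 2D convolution):

import numpy as np
from scipy.signal import fftconvolve
from skimage.feature import peak_local_max

def find_peaks_sketch(im, kernel, min_distance=3, threshold_abs=0.1):
    # Zero-mean the kernel so a flat background scores ~0, filter, then
    # pick local maxima above the absolute threshold.
    kernel = kernel - kernel.mean()
    fiducial_im = fftconvolve(im, kernel, mode="same")
    return peak_local_max(fiducial_im, min_distance=min_distance,
                          threshold_abs=threshold_abs)
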
Example #6
def _radiometry(im, loc, dim, bg_bias, localbg_mask):
    """
    Arguments:
        im: 2D image (some field, channel, cycle)
        loc: the (y,x) coordinates of the peak
        dim: the integer dimensions of the mask
        bg_bias: the background bias to remove

    Returns:
        signal, noise (or NaN if something goes wrong). Both are always non-negative.
        localbg: The median of a surrounding area

    After a careful analysis considering three types of radiometers, the Kernel Method was chosen.
    The others considered were:
        * Hat method where the signal is the sum(hat_pixel - brim_median).
            * This is fast but scores poorly compared to the Kernel Method,
              and it cannot estimate noise.
        * Fitted method where a 2D Gaussian is fit to the data.
            * This method is very expensive, fails to converge in various edge cases,
              and doesn't score any better than the Kernel Method.
        * Kernel method (chosen) where a center-of-mass calculation is used
          to generate a unit-area Gaussian Kernel which is then used to
          weigh the peak data. Noise can be estimated by squaring that mask and
          then computing residuals.
    """
    assert localbg_mask.shape[0] == dim[0] and localbg_mask.shape[1] == dim[1]
    roi = ROI(loc, dim, center=True)
    nans = (np.nan, np.nan, np.nan)

    # REJECT if too near edges
    if (
        roi[0].start < 0
        or roi[1].start < 0
        or roi[0].stop >= im.shape[0]
        or roi[1].stop >= im.shape[1]
    ):
        return nans

    peak_im = im[roi[0], roi[1]]
    localbg = np.median(peak_im[localbg_mask])

    # CENTER by finding the Center Of Mass (COM)
    positive = np.where(peak_im > 0, peak_im, 0.0)
    if positive.sum() == 0.0:
        # Avoid COM warning from library by testing for all zeros
        return nans

    com = ndimage.measurements.center_of_mass(positive)
    offset_y = com[0] - int(peak_im.shape[0] / 2)
    offset_x = com[1] - int(peak_im.shape[1] / 2)

    if not (-2 <= offset_y <= 2 and -2 <= offset_x <= 2):
        # Data is so poor that the center-of-mass is outside of reasonable bounds
        return nans

    # REMOVE the background
    peak_im = (peak_im - bg_bias).clip(min=0)

    kernel = imops.generate_gauss_kernel(1.0, offset_x, offset_y, mea=peak_im.shape[0])
    kernel_squared = kernel * kernel
    kernel_squared_sum = kernel_squared.sum()

    # WEIGH the data with the kernel and then normalize by the kernel_squared_sum to estimate signal
    weighted = kernel * peak_im
    signal = weighted.sum() / kernel_squared_sum

    # COMPUTE the noise by examining the residuals
    residuals = peak_im - signal * kernel
    var_residuals = np.var(residuals)
    noise = np.sqrt(var_residuals / kernel_squared_sum)
    assert noise >= 0.0

    return max(0, signal), noise, localbg
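
The signal estimate above is the least-squares amplitude of a unit-area kernel fit to the peak: sum(kernel * peak_im) / sum(kernel ** 2). A quick numpy sanity sketch of that estimator on synthetic data (not part of the original code):

import numpy as np

rng = np.random.default_rng(0)
mea = 11
ys, xs = np.mgrid[0:mea, 0:mea] - mea // 2
kernel = np.exp(-(xs ** 2 + ys ** 2) / (2.0 * 1.0 ** 2))
kernel /= kernel.sum()  # unit-area, like generate_gauss_kernel

true_amplitude = 500.0
peak_im = true_amplitude * kernel + rng.normal(0.0, 0.1, size=(mea, mea))

signal = (kernel * peak_im).sum() / (kernel ** 2).sum()
# signal recovers ~500 up to the noise level
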
Example #7
def it_makes_a_gaussian_kernel_that_is_odd_square_and_integral_one():
    for std in np.linspace(1.0, 3.0, 5):
        g = imops.generate_gauss_kernel(std)
        assert g.shape[0] & 1 == 1
        assert g.shape[0] == g.shape[1]
        assert np.abs(1.0 - g.sum()) < 0.1