Code Example #1
File: zest_imops.py  Project: manastech/plaster
 def it_clips2d_inclusive():
     """
     ssssssssssss
     sttttttttsss
     sttttttttsss
     ssssssssssss
     """
     tar_roi, src_roi = tools.image.coord.clip2d(-1, 5, 10, -2, 4, 15)
     assert tar_roi == ROI(XY(0, 0), WH(5, 4))
     assert src_roi == ROI(XY(1, 2), WH(5, 4))
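The docstring art above sketches a source rectangle hanging off the edges of a target, and the assertions show both returned ROIs clipped to the 5-wide by 4-tall overlap. As a rough, NumPy-only illustration of that clipping idea (this is not the tools.image.coord.clip2d signature, whose argument order is not documented in this listing):

import numpy as np

def clip_paste_slices(tar_shape, src_shape, loc):
    # Illustrative only: overlapping target/source slices when pasting a
    # src array at loc (y, x) into a target, clipping at the target edges.
    tar_sl, src_sl = [], []
    for t, s, o in zip(tar_shape, src_shape, loc):
        start = max(o, 0)       # first target index covered by the source
        stop = min(o + s, t)    # one past the last covered target index
        tar_sl.append(slice(start, stop))
        src_sl.append(slice(start - o, stop - o))
    return tuple(tar_sl), tuple(src_sl)

tar = np.zeros((4, 5))
src = np.arange(150, dtype=float).reshape(15, 10)   # larger than the target
tar_sl, src_sl = clip_paste_slices(tar.shape, src.shape, loc=(-2, -1))
tar[tar_sl] = src[src_sl]   # copies only the 4x5 overlap, as in the test above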
Code Example #2
def extract_with_mask(im, mask, loc, center=False):
    """
    Extracts the values from im at loc using the mask.
    Returns zero where the mask was False
    """
    a = im[ROI(loc, mask.shape, center=center)]
    return np.where(mask, a, 0.0)
Code Example #3
def extract_trace(imstack, loc, dim, center=True):
    """Extract a trace of dim at loc from the stack"""
    imstack = stack(imstack)
    dim = HW(dim)
    loc = YX(loc)
    roi = ROI(loc, dim, center=center)
    return imstack[:, roi[0], roi[1]]
Code Example #4
def extract_with_mask(im, mask, loc, center=False):
    """
    Extracts the values from im at loc using the mask.
    Returns zero where the mask was False
    """
    try:
        a = im[ROI(loc, mask.shape, center=center)]
        return np.where(mask, a, 0.0)
    except ValueError:
        # Dump debug context and re-raise; note that `a` is unbound if the
        # ROI indexing itself raised, so only the inputs are reported here.
        spy(
            loc,
            im.shape,
            mask.shape,
            center,
            ROI(loc, mask.shape, center=center),
        )
        raise
Code Example #5
 def it_bounds_1():
     offsets = np.array(
         [
             [0, 0],
             [-5, -2],
             [-8, 1],
             [-15, 2],
             [-8, -3],
             [-8, -2],
             [-8, -1],
             [-17, -5],
             [-8, -11],
         ],
     )
     raw_dim = (1024, 1024)
     roi = worker._intersection_roi_from_aln_offsets(offsets, raw_dim)
     expected = ROI((17, 11), (1024 - 17, 1024 - 11 - 2))
     assert roi == expected
Code Example #6
def _quality(im):
    """
    Measure the quality of an image with a spatial low-pass filter.
    High-quality images have very little power in the low-frequency
    (but above-DC) bands.
    """
    a = np.copy(im)
    a -= np.mean(a)
    power = np.abs(np.fft.fftshift(np.fft.fft2(a)))
    power[power == 0] = 1

    cen = YX(power.shape) / 2
    dim_half = 3
    dim = HW(dim_half * 2 + 1, dim_half * 2 + 1)
    roi = ROI(cen, dim, center=True)
    im = power[roi]

    eigen = imops.eigen_moments(im)
    score = power.sum() / np.sqrt(eigen.sum())
    return score
Code Example #7
def intersection_roi_from_aln_offsets(aln_offsets, raw_dim):
    """
    Compute the ROI that contains pixels from all frames
    given the aln_offsets (returned from align)
    and the dim of the original images.
    """
    aln_offsets = np.array(aln_offsets)
    check.affirm(np.all(aln_offsets[0] == (0, 0)),
                 "intersection roi must start with (0,0)")

    # intersection_roi is the ROI in the coordinate space of
    # the [0] frame that has pixels from every cycle.
    clip_dim = (
        np.min(aln_offsets[:, 0] + raw_dim[0]) - np.max(aln_offsets[:, 0]),
        np.min(aln_offsets[:, 1] + raw_dim[1]) - np.max(aln_offsets[:, 1]),
    )

    b = max(0, -np.min(aln_offsets[:, 0]))
    t = min(raw_dim[0], b + clip_dim[0])
    l = max(0, -np.min(aln_offsets[:, 1]))
    r = min(raw_dim[1], l + clip_dim[1])
    return ROI(loc=YX(b, l), dim=HW(t - b, r - l))
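For a library-free view of the same bookkeeping, the sketch below redoes the bottom/top/left/right arithmetic with plain tuples instead of ROI/YX/HW and reproduces the expected values from the tests in Examples #5 and #19; treat it as an illustration, not a drop-in replacement.

import numpy as np

def intersection_bounds(aln_offsets, raw_dim):
    # Illustrative only: loc (y, x) and dim (h, w), in frame-0 coordinates,
    # of the region covered by every aligned frame.
    offs = np.array(aln_offsets)
    clip_h = np.min(offs[:, 0] + raw_dim[0]) - np.max(offs[:, 0])
    clip_w = np.min(offs[:, 1] + raw_dim[1]) - np.max(offs[:, 1])
    b = max(0, -np.min(offs[:, 0]))
    t = min(raw_dim[0], b + clip_h)
    l = max(0, -np.min(offs[:, 1]))
    r = min(raw_dim[1], l + clip_w)
    return (b, l), (t - b, r - l)

# Matches the expectation in Example #19: loc (0, 0), dim (9, 9)
print(intersection_bounds([(0, 0), (1, 0), (0, 1)], (10, 10)))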
Code Example #8
def low_frequency_power(im, dim_half=3):
    """
    Measure the low_frequency_power (excluding DC) of an image
    with a spatial low-pass filter.

    dim_half is half the width of the region
    """
    a = np.copy(im)
    a -= np.mean(a)
    power = np.abs(np.fft.fftshift(np.fft.fft2(a)))
    power[power == 0] = 1

    cen = YX(power.shape) / 2

    dim = HW(dim_half * 2 + 1, dim_half * 2 + 1)

    # PLUCK out the center (which is the low frequencies)
    roi = ROI(cen, dim, center=True)
    im = power[roi]
    eigen = eigen_moments(im)
    score = power.sum() / np.sqrt(eigen.sum())
    return score
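A self-contained rough equivalent is below; the only substitution is that plain intensity-weighted second central moments stand in for eigen_moments, which is not shown in this listing, so absolute scores will differ even though the structure is the same.

import numpy as np

def low_frequency_power_sketch(im, dim_half=3):
    # Illustrative only: centered FFT power, with the DC-adjacent window
    # summarized by second central moments (a stand-in for eigen_moments).
    a = im.astype(float) - np.mean(im)
    power = np.abs(np.fft.fftshift(np.fft.fft2(a)))
    power[power == 0] = 1

    cy, cx = power.shape[0] // 2, power.shape[1] // 2
    d = dim_half
    center = power[cy - d : cy + d + 1, cx - d : cx + d + 1]

    ys, xs = np.indices(center.shape)
    w = center / center.sum()
    my, mx = (w * ys).sum(), (w * xs).sum()
    moments = (w * (ys - my) ** 2).sum() + (w * (xs - mx) ** 2).sum()

    return power.sum() / np.sqrt(moments)

score = low_frequency_power_sketch(np.random.standard_normal((64, 64)))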
Code Example #9
 def it_shifts_an_roi():
     roi = ROI(loc=YX(5, 10), dim=HW(15, 20))
     new_roi = roi_shift(roi, YX(-2, -3))
     assert new_roi == ROI(YX(5 - 2, 10 - 3), HW(15, 20))
Code Example #10
 def it_centers_an_roi_with_a_tuple():
     orig_dim = (50, 100)  # Note this is in H, W
     roi = roi_center(orig_dim, percent=0.5)
     expected = ROI(loc=XY(25, 12), dim=WH(50, 25))
     assert roi == expected
Code Example #11
 def it_centers_an_roi_with_a_coord():
     orig_dim = WH(100, 50)
     roi = roi_center(orig_dim, percent=0.5)
     assert roi == ROI(loc=XY(25, 12), dim=WH(50, 25))
Code Example #12
 def it_can_can_slice_an_roi_with_centering():
     roi = ROI(XY(1, 2), WH(2, 4), center=True)
     assert roi[0] == slice(0, 4) and roi[1] == slice(0, 2)
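Judging from this assertion, center=True treats loc as the center of the region, with each slice starting at the center minus half the dimension (integer division), y axis first. A tiny stand-alone sketch of that arithmetic, which reproduces the asserted slices:

def centered_slices(loc_xy, dim_wh):
    # Illustrative only: assumes start = center - dim // 2 along each axis.
    x, y = loc_xy
    w, h = dim_wh
    y0, x0 = y - h // 2, x - w // 2
    return slice(y0, y0 + h), slice(x0, x0 + w)

assert centered_slices((1, 2), (2, 4)) == (slice(0, 4), slice(0, 2))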
Code Example #13
 def it_can_can_slice_and_dice_with_roi():
     dim = WH(10, 10)
     image = np.zeros(dim)
     roi = ROI(XY(1, 1), dim - WH(2, 2))
     cropped_image = image[roi]
     assert np.array_equal(image[1:9, 1:9], cropped_image[:, :])
Code Example #14
    def __init__(
        self,
        n_peaks=1,  # Number of peaks to add
        n_cycles=1,  # Number of frames to make in the stack
        n_channels=1,  # Number of channels.
        dim=(512, 512),  # dims of frames
        bg_bias=0,  # bias of the background
        bg_std=0.5,  # std of the background noise per channel
        peak_focus=1.0,  # The std of the peaks (all will be the same)
        peak_mean=(
            40.0, ),  # Mean of the peak area distribution for each channel
        peak_std=2.0,  # Std of the peak areas distribution
        peak_xs=None,  # Used to put peaks at known locations
        peak_ys=None,  # Used to put peaks at known locations
        peak_area_all_cycles=None,  # Areas for all peaks on each cycle (len == n_cycles)
        peak_area_by_channel_cycle=None,  # Areas, all peaks, channels, cycles (len ==  n_channels * n_cycles * n_peaks)
        grid_distribution=False,  # When True the peaks are laid out in a grid
        all_aligned=False,  # When True all cycles will be aligned
        digitize=False,
        frame_offsets=None,
        anomalies=None,  # int, number of anomalies per image
        random_seed=None,
    ):

        if random_seed is not None:
            np.random.seed(random_seed)

        if not isinstance(bg_bias, tuple):
            bg_bias = tuple([bg_bias] * n_channels)

        if not isinstance(bg_std, tuple):
            bg_std = tuple([bg_std] * n_channels)

        self.bg_bias = bg_bias
        assert len(bg_bias) == n_channels

        self.bg_std = bg_std
        assert len(bg_std) == n_channels

        self.dim = HW(dim)

        self.anomalies = []

        # Peaks are floating point positions (not perfectly aligned with pixels)
        # and the Gaussians are sampled around those points

        self.n_peaks = n_peaks
        self.n_cycles = n_cycles
        self.n_channels = n_channels
        self.peak_mean = peak_mean
        self.peak_std = peak_std
        self.peak_focus = peak_focus

        # unit_peak is only a reference; the real peaks are sub-pixel sampled but will have the same dimensions
        self.unit_peak = imops.generate_gauss_kernel(self.peak_focus)

        self.peak_dim = self.unit_peak.shape[0]

        if peak_xs is not None and peak_ys is not None:
            # Place peaks at specific locations
            self.peak_xs = np.array(peak_xs)
            self.peak_ys = np.array(peak_ys)
        else:
            # Place peaks in patterns or random
            if grid_distribution:
                # Put the peaks on a grid
                n_cols = int(math.sqrt(n_peaks) + 0.5)
                ixs = np.remainder(np.arange(n_peaks), n_cols)
                iys = np.arange(n_peaks) // n_cols
                border = 15
                self.peak_xs = (self.dim.w -
                                border * 2) * ixs / n_cols + border
                self.peak_ys = (self.dim.h -
                                border * 2) * iys / n_cols + border
            else:
                # Distribute the peaks randomly
                self.peak_xs = np.random.uniform(
                    low=self.peak_dim + 1.0,
                    high=self.dim.w - self.peak_dim - 1.0,
                    size=n_peaks,
                )
                self.peak_ys = np.random.uniform(
                    low=self.peak_dim + 1.0,
                    high=self.dim.h - self.peak_dim - 1.0,
                    size=n_peaks,
                )

        self.peak_locs = [XY(x, y) for x, y in zip(self.peak_xs, self.peak_ys)]
        if self.n_peaks > 0:
            peak_dists = distance.cdist(self.peak_locs, self.peak_locs,
                                        "euclidean")
            peak_dists[peak_dists == 0] = 10000.0
            self.closest = peak_dists.min(axis=0)

        self.peak_areas = np.empty((n_channels, n_cycles, n_peaks))
        if peak_area_all_cycles is not None:
            # Use one area for all peaks, all channels for each cycle
            assert len(peak_area_all_cycles) == n_cycles
            for cycle in range(n_cycles):
                self.peak_areas[:, cycle, :] = peak_area_all_cycles[cycle]
        elif peak_area_by_channel_cycle is not None:
            # Specified areas for each peak, each channel, each cycle
            assert peak_area_by_channel_cycle.shape == (n_channels, n_cycles,
                                                        n_peaks)
            self.peak_areas[:] = peak_area_by_channel_cycle[:]
        elif n_peaks > 0:
            # Make random peak areas by channel means
            for channel in range(n_channels):
                self.peak_areas[channel, :, :] = np.random.normal(
                    loc=self.peak_mean[channel],
                    scale=self.peak_std,
                    size=(n_cycles, n_peaks),
                )
                self.peak_areas[channel] = np.clip(self.peak_areas[channel],
                                                   0.0, 1000.0)

        # Frames are integer aligned because this is the best that the aligner would be able to do
        if frame_offsets is None:
            self.frame_xs = np.random.randint(low=-5, high=5, size=n_cycles)
            self.frame_ys = np.random.randint(low=-5, high=5, size=n_cycles)
        else:
            self.frame_xs = [i[1] for i in frame_offsets]
            self.frame_ys = [i[0] for i in frame_offsets]

        # Cycle 0 always has no offset
        self.frame_xs[0] = 0
        self.frame_ys[0] = 0

        if all_aligned:
            self.frame_xs = np.zeros((n_cycles, ))
            self.frame_ys = np.zeros((n_cycles, ))

        self.ims = np.zeros((n_channels, n_cycles, dim[0], dim[1]))
        for cycle in range(n_cycles):
            for channel in range(n_channels):
                # Background has bg_std plus bias
                im = bg_bias[channel] + np.random.normal(
                    size=dim, scale=self.bg_std[channel])

                # Peaks are on the pixel lattice from their floating point positions
                # No signal-proportional noise is added
                for x, y, a in zip(self.peak_xs, self.peak_ys,
                                   self.peak_areas[channel, cycle, :]):
                    # The center of the peak is at the floor pixel and the offset is the fractional part
                    _x = self.frame_xs[cycle] + x
                    _y = self.frame_ys[cycle] + y
                    ix = int(_x + 0.5)
                    iy = int(_y + 0.5)
                    frac_x = _x - ix
                    frac_y = _y - iy
                    g = imops.generate_gauss_kernel(self.peak_focus,
                                                    offset_x=frac_x,
                                                    offset_y=frac_y)
                    imops.accum_inplace(im, g * a, loc=XY(ix, iy), center=True)

                # overwrite with random anomalies if specified
                if anomalies is not None:
                    if cycle == 0:
                        # in cycle 0 pick location and size of anomalies for this image
                        anomalies_for_channel = []
                        self.anomalies += [anomalies_for_channel]
                        for i in range(anomalies):
                            sz = np.random.randint(10, 100, size=2)
                            l = np.random.randint(0, self.dim[0], size=2)
                            anomalies_for_channel += [(l, sz)]
                    for l, sz in self.anomalies[channel]:
                        # in all cycles, vary the location, size, and intensity of the anomaly,
                        # and print it to the image.
                        l = l * (1.0 + np.random.random() / 10.0)
                        sz = sz * (1.0 + np.random.random() / 10.0)
                        im[ROI(
                            l,
                            sz)] = im[ROI(l, sz)] * np.random.randint(2, 20)

                if digitize:
                    im = (im.clip(min=0) + 0.5).astype(
                        np.uint8)  # +0.5 to round up
                else:
                    im = im.clip(min=0)
                self.ims[channel, cycle, :, :] = im
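The per-peak loop above centers each Gaussian on the nearest pixel and pushes the sub-pixel remainder into the kernel offset before accumulating it into the frame. A minimal NumPy-only sketch of that rendering idea (the Gaussian helper and the in-place accumulation are hand-rolled stand-ins for imops.generate_gauss_kernel and imops.accum_inplace, and edge clipping is omitted):

import numpy as np

def gauss_kernel(std, offset_x=0.0, offset_y=0.0, mea=11):
    # Illustrative only: unit-sum 2D Gaussian on a mea x mea grid whose
    # center is shifted by a fractional (offset_y, offset_x).
    ys, xs = np.indices((mea, mea))
    cy, cx = (mea - 1) / 2 + offset_y, (mea - 1) / 2 + offset_x
    g = np.exp(-((ys - cy) ** 2 + (xs - cx) ** 2) / (2 * std ** 2))
    return g / g.sum()

def render_peak(im, x, y, area, std=1.0, mea=11):
    # Accumulate one peak of the given area at a floating point (x, y).
    ix, iy = int(x + 0.5), int(y + 0.5)     # nearest pixel
    frac_x, frac_y = x - ix, y - iy         # sub-pixel remainder
    g = gauss_kernel(std, frac_x, frac_y, mea)
    h = mea // 2
    im[iy - h : iy + h + 1, ix - h : ix + h + 1] += area * g

im = np.zeros((64, 64))
render_peak(im, x=20.3, y=31.8, area=40.0)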
Code Example #15
File: sigproc_v2_result.py  Project: erisyon/plaster
def df_filter(
    df,
    fields=None,
    reject_fields=None,
    roi=None,
    channel_i=0,
    dark=None,
    on_through_cy_i=None,
    off_at_cy_i=None,
    monotonic=None,
    min_intensity_cy_0=None,
    max_intensity_cy_0=None,
    max_intensity_any_cycle=None,
    min_intensity_per_cycle=None,
    max_intensity_per_cycle=None,
    radmat_field="signal",
    max_k=None,
    min_score=None,
):
    """
    A general filtering tool that operates on the dataframe returned by
    sigproc_v2.fields__n_peaks__peaks__radmat()
    """
    n_channels = df.channel_i.max() + 1

    # REMOVE unwanted fields
    if fields is None:
        fields = list(range(df.field_i.max() + 1))
    if reject_fields is not None:
        fields = list(filter(lambda x: x not in reject_fields, fields))
    _df = df[df.field_i.isin(fields)].reset_index(drop=True)

    # REMOVE unwanted peaks by ROI
    if roi is None:
        roi = ROI(YX(0, 0), HW(df.raw_y.max(), df.raw_x.max()))
    _df = _df[(roi[0].start <= _df.raw_y)
              & (_df.raw_y < roi[0].stop)
              & (roi[1].start <= _df.raw_x)
              & (_df.raw_x < roi[1].stop)].reset_index(drop=True)

    if max_k is not None:
        _df = _df[_df.k <= max_k]

    if min_score is not None:
        _df = _df[_df.score >= min_score]

    # OPERATE on radmat if needed
    fields_that_operate_on_radmat = [
        dark,
        on_through_cy_i,
        off_at_cy_i,
        monotonic,
        min_intensity_cy_0,
        max_intensity_cy_0,
        max_intensity_any_cycle,
        min_intensity_per_cycle,
        max_intensity_per_cycle,
    ]

    if any([field is not None for field in fields_that_operate_on_radmat]):
        assert 0 <= channel_i < n_channels

        rad_pt = pd.pivot_table(_df,
                                values=radmat_field,
                                index=["peak_i"],
                                columns=["channel_i", "cycle_i"])
        ch_rad_pt = rad_pt.loc[:, channel_i]
        keep_peaks_mask = np.ones((ch_rad_pt.shape[0], ), dtype=bool)

        if on_through_cy_i is not None:
            assert dark is not None
            keep_peaks_mask &= np.all(
                ch_rad_pt.loc[:, 0:on_through_cy_i + 1] > dark, axis=1)

        if off_at_cy_i is not None:
            assert dark is not None
            keep_peaks_mask &= np.all(ch_rad_pt.loc[:, off_at_cy_i:] < dark,
                                      axis=1)

        if monotonic is not None:
            d = np.diff(ch_rad_pt.values, axis=1)
            keep_peaks_mask &= np.all(d < monotonic, axis=1)

        if min_intensity_cy_0 is not None:
            keep_peaks_mask &= ch_rad_pt.loc[:, 0] >= min_intensity_cy_0

        if max_intensity_cy_0 is not None:
            keep_peaks_mask &= ch_rad_pt.loc[:, 0] <= max_intensity_cy_0

        if max_intensity_any_cycle is not None:
            keep_peaks_mask &= np.all(
                ch_rad_pt.loc[:, :] <= max_intensity_any_cycle, axis=1)

        if min_intensity_per_cycle is not None:
            for cy_i, inten in enumerate(min_intensity_per_cycle):
                if inten is not None:
                    keep_peaks_mask &= ch_rad_pt.loc[:, cy_i] >= inten

        if max_intensity_per_cycle is not None:
            for cy_i, inten in enumerate(max_intensity_per_cycle):
                if inten is not None:
                    keep_peaks_mask &= ch_rad_pt.loc[:, cy_i] <= inten

        keep_peak_i = ch_rad_pt[keep_peaks_mask].index.values
        keep_df = pd.DataFrame(
            dict(keep_peak_i=keep_peak_i)).set_index("keep_peak_i")
        _df = keep_df.join(df.set_index("peak_i", drop=False))

    return _df
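The radmat-dependent filters above all follow the same pattern: pivot the long dataframe into one row per peak with one column per (channel, cycle), select one channel, and AND together per-cycle boolean masks. A compact, self-contained pandas sketch of that pattern on synthetic data (the column names mirror the listing; the threshold values are made up):

import numpy as np
import pandas as pd

# Synthetic long-format radmat: 4 peaks, 1 channel, 3 cycles
rng = np.random.default_rng(0)
df = pd.DataFrame(
    dict(
        peak_i=np.repeat(np.arange(4), 3),
        channel_i=0,
        cycle_i=np.tile(np.arange(3), 4),
        signal=rng.uniform(50.0, 200.0, size=12),
    )
)

# One row per peak, one column per (channel, cycle)
rad_pt = pd.pivot_table(
    df, values="signal", index=["peak_i"], columns=["channel_i", "cycle_i"]
)
ch_rad_pt = rad_pt.loc[:, 0]   # channel 0; columns are now cycle indices

# Keep peaks that stay above a (made up) dark threshold through cycle 1
dark, on_through_cy_i = 100.0, 1
keep = np.all(ch_rad_pt.loc[:, :on_through_cy_i] > dark, axis=1)

keep_df = df[df.peak_i.isin(ch_rad_pt[keep].index)]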
Code Example #16
def _roi_from_edges(b, t, l, r):
    return ROI(loc=YX(b, l), dim=HW(t - b, r - l))
Code Example #17
def _radiometry(im, loc, dim, bg_bias, localbg_mask):
    """
    Arguments:
        im: 2D image (some field, channel, cycle)
        loc: the (y,x) coordinates of the peak
        dim: the integer dimensions of the mask
        bg_bias: the background bias to remove

    Returns:
        signal, noise (or NaN if something goes wrong). Both are always non-negative.
        localbg: The median of the surrounding area

    After a careful analysis considering three types of radiometers, the Kernel Method was chosen.
    The others considered were:
        * Hat method where the signal is the sum(hat_pixel - brim_median).
            * This is fast but scores poorly compared to the Kernel Method,
              and it cannot estimate noise.
        * Fitted method where a 2D Gaussian is fit to the data.
            * This method is very expensive, fails to converge in various edge cases,
              and doesn't score any better than the Kernel Method.
        * Kernel method (chosen) where a center-of-mass calculation is used
          to generate a unit-area Gaussian Kernel which is then used to
          weigh the peak data. Noise can be estimated by squaring that mask and
          then computing residuals.
    """
    assert localbg_mask.shape[0] == dim[0] and localbg_mask.shape[1] == dim[1]
    roi = ROI(loc, dim, center=True)
    nans = (np.nan, np.nan, np.nan)

    # REJECT if too near edges
    if (
        roi[0].start < 0
        or roi[1].start < 0
        or roi[0].stop >= im.shape[0]
        or roi[1].stop >= im.shape[1]
    ):
        return nans

    peak_im = im[roi[0], roi[1]]
    localbg = np.median(peak_im[localbg_mask])

    # CENTER by finding the Center Of Mass (COM)
    positive = np.where(peak_im > 0, peak_im, 0.0)
    if positive.sum() == 0.0:
        # Avoid COM warning from library by testing for all zeros
        return nans

    com = ndimage.measurements.center_of_mass(positive)
    offset_y = com[0] - int(peak_im.shape[0] / 2)
    offset_x = com[1] - int(peak_im.shape[1] / 2)

    if not (-2 <= offset_y <= 2 and -2 <= offset_x <= 2):
        # Data is so poor that the center-of-mass is outside of reasonable bounds
        return nans

    # REMOVE the background
    peak_im = (peak_im - bg_bias).clip(min=0)

    kernel = imops.generate_gauss_kernel(1.0, offset_x, offset_y, mea=peak_im.shape[0])
    kernel_squared = kernel * kernel
    kernel_squared_sum = kernel_squared.sum()

    # WEIGH the data with the kernel and then normalize by the kernel_squared_sum to estimate signal
    weighted = kernel * peak_im
    signal = weighted.sum() / kernel_squared_sum

    # COMPUTE the noise by examining the residuals
    residuals = peak_im - signal * kernel
    var_residuals = np.var(residuals)
    noise = np.sqrt(var_residuals / kernel_squared_sum)
    assert noise >= 0.0

    return max(0, signal), noise, localbg
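The Kernel Method described in the docstring boils down to two expressions: a kernel-weighted sum normalized by sum(kernel^2) for the signal, and the residual variance under the same normalization for the noise. A self-contained sketch of that estimator on a synthetic peak (the Gaussian helper here is an illustrative stand-in for imops.generate_gauss_kernel, and the COM-derived offset is omitted):

import numpy as np

def unit_gauss(mea, std=1.0):
    # Illustrative unit-sum Gaussian kernel on a mea x mea grid.
    ys, xs = np.indices((mea, mea))
    c = (mea - 1) / 2
    g = np.exp(-((ys - c) ** 2 + (xs - c) ** 2) / (2 * std ** 2))
    return g / g.sum()

# Synthetic 11x11 peak: area ~500 plus background noise
true_area, mea = 500.0, 11
peak_im = true_area * unit_gauss(mea) + np.random.normal(0.0, 1.0, (mea, mea))
peak_im = peak_im.clip(min=0)

kernel = unit_gauss(mea)
kernel_squared_sum = (kernel * kernel).sum()

# Signal: kernel-weighted sum, normalized by sum(kernel^2)
signal = (kernel * peak_im).sum() / kernel_squared_sum

# Noise: residual variance after removing the fitted peak, same normalization
residuals = peak_im - signal * kernel
noise = np.sqrt(np.var(residuals) / kernel_squared_sum)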
Code Example #18
def crop(src, off=XY(0, 0), dim=WH(-1, -1), center=False):
    if dim.h == -1 and dim.w == -1:
        dim = HW(src.shape)
    return src[ROI(off, dim, center=center)]
Code Example #19
 def it_returns_the_intersection():
     roi = worker._intersection_roi_from_aln_offsets(
         [(0, 0), (1, 0), (0, 1)], (10, 10)
     )
     assert roi == ROI((0, 0), (9, 9))