def test_wrong_number_of_target_names_raises_error():
    """here we request 3 codes but provide only two names, which should raise an error"""
    with pytest.raises(AssertionError):
        Codebook.synthetic_one_hot_codebook(n_round=2,
                                            n_channel=2,
                                            n_codes=3,
                                            target_names=list('ab'))
Example #2
def test_target_names_are_incorporated_into_synthetic_codebook():
    """Verify that target names are incorporated in order."""
    codebook: Codebook = Codebook.synthetic_one_hot_codebook(
        n_round=3,
        n_channel=6,
        n_codes=2,
        target_names=list('ab'),
    )

    assert np.array_equal(codebook[Features.TARGET], list('ab'))
def test_synthetic_one_hot_codebook_returns_requested_codebook():
    """
    Make a request and verify that the size and shape match the request, and
    that each round has only one round 'on'.
    """
    codebook: Codebook = Codebook.synthetic_one_hot_codebook(n_round=4,
                                                             n_channel=2,
                                                             n_codes=3)

    assert codebook.sizes == {Axes.CH: 2, Axes.ROUND: 4, Features.TARGET: 3}
    assert np.all(codebook.sum(Axes.CH.value) ==
                  1), "number of channels 'on' per round != 1"
Example #4
 def codebook(self) -> Codebook:
     return Codebook.synthetic_one_hot_codebook(self.n_round, self.n_ch,
                                                self.n_codes)

 lmp_small = FindSpots.LocalMaxPeakFinder(
     min_distance=args.small_peak_dist,
     stringency=0,
     min_obj_area=args.small_peak_min,
     max_obj_area=args.small_peak_max,
     min_num_spots_detected=2500,
     is_volume=False,
     verbose=False)
 lmp_big = FindSpots.LocalMaxPeakFinder(min_distance=args.big_peak_dist,
                                        stringency=0,
                                        min_obj_area=args.big_peak_min,
                                        max_obj_area=args.big_peak_max,
                                        min_num_spots_detected=2500,
                                        is_volume=False,
                                        verbose=False)
 sd = Codebook.synthetic_one_hot_codebook(n_round=1,
                                          n_channel=1,
                                          n_codes=1)
 decoder = DecodeSpots.PerRoundMaxChannel(codebook=sd)
 block_dim = int(max(imarr.shape) * args.block_dim_fraction)
 SpotCoords = np.zeros((0, 2), dtype=np.int64)
 for i in range(
         0, imarr.shape[-2] - 1, block_dim
 ):  # subtracting 1 from range because starfish breaks with x or y axis size of 1
     for j in range(0, imarr.shape[-1] - 1, block_dim):
         imgs = ImageStack.from_numpy(imarr[:, :, :, i:i + block_dim,
                                            j:j + block_dim])
         imgs = bandpass.run(imgs).reduce({Axes.ZPLANE}, func="max")
         spots = lmp_small.run(imgs)
         decoded_intensities = decoder.run(spots=spots)
         spot_coords_small = np.stack([
             decoded_intensities[Axes.Y.value],
             decoded_intensities[Axes.X.value]
         ]).T
Example #6
import imageio
import numpy as np

from starfish import Codebook, ImageStack
from starfish.image import Filter
from starfish.spots import DecodeSpots, FindSpots
from starfish.types import Axes


def find_spots(input_path,
               output_path,
               intensity_percentile=99.995,
               filter_width=2,
               small_peak_min=4,
               small_peak_max=100,
               big_peak_min=25,
               big_peak_max=10000,
               small_peak_dist=2,
               big_peak_dist=0.75,
               block_dim_fraction=0.25,
               spot_pad_pixels=2,
               keep_existing=False):
    """
    Find and keep only spots from stitched images.

    """

    image_stack = imageio.volread(input_path)

    print(image_stack.shape)
    thresholded_image = np.copy(image_stack)

    _, height, width = image_stack.shape

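    # Compress bright outliers: values above the percentile threshold are
    # replaced by threshold + log (base 1.1) of the excess over the threshold.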
    threshold = np.percentile(thresholded_image, intensity_percentile)
    thresholded_image[thresholded_image > threshold] = threshold + (
        np.log(thresholded_image[thresholded_image > threshold] - threshold) /
        np.log(1.1)).astype(thresholded_image.dtype)

    # May need to fiddle with the sigma parameters in each step, depending on the image.

    # High-pass filter (background subtraction)
    gaussian_high_pass = Filter.GaussianHighPass(sigma=(1, filter_width,
                                                        filter_width),
                                                 is_volume=True)

    # enhance brightness of spots
    laplace_filter = Filter.Laplace(sigma=(0.2, 0.5, 0.5), is_volume=True)
    local_max_peakfinder_small = FindSpots.LocalMaxPeakFinder(
        min_distance=small_peak_dist,
        stringency=0,
        min_obj_area=small_peak_min,
        max_obj_area=small_peak_max,
        min_num_spots_detected=2500,
        is_volume=True,
        verbose=True)

    local_max_peakfinder_big = FindSpots.LocalMaxPeakFinder(
        min_distance=big_peak_dist,
        stringency=0,
        min_obj_area=big_peak_min,
        max_obj_area=big_peak_max,
        min_num_spots_detected=2500,
        is_volume=True,
        verbose=True)

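    # A 1-round, 1-channel, 1-code codebook makes PerRoundMaxChannel act as a
    # plain spot localizer: every detected spot decodes to the single target.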
    synthetic_codebook = Codebook.synthetic_one_hot_codebook(n_round=1,
                                                             n_channel=1,
                                                             n_codes=1)
    decoder = DecodeSpots.PerRoundMaxChannel(codebook=synthetic_codebook)

    block_dimension = int(max(thresholded_image.shape) * block_dim_fraction)
    spot_coordinates = np.zeros((0, 2), dtype=np.int64)

    # Finding spots by block_dimension x block_dimension size blocks
    # We skip the blocks at the edges with the - 1 (TODO: pad to full block size)
    for row in range(0, height - 1, block_dimension):
        for column in range(0, width - 1, block_dimension):
            # Cutout block and expand dimensions for channel and round
            block = thresholded_image[np.newaxis, np.newaxis, :,
                                      row:row + block_dimension,
                                      column:column + block_dimension]
            images = ImageStack.from_numpy(block)
            high_pass_filtered = gaussian_high_pass.run(images,
                                                        verbose=False,
                                                        in_place=False)
            laplace = laplace_filter.run(high_pass_filtered,
                                         in_place=False,
                                         verbose=False)

            small_spots = local_max_peakfinder_small.run(
                laplace.reduce({Axes.ZPLANE}, func="max"))
            decoded_intensities = decoder.run(spots=small_spots)
            small_spot_coords = np.stack([
                decoded_intensities[Axes.Y.value],
                decoded_intensities[Axes.X.value]
            ]).T

            big_spots = local_max_peakfinder_big.run(
                laplace.reduce({Axes.ZPLANE}, func="max"))
            decoded_intensities = decoder.run(spots=big_spots)
            big_spot_coords = np.stack([
                decoded_intensities[Axes.Y.value],
                decoded_intensities[Axes.X.value]
            ]).T

            all_spot_coords = np.vstack([small_spot_coords, big_spot_coords])
            all_spot_coords += (row, column)

            spot_coordinates = np.vstack([spot_coordinates, all_spot_coords])

    # Copying over only non-zero pixels
    image_spots = np.zeros((height, width), dtype=np.uint16)
    for spot_coordinate in spot_coordinates:
        # Coordinates were stacked as (y, x), i.e. (row, column)
        spot_row, spot_column = spot_coordinate
        for row in range(max(0, spot_row - spot_pad_pixels),
                         min(spot_row + spot_pad_pixels + 1, height)):
            for column in range(max(0, spot_column - spot_pad_pixels),
                                min(spot_column + spot_pad_pixels + 1, width)):
                # Max projecting over z-stack
                image_spots[row, column] = image_stack[:, row, column].max(0)

    imageio.imsave(output_path, image_spots)

    return image_spots
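
A minimal usage sketch for find_spots; the file names below are placeholders, and the call assumes input_path points at a single-channel (z, y, x) TIFF volume readable by imageio.volread.

# Hypothetical paths, for illustration only.
spot_image = find_spots("stitched_round1.tif",
                        "round1_spots.tif",
                        intensity_percentile=99.995,
                        block_dim_fraction=0.25)
print(spot_image.shape, spot_image.dtype)   # (height, width), uint16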