def test_lmpf_uniform_peak():
    data_array = np.zeros(shape=(1, 1, 1, 100, 100), dtype=np.float32)
    data_array[0, 0, 0, 45:55, 45:55] = 1
    imagestack = ImageStack.from_numpy(data_array)

    # the standard local max peak finder should find a spot for every evenly illuminated pixel.
    lmpf_no_kwarg = FindSpots.LocalMaxPeakFinder(1, 1, 1, sys.maxsize)
    peaks = lmpf_no_kwarg.run(imagestack)
    results_no_kwarg = peaks[{Axes.ROUND: 0, Axes.CH: 0}]
    assert len(results_no_kwarg.spot_attrs.data) == 100

    # local max peak finder, capped at one peak per label.
    lmpf_kwarg = FindSpots.LocalMaxPeakFinder(1, 1, 1, sys.maxsize, num_peaks_per_label=1)
    peaks = lmpf_kwarg.run(imagestack)
    results_kwarg = peaks[{Axes.ROUND: 0, Axes.CH: 0}]
    assert len(results_kwarg.spot_attrs.data) == 1
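
# A minimal inspection sketch of the capped run above, assuming only that starfish is
# installed; spot_attrs.data is a pandas DataFrame of per-spot coordinates.
import sys

import numpy as np

from starfish import ImageStack
from starfish.spots import FindSpots
from starfish.types import Axes

uniform_patch = np.zeros(shape=(1, 1, 1, 100, 100), dtype=np.float32)
uniform_patch[0, 0, 0, 45:55, 45:55] = 1
stack = ImageStack.from_numpy(uniform_patch)
peaks = FindSpots.LocalMaxPeakFinder(1, 1, 1, sys.maxsize, num_peaks_per_label=1).run(stack)
spot_table = peaks[{Axes.ROUND: 0, Axes.CH: 0}].spot_attrs.data  # one row per detected spot
print(spot_table)

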
def process_fov(fov: FieldOfView, codebook: Codebook) -> DecodedIntensityTable:
    """Process a single field of view of ISS data

    Parameters
    ----------
    fov : FieldOfView
        the field of view to process
    codebook : Codebook
        the Codebook to use for decoding

    Returns
    -------
    DecodedIntensityTable :
        table containing the locations and decoded target identities of detected spots.
    """

    # note the structure of the 5D tensor containing the raw imaging data
    imgs = fov.get_image(FieldOfView.PRIMARY_IMAGES)
    dots = fov.get_image("dots")
    nuclei = fov.get_image("nuclei")

    print("Learning Transform")
    learn_translation = LearnTransform.Translation(reference_stack=dots,
                                                   axes=Axes.ROUND,
                                                   upsampling=1000)
    transforms_list = learn_translation.run(
        imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max"))

    print("Applying transform")
    warp = ApplyTransform.Warp()
    registered_imgs = warp.run(imgs,
                               transforms_list=transforms_list,
                               verbose=True)

    print("Filter WhiteTophat")
    filt = Filter.WhiteTophat(masking_radius=15, is_volume=False)

    filtered_imgs = filt.run(registered_imgs, verbose=True)
    filt.run(dots, verbose=True, in_place=True)
    filt.run(nuclei, verbose=True, in_place=True)

    print("Detecting")
    detector = FindSpots.BlobDetector(
        min_sigma=1,
        max_sigma=10,
        num_sigma=30,
        threshold=0.01,
        measurement_type='mean',
    )
    dots_max = dots.reduce((Axes.ROUND, Axes.ZPLANE),
                           func="max",
                           module=FunctionSource.np)
    spots = detector.run(image_stack=filtered_imgs, reference_image=dots_max)

    print("Decoding")
    decoder = DecodeSpots.PerRoundMaxChannel(codebook=codebook)
    decoded = decoder.run(spots=spots)
    return decoded
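
# A minimal usage sketch for process_fov, assuming the ISS test data that ships with starfish
# and the imports elided from the snippet above (starfish, FieldOfView, LearnTransform,
# ApplyTransform, Filter, FindSpots, DecodeSpots, Axes, FunctionSource) are in place.
from starfish import data

experiment = data.ISS(use_test_data=True)
decoded_table = process_fov(experiment.fov(), experiment.codebook)
df = decoded_table.to_decoded_dataframe()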
Example #3
def find_spots(imgs, dots):

    p = FindSpots.BlobDetector(
        min_sigma=1,
        max_sigma=10,
        num_sigma=30,
        threshold=0.01,
        measurement_type='mean',
    )

    intensities = p.run(image_stack=imgs, reference_image=dots)
    return intensities
Example #4
def iss_pipeline(fov, codebook):
    primary_image = fov.get_image(starfish.FieldOfView.PRIMARY_IMAGES)

    # register the raw image
    learn_translation = LearnTransform.Translation(
        reference_stack=fov.get_image('dots'), axes=Axes.ROUND, upsampling=100)
    transforms_list = learn_translation.run(
        primary_image.reduce({Axes.CH, Axes.ZPLANE}, func="max"))
    warp = ApplyTransform.Warp()
    registered = warp.run(primary_image,
                          transforms_list=transforms_list,
                          in_place=False,
                          verbose=True)

    # filter raw data
    masking_radius = 15
    filt = Filter.WhiteTophat(masking_radius, is_volume=False)
    filtered = filt.run(registered, verbose=True, in_place=False)

    bd = FindSpots.BlobDetector(
        min_sigma=1,
        max_sigma=10,
        num_sigma=30,
        threshold=0.01,
        measurement_type='mean',
    )

    # detect spots using laplacian of gaussians approach
    dots_max = fov.get_image('dots').reduce((Axes.ROUND, Axes.ZPLANE),
                                            func="max",
                                            module=FunctionSource.np)
    # locate spots in a reference image
    spots = bd.run(reference_image=dots_max, image_stack=filtered)

    # decode the pixel traces using the codebook
    decoder = DecodeSpots.PerRoundMaxChannel(codebook=codebook)
    decoded = decoder.run(spots=spots)

    # segment cells
    seg = Segment.Watershed(
        nuclei_threshold=.16,
        input_threshold=.22,
        min_distance=57,
    )
    label_image = seg.run(primary_image, fov.get_image('dots'))

    # assign spots to cells
    ta = AssignTargets.Label()
    assigned = ta.run(label_image, decoded)

    return assigned, label_image
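
# A minimal usage sketch for iss_pipeline, assuming the ISS test data that ships with starfish
# and the imports elided from the snippet above are in place.
experiment = starfish.data.ISS(use_test_data=True)
assigned_spots, label_image = iss_pipeline(experiment.fov(), experiment.codebook)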
Example #5
def test_allen_smFISH_cropped_data():

    # set random seed to avoid errors provoked by optimization functions
    np.random.seed(777)

    # load the experiment
    experiment = starfish.data.allen_smFISH(use_test_data=True)

    primary_image = experiment.fov().get_image(FieldOfView.PRIMARY_IMAGES)

    clip = Filter.Clip(p_min=10, p_max=100)
    clipped_image = clip.run(primary_image, in_place=False)

    bandpass = Filter.Bandpass(lshort=0.5, llong=7, threshold=0.0, truncate=4)
    bandpassed_image = bandpass.run(clipped_image, in_place=False)

    clip = Filter.Clip(p_min=10, p_max=100, is_volume=False)
    clipped_bandpassed_image = clip.run(bandpassed_image, in_place=False)

    sigma = (1, 0, 0)  # filter only in z, do nothing in x, y
    glp = Filter.GaussianLowPass(sigma=sigma, is_volume=True)
    z_filtered_image = glp.run(clipped_bandpassed_image, in_place=False)

    tlmpf = FindSpots.TrackpyLocalMaxPeakFinder(
        spot_diameter=5,  # must be odd integer
        min_mass=0.02,
        max_size=2,  # this is max radius
        separation=7,
        noise_size=0.65,  # this is not used because preprocess is False
        preprocess=False,
        percentile=10,
        # this is irrelevant when min_mass, spot_diameter, and max_size are set properly
        verbose=True,
        is_volume=True,
    )
    spots = tlmpf.run(z_filtered_image)  # noqa

    decoder = starfish.spots.DecodeSpots.PerRoundMaxChannel(
        codebook=experiment.codebook,
        trace_building_strategy=TraceBuildingStrategies.SEQUENTIAL
    )
    decoder.run(spots=spots)
Example #6
masking_radius = 15
filt = Filter.WhiteTophat(masking_radius, is_volume=False)
filt.run(imgs, in_place=True)
filt.run(dots, in_place=True)

# register primary images to reference round
learn_translation = LearnTransform.Translation(reference_stack=dots,
                                               axes=Axes.ROUND,
                                               upsampling=1000)
transforms_list = learn_translation.run(
    imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max"))
warp = ApplyTransform.Warp()
warp.run(imgs, transforms_list=transforms_list, in_place=True)

# run blob detector on dots (reference image with every spot)
bd = FindSpots.BlobDetector(
    min_sigma=1,
    max_sigma=10,
    num_sigma=30,
    threshold=0.01,
    measurement_type='mean',
)
dots_max = dots.reduce((Axes.ROUND, Axes.ZPLANE), func="max")
spots = bd.run(image_stack=imgs, reference_image=dots_max)

# Decode spots with PerRoundMaxChannel
from starfish.spots import DecodeSpots
decoder = DecodeSpots.PerRoundMaxChannel(
    codebook=experiment.codebook,
    trace_building_strategy=TraceBuildingStrategies.EXACT_MATCH)
decoded_intensities = decoder.run(spots=spots)
Example #7
from starfish.image import Filter
from starfish.spots import FindSpots

experiment = data.allen_smFISH(use_test_data=True)
img = experiment['fov_001'].get_image(FieldOfView.PRIMARY_IMAGES)

# filter to remove noise, remove background, blur, and clip
bandpass = Filter.Bandpass(lshort=.5, llong=7, threshold=0.0)
glp = Filter.GaussianLowPass(
    sigma=(1, 0, 0),
    is_volume=True
)
clip1 = Filter.Clip(p_min=50, p_max=100)
clip2 = Filter.Clip(p_min=99, p_max=100, is_volume=True)
clip1.run(img, in_place=True)
bandpass.run(img, in_place=True)
glp.run(img, in_place=True)
clip2.run(img, in_place=True)


tlmpf = FindSpots.TrackpyLocalMaxPeakFinder(
    spot_diameter=5,  # must be odd integer
    min_mass=0.02,
    max_size=2,  # this is max radius
    separation=7,
    preprocess=False,
    percentile=10,  # this has no effect when min_mass, spot_diameter, and max_size are set properly
    verbose=True,
)
spots = tlmpf.run(img)
Example #8
fov = experiment.fov()
imgs = fov.get_image(FieldOfView.PRIMARY_IMAGES) # primary images
dots = fov.get_image("dots")  # reference round where every spot is labeled with a fluorophore

# filter raw data
masking_radius = 15
filt = Filter.WhiteTophat(masking_radius, is_volume=False)
filt.run(imgs, in_place=True)
filt.run(dots, in_place=True)

# register primary images to reference round
learn_translation = LearnTransform.Translation(reference_stack=dots, axes=Axes.ROUND, upsampling=1000)
transforms_list = learn_translation.run(imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max"))
warp = ApplyTransform.Warp()
warp.run(imgs, transforms_list=transforms_list, in_place=True)

# view dots to estimate the radius of spots: radii range from 1.5 to 4 pixels
imshow_plane(dots, {Axes.X: (500, 550), Axes.Y: (600, 650)})

# run blob detector with dots as reference image
# following guideline of sigma = radius/sqrt(2) for 2D images
# threshold is set conservatively low
bd = FindSpots.BlobDetector(
    min_sigma=1,
    max_sigma=3,
    num_sigma=10,
    threshold=0.01,
    is_volume=False,
    measurement_type='mean',
)
spots = bd.run(image_stack=imgs, reference_image=dots)
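
# A worked check of the guideline quoted in the comments above: for 2D images,
# sigma = radius / sqrt(2), so the 1.5-4 pixel radii estimated from the dots image map onto
# the min_sigma=1 / max_sigma=3 settings passed to the BlobDetector.
import numpy as np

for radius in (1.5, 4.0):
    print(radius, radius / np.sqrt(2))  # -> ~1.06 and ~2.83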
Example #9

####################################################################################################
# The ``dots`` reference image shows spots are approximately 10 pixels in diameter and can be
# tightly packed together. This helped inform the parameter settings of the spot finders below.
# However, the accuracy can always be improved by further tuning parameters. For example,
# if low intensity background noise is being detected as spots, increasing values for
# ``threshold``, ``stringency``, ``min_mass``, and ``percentile`` can remove them. If large spots
# are being missed, increasing values such as ``max_sigma``, ``max_obj_area``, ``spot_diameter``,
# and ``max_size`` could include them. Moreover, signal enhancement and background reduction
# prior to this step can also improve accuracy of spot finding.

bd = FindSpots.BlobDetector(
    min_sigma=2,
    max_sigma=6,
    num_sigma=20,
    threshold=0.1,
    is_volume=True,
    measurement_type='mean',
)

lmp = FindSpots.LocalMaxPeakFinder(min_distance=2,
                                   stringency=8,
                                   min_obj_area=6,
                                   max_obj_area=600,
                                   is_volume=True)

tlmpf = FindSpots.TrackpyLocalMaxPeakFinder(
    spot_diameter=11,
    min_mass=0.2,
    max_size=8,
    separation=3,
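
# The TrackpyLocalMaxPeakFinder call above is cut off mid-argument list. A plausible
# completion, assuming the remaining arguments mirror the fuller TrackpyLocalMaxPeakFinder
# examples earlier on this page (they are not recovered from this example):
tlmpf = FindSpots.TrackpyLocalMaxPeakFinder(
    spot_diameter=11,
    min_mass=0.2,
    max_size=8,
    separation=3,
    preprocess=False,
    percentile=10,
    verbose=True,
    is_volume=True,
)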
Example #10
     imageio.imsave(args.out_path, imarr.max(0))
 else:
     imarr_orig = np.copy(imarr)
     #adds a "channel" dimension
     imarr = np.expand_dims(imarr, axis=0)
     #adds a "round" dimension
     imarr = np.expand_dims(imarr, axis=0)
     thresh = np.percentile(imarr, args.intensity_ptile)
     imarr[imarr > thresh] = thresh + (
         np.log(imarr[imarr > thresh] - thresh) / np.log(1.1)).astype(
             imarr.dtype)
     bandpass = Filter.Bandpass(lshort=.5, llong=7, threshold=0.0)
     lmp_small = FindSpots.LocalMaxPeakFinder(
         min_distance=args.small_peak_dist,
         stringency=0,
         min_obj_area=args.small_peak_min,
         max_obj_area=args.small_peak_max,
         min_num_spots_detected=2500,
         is_volume=False,
         verbose=False)
     lmp_big = FindSpots.LocalMaxPeakFinder(min_distance=args.big_peak_dist,
                                            stringency=0,
                                            min_obj_area=args.big_peak_min,
                                            max_obj_area=args.big_peak_max,
                                            min_num_spots_detected=2500,
                                            is_volume=False,
                                            verbose=False)
     sd = Codebook.synthetic_one_hot_codebook(n_round=1,
                                              n_channel=1,
                                              n_codes=1)
     decoder = DecodeSpots.PerRoundMaxChannel(codebook=sd)
     block_dim = int(max(imarr.shape) * args.block_dim_fraction)
Example #11
    :py:class:`.LocalMaxPeakFinder` is not compatible with cropped data sets.

"""

# Load osmFISH experiment
from starfish import FieldOfView, data
from starfish.image import Filter
from starfish.spots import DecodeSpots, FindSpots
from starfish.types import Axes, TraceBuildingStrategies
experiment = data.osmFISH(use_test_data=True)
imgs = experiment["fov_000"].get_image(FieldOfView.PRIMARY_IMAGES)

# filter raw data
filter_ghp = Filter.GaussianHighPass(sigma=(1,8,8), is_volume=True)
filter_laplace = Filter.Laplace(sigma=(0.2, 0.5, 0.5), is_volume=True)
filter_ghp.run(imgs, in_place=True)
filter_laplace.run(imgs, in_place=True)

# z project
max_imgs = imgs.reduce({Axes.ZPLANE}, func="max")

# run LocalMaxPeakFinder on max projected image
lmp = FindSpots.LocalMaxPeakFinder(
    min_distance=6,
    stringency=0,
    min_obj_area=6,
    max_obj_area=600,
    is_volume=True
)
spots = lmp.run(max_imgs)
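
# The DecodeSpots and TraceBuildingStrategies imports above are unused in this truncated
# snippet. One plausible continuation, assuming the same sequential decoding pattern as the
# smFISH example earlier on this page:
decoder = DecodeSpots.PerRoundMaxChannel(
    codebook=experiment.codebook,
    trace_building_strategy=TraceBuildingStrategies.SEQUENTIAL,
)
decoded = decoder.run(spots=spots)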
Example #12
# Finally, a local blob detector that finds spots in each (z, y, x) volume separately is applied.
# The user selects an "anchor round" and spots found in all channels of that round are used to
# seed a local search across other rounds and channels. The closest spot is selected,
# and any spots outside the search radius (here 10 pixels) are discarded.
#
# The spot finder returns an IntensityTable containing all spots from round zero. Note that many
# of these spots will have no matching spots in other rounds and channels and will therefore fail
# decoding. Because of the stringency built into the STARmap codebook, it is OK to be relatively
# permissive with the spot finding parameters for this assay.

import numpy as np
from starfish.spots import FindSpots

bd = FindSpots.BlobDetector(min_sigma=1,
                            max_sigma=8,
                            num_sigma=10,
                            threshold=np.percentile(
                                np.ravel(stack.xarray.values), 95),
                            exclude_border=2)

spots = bd.run(scaled)

###################################################################################################
# Decode spots
# ------------
# Next, spots are decoded. There is really no good way to display 3-d spot detection in 2-d planes,
# so we encourage you to grab this notebook and uncomment the lines below.

from starfish.spots import DecodeSpots
from starfish.types import TraceBuildingStrategies

decoder = DecodeSpots.PerRoundMaxChannel(
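
# The call above is cut off. A plausible completion, assumed from the local-search behaviour
# described in the comments (an anchor round plus a nearest-neighbour search within a
# 10 pixel radius), and assuming an ``experiment`` object from the omitted part of this
# example:
decoder = DecodeSpots.PerRoundMaxChannel(
    codebook=experiment.codebook,
    anchor_round=0,
    search_radius=10,
    trace_building_strategy=TraceBuildingStrategies.NEAREST_NEIGHBOR,
)
decoded = decoder.run(spots=spots)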
Example #13
def find_spots(input_path,
               output_path,
               intensity_percentile=99.995,
               filter_width=2,
               small_peak_min=4,
               small_peak_max=100,
               big_peak_min=25,
               big_peak_max=10000,
               small_peak_dist=2,
               big_peak_dist=0.75,
               block_dim_fraction=0.25,
               spot_pad_pixels=2,
               keep_existing=False):
    """
    Find and keep only spots from stitched images.

    """

    image_stack = imageio.volread(input_path)

    print(image_stack.shape)
    thresholded_image = np.copy(image_stack)

    _, height, width = image_stack.shape

    threshold = np.percentile(thresholded_image, intensity_percentile)
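    # Compress bright outliers: pixels above this threshold are replaced by the threshold plus
    # a log (base 1.1) of their excess, so isolated hot pixels do not dominate peak finding.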
    thresholded_image[thresholded_image > threshold] = threshold + (
        np.log(thresholded_image[thresholded_image > threshold] - threshold) /
        np.log(1.1)).astype(thresholded_image.dtype)

    #May need to fiddle with the sigma parameters in each step, depending on the image.

    #High Pass Filter (Background Subtraction)
    gaussian_high_pass = Filter.GaussianHighPass(sigma=(1, filter_width,
                                                        filter_width),
                                                 is_volume=True)

    # enhance brightness of spots
    laplace_filter = Filter.Laplace(sigma=(0.2, 0.5, 0.5), is_volume=True)
    local_max_peakfinder_small = FindSpots.LocalMaxPeakFinder(
        min_distance=small_peak_dist,
        stringency=0,
        min_obj_area=small_peak_min,
        max_obj_area=small_peak_max,
        min_num_spots_detected=2500,
        is_volume=True,
        verbose=True)

    local_max_peakfinder_big = FindSpots.LocalMaxPeakFinder(
        min_distance=big_peak_dist,
        stringency=0,
        min_obj_area=big_peak_min,
        max_obj_area=big_peak_max,
        min_num_spots_detected=2500,
        is_volume=True,
        verbose=True)

    synthetic_codebook = Codebook.synthetic_one_hot_codebook(n_round=1,
                                                             n_channel=1,
                                                             n_codes=1)
    decoder = DecodeSpots.PerRoundMaxChannel(codebook=synthetic_codebook)

    block_dimension = int(max(thresholded_image.shape) * block_dim_fraction)
    spot_coordinates = np.zeros((0, 2), dtype=np.int64)

    # Finding spots by block_dimension x block_dimension size blocks
    # We skip the blocks at the edges with the - 1 (TODO: pad to full block size)
    for row in range(0, height - 1, block_dimension):
        for column in range(0, width - 1, block_dimension):
            # Cutout block and expand dimensions for channel and round
            block = thresholded_image[np.newaxis, np.newaxis, :,
                                      row:row + block_dimension,
                                      column:column + block_dimension]
            images = ImageStack.from_numpy(block)
            high_pass_filtered = gaussian_high_pass.run(images,
                                                        verbose=False,
                                                        in_place=False)
            laplace = laplace_filter.run(high_pass_filtered,
                                         in_place=False,
                                         verbose=False)

            small_spots = local_max_peakfinder_small.run(
                laplace.reduce({Axes.ZPLANE}, func="max"))
            decoded_intensities = decoder.run(spots=small_spots)
            small_spot_coords = np.stack([
                decoded_intensities[Axes.Y.value],
                decoded_intensities[Axes.X.value]
            ]).T

            big_spots = local_max_peakfinder_big.run(
                laplace.reduce({Axes.ZPLANE}, func="max"))
            decoded_intensities = decoder.run(spots=big_spots)
            big_spot_coords = np.stack([
                decoded_intensities[Axes.Y.value],
                decoded_intensities[Axes.X.value]
            ]).T

            all_spot_coords = np.vstack([small_spot_coords, big_spot_coords])
            all_spot_coords += (row, column)

            spot_coordinates = np.vstack([spot_coordinates, all_spot_coords])

    # Copy max-projected pixel values in a small window around each detected spot; all other
    # pixels stay zero
    image_spots = np.zeros((height, width), dtype=np.uint16)
    for spot_coordinate in spot_coordinates:
        spot_row, spot_column = spot_coordinate
        for row in range(max(0, spot_row - spot_pad_pixels),
                         min(spot_row + spot_pad_pixels + 1, height)):
            for column in range(max(0, spot_column - spot_pad_pixels),
                                min(spot_column + spot_pad_pixels + 1, width)):
                # Max projecting over z-stack
                image_spots[row, column] = image_stack[:, row, column].max(0)

    imageio.imsave(output_path, image_spots)

    return image_spots
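
# A minimal usage sketch for find_spots; the file paths are hypothetical, and the imports
# elided from this example (imageio, numpy, and the starfish classes used above) are assumed
# to be in place.
spots_image = find_spots("stitched_round0.tif", "stitched_round0_spots.tif")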
Example #14
File: recipe.py  Project: xyanqian/starfish
def process_fov(field_num: int, experiment_str: str):
    """Process a single field of view of ISS data
    Parameters
    ----------
    field_num : int
        the field of view to process
    experiment_str : str
        path of experiment json file

    Returns
    -------
    DecodedSpots :
        tabular object containing the locations of detected spots.
    """

    fov_str: str = f"fov_{int(field_num):03d}"

    # load experiment
    experiment = starfish.Experiment.from_json(experiment_str)

    print(f"loading fov: {fov_str}")
    fov = experiment[fov_str]

    # note the structure of the 5D tensor containing the raw imaging data
    imgs = fov.get_image(FieldOfView.PRIMARY_IMAGES)
    dots = fov.get_image("dots")
    nuclei = fov.get_image("nuclei")

    print("Learning Transform")
    learn_translation = LearnTransform.Translation(reference_stack=dots,
                                                   axes=Axes.ROUND,
                                                   upsampling=1000)
    transforms_list = learn_translation.run(
        imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max"))

    print("Applying transform")
    warp = ApplyTransform.Warp()
    registered_imgs = warp.run(imgs,
                               transforms_list=transforms_list,
                               in_place=True,
                               verbose=True)

    print("Filter WhiteTophat")
    filt = Filter.WhiteTophat(masking_radius=15, is_volume=False)

    filtered_imgs = filt.run(registered_imgs, verbose=True, in_place=True)
    filt.run(dots, verbose=True, in_place=True)
    filt.run(nuclei, verbose=True, in_place=True)

    print("Detecting")
    detector = FindSpots.BlobDetector(
        min_sigma=1,
        max_sigma=10,
        num_sigma=30,
        threshold=0.01,
        measurement_type='mean',
    )
    dots_max_projector = Filter.Reduce((Axes.ROUND, Axes.ZPLANE),
                                       func=FunctionSource.np("max"))
    dots_max = dots_max_projector.run(dots)

    spots = detector.run(image_stack=filtered_imgs, reference_image=dots_max)

    decoder = DecodeSpots.PerRoundMaxChannel(codebook=experiment.codebook)
    decoded = decoder.run(spots=spots)
    df = decoded.to_decoded_dataframe()
    return df