Example #1
import numpy as np

import starfish.data
from starfish import FieldOfView
from starfish.image import Filter
from starfish.spots import DetectSpots


def test_allen_smFISH_cropped_data():

    # set a random seed to avoid errors provoked by optimization functions
    np.random.seed(777)

    # load the experiment
    experiment = starfish.data.allen_smFISH(use_test_data=True)

    primary_image = experiment.fov().get_image(FieldOfView.PRIMARY_IMAGES)

    clip = Filter.Clip(p_min=10, p_max=100)
    clipped_image = clip.run(primary_image, in_place=False)

    bandpass = Filter.Bandpass(lshort=0.5, llong=7, threshold=None, truncate=4)
    bandpassed_image = bandpass.run(clipped_image, in_place=False)

    clip = Filter.Clip(p_min=10, p_max=100, is_volume=False)
    clipped_bandpassed_image = clip.run(bandpassed_image, in_place=False)

    sigma = (1, 0, 0)  # filter only in z, do nothing in x, y
    glp = Filter.GaussianLowPass(sigma=sigma, is_volume=True)
    z_filtered_image = glp.run(clipped_bandpassed_image, in_place=False)

    lmpf = DetectSpots.TrackpyLocalMaxPeakFinder(
        spot_diameter=3,
        min_mass=300,
        max_size=3,
        separation=5,
        noise_size=0.65,
        preprocess=False,
        percentile=10,
        verbose=True,
        is_volume=True,
    )
    intensities = lmpf.run(z_filtered_image)  # noqa
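
# Added sketch (not part of the original test): a hedged illustration of a possible
# downstream decoding step. `decode_per_round_max` and `to_decoded_dataframe` are the
# starfish calls used in the ISS examples below; the helper name `decode_smfish_spots`
# is hypothetical.
def decode_smfish_spots(experiment, intensities):
    # with a one-hot (non-multiplexed) codebook, per-round-max decoding simply assigns
    # each spot the gene corresponding to its brightest (round, channel) combination
    decoded = experiment.codebook.decode_per_round_max(intensities)
    return decoded.to_decoded_dataframe()
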
Example #2
import starfish
from starfish import FieldOfView
from starfish.image import ApplyTransform, Filter, LearnTransform
from starfish.spots import DetectSpots
from starfish.types import Axes


def process_fov(field_num: int, experiment_str: str):
    """Process a single field of view of ISS data
    Parameters
    ----------
    field_num : int
        the field of view to process
    experiment_str : int
        path of experiment json file

    Returns
    -------
    DecodedSpots :
        tabular object containing the locations of detected spots.
    """

    fov_str: str = f"fov_{int(field_num):03d}"

    # load experiment
    experiment = starfish.Experiment.from_json(experiment_str)

    print(f"loading fov: {fov_str}")
    fov = experiment[fov_str]

    # note the structure of the 5D tensor containing the raw imaging data
    imgs = fov.get_image(FieldOfView.PRIMARY_IMAGES)
    dots = fov.get_image("dots")
    nuclei = fov.get_image("nuclei")

    print("Learning Transform")
    learn_translation = LearnTransform.Translation(reference_stack=dots, axes=Axes.ROUND, upsampling=1000)
    transforms_list = learn_translation.run(imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max"))

    print("Applying transform")
    warp = ApplyTransform.Warp()
    registered_imgs = warp.run(imgs, transforms_list=transforms_list, in_place=False, verbose=True)

    print("Filter WhiteTophat")
    filt = Filter.WhiteTophat(masking_radius=15, is_volume=False)

    filtered_imgs = filt.run(registered_imgs, verbose=True, in_place=False)
    filt.run(dots, verbose=True, in_place=True)
    filt.run(nuclei, verbose=True, in_place=True)

    print("Detecting")
    detector = DetectSpots.BlobDetector(
        min_sigma=1,
        max_sigma=10,
        num_sigma=30,
        threshold=0.01,
        measurement_type='mean',
    )

    intensities = detector.run(filtered_imgs, blobs_image=dots, blobs_axes=(Axes.ROUND, Axes.ZPLANE))

    decoded = experiment.codebook.decode_per_round_max(intensities)
    df = decoded.to_decoded_dataframe()
    return df
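
# Added usage sketch (assumption, not from the original source): because `process_fov`
# takes a field-of-view index and an experiment path, it can be mapped over many fields
# of view, e.g. with a multiprocessing pool. The helper name, experiment path argument,
# and worker count are placeholders.
from functools import partial
from multiprocessing import Pool


def process_experiment(experiment_path: str, num_fovs: int, n_workers: int = 4):
    run_one = partial(process_fov, experiment_str=experiment_path)
    with Pool(processes=n_workers) as pool:
        return pool.map(run_one, range(num_fovs))
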
Example #3
import starfish
from starfish.image import ApplyTransform, Filter, LearnTransform, Segment
from starfish.spots import AssignTargets, DetectSpots
from starfish.types import Axes


def iss_pipeline(fov, codebook):
    primary_image = fov.get_image(starfish.FieldOfView.PRIMARY_IMAGES)

    # register the raw image
    learn_translation = LearnTransform.Translation(
        reference_stack=fov.get_image('dots'), axes=Axes.ROUND, upsampling=100)
    max_projector = Filter.Reduce((Axes.CH, Axes.ZPLANE),
                                  func="max",
                                  module=Filter.Reduce.FunctionSource.np)
    transforms_list = learn_translation.run(max_projector.run(primary_image))
    warp = ApplyTransform.Warp()
    registered = warp.run(primary_image,
                          transforms_list=transforms_list,
                          in_place=False,
                          verbose=True)

    # filter raw data
    masking_radius = 15
    filt = Filter.WhiteTophat(masking_radius, is_volume=False)
    filtered = filt.run(registered, verbose=True, in_place=False)

    # detect spots using laplacian of gaussians approach
    p = DetectSpots.BlobDetector(
        min_sigma=1,
        max_sigma=10,
        num_sigma=30,
        threshold=0.01,
        measurement_type='mean',
    )

    intensities = p.run(filtered,
                        blobs_image=fov.get_image('dots'),
                        blobs_axes=(Axes.ROUND, Axes.ZPLANE))

    # decode the pixel traces using the codebook
    decoded = codebook.decode_per_round_max(intensities)

    # segment cells
    seg = Segment.Watershed(
        nuclei_threshold=.16,
        input_threshold=.22,
        min_distance=57,
    )
    label_image = seg.run(primary_image, fov.get_image('nuclei'))

    # assign spots to cells
    ta = AssignTargets.Label()
    assigned = ta.run(label_image, decoded)

    return assigned, label_image
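
# Added usage sketch (assumption, not from the original source): run the pipeline on the
# small ISS test dataset. `starfish.data.ISS(use_test_data=True)` mirrors the
# `starfish.data.allen_smFISH(use_test_data=True)` call in Example #1 and is assumed to
# be available; `experiment.fov()` returns a single field of view.
if __name__ == "__main__":
    import starfish.data

    experiment = starfish.data.ISS(use_test_data=True)
    assigned, label_image = iss_pipeline(experiment.fov(), experiment.codebook)
    print(assigned)
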
Example #4
# EPY: START markdown
### Decode the processed data into spatially resolved gene expression
# EPY: END markdown

# EPY: START markdown
#Decoding in a non-multiplexed, image-based transcriptomics method is equivalent to simple spot finding, since each spot in each color channel and round corresponds to a different gene. To find spots in osmFISH data, the authors employ a peak finder that identifies local maxima whose absolute intensities exceed a threshold value and distinguishes them from their surroundings. It tests a number of different thresholds, building a curve from the number of peaks detected at each threshold. A threshold in the _stable region_ or _knee_ of the curve is selected, and final peaks are called with that threshold.
#
#This process is repeated independently for each round and channel. Here we show this process on a single round and channel to demonstrate the procedure. See the documentation for a precise description of the parameters.
# EPY: END markdown

# EPY: START code
from starfish.spots import DetectSpots

lmp = DetectSpots.LocalMaxPeakFinder(
    min_distance=6,
    stringency=0,
    min_obj_area=6,
    max_obj_area=600,
)
spot_intensities = lmp.run(mp)

# EPY: END code
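
# EPY: START markdown
#(Added illustration, not from the original notebook.) The threshold sweep described above can be
#sketched directly with numpy and scikit-image: count the peaks detected at a range of absolute
#thresholds and look for the flattest, _stable_ part of the resulting curve. The helper below is
#only a toy version of this idea; `LocalMaxPeakFinder` implements its own, more careful selection.
# EPY: END markdown

# EPY: START code
import numpy as np
from skimage.feature import peak_local_max


def sketch_threshold_knee(image, n_thresholds=100, min_distance=6):
    """Toy illustration of picking a spot-calling threshold from a peak-count curve."""
    thresholds = np.linspace(image.min(), image.max(), n_thresholds, endpoint=False)
    counts = np.array([
        peak_local_max(image, min_distance=min_distance, threshold_abs=float(t)).shape[0]
        for t in thresholds
    ])
    # only consider thresholds that still detect at least one peak, then pick the point where
    # the peak count changes least between neighboring thresholds (the "stable region")
    diffs = np.abs(np.diff(counts)).astype(float)
    valid = (counts[:-1] > 0) & (counts[1:] > 0)
    if not valid.any():
        return float(thresholds[0])
    diffs[~valid] = np.inf
    return float(thresholds[int(np.argmin(diffs))])
# EPY: END code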

# EPY: START markdown
### Compare to pySMFISH peak calls
# EPY: END markdown

# EPY: START markdown
#The field of view that we've used for the test data corresponds to Aldoc, imaged in round one, in position 33. We've also packaged the results from the osmFISH publication for this target to demonstrate that starfish is capable of recovering the same results.
# EPY: END markdown

Example #5
# EPY: START markdown
### Decode the processed data into spatially resolved gene expression profiles
#
#To decode, we first find spots and record, for each spot, the average pixel intensities across rounds and channels. This spot detection can be achieved by the ```BlobDetector``` algorithm.
# EPY: END markdown

# EPY: START code
from starfish.spots import DetectSpots
import warnings

# parameters to define the allowable gaussian sizes (parameter space)
p = DetectSpots.BlobDetector(
    min_sigma=1,
    max_sigma=10,
    num_sigma=30,
    threshold=0.01,
    measurement_type='mean',
)

# blobs = dots; detect spots in the dots image, then measure their intensities across the stack.
intensities = p.run(registered_imgs,
                    blobs_image=dots,
                    blobs_axes=(Axes.ROUND, Axes.ZPLANE))
# EPY: END code

# EPY: START markdown
#The resulting "Intensity Table" is the standardized file format for the output of a spot detector, and is the first output file format in the pipeline that is not an image or set of images.
# EPY: END markdown
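
# EPY: START markdown
#(Added note, not from the original notebook.) The IntensityTable subclasses `xarray.DataArray`,
#so it can be inspected with ordinary xarray operations; the dimension and coordinate names below
#reflect how starfish labels detected features, channels, and rounds.
# EPY: END markdown

# EPY: START code
# number of detected features, channels, and rounds
print(intensities.sizes)

# per-feature spatial positions and radii are stored as coordinates on the features dimension
print(intensities.coords)

# the full (channel x round) intensity trace of the first detected spot
print(intensities[0].values)
# EPY: END code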

# EPY: START code