Example #1
import matplotlib.pyplot as plt
import starfish
from starfish.types import Axes
from starfish.util.plot import imshow_plane

def imshow_3channels(stack: starfish.ImageStack, r: int):
    """Plot channels 0, 1, and 2 of imaging round ``r`` side by side."""
    fig = plt.figure(dpi=150)
    ax1 = fig.add_subplot(131, title='ch: 0')
    ax2 = fig.add_subplot(132, title='ch: 1')
    ax3 = fig.add_subplot(133, title='ch: 2')
    imshow_plane(stack, sel={Axes.ROUND: r, Axes.CH: 0}, ax=ax1)
    imshow_plane(stack, sel={Axes.ROUND: r, Axes.CH: 1}, ax=ax2)
    imshow_plane(stack, sel={Axes.ROUND: r, Axes.CH: 2}, ax=ax3)
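
# A minimal usage sketch, assuming the small ISS test dataset (an illustrative choice):
exp = starfish.data.ISS(use_test_data=True)
imgs = exp.fov().get_image(starfish.FieldOfView.PRIMARY_IMAGES)
imshow_3channels(imgs, r=0)
plt.show()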
Example #2
###################################################################################################
# Visualize raw data
# ------------------
# A nice way to page through all this data is to use the display command. We have commented this out
# for now, because it will not render in the docs. Instead, we simply show an image from the first
# round and color channel.

# display(imgs)

from starfish.types import Axes
from starfish.util.plot import imshow_plane

# for this vignette, we'll pick one plane and track it through the processing steps
plane_selector = {Axes.CH: 0, Axes.ROUND: 0, Axes.ZPLANE: 0}
imshow_plane(imgs, sel=plane_selector, title='Round: 0, Channel: 0')

###################################################################################################
# Filter and scale raw data before decoding into spatially resolved gene expression
# ---------------------------------------------------------------------------------
# First, we equalize the intensity of the images by scaling each image by its maximum intensity,
# which is equivalent to scaling by the 100th percentile value of the pixel values in each image.

from starfish.image import Filter
from starfish.types import Levels

sc_filt = Filter.Clip(p_max=100, level_method=Levels.SCALE_BY_CHUNK)
norm_imgs = sc_filt.run(imgs)
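
###################################################################################################
# A quick sanity-check sketch: after scaling, each (round, channel) chunk of the stack
# should peak at an intensity of 1.0.

for r in range(norm_imgs.num_rounds):
    for c in range(norm_imgs.num_chs):
        chunk_max = norm_imgs.sel({Axes.ROUND: r, Axes.CH: c}).xarray.max()
        print(f"round {r}, channel {c}: max = {float(chunk_max):.3f}")  # expect ~1.0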

###################################################################################################
# Next, for each imaging round, and each pixel location, we zero the intensity values across all
Example #3
fov = experiment.fov()
imgs = fov.get_image(FieldOfView.PRIMARY_IMAGES)  # primary images
dots = fov.get_image("dots")  # reference round in which every spot is labeled with a fluorophore

# filter raw data
masking_radius = 15
filt = Filter.WhiteTophat(masking_radius, is_volume=False)
filt.run(imgs, in_place=True)
filt.run(dots, in_place=True)

# register primary images to reference round
learn_translation = LearnTransform.Translation(reference_stack=dots, axes=Axes.ROUND, upsampling=1000)
transforms_list = learn_translation.run(imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max"))
warp = ApplyTransform.Warp()
warp.run(imgs, transforms_list=transforms_list, in_place=True)

# view dots to estimate the radius of spots: radii range from 1.5 to 4 pixels
imshow_plane(dots, sel={Axes.X: (500, 550), Axes.Y: (600, 650)})

# run blob detector with dots as the reference image.
# following the guideline sigma = radius / sqrt(2) for 2D images, radii of 1.5-4 pixels
# give sigma of roughly 1.1-2.8, hence min_sigma=1 and max_sigma=3.
# threshold is set conservatively low
bd = FindSpots.BlobDetector(
    min_sigma=1,
    max_sigma=3,
    num_sigma=10,
    threshold=0.01,
    is_volume=False,
    measurement_type='mean',
)
spots = bd.run(image_stack=imgs, reference_image=dots)
Example #4
###################################################################################################
# Select one plane
# ----------------
# starfish data are 5-dimensional, but to demonstrate what they look like in a non-interactive
# fashion, it's best to visualize the data in 2D. For interactive visualization with napari, use
# the :py:func:`.display` function.

from starfish.types import Axes
from starfish.util.plot import imshow_plane

# for this vignette, we'll pick one plane and track it through the processing steps
plane_selector = {Axes.CH: 0, Axes.ROUND: 0, Axes.ZPLANE: 8}

f, (ax1, ax2) = plt.subplots(ncols=2)
imshow_plane(img, sel=plane_selector, ax=ax1, title="primary image")
imshow_plane(nissl, sel=plane_selector, ax=ax2, title="nissl image")

###################################################################################################
# Register the data
# -----------------
# The first step in BaristaSeq is rough registration. For this dataset, the rough registration
# has already been done by the authors, so it is omitted from this notebook.

###################################################################################################
# Project into 2D
# ---------------
# BaristaSeq is typically processed in 2D. Starfish allows users to reduce data using arbitrary
# methods via :py:class:`.image.Filter.Reduce`. Here we max project Z for both the nissl images and
# the primary images.
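#
# A minimal sketch of that projection, assuming the stacks are named ``img`` and ``nissl`` as
# above; :py:class:`.Filter.Reduce` applies the given function over the listed axes.

from starfish.image import Filter

max_projector = Filter.Reduce({Axes.ZPLANE}, func="max")
img_2d = max_projector.run(img)
nissl_2d = max_projector.run(nissl)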
Example #5
File: ISS.py  Project: zwdiscover/starfish
# EPY: START markdown
### Visualize raw data
#
#A nice way to page through all this data is to use the display command. We have commented this out for now, because it will not render on GitHub. Instead, we simply show an image from the first round and color channel.
# EPY: END markdown

# EPY: START code
# # Display all the data in an interactive pop-up window. Uncomment to have this version work.
# %gui qt5
# display(imgs)

# Display a single plane of data
sel = {Axes.ROUND: 0, Axes.CH: 0, Axes.ZPLANE: 0}
single_plane = imgs.sel(sel)
imshow_plane(single_plane, title="Round: 0, Channel: 0")
# EPY: END code

# EPY: START markdown
#'dots' is a general stain for all possible transcripts. This image should correspond to the maximum projection of all color channels within a single imaging round. This auxiliary image is useful for registering images from multiple imaging rounds to this reference image. We'll see an example of this further on in the notebook.
# EPY: END markdown

# EPY: START code
dots = fov.get_image("dots")
dots_single_plane = dots.max_proj(Axes.ROUND, Axes.CH, Axes.ZPLANE)
imshow_plane(dots_single_plane, title="Anchor channel, all RNA molecules")
# EPY: END code

# EPY: START markdown
#Below is a DAPI image, which specifically marks nuclei. This is useful for cell segmentation later in the processing.
# EPY: END markdown
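
# EPY: START markdown
#A minimal sketch of loading and viewing that image, assuming the DAPI stain is available as the auxiliary image "nuclei" (the key name is an assumption for this experiment):
# EPY: END markdown

# EPY: START code
nuclei = fov.get_image("nuclei")  # "nuclei" is the assumed auxiliary image key
nuclei_single_plane = nuclei.max_proj(Axes.ROUND, Axes.CH, Axes.ZPLANE)
imshow_plane(nuclei_single_plane, title="Nuclei (DAPI)")
# EPY: END code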
Example #6
projection = imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max")
reference_image = projection.sel({Axes.ROUND: 0})
ltt = LearnTransform.Translation(reference_stack=reference_image,
                                 axes=Axes.ROUND,
                                 upsampling=1000)
transforms = ltt.run(projection)
warp = ApplyTransform.Warp()
imgs = warp.run(stack=imgs, transforms_list=transforms)

# make reference round for decoding
dots = imgs.reduce({Axes.CH, Axes.ROUND}, func="max")

# view a cropped region of image for spot finding
imshow_plane(dots,
             sel={
                 Axes.ZPLANE: 15,
                 Axes.X: (400, 600),
                 Axes.Y: (400, 600)
             })

####################################################################################################
# The ``dots`` reference image shows that spots are approximately 10 pixels in diameter and can
# be tightly packed together, which helped inform the parameter settings of the spot finders
# below. However, accuracy can always be improved by further tuning parameters. For example, if
# low-intensity background noise is being detected as spots, increasing values for
# ``threshold``, ``stringency``, ``min_mass``, and ``percentile`` can remove them. If large
# spots are being missed, increasing values such as ``max_sigma``, ``max_obj_area``,
# ``spot_diameter``, and ``max_size`` could include them. Moreover, signal enhancement and
# background reduction prior to this step can also improve the accuracy of spot finding.

bd = FindSpots.BlobDetector(
    min_sigma=2,
Example #7
# Maximum project ImageStack along the z-axis
from starfish.types import Axes

projection = stack.reduce({Axes.ZPLANE}, func="max")
print(projection)

# Plot
import matplotlib
import matplotlib.pyplot as plt
from starfish.util.plot import imshow_plane
matplotlib.rcParams["figure.dpi"] = 150
f, (ax1, ax2, ax3) = plt.subplots(ncols=3)

# Plot one z-plane (z=10) of the original, unprojected ImageStack
imshow_plane(stack,
             sel={
                 Axes.ROUND: 0,
                 Axes.CH: 1,
                 Axes.ZPLANE: 10
             },
             ax=ax1,
             title='Z: 10')
imshow_plane(projection,
             sel={
                 Axes.ROUND: 0,
                 Axes.CH: 1
             },
             ax=ax2,
             title='Max Projection')
# Plot ROI of projected image
selector = {Axes.CH: 0, Axes.ROUND: 0, Axes.X: (400, 600), Axes.Y: (550, 750)}
imshow_plane(projection, sel=selector, ax=ax3, title='Max Projection\nROI')
Example #8
#rounds. Ideally, these should form fairly coherent spots, indicating that the
#data are well registered. By contrast, if there are patterns whereby pairs of
#spots are consistently present at small shifts, that can indicate systematic
#registration offsets which should be corrected prior to analysis.
# EPY: END markdown

# EPY: START code
experiment = starfish.data.STARmap(use_test_data=True)
stack = experiment['fov_000'].get_image('primary')
# EPY: END code

# EPY: START code
ch_r_projection = stack.max_proj(Axes.CH, Axes.ROUND)

f = plt.figure(dpi=150)
imshow_plane(ch_r_projection, sel={Axes.ZPLANE: 15})
# EPY: END code

# EPY: START markdown
#Visualize the codebook
#----------------------
#The STARmap codebook maps pixel intensities across the rounds and channels to
#the corresponding barcodes and genes that those pixels code for. For this
#dataset, the codebook specifies 160 gene targets.
# EPY: END markdown

# EPY: START code
print(experiment.codebook)
# EPY: END code

# EPY: START markdown